author     Michael Marineau <marineam@gentoo.org>  2008-02-22 23:48:42 +0000
committer  Michael Marineau <marineam@gentoo.org>  2008-02-22 23:48:42 +0000
commit     03c9445642841756b9c5fb6d5ff7873d0adcad7d (patch)
tree       39234eb1416d0fa0b113b8e5a050d33f60ec1246
parent     update readme for 2.6.21 (diff)
download   xen-03c9445642841756b9c5fb6d5ff7873d0adcad7d.tar.gz
           xen-03c9445642841756b9c5fb6d5ff7873d0adcad7d.tar.bz2
           xen-03c9445642841756b9c5fb6d5ff7873d0adcad7d.zip
Releasing 2.6.20-5
svn path=/patches/; revision=73
-rw-r--r--  tags/2.6.20-5/00000_README | 63
-rw-r--r--  tags/2.6.20-5/20950_linux-2.6.20.14-xen-3.1.0.patch | 93230
-rw-r--r--  tags/2.6.20-5/20952-linux-2.6-xen-x86_64-silence-up-apic-errors.patch | 13
-rw-r--r--  tags/2.6.20-5/20957-linux-2.6-fix-x86_64-vgetcpu.patch | 65
-rw-r--r--  tags/2.6.20-5/20958-linux-2.6-xen-iscsi-x86_64-no_iommu_init.patch | 45
-rw-r--r--  tags/2.6.20-5/20960-linux-2.6-xen-blkfront-wait-add.patch | 82
-rw-r--r--  tags/2.6.20-5/20961_linux-2.6-xen-backwards-time.patch | 63
-rw-r--r--  tags/2.6.20-5/20962_linux-2.6-xen-add-packet_auxdata-cmsg-1.patch | 233
-rw-r--r--  tags/2.6.20-5/20963_linux-2.6-xen-add-packet_auxdata-cmsg-2.patch | 144
-rw-r--r--  tags/2.6.20-5/20965_linux-2.6-xen-sleazy-fpu-i386.patch | 96
-rw-r--r--  tags/2.6.20-5/20966_linux-2.6-xen-sleazy-fpu-x86_64.patch | 93
-rw-r--r--  tags/2.6.20-5/21665-linux-2.6-disable-netback-checksum.patch | 37
-rw-r--r--  tags/2.6.20-5/23000-linux-2.6-acpi-config_pm-poweroff.patch | 38
-rw-r--r--  tags/2.6.20-5/26000_linux-2.6-cve-2008-0600.patch | 37
-rw-r--r--  tags/2.6.20-5/30037_amd64-zero-extend-32bit-ptrace-xen.patch | 50
-rw-r--r--  tags/2.6.20-5/40001_i386-fix-xen_l1_entry_update-for-highptes.patch | 26
-rw-r--r--  tags/2.6.20-5/50001_make-install.patch | 52
-rw-r--r--  tags/2.6.20-5/50002_always-enable-xen-genapic.patch | 12
-rw-r--r--  tags/2.6.20-5/50003_console-tty-fix.patch | 75
-rw-r--r--  tags/2.6.20-5/50004_quirks-no-smp-fix.patch | 12
-rw-r--r--  tags/2.6.20-5/50006_pgetable-build-fix.patch | 14
-rw-r--r--  tags/2.6.20-5/50008_reenable-tls-warning.patch | 23
-rw-r--r--  tags/2.6.20-5/50009_gentooify-tls-warning.patch | 16
23 files changed, 94519 insertions, 0 deletions
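
The README added below says these patches are intended to be stacked, in numeric order, on top of a genpatches-base kernel tree. As a minimal sketch only (the tree path and the plain "patch -p1" invocation are assumptions, not something this commit ships), the 2.6.20-5 series could be applied roughly like this:

    # Hypothetical sketch: apply the series to a 2.6.20 tree that already has
    # genpatches-base applied. The shell's lexical glob order matches the
    # five-digit numeric prefixes, so the patches stack in the intended order.
    cd linux-2.6.20
    for p in /path/to/tags/2.6.20-5/*.patch; do
        patch -p1 < "$p" || break   # stop at the first patch that fails to apply
    done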
diff --git a/tags/2.6.20-5/00000_README b/tags/2.6.20-5/00000_README
new file mode 100644
index 0000000..7673e98
--- /dev/null
+++ b/tags/2.6.20-5/00000_README
@@ -0,0 +1,63 @@
+Xen Patches README
+------------------
+
+These patches are intended to be stacked on top of genpatches-base.
+
+Many of the patches included here are swiped from various sources which
+use their own four-digit patch numbering scheme, so we are stuck with five
+digits to indicate the source for easier tracking and re-syncing.
+
+Numbering
+---------
+
+0xxxx Gentoo, not related to Xen. (in case we pull something from extras)
+1xxxx XenSource, upstream Xen patch for 2.6.18
+2xxxx Redhat, we use their Xen patch for >=2.6.20
+3xxxx Debian, we use their security fixes for 2.6.18
+4xxxx Misc
+5xxxx Gentoo, Xen and other fixes for Redhat and/or Debian patches.
+
+Patches
+-------
+
+20950_linux-2.6.20.14-xen-3.1.0.patch
+ Main Xen patch
+
+20xxx-?
+ Various bug-fix patches from Redhat.
+
+26000_linux-2.6-cve-2008-0600.patch
+ Fix the vmsplice issue CVE-2008-0600 from the 2.6.21 patchset.
+
+30037_amd64-zero-extend-32bit-ptrace-xen.patch
+ [SECURITY] Zero extend all registers after ptrace in 32-bit entry path
+ (Xen).
+ See CVE-2007-4573
+
+40001_i386-fix-xen_l1_entry_update-for-highptes.patch
+ Fix for kernels compiled with CONFIG_HIGHPTE.
+ Pulled from linux-2.6.18-xen.hg, changeset e79729740288.
+
+50001_make-install.patch
+ Handle make install in a semi-sane way that plays nice with
+ split domU/dom0 kernels.
+
+50002_always-enable-xen-genapic.patch
+ Compile fix for non-SMP (UP) kernels. Since UP support is broken in
+ upstream Xen, I'm not sure if I trust it or not. :-P
+
+50003_console-tty-fix.patch
+ Steal tty1-63 as the upstream Xen release does, so people don't get
+ any surprises. Redhat switched to using the special Xen tty device.
+
+50004_quirks-no-smp-fix.patch
+ Another compile fix for non-SMP (UP) kernels.
+
+50006_pgetable-build-fix.patch
+ Fix a function re-definition error when PAE is not enabled.
+
+50008_reenable-tls-warning.patch
+ Issue only one big fat tls warning as upstream Xen does.
+
+50009_gentooify-tls-warning.patch
+ Change tls warning instructions to apply directly to Gentoo.
diff --git a/tags/2.6.20-5/20950_linux-2.6.20.14-xen-3.1.0.patch b/tags/2.6.20-5/20950_linux-2.6.20.14-xen-3.1.0.patch
new file mode 100644
index 0000000..b46f4a4
--- /dev/null
+++ b/tags/2.6.20-5/20950_linux-2.6.20.14-xen-3.1.0.patch
@@ -0,0 +1,93230 @@
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/Kconfig
+--- a/arch/i386/Kconfig Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/Kconfig Wed Aug 08 16:25:28 2007 -0300
+@@ -16,6 +16,7 @@ config X86_32
+
+ config GENERIC_TIME
+ bool
++ depends on !X86_XEN
+ default y
+
+ config LOCKDEP_SUPPORT
+@@ -107,6 +108,15 @@ config X86_PC
+ bool "PC-compatible"
+ help
+ Choose this option if your computer is a standard PC or compatible.
++
++config X86_XEN
++ bool "Xen-compatible"
++ select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST
++ select X86_UP_IOAPIC if !SMP && XEN_PRIVILEGED_GUEST
++ select SWIOTLB
++ help
++ Choose this option if you plan to run this kernel on top of the
++ Xen Hypervisor.
+
+ config X86_ELAN
+ bool "AMD Elan"
+@@ -229,6 +239,7 @@ source "arch/i386/Kconfig.cpu"
+
+ config HPET_TIMER
+ bool "HPET Timer Support"
++ depends on !X86_XEN
+ help
+ This enables the use of the HPET for the kernel's internal timer.
+ HPET is the next generation timer replacing legacy 8254s.
+@@ -279,7 +290,7 @@ source "kernel/Kconfig.preempt"
+
+ config X86_UP_APIC
+ bool "Local APIC support on uniprocessors"
+- depends on !SMP && !(X86_VISWS || X86_VOYAGER || X86_GENERICARCH)
++ depends on !SMP && !(X86_VISWS || X86_VOYAGER || X86_GENERICARCH || XEN_UNPRIVILEGED_GUEST)
+ help
+ A local APIC (Advanced Programmable Interrupt Controller) is an
+ integrated interrupt controller in the CPU. If you have a single-CPU
+@@ -304,12 +315,12 @@ config X86_UP_IOAPIC
+
+ config X86_LOCAL_APIC
+ bool
+- depends on X86_UP_APIC || ((X86_VISWS || SMP) && !X86_VOYAGER) || X86_GENERICARCH
++ depends on X86_UP_APIC || ((X86_VISWS || SMP) && !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)) || X86_GENERICARCH
+ default y
+
+ config X86_IO_APIC
+ bool
+- depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER)) || X86_GENERICARCH
++ depends on X86_UP_IOAPIC || (SMP && !(X86_VISWS || X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)) || X86_GENERICARCH
+ default y
+
+ config X86_VISWS_APIC
+@@ -319,7 +330,7 @@ config X86_VISWS_APIC
+
+ config X86_MCE
+ bool "Machine Check Exception"
+- depends on !X86_VOYAGER
++ depends on !(X86_VOYAGER || X86_XEN)
+ ---help---
+ Machine Check Exception support allows the processor to notify the
+ kernel if it detects a problem (e.g. overheating, component failure).
+@@ -418,6 +429,7 @@ config X86_REBOOTFIXUPS
+
+ config MICROCODE
+ tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ select FW_LOADER
+ ---help---
+ If you say Y here and also to "/dev file system support" in the
+@@ -441,6 +453,7 @@ config MICROCODE_OLD_INTERFACE
+
+ config X86_MSR
+ tristate "/dev/cpu/*/msr - Model-specific register support"
++ depends on !X86_XEN
+ help
+ This device gives privileged processes access to the x86
+ Model-Specific Registers (MSRs). It is a character device with
+@@ -455,6 +468,10 @@ config X86_CPUID
+ be executed on a specific processor. It is a character device
+ with major 203 and minors 0 to 31 for /dev/cpu/0/cpuid to
+ /dev/cpu/31/cpuid.
++
++config SWIOTLB
++ bool
++ default n
+
+ source "drivers/firmware/Kconfig"
+
+@@ -638,6 +655,7 @@ config HIGHPTE
+
+ config MATH_EMULATION
+ bool "Math emulation"
++ depends on !X86_XEN
+ ---help---
+ Linux can emulate a math coprocessor (used for floating point
+ operations) if you don't have one. 486DX and Pentium processors have
+@@ -663,6 +681,8 @@ config MATH_EMULATION
+
+ config MTRR
+ bool "MTRR (Memory Type Range Register) support"
++ depends on !XEN_UNPRIVILEGED_GUEST
++ default y if X86_XEN
+ ---help---
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+@@ -697,7 +717,7 @@ config MTRR
+
+ config EFI
+ bool "Boot from EFI support"
+- depends on ACPI
++ depends on ACPI && !X86_XEN
+ default n
+ ---help---
+ This enables the kernel to boot on EFI platforms using
+@@ -715,7 +735,7 @@ config EFI
+
+ config IRQBALANCE
+ bool "Enable kernel irq balancing"
+- depends on SMP && X86_IO_APIC
++ depends on SMP && X86_IO_APIC && !X86_XEN
+ default y
+ help
+ The default yes will allow the kernel to do irq load balancing.
+@@ -749,6 +769,7 @@ source kernel/Kconfig.hz
+
+ config KEXEC
+ bool "kexec system call"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -866,6 +887,7 @@ config COMPAT_VDSO
+ bool "Compat VDSO support"
+ default y
+ depends on !PARAVIRT
++ depends on !X86_XEN
+ help
+ Map the VDSO to the predictable old-style address too.
+ ---help---
+@@ -882,18 +904,20 @@ config ARCH_ENABLE_MEMORY_HOTPLUG
+ depends on HIGHMEM
+
+ menu "Power management options (ACPI, APM)"
+- depends on !X86_VOYAGER
+-
++ depends on !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
++
++if !X86_XEN
+ source kernel/power/Kconfig
++endif
+
+ source "drivers/acpi/Kconfig"
+
+ menu "APM (Advanced Power Management) BIOS Support"
+-depends on PM && !X86_VISWS
++depends on PM && !(X86_VISWS || X86_XEN)
+
+ config APM
+ tristate "APM (Advanced Power Management) BIOS support"
+- depends on PM
++ depends on PM && PM_LEGACY
+ ---help---
+ APM is a BIOS specification for saving power using several different
+ techniques. This is mostly useful for battery powered laptops with
+@@ -1078,6 +1102,7 @@ choice
+
+ config PCI_GOBIOS
+ bool "BIOS"
++ depends on !X86_XEN
+
+ config PCI_GOMMCONFIG
+ bool "MMConfig"
+@@ -1085,6 +1110,13 @@ config PCI_GODIRECT
+ config PCI_GODIRECT
+ bool "Direct"
+
++config PCI_GOXEN_FE
++ bool "Xen PCI Frontend"
++ depends on X86_XEN
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
+ config PCI_GOANY
+ bool "Any"
+
+@@ -1092,7 +1124,7 @@ endchoice
+
+ config PCI_BIOS
+ bool
+- depends on !X86_VISWS && PCI && (PCI_GOBIOS || PCI_GOANY)
++ depends on !(X86_VISWS || X86_XEN) && PCI && (PCI_GOBIOS || PCI_GOANY)
+ default y
+
+ config PCI_DIRECT
+@@ -1104,6 +1136,18 @@ config PCI_MMCONFIG
+ bool
+ depends on PCI && ACPI && (PCI_GOMMCONFIG || PCI_GOANY)
+ default y
++
++config XEN_PCIDEV_FRONTEND
++ bool
++ depends on PCI && X86_XEN && (PCI_GOXEN_FE || PCI_GOANY)
++ default y
++
++config XEN_PCIDEV_FE_DEBUG
++ bool "Xen PCI Frontend Debugging"
++ depends on XEN_PCIDEV_FRONTEND
++ default n
++ help
++ Enables some debug statements within the PCI Frontend.
+
+ source "drivers/pci/pcie/Kconfig"
+
+@@ -1115,7 +1159,7 @@ config ISA_DMA_API
+
+ config ISA
+ bool "ISA support"
+- depends on !(X86_VOYAGER || X86_VISWS)
++ depends on !(X86_VOYAGER || X86_VISWS || X86_XEN)
+ help
+ Find out whether you have ISA slots on your motherboard. ISA is the
+ name of a bus system, i.e. the way the CPU talks to the other stuff
+@@ -1142,7 +1186,7 @@ source "drivers/eisa/Kconfig"
+ source "drivers/eisa/Kconfig"
+
+ config MCA
+- bool "MCA support" if !(X86_VISWS || X86_VOYAGER)
++ bool "MCA support" if !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ default y if X86_VOYAGER
+ help
+ MicroChannel Architecture is found in some IBM PS/2 machines and
+@@ -1218,6 +1262,8 @@ source "security/Kconfig"
+
+ source "crypto/Kconfig"
+
++source "drivers/xen/Kconfig"
++
+ source "lib/Kconfig"
+
+ #
+@@ -1243,7 +1289,7 @@ config X86_SMP
+
+ config X86_HT
+ bool
+- depends on SMP && !(X86_VISWS || X86_VOYAGER)
++ depends on SMP && !(X86_VISWS || X86_VOYAGER || X86_XEN)
+ default y
+
+ config X86_BIOS_REBOOT
+@@ -1256,6 +1302,16 @@ config X86_TRAMPOLINE
+ depends on X86_SMP || (X86_VOYAGER && SMP)
+ default y
+
++config X86_NO_TSS
++ bool
++ depends on X86_XEN
++ default y
++
++config X86_NO_IDT
++ bool
++ depends on X86_XEN
++ default y
++
+ config KTIME_SCALAR
+ bool
+ default y
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/Kconfig.cpu
+--- a/arch/i386/Kconfig.cpu Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/Kconfig.cpu Wed Aug 08 16:25:28 2007 -0300
+@@ -267,7 +267,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ bool
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !X86_NO_IDT
+ default y
+
+ config X86_WP_WORKS_OK
+@@ -327,5 +327,5 @@ config X86_OOSTORE
+
+ config X86_TSC
+ bool
+- depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ
+- default y
++ depends on (MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ && !X86_XEN
++ default y
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/Kconfig.debug
+--- a/arch/i386/Kconfig.debug Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/Kconfig.debug Wed Aug 08 16:25:28 2007 -0300
+@@ -79,6 +79,7 @@ config DOUBLEFAULT
+ config DOUBLEFAULT
+ default y
+ bool "Enable doublefault exception handler" if EMBEDDED
++ depends on !X86_NO_TSS
+ help
+ This option allows trapping of rare doublefault exceptions that
+ would otherwise cause a system to silently reboot. Disabling this
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/Makefile
+--- a/arch/i386/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -60,6 +60,11 @@ AFLAGS += $(call as-instr,.cfi_startproc
+
+ CFLAGS += $(cflags-y)
+
++cppflags-$(CONFIG_XEN) += \
++ -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
++
++CPPFLAGS += $(cppflags-y)
++
+ # Default subarch .c files
+ mcore-y := mach-default
+
+@@ -82,6 +87,10 @@ mcore-$(CONFIG_X86_BIGSMP) := mach-defau
+ #Summit subarch support
+ mflags-$(CONFIG_X86_SUMMIT) := -Iinclude/asm-i386/mach-summit
+ mcore-$(CONFIG_X86_SUMMIT) := mach-default
++
++# Xen subarch support
++mflags-$(CONFIG_X86_XEN) := -Iinclude/asm-i386/mach-xen
++mcore-$(CONFIG_X86_XEN) := mach-xen
+
+ # generic subarchitecture
+ mflags-$(CONFIG_X86_GENERICARCH) := -Iinclude/asm-i386/mach-generic
+@@ -117,6 +126,19 @@ PHONY += zImage bzImage compressed zlilo
+ PHONY += zImage bzImage compressed zlilo bzlilo \
+ zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
+
++ifdef CONFIG_XEN
++CPPFLAGS := -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++head-y := arch/i386/kernel/head-xen.o arch/i386/kernel/init_task.o
++boot := arch/i386/boot-xen
++.PHONY: vmlinuz
++all: vmlinuz
++
++vmlinuz: vmlinux
++ $(Q)$(MAKE) $(build)=$(boot) $@
++
++install:
++ $(Q)$(MAKE) $(build)=$(boot) XENGUEST=$(XENGUEST) $@
++else
+ all: bzImage
+
+ # KBUILD_IMAGE specify target image being built
+@@ -139,6 +161,7 @@ fdimage fdimage144 fdimage288 isoimage:
+
+ install:
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
++endif
+
+ archclean:
+ $(Q)$(MAKE) $(clean)=arch/i386/boot
+@@ -157,3 +180,4 @@ CLEAN_FILES += arch/$(ARCH)/boot/fdimage
+ CLEAN_FILES += arch/$(ARCH)/boot/fdimage \
+ arch/$(ARCH)/boot/image.iso \
+ arch/$(ARCH)/boot/mtools.conf
++CLEAN_FILES += vmlinuz vmlinux-stripped
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/boot-xen/Makefile
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/boot-xen/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,21 @@
++
++OBJCOPYFLAGS := -g --strip-unneeded
++
++vmlinuz: vmlinux-stripped FORCE
++ $(call if_changed,gzip)
++
++vmlinux-stripped: vmlinux FORCE
++ $(call if_changed,objcopy)
++
++INSTALL_ROOT := $(patsubst %/boot,%,$(INSTALL_PATH))
++
++XINSTALL_NAME ?= $(KERNELRELEASE)
++install:
++ mkdir -p $(INSTALL_ROOT)/boot
++ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL).$(SUBLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
++ rm -f $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ install -m0644 vmlinuz $(INSTALL_ROOT)/boot/vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ install -m0644 vmlinux $(INSTALL_ROOT)/boot/vmlinux-syms-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ install -m0664 .config $(INSTALL_ROOT)/boot/config-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ install -m0664 System.map $(INSTALL_ROOT)/boot/System.map-$(XINSTALL_NAME)$(INSTALL_SUFFIX)
++ ln -f -s vmlinuz-$(XINSTALL_NAME)$(INSTALL_SUFFIX) $(INSTALL_ROOT)/boot/vmlinuz-$(VERSION).$(PATCHLEVEL)$(XENGUEST)$(INSTALL_SUFFIX)
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/Makefile
+--- a/arch/i386/kernel/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -47,6 +47,12 @@ EXTRA_AFLAGS := -traditional
+
+ obj-$(CONFIG_SCx200) += scx200.o
+
++ifdef CONFIG_XEN
++vsyscall_note := vsyscall-note-xen.o
++else
++vsyscall_note := vsyscall-note.o
++endif
++
+ # vsyscall.o contains the vsyscall DSO images as __initdata.
+ # We must build both images before we can assemble it.
+ # Note: kbuild does not track this dependency due to usage of .incbin
+@@ -68,7 +74,7 @@ SYSCFLAGS_vsyscall-int80.so = $(vsyscall
+
+ $(obj)/vsyscall-int80.so $(obj)/vsyscall-sysenter.so: \
+ $(obj)/vsyscall-%.so: $(src)/vsyscall.lds \
+- $(obj)/vsyscall-%.o $(obj)/vsyscall-note.o FORCE
++ $(obj)/vsyscall-%.o $(obj)/$(vsyscall_note) FORCE
+ $(call if_changed,syscall)
+
+ # We also create a special relocatable object that should mirror the symbol
+@@ -80,9 +86,21 @@ extra-y += vsyscall-syms.o
+
+ SYSCFLAGS_vsyscall-syms.o = -r
+ $(obj)/vsyscall-syms.o: $(src)/vsyscall.lds \
+- $(obj)/vsyscall-sysenter.o $(obj)/vsyscall-note.o FORCE
++ $(obj)/vsyscall-sysenter.o $(obj)/$(vsyscall_note) FORCE
+ $(call if_changed,syscall)
+
+ k8-y += ../../x86_64/kernel/k8.o
+ stacktrace-y += ../../x86_64/kernel/stacktrace.o
+
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++
++obj-y += fixup.o
++microcode-$(subst m,y,$(CONFIG_MICROCODE)) := microcode-xen.o
++n-obj-xen := i8259.o timers/ reboot.o smpboot.o trampoline.o
++
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
++obj-y := $(call cherrypickxen, $(obj-y))
++extra-y := $(call cherrypickxen, $(extra-y))
++%/head-xen.o %/head-xen.s: EXTRA_AFLAGS :=
++endif
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/acpi/Makefile
+--- a/arch/i386/kernel/acpi/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/acpi/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -7,4 +7,3 @@ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ obj-y += cstate.o processor.o
+ endif
+-
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/acpi/boot.c
+--- a/arch/i386/kernel/acpi/boot.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/acpi/boot.c Wed Aug 08 16:25:28 2007 -0300
+@@ -107,7 +107,7 @@ EXPORT_SYMBOL(x86_acpiid_to_apicid);
+ */
+ enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PIC;
+
+-#ifdef CONFIG_X86_64
++#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
+
+ /* rely on all ACPI tables being in the direct mapping */
+ char *__acpi_map_table(unsigned long phys_addr, unsigned long size)
+@@ -140,8 +140,10 @@ char *__acpi_map_table(unsigned long phy
+ unsigned long base, offset, mapped_size;
+ int idx;
+
++#ifndef CONFIG_XEN
+ if (phys + size < 8 * 1024 * 1024)
+ return __va(phys);
++#endif
+
+ offset = phys & (PAGE_SIZE - 1);
+ mapped_size = PAGE_SIZE - offset;
+@@ -611,7 +613,13 @@ acpi_scan_rsdp(unsigned long start, unsi
+ * RSDP signature.
+ */
+ for (offset = 0; offset < length; offset += 16) {
++#ifdef CONFIG_XEN
++ unsigned long vstart = (unsigned long)isa_bus_to_virt(start);
++
++ if (strncmp((char *)(vstart + offset), "RSD PTR ", sig_len))
++#else
+ if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
++#endif
+ continue;
+ return (start + offset);
+ }
+@@ -724,7 +732,7 @@ static int __init acpi_parse_fadt(unsign
+ acpi_fadt.force_apic_physical_destination_mode =
+ fadt->force_apic_physical_destination_mode;
+
+-#ifdef CONFIG_X86_PM_TIMER
++#if defined(CONFIG_X86_PM_TIMER) && !defined(CONFIG_XEN)
+ /* detect the location of the ACPI PM Timer */
+ if (fadt->revision >= FADT2_REVISION_ID) {
+ /* FADT rev. 2 */
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/apic-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/apic-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,223 @@
++/*
++ * Local APIC handling, local APIC timers
++ *
++ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively.
++ * Maciej W. Rozycki : Various updates and fixes.
++ * Mikael Pettersson : Power Management for UP-APIC.
++ * Pavel Machek and
++ * Mikael Pettersson : PM converted to driver model.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/i8253.h>
++#include <asm/nmi.h>
++
++#include <mach_apic.h>
++#include <mach_apicdef.h>
++#include <mach_ipi.h>
++
++#include "io_ports.h"
++
++#ifndef CONFIG_XEN
++/*
++ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
++ * IPIs in place of local APIC timers
++ */
++static cpumask_t timer_bcast_ipi;
++#endif
++
++/*
++ * Knob to control our willingness to enable the local APIC.
++ */
++static int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
++
++static inline void lapic_disable(void)
++{
++ enable_local_apic = -1;
++ clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++}
++
++static inline void lapic_enable(void)
++{
++ enable_local_apic = 1;
++}
++
++/*
++ * Debug level
++ */
++int apic_verbosity;
++
++static int modern_apic(void)
++{
++#ifndef CONFIG_XEN
++ unsigned int lvr, version;
++ /* AMD systems use old APIC versions, so check the CPU */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 0xf)
++ return 1;
++ lvr = apic_read(APIC_LVR);
++ version = GET_APIC_VERSION(lvr);
++ return version >= 0x14;
++#else
++ return 1;
++#endif
++}
++
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++ /*
++ * Currently unexpected vectors happen only on SMP and APIC.
++ * We _must_ ack these because every local APIC has only N
++ * irq slots per priority level, and a 'hanging, unacked' IRQ
++ * holds up an irq slot - in excessive cases (when multiple
++ * unexpected vectors occur) that might lock up the APIC
++ * completely.
++ * But only ack when the APIC is enabled -AK
++ */
++ if (cpu_has_apic)
++ ack_APIC_irq();
++}
++
++#ifndef CONFIG_XEN
++void __init apic_intr_init(void)
++{
++#ifdef CONFIG_SMP
++ smp_intr_init();
++#endif
++ /* self generated IPI for local APIC timer */
++ set_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
++
++ /* IPI vectors for APIC spurious and error interrupts */
++ set_intr_gate(SPURIOUS_APIC_VECTOR, spurious_interrupt);
++ set_intr_gate(ERROR_APIC_VECTOR, error_interrupt);
++
++ /* thermal monitor LVT interrupt */
++#ifdef CONFIG_X86_MCE_P4THERMAL
++ set_intr_gate(THERMAL_APIC_VECTOR, thermal_interrupt);
++#endif
++}
++
++/* Using APIC to generate smp_local_timer_interrupt? */
++int using_apic_timer __read_mostly = 0;
++
++static int enabled_via_apicbase;
++
++void enable_NMI_through_LVT0 (void * dummy)
++{
++ unsigned int v, ver;
++
++ ver = apic_read(APIC_LVR);
++ ver = GET_APIC_VERSION(ver);
++ v = APIC_DM_NMI; /* unmask and set to NMI */
++ if (!APIC_INTEGRATED(ver)) /* 82489DX */
++ v |= APIC_LVT_LEVEL_TRIGGER;
++ apic_write_around(APIC_LVT0, v);
++}
++#endif /* !CONFIG_XEN */
++
++int get_physical_broadcast(void)
++{
++ if (modern_apic())
++ return 0xff;
++ else
++ return 0xf;
++}
++
++#ifndef CONFIG_XEN
++#ifndef CONFIG_SMP
++static void up_apic_timer_interrupt_call(void)
++{
++ int cpu = smp_processor_id();
++
++ /*
++ * the NMI deadlock-detector uses this.
++ */
++ per_cpu(irq_stat, cpu).apic_timer_irqs++;
++
++ smp_local_timer_interrupt();
++}
++#endif
++
++void smp_send_timer_broadcast_ipi(void)
++{
++ cpumask_t mask;
++
++ cpus_and(mask, cpu_online_map, timer_bcast_ipi);
++ if (!cpus_empty(mask)) {
++#ifdef CONFIG_SMP
++ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
++#else
++ /*
++ * We can directly call the apic timer interrupt handler
++ * in UP case. Minus all irq related functions
++ */
++ up_apic_timer_interrupt_call();
++#endif
++ }
++}
++#endif
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (smp_found_config)
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++
++ return 0;
++}
++
++static int __init parse_lapic(char *arg)
++{
++ lapic_enable();
++ return 0;
++}
++early_param("lapic", parse_lapic);
++
++static int __init parse_nolapic(char *arg)
++{
++ lapic_disable();
++ return 0;
++}
++early_param("nolapic", parse_nolapic);
++
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/asm-offsets.c
+--- a/arch/i386/kernel/asm-offsets.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/asm-offsets.c Wed Aug 08 16:25:28 2007 -0300
+@@ -89,9 +89,14 @@ void foo(void)
+ OFFSET(pbe_orig_address, pbe, orig_address);
+ OFFSET(pbe_next, pbe, next);
+
++#ifndef CONFIG_X86_NO_TSS
+ /* Offset from the sysenter stack to tss.esp0 */
+- DEFINE(TSS_sysenter_esp0, offsetof(struct tss_struct, esp0) -
++ DEFINE(SYSENTER_stack_esp0, offsetof(struct tss_struct, esp0) -
+ sizeof(struct tss_struct));
++#else
++ /* sysenter stack points directly to esp0 */
++ DEFINE(SYSENTER_stack_esp0, 0);
++#endif
+
+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+ DEFINE(VDSO_PRELINK, VDSO_PRELINK);
+@@ -111,4 +116,10 @@ void foo(void)
+ OFFSET(PARAVIRT_iret, paravirt_ops, iret);
+ OFFSET(PARAVIRT_read_cr0, paravirt_ops, read_cr0);
+ #endif
++
++
++#ifdef CONFIG_XEN
++ BLANK();
++ OFFSET(XEN_START_mfn_list, start_info, mfn_list);
++#endif
+ }
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/cpu/common.c
+--- a/arch/i386/kernel/cpu/common.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/cpu/common.c Wed Aug 08 16:25:28 2007 -0300
+@@ -19,6 +19,10 @@
+ #include <mach_apic.h>
+ #endif
+ #include <asm/pda.h>
++
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#endif
+
+ #include "cpu.h"
+
+@@ -601,6 +605,31 @@ void __init early_cpu_init(void)
+ #endif
+ }
+
++/* We can't move load_gdt to asm/desc.h because it lacks make_lowmen_page_readonly()
++ definition, and as this is still the only user of load_gdt in xen.
++ ToDo: JQ
++ */
++
++#ifdef CONFIG_XEN
++#undef load_gdt
++static void __cpuinit load_gdt(struct Xgt_desc_struct *gdt_descr)
++{
++ unsigned long frames[16];
++ unsigned long va;
++ int f;
++
++ for (va = gdt_descr->address, f = 0;
++ va < gdt_descr->address + gdt_descr->size;
++ va += PAGE_SIZE, f++) {
++ frames[f] = virt_to_mfn(va);
++ make_lowmem_page_readonly(
++ (void *)va, XENFEAT_writable_descriptor_tables);
++ }
++ if (HYPERVISOR_set_gdt(frames, gdt_descr->size / 8))
++ BUG();
++}
++#endif /* CONFIG_XEN */
++
+ /* Make sure %gs is initialized properly in idle threads */
+ struct pt_regs * __devinit idle_regs(struct pt_regs *regs)
+ {
+@@ -633,6 +662,10 @@ static __cpuinit int alloc_gdt(int cpu)
+
+ memset(gdt, 0, PAGE_SIZE);
+ memset(pda, 0, sizeof(*pda));
++#ifdef CONFIG_XEN
++ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++ cpu_gdt_descr->size = GDT_SIZE;
++#endif
+ } else {
+ /* GDT and PDA might already have been allocated if
+ this is a CPU hotplug re-insertion. */
+@@ -690,14 +723,18 @@ __cpuinit int init_gdt(int cpu, struct t
+
+ BUG_ON(gdt == NULL || pda == NULL);
+
++#ifndef CONFIG_XEN
+ /*
+ * Initialize the per-CPU GDT with the boot GDT,
+ * and set up the GDT descriptor:
+ */
+ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
+ cpu_gdt_descr->size = GDT_SIZE - 1;
+-
+- pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
++#endif
++
++
++ if (cpu == 0)
++ pack_descriptor((u32 *)&gdt[GDT_ENTRY_PDA].a,
+ (u32 *)&gdt[GDT_ENTRY_PDA].b,
+ (unsigned long)pda, sizeof(*pda) - 1,
+ 0x80 | DESCTYPE_S | 0x2, 0); /* present read-write data segment */
+@@ -724,7 +761,9 @@ void __cpuinit cpu_set_gdt(int cpu)
+ /* Common CPU init for both boot and secondary CPUs */
+ static void __cpuinit _cpu_init(int cpu, struct task_struct *curr)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct * t = &per_cpu(init_tss, cpu);
++#endif
+ struct thread_struct *thread = &curr->thread;
+
+ if (cpu_test_and_set(cpu, cpu_initialized)) {
+@@ -743,7 +782,9 @@ static void __cpuinit _cpu_init(int cpu,
+ set_in_cr4(X86_CR4_TSD);
+ }
+
++#ifndef CONFIG_X86_NO_IDT
+ load_idt(&idt_descr);
++#endif
+
+ /*
+ * Set up and load the per-CPU TSS and LDT
+@@ -755,8 +796,10 @@ static void __cpuinit _cpu_init(int cpu,
+ enter_lazy_tlb(&init_mm, curr);
+
+ load_esp0(t, thread);
++#ifndef CONFIG_X86_NO_TSS
+ set_tss_desc(cpu,t);
+ load_TR_desc();
++#endif
+ load_LDT(&init_mm.context);
+
+ #ifdef CONFIG_DOUBLEFAULT
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/cpu/mtrr/Makefile
+--- a/arch/i386/kernel/cpu/mtrr/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/cpu/mtrr/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -1,3 +1,10 @@ obj-y := main.o if.o generic.o state.o
+ obj-y := main.o if.o generic.o state.o
+ obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
+
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++n-obj-xen := generic.o state.o amd.o cyrix.o centaur.o
++
++obj-y := $(call filterxen, $(obj-y), $(n-obj-xen))
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/cpu/mtrr/main-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/cpu/mtrr/main-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,198 @@
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/mutex.h>
++#include <asm/uaccess.h>
++#include <linux/mutex.h>
++
++#include <asm/mtrr.h>
++#include "mtrr.h"
++
++static DEFINE_MUTEX(mtrr_mutex);
++
++static void generic_get_mtrr(unsigned int reg, unsigned long *base,
++ unsigned long *size, mtrr_type * type)
++{
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = reg;
++ (void)HYPERVISOR_platform_op(&op);
++
++ *size = op.u.read_memtype.nr_mfns;
++ *base = op.u.read_memtype.mfn;
++ *type = op.u.read_memtype.type;
++}
++
++struct mtrr_ops generic_mtrr_ops = {
++ .use_intel_if = 1,
++ .get = generic_get_mtrr,
++};
++
++struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
++unsigned int num_var_ranges;
++unsigned int *usage_table;
++
++/* This function returns the number of variable MTRRs */
++static void __init set_num_var_ranges(void)
++{
++ struct xen_platform_op op;
++
++ for (num_var_ranges = 0; ; num_var_ranges++) {
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = num_var_ranges;
++ if (HYPERVISOR_platform_op(&op) != 0)
++ break;
++ }
++}
++
++static void __init init_table(void)
++{
++ int i, max;
++
++ max = num_var_ranges;
++ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
++ == NULL) {
++ printk(KERN_ERR "mtrr: could not allocate\n");
++ return;
++ }
++ for (i = 0; i < max; i++)
++ usage_table[i] = 0;
++}
++
++int mtrr_add_page(unsigned long base, unsigned long size,
++ unsigned int type, char increment)
++{
++ int error;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ op.cmd = XENPF_add_memtype;
++ op.u.add_memtype.mfn = base;
++ op.u.add_memtype.nr_mfns = size;
++ op.u.add_memtype.type = type;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ mutex_unlock(&mtrr_mutex);
++ BUG_ON(error > 0);
++ return error;
++ }
++
++ if (increment)
++ ++usage_table[op.u.add_memtype.reg];
++
++ mutex_unlock(&mtrr_mutex);
++
++ return op.u.add_memtype.reg;
++}
++
++static int mtrr_check(unsigned long base, unsigned long size)
++{
++ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
++ printk(KERN_WARNING
++ "mtrr: size and base must be multiples of 4 kiB\n");
++ printk(KERN_DEBUG
++ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
++ dump_stack();
++ return -1;
++ }
++ return 0;
++}
++
++int
++mtrr_add(unsigned long base, unsigned long size, unsigned int type,
++ char increment)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
++ increment);
++}
++
++int mtrr_del_page(int reg, unsigned long base, unsigned long size)
++{
++ unsigned i;
++ mtrr_type ltype;
++ unsigned long lbase, lsize;
++ int error = -EINVAL;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ if (reg < 0) {
++ /* Search for existing MTRR */
++ for (i = 0; i < num_var_ranges; ++i) {
++ mtrr_if->get(i, &lbase, &lsize, &ltype);
++ if (lbase == base && lsize == size) {
++ reg = i;
++ break;
++ }
++ }
++ if (reg < 0) {
++ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
++ size);
++ goto out;
++ }
++ }
++ if (usage_table[reg] < 1) {
++ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
++ goto out;
++ }
++ if (--usage_table[reg] < 1) {
++ op.cmd = XENPF_del_memtype;
++ op.u.del_memtype.handle = 0;
++ op.u.del_memtype.reg = reg;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ BUG_ON(error > 0);
++ goto out;
++ }
++ }
++ error = reg;
++ out:
++ mutex_unlock(&mtrr_mutex);
++ return error;
++}
++
++int
++mtrr_del(int reg, unsigned long base, unsigned long size)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
++}
++
++EXPORT_SYMBOL(mtrr_add);
++EXPORT_SYMBOL(mtrr_del);
++
++void __init mtrr_bp_init(void)
++{
++}
++
++void mtrr_ap_init(void)
++{
++}
++
++static int __init mtrr_init(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ if (!is_initial_xendomain())
++ return -ENODEV;
++
++ if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
++ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
++ return -ENODEV;
++
++ set_num_var_ranges();
++ init_table();
++
++ return 0;
++}
++
++subsys_initcall(mtrr_init);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/crash.c
+--- a/arch/i386/kernel/crash.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/crash.c Wed Aug 08 16:25:28 2007 -0300
+@@ -31,6 +31,7 @@
+ /* This keeps a track of which one is crashing cpu. */
+ static int crashing_cpu;
+
++#ifndef CONFIG_XEN
+ #if defined(CONFIG_SMP) && defined(CONFIG_X86_LOCAL_APIC)
+ static atomic_t waiting_for_crash_ipi;
+
+@@ -112,6 +113,7 @@ static void nmi_shootdown_cpus(void)
+ /* There are no cpus to shootdown */
+ }
+ #endif
++#endif /* CONFIG_XEN */
+
+ void machine_crash_shutdown(struct pt_regs *regs)
+ {
+@@ -128,10 +130,12 @@ void machine_crash_shutdown(struct pt_re
+
+ /* Make a note of crashing cpu. Will be used in NMI callback.*/
+ crashing_cpu = safe_smp_processor_id();
++#ifndef CONFIG_XEN
+ nmi_shootdown_cpus();
+ lapic_shutdown();
+ #if defined(CONFIG_X86_IO_APIC)
+ disable_IO_APIC();
+ #endif
++#endif /* CONFIG_XEN */
+ crash_save_cpu(regs, safe_smp_processor_id());
+ }
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/e820.c
+--- a/arch/i386/kernel/e820.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/e820.c Wed Aug 08 16:25:28 2007 -0300
+@@ -15,10 +15,24 @@
+ #include <asm/page.h>
+ #include <asm/e820.h>
+
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/memory.h>
++#include <xen/features.h>
++#include <xen/xencons.h>
++#endif
++
+ #ifdef CONFIG_EFI
+ int efi_enabled = 0;
+ EXPORT_SYMBOL(efi_enabled);
+ #endif
++
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
++#endif
++static void __init
++e820_setup_gap(struct e820entry *e820, int nr_map);
+
+ struct e820map e820;
+ struct change_member {
+@@ -180,6 +194,12 @@ static void __init probe_roms(void)
+ unsigned char *rom;
+ int i;
+
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return;
++#endif
++
+ /* video rom */
+ upper = adapter_rom_resources[0].start;
+ for (start = video_rom_resource.start; start < upper; start += 2048) {
+@@ -247,36 +267,54 @@ legacy_init_iomem_resources(struct resou
+ legacy_init_iomem_resources(struct resource *code_resource, struct resource *data_resource)
+ {
+ int i;
++ struct e820entry *map = e820.map;
++ int nr_map = e820.nr_map;
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++ struct xen_memory_map memmap;
++
++ map = machine_e820.map;
++ memmap.nr_entries = E820MAX;
++
++ set_xen_guest_handle(memmap.buffer, map);
++
++ if(HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++ BUG();
++ machine_e820.nr_map = memmap.nr_entries;
++ nr_map = memmap.nr_entries;
++ e820_setup_gap(map, memmap.nr_entries);
++#endif
+
+ probe_roms();
+- for (i = 0; i < e820.nr_map; i++) {
++ for (i = 0; i < nr_map; i++) {
+ struct resource *res;
+ #ifndef CONFIG_RESOURCES_64BIT
+- if (e820.map[i].addr + e820.map[i].size > 0x100000000ULL)
++ if (map[i].addr + map[i].size > 0x100000000ULL)
+ continue;
+ #endif
+ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
+- switch (e820.map[i].type) {
++ switch (map[i].type) {
+ case E820_RAM: res->name = "System RAM"; break;
+ case E820_ACPI: res->name = "ACPI Tables"; break;
+ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
+ default: res->name = "reserved";
+ }
+- res->start = e820.map[i].addr;
+- res->end = res->start + e820.map[i].size - 1;
++ res->start = map[i].addr;
++ res->end = res->start + map[i].size - 1;
+ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
+ if (request_resource(&iomem_resource, res)) {
+ kfree(res);
+ continue;
+ }
+- if (e820.map[i].type == E820_RAM) {
++ if (map[i].type == E820_RAM) {
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
++#ifndef CONFIG_XEN
+ request_resource(res, code_resource);
+ request_resource(res, data_resource);
++#endif
+ #ifdef CONFIG_KEXEC
+ request_resource(res, &crashk_res);
+ #endif
+@@ -295,6 +333,11 @@ static int __init request_standard_resou
+ int i;
+
+ printk("Setting up standard PCI resources\n");
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return 0;
++#endif
+ if (efi_enabled)
+ efi_initialize_iomem_resources(&code_resource, &data_resource);
+ else
+@@ -514,10 +557,13 @@ int __init sanitize_e820_map(struct e820
+ */
+ int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
+ {
++#ifndef CONFIG_XEN
+ /* Only one memory region (or negative)? Ignore it */
+ if (nr_map < 2)
+ return -1;
+-
++#else
++ BUG_ON(nr_map < 1);
++#endif
+ do {
+ unsigned long long start = biosmap->addr;
+ unsigned long long size = biosmap->size;
+@@ -529,6 +575,7 @@ int __init copy_e820_map(struct e820entr
+ if (start > end)
+ return -1;
+
++#ifndef CONFIG_XEN
+ /*
+ * Some BIOSes claim RAM in the 640k - 1M region.
+ * Not right. Fix it up.
+@@ -549,6 +596,7 @@ int __init copy_e820_map(struct e820entr
+ size = end - start;
+ }
+ }
++#endif
+ add_memory_region(start, size, type);
+ } while (biosmap++,--nr_map);
+ return 0;
+@@ -653,6 +701,15 @@ void __init register_bootmem_low_pages(u
+ */
+ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
+
++#ifdef CONFIG_XEN
++ /*
++ * Truncate to the number of actual pages currently
++ * present.
++ */
++ if (last_pfn > xen_start_info->nr_pages)
++ last_pfn = xen_start_info->nr_pages;
++#endif
++
+ if (last_pfn > max_low_pfn)
+ last_pfn = max_low_pfn;
+
+@@ -668,7 +725,12 @@ void __init register_bootmem_low_pages(u
+ }
+ }
+
+-void __init e820_register_memory(void)
++/*
++ * Locate a unused range of the physical address space below 4G which
++ * can be used for PCI mappings.
++ */
++static void __init
++e820_setup_gap(struct e820entry *e820, int nr_map)
+ {
+ unsigned long gapstart, gapsize, round;
+ unsigned long long last;
+@@ -681,10 +743,10 @@ void __init e820_register_memory(void)
+ last = 0x100000000ull;
+ gapstart = 0x10000000;
+ gapsize = 0x400000;
+- i = e820.nr_map;
++ i = nr_map;
+ while (--i >= 0) {
+- unsigned long long start = e820.map[i].addr;
+- unsigned long long end = start + e820.map[i].size;
++ unsigned long long start = e820[i].addr;
++ unsigned long long end = start + e820[i].size;
+
+ /*
+ * Since "last" is at most 4GB, we know we'll
+@@ -714,6 +776,13 @@ void __init e820_register_memory(void)
+
+ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
+ pci_mem_start, gapstart, gapsize);
++}
++
++void __init e820_register_memory(void)
++{
++#ifndef CONFIG_XEN
++ e820_setup_gap(e820.map, e820.nr_map);
++#endif
+ }
+
+ void __init print_memory_map(char *who)
+@@ -784,7 +853,7 @@ static __init __always_inline void efi_l
+
+ void __init limit_regions(unsigned long long size)
+ {
+- unsigned long long current_addr;
++ unsigned long long current_addr = 0;
+ int i;
+
+ print_memory_map("limit_regions start");
+@@ -813,6 +882,19 @@ void __init limit_regions(unsigned long
+ print_memory_map("limit_regions endfor");
+ return;
+ }
++#ifdef CONFIG_XEN
++ if (i==e820.nr_map && current_addr < size) {
++ /*
++ * The e820 map finished before our requested size so
++ * extend the final entry to the requested address.
++ */
++ --i;
++ if (e820.map[i].type == E820_RAM)
++ e820.map[i].size -= current_addr - size;
++ else
++ add_memory_region(current_addr, size - current_addr, E820_RAM);
++ }
++#endif
+ print_memory_map("limit_regions endfunc");
+ }
+
+@@ -828,8 +910,15 @@ e820_all_mapped(unsigned long s, unsigne
+ u64 start = s;
+ u64 end = e;
+ int i;
++#ifndef CONFIG_XEN
+ for (i = 0; i < e820.nr_map; i++) {
+ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
+ if (type && ei->type != type)
+ continue;
+ /* is the region (part) in overlap with the current region ?*/
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/entry-xen.S
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/entry-xen.S Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,1264 @@
++/*
++ * linux/arch/i386/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'ret_from_system_call':
++ * ptrace needs to have all regs on the stack.
++ * if the order here is changed, it needs to be
++ * updated in fork.c:copy_process, signal.c:do_signal,
++ * ptrace.c and ptrace.h
++ *
++ * 0(%esp) - %ebx
++ * 4(%esp) - %ecx
++ * 8(%esp) - %edx
++ * C(%esp) - %esi
++ * 10(%esp) - %edi
++ * 14(%esp) - %ebp
++ * 18(%esp) - %eax
++ * 1C(%esp) - %ds
++ * 20(%esp) - %es
++ * 24(%esp) - %gs
++ * 28(%esp) - orig_eax
++ * 2C(%esp) - %eip
++ * 30(%esp) - %cs
++ * 34(%esp) - %eflags
++ * 38(%esp) - %oldesp
++ * 3C(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
++
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/percpu.h>
++#include <asm/dwarf2.h>
++#include "irq_vectors.h"
++#include <xen/interface/xen.h>
++
++/*
++ * We use macros for low-level operations which need to be overridden
++ * for paravirtualization. The following will never clobber any registers:
++ * INTERRUPT_RETURN (aka. "iret")
++ * GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
++ * ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
++ *
++ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
++ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
++ * Allowing a register to be clobbered can shrink the paravirt replacement
++ * enough to patch inline, increasing performance.
++ */
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++CF_MASK = 0x00000001
++TF_MASK = 0x00000100
++IF_MASK = 0x00000200
++DF_MASK = 0x00000400
++NT_MASK = 0x00004000
++VM_MASK = 0x00020000
++/* Pseudo-eflags. */
++NMI_MASK = 0x80000000
++
++#ifdef CONFIG_XEN
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending /* 0 */
++#define evtchn_upcall_mask 1
++
++#define sizeof_vcpu_shift 6
++
++#ifdef CONFIG_SMP
++#define GET_VCPU_INFO movl %gs:PDA_cpu,%esi ; \
++ shl $sizeof_vcpu_shift,%esi ; \
++ addl HYPERVISOR_shared_info,%esi
++#else
++#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
++#endif
++
++#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
++#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
++#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
++#endif
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
++#else
++#define preempt_stop(clobbers)
++#define resume_kernel restore_nocheck
++#endif
++
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off?
++ jz 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++#ifdef CONFIG_VM86
++#define resume_userspace_sig check_userspace
++#else
++#define resume_userspace_sig resume_userspace
++#endif
++
++#define SAVE_ALL \
++ cld; \
++ pushl %gs; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET gs, 0;*/\
++ pushl %es; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET es, 0;*/\
++ pushl %ds; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET ds, 0;*/\
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET eax, 0;\
++ pushl %ebp; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebp, 0;\
++ pushl %edi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edi, 0;\
++ pushl %esi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET esi, 0;\
++ pushl %edx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edx, 0;\
++ pushl %ecx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ecx, 0;\
++ pushl %ebx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebx, 0;\
++ movl $(__USER_DS), %edx; \
++ movl %edx, %ds; \
++ movl %edx, %es; \
++ movl $(__KERNEL_PDA), %edx; \
++ movl %edx, %gs
++
++#define RESTORE_INT_REGS \
++ popl %ebx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebx;\
++ popl %ecx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ecx;\
++ popl %edx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edx;\
++ popl %esi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE esi;\
++ popl %edi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edi;\
++ popl %ebp; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebp;\
++ popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE eax
++
++#define RESTORE_REGS \
++ RESTORE_INT_REGS; \
++1: popl %ds; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE ds;*/\
++2: popl %es; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE es;*/\
++3: popl %gs; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE gs;*/\
++.pushsection .fixup,"ax"; \
++4: movl $0,(%esp); \
++ jmp 1b; \
++5: movl $0,(%esp); \
++ jmp 2b; \
++6: movl $0,(%esp); \
++ jmp 3b; \
++.section __ex_table,"a";\
++ .align 4; \
++ .long 1b,4b; \
++ .long 2b,5b; \
++ .long 3b,6b; \
++.popsection
++
++#define RING0_INT_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 3*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_EC_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, 4*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_PTREGS_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_SIGNAL_FRAME;\
++ CFI_DEF_CFA esp, PT_OLDESP-PT_EBX;\
++ /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/\
++ CFI_OFFSET eip, PT_EIP-PT_OLDESP;\
++ /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/\
++ /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/\
++ CFI_OFFSET eax, PT_EAX-PT_OLDESP;\
++ CFI_OFFSET ebp, PT_EBP-PT_OLDESP;\
++ CFI_OFFSET edi, PT_EDI-PT_OLDESP;\
++ CFI_OFFSET esi, PT_ESI-PT_OLDESP;\
++ CFI_OFFSET edx, PT_EDX-PT_OLDESP;\
++ CFI_OFFSET ecx, PT_ECX-PT_OLDESP;\
++ CFI_OFFSET ebx, PT_EBX-PT_OLDESP
++
++ENTRY(ret_from_fork)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call schedule_tail
++ GET_THREAD_INFO(%ebp)
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl $0x0202 # Reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET 4
++ popfl
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp syscall_exit
++ CFI_ENDPROC
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++ # userspace resumption stub bypassing syscall exit tracing
++ ALIGN
++ RING0_PTREGS_FRAME
++ret_from_exception:
++ preempt_stop(CLBR_ANY)
++ret_from_intr:
++ GET_THREAD_INFO(%ebp)
++check_userspace:
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb PT_CS(%esp), %al
++ andl $(VM_MASK | SEGMENT_RPL_MASK), %eax
++ cmpl $USER_RPL, %eax
++ jb resume_kernel # not returning to v8086 or userspace
++
++ENTRY(resume_userspace)
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
++ # int/exception return?
++ jne work_pending
++ jmp restore_all
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++ DISABLE_INTERRUPTS(CLBR_ANY)
++ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
++ jnz restore_nocheck
++need_resched:
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testb $_TIF_NEED_RESCHED, %cl
++ jz restore_all
++ testl $IF_MASK,PT_EFLAGS(%esp) # interrupts off (exception path) ?
++ jz restore_all
++ call preempt_schedule_irq
++ jmp need_resched
++#endif
++ CFI_ENDPROC
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++ the vsyscall page. See vsyscall-sysentry.S, which defines the symbol. */
++
++ # sysenter call handler stub
++ENTRY(sysenter_entry)
++ CFI_STARTPROC simple
++ CFI_SIGNAL_FRAME
++ CFI_DEF_CFA esp, 0
++ CFI_REGISTER esp, ebp
++ movl SYSENTER_stack_esp0(%esp),%esp
++sysenter_past_esp:
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable it straight after entry:
++ */
++ ENABLE_INTERRUPTS(CLBR_NONE)
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ss, 0*/
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esp, 0
++ pushfl
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $(__USER_CS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET cs, 0*/
++#ifndef CONFIG_COMPAT_VDSO
++ /*
++ * Push current_thread_info()->sysenter_return to the stack.
++ * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
++ * pushed above; +8 corresponds to copy_thread's esp0 setting.
++ */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++#else
++ pushl $SYSENTER_RETURN
++#endif
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eip, 0
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp)
++ DISABLE_INTERRUPTS(CLBR_ECX|CLBR_EDX)
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx
++ jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++ movl PT_EIP(%esp), %edx
++ movl PT_OLDESP(%esp), %ecx
++ xorl %ebp,%ebp
++ TRACE_IRQS_ON
++1: mov PT_GS(%esp), %gs
++#ifdef CONFIG_XEN
++ __ENABLE_INTERRUPTS
++sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ movl PT_ESI(%esp), %esi
++ sysexit
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/
++ push %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ call evtchn_do_upcall
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_intr
++#else
++ ENABLE_INTERRUPTS_SYSEXIT
++#endif /* !CONFIG_XEN */
++ CFI_ENDPROC
++.pushsection .fixup,"ax"
++2: movl $0,PT_GS(%esp)
++ jmp 1b
++.section __ex_table,"a"
++ .align 4
++ .long 1b,2b
++.popsection
++
++ # system call handler stub
++ENTRY(system_call)
++ RING0_INT_FRAME # can't unwind into user space anyway
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ testl $TF_MASK,PT_EFLAGS(%esp)
++ jz no_singlestep
++ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
++ # system call tracing in operation / emulation
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++syscall_call:
++ call *sys_call_table(,%eax,4)
++ movl %eax,PT_EAX(%esp) # store the return value
++syscall_exit:
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx # current->work
++ jne syscall_exit_work
++
++restore_all:
++#ifndef CONFIG_XEN
++ movl PT_EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
++ # are returning to the kernel.
++ # See comments in process.c:copy_thread() for details.
++ movb PT_OLDSS(%esp), %ah
++ movb PT_CS(%esp), %al
++ andl $(VM_MASK | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
++ cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
++ CFI_REMEMBER_STATE
++ je ldt_ss # returning to user-space with LDT SS
++restore_nocheck:
++#else
++restore_nocheck:
++ movl PT_EFLAGS(%esp), %eax
++ testl $(VM_MASK|NMI_MASK), %eax
++ CFI_REMEMBER_STATE
++ jnz hypervisor_iret
++ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
++ GET_VCPU_INFO
++ andb evtchn_upcall_mask(%esi),%al
++ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
++ CFI_REMEMBER_STATE
++ jnz restore_all_enable_events # != 0 => enable event delivery
++ CFI_REMEMBER_STATE
++#endif
++ TRACE_IRQS_IRET
++restore_nocheck_notrace:
++ RESTORE_REGS
++ addl $4, %esp # skip orig_eax/error_code
++ CFI_ADJUST_CFA_OFFSET -4
++1: INTERRUPT_RETURN
++.section .fixup,"ax"
++iret_exc:
++#ifndef CONFIG_XEN
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS(CLBR_NONE)
++#endif
++ pushl $0 # no error code
++ pushl $do_iret_error
++ jmp error_code
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ CFI_RESTORE_STATE
++#ifndef CONFIG_XEN
++ldt_ss:
++ larl PT_OLDSS(%esp), %eax
++ jnz restore_nocheck
++ testl $0x00400000, %eax # returning to 32bit stack?
++ jnz restore_nocheck # allright, normal return
++
++#ifdef CONFIG_PARAVIRT
++ /*
++ * The kernel can't run on a non-flat stack if paravirt mode
++ * is active. Rather than try to fixup the high bits of
++ * ESP, bypass this code entirely. This may break DOSemu
++ * and/or Wine support in a paravirt VM, although the option
++ * is still available to implement the setting of the high
++ * 16-bits in the INTERRUPT_RETURN paravirt-op.
++ */
++ cmpl $0, paravirt_ops+PARAVIRT_enabled
++ jne restore_nocheck
++#endif
++
++ /* If returning to userspace with 16bit stack,
++ * try to fix the higher word of ESP, as the CPU
++ * won't restore it.
++ * This is an "official" bug of all the x86-compatible
++ * CPUs, which we can try to work around to make
++ * dosemu and wine happy. */
++ movl PT_OLDESP(%esp), %eax
++ movl %esp, %edx
++ call patch_espfix_desc
++ pushl $__ESPFIX_SS
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ DISABLE_INTERRUPTS(CLBR_EAX)
++ TRACE_IRQS_OFF
++ lss (%esp), %esp
++ CFI_ADJUST_CFA_OFFSET -8
++ jmp restore_nocheck
++#else
++ ALIGN
++restore_all_enable_events:
++ TRACE_IRQS_ON
++ __ENABLE_INTERRUPTS
++scrit: /**** START OF CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++ jmp 11f
++ecrit: /**** END OF CRITICAL REGION ****/
++
++ CFI_RESTORE_STATE
++hypervisor_iret:
++ andl $~NMI_MASK, PT_EFLAGS(%esp)
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp hypercall_page + (__HYPERVISOR_iret * 32)
++#endif
++ CFI_ENDPROC
++
++ # perform work that needs to be done immediately before resumption
++ ALIGN
++ RING0_PTREGS_FRAME # can't unwind into user space anyway
++work_pending:
++ testb $_TIF_NEED_RESCHED, %cl
++ jz work_notifysig
++work_resched:
++ call schedule
++ DISABLE_INTERRUPTS(CLBR_ANY) # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
++ # than syscall tracing?
++ jz restore_all
++ testb $_TIF_NEED_RESCHED, %cl
++ jnz work_resched
++
++work_notifysig: # deal with pending signals and
++ # notify-resume requests
++#ifdef CONFIG_VM86
++ testl $VM_MASK, PT_EFLAGS(%esp)
++ movl %esp, %eax
++ jne work_notifysig_v86 # returning to kernel-space or
++ # vm86-space
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ ALIGN
++work_notifysig_v86:
++ pushl %ecx # save ti_flags for do_notify_resume
++ CFI_ADJUST_CFA_OFFSET 4
++ call save_v86_state # %eax contains pt_regs pointer
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ movl %eax, %esp
++#else
++ movl %esp, %eax
++#endif
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_trace_entry:
++ movl $-ENOSYS,PT_EAX(%esp)
++ movl %esp, %eax
++ xorl %edx,%edx
++ call do_syscall_trace
++ cmpl $0, %eax
++ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
++ # so must skip actual syscall
++ movl PT_ORIG_EAX(%esp), %eax
++ cmpl $(nr_syscalls), %eax
++ jnae syscall_call
++ jmp syscall_exit
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_exit_work:
++ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++ jz work_pending
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS(CLBR_ANY) # could let do_syscall_trace() call
++ # schedule() instead
++ movl %esp, %eax
++ movl $1, %edx
++ call do_syscall_trace
++ jmp resume_userspace
++ CFI_ENDPROC
++
++ RING0_INT_FRAME # can't unwind into user space anyway
++syscall_fault:
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl $-EFAULT,PT_EAX(%esp)
++ jmp resume_userspace
++
++syscall_badsys:
++ movl $-ENOSYS,PT_EAX(%esp)
++ jmp resume_userspace
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++#define FIXUP_ESPFIX_STACK \
++	/* since we are on the wrong stack, we can't make this C code :( */ \
++ movl %gs:PDA_cpu, %ebx; \
++ PER_CPU(cpu_gdt_descr, %ebx); \
++ movl GDS_address(%ebx), %ebx; \
++ GET_DESC_BASE(GDT_ENTRY_ESPFIX_SS, %ebx, %eax, %ax, %al, %ah); \
++ addl %esp, %eax; \
++ pushl $__KERNEL_DS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ lss (%esp), %esp; \
++ CFI_ADJUST_CFA_OFFSET -8;
++#define UNWIND_ESPFIX_STACK \
++ movl %ss, %eax; \
++ /* see if on espfix stack */ \
++ cmpw $__ESPFIX_SS, %ax; \
++ jne 27f; \
++ movl $__KERNEL_DS, %eax; \
++ movl %eax, %ds; \
++ movl %eax, %es; \
++ /* switch to normal stack */ \
++ FIXUP_ESPFIX_STACK; \
++27:;
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.data
++ENTRY(interrupt)
++.text
++
++vector=0
++ENTRY(irq_entries_start)
++ RING0_INT_FRAME
++.rept NR_IRQS
++ ALIGN
++ .if vector
++ CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1: pushl $~(vector)
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp common_interrupt
++.data
++ .long 1b
++.text
++vector=vector+1
++.endr
++
++/*
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
++ */
++ ALIGN
++common_interrupt:
++ SAVE_ALL
++ TRACE_IRQS_OFF
++ movl %esp,%eax
++ call do_IRQ
++ jmp ret_from_intr
++ CFI_ENDPROC
++
++#define BUILD_INTERRUPT(name, nr) \
++ENTRY(name) \
++ RING0_INT_FRAME; \
++ pushl $~(nr); \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ SAVE_ALL; \
++ TRACE_IRQS_OFF \
++ movl %esp,%eax; \
++ call smp_/**/name; \
++ jmp ret_from_intr; \
++ CFI_ENDPROC
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++#else
++#define UNWIND_ESPFIX_STACK
++#endif
++
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
++ CFI_ADJUST_CFA_OFFSET 4
++ ALIGN
++error_code:
++ /* the function address is in %gs's slot on the stack */
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
++ pushl %ds
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ds, 0*/
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eax, 0
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebp, 0
++ pushl %edi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edi, 0
++ pushl %esi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esi, 0
++ pushl %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edx, 0
++ pushl %ecx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ecx, 0
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebx, 0
++ cld
++ pushl %gs
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET gs, 0*/
++ movl $(__KERNEL_PDA), %ecx
++ movl %ecx, %gs
++ UNWIND_ESPFIX_STACK
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ /*CFI_REGISTER es, ecx*/
++ movl PT_GS(%esp), %edi # get the function address
++ movl PT_ORIG_EAX(%esp), %edx # get the error code
++ movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
++ mov %ecx, PT_GS(%esp)
++ /*CFI_REL_OFFSET gs, ES*/
++ movl $(__USER_DS), %ecx
++ movl %ecx, %ds
++ movl %ecx, %es
++ movl %esp,%eax # pt_regs pointer
++ call *%edi
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(page_fault)
++
++#ifdef CONFIG_XEN
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++#
++# The sysexit critical region is slightly different. sysexit
++# atomically removes the entire stack frame. If we interrupt in the
++# critical region we know that the entire frame is present and correct
++# so we can simply throw away the new one.
++ENTRY(hypervisor_callback)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ movl PT_EIP(%esp),%eax
++ cmpl $scrit,%eax
++ jb 11f
++ cmpl $ecrit,%eax
++ jb critical_region_fixup
++ cmpl $sysexit_scrit,%eax
++ jb 11f
++ cmpl $sysexit_ecrit,%eax
++ ja 11f
++ # interrupted in sysexit critical
++ addl $PT_OLDESP,%esp # Remove cs...ebx from stack frame.
++11: push %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ call evtchn_do_upcall
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_intr
++ CFI_ENDPROC
++
++# [How we do the fixup]. We want to merge the current stack frame with the
++# just-interrupted frame. How we do this depends on where in the critical
++# region the interrupted handler was executing, and so how many saved
++# registers are in each frame. We do this quickly using the lookup table
++# 'critical_fixup_table'. For each byte offset in the critical region, it
++# provides the number of bytes which have already been popped from the
++# interrupted stack frame.
++critical_region_fixup:
++ movzbl critical_fixup_table-scrit(%eax),%ecx # %eax contains num bytes popped
++ cmpb $0xff,%cl # 0xff => vcpu_info critical region
++ jne 15f
++ xorl %ecx,%ecx
++15: leal (%esp,%ecx),%esi # %esi points at end of src region
++ leal PT_OLDESP(%esp),%edi # %edi points at end of dst region
++ shrl $2,%ecx # convert words to bytes
++ je 17f # skip loop if nothing to copy
++16: subl $4,%esi # pre-decrementing copy loop
++ subl $4,%edi
++ movl (%esi),%eax
++ movl %eax,(%edi)
++ loop 16b
++17: movl %edi,%esp # final %edi is top of merged stack
++ jmp 11b
++
++.section .rodata,"a"
++critical_fixup_table:
++ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = __TEST_PENDING
++ .byte 0xff,0xff # jnz 14f
++ .byte 0x00 # pop %ebx
++ .byte 0x04 # pop %ecx
++ .byte 0x08 # pop %edx
++ .byte 0x0c # pop %esi
++ .byte 0x10 # pop %edi
++ .byte 0x14 # pop %ebp
++ .byte 0x18 # pop %eax
++ .byte 0x1c # pop %ds
++ .byte 0x20 # pop %es
++ .byte 0x24,0x24 # pop %gs
++ .byte 0x28,0x28,0x28 # add $4,%esp
++ .byte 0x2c # iret
++ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
++ .byte 0x00,0x00 # jmp 11b
++.previous
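
The table above gives, for every instruction offset inside the critical region, how many bytes of the interrupted frame have already been popped; the copy loop then merges the two frames so a single consistent frame remains. The following userspace C sketch illustrates only that table-plus-copy bookkeeping; the frame size, table contents and function names are invented, and the real code of course operates on the live kernel stack with the exact offsets listed above.

#include <stdio.h>
#include <string.h>

#define NEW_FRAME_BYTES 16   /* invented size of the freshly saved frame */

/* bytes of the interrupted frame already popped after 0..4 instructions */
static const unsigned char fixup_table[] = { 0, 4, 8, 12, 16 };

/*
 * Merge: slide the newly saved frame up by 'popped' bytes so that it sits
 * directly against what is left of the interrupted frame, and return the
 * new top-of-stack index.  memmove() copes with the overlapping regions.
 */
static size_t merge_frames(unsigned char *stack, size_t new_top, size_t offset)
{
    size_t popped = fixup_table[offset];

    memmove(stack + new_top + popped, stack + new_top, NEW_FRAME_BYTES);
    return new_top + popped;
}

int main(void)
{
    unsigned char stack[64] = { 0 };
    size_t top = 16;                     /* pretend the new frame starts here */

    memset(stack + top, 0xAB, NEW_FRAME_BYTES);
    top = merge_frames(stack, top, 2);   /* interrupted 2 instructions in */
    printf("merged stack top moved to index %zu\n", top);
    return 0;
}
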
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++# 1. Fault while reloading DS, ES, FS or GS
++# 2. Fault while executing IRET
++# Category 1 we fix up by reattempting the load, and zeroing the segment
++# register if the load fails.
++# Category 2 we fix up by jumping to do_iret_error. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by maintaining a status value in EAX.
++ENTRY(failsafe_callback)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl $1,%eax
++1: mov 4(%esp),%ds
++2: mov 8(%esp),%es
++3: mov 12(%esp),%fs
++4: mov 16(%esp),%gs
++ testl %eax,%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jz 5f
++ addl $16,%esp # EAX != 0 => Category 2 (Bad IRET)
++ jmp iret_exc
++5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment)
++ pushl $0
++ SAVE_ALL
++ jmp ret_from_exception
++.section .fixup,"ax"; \
++6: xorl %eax,%eax; \
++ movl %eax,4(%esp); \
++ jmp 1b; \
++7: xorl %eax,%eax; \
++ movl %eax,8(%esp); \
++ jmp 2b; \
++8: xorl %eax,%eax; \
++ movl %eax,12(%esp); \
++ jmp 3b; \
++9: xorl %eax,%eax; \
++ movl %eax,16(%esp); \
++ jmp 4b; \
++.previous; \
++.section __ex_table,"a"; \
++ .align 4; \
++ .long 1b,6b; \
++ .long 2b,7b; \
++ .long 3b,8b; \
++ .long 4b,9b; \
++.previous
++#endif
++ CFI_ENDPROC
++
++ENTRY(coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(simd_coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_simd_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(device_not_available)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++#ifndef CONFIG_XEN
++ GET_CR0_INTO_EAX
++ testl $0x4, %eax # EM (math emulation bit)
++ je device_available_emulate
++ pushl $0 # temporary storage for ORIG_EIP
++ CFI_ADJUST_CFA_OFFSET 4
++ call math_emulate
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_exception
++device_available_emulate:
++#endif
++ preempt_stop(CLBR_ANY)
++ call math_state_restore
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label) \
++ cmpw $__KERNEL_CS,4(%esp); \
++ jne ok; \
++label: \
++ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
++ CFI_DEF_CFA esp, 0; \
++ CFI_UNDEFINED eip; \
++ pushfl; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $__KERNEL_CS; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ pushl $sysenter_past_esp; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ CFI_REL_OFFSET eip, 0
++#endif /* CONFIG_XEN */
++
++KPROBE_ENTRY(debug)
++ RING0_INT_FRAME
++#ifndef CONFIG_XEN
++ cmpl $sysenter_entry,(%esp)
++ jne debug_stack_correct
++ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++#endif /* !CONFIG_XEN */
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # error code 0
++ movl %esp,%eax # pt_regs pointer
++ call do_debug
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(debug)
++ .previous .text
++
++#ifndef CONFIG_XEN
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++KPROBE_ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %ss, %eax
++ cmpw $__ESPFIX_SS, %ax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ je nmi_espfix_stack
++ cmpl $sysenter_entry,(%esp)
++ je nmi_stack_fixup
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %esp,%eax
++ /* Do not access memory above the end of our stack page,
++ * it might not exist.
++ */
++ andl $(THREAD_SIZE-1),%eax
++ cmpl $(THREAD_SIZE-20),%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jae nmi_stack_correct
++ cmpl $sysenter_entry,12(%esp)
++ je nmi_debug_stack_check
++nmi_stack_correct:
++ /* We have a RING0_INT_FRAME here */
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ jmp restore_nocheck_notrace
++ CFI_ENDPROC
++
++nmi_stack_fixup:
++ RING0_INT_FRAME
++ FIX_STACK(12,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_debug_stack_check:
++ /* We have a RING0_INT_FRAME here */
++ cmpw $__KERNEL_CS,16(%esp)
++ jne nmi_stack_correct
++ cmpl $debug,(%esp)
++ jb nmi_stack_correct
++ cmpl $debug_esp_fix_insn,(%esp)
++ ja nmi_stack_correct
++ FIX_STACK(24,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_espfix_stack:
++ /* We have a RING0_INT_FRAME here.
++ *
++ * create the pointer to lss back
++ */
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ addw $4, (%esp)
++ /* copy the iret frame of 12 bytes */
++ .rept 3
++ pushl 16(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ .endr
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ FIXUP_ESPFIX_STACK # %eax == %esp
++ xorl %edx,%edx # zero error code
++ call do_nmi
++ RESTORE_REGS
++ lss 12+4(%esp), %esp # back to espfix stack
++ CFI_ADJUST_CFA_OFFSET -24
++1: INTERRUPT_RETURN
++ CFI_ENDPROC
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++KPROBE_END(nmi)
++#else
++KPROBE_ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ orl $NMI_MASK, PT_EFLAGS(%esp)
++ jmp restore_all
++ CFI_ENDPROC
++KPROBE_END(nmi)
++#endif
++
++#ifdef CONFIG_PARAVIRT
++ENTRY(native_iret)
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ENTRY(native_irq_enable_sysexit)
++ sti
++ sysexit
++#endif
++
++KPROBE_ENTRY(int3)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_int3
++ jmp ret_from_exception
++ CFI_ENDPROC
++KPROBE_END(int3)
++
++ENTRY(overflow)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_overflow
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(bounds)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_bounds
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_op)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_invalid_op
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(coprocessor_segment_overrun)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_segment_overrun
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_TSS)
++ RING0_EC_FRAME
++ pushl $do_invalid_TSS
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(segment_not_present)
++ RING0_EC_FRAME
++ pushl $do_segment_not_present
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(stack_segment)
++ RING0_EC_FRAME
++ pushl $do_stack_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++KPROBE_ENTRY(general_protection)
++ RING0_EC_FRAME
++ pushl $do_general_protection
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++KPROBE_END(general_protection)
++
++ENTRY(alignment_check)
++ RING0_EC_FRAME
++ pushl $do_alignment_check
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl machine_check_vector
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif
++
++#ifndef CONFIG_XEN
++ENTRY(spurious_interrupt_bug)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_spurious_interrupt_bug
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif /* !CONFIG_XEN */
++
++ENTRY(kernel_thread_helper)
++ pushl $0 # fake return address for unwinder
++ CFI_STARTPROC
++ movl %edx,%eax
++ push %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ call *%ebx
++ push %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call do_exit
++ CFI_ENDPROC
++ENDPROC(kernel_thread_helper)
++
++ENTRY(fixup_4gb_segment)
++ RING0_EC_FRAME
++ pushl $do_fixup_4gb_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++.section .rodata,"a"
++.align 4
++#include "syscall_table.S"
++
++syscall_table_size=(.-sys_call_table)
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/entry.S
+--- a/arch/i386/kernel/entry.S Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/entry.S Wed Aug 08 16:25:28 2007 -0300
+@@ -284,7 +284,7 @@ ENTRY(sysenter_entry)
+ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA esp, 0
+ CFI_REGISTER esp, ebp
+- movl TSS_sysenter_esp0(%esp),%esp
++ movl SYSENTER_stack_esp0(%esp),%esp
+ sysenter_past_esp:
+ /*
+ * No need to follow this irqs on/off section: the syscall
+@@ -727,7 +727,7 @@ device_not_available_emulate:
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+- * "TSS_sysenter_esp0+12" is because the NMI/debug handler will have
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+@@ -739,7 +739,7 @@ device_not_available_emulate:
+ cmpw $__KERNEL_CS,4(%esp); \
+ jne ok; \
+ label: \
+- movl TSS_sysenter_esp0+offset(%esp),%esp; \
++ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
+ CFI_DEF_CFA esp, 0; \
+ CFI_UNDEFINED eip; \
+ pushfl; \
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/fixup.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/fixup.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,95 @@
++/******************************************************************************
++ * fixup.c
++ *
++ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
++ * Used to avoid repeated slow emulation of common instructions used by the
++ * user-space TLS (Thread-Local Storage) libraries.
++ *
++ * **** NOTE ****
++ * Issues with the binary rewriting have caused it to be removed. Instead
++ * we rely on Xen's emulator to boot the kernel, and then print a banner
++ * message recommending that the user disables /lib/tls.
++ *
++ * Copyright (c) 2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/version.h>
++
++#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args )
++
++fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
++{
++#if 0
++ static unsigned long printed = 0;
++ char info[100];
++ int i;
++
++ /* Ignore statically-linked init. */
++ if (current->tgid == 1)
++ return;
++
++ HYPERVISOR_vm_assist(
++ VMASST_CMD_disable, VMASST_TYPE_4gb_segments_notify);
++
++ if (test_and_set_bit(0, &printed))
++ return;
++
++ sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
++
++ DP("");
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("** WARNING: Currently emulating unsupported memory accesses **");
++ DP("** in /lib/tls glibc libraries. The emulation is **");
++ DP("** slow. To ensure full performance you should **");
++ DP("** install a 'xen-friendly' (nosegneg) version of **");
++ DP("** the library, or disable tls support by executing **");
++ DP("** the following as root: **");
++ DP("** mv /lib/tls /lib/tls.disabled **");
++ DP("** Offending process: %-38.38s **", info);
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("");
++
++ for (i = 5; i > 0; i--) {
++ touch_softlockup_watchdog();
++ printk("Pausing... %d", i);
++ mdelay(1000);
++ printk("\b\b\b\b\b\b\b\b\b\b\b\b");
++ }
++
++ printk("Continuing...\n\n");
++#else
++ if (printk_ratelimit())
++ printk(KERN_WARNING
++ "4gb seg fixup, process %s (pid %d), cs:ip %02x:%08lx\n",
++ current->comm, current->tgid, regs->xcs, regs->eip);
++#endif
++}
++
++static int __init fixup_init(void)
++{
++ HYPERVISOR_vm_assist(
++ VMASST_CMD_enable, VMASST_TYPE_4gb_segments_notify);
++ return 0;
++}
++__initcall(fixup_init);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/head-xen.S
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/head-xen.S Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,292 @@
++
++
++.text
++#include <linux/elfnote.h>
++#include <linux/threads.h>
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/boot.h>
++#include <asm/cache.h>
++#include <asm/thread_info.h>
++#include <asm/asm-offsets.h>
++#include <asm/dwarf2.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/elfnote.h>
++
++/*
++ * References to members of the new_cpu_data structure.
++ */
++
++#define X86 new_cpu_data+CPUINFO_x86
++#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
++#define X86_MODEL new_cpu_data+CPUINFO_x86_model
++#define X86_MASK new_cpu_data+CPUINFO_x86_mask
++#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
++#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
++#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
++#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
++
++#define VIRT_ENTRY_OFFSET 0x0
++.org VIRT_ENTRY_OFFSET
++ENTRY(startup_32)
++
++#ifdef CONFIG_PARAVIRT
++ movl %cs, %eax
++ testl $0x3, %eax
++ jnz startup_paravirt
++#endif
++
++ movl %esi,xen_start_info
++ cld
++
++ call setup_pda
++
++ /* Set up the stack pointer */
++ movl $(init_thread_union+THREAD_SIZE),%esp
++
++ /* get vendor info */
++ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
++ XEN_CPUID
++ movl %eax,X86_CPUID # save CPUID level
++ movl %ebx,X86_VENDOR_ID # lo 4 chars
++ movl %edx,X86_VENDOR_ID+4 # next 4 chars
++ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
++
++ movl $1,%eax # Use the CPUID instruction to get CPU type
++ XEN_CPUID
++ movb %al,%cl # save reg for future use
++ andb $0x0f,%ah # mask processor family
++ movb %ah,X86
++ andb $0xf0,%al # mask model
++ shrb $4,%al
++ movb %al,X86_MODEL
++ andb $0x0f,%cl # mask mask revision
++ movb %cl,X86_MASK
++ movl %edx,X86_CAPABILITY
++
++ movb $1,X86_HARD_MATH
++
++ xorl %eax,%eax # Clear FS and LDT
++ movl %eax,%fs
++
++ movl $(__KERNEL_PDA),%eax
++ mov %eax,%gs
++
++ cld # gcc2 wants the direction flag cleared at all times
++
++ pushl %eax # fake return address
++ jmp start_kernel
++
++/*
++ * Point the GDT at this CPU's PDA. This will be
++ * cpu_gdt_table and boot_pda.
++ */
++setup_pda:
++ /* get the PDA pointer */
++ movl $boot_pda, %eax
++
++ /* slot the PDA address into the GDT */
++ mov $cpu_gdt_table, %ecx
++ mov %ax, (__KERNEL_PDA+0+2)(%ecx) /* base & 0x0000ffff */
++ shr $16, %eax
++ mov %al, (__KERNEL_PDA+4+0)(%ecx) /* base & 0x00ff0000 */
++ mov %ah, (__KERNEL_PDA+4+3)(%ecx) /* base & 0xff000000 */
++
++ # %esi still points to start_info, and no registers
++ # need to be preserved.
++
++ movl XEN_START_mfn_list(%esi), %ebx
++ movl $(cpu_gdt_table - __PAGE_OFFSET), %eax
++ shrl $PAGE_SHIFT, %eax
++ movl (%ebx,%eax,4), %ecx
++ pushl %ecx # frame number for set_gdt below
++
++ xorl %esi, %esi
++ xorl %edx, %edx
++ shldl $PAGE_SHIFT, %ecx, %edx
++ shll $PAGE_SHIFT, %ecx
++ orl $0x61, %ecx
++ movl $cpu_gdt_table, %ebx
++ movl $__HYPERVISOR_update_va_mapping, %eax
++ int $0x82
++
++ movl $(PAGE_SIZE_asm / 8), %ecx
++ movl %esp, %ebx
++ movl $__HYPERVISOR_set_gdt, %eax
++ int $0x82
++
++ popl %ecx
++ ret
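
setup_pda above patches the PDA's linear base address into an already-present GDT entry: bits 15:0 go to descriptor bytes 2-3, bits 23:16 to byte 4 and bits 31:24 to byte 7, the usual IA-32 segment-descriptor split. A standalone C sketch of that packing, with an arbitrary sample base address:

#include <stdio.h>
#include <stdint.h>

/* Scatter a 32-bit segment base into an 8-byte descriptor the same way
 * the mov/shr sequence above does; the sample base value is arbitrary. */
static void set_desc_base(uint8_t desc[8], uint32_t base)
{
    desc[2] = base & 0xff;           /* base  7:0  */
    desc[3] = (base >> 8) & 0xff;    /* base 15:8  */
    desc[4] = (base >> 16) & 0xff;   /* base 23:16 */
    desc[7] = (base >> 24) & 0xff;   /* base 31:24 */
}

int main(void)
{
    uint8_t desc[8] = { 0 };

    set_desc_base(desc, 0xC1234567u);
    for (int i = 0; i < 8; i++)
        printf("%02x ", desc[i]);
    printf("\n");
    return 0;
}
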
++
++#define HYPERCALL_PAGE_OFFSET 0x1000
++.org HYPERCALL_PAGE_OFFSET
++ENTRY(hypercall_page)
++ CFI_STARTPROC
++.skip 0x1000
++ CFI_ENDPROC
++
++/*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
++ * BSS section
++ */
++.section ".bss.page_aligned","w"
++ENTRY(empty_zero_page)
++ .fill 4096,1,0
++
++/*
++ * This starts the data section.
++ */
++.data
++ENTRY(start_pda)
++ .long boot_pda
++
++#ifdef CONFIG_PARAVIRT
++startup_paravirt:
++ cld
++ movl $(init_thread_union+THREAD_SIZE),%esp
++
++ /* We take pains to preserve all the regs. */
++ pushl %edx
++ pushl %ecx
++ pushl %eax
++
++ /* paravirt.o is last in link, and that probe fn never returns */
++ pushl $__start_paravirtprobe
++1:
++ movl 0(%esp), %eax
++ pushl (%eax)
++ movl 8(%esp), %eax
++ call *(%esp)
++ popl %eax
++
++ movl 4(%esp), %eax
++ movl 8(%esp), %ecx
++ movl 12(%esp), %edx
++
++ addl $4, (%esp)
++ jmp 1b
++#endif
++
++/*
++ * The Global Descriptor Table contains 28 quadwords, per-CPU.
++ */
++ .section .data.page_aligned, "aw"
++ .align PAGE_SIZE_asm
++ENTRY(cpu_gdt_table)
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++ .quad 0x0000000000000000 /* 0x20 unused */
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * Their code segments and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x0000000000000000 /* 0x90 32-bit code */
++ .quad 0x0000000000000000 /* 0x98 16-bit code */
++ .quad 0x0000000000000000 /* 0xa0 16-bit data */
++ .quad 0x0000000000000000 /* 0xa8 16-bit data */
++ .quad 0x0000000000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x0000000000000000 /* 0xb8 APM CS code */
++ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x0000000000000000 /* 0xc8 APM DS data */
++
++ .quad 0x0000000000000000 /* 0xd0 - ESPFIX SS */
++ .quad 0x00cf92000000ffff /* 0xd8 - PDA */
++ .quad 0x0000000000000000 /* 0xe0 - unused */
++ .quad 0x0000000000000000 /* 0xe8 - unused */
++ .quad 0x0000000000000000 /* 0xf0 - unused */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
++ .align PAGE_SIZE_asm
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++/*
++ * __xen_guest information
++ */
++.macro utoa value
++ .if (\value) < 0 || (\value) >= 0x10
++ utoa (((\value)>>4)&0x0fffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++ .byte '0' + ((\value) & 0xf)
++ .else
++ .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
++
++.section __xen_guest
++ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",ELF_PADDR_OFFSET=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",VIRT_ENTRY=0x"
++ utoa (__PAGE_OFFSET + LOAD_PHYSICAL_ADDR + VIRT_ENTRY_OFFSET)
++ .ascii ",HYPERCALL_PAGE=0x"
++ utoa ((LOAD_PHYSICAL_ADDR+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
++ .ascii ",FEATURES=writable_page_tables"
++ .ascii "|writable_descriptor_tables"
++ .ascii "|auto_translated_physmap"
++ .ascii "|pae_pgdir_above_4gb"
++ .ascii "|supervisor_mode_kernel"
++#ifdef CONFIG_X86_PAE
++ .ascii ",PAE=yes[extended-cr3]"
++#else
++ .ascii ",PAE=no"
++#endif
++ .ascii ",LOADER=generic"
++ .byte 0
++#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
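
The utoa macro above expands, at assembly time, to the hexadecimal ASCII digits of its argument, most significant digit first, so that addresses such as VIRT_BASE can be embedded in the __xen_guest string. The same recursion written as a small C program (the sample value is arbitrary and not necessarily this kernel's __PAGE_OFFSET):

#include <stdio.h>

/* Recursive hex conversion mirroring the utoa assembler macro: emit the
 * higher-order digits first, letters for digits ten and above. */
static void utoa_hex(unsigned long value)
{
    unsigned int digit = value & 0xf;

    if (value >= 0x10)
        utoa_hex(value >> 4);
    putchar(digit < 10 ? '0' + digit : 'A' + digit - 10);
}

int main(void)
{
    utoa_hex(0xC0000000UL);   /* sample value only */
    putchar('\n');
    return 0;
}
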
++
++
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
++ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
++ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, __PAGE_OFFSET)
++#if CONFIG_XEN_COMPAT <= 0x030002
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, __PAGE_OFFSET)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, startup_32)
++ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
++ ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, HYPERVISOR_VIRT_START)
++ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++#ifdef CONFIG_X86_PAE
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, _PAGE_PRESENT,_PAGE_PRESENT)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
++ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/init_task.c
+--- a/arch/i386/kernel/init_task.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/init_task.c Wed Aug 08 16:25:28 2007 -0300
+@@ -14,7 +14,14 @@ static struct files_struct init_files =
+ static struct files_struct init_files = INIT_FILES;
+ static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
++
++#ifdef CONFIG_XEN
++#define swapper_pg_dir ((pgd_t *)NULL)
+ struct mm_struct init_mm = INIT_MM(init_mm);
++#undef swapper_pg_dir
++#else
++struct mm_struct init_mm = INIT_MM(init_mm);
++#endif
+
+ EXPORT_SYMBOL(init_mm);
+
+@@ -38,9 +45,11 @@ struct task_struct init_task = INIT_TASK
+
+ EXPORT_SYMBOL(init_task);
+
++#ifndef CONFIG_X86_NO_TSS
+ /*
+ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
+ * no more per-task TSS's.
+ */
+ DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
++#endif
+
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/io_apic-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/io_apic-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,2973 @@
++/*
++ * Intel IO-APIC support for multi-Pentium hosts.
++ *
++ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ * Many thanks to Stig Venaas for trying out countless experimental
++ * patches and reporting/debugging problems patiently!
++ *
++ * (c) 1999, Multiple IO-APIC support, developed by
++ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
++ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
++ * further tested and cleaned up by Zach Brown <zab@redhat.com>
++ * and Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively
++ * Paul Diefenbaugh : Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/compiler.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/pci.h>
++#include <linux/msi.h>
++#include <linux/htirq.h>
++#include <linux/freezer.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/timer.h>
++#include <asm/i8259.h>
++#include <asm/nmi.h>
++#include <asm/msidef.h>
++#include <asm/hypertransport.h>
++
++#include <mach_apic.h>
++#include <mach_apicdef.h>
++
++#include "io_ports.h"
++
++#ifdef CONFIG_XEN
++
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq) ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ apic_op.value = value;
++ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
++}
++
++#define io_apic_read(a,r) xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#endif /* CONFIG_XEN */
++
++int (*ioapic_renumber_irq)(int ioapic, int irq);
++atomic_t irq_mis_count;
++
++/* Where if anywhere is the i8259 connect in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++int timer_over_8254 __initdata = 1;
++
++/*
++ * Is the SiS APIC rmw bug present ?
++ * -1 = don't know, 0 = no, 1 = yes
++ */
++int sis_apic_bug = -1;
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++static int disable_timer_pin_1 __initdata;
++
++/*
++ * Rough estimation of how many shared IRQs there are, can
++ * be changed anytime.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical, we want to do it O(1)
++ *
++ * the indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++ int apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++#ifndef CONFIG_XEN
++struct io_apic {
++ unsigned int index;
++ unsigned int unused[3];
++ unsigned int data;
++};
++
++static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
++{
++ return (void __iomem *) __fix_to_virt(FIX_IO_APIC_BASE_0 + idx)
++ + (mp_ioapics[idx].mpc_apicaddr & ~PAGE_MASK);
++}
++
++static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
++{
++ struct io_apic __iomem *io_apic = io_apic_base(apic);
++ writel(reg, &io_apic->index);
++ return readl(&io_apic->data);
++}
++
++static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ struct io_apic __iomem *io_apic = io_apic_base(apic);
++ writel(reg, &io_apic->index);
++ writel(value, &io_apic->data);
++}
++
++/*
++ * Re-write a value: to be used for read-modify-write
++ * cycles where the read already set up the index register.
++ *
++ * Older SiS APIC requires we rewrite the index register
++ */
++static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ volatile struct io_apic *io_apic = io_apic_base(apic);
++ if (sis_apic_bug)
++ writel(reg, &io_apic->index);
++ writel(value, &io_apic->data);
++}
++#endif /* !CONFIG_XEN */
++
++union entry_union {
++ struct { u32 w1, w2; };
++ struct IO_APIC_route_entry entry;
++};
++
++static struct IO_APIC_route_entry ioapic_read_entry(int apic, int pin)
++{
++ union entry_union eu;
++ unsigned long flags;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ eu.w1 = io_apic_read(apic, 0x10 + 2 * pin);
++ eu.w2 = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ return eu.entry;
++}
++
++/*
++ * When we write a new IO APIC routing entry, we need to write the high
++ * word first! If the mask bit in the low word is clear, we will enable
++ * the interrupt, and we need to make sure the entry is fully populated
++ * before that happens.
++ */
++static void
++__ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++ union entry_union eu;
++ eu.entry = e;
++ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++}
++
++static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __ioapic_write_entry(apic, pin, e);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++/*
++ * When we mask an IO APIC routing entry, we need to write the low
++ * word first, in order to set the mask bit before we change the
++ * high bits!
++ */
++
++#ifndef CONFIG_XEN
++static void ioapic_mask_entry(int apic, int pin)
++{
++ unsigned long flags;
++ union entry_union eu = { .entry.mask = 1 };
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2*pin, eu.w1);
++ io_apic_write(apic, 0x11 + 2*pin, eu.w2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++#endif
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++ static int first_free_entry = NR_IRQS;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (entry->next)
++ entry = irq_2_pin + entry->next;
++
++ if (entry->pin != -1) {
++ entry->next = first_free_entry;
++ entry = irq_2_pin + entry->next;
++ if (++first_free_entry >= PIN_MAP_SIZE)
++ panic("io_apic.c: whoops");
++ }
++ entry->apic = apic;
++ entry->pin = pin;
++}
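
irq_2_pin above is a singly linked list stored in a flat array: slot irq holds the first (apic, pin) pair for that IRQ, and further pairs for shared IRQs are chained through the next index into the free slots past NR_IRQS. A self-contained sketch of the scheme, with invented sizes and a plain error return where the kernel simply panics:

#include <stdio.h>

#define NR_IRQS      8
#define PIN_MAP_SIZE (2 * NR_IRQS)

struct irq_pin_list { int apic, pin, next; };

static struct irq_pin_list pin_map[PIN_MAP_SIZE];
static int first_free = NR_IRQS;       /* overflow slots live past NR_IRQS */

static int add_pin(unsigned int irq, int apic, int pin)
{
    struct irq_pin_list *entry = &pin_map[irq];

    while (entry->next)                /* walk to the end of this irq's list */
        entry = &pin_map[entry->next];

    if (entry->pin != -1) {            /* slot in use: chain an overflow slot */
        if (first_free >= PIN_MAP_SIZE)
            return -1;                 /* the kernel panics at this point */
        entry->next = first_free++;
        entry = &pin_map[entry->next];
    }
    entry->apic = apic;
    entry->pin = pin;
    return 0;
}

int main(void)
{
    for (int i = 0; i < PIN_MAP_SIZE; i++)
        pin_map[i] = (struct irq_pin_list){ .apic = -1, .pin = -1, .next = 0 };

    add_pin(3, 0, 11);                 /* first mapping for IRQ 3 */
    add_pin(3, 1, 5);                  /* shared pin, chained after NR_IRQS */
    printf("irq 3 -> pin %d, then pin %d\n",
           pin_map[3].pin, pin_map[pin_map[3].next].pin);
    return 0;
}
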
++
++#ifdef CONFIG_XEN
++#define clear_IO_APIC() ((void)0)
++#else
++/*
++ * Reroute an IRQ to a different pin.
++ */
++static void __init replace_pin_at_irq(unsigned int irq,
++ int oldapic, int oldpin,
++ int newapic, int newpin)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (1) {
++ if (entry->apic == oldapic && entry->pin == oldpin) {
++ entry->apic = newapic;
++ entry->pin = newpin;
++ }
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int pin, reg;
++
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ reg = io_apic_read(entry->apic, 0x10 + pin*2);
++ reg &= ~disable;
++ reg |= enable;
++ io_apic_modify(entry->apic, 0x10 + pin*2, reg);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++/* mask = 1 */
++static void __mask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0);
++}
++
++/* mask = 0 */
++static void __unmask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0, 0x00010000);
++}
++
++/* mask = 1, trigger = 0 */
++static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
++}
++
++/* mask = 0, trigger = 1 */
++static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
++}
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __mask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++ struct IO_APIC_route_entry entry;
++
++ /* Check delivery_mode to be sure we're not clearing an SMI pin */
++ entry = ioapic_read_entry(apic, pin);
++ if (entry.delivery_mode == dest_SMI)
++ return;
++
++ /*
++ * Disable it in the IO-APIC irq-routing table:
++ */
++ ioapic_mask_entry(apic, pin);
++}
++
++static void clear_IO_APIC (void)
++{
++ int apic, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++ clear_IO_APIC_pin(apic, pin);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
++{
++ unsigned long flags;
++ int pin;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int apicid_value;
++ cpumask_t tmp;
++
++ cpus_and(tmp, cpumask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(cpumask, tmp, CPU_MASK_ALL);
++
++ apicid_value = cpu_mask_to_apicid(cpumask);
++ /* Prepare to do the io_apic_write */
++ apicid_value = apicid_value << 24;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ set_native_irq_info(irq, cpumask);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++#if defined(CONFIG_IRQBALANCE)
++# include <asm/processor.h> /* kernel_thread() */
++# include <linux/kernel_stat.h> /* kstat */
++# include <linux/slab.h> /* kmalloc() */
++# include <linux/timer.h> /* time_after() */
++
++#ifdef CONFIG_BALANCED_IRQ_DEBUG
++# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
++# define Dprintk(x...) do { TDprintk(x); } while (0)
++# else
++# define TDprintk(x...)
++# define Dprintk(x...)
++# endif
++
++#define IRQBALANCE_CHECK_ARCH -999
++#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
++#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
++#define BALANCED_IRQ_MORE_DELTA (HZ/10)
++#define BALANCED_IRQ_LESS_DELTA (HZ)
++
++static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
++static int physical_balance __read_mostly;
++static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
++
++static struct irq_cpu_info {
++ unsigned long * last_irq;
++ unsigned long * irq_delta;
++ unsigned long irq;
++} irq_cpu_data[NR_CPUS];
++
++#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
++#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
++#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
++
++#define IDLE_ENOUGH(cpu,now) \
++ (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
++
++#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
++
++#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
++
++static cpumask_t balance_irq_affinity[NR_IRQS] = {
++ [0 ... NR_IRQS-1] = CPU_MASK_ALL
++};
++
++void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ balance_irq_affinity[irq] = mask;
++}
++
++static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
++ unsigned long now, int direction)
++{
++ int search_idle = 1;
++ int cpu = curr_cpu;
++
++ goto inside;
++
++ do {
++ if (unlikely(cpu == curr_cpu))
++ search_idle = 0;
++inside:
++ if (direction == 1) {
++ cpu++;
++ if (cpu >= NR_CPUS)
++ cpu = 0;
++ } else {
++ cpu--;
++ if (cpu == -1)
++ cpu = NR_CPUS-1;
++ }
++ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
++ (search_idle && !IDLE_ENOUGH(cpu,now)));
++
++ return cpu;
++}
++
++static inline void balance_irq(int cpu, int irq)
++{
++ unsigned long now = jiffies;
++ cpumask_t allowed_mask;
++ unsigned int new_cpu;
++
++ if (irqbalance_disabled)
++ return;
++
++ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
++ new_cpu = move(cpu, allowed_mask, now, 1);
++ if (cpu != new_cpu) {
++ set_pending_irq(irq, cpumask_of_cpu(new_cpu));
++ }
++}
++
++static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
++{
++ int i, j;
++ Dprintk("Rotating IRQs among CPUs.\n");
++ for_each_online_cpu(i) {
++ for (j = 0; j < NR_IRQS; j++) {
++ if (!irq_desc[j].action)
++ continue;
++ /* Is it a significant load ? */
++ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
++ useful_load_threshold)
++ continue;
++ balance_irq(i, j);
++ }
++ }
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++}
++
++static void do_irq_balance(void)
++{
++ int i, j;
++ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
++ unsigned long move_this_load = 0;
++ int max_loaded = 0, min_loaded = 0;
++ int load;
++ unsigned long useful_load_threshold = balanced_irq_interval + 10;
++ int selected_irq;
++ int tmp_loaded, first_attempt = 1;
++ unsigned long tmp_cpu_irq;
++ unsigned long imbalance = 0;
++ cpumask_t allowed_mask, target_cpu_mask, tmp;
++
++ for_each_possible_cpu(i) {
++ int package_index;
++ CPU_IRQ(i) = 0;
++ if (!cpu_online(i))
++ continue;
++ package_index = CPU_TO_PACKAGEINDEX(i);
++ for (j = 0; j < NR_IRQS; j++) {
++ unsigned long value_now, delta;
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if ( package_index == i )
++ IRQ_DELTA(package_index,j) = 0;
++ /* Determine the total count per processor per IRQ */
++ value_now = (unsigned long) kstat_cpu(i).irqs[j];
++
++ /* Determine the activity per processor per IRQ */
++ delta = value_now - LAST_CPU_IRQ(i,j);
++
++ /* Update last_cpu_irq[][] for the next time */
++ LAST_CPU_IRQ(i,j) = value_now;
++
++ /* Ignore IRQs whose rate is less than the clock */
++ if (delta < useful_load_threshold)
++ continue;
++ /* update the load for the processor or package total */
++ IRQ_DELTA(package_index,j) += delta;
++
++ /* Keep track of the higher numbered sibling as well */
++ if (i != package_index)
++ CPU_IRQ(i) += delta;
++ /*
++ * We have sibling A and sibling B in the package
++ *
++ * cpu_irq[A] = load for cpu A + load for cpu B
++ * cpu_irq[B] = load for cpu B
++ */
++ CPU_IRQ(package_index) += delta;
++ }
++ }
++ /* Find the least loaded processor package */
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (min_cpu_irq > CPU_IRQ(i)) {
++ min_cpu_irq = CPU_IRQ(i);
++ min_loaded = i;
++ }
++ }
++ max_cpu_irq = ULONG_MAX;
++
++tryanothercpu:
++ /* Look for heaviest loaded processor.
++ * We may come back to get the next heaviest loaded processor.
++ * Skip processors with trivial loads.
++ */
++ tmp_cpu_irq = 0;
++ tmp_loaded = -1;
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (max_cpu_irq <= CPU_IRQ(i))
++ continue;
++ if (tmp_cpu_irq < CPU_IRQ(i)) {
++ tmp_cpu_irq = CPU_IRQ(i);
++ tmp_loaded = i;
++ }
++ }
++
++ if (tmp_loaded == -1) {
++	/* In the case of a small number of heavy interrupt sources
++	 * loading some of the cpus too much, we use Ingo's original
++ * approach to rotate them around.
++ */
++ if (!first_attempt && imbalance >= useful_load_threshold) {
++ rotate_irqs_among_cpus(useful_load_threshold);
++ return;
++ }
++ goto not_worth_the_effort;
++ }
++
++ first_attempt = 0; /* heaviest search */
++ max_cpu_irq = tmp_cpu_irq; /* load */
++ max_loaded = tmp_loaded; /* processor */
++ imbalance = (max_cpu_irq - min_cpu_irq) / 2;
++
++ Dprintk("max_loaded cpu = %d\n", max_loaded);
++ Dprintk("min_loaded cpu = %d\n", min_loaded);
++ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
++ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
++ Dprintk("load imbalance = %lu\n", imbalance);
++
++	/* if the imbalance is less than approx 10% of the max load, then
++	 * we are seeing diminishing returns - quit
++ */
++ if (imbalance < (max_cpu_irq >> 3)) {
++ Dprintk("Imbalance too trivial\n");
++ goto not_worth_the_effort;
++ }
++
++tryanotherirq:
++ /* if we select an IRQ to move that can't go where we want, then
++ * see if there is another one to try.
++ */
++ move_this_load = 0;
++ selected_irq = -1;
++ for (j = 0; j < NR_IRQS; j++) {
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if (imbalance <= IRQ_DELTA(max_loaded,j))
++ continue;
++ /* Try to find the IRQ that is closest to the imbalance
++ * without going over.
++ */
++ if (move_this_load < IRQ_DELTA(max_loaded,j)) {
++ move_this_load = IRQ_DELTA(max_loaded,j);
++ selected_irq = j;
++ }
++ }
++ if (selected_irq == -1) {
++ goto tryanothercpu;
++ }
++
++ imbalance = move_this_load;
++
++	/* For the physical_balance case, we accumulated both load
++	 * values in one of the siblings' cpu_irq[],
++ * to use the same code for physical and logical processors
++ * as much as possible.
++ *
++ * NOTE: the cpu_irq[] array holds the sum of the load for
++ * sibling A and sibling B in the slot for the lowest numbered
++ * sibling (A), _AND_ the load for sibling B in the slot for
++ * the higher numbered sibling.
++ *
++ * We seek the least loaded sibling by making the comparison
++ * (A+B)/2 vs B
++ */
++ load = CPU_IRQ(min_loaded) >> 1;
++ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
++ if (load > CPU_IRQ(j)) {
++ /* This won't change cpu_sibling_map[min_loaded] */
++ load = CPU_IRQ(j);
++ min_loaded = j;
++ }
++ }
++
++ cpus_and(allowed_mask,
++ cpu_online_map,
++ balance_irq_affinity[selected_irq]);
++ target_cpu_mask = cpumask_of_cpu(min_loaded);
++ cpus_and(tmp, target_cpu_mask, allowed_mask);
++
++ if (!cpus_empty(tmp)) {
++
++ Dprintk("irq = %d moved to cpu = %d\n",
++ selected_irq, min_loaded);
++ /* mark for change destination */
++ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
++
++ /* Since we made a change, come back sooner to
++ * check for more variation.
++ */
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++ }
++ goto tryanotherirq;
++
++not_worth_the_effort:
++ /*
++ * if we did not find an IRQ to move, then adjust the time interval
++ * upward
++ */
++ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
++ Dprintk("IRQ worth rotating not found\n");
++ return;
++}
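
As the comments in do_irq_balance() above describe, the lower-numbered sibling's CPU_IRQ() slot accumulates the whole package load (A+B) while the higher-numbered sibling's slot holds only its own load, so the lighter sibling falls out of comparing (A+B)/2 against B. A worked numeric sketch with invented counts:

#include <stdio.h>

int main(void)
{
    unsigned long load_a = 300, load_b = 100;    /* invented per-sibling loads */
    unsigned long cpu_irq[2] = { load_a + load_b, load_b };

    unsigned long load = cpu_irq[0] >> 1;        /* (A+B)/2 = 200 */
    int min_loaded = 0;

    if (cpu_irq[1] < load) {                     /* 100 < 200: B is lighter */
        load = cpu_irq[1];
        min_loaded = 1;
    }
    printf("least loaded sibling: cpu %d, load %lu\n", min_loaded, load);
    return 0;
}
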
++
++static int balanced_irq(void *unused)
++{
++ int i;
++ unsigned long prev_balance_time = jiffies;
++ long time_remaining = balanced_irq_interval;
++
++ daemonize("kirqd");
++
++ /* push everything to CPU 0 to give us a starting point. */
++ for (i = 0 ; i < NR_IRQS ; i++) {
++ irq_desc[i].pending_mask = cpumask_of_cpu(0);
++ set_pending_irq(i, cpumask_of_cpu(0));
++ }
++
++ for ( ; ; ) {
++ time_remaining = schedule_timeout_interruptible(time_remaining);
++ try_to_freeze();
++ if (time_after(jiffies,
++ prev_balance_time+balanced_irq_interval)) {
++ preempt_disable();
++ do_irq_balance();
++ prev_balance_time = jiffies;
++ time_remaining = balanced_irq_interval;
++ preempt_enable();
++ }
++ }
++ return 0;
++}
++
++static int __init balanced_irq_init(void)
++{
++ int i;
++ struct cpuinfo_x86 *c;
++ cpumask_t tmp;
++
++ cpus_shift_right(tmp, cpu_online_map, 2);
++ c = &boot_cpu_data;
++	/* When not overwritten by the command line, ask the subarchitecture. */
++ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
++ irqbalance_disabled = NO_BALANCE_IRQ;
++ if (irqbalance_disabled)
++ return 0;
++
++ /* disable irqbalance completely if there is only one processor online */
++ if (num_online_cpus() < 2) {
++ irqbalance_disabled = 1;
++ return 0;
++ }
++ /*
++ * Enable physical balance only if more than 1 physical processor
++ * is present
++ */
++ if (smp_num_siblings > 1 && !cpus_empty(tmp))
++ physical_balance = 1;
++
++ for_each_online_cpu(i) {
++ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
++ printk(KERN_ERR "balanced_irq_init: out of memory");
++ goto failed;
++ }
++ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
++ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
++ }
++
++ printk(KERN_INFO "Starting balanced_irq\n");
++ if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
++ return 0;
++ else
++ printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
++failed:
++ for_each_possible_cpu(i) {
++ kfree(irq_cpu_data[i].irq_delta);
++ irq_cpu_data[i].irq_delta = NULL;
++ kfree(irq_cpu_data[i].last_irq);
++ irq_cpu_data[i].last_irq = NULL;
++ }
++ return 0;
++}
++
++int __init irqbalance_disable(char *str)
++{
++ irqbalance_disabled = 1;
++ return 1;
++}
++
++__setup("noirqbalance", irqbalance_disable);
++
++late_initcall(balanced_irq_init);
++#endif /* CONFIG_IRQBALANCE */
++#endif /* CONFIG_SMP */
++#endif /* !CONFIG_XEN */
++
++#ifndef CONFIG_SMP
++void fastcall send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++ unsigned int cfg;
++
++ /*
++ * Wait for idle.
++ */
++ apic_wait_icr_idle();
++ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
++ /*
++ * Send the IPI. The write to APIC_ICR fires this off.
++ */
++ apic_write_around(APIC_ICR, cfg);
++#endif
++}
++#endif /* !CONFIG_SMP */
++
++
++/*
++ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++
++static int __init ioapic_setup(char *str)
++{
++ skip_ioapic_setup = 1;
++ return 1;
++}
++
++__setup("noapic", ioapic_setup);
++
++static int __init ioapic_pirq_setup(char *str)
++{
++ int i, max;
++ int ints[MAX_PIRQS+1];
++
++ get_options(str, ARRAY_SIZE(ints), ints);
++
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ pirqs_enabled = 1;
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "PIRQ redirection, working around broken MP-BIOS.\n");
++ max = MAX_PIRQS;
++ if (ints[0] < MAX_PIRQS)
++ max = ints[0];
++
++ for (i = 0; i < max; i++) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++ /*
++ * PIRQs are mapped upside down, usually.
++ */
++ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++ }
++ return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_irqtype == type &&
++ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++ mp_irqs[i].mpc_dstirq == pin)
++ return i;
++
++ return -1;
++}
++
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++
++ return mp_irqs[i].mpc_dstirq;
++ }
++ return -1;
++}
++
++static int __init find_isa_irq_apic(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++ break;
++ }
++ if (i < mp_irq_entries) {
++ int apic;
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++ return apic;
++ }
++ }
++
++ return -1;
++}
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++ int apic, i, best_guess = -1;
++
++ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
++ "slot:%d, pin:%d.\n", bus, slot, pin);
++ if (mp_bus_id_to_pci_bus[bus] == -1) {
++ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++ return -1;
++ }
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++ break;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++ !mp_irqs[i].mpc_irqtype &&
++ (bus == lbus) &&
++ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++ if (!(apic || IO_APIC_IRQ(irq)))
++ continue;
++
++ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++ return irq;
++ /*
++ * Use the first all-but-pin matching entry as a
++ * best-guess fuzzy result for broken mptables.
++ */
++ if (best_guess < 0)
++ best_guess = irq;
++ }
++ }
++ return best_guess;
++}
++EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
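++
++#if 0
++/*
++ * Minimal sketch of the mpc_srcbusirq encoding relied on above
++ * (helper names are illustrative only): bits 1:0 carry the PCI
++ * interrupt pin (0 = INTA# .. 3 = INTD#) and bits 6:2 carry the
++ * device/slot number, e.g. srcbusirq 0x0d means slot 3, pin INTB#.
++ */
++static inline int mpc_srcbusirq_to_slot(int srcbusirq)
++{
++ return (srcbusirq >> 2) & 0x1f;
++}
++
++static inline int mpc_srcbusirq_to_pin(int srcbusirq)
++{
++ return srcbusirq & 3;
++}
++#endif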
++
++/*
++ * This function is currently only a helper for the i386 SMP boot process,
++ * where we need to reprogram the ioredtbls to cater for the CPUs which
++ * have come online, so the mask in all cases should simply be TARGET_CPUS.
++ */
++#ifdef CONFIG_SMP
++#ifndef CONFIG_XEN
++void __init setup_ioapic_dest(void)
++{
++ int pin, ioapic, irq, irq_entry;
++
++ if (skip_ioapic_setup == 1)
++ return;
++
++ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++ if (irq_entry == -1)
++ continue;
++ irq = pin_2_irq(irq_entry, ioapic, pin);
++ set_ioapic_affinity_irq(irq, TARGET_CPUS);
++ }
++
++ }
++}
++#endif /* !CONFIG_XEN */
++#endif
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++ if (irq < 16) {
++ unsigned int port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++ }
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "Broken MPtable reports ISA irq %d\n", irq);
++ return 0;
++}
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value. If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx) (0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx) (0)
++#define default_ISA_polarity(idx) (0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx) (1)
++#define default_PCI_polarity(idx) (1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx) (1)
++#define default_MCA_polarity(idx) (0)
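++
++/*
++ * Summary of the "conforming" defaults above:
++ *
++ * bus trigger polarity
++ * ISA edge (0) active high (0)
++ * EISA from ELCR active high (0)
++ * PCI level (1) active low (1)
++ * MCA level (1) active high (0)
++ */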
++
++static int __init MPBIOS_polarity(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int polarity;
++
++ /*
++ * Determine IRQ line polarity (high active or low active):
++ */
++ switch (mp_irqs[idx].mpc_irqflag & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent polarity */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ polarity = default_ISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ polarity = default_EISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ polarity = default_PCI_polarity(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ polarity = default_MCA_polarity(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* high active */
++ {
++ polarity = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ case 3: /* low active */
++ {
++ polarity = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int trigger;
++
++ /*
++ * Determine IRQ trigger mode (edge or level sensitive):
++ */
++ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ trigger = default_ISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ trigger = default_EISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ trigger = default_PCI_trigger(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ trigger = default_MCA_trigger(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* edge */
++ {
++ trigger = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ case 3: /* level */
++ {
++ trigger = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 0;
++ break;
++ }
++ }
++ return trigger;
++}
++
++static inline int irq_polarity(int idx)
++{
++ return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++ return MPBIOS_trigger(idx);
++}
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++ int irq, i;
++ int bus = mp_irqs[idx].mpc_srcbus;
++
++ /*
++ * Debugging check, we are in big trouble if this message pops up!
++ */
++ if (mp_irqs[idx].mpc_dstirq != pin)
++ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ case MP_BUS_EISA:
++ case MP_BUS_MCA:
++ {
++ irq = mp_irqs[idx].mpc_srcbusirq;
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ /*
++ * PCI IRQs are mapped in order
++ */
++ i = irq = 0;
++ while (i < apic)
++ irq += nr_ioapic_registers[i++];
++ irq += pin;
++
++ /*
++ * For MPS mode, so far only needed by ES7000 platform
++ */
++ if (ioapic_renumber_irq)
++ irq = ioapic_renumber_irq(apic, irq);
++
++ break;
++ }
++ default:
++ {
++ printk(KERN_ERR "unknown bus type %d.\n",bus);
++ irq = 0;
++ break;
++ }
++ }
++
++ /*
++ * PCI IRQ command line redirection. Yes, limits are hardcoded.
++ */
++ if ((pin >= 16) && (pin <= 23)) {
++ if (pirq_entries[pin-16] != -1) {
++ if (!pirq_entries[pin-16]) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "disabling PIRQ%d\n", pin-16);
++ } else {
++ irq = pirq_entries[pin-16];
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "using PIRQ%d -> IRQ %d\n",
++ pin-16, irq);
++ }
++ }
++ }
++ return irq;
++}
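++
++/*
++ * Worked example for the PCI case above (register counts are
++ * hypothetical): with nr_ioapic_registers[0] == 24, pin 3 on apic 1
++ * becomes irq = 24 + 3 = 27, i.e. pins are numbered sequentially
++ * across all IO-APICs unless a pirq= override or ioapic_renumber_irq
++ * changes the result.
++ */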
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++ int apic, idx, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++ return irq_trigger(idx);
++ }
++ }
++ /*
++ * nonexistent IRQs are edge default
++ */
++ return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
++
++static int __assign_irq_vector(int irq)
++{
++ int vector;
++ struct physdev_irq irq_op;
++
++ BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
++
++ if (irq_vector[irq] > 0)
++ return irq_vector[irq];
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
++ return -ENOSPC;
++
++ vector = irq_op.vector;
++ irq_vector[irq] = vector;
++
++ return vector;
++}
++
++static int assign_irq_vector(int irq)
++{
++ unsigned long flags;
++ int vector;
++
++ spin_lock_irqsave(&vector_lock, flags);
++ vector = __assign_irq_vector(irq);
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ return vector;
++}
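++
++#if 0
++/*
++ * Minimal usage sketch (the caller below is illustrative only): under
++ * Xen the vector comes from the hypervisor via
++ * PHYSDEVOP_alloc_irq_vector, so callers must handle -ENOSPC instead
++ * of assuming a vector is always available.
++ */
++static void example_bind_irq(int irq)
++{
++ int vector = assign_irq_vector(irq);
++
++ if (vector < 0) {
++ printk(KERN_ERR "irq %d: no vector available\n", irq);
++ return;
++ }
++ /* the vector is now cached in irq_vector[irq] for reuse */
++}
++#endif
++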
++#ifndef CONFIG_XEN
++static struct irq_chip ioapic_chip;
++
++#define IOAPIC_AUTO -1
++#define IOAPIC_EDGE 0
++#define IOAPIC_LEVEL 1
++
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++ trigger == IOAPIC_LEVEL)
++ set_irq_chip_and_handler_name(irq, &ioapic_chip,
++ handle_fasteoi_irq, "fasteoi");
++ else {
++ irq_desc[irq].status |= IRQ_DELAYED_DISABLE;
++ set_irq_chip_and_handler_name(irq, &ioapic_chip,
++ handle_edge_irq, "edge");
++ }
++ set_intr_gate(vector, interrupt[irq]);
++}
++#else
++#define ioapic_register_intr(_irq,_vector,_trigger) ((void)0)
++#endif
++
++static void __init setup_IO_APIC_irqs(void)
++{
++ struct IO_APIC_route_entry entry;
++ int apic, pin, idx, irq, first_notcon = 1, vector;
++ unsigned long flags;
++
++ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++ /*
++ * add it to the IO-APIC irq-routing table:
++ */
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* enable IRQ */
++ entry.dest.logical.logical_dest =
++ cpu_mask_to_apicid(TARGET_CPUS);
++
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if (idx == -1) {
++ if (first_notcon) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ " IO-APIC (apicid-pin) %d-%d",
++ mp_ioapics[apic].mpc_apicid,
++ pin);
++ first_notcon = 0;
++ } else
++ apic_printk(APIC_VERBOSE, ", %d-%d",
++ mp_ioapics[apic].mpc_apicid, pin);
++ continue;
++ }
++
++ entry.trigger = irq_trigger(idx);
++ entry.polarity = irq_polarity(idx);
++
++ if (irq_trigger(idx)) {
++ entry.trigger = 1;
++ entry.mask = 1;
++ }
++
++ irq = pin_2_irq(idx, apic, pin);
++ /*
++ * skip adding the timer int on secondary nodes, which causes
++ * a small but painful rift in the time-space continuum
++ */
++ if (multi_timer_check(apic, irq))
++ continue;
++ else
++ add_pin_to_irq(irq, apic, pin);
++
++ if (/*!apic &&*/ !IO_APIC_IRQ(irq))
++ continue;
++
++ if (IO_APIC_IRQ(irq)) {
++ vector = assign_irq_vector(irq);
++ entry.vector = vector;
++ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++
++ if (!apic && (irq < 16))
++ disable_8259A_irq(irq);
++ }
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __ioapic_write_entry(apic, pin, entry);
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ }
++
++ if (!first_notcon)
++ apic_printk(APIC_VERBOSE, " not connected.\n");
++}
++
++/*
++ * Set up the 8259A-master output pin:
++ */
++#ifndef CONFIG_XEN
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++ struct IO_APIC_route_entry entry;
++
++ memset(&entry,0,sizeof(entry));
++
++ disable_8259A_irq(0);
++
++ /* mask LVT0 */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++ /*
++ * We use logical delivery to get the timer IRQ
++ * to the first CPU.
++ */
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* unmask IRQ now */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.polarity = 0;
++ entry.trigger = 0;
++ entry.vector = vector;
++
++ /*
++ * The timer IRQ doesn't have to know that behind the
++ * scene we have a 8259A-master in AEOI mode ...
++ */
++ irq_desc[0].chip = &ioapic_chip;
++ set_irq_handler(0, handle_edge_irq);
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ ioapic_write_entry(apic, pin, entry);
++
++ enable_8259A_irq(0);
++}
++
++static inline void UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __init print_IO_APIC(void)
++{
++ int apic, i;
++ union IO_APIC_reg_00 reg_00;
++ union IO_APIC_reg_01 reg_01;
++ union IO_APIC_reg_02 reg_02;
++ union IO_APIC_reg_03 reg_03;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++ for (i = 0; i < nr_ioapics; i++)
++ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++ /*
++ * We are a bit conservative about what we expect. We have to
++ * know about every hardware change ASAP.
++ */
++ printk(KERN_INFO "testing the IO APIC.......................\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ reg_01.raw = io_apic_read(apic, 1);
++ if (reg_01.bits.version >= 0x10)
++ reg_02.raw = io_apic_read(apic, 2);
++ if (reg_01.bits.version >= 0x20)
++ reg_03.raw = io_apic_read(apic, 3);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
++ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
++ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
++ if (reg_00.bits.ID >= get_physical_broadcast())
++ UNEXPECTED_IO_APIC();
++ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
++ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
++ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++ (reg_01.bits.entries != 0x2E) &&
++ (reg_01.bits.entries != 0x3F)
++ )
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
++ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
++ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
++ )
++ UNEXPECTED_IO_APIC();
++ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
++ * but the value of reg_02 is read as the previous read register
++ * value, so ignore it if reg_02 == reg_01.
++ */
++ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
++ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++ }
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
++ * or reg_03, but the value of reg_0[23] is read as the previous read
++ * register value, so ignore it if reg_03 == reg_0[12].
++ */
++ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
++ reg_03.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
++ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
++ if (reg_03.bits.__reserved_1)
++ UNEXPECTED_IO_APIC();
++ }
++
++ printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++ " Stat Dest Deli Vect: \n");
++
++ for (i = 0; i <= reg_01.bits.entries; i++) {
++ struct IO_APIC_route_entry entry;
++
++ entry = ioapic_read_entry(apic, i);
++
++ printk(KERN_DEBUG " %02x %03X %02X ",
++ i,
++ entry.dest.logical.logical_dest,
++ entry.dest.physical.physical_dest
++ );
++
++ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
++ entry.mask,
++ entry.trigger,
++ entry.irr,
++ entry.polarity,
++ entry.delivery_status,
++ entry.dest_mode,
++ entry.delivery_mode,
++ entry.vector
++ );
++ }
++ }
++ printk(KERN_DEBUG "IRQ to pin mappings:\n");
++ for (i = 0; i < NR_IRQS; i++) {
++ struct irq_pin_list *entry = irq_2_pin + i;
++ if (entry->pin < 0)
++ continue;
++ printk(KERN_DEBUG "IRQ%d ", i);
++ for (;;) {
++ printk("-> %d:%d", entry->apic, entry->pin);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ printk("\n");
++ }
++
++ printk(KERN_INFO ".................................... done.\n");
++
++ return;
++}
++
++#if 0
++
++static void print_APIC_bitfield (int base)
++{
++ unsigned int v;
++ int i, j;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++ for (i = 0; i < 8; i++) {
++ v = apic_read(base + i*0x10);
++ for (j = 0; j < 32; j++) {
++ if (v & (1<<j))
++ printk("1");
++ else
++ printk("0");
++ }
++ printk("\n");
++ }
++}
++
++void /*__init*/ print_local_APIC(void * dummy)
++{
++ unsigned int v, ver, maxlvt;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++ smp_processor_id(), hard_smp_processor_id());
++ v = apic_read(APIC_ID);
++ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
++ v = apic_read(APIC_LVR);
++ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++ ver = GET_APIC_VERSION(v);
++ maxlvt = get_maxlvt();
++
++ v = apic_read(APIC_TASKPRI);
++ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ v = apic_read(APIC_ARBPRI);
++ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++ v & APIC_ARBPRI_MASK);
++ v = apic_read(APIC_PROCPRI);
++ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++ }
++
++ v = apic_read(APIC_EOI);
++ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++ v = apic_read(APIC_RRR);
++ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++ v = apic_read(APIC_LDR);
++ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++ v = apic_read(APIC_DFR);
++ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++ v = apic_read(APIC_SPIV);
++ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++ printk(KERN_DEBUG "... APIC ISR field:\n");
++ print_APIC_bitfield(APIC_ISR);
++ printk(KERN_DEBUG "... APIC TMR field:\n");
++ print_APIC_bitfield(APIC_TMR);
++ printk(KERN_DEBUG "... APIC IRR field:\n");
++ print_APIC_bitfield(APIC_IRR);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
++ apic_write(APIC_ESR, 0);
++ v = apic_read(APIC_ESR);
++ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_ICR);
++ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++ v = apic_read(APIC_ICR2);
++ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++ v = apic_read(APIC_LVTT);
++ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++ if (maxlvt > 3) { /* PC is LVT#4. */
++ v = apic_read(APIC_LVTPC);
++ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++ }
++ v = apic_read(APIC_LVT0);
++ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++ v = apic_read(APIC_LVT1);
++ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++ if (maxlvt > 2) { /* ERR is LVT#3. */
++ v = apic_read(APIC_LVTERR);
++ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_TMICT);
++ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++ v = apic_read(APIC_TMCCT);
++ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++ v = apic_read(APIC_TDCR);
++ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++ printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++ on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void /*__init*/ print_PIC(void)
++{
++ unsigned int v;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++ spin_lock_irqsave(&i8259A_lock, flags);
++
++ v = inb(0xa1) << 8 | inb(0x21);
++ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
++
++ v = inb(0xa0) << 8 | inb(0x20);
++ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
++
++ outb(0x0b,0xa0);
++ outb(0x0b,0x20);
++ v = inb(0xa0) << 8 | inb(0x20);
++ outb(0x0a,0xa0);
++ outb(0x0a,0x20);
++
++ spin_unlock_irqrestore(&i8259A_lock, flags);
++
++ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
++
++ v = inb(0x4d1) << 8 | inb(0x4d0);
++ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++
++#endif /* 0 */
++
++#else
++void __init print_IO_APIC(void) { }
++#endif /* !CONFIG_XEN */
++
++static void __init enable_IO_APIC(void)
++{
++ union IO_APIC_reg_01 reg_01;
++ int i8259_apic, i8259_pin;
++ int i, apic;
++ unsigned long flags;
++
++ for (i = 0; i < PIN_MAP_SIZE; i++) {
++ irq_2_pin[i].pin = -1;
++ irq_2_pin[i].next = 0;
++ }
++ if (!pirqs_enabled)
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ /*
++ * The number of IO-APIC IRQ registers (== #pins):
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(apic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
++ }
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ int pin;
++ /* See if any of the pins is in ExtINT mode */
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ struct IO_APIC_route_entry entry;
++ entry = ioapic_read_entry(apic, pin);
++
++
++ /* If the interrupt line is enabled and in ExtInt mode
++ * I have found the pin where the i8259 is connected.
++ */
++ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++ ioapic_i8259.apic = apic;
++ ioapic_i8259.pin = pin;
++ goto found_i8259;
++ }
++ }
++ }
++ found_i8259:
++ /* Look to see if the MP table has reported the ExtINT pin. */
++ /* If we could not find the appropriate pin by looking at the ioapic,
++ * the i8259 probably is not connected to the ioapic, but give the
++ * mptable a chance anyway.
++ */
++ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
++ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++ /* Trust the MP table if nothing is setup in the hardware */
++ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++ ioapic_i8259.pin = i8259_pin;
++ ioapic_i8259.apic = i8259_apic;
++ }
++ /* Complain if the MP table and the hardware disagree */
++ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++ {
++ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++ }
++
++ /*
++ * Do not trust the IO-APIC being empty at bootup
++ */
++ clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++ /*
++ * Clear the IO-APIC before rebooting:
++ */
++ clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++ /*
++ * If the i8259 is routed through an IOAPIC,
++ * put that IOAPIC in virtual wire mode
++ * so legacy interrupts can be delivered.
++ */
++ if (ioapic_i8259.pin != -1) {
++ struct IO_APIC_route_entry entry;
++
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 0; /* Enabled */
++ entry.trigger = 0; /* Edge */
++ entry.irr = 0;
++ entry.polarity = 0; /* High */
++ entry.delivery_status = 0;
++ entry.dest_mode = 0; /* Physical */
++ entry.delivery_mode = dest_ExtINT; /* ExtInt */
++ entry.vector = 0;
++ entry.dest.physical.physical_dest =
++ GET_APIC_ID(apic_read(APIC_ID));
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ ioapic_write_entry(ioapic_i8259.apic, ioapic_i8259.pin, entry);
++ }
++ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
++ */
++
++#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
++static void __init setup_ioapic_ids_from_mpc(void)
++{
++ union IO_APIC_reg_00 reg_00;
++ physid_mask_t phys_id_present_map;
++ int apic;
++ int i;
++ unsigned char old_id;
++ unsigned long flags;
++
++ /*
++ * Don't check I/O APIC IDs for xAPIC systems. They have
++ * no meaning without the serial APIC bus.
++ */
++ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++ return;
++ /*
++ * This is broken; anything with a real cpu count has to
++ * circumvent this idiocy regardless.
++ */
++ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ /*
++ * Set the IOAPIC ID to the value stored in the MPC table.
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ /* Read the register 0 value */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ old_id = mp_ioapics[apic].mpc_apicid;
++
++ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ reg_00.bits.ID);
++ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
++ }
++
++ /*
++ * Sanity check, is the ID really free? Every APIC in a
++ * system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(phys_id_present_map,
++ mp_ioapics[apic].mpc_apicid)) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ for (i = 0; i < get_physical_broadcast(); i++)
++ if (!physid_isset(i, phys_id_present_map))
++ break;
++ if (i >= get_physical_broadcast())
++ panic("Max APIC ID exceeded!\n");
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ i);
++ physid_set(i, phys_id_present_map);
++ mp_ioapics[apic].mpc_apicid = i;
++ } else {
++ physid_mask_t tmp;
++ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
++ apic_printk(APIC_VERBOSE, "Setting %d in the "
++ "phys_id_present_map\n",
++ mp_ioapics[apic].mpc_apicid);
++ physids_or(phys_id_present_map, phys_id_present_map, tmp);
++ }
++
++
++ /*
++ * We need to adjust the IRQ routing table
++ * if the ID changed.
++ */
++ if (old_id != mp_ioapics[apic].mpc_apicid)
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_dstapic == old_id)
++ mp_irqs[i].mpc_dstapic
++ = mp_ioapics[apic].mpc_apicid;
++
++ /*
++ * Read the right value from the MPC table and
++ * write it into the ID register.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "...changing IO-APIC physical APIC ID to %d ...",
++ mp_ioapics[apic].mpc_apicid);
++
++ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0, reg_00.raw);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /*
++ * Sanity check
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++ printk("could not set ID!\n");
++ else
++ apic_printk(APIC_VERBOSE, " ok.\n");
++ }
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++static int no_timer_check __initdata;
++
++static int __init notimercheck(char *s)
++{
++ no_timer_check = 1;
++ return 1;
++}
++__setup("no_timer_check", notimercheck);
++
++#ifndef CONFIG_XEN
++/*
++ * There is a nasty bug in some older SMP boards, their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ * - timer IRQ defaults to IO-APIC IRQ
++ * - if this function detects that timer IRQs are defunct, then we fall
++ * back to ISA timer IRQs
++ */
++int __init timer_irq_works(void)
++{
++ unsigned long t1 = jiffies;
++
++ if (no_timer_check)
++ return 1;
++
++ local_irq_enable();
++ /* Let ten ticks pass... */
++ mdelay((10 * 1000) / HZ);
++
++ /*
++ * Expect a few ticks at least, to be sure some possible
++ * glue logic does not lock up after one or two first
++ * ticks in a non-ExtINT mode. Also the local APIC
++ * might have cached one ExtINT interrupt. Finally, at
++ * least one tick may be lost due to delays.
++ */
++ if (jiffies - t1 > 4)
++ return 1;
++
++ return 0;
++}
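++
++/*
++ * Worked example (HZ value is illustrative): with HZ == 250 the
++ * mdelay above waits 40 ms, i.e. roughly ten timer ticks, so seeing
++ * jiffies advance by four or fewer in that window is taken as proof
++ * that the timer interrupt is not getting through.
++ */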
++
++/*
++ * In the SMP+IOAPIC case it might happen that there are an unspecified
++ * number of pending IRQ events unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way because then we do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Startup quirk:
++ *
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need
++ * to return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ *
++ * (We do this for level-triggered IRQs too - it cannot hurt.)
++ */
++static unsigned int startup_ioapic_irq(unsigned int irq)
++{
++ int was_pending = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ if (irq < 16) {
++ disable_8259A_irq(irq);
++ if (i8259A_irq_pending(irq))
++ was_pending = 1;
++ }
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return was_pending;
++}
++
++static void ack_ioapic_irq(unsigned int irq)
++{
++ move_native_irq(irq);
++ ack_APIC_irq();
++}
++
++static void ack_ioapic_quirk_irq(unsigned int irq)
++{
++ unsigned long v;
++ int i;
++
++ move_native_irq(irq);
++/*
++ * It appears there is an erratum which affects at least version 0x11
++ * of I/O APIC (that's the 82093AA and cores integrated into various
++ * chipsets). Under certain conditions a level-triggered interrupt is
++ * erroneously delivered as an edge-triggered one, but the respective IRR
++ * bit gets set nevertheless. As a result the I/O unit expects an EOI
++ * message but it will never arrive and further interrupts are blocked
++ * from the source. The exact reason is so far unknown, but the
++ * phenomenon was observed when two consecutive interrupt requests
++ * from a given source get delivered to the same CPU and the source is
++ * temporarily disabled in between.
++ *
++ * A workaround is to simulate an EOI message manually. We achieve it
++ * by setting the trigger mode to edge and then to level when the edge
++ * trigger mode gets detected in the TMR of a local APIC for a
++ * level-triggered interrupt. We mask the source for the time of the
++ * operation to prevent an edge-triggered interrupt escaping meanwhile.
++ * The idea is from Manfred Spraul. --macro
++ */
++ i = irq_vector[irq];
++
++ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
++
++ ack_APIC_irq();
++
++ if (!(v & (1 << (i & 0x1f)))) {
++ atomic_inc(&irq_mis_count);
++ spin_lock(&ioapic_lock);
++ __mask_and_edge_IO_APIC_irq(irq);
++ __unmask_and_level_IO_APIC_irq(irq);
++ spin_unlock(&ioapic_lock);
++ }
++}
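++
++#if 0
++/*
++ * Sketch of the TMR lookup used above (illustrative helper only): the
++ * local APIC TMR is a bank of eight 32-bit registers spaced 0x10
++ * apart, so vector v lives in register v / 32 at bit v % 32, and
++ * ((v & ~0x1f) >> 1) is simply (v / 32) * 0x10.
++ */
++static inline int tmr_bit_set(int vector)
++{
++ unsigned long v = apic_read(APIC_TMR + ((vector >> 5) << 4));
++
++ return (v >> (vector & 0x1f)) & 1;
++}
++#endif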
++
++static int ioapic_retrigger_irq(unsigned int irq)
++{
++ send_IPI_self(irq_vector[irq]);
++
++ return 1;
++}
++
++static struct irq_chip ioapic_chip __read_mostly = {
++ .name = "IO-APIC",
++ .startup = startup_ioapic_irq,
++ .mask = mask_IO_APIC_irq,
++ .unmask = unmask_IO_APIC_irq,
++ .ack = ack_ioapic_irq,
++ .eoi = ack_ioapic_quirk_irq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity_irq,
++#endif
++ .retrigger = ioapic_retrigger_irq,
++};
++
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++ int irq;
++
++ /*
++ * NOTE! The local APIC isn't very good at handling
++ * multiple interrupts at the same interrupt level.
++ * As the interrupt level is determined by taking the
++ * vector number and shifting that right by 4, we
++ * want to spread these out a bit so that they don't
++ * all fall in the same interrupt level.
++ *
++ * Also, we've got to be careful not to trash gate
++ * 0x80, because int 0x80 is hm, kind of importantish. ;)
++ */
++ for (irq = 0; irq < NR_IRQS ; irq++) {
++ int tmp = irq;
++ if (IO_APIC_IRQ(tmp) && !irq_vector[tmp]) {
++ /*
++ * Hmm.. We don't have an entry for this,
++ * so default to an old-fashioned 8259
++ * interrupt if we can..
++ */
++ if (irq < 16)
++ make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++ else
++ /* Strange. Oh, well.. */
++ irq_desc[irq].chip = &no_irq_chip;
++#endif
++ }
++ }
++}
++
++#ifndef CONFIG_XEN
++/*
++ * The local APIC irq-chip implementation:
++ */
++
++static void ack_apic(unsigned int irq)
++{
++ ack_APIC_irq();
++}
++
++static void mask_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void unmask_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static struct irq_chip lapic_chip __read_mostly = {
++ .name = "local-APIC-edge",
++ .mask = mask_lapic_irq,
++ .unmask = unmask_lapic_irq,
++ .eoi = ack_apic,
++};
++
++static void setup_nmi (void)
++{
++ /*
++ * Dirty trick to enable the NMI watchdog ...
++ * We put the 8259A master into AEOI mode and
++ * unmask on all local APICs LVT0 as NMI.
++ *
++ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++ * is from Maciej W. Rozycki - so we do not have to EOI from
++ * the NMI handler or the timer interrupt.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
++
++ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
++
++ apic_printk(APIC_VERBOSE, " done.\n");
++}
++
++/*
++ * This looks a bit hackish but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic. ICR does
++ * not support the ExtINT mode, unfortunately. We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA. --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++ int apic, pin, i;
++ struct IO_APIC_route_entry entry0, entry1;
++ unsigned char save_control, save_freq_select;
++
++ pin = find_isa_irq_pin(8, mp_INT);
++ if (pin == -1) {
++ WARN_ON_ONCE(1);
++ return;
++ }
++ apic = find_isa_irq_apic(8, mp_INT);
++ if (apic == -1) {
++ WARN_ON_ONCE(1);
++ return;
++ }
++
++ entry0 = ioapic_read_entry(apic, pin);
++ clear_IO_APIC_pin(apic, pin);
++
++ memset(&entry1, 0, sizeof(entry1));
++
++ entry1.dest_mode = 0; /* physical delivery */
++ entry1.mask = 0; /* unmask IRQ now */
++ entry1.dest.physical.physical_dest = hard_smp_processor_id();
++ entry1.delivery_mode = dest_ExtINT;
++ entry1.polarity = entry0.polarity;
++ entry1.trigger = 0;
++ entry1.vector = 0;
++
++ ioapic_write_entry(apic, pin, entry1);
++
++ save_control = CMOS_READ(RTC_CONTROL);
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++ RTC_FREQ_SELECT);
++ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
++ i = 100;
++ while (i-- > 0) {
++ mdelay(10);
++ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++ i -= 10;
++ }
++
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++ clear_IO_APIC_pin(apic, pin);
++
++ ioapic_write_entry(apic, pin, entry0);
++}
++#endif /* !CONFIG_XEN */
++
++int timer_uses_ioapic_pin_0;
++
++#ifndef CONFIG_XEN
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
++ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ */
++static inline void __init check_timer(void)
++{
++ int apic1, pin1, apic2, pin2;
++ int vector;
++
++ /*
++ * get/set the timer IRQ vector:
++ */
++ disable_8259A_irq(0);
++ vector = assign_irq_vector(0);
++ set_intr_gate(vector, interrupt[0]);
++
++ /*
++ * Subtle, code in do_timer_interrupt() expects an AEOI
++ * mode for the 8259A whenever interrupts are routed
++ * through I/O APICs. Also IRQ0 has to be enabled in
++ * the 8259A which implies the virtual wire has to be
++ * disabled in the local APIC.
++ */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++ init_8259A(1);
++ timer_ack = 1;
++ if (timer_over_8254 > 0)
++ enable_8259A_irq(0);
++
++ pin1 = find_isa_irq_pin(0, mp_INT);
++ apic1 = find_isa_irq_apic(0, mp_INT);
++ pin2 = ioapic_i8259.pin;
++ apic2 = ioapic_i8259.apic;
++
++ if (pin1 == 0)
++ timer_uses_ioapic_pin_0 = 1;
++
++ printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++ vector, apic1, pin1, apic2, pin2);
++
++ if (pin1 != -1) {
++ /*
++ * Ok, does IRQ0 through the IOAPIC work?
++ */
++ unmask_IO_APIC_irq(0);
++ if (timer_irq_works()) {
++ if (nmi_watchdog == NMI_IO_APIC) {
++ disable_8259A_irq(0);
++ setup_nmi();
++ enable_8259A_irq(0);
++ }
++ if (disable_timer_pin_1 > 0)
++ clear_IO_APIC_pin(0, pin1);
++ return;
++ }
++ clear_IO_APIC_pin(apic1, pin1);
++ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
++ "IO-APIC\n");
++ }
++
++ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
++ if (pin2 != -1) {
++ printk("\n..... (found pin %d) ...", pin2);
++ /*
++ * legacy devices should be connected to IO APIC #0
++ */
++ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++ if (timer_irq_works()) {
++ printk("works.\n");
++ if (pin1 != -1)
++ replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
++ else
++ add_pin_to_irq(0, apic2, pin2);
++ if (nmi_watchdog == NMI_IO_APIC) {
++ setup_nmi();
++ }
++ return;
++ }
++ /*
++ * Cleanup, just in case ...
++ */
++ clear_IO_APIC_pin(apic2, pin2);
++ }
++ printk(" failed.\n");
++
++ if (nmi_watchdog == NMI_IO_APIC) {
++ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++ nmi_watchdog = 0;
++ }
++
++ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++ disable_8259A_irq(0);
++ set_irq_chip_and_handler_name(0, &lapic_chip, handle_fasteoi_irq,
++ "fasteio");
++ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
++ enable_8259A_irq(0);
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++ printk(" failed.\n");
++
++ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++ timer_ack = 0;
++ init_8259A(0);
++ make_8259A_irq(0);
++ apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
++
++ unlock_ExtINT_logic();
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ printk(" failed :(.\n");
++ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
++ "report. Then try booting with the 'noapic' option");
++}
++#else
++#define check_timer() ((void)0)
++#endif /* CONFIG_XEN */
++
++/*
++ * IRQs that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ * Linux doesn't really care, as it's not actually used
++ * for any interrupt handling anyway.
++ */
++#define PIC_IRQS (1 << PIC_CASCADE_IR)
++
++void __init setup_IO_APIC(void)
++{
++ enable_IO_APIC();
++
++ if (acpi_ioapic)
++ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
++ else
++ io_apic_irqs = ~PIC_IRQS;
++
++ printk("ENABLING IO-APIC IRQs\n");
++
++ /*
++ * Set up IO-APIC IRQ routing.
++ */
++ if (!acpi_ioapic)
++ setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++ sync_Arb_IDs();
++#endif
++ setup_IO_APIC_irqs();
++ init_IO_APIC_traps();
++ check_timer();
++ if (!acpi_ioapic)
++ print_IO_APIC();
++}
++
++static int __init setup_disable_8254_timer(char *s)
++{
++ timer_over_8254 = -1;
++ return 1;
++}
++static int __init setup_enable_8254_timer(char *s)
++{
++ timer_over_8254 = 2;
++ return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
++
++/*
++ * Called after all the initialization is done. If we didn't find any
++ * APIC bugs then we can allow the modify fast path.
++ */
++
++static int __init io_apic_bug_finalize(void)
++{
++ if (sis_apic_bug == -1)
++ sis_apic_bug = 0;
++ if (is_initial_xendomain()) {
++ struct xen_platform_op op = { .cmd = XENPF_platform_quirk };
++ op.u.platform_quirk.quirk_id = sis_apic_bug ?
++ QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
++ HYPERVISOR_platform_op(&op);
++ }
++ return 0;
++}
++
++late_initcall(io_apic_bug_finalize);
++
++struct sysfs_ioapic_data {
++ struct sys_device dev;
++ struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
++ entry[i] = ioapic_read_entry(dev->id, i);
++
++ return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ union IO_APIC_reg_00 reg_00;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(dev->id, 0);
++ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++ io_apic_write(dev->id, 0, reg_00.raw);
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++)
++ ioapic_write_entry(dev->id, i, entry[i]);
++
++ return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++ set_kset_name("ioapic"),
++ .suspend = ioapic_suspend,
++ .resume = ioapic_resume,
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++ struct sys_device * dev;
++ int i, size, error = 0;
++
++ error = sysdev_class_register(&ioapic_sysdev_class);
++ if (error)
++ return error;
++
++ for (i = 0; i < nr_ioapics; i++ ) {
++ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++ * sizeof(struct IO_APIC_route_entry);
++ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++ if (!mp_ioapic_data[i]) {
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ memset(mp_ioapic_data[i], 0, size);
++ dev = &mp_ioapic_data[i]->dev;
++ dev->id = i;
++ dev->cls = &ioapic_sysdev_class;
++ error = sysdev_register(dev);
++ if (error) {
++ kfree(mp_ioapic_data[i]);
++ mp_ioapic_data[i] = NULL;
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ }
++
++ return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/*
++ * Dynamic irq allocate and deallocation
++ */
++int create_irq(void)
++{
++ /* Allocate an unused irq */
++ int irq, new, vector = 0;
++ unsigned long flags;
++
++ irq = -ENOSPC;
++ spin_lock_irqsave(&vector_lock, flags);
++ for (new = (NR_IRQS - 1); new >= 0; new--) {
++ if (platform_legacy_irq(new))
++ continue;
++ if (irq_vector[new] != 0)
++ continue;
++ vector = __assign_irq_vector(new);
++ if (likely(vector > 0))
++ irq = new;
++ break;
++ }
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ if (irq >= 0) {
++#ifndef CONFIG_XEN
++ set_intr_gate(vector, interrupt[irq]);
++#endif
++ dynamic_irq_init(irq);
++ }
++ return irq;
++}
++
++void destroy_irq(unsigned int irq)
++{
++ unsigned long flags;
++
++ dynamic_irq_cleanup(irq);
++
++ spin_lock_irqsave(&vector_lock, flags);
++ irq_vector[irq] = 0;
++ spin_unlock_irqrestore(&vector_lock, flags);
++}
++
++/*
++ * MSI message composition
++ */
++#ifdef CONFIG_PCI_MSI
++static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
++{
++ int vector;
++ unsigned dest;
++
++ vector = assign_irq_vector(irq);
++ if (vector >= 0) {
++ dest = cpu_mask_to_apicid(TARGET_CPUS);
++
++ msg->address_hi = MSI_ADDR_BASE_HI;
++ msg->address_lo =
++ MSI_ADDR_BASE_LO |
++ ((INT_DEST_MODE == 0) ?
++ MSI_ADDR_DEST_MODE_PHYSICAL:
++ MSI_ADDR_DEST_MODE_LOGICAL) |
++ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
++ MSI_ADDR_REDIRECTION_CPU:
++ MSI_ADDR_REDIRECTION_LOWPRI) |
++ MSI_ADDR_DEST_ID(dest);
++
++ msg->data =
++ MSI_DATA_TRIGGER_EDGE |
++ MSI_DATA_LEVEL_ASSERT |
++ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
++ MSI_DATA_DELIVERY_FIXED:
++ MSI_DATA_DELIVERY_LOWPRI) |
++ MSI_DATA_VECTOR(vector);
++ }
++ return vector;
++}
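++
++/*
++ * On success the message composed above ends up targeting the
++ * architectural MSI address window at 0xFEExxxxx with the destination
++ * APIC ID folded into address_lo, while data carries the freshly
++ * allocated vector plus the edge-trigger and delivery-mode flags.
++ */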
++
++#ifdef CONFIG_SMP
++static void set_msi_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ struct msi_msg msg;
++ unsigned int dest;
++ cpumask_t tmp;
++ int vector;
++
++ cpus_and(tmp, mask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ vector = assign_irq_vector(irq);
++ if (vector < 0)
++ return;
++
++ dest = cpu_mask_to_apicid(mask);
++
++ read_msi_msg(irq, &msg);
++
++ msg.data &= ~MSI_DATA_VECTOR_MASK;
++ msg.data |= MSI_DATA_VECTOR(vector);
++ msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
++ msg.address_lo |= MSI_ADDR_DEST_ID(dest);
++
++ write_msi_msg(irq, &msg);
++ set_native_irq_info(irq, mask);
++}
++#endif /* CONFIG_SMP */
++
++/*
++ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
++ * which implement the MSI or MSI-X Capability Structure.
++ */
++static struct irq_chip msi_chip = {
++ .name = "PCI-MSI",
++ .unmask = unmask_msi_irq,
++ .mask = mask_msi_irq,
++ .ack = ack_ioapic_irq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_msi_irq_affinity,
++#endif
++ .retrigger = ioapic_retrigger_irq,
++};
++
++int arch_setup_msi_irq(unsigned int irq, struct pci_dev *dev)
++{
++ struct msi_msg msg;
++ int ret;
++ ret = msi_compose_msg(dev, irq, &msg);
++ if (ret < 0)
++ return ret;
++
++ write_msi_msg(irq, &msg);
++
++ set_irq_chip_and_handler_name(irq, &msi_chip, handle_edge_irq,
++ "edge");
++
++ return 0;
++}
++
++void arch_teardown_msi_irq(unsigned int irq)
++{
++ return;
++}
++
++#endif /* CONFIG_PCI_MSI */
++
++/*
++ * Hypertransport interrupt support
++ */
++#ifdef CONFIG_HT_IRQ
++
++#ifdef CONFIG_SMP
++
++static void target_ht_irq(unsigned int irq, unsigned int dest)
++{
++ struct ht_irq_msg msg;
++ fetch_ht_irq_msg(irq, &msg);
++
++ msg.address_lo &= ~(HT_IRQ_LOW_DEST_ID_MASK);
++ msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
++
++ msg.address_lo |= HT_IRQ_LOW_DEST_ID(dest);
++ msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
++
++ write_ht_irq_msg(irq, &msg);
++}
++
++static void set_ht_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ unsigned int dest;
++ cpumask_t tmp;
++
++ cpus_and(tmp, mask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(mask, tmp, CPU_MASK_ALL);
++
++ dest = cpu_mask_to_apicid(mask);
++
++ target_ht_irq(irq, dest);
++ set_native_irq_info(irq, mask);
++}
++#endif
++
++static struct irq_chip ht_irq_chip = {
++ .name = "PCI-HT",
++ .mask = mask_ht_irq,
++ .unmask = unmask_ht_irq,
++ .ack = ack_ioapic_irq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ht_irq_affinity,
++#endif
++ .retrigger = ioapic_retrigger_irq,
++};
++
++int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
++{
++ int vector;
++
++ vector = assign_irq_vector(irq);
++ if (vector >= 0) {
++ struct ht_irq_msg msg;
++ unsigned dest;
++ cpumask_t tmp;
++
++ cpus_clear(tmp);
++ cpu_set(vector >> 8, tmp);
++ dest = cpu_mask_to_apicid(tmp);
++
++ msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
++
++ msg.address_lo =
++ HT_IRQ_LOW_BASE |
++ HT_IRQ_LOW_DEST_ID(dest) |
++ HT_IRQ_LOW_VECTOR(vector) |
++ ((INT_DEST_MODE == 0) ?
++ HT_IRQ_LOW_DM_PHYSICAL :
++ HT_IRQ_LOW_DM_LOGICAL) |
++ HT_IRQ_LOW_RQEOI_EDGE |
++ ((INT_DELIVERY_MODE != dest_LowestPrio) ?
++ HT_IRQ_LOW_MT_FIXED :
++ HT_IRQ_LOW_MT_ARBITRATED) |
++ HT_IRQ_LOW_IRQ_MASKED;
++
++ write_ht_irq_msg(irq, &msg);
++
++ set_irq_chip_and_handler_name(irq, &ht_irq_chip,
++ handle_edge_irq, "edge");
++ }
++ return vector;
++}
++#endif /* CONFIG_HT_IRQ */
++
++/* --------------------------------------------------------------------------
++ ACPI-based IOAPIC Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++int __init io_apic_get_unique_id (int ioapic, int apic_id)
++{
++#ifndef CONFIG_XEN
++ union IO_APIC_reg_00 reg_00;
++ static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
++ physid_mask_t tmp;
++ unsigned long flags;
++ int i = 0;
++
++ /*
++ * The P4 platform supports up to 256 APIC IDs on two separate APIC
++ * buses (one for LAPICs, one for IOAPICs), where its predecessors only
++ * support up to 16 on one shared APIC bus.
++ *
++ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
++ * advantage of new APIC bus architecture.
++ */
++
++ if (physids_empty(apic_id_map))
++ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ if (apic_id >= get_physical_broadcast()) {
++ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
++ "%d\n", ioapic, apic_id, reg_00.bits.ID);
++ apic_id = reg_00.bits.ID;
++ }
++
++ /*
++ * Every APIC in a system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(apic_id_map, apic_id)) {
++
++ for (i = 0; i < get_physical_broadcast(); i++) {
++ if (!check_apicid_used(apic_id_map, i))
++ break;
++ }
++
++ if (i == get_physical_broadcast())
++ panic("Max apic_id exceeded!\n");
++
++ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
++ "trying %d\n", ioapic, apic_id, i);
++
++ apic_id = i;
++ }
++
++ tmp = apicid_to_cpu_present(apic_id);
++ physids_or(apic_id_map, apic_id_map, tmp);
++
++ if (reg_00.bits.ID != apic_id) {
++ reg_00.bits.ID = apic_id;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0, reg_00.raw);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /* Sanity check */
++ if (reg_00.bits.ID != apic_id) {
++ printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
++ return -1;
++ }
++ }
++
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
++#endif /* !CONFIG_XEN */
++
++ return apic_id;
++}
++
++
++int __init io_apic_get_version (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ if (!IO_APIC_IRQ(irq)) {
++ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++ ioapic);
++ return -EINVAL;
++ }
++
++ /*
++ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++ * Note that we mask (disable) IRQs now -- these get enabled when the
++ * corresponding device driver registers for this IRQ.
++ */
++
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.trigger = edge_level;
++ entry.polarity = active_high_low;
++ entry.mask = 1;
++
++ /*
++ * IRQs < 16 are already in the irq_2_pin[] map
++ */
++ if (irq >= 16)
++ add_pin_to_irq(irq, ioapic, pin);
++
++ entry.vector = assign_irq_vector(irq);
++
++ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
++ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
++ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++ edge_level, active_high_low);
++
++ ioapic_register_intr(irq, entry.vector, edge_level);
++
++ if (!ioapic && (irq < 16))
++ disable_8259A_irq(irq);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __ioapic_write_entry(ioapic, pin, entry);
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++#endif /* CONFIG_ACPI */
++
++static int __init parse_disable_timer_pin_1(char *arg)
++{
++ disable_timer_pin_1 = 1;
++ return 0;
++}
++early_param("disable_timer_pin_1", parse_disable_timer_pin_1);
++
++static int __init parse_enable_timer_pin_1(char *arg)
++{
++ disable_timer_pin_1 = -1;
++ return 0;
++}
++early_param("enable_timer_pin_1", parse_enable_timer_pin_1);
++
++static int __init parse_noapic(char *arg)
++{
++ /* disable IO-APIC */
++ disable_ioapic_setup();
++ return 0;
++}
++early_param("noapic", parse_noapic);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/ioport-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/ioport-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,122 @@
++/*
++ * linux/arch/i386/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++ unsigned long mask;
++ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
++ unsigned int low_index = base & (BITS_PER_LONG-1);
++ int length = low_index + extent;
++
++ if (low_index != 0) {
++ mask = (~0UL << low_index);
++ if (length < BITS_PER_LONG)
++ mask &= ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ length -= BITS_PER_LONG;
++ }
++
++ mask = (new_value ? ~0UL : 0UL);
++ while (length >= BITS_PER_LONG) {
++ *bitmap_base++ = mask;
++ length -= BITS_PER_LONG;
++ }
++
++ if (length > 0) {
++ mask = ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ }
++}
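++
++#if 0
++/*
++ * Usage sketch (the port range is chosen purely for illustration): the
++ * io bitmap follows the x86 TSS convention where a set bit means
++ * "access denied", which is why sys_ioperm() below passes !turn_on,
++ * so granting a range clears its bits and revoking it sets them again.
++ */
++static void example_toggle_lpt(unsigned long *bitmap)
++{
++ set_bitmap(bitmap, 0x378, 3, 0); /* grant ports 0x378-0x37a */
++ set_bitmap(bitmap, 0x378, 3, 1); /* revoke them again */
++}
++#endif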
++
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++ struct thread_struct * t = &current->thread;
++ unsigned long *bitmap;
++ struct physdev_set_iobitmap set_iobitmap;
++
++ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++ return -EINVAL;
++ if (turn_on && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /*
++ * If it's the first ioperm() call in this thread's lifetime, set the
++ * IO bitmap up. ioperm() is much less timing critical than clone(),
++ * which is why we delay this operation until now:
++ */
++ if (!t->io_bitmap_ptr) {
++ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!bitmap)
++ return -ENOMEM;
++
++ memset(bitmap, 0xff, IO_BITMAP_BYTES);
++ t->io_bitmap_ptr = bitmap;
++ set_thread_flag(TIF_IO_BITMAP);
++
++ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
++ set_iobitmap.nr_ports = IO_BITMAP_BITS;
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
++ }
++
++ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++ return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ * Here we just change the eflags value on the stack: we allow
++ * only the super-user to do it. This depends on the stack-layout
++ * on system-call entry - see also fork() and the signal handling
++ * code.
++ */
++
++asmlinkage long sys_iopl(unsigned long unused)
++{
++ volatile struct pt_regs * regs = (struct pt_regs *) &unused;
++ unsigned int level = regs->ebx;
++ struct thread_struct *t = &current->thread;
++ unsigned int old = (t->iopl >> 12) & 3;
++
++ if (level > 3)
++ return -EINVAL;
++ /* Trying to gain more privileges? */
++ if (level > old) {
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++ }
++ t->iopl = level << 12;
++ set_iopl_mask(t->iopl);
++ return 0;
++}
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/irq.c
+--- a/arch/i386/kernel/irq.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/irq.c Wed Aug 08 16:25:28 2007 -0300
+@@ -288,7 +288,9 @@ skip:
+ }
+
+ #ifdef CONFIG_HOTPLUG_CPU
++#ifndef CONFIG_XEN
+ #include <mach_apic.h>
++#endif
+
+ void fixup_irqs(cpumask_t map)
+ {
+@@ -302,7 +304,9 @@ void fixup_irqs(cpumask_t map)
+
+ cpus_and(mask, irq_desc[irq].affinity, map);
+ if (any_online_cpu(mask) == NR_CPUS) {
++#ifndef CONFIG_XEN
+ printk("Breaking affinity for irq %i\n", irq);
++#endif
+ mask = map;
+ }
+ if (irq_desc[irq].chip->set_affinity)
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/ldt-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/ldt-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,268 @@
++/*
++ * linux/arch/i386/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/mmu_context.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++ if (current->active_mm)
++ load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++{
++ void *oldldt;
++ void *newldt;
++ int oldsize;
++
++ if (mincount <= pc->size)
++ return 0;
++ oldsize = pc->size;
++ mincount = (mincount+511)&(~511);
++ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++ else
++ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++ if (!newldt)
++ return -ENOMEM;
++
++ if (oldsize)
++ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++ oldldt = pc->ldt;
++ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++ pc->ldt = newldt;
++ wmb();
++ pc->size = mincount;
++ wmb();
++
++ if (reload) {
++#ifdef CONFIG_SMP
++ cpumask_t mask;
++ preempt_disable();
++#endif
++ make_pages_readonly(
++ pc->ldt,
++ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ load_LDT(pc);
++#ifdef CONFIG_SMP
++ mask = cpumask_of_cpu(smp_processor_id());
++ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++ smp_call_function(flush_ldt, NULL, 1, 1);
++ preempt_enable();
++#endif
++ }
++ if (oldsize) {
++ make_pages_writable(
++ oldldt,
++ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(oldldt);
++ else
++ kfree(oldldt);
++ }
++ return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++ int err = alloc_ldt(new, old->size, 0);
++ if (err < 0)
++ return err;
++ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++ make_pages_readonly(
++ new->ldt,
++ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++ struct mm_struct * old_mm;
++ int retval = 0;
++
++ init_MUTEX(&mm->context.sem);
++ mm->context.size = 0;
++ mm->context.has_foreign_mappings = 0;
++ old_mm = current->mm;
++ if (old_mm && old_mm->context.size > 0) {
++ down(&old_mm->context.sem);
++ retval = copy_ldt(&mm->context, &old_mm->context);
++ up(&old_mm->context.sem);
++ }
++ return retval;
++}
++
++/*
++ * No need to lock the MM as we are the last user
++ */
++void destroy_context(struct mm_struct *mm)
++{
++ if (mm->context.size) {
++ if (mm == current->active_mm)
++ clear_LDT();
++ make_pages_writable(
++ mm->context.ldt,
++ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(mm->context.ldt);
++ else
++ kfree(mm->context.ldt);
++ mm->context.size = 0;
++ }
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ struct mm_struct * mm = current->mm;
++
++ if (!mm->context.size)
++ return 0;
++ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++ down(&mm->context.sem);
++ size = mm->context.size*LDT_ENTRY_SIZE;
++ if (size > bytecount)
++ size = bytecount;
++
++ err = 0;
++ if (copy_to_user(ptr, mm->context.ldt, size))
++ err = -EFAULT;
++ up(&mm->context.sem);
++ if (err < 0)
++ goto error_return;
++ if (size != bytecount) {
++ /* zero-fill the rest */
++ if (clear_user(ptr+size, bytecount-size) != 0) {
++ err = -EFAULT;
++ goto error_return;
++ }
++ }
++ return bytecount;
++error_return:
++ return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++
++ err = 0;
++ size = 5*sizeof(struct desc_struct);
++ if (size > bytecount)
++ size = bytecount;
++
++ err = size;
++ if (clear_user(ptr, size))
++ err = -EFAULT;
++
++ return err;
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++ struct mm_struct * mm = current->mm;
++ __u32 entry_1, entry_2;
++ int error;
++ struct user_desc ldt_info;
++
++ error = -EINVAL;
++ if (bytecount != sizeof(ldt_info))
++ goto out;
++ error = -EFAULT;
++ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
++ goto out;
++
++ error = -EINVAL;
++ if (ldt_info.entry_number >= LDT_ENTRIES)
++ goto out;
++ if (ldt_info.contents == 3) {
++ if (oldmode)
++ goto out;
++ if (ldt_info.seg_not_present == 0)
++ goto out;
++ }
++
++ down(&mm->context.sem);
++ if (ldt_info.entry_number >= mm->context.size) {
++ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++ if (error < 0)
++ goto out_unlock;
++ }
++
++ /* Allow LDTs to be cleared by the user. */
++ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++ if (oldmode || LDT_empty(&ldt_info)) {
++ entry_1 = 0;
++ entry_2 = 0;
++ goto install;
++ }
++ }
++
++ entry_1 = LDT_entry_a(&ldt_info);
++ entry_2 = LDT_entry_b(&ldt_info);
++ if (oldmode)
++ entry_2 &= ~(1 << 20);
++
++ /* Install the new entry ... */
++install:
++ error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
++ entry_1, entry_2);
++
++out_unlock:
++ up(&mm->context.sem);
++out:
++ return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++ int ret = -ENOSYS;
++
++ switch (func) {
++ case 0:
++ ret = read_ldt(ptr, bytecount);
++ break;
++ case 1:
++ ret = write_ldt(ptr, bytecount, 1);
++ break;
++ case 2:
++ ret = read_default_ldt(ptr, bytecount);
++ break;
++ case 0x11:
++ ret = write_ldt(ptr, bytecount, 0);
++ break;
++ }
++ return ret;
++}
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/machine_kexec.c
+--- a/arch/i386/kernel/machine_kexec.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/machine_kexec.c Wed Aug 08 16:25:28 2007 -0300
+@@ -20,6 +20,10 @@
+ #include <asm/desc.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
+ #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
+ static u32 kexec_pgd[1024] PAGE_ALIGNED;
+ #ifdef CONFIG_X86_PAE
+@@ -28,6 +32,40 @@ static u32 kexec_pmd1[1024] PAGE_ALIGNED
+ #endif
+ static u32 kexec_pte0[1024] PAGE_ALIGNED;
+ static u32 kexec_pte1[1024] PAGE_ALIGNED;
++
++#ifdef CONFIG_XEN
++
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
++
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
++
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
++#endif
++
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++ void *control_page;
++
++ memset(xki->page_list, 0, sizeof(xki->page_list));
++
++ control_page = page_address(image->control_code_page);
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++ xki->page_list[PA_PGD] = __ma(kexec_pgd);
++#ifdef CONFIG_X86_PAE
++ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++#endif
++ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
++
++}
++
++#else /* CONFIG_XEN */
+
+ static void set_idt(void *newidt, __u16 limit)
+ {
+@@ -70,6 +108,7 @@ static void load_segments(void)
+ #undef STR
+ #undef __STR
+ }
++#endif /* !CONFIG_XEN */
+
+ /*
+ * A architecture hook called to validate the
+@@ -97,6 +136,7 @@ void machine_kexec_cleanup(struct kimage
+ {
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+@@ -147,6 +187,7 @@ NORET_TYPE void machine_kexec(struct kim
+ relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
+ image->start, cpu_has_pae);
+ }
++#endif
+
+ /* crashkernel=size@addr specifies the location to reserve for
+ * a crash kernel. By reserving this memory we guarantee
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/microcode-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/microcode-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,144 @@
++/*
++ * Intel CPU Microcode Update Driver for Linux
++ *
++ * Copyright (C) 2000-2004 Tigran Aivazian
++ *
++ * This driver allows upgrading the microcode on Intel processors
++ * belonging to IA-32 family - PentiumPro, Pentium II,
++ * Pentium III, Xeon, Pentium 4, etc.
++ *
++ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
++ * Order Number 245472 or free download from:
++ *
++ * http://developer.intel.com/design/pentium4/manuals/245472.htm
++ *
++ * For more information, go to http://www.urbanmyth.org/microcode
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++//#define DEBUG /* pr_debug */
++#include <linux/capability.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/cpumask.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/miscdevice.h>
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/syscalls.h>
++
++#include <asm/msr.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++
++MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
++MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
++MODULE_LICENSE("GPL");
++
++static int verbose;
++module_param(verbose, int, 0644);
++
++#define MICROCODE_VERSION "1.14a-xen"
++
++#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
++#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
++#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
++
++/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
++static DEFINE_MUTEX(microcode_mutex);
++
++static int microcode_open (struct inode *unused1, struct file *unused2)
++{
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++
++static int do_microcode_update (const void __user *ubuf, size_t len)
++{
++ int err;
++ void *kbuf;
++
++ kbuf = vmalloc(len);
++ if (!kbuf)
++ return -ENOMEM;
++
++ if (copy_from_user(kbuf, ubuf, len) == 0) {
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_microcode_update;
++ set_xen_guest_handle(op.u.microcode.data, kbuf);
++ op.u.microcode.length = len;
++ err = HYPERVISOR_platform_op(&op);
++ } else
++ err = -EFAULT;
++
++ vfree(kbuf);
++
++ return err;
++}
++
++static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
++{
++ ssize_t ret;
++
++ if (len < MC_HEADER_SIZE) {
++ printk(KERN_ERR "microcode: not enough data\n");
++ return -EINVAL;
++ }
++
++ mutex_lock(&microcode_mutex);
++
++ ret = do_microcode_update(buf, len);
++ if (!ret)
++ ret = (ssize_t)len;
++
++ mutex_unlock(&microcode_mutex);
++
++ return ret;
++}
++
++static struct file_operations microcode_fops = {
++ .owner = THIS_MODULE,
++ .write = microcode_write,
++ .open = microcode_open,
++};
++
++static struct miscdevice microcode_dev = {
++ .minor = MICROCODE_MINOR,
++ .name = "microcode",
++ .fops = &microcode_fops,
++};
++
++static int __init microcode_init (void)
++{
++ int error;
++
++ error = misc_register(&microcode_dev);
++ if (error) {
++ printk(KERN_ERR
++ "microcode: can't misc_register on minor=%d\n",
++ MICROCODE_MINOR);
++ return error;
++ }
++
++ printk(KERN_INFO
++ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n");
++ return 0;
++}
++
++static void __exit microcode_exit (void)
++{
++ misc_deregister(&microcode_dev);
++}
++
++module_init(microcode_init)
++module_exit(microcode_exit)
++MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/mpparse.c
+--- a/arch/i386/kernel/mpparse.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/mpparse.c Wed Aug 08 16:25:28 2007 -0300
+@@ -106,6 +106,7 @@ static struct mpc_config_translation *tr
+
+ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
+ {
++#ifndef CONFIG_XEN
+ int ver, apicid;
+ physid_mask_t phys_cpu;
+
+@@ -196,8 +197,9 @@ static void __cpuinit MP_processor_info
+ }
+
+ cpu_set(num_processors, cpu_possible_map);
++#endif /* CONFIG_XEN */
+ num_processors++;
+-
++#ifndef CONFIG_XEN
+ /*
+ * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
+ * but we need to work other dependencies like SMP_SUSPEND etc
+@@ -218,6 +220,7 @@ static void __cpuinit MP_processor_info
+ }
+ }
+ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
++#endif /* CONFIG_XEN */
+ }
+
+ static void __init MP_bus_info (struct mpc_config_bus *m)
+@@ -684,7 +687,11 @@ void __init get_smp_config (void)
+ * Read the physical hardware table. Anything here will
+ * override the defaults.
+ */
++#ifdef CONFIG_XEN
++ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++#else
+ if (!smp_read_mpc(phys_to_virt(mpf->mpf_physptr))) {
++#endif
+ smp_found_config = 0;
+ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
+ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
+@@ -719,7 +726,11 @@ void __init get_smp_config (void)
+
+ static int __init smp_scan_config (unsigned long base, unsigned long length)
+ {
++#ifdef CONFIG_XEN
++ unsigned long *bp = isa_bus_to_virt(base);
++#else
+ unsigned long *bp = phys_to_virt(base);
++#endif
+ struct intel_mp_floating *mpf;
+
+ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
+@@ -735,6 +746,7 @@ static int __init smp_scan_config (unsig
+ || (mpf->mpf_specification == 4)) ) {
+
+ smp_found_config = 1;
++#ifndef CONFIG_XEN
+ printk(KERN_INFO "found SMP MP-table at %08lx\n",
+ virt_to_phys(mpf));
+ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
+@@ -754,6 +766,10 @@ static int __init smp_scan_config (unsig
+ size = end - mpf->mpf_physptr;
+ reserve_bootmem(mpf->mpf_physptr, size);
+ }
++#else
++ printk(KERN_INFO "found SMP MP-table at %08lx\n",
++ ((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
++#endif
+
+ mpf_found = mpf;
+ return 1;
+@@ -766,7 +782,9 @@ static int __init smp_scan_config (unsig
+
+ void __init find_smp_config (void)
+ {
++#ifndef CONFIG_XEN
+ unsigned int address;
++#endif
+
+ /*
+ * FIXME: Linux assumes you have 640K of base ram..
+@@ -797,9 +815,11 @@ void __init find_smp_config (void)
+ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
+ */
+
++#ifndef CONFIG_XEN
+ address = get_bios_ebda();
+ if (address)
+ smp_scan_config(address, 0x400);
++#endif
+ }
+
+ int es7000_plat;
+@@ -812,6 +832,7 @@ int es7000_plat;
+
+ void __init mp_register_lapic_address(u64 address)
+ {
++#ifndef CONFIG_XEN
+ mp_lapic_addr = (unsigned long) address;
+
+ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
+@@ -820,6 +841,7 @@ void __init mp_register_lapic_address(u6
+ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
+
+ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
+ }
+
+ void __cpuinit mp_register_lapic (u8 id, u8 enabled)
+@@ -836,6 +858,7 @@ void __cpuinit mp_register_lapic (u8 id,
+ if (id == boot_cpu_physical_apicid)
+ boot_cpu = 1;
+
++#ifndef CONFIG_XEN
+ processor.mpc_type = MP_PROCESSOR;
+ processor.mpc_apicid = id;
+ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
+@@ -846,6 +869,7 @@ void __cpuinit mp_register_lapic (u8 id,
+ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
+ processor.mpc_reserved[0] = 0;
+ processor.mpc_reserved[1] = 0;
++#endif
+
+ MP_processor_info(&processor);
+ }
+@@ -900,7 +924,9 @@ void __init mp_register_ioapic(u8 id, u3
+ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
+ mp_ioapics[idx].mpc_apicaddr = address;
+
++#ifndef CONFIG_XEN
+ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
+ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
+ && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
+ tmpid = io_apic_get_unique_id(idx, id);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/pci-dma-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/pci-dma-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,378 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * On i386 there is no hardware dynamic DMA address translation,
++ * so consistent alloc/free are merely page allocation/freeing.
++ * The rest of the dynamic DMA mapping interface is implemented
++ * in asm/pci.h.
++ */
++
++#include <linux/types.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/pci.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <asm/io.h>
++#include <xen/balloon.h>
++#include <asm/swiotlb.h>
++#include <asm/tlbflush.h>
++#include <asm-i386/mach-xen/asm/swiotlb.h>
++#include <asm/bug.h>
++
++#ifdef __x86_64__
++#include <asm/proto.h>
++
++int iommu_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_merge);
++
++dma_addr_t bad_dma_address __read_mostly;
++EXPORT_SYMBOL(bad_dma_address);
++
++/* This tells the BIO block layer to assume merging. Default to off
++ because we cannot guarantee merging later. */
++int iommu_bio_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_bio_merge);
++
++int iommu_sac_force __read_mostly = 0;
++EXPORT_SYMBOL(iommu_sac_force);
++
++int no_iommu __read_mostly;
++#ifdef CONFIG_IOMMU_DEBUG
++int panic_on_overflow __read_mostly = 1;
++int force_iommu __read_mostly = 1;
++#else
++int panic_on_overflow __read_mostly = 0;
++int force_iommu __read_mostly= 0;
++#endif
++
++/* Set this to 1 if there is a HW IOMMU in the system */
++int iommu_detected __read_mostly = 0;
++
++void __init pci_iommu_alloc(void)
++{
++ /*
++ * The order of these functions is important for
++ * fall-back/fail-over reasons
++ */
++#ifdef CONFIG_IOMMU
++ iommu_hole_init();
++#endif
++
++#ifdef CONFIG_CALGARY_IOMMU
++#include <asm/calgary.h>
++ /* shut up compiler */
++ use_calgary = use_calgary;
++ detect_calgary();
++#endif
++
++#ifdef CONFIG_SWIOTLB
++ pci_swiotlb_init();
++#endif
++}
++
++#endif
++
++struct dma_coherent_mem {
++ void *virt_base;
++ u32 device_base;
++ int size;
++ int flags;
++ unsigned long *bitmap;
++};
++
++#define IOMMU_BUG_ON(test) \
++do { \
++ if (unlikely(test)) { \
++ printk(KERN_ALERT "Fatal DMA error! " \
++ "Please use 'swiotlb=force'\n"); \
++ BUG(); \
++ } \
++} while (0)
++
++int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ int i, rc;
++
++ BUG_ON(!valid_dma_direction(direction));
++ WARN_ON(nents == 0 || sg[0].length == 0);
++
++ if (swiotlb) {
++ rc = swiotlb_map_sg(hwdev, sg, nents, direction);
++ } else {
++ for (i = 0; i < nents; i++ ) {
++ sg[i].dma_address =
++ page_to_bus(sg[i].page) + sg[i].offset;
++ sg[i].dma_length = sg[i].length;
++ BUG_ON(!sg[i].page);
++ IOMMU_BUG_ON(address_needs_mapping(
++ hwdev, sg[i].dma_address));
++ }
++ rc = nents;
++ }
++
++ flush_write_buffers();
++ return rc;
++}
++EXPORT_SYMBOL(dma_map_sg);
++
++void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (swiotlb)
++ swiotlb_unmap_sg(hwdev, sg, nents, direction);
++}
++EXPORT_SYMBOL(dma_unmap_sg);
++
++#ifdef CONFIG_HIGHMEM
++dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction)
++{
++ dma_addr_t dma_addr;
++
++ BUG_ON(!valid_dma_direction(direction));
++
++ if (swiotlb) {
++ dma_addr = swiotlb_map_page(
++ dev, page, offset, size, direction);
++ } else {
++ dma_addr = page_to_bus(page) + offset;
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++ }
++
++ return dma_addr;
++}
++EXPORT_SYMBOL(dma_map_page);
++
++void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++ enum dma_data_direction direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (swiotlb)
++ swiotlb_unmap_page(dev, dma_address, size, direction);
++}
++EXPORT_SYMBOL(dma_unmap_page);
++#endif /* CONFIG_HIGHMEM */
++
++int
++dma_mapping_error(dma_addr_t dma_addr)
++{
++ if (swiotlb)
++ return swiotlb_dma_mapping_error(dma_addr);
++ return 0;
++}
++EXPORT_SYMBOL(dma_mapping_error);
++
++int
++dma_supported(struct device *dev, u64 mask)
++{
++ if (swiotlb)
++ return swiotlb_dma_supported(dev, mask);
++ /*
++ * By default we'll BUG when an infeasible DMA is requested, and
++ * request swiotlb=force (see IOMMU_BUG_ON).
++ */
++ return 1;
++}
++EXPORT_SYMBOL(dma_supported);
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp)
++{
++ void *ret;
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ unsigned int order = get_order(size);
++ unsigned long vstart;
++ u64 mask;
++
++ /* ignore region specifiers */
++ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
++
++ if (mem) {
++ int page = bitmap_find_free_region(mem->bitmap, mem->size,
++ order);
++ if (page >= 0) {
++ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
++ ret = mem->virt_base + (page << PAGE_SHIFT);
++ memset(ret, 0, size);
++ return ret;
++ }
++ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
++ return NULL;
++ }
++
++ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
++ gfp |= GFP_DMA;
++
++ vstart = __get_free_pages(gfp, order);
++ ret = (void *)vstart;
++
++ if (dev != NULL && dev->coherent_dma_mask)
++ mask = dev->coherent_dma_mask;
++ else
++ mask = 0xffffffff;
++
++ if (ret != NULL) {
++ if (xen_create_contiguous_region(vstart, order,
++ fls64(mask)) != 0) {
++ free_pages(vstart, order);
++ return NULL;
++ }
++ memset(ret, 0, size);
++ *dma_handle = virt_to_bus(ret);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(dma_alloc_coherent);
++
++void dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle)
++{
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ int order = get_order(size);
++
++ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
++ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
++
++ bitmap_release_region(mem->bitmap, page, order);
++ } else {
++ xen_destroy_contiguous_region((unsigned long)vaddr, order);
++ free_pages((unsigned long)vaddr, order);
++ }
++}
++EXPORT_SYMBOL(dma_free_coherent);
++
++#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++ dma_addr_t device_addr, size_t size, int flags)
++{
++ void __iomem *mem_base = NULL;
++ int pages = size >> PAGE_SHIFT;
++ int bitmap_size = (pages + 31)/32;
++
++ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
++ goto out;
++ if (!size)
++ goto out;
++ if (dev->dma_mem)
++ goto out;
++
++ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
++
++ mem_base = ioremap(bus_addr, size);
++ if (!mem_base)
++ goto out;
++
++ dev->dma_mem = kzalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
++ if (!dev->dma_mem)
++ goto out;
++ dev->dma_mem->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
++ if (!dev->dma_mem->bitmap)
++ goto free1_out;
++
++ dev->dma_mem->virt_base = mem_base;
++ dev->dma_mem->device_base = device_addr;
++ dev->dma_mem->size = pages;
++ dev->dma_mem->flags = flags;
++
++ if (flags & DMA_MEMORY_MAP)
++ return DMA_MEMORY_MAP;
++
++ return DMA_MEMORY_IO;
++
++ free1_out:
++ kfree(dev->dma_mem->bitmap);
++ out:
++ if (mem_base)
++ iounmap(mem_base);
++ return 0;
++}
++EXPORT_SYMBOL(dma_declare_coherent_memory);
++
++void dma_release_declared_memory(struct device *dev)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++
++ if(!mem)
++ return;
++ dev->dma_mem = NULL;
++ iounmap(mem->virt_base);
++ kfree(mem->bitmap);
++ kfree(mem);
++}
++EXPORT_SYMBOL(dma_release_declared_memory);
++
++void *dma_mark_declared_memory_occupied(struct device *dev,
++ dma_addr_t device_addr, size_t size)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ int pos, err;
++
++ if (!mem)
++ return ERR_PTR(-EINVAL);
++
++ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
++ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
++ if (err != 0)
++ return ERR_PTR(err);
++ return mem->virt_base + (pos << PAGE_SHIFT);
++}
++EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
++#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
++
++dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_addr_t dma;
++
++ BUG_ON(!valid_dma_direction(direction));
++ WARN_ON(size == 0);
++
++ if (swiotlb) {
++ dma = swiotlb_map_single(dev, ptr, size, direction);
++ } else {
++ dma = virt_to_bus(ptr);
++ IOMMU_BUG_ON(range_straddles_page_boundary(ptr, size));
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
++ }
++
++ flush_write_buffers();
++ return dma;
++}
++EXPORT_SYMBOL(dma_map_single);
++
++void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (swiotlb)
++ swiotlb_unmap_single(dev, dma_addr, size, direction);
++}
++EXPORT_SYMBOL(dma_unmap_single);
++
++void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_cpu);
++
++void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_device);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/process-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/process-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,884 @@
++/*
++ * linux/arch/i386/kernel/process.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/utsname.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/init.h>
++#include <linux/mc146818rtc.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/random.h>
++#include <linux/personality.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/ldt.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/desc.h>
++#include <asm/vm86.h>
++#ifdef CONFIG_MATH_EMULATION
++#include <asm/math_emu.h>
++#endif
++
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
++
++#include <linux/err.h>
++
++#include <asm/tlbflush.h>
++#include <asm/cpu.h>
++#include <asm/pda.h>
++
++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
++
++static int hlt_counter;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Return saved PC of a blocked thread.
++ */
++unsigned long thread_saved_pc(struct task_struct *tsk)
++{
++ return ((unsigned long *)tsk->thread.esp)[3];
++}
++
++/*
++ * Power management idle function, if any..
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++void disable_hlt(void)
++{
++ hlt_counter++;
++}
++
++EXPORT_SYMBOL(disable_hlt);
++
++void enable_hlt(void)
++{
++ hlt_counter--;
++}
++
++EXPORT_SYMBOL(enable_hlt);
++
++/*
++ * On SMP it's slightly faster (but much more power-consuming!)
++ * to poll the ->work.need_resched flag instead of waiting for the
++ * cross-CPU IPI to arrive. Use this option with caution.
++ */
++static void poll_idle (void)
++{
++ local_irq_enable();
++
++ asm volatile(
++ "2:"
++ "testl %0, %1;"
++ "rep; nop;"
++ "je 2b;"
++ : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
++}
++
++static void xen_idle(void)
++{
++ current_thread_info()->status &= ~TS_POLLING;
++ /*
++ * TS_POLLING-cleared state must be visible before we
++ * test NEED_RESCHED:
++ */
++ smp_mb();
++
++ local_irq_disable();
++ if (!need_resched())
++ safe_halt(); /* enables interrupts racelessly */
++ else
++ local_irq_enable();
++ current_thread_info()->status |= TS_POLLING;
++}
++#ifdef CONFIG_APM_MODULE
++EXPORT_SYMBOL(default_idle);
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern cpumask_t cpu_initialized;
++static inline void play_dead(void)
++{
++ idle_task_exit();
++ local_irq_disable();
++ cpu_clear(smp_processor_id(), cpu_initialized);
++ preempt_enable_no_resched();
++ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++ cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++ BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle(void)
++{
++ int cpu = smp_processor_id();
++
++ current_thread_info()->status |= TS_POLLING;
++
++ /* endless idle loop with no priority at all */
++ while (1) {
++ while (!need_resched()) {
++ void (*idle)(void);
++
++ if (__get_cpu_var(cpu_idle_state))
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ rmb();
++ idle = xen_idle; /* no alternatives */
++
++ if (cpu_is_offline(cpu))
++ play_dead();
++
++ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
++ idle();
++ }
++ preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++ }
++}
++
++void cpu_idle_wait(void)
++{
++ unsigned int cpu, this_cpu = get_cpu();
++ cpumask_t map, tmp = current->cpus_allowed;
++
++ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++ put_cpu();
++
++ cpus_clear(map);
++ for_each_online_cpu(cpu) {
++ per_cpu(cpu_idle_state, cpu) = 1;
++ cpu_set(cpu, map);
++ }
++
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ wmb();
++ do {
++ ssleep(1);
++ for_each_online_cpu(cpu) {
++ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
++ cpu_clear(cpu, map);
++ }
++ cpus_and(map, map, cpu_online_map);
++ } while (!cpus_empty(map));
++
++ set_cpus_allowed(current, tmp);
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++/* XXX XEN doesn't use mwait_idle(), select_idle_routine(), idle_setup(). */
++/* Always use xen_idle() instead. */
++void mwait_idle_with_hints(unsigned long eax, unsigned long ecx) {}
++
++void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
++{
++}
++
++static int __init idle_setup (char *str)
++{
++ if (!strncmp(str, "poll", 4)) {
++ printk("using polling idle threads.\n");
++ pm_idle = poll_idle;
++ }
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++__setup("idle=", idle_setup);
++
++void show_regs(struct pt_regs * regs)
++{
++ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
++
++ printk("\n");
++ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
++ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
++ print_symbol("EIP is at %s\n", regs->eip);
++
++ if (user_mode_vm(regs))
++ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
++ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
++ regs->eflags, print_tainted(), init_utsname()->release,
++ (int)strcspn(init_utsname()->version, " "),
++ init_utsname()->version);
++ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
++ regs->eax,regs->ebx,regs->ecx,regs->edx);
++ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
++ regs->esi, regs->edi, regs->ebp);
++ printk(" DS: %04x ES: %04x GS: %04x\n",
++ 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xgs);
++
++ cr0 = read_cr0();
++ cr2 = read_cr2();
++ cr3 = read_cr3();
++ cr4 = read_cr4_safe();
++ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
++ show_trace(NULL, regs, &regs->esp);
++}
++
++/*
++ * This gets run with %ebx containing the
++ * function to call, and %edx containing
++ * the "args".
++ */
++extern void kernel_thread_helper(void);
++
++/*
++ * Create a kernel thread
++ */
++int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++{
++ struct pt_regs regs;
++
++ memset(&regs, 0, sizeof(regs));
++
++ regs.ebx = (unsigned long) fn;
++ regs.edx = (unsigned long) arg;
++
++ regs.xds = __USER_DS;
++ regs.xes = __USER_DS;
++ regs.xgs = __KERNEL_PDA;
++ regs.orig_eax = -1;
++ regs.eip = (unsigned long) kernel_thread_helper;
++ regs.xcs = __KERNEL_CS | get_kernel_rpl();
++ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
++
++ /* Ok, create the new process.. */
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++}
++EXPORT_SYMBOL(kernel_thread);
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++ /* The process may have allocated an io port bitmap... nuke it. */
++ if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
++ struct task_struct *tsk = current;
++ struct thread_struct *t = &tsk->thread;
++ struct physdev_set_iobitmap set_iobitmap;
++ memset(&set_iobitmap, 0, sizeof(set_iobitmap));
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &set_iobitmap);
++ kfree(t->io_bitmap_ptr);
++ t->io_bitmap_ptr = NULL;
++ clear_thread_flag(TIF_IO_BITMAP);
++ }
++}
++
++void flush_thread(void)
++{
++ struct task_struct *tsk = current;
++
++ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
++ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
++ clear_tsk_thread_flag(tsk, TIF_DEBUG);
++ /*
++ * Forget coprocessor state..
++ */
++ clear_fpu(tsk);
++ clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++ BUG_ON(dead_task->mm);
++ release_vm86_irqs(dead_task);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++ unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
++ unsigned long unused,
++ struct task_struct * p, struct pt_regs * regs)
++{
++ struct pt_regs * childregs;
++ struct task_struct *tsk;
++ int err;
++
++ childregs = task_pt_regs(p);
++ *childregs = *regs;
++ childregs->eax = 0;
++ childregs->esp = esp;
++
++ p->thread.esp = (unsigned long) childregs;
++ p->thread.esp0 = (unsigned long) (childregs+1);
++
++ p->thread.eip = (unsigned long) ret_from_fork;
++
++ savesegment(fs,p->thread.fs);
++
++ tsk = current;
++ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
++ p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
++ IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!p->thread.io_bitmap_ptr) {
++ p->thread.io_bitmap_max = 0;
++ return -ENOMEM;
++ }
++ set_tsk_thread_flag(p, TIF_IO_BITMAP);
++ }
++
++ /*
++ * Set a new TLS for the child thread?
++ */
++ if (clone_flags & CLONE_SETTLS) {
++ struct desc_struct *desc;
++ struct user_desc info;
++ int idx;
++
++ err = -EFAULT;
++ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
++ goto out;
++ err = -EINVAL;
++ if (LDT_empty(&info))
++ goto out;
++
++ idx = info.entry_number;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ goto out;
++
++ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++
++ p->thread.iopl = current->thread.iopl;
++
++ err = 0;
++ out:
++ if (err && p->thread.io_bitmap_ptr) {
++ kfree(p->thread.io_bitmap_ptr);
++ p->thread.io_bitmap_max = 0;
++ }
++ return err;
++}
++
++/*
++ * fill in the user structure for a core dump..
++ */
++void dump_thread(struct pt_regs * regs, struct user * dump)
++{
++ int i;
++
++/* changed the size calculations - should hopefully work better. lbt */
++ dump->magic = CMAGIC;
++ dump->start_code = 0;
++ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
++ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
++ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
++ dump->u_dsize -= dump->u_tsize;
++ dump->u_ssize = 0;
++ for (i = 0; i < 8; i++)
++ dump->u_debugreg[i] = current->thread.debugreg[i];
++
++ if (dump->start_stack < TASK_SIZE)
++ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
++
++ dump->regs.ebx = regs->ebx;
++ dump->regs.ecx = regs->ecx;
++ dump->regs.edx = regs->edx;
++ dump->regs.esi = regs->esi;
++ dump->regs.edi = regs->edi;
++ dump->regs.ebp = regs->ebp;
++ dump->regs.eax = regs->eax;
++ dump->regs.ds = regs->xds;
++ dump->regs.es = regs->xes;
++ savesegment(fs,dump->regs.fs);
++ dump->regs.gs = regs->xgs;
++ dump->regs.orig_eax = regs->orig_eax;
++ dump->regs.eip = regs->eip;
++ dump->regs.cs = regs->xcs;
++ dump->regs.eflags = regs->eflags;
++ dump->regs.esp = regs->esp;
++ dump->regs.ss = regs->xss;
++
++ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
++}
++EXPORT_SYMBOL(dump_thread);
++
++/*
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++ struct pt_regs ptregs = *task_pt_regs(tsk);
++ ptregs.xcs &= 0xffff;
++ ptregs.xds &= 0xffff;
++ ptregs.xes &= 0xffff;
++ ptregs.xss &= 0xffff;
++
++ elf_core_copy_regs(regs, &ptregs);
++
++ return 1;
++}
++
++static noinline void __switch_to_xtra(struct task_struct *next_p)
++{
++ struct thread_struct *next;
++
++ next = &next_p->thread;
++
++ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
++ set_debugreg(next->debugreg[0], 0);
++ set_debugreg(next->debugreg[1], 1);
++ set_debugreg(next->debugreg[2], 2);
++ set_debugreg(next->debugreg[3], 3);
++ /* no 4 and 5 */
++ set_debugreg(next->debugreg[6], 6);
++ set_debugreg(next->debugreg[7], 7);
++ }
++#ifndef CONFIG_XEN
++ if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
++ /*
++ * Disable the bitmap via an invalid offset. We still cache
++ * the previous bitmap owner and the IO bitmap contents:
++ */
++ tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
++ return;
++ }
++
++ if (likely(next == tss->io_bitmap_owner)) {
++ /*
++ * Previous owner of the bitmap (hence the bitmap content)
++ * matches the next task, we don't have to do anything but
++ * to set a valid offset in the TSS:
++ */
++ tss->io_bitmap_base = IO_BITMAP_OFFSET;
++ return;
++ }
++ /*
++ * Lazy TSS's I/O bitmap copy. We set an invalid offset here
++ * and we let the task get a GPF in case an I/O instruction
++ * is performed. The handler of the GPF will verify that the
++ * faulting task has a valid I/O bitmap and, if true, does the
++ * real copy and restart the instruction. This will save us
++ * redundant copies when the currently switched task does not
++ * perform any I/O during its timeslice.
++ */
++ tss->io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
++#endif /* !CONFIG_XEN */
++}
++
++/*
++ * This function selects if the context switch from prev to next
++ * has to tweak the TSC disable bit in the cr4.
++ */
++static inline void disable_tsc(struct task_struct *prev_p,
++ struct task_struct *next_p)
++{
++ struct thread_info *prev, *next;
++
++ /*
++ * gcc should eliminate the ->thread_info dereference if
++ * has_secure_computing returns 0 at compile time (SECCOMP=n).
++ */
++ prev = task_thread_info(prev_p);
++ next = task_thread_info(next_p);
++
++ if (has_secure_computing(prev) || has_secure_computing(next)) {
++ /* slow path here */
++ if (has_secure_computing(prev) &&
++ !has_secure_computing(next)) {
++ write_cr4(read_cr4() & ~X86_CR4_TSD);
++ } else if (!has_secure_computing(prev) &&
++ has_secure_computing(next))
++ write_cr4(read_cr4() | X86_CR4_TSD);
++ }
++}
++
++/*
++ * switch_to(x,y) should switch tasks from x to y.
++ *
++ * We fsave/fwait so that an exception goes off at the right time
++ * (as a call from the fsave or fwait in effect) rather than to
++ * the wrong process. Lazy FP saving no longer makes any sense
++ * with modern CPUs, and this simplifies a lot of things (SMP
++ * and UP become the same).
++ *
++ * NOTE! We used to use the x86 hardware context switching. The
++ * reason for not using it any more becomes apparent when you
++ * try to recover gracefully from saved state that is no longer
++ * valid (stale segment register values in particular). With the
++ * hardware task-switch, there is no way to fix up bad state in
++ * a reasonable manner.
++ *
++ * The fact that Intel documents the hardware task-switching to
++ * be slow is a fairly red herring - this code is not noticeably
++ * faster. However, there _is_ some room for improvement here,
++ * so the performance issues may eventually be a valid point.
++ * More important, however, is the fact that this allows us much
++ * more flexibility.
++ *
++ * The return value (in %eax) will be the "prev" task after
++ * the task-switch, and shows up in ret_from_fork in entry.S,
++ * for example.
++ */
++struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ struct thread_struct *prev = &prev_p->thread,
++ *next = &next_p->thread;
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++ struct physdev_set_iobitmap iobmp_op;
++ multicall_entry_t _mcl[8], *mcl = _mcl;
++
++ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
++
++ /*
++ * This is basically '__unlazy_fpu', except that we queue a
++ * multicall to indicate FPU task switch, rather than
++ * synchronously trapping to Xen.
++ */
++ if (prev_p->thread_info->status & TS_USEDFPU) {
++ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++ mcl->op = __HYPERVISOR_fpu_taskswitch;
++ mcl->args[0] = 1;
++ mcl++;
++ }
++#if 0 /* lazy fpu sanity check */
++ else BUG_ON(!(read_cr0() & 8));
++#endif
++
++ /*
++ * Reload esp0.
++ * This is load_esp0(tss, next) with a multicall.
++ */
++ mcl->op = __HYPERVISOR_stack_switch;
++ mcl->args[0] = __KERNEL_DS;
++ mcl->args[1] = next->esp0;
++ mcl++;
++
++ /*
++ * Load the per-thread Thread-Local Storage descriptor.
++ * This is load_TLS(next, cpu) with multicalls.
++ */
++#define C(i) do { \
++ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
++ next->tls_array[i].b != prev->tls_array[i].b)) { \
++ mcl->op = __HYPERVISOR_update_descriptor; \
++ *(u64 *)&mcl->args[0] = virt_to_machine( \
++ &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
++ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \
++ mcl++; \
++ } \
++} while (0)
++ C(0); C(1); C(2);
++#undef C
++
++ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++ set_xen_guest_handle(iobmp_op.bitmap,
++ (char *)next->io_bitmap_ptr);
++ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iobitmap;
++ mcl->args[1] = (unsigned long)&iobmp_op;
++ mcl++;
++ }
++
++ (void)HYPERVISOR_multicall(_mcl, mcl - _mcl);
++
++ /*
++ * Restore %fs if needed.
++ *
++ * Glibc normally makes %fs be zero.
++ */
++ if (unlikely(next->fs))
++ loadsegment(fs, next->fs);
++
++ write_pda(pcurrent, next_p);
++
++ /* we're going to use this soon, after a few expensive things */
++ if (next_p->fpu_counter > 5)
++ prefetch(&next->i387.fxsave);
++
++ /*
++ * Now maybe handle debug registers
++ */
++ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
++ __switch_to_xtra(next_p);
++
++ disable_tsc(prev_p, next_p);
++
++ /* If the task has used fpu the last 5 timeslices, just do a full
++ * restore of the math state immediately to avoid the trap; the
++ * chances of needing FPU soon are obviously high now
++ */
++ if (next_p->fpu_counter > 5)
++ math_state_restore();
++
++ return prev_p;
++}
++
++asmlinkage int sys_fork(struct pt_regs regs)
++{
++ return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++asmlinkage int sys_clone(struct pt_regs regs)
++{
++ unsigned long clone_flags;
++ unsigned long newsp;
++ int __user *parent_tidptr, *child_tidptr;
++
++ clone_flags = regs.ebx;
++ newsp = regs.ecx;
++ parent_tidptr = (int __user *)regs.edx;
++ child_tidptr = (int __user *)regs.edi;
++ if (!newsp)
++ newsp = regs.esp;
++ return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage int sys_vfork(struct pt_regs regs)
++{
++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage int sys_execve(struct pt_regs regs)
++{
++ int error;
++ char * filename;
++
++ filename = getname((char __user *) regs.ebx);
++ error = PTR_ERR(filename);
++ if (IS_ERR(filename))
++ goto out;
++ error = do_execve(filename,
++ (char __user * __user *) regs.ecx,
++ (char __user * __user *) regs.edx,
++ &regs);
++ if (error == 0) {
++ task_lock(current);
++ current->ptrace &= ~PT_DTRACE;
++ task_unlock(current);
++ /* Make sure we don't return using sysenter.. */
++ set_thread_flag(TIF_IRET);
++ }
++ putname(filename);
++out:
++ return error;
++}
++
++#define top_esp (THREAD_SIZE - sizeof(unsigned long))
++#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
++
++unsigned long get_wchan(struct task_struct *p)
++{
++ unsigned long ebp, esp, eip;
++ unsigned long stack_page;
++ int count = 0;
++ if (!p || p == current || p->state == TASK_RUNNING)
++ return 0;
++ stack_page = (unsigned long)task_stack_page(p);
++ esp = p->thread.esp;
++ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
++ return 0;
++ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
++ ebp = *(unsigned long *) esp;
++ do {
++ if (ebp < stack_page || ebp > top_ebp+stack_page)
++ return 0;
++ eip = *(unsigned long *) (ebp+4);
++ if (!in_sched_functions(eip))
++ return eip;
++ ebp = *(unsigned long *) ebp;
++ } while (count++ < 16);
++ return 0;
++}
++
++/*
++ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
++ */
++static int get_free_idx(void)
++{
++ struct thread_struct *t = &current->thread;
++ int idx;
++
++ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
++ if (desc_empty(t->tls_array + idx))
++ return idx + GDT_ENTRY_TLS_MIN;
++ return -ESRCH;
++}
++
++/*
++ * Set a given TLS descriptor:
++ */
++asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
++{
++ struct thread_struct *t = &current->thread;
++ struct user_desc info;
++ struct desc_struct *desc;
++ int cpu, idx;
++
++ if (copy_from_user(&info, u_info, sizeof(info)))
++ return -EFAULT;
++ idx = info.entry_number;
++
++ /*
++ * index -1 means the kernel should try to find and
++ * allocate an empty descriptor:
++ */
++ if (idx == -1) {
++ idx = get_free_idx();
++ if (idx < 0)
++ return idx;
++ if (put_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ }
++
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ /*
++ * We must not get preempted while modifying the TLS.
++ */
++ cpu = get_cpu();
++
++ if (LDT_empty(&info)) {
++ desc->a = 0;
++ desc->b = 0;
++ } else {
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++ load_TLS(t, cpu);
++
++ put_cpu();
++
++ return 0;
++}
++
++/*
++ * Get the current Thread-Local Storage area:
++ */
++
++#define GET_BASE(desc) ( \
++ (((desc)->a >> 16) & 0x0000ffff) | \
++ (((desc)->b << 16) & 0x00ff0000) | \
++ ( (desc)->b & 0xff000000) )
++
++#define GET_LIMIT(desc) ( \
++ ((desc)->a & 0x0ffff) | \
++ ((desc)->b & 0xf0000) )
++
++#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
++#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
++#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
++#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
++#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
++#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
++
++asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
++{
++ struct user_desc info;
++ struct desc_struct *desc;
++ int idx;
++
++ if (get_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ memset(&info, 0, sizeof(info));
++
++ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ info.entry_number = idx;
++ info.base_addr = GET_BASE(desc);
++ info.limit = GET_LIMIT(desc);
++ info.seg_32bit = GET_32BIT(desc);
++ info.contents = GET_CONTENTS(desc);
++ info.read_exec_only = !GET_WRITABLE(desc);
++ info.limit_in_pages = GET_LIMIT_PAGES(desc);
++ info.seg_not_present = !GET_PRESENT(desc);
++ info.useable = GET_USEABLE(desc);
++
++ if (copy_to_user(u_info, &info, sizeof(info)))
++ return -EFAULT;
++ return 0;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
++ sp -= get_random_int() % 8192;
++ return sp & ~0xf;
++}
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/quirks.c
+--- a/arch/i386/kernel/quirks.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/quirks.c Wed Aug 08 16:25:28 2007 -0300
+@@ -7,7 +7,7 @@
+ #include <asm/genapic.h>
+ #include <asm/cpu.h>
+
+-#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_SMP) && defined(CONFIG_PCI)
++#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
+ static void __devinit verify_quirk_intel_irqbalance(struct pci_dev *dev)
+ {
+ u8 config, rev;
+@@ -68,11 +68,19 @@ void __init quirk_intel_irqbalance(void)
+ word = read_pci_config_16(0, 0, 0x40, 0x4c);
+
+ if (!(word & (1 << 13))) {
++#ifdef CONFIG_XEN
++ struct xen_platform_op op;
++ printk(KERN_INFO "Disabling irq balancing and affinity\n");
++ op.cmd = XENPF_platform_quirk;
++ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
++ (void)HYPERVISOR_platform_op(&op);
++#else
+ printk(KERN_INFO "Disabling irq balancing and affinity\n");
+ #ifdef CONFIG_IRQBALANCE
+ irqbalance_disable("");
+ #endif
+ noirqdebug_setup("");
++#endif /* CONFIG_XEN */
+ #ifdef CONFIG_PROC_FS
+ no_irq_affinity = 1;
+ #endif
+@@ -80,12 +88,12 @@ void __init quirk_intel_irqbalance(void)
+ printk(KERN_INFO "Disabling cpu hotplug control\n");
+ enable_cpu_hotplug = 0;
+ #endif
+-#ifdef CONFIG_X86_64
++#if defined(CONFIG_X86_64) && !defined(CONFIG_XEN)
+ /* force the genapic selection to flat mode so that
+ * interrupts can be redirected to more than one CPU.
+ */
+ genapic_force = &apic_flat;
+-#endif
++#endif /* CONFIG_XEN */
+ }
+
+ /* put back the original value for config space */
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/setup-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/setup-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,845 @@
++/*
++ * linux/arch/i386/kernel/setup.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ *
++ * Memory region support
++ * David Parsons <orc@pell.chi.il.us>, July-August 1999
++ *
++ * Added E820 sanitization routine (removes overlapping memory regions);
++ * Brian Moyle <bmoyle@mvista.com>, February 2001
++ *
++ * Moved CPU detection code to cpu/${cpu}.c
++ * Patrick Mochel <mochel@osdl.org>, March 2002
++ *
++ * Provisions for empty E820 memory regions (reported by certain BIOSes).
++ * Alex Achenbach <xela@slit.de>, December 2002.
++ *
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mmzone.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/acpi.h>
++#include <linux/apm_bios.h>
++#include <linux/initrd.h>
++#include <linux/bootmem.h>
++#include <linux/seq_file.h>
++#include <linux/platform_device.h>
++#include <linux/console.h>
++#include <linux/mca.h>
++#include <linux/root_dev.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <linux/nodemask.h>
++#include <linux/notifier.h>
++#include <linux/kexec.h>
++#include <linux/crash_dump.h>
++#include <linux/dmi.h>
++#include <linux/pfn.h>
++
++#include <video/edid.h>
++
++#include <asm/apic.h>
++#include <asm/e820.h>
++#include <asm/mpspec.h>
++#include <asm/mmzone.h>
++#include <asm/setup.h>
++#include <asm/arch_hooks.h>
++#include <asm/sections.h>
++#include <asm/io_apic.h>
++#include <asm/ist.h>
++#include <asm/io.h>
++#include <setup_arch.h>
++#include <bios_ebda.h>
++
++#include <asm/hypervisor.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/memory.h>
++#include <xen/features.h>
++#include <xen/xencons.h>
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++ xen_panic_event, NULL, 0 /* try to go last */
++};
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++int disable_pse __devinitdata = 0;
++
++/*
++ * Machine setup..
++ */
++extern struct resource code_resource;
++extern struct resource data_resource;
++
++/* cpu data as detected by the assembly code in head.S */
++struct cpuinfo_x86 new_cpu_data __cpuinitdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++/* common cpu data for all cpus */
++struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++EXPORT_SYMBOL(boot_cpu_data);
++
++unsigned long mmu_cr4_features;
++
++/* for MCA, but anyone else can use it if they want */
++unsigned int machine_id;
++#ifdef CONFIG_MCA
++EXPORT_SYMBOL(machine_id);
++#endif
++unsigned int machine_submodel_id;
++unsigned int BIOS_revision;
++unsigned int mca_pentium_flag;
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++/* user-defined highmem size */
++static unsigned int highmem_pages = -1;
++
++/*
++ * Setup options
++ */
++struct drive_info_struct { char dummy[32]; } drive_info;
++#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
++ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
++EXPORT_SYMBOL(drive_info);
++#endif
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct apm_info apm_info;
++EXPORT_SYMBOL(apm_info);
++struct sys_desc_table_struct {
++ unsigned short length;
++ unsigned char table[0];
++};
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++struct ist_info ist_info;
++#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
++ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
++EXPORT_SYMBOL(ist_info);
++#endif
++
++extern void early_cpu_init(void);
++extern int root_mountflags;
++
++unsigned long saved_videomode;
++
++#define RAMDISK_IMAGE_START_MASK 0x07FF
++#define RAMDISK_PROMPT_FLAG 0x8000
++#define RAMDISK_LOAD_FLAG 0x4000
++
++static char command_line[COMMAND_LINE_SIZE];
++
++unsigned char __initdata boot_params[PARAM_SIZE];
++
++/*
++ * Point at the empty zero page to start with. We map the real shared_info
++ * page as soon as fixmap is up and running.
++ */
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ * from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++ edd.edd_info_nr = EDD_NR;
++}
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++int __initdata user_defined_memmap = 0;
++
++/*
++ * "mem=nopentium" disables the 4MB page tables.
++ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
++ * to <mem>, overriding the bios size.
++ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
++ * <start> to <start>+<mem>, overriding the bios size.
++ *
++ * HPA tells me bootloaders need to parse mem=, so no new
++ * option should be mem= [also see Documentation/i386/boot.txt]
++ */
++static int __init parse_mem(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ if (strcmp(arg, "nopentium") == 0) {
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long mem_size;
++
++ mem_size = memparse(arg, &arg);
++ limit_regions(mem_size);
++ user_defined_memmap = 1;
++ }
++ return 0;
++}
++early_param("mem", parse_mem);
++
++#ifdef CONFIG_PROC_VMCORE
++/* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel.
++ */
++static int __init parse_elfcorehdr(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ elfcorehdr_addr = memparse(arg, &arg);
++ return 0;
++}
++early_param("elfcorehdr", parse_elfcorehdr);
++#endif /* CONFIG_PROC_VMCORE */
++
++/*
++ * highmem=size forces highmem to be exactly 'size' bytes.
++ * This works even on boxes that have no highmem otherwise.
++ * This also works to reduce highmem size on bigger boxes.
++ */
++static int __init parse_highmem(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT;
++ return 0;
++}
++early_param("highmem", parse_highmem);
++
++/*
++ * vmalloc=size forces the vmalloc area to be exactly 'size'
++ * bytes. This can be used to increase (or decrease) the
++ * vmalloc area - the default is 128m.
++ */
++static int __init parse_vmalloc(char *arg)
++{
++ if (!arg)
++ return -EINVAL;
++
++ __VMALLOC_RESERVE = memparse(arg, &arg);
++ return 0;
++}
++early_param("vmalloc", parse_vmalloc);
++
++/*
++ * reservetop=size reserves a hole at the top of the kernel address space which
++ * a hypervisor can load into later. Needed for dynamically loaded hypervisors,
++ * so relocating the fixmap can be done before paging initialization.
++ */
++static int __init parse_reservetop(char *arg)
++{
++ unsigned long address;
++
++ if (!arg)
++ return -EINVAL;
++
++ address = memparse(arg, &arg);
++ reserve_top_address(address);
++ return 0;
++}
++early_param("reservetop", parse_reservetop);
++
++/*
++ * Determine low and high memory ranges:
++ */
++unsigned long __init find_max_low_pfn(void)
++{
++ unsigned long max_low_pfn;
++
++ max_low_pfn = max_pfn;
++ if (max_low_pfn > MAXMEM_PFN) {
++ if (highmem_pages == -1)
++ highmem_pages = max_pfn - MAXMEM_PFN;
++ if (highmem_pages + MAXMEM_PFN < max_pfn)
++ max_pfn = MAXMEM_PFN + highmem_pages;
++ if (highmem_pages + MAXMEM_PFN > max_pfn) {
++ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn = MAXMEM_PFN;
++#ifndef CONFIG_HIGHMEM
++ /* Maximum memory usable is what is directly addressable */
++ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
++ MAXMEM>>20);
++ if (max_pfn > MAX_NONPAE_PFN)
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ else
++ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
++ max_pfn = MAXMEM_PFN;
++#else /* !CONFIG_HIGHMEM */
++#ifndef CONFIG_X86_PAE
++ if (max_pfn > MAX_NONPAE_PFN) {
++ max_pfn = MAX_NONPAE_PFN;
++ printk(KERN_WARNING "Warning only 4GB will be used.\n");
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ }
++#endif /* !CONFIG_X86_PAE */
++#endif /* !CONFIG_HIGHMEM */
++ } else {
++ if (highmem_pages == -1)
++ highmem_pages = 0;
++#ifdef CONFIG_HIGHMEM
++ if (highmem_pages >= max_pfn) {
++ printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
++ highmem_pages = 0;
++ }
++ if (highmem_pages) {
++ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
++ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn -= highmem_pages;
++ }
++#else
++ if (highmem_pages)
++ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
++#endif
++ }
++ return max_low_pfn;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * workaround for Dell systems that neglect to reserve EBDA
++ */
++static void __init reserve_ebda_region(void)
++{
++ unsigned int addr;
++ addr = get_bios_ebda();
++ if (addr)
++ reserve_bootmem(addr, PAGE_SIZE);
++}
++#endif
++
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++void __init setup_bootmem_allocator(void);
++static unsigned long __init setup_memory(void)
++{
++ /*
++ * partially used pages are not usable - thus
++ * we are rounding upwards:
++ */
++ min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames;
++
++ find_max_pfn();
++
++ max_low_pfn = find_max_low_pfn();
++
++#ifdef CONFIG_HIGHMEM
++ highstart_pfn = highend_pfn = max_pfn;
++ if (max_pfn > max_low_pfn) {
++ highstart_pfn = max_low_pfn;
++ }
++ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
++ pages_to_mb(highend_pfn - highstart_pfn));
++ num_physpages = highend_pfn;
++ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
++#else
++ num_physpages = max_low_pfn;
++ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
++#endif
++#ifdef CONFIG_FLATMEM
++ max_mapnr = num_physpages;
++#endif
++ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
++ pages_to_mb(max_low_pfn));
++
++ setup_bootmem_allocator();
++
++ return max_low_pfn;
++}
++
++void __init zone_sizes_init(void)
++{
++ /*
++ * XEN: Our notion of "DMA memory" is fake when running over Xen.
++ * We simply put all RAM in the DMA zone so that those drivers which
++ * needlessly specify GFP_DMA do not get starved of RAM unnecessarily.
++ * Those drivers that *do* require lowmem are screwed anyway when
++ * running over Xen!
++ */
++ unsigned long max_zone_pfns[MAX_NR_ZONES];
++ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
++ max_zone_pfns[ZONE_DMA] = max_low_pfn;
++ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
++#ifdef CONFIG_HIGHMEM
++ max_zone_pfns[ZONE_HIGHMEM] = highend_pfn;
++ add_active_range(0, 0, highend_pfn);
++#else
++ add_active_range(0, 0, max_low_pfn);
++#endif
++
++ free_area_init_nodes(max_zone_pfns);
++}
++#else
++extern unsigned long __init setup_memory(void);
++extern void zone_sizes_init(void);
++#endif /* !CONFIG_NEED_MULTIPLE_NODES */
++
++void __init setup_bootmem_allocator(void)
++{
++ unsigned long bootmap_size;
++ /*
++ * Initialize the boot-time allocator (with low memory only):
++ */
++ bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
++
++ register_bootmem_low_pages(max_low_pfn);
++
++ /*
++ * Reserve the bootmem bitmap itself as well. We do this in two
++ * steps (first step was init_bootmem()) because this catches
++ * the (very unlikely) case of us accidentally initializing the
++ * bootmem allocator with an invalid RAM area.
++ */
++ reserve_bootmem(__pa_symbol(_text), (PFN_PHYS(min_low_pfn) +
++ bootmap_size + PAGE_SIZE-1) - __pa_symbol(_text));
++
++#ifndef CONFIG_XEN
++ /*
++ * reserve physical page 0 - it's a special BIOS page on many boxes,
++ * enabling clean reboots, SMP operation, laptop functions.
++ */
++ reserve_bootmem(0, PAGE_SIZE);
++
++ /* reserve EBDA region, it's a 4K region */
++ reserve_ebda_region();
++
++ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
++ PCI prefetch into it (errata #56). Usually the page is reserved anyways,
++ unless you have no PS/2 mouse plugged in. */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 == 6)
++ reserve_bootmem(0xa0000 - 4096, 4096);
++
++#ifdef CONFIG_SMP
++ /*
++ * But first pinch a few for the stack/trampoline stuff
++ * FIXME: Don't need the extra page at 4K, but need to fix
++ * trampoline before removing it. (see the GDT stuff)
++ */
++ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
++#endif
++#ifdef CONFIG_ACPI_SLEEP
++ /*
++ * Reserve low memory region for sleep support.
++ */
++ acpi_reserve_bootmem();
++#endif
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (xen_start_info->mod_start) {
++ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
++ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
++ initrd_start = INITRD_START + PAGE_OFFSET;
++ initrd_end = initrd_start+INITRD_SIZE;
++ initrd_below_start_ok = 1;
++ }
++ else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ INITRD_START + INITRD_SIZE,
++ max_low_pfn << PAGE_SHIFT);
++ initrd_start = 0;
++ }
++ }
++#endif
++#ifdef CONFIG_KEXEC
++#ifdef CONFIG_XEN
++ xen_machine_kexec_setup_resources();
++#else
++ if (crashk_res.start != crashk_res.end)
++ reserve_bootmem(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1);
++#endif
++#endif
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ phys_to_machine_mapping =
++ (unsigned long *)xen_start_info->mfn_list;
++}
++
++/*
++ * The node 0 pgdat is initialized before all of these because
++ * it's needed for bootmem. node>0 pgdats have their virtual
++ * space allocated before the pagetables are in place to access
++ * them, so they can't be cleared then.
++ *
++ * This should all compile down to nothing when NUMA is off.
++ */
++void __init remapped_pgdat_init(void)
++{
++ int nid;
++
++ for_each_online_node(nid) {
++ if (nid != 0)
++ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
++ }
++}
++
++#ifdef CONFIG_MCA
++static void set_mca_bus(int x)
++{
++ MCA_bus = x;
++}
++#else
++static void set_mca_bus(int x) { }
++#endif
++
++/* Overridden in paravirt.c if CONFIG_PARAVIRT */
++char * __init __attribute__((weak)) memory_setup(void)
++{
++ return machine_specific_memory_setup();
++}
++
++/*
++ * Determine if we were loaded by an EFI loader. If so, then we have also been
++ * passed the efi memmap, systab, etc., so we should use these data structures
++ * for initialization. Note, the efi init code path is determined by the
++ * global efi_enabled. This allows the same kernel image to be used on existing
++ * systems (with a traditional BIOS) as well as on EFI systems.
++ */
++void __init setup_arch(char **cmdline_p)
++{
++ int i, j, k, fpp;
++ struct physdev_set_iopl set_iopl;
++ unsigned long max_low_pfn;
++
++ /* Force a quick death if the kernel panics (not domain 0). */
++ extern int panic_timeout;
++ if (!panic_timeout && !is_initial_xendomain())
++ panic_timeout = 1;
++
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++ HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
++ HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_writable_pagetables);
++
++ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
++ pre_setup_arch_hook();
++ early_cpu_init();
++#if defined(CONFIG_XEN) && defined(CONFIG_SMP)
++ prefill_possible_map();
++#endif
++
++ /*
++ * FIXME: This isn't an official loader_type right
++ * now but does currently work with elilo.
++ * If we were configured as an EFI kernel, check to make
++ * sure that we were loaded correctly from elilo and that
++ * the system table is valid. If not, then initialize normally.
++ */
++#ifdef CONFIG_EFI
++ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
++ efi_enabled = 1;
++#endif
++
++ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
++ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
++ */
++ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
++ drive_info = DRIVE_INFO;
++ screen_info = SCREEN_INFO;
++ edid_info = EDID_INFO;
++ apm_info.bios = APM_BIOS_INFO;
++ ist_info = IST_INFO;
++ saved_videomode = VIDEO_MODE;
++ if( SYS_DESC_TABLE.length != 0 ) {
++ set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
++ machine_id = SYS_DESC_TABLE.table[0];
++ machine_submodel_id = SYS_DESC_TABLE.table[1];
++ BIOS_revision = SYS_DESC_TABLE.table[2];
++ }
++ bootloader_type = LOADER_TYPE;
++
++ if (is_initial_xendomain()) {
++ /* This is drawn from a dump from vgacon:startup in
++ * standard Linux. */
++ screen_info.orig_video_mode = 3;
++ screen_info.orig_video_isVGA = 1;
++ screen_info.orig_video_lines = 25;
++ screen_info.orig_video_cols = 80;
++ screen_info.orig_video_ega_bx = 3;
++ screen_info.orig_video_points = 16;
++ screen_info.orig_y = screen_info.orig_video_lines - 1;
++ if (xen_start_info->console.dom0.info_size >=
++ sizeof(struct dom0_vga_console_info)) {
++ const struct dom0_vga_console_info *info =
++ (struct dom0_vga_console_info *)(
++ (char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++ dom0_init_screen_info(info);
++ }
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++ } else
++ screen_info.orig_video_isVGA = 0;
++
++#ifdef CONFIG_BLK_DEV_RAM
++ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++
++ setup_xen_features();
++
++ ARCH_SETUP
++ if (efi_enabled)
++ efi_init();
++ else {
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ print_memory_map(memory_setup());
++ }
++
++ copy_edd();
++
++ if (!MOUNT_ROOT_RDONLY)
++ root_mountflags &= ~MS_RDONLY;
++ init_mm.start_code = (unsigned long) _text;
++ init_mm.end_code = (unsigned long) _etext;
++ init_mm.end_data = (unsigned long) _edata;
++ init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames) << PAGE_SHIFT;
++
++ code_resource.start = virt_to_phys(_text);
++ code_resource.end = virt_to_phys(_etext)-1;
++ data_resource.start = virt_to_phys(_etext);
++ data_resource.end = virt_to_phys(_edata)-1;
++
++ parse_early_param();
++
++ if (user_defined_memmap) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ print_memory_map("user");
++ }
++
++ strlcpy(command_line, saved_command_line, COMMAND_LINE_SIZE);
++ *cmdline_p = command_line;
++
++ max_low_pfn = setup_memory();
++
++ /*
++ * NOTE: before this point _nobody_ is allowed to allocate
++ * any memory using the bootmem allocator. Although the
++ * allocator is now initialised, only the first 8Mb of the kernel
++ * virtual address space has been mapped. All allocations before
++ * paging_init() has completed must use the alloc_bootmem_low_pages()
++ * variant (which allocates DMA'able memory) and care must be taken
++ * not to exceed the 8Mb limit.
++ */
++
++#ifdef CONFIG_SMP
++ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
++#endif
++ paging_init();
++ remapped_pgdat_init();
++ sparse_init();
++ zone_sizes_init();
++
++#ifdef CONFIG_X86_FIND_SMP_CONFIG
++ /*
++ * Find and reserve possible boot-time SMP configuration:
++ */
++ find_smp_config();
++#endif
++ numa_kva_reserve();
++
++ /* Make sure we have a correctly sized P->M table. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ phys_to_machine_mapping = alloc_bootmem_low_pages(
++ max_pfn * sizeof(unsigned long));
++ memset(phys_to_machine_mapping, ~0,
++ max_pfn * sizeof(unsigned long));
++ memcpy(phys_to_machine_mapping,
++ (unsigned long *)xen_start_info->mfn_list,
++ xen_start_info->nr_pages * sizeof(unsigned long));
++ free_bootmem(
++ __pa(xen_start_info->mfn_list),
++ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++ sizeof(unsigned long))));
++
++ /*
++ * Initialise the list of the frames that specify the list of
++ * frames that make up the p2m table. Used by save/restore
++ */
++ pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
++
++ fpp = PAGE_SIZE/sizeof(unsigned long);
++ for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ BUG_ON(k>=16);
++ pfn_to_mfn_frame_list[k] =
++ alloc_bootmem_low_pages(PAGE_SIZE);
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j=0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++ }
++
++ /*
++ * NOTE: at this point the bootmem allocator is fully available.
++ */
++
++ if (is_initial_xendomain())
++ dmi_scan_machine();
++
++#ifdef CONFIG_X86_GENERICARCH
++ generic_apic_probe();
++#endif
++ if (efi_enabled)
++ efi_map_memmap();
++
++ set_iopl.iopl = 1;
++ HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl);
++
++#ifdef CONFIG_ACPI
++ if (!is_initial_xendomain()) {
++ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
++ acpi_disabled = 1;
++ acpi_ht = 0;
++ }
++
++ /*
++ * Parse the ACPI tables for possible boot-time SMP configuration.
++ */
++ acpi_boot_table_init();
++#endif
++
++#ifdef CONFIG_PCI
++#ifdef CONFIG_X86_IO_APIC
++ check_acpi_pci(); /* Checks more than just ACPI actually */
++#endif
++#endif
++
++#ifdef CONFIG_ACPI
++ acpi_boot_init();
++
++#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
++ if (def_to_bigsmp)
++ printk(KERN_WARNING "More than 8 CPUs detected and "
++ "CONFIG_X86_PC cannot handle it.\nUse "
++ "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
++#endif
++#endif
++#ifdef CONFIG_X86_LOCAL_APIC
++ if (smp_found_config)
++ get_smp_config();
++#endif
++
++ e820_register_memory();
++
++ if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ if (!efi_enabled ||
++ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++ } else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++ }
++ tsc_init();
++
++ xencons_early_setup();
++}
++
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++
++static __init int add_pcspkr(void)
++{
++ struct platform_device *pd;
++ int ret;
++
++ if (!is_initial_xendomain())
++ return 0;
++
++ pd = platform_device_alloc("pcspkr", -1);
++ if (!pd)
++ return -ENOMEM;
++
++ ret = platform_device_add(pd);
++ if (ret)
++ platform_device_put(pd);
++
++ return ret;
++}
++device_initcall(add_pcspkr);
++
++/*
++ * Local Variables:
++ * mode:c
++ * c-file-style:"k&r"
++ * c-basic-offset:8
++ * End:
++ */
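The pfn_to_mfn_frame_list construction in setup_arch() above packs the p2m table into a two-level list of frames for save/restore. As a rough standalone sketch (not part of the patch; the constants simply assume i386 defaults of 4 KiB pages and 4-byte longs), the index arithmetic works out as follows:

#include <stdio.h>

/* Sketch of the two-level p2m frame-list sizing built in setup_arch().
 * Assumed i386 constants: 4 KiB pages, 4-byte unsigned long. */
#define PAGE_SIZE_BYTES 4096UL
#define ENTRY_SIZE      4UL                              /* sizeof(unsigned long) */
#define FPP             (PAGE_SIZE_BYTES / ENTRY_SIZE)   /* 1024 entries per frame */
#define MAX_LIST_FRAMES 16UL                             /* pfn_to_mfn_frame_list[16] */

int main(void)
{
        unsigned long max_pfn = 1UL << 18;   /* example: 1 GiB guest = 262144 PFNs */

        /* one p2m frame covers FPP PFNs; one leaf frame indexes FPP p2m frames */
        unsigned long p2m_frames  = (max_pfn + FPP - 1) / FPP;
        unsigned long leaf_frames = (p2m_frames + FPP - 1) / FPP;
        unsigned long capacity    = MAX_LIST_FRAMES * FPP * FPP;

        printf("p2m frames: %lu, leaf frames: %lu (limit %lu)\n",
               p2m_frames, leaf_frames, MAX_LIST_FRAMES);
        printf("max addressable PFNs: %lu (%lu GiB of guest memory)\n",
               capacity, capacity >> 18);
        return 0;
}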
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/smp-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/smp-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,628 @@
++/*
++ * Intel SMP support routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * This code is released under the GNU General Public License version 2 or
++ * later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/cache.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/mtrr.h>
++#include <asm/tlbflush.h>
++#if 0
++#include <mach_apic.h>
++#endif
++#include <xen/evtchn.h>
++
++/*
++ * Some notes on x86 processor bugs affecting SMP operation:
++ *
++ * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
++ * The Linux implications for SMP are handled as follows:
++ *
++ * Pentium III / [Xeon]
++ * None of the E1AP-E3AP errata are visible to the user.
++ *
++ * E1AP. see PII A1AP
++ * E2AP. see PII A2AP
++ * E3AP. see PII A3AP
++ *
++ * Pentium II / [Xeon]
++ * None of the A1AP-A3AP errata are visible to the user.
++ *
++ * A1AP. see PPro 1AP
++ * A2AP. see PPro 2AP
++ * A3AP. see PPro 7AP
++ *
++ * Pentium Pro
++ * None of 1AP-9AP errata are visible to the normal user,
++ * except occasional delivery of 'spurious interrupt' as trap #15.
++ * This is very rare and a non-problem.
++ *
++ * 1AP. Linux maps APIC as non-cacheable
++ * 2AP. worked around in hardware
++ * 3AP. fixed in C0 and above steppings microcode update.
++ * Linux does not use excessive STARTUP_IPIs.
++ * 4AP. worked around in hardware
++ * 5AP. symmetric IO mode (normal Linux operation) not affected.
++ * 'noapic' mode has vector 0xf filled out properly.
++ * 6AP. 'noapic' mode might be affected - fixed in later steppings
++ * 7AP. We do not assume writes to the LVT deasserting IRQs
++ * 8AP. We do not enable low power mode (deep sleep) during MP bootup
++ * 9AP. We do not use mixed mode
++ *
++ * Pentium
++ * There is a marginal case where REP MOVS on 100MHz SMP
++ * machines with B stepping processors can fail. XXX should provide
++ * an L1cache=Writethrough or L1cache=off option.
++ *
++ * B stepping CPUs may hang. There are hardware work arounds
++ * for this. We warn about it in case your board doesn't have the work
++ * arounds. Basically that's so I can tell anyone with a B stepping
++ * CPU and SMP problems "tough".
++ *
++ * Specific items [From Pentium Processor Specification Update]
++ *
++ * 1AP. Linux doesn't use remote read
++ * 2AP. Linux doesn't trust APIC errors
++ * 3AP. We work around this
++ * 4AP. Linux never generated 3 interrupts of the same priority
++ * to cause a lost local interrupt.
++ * 5AP. Remote read is never used
++ * 6AP. not affected - worked around in hardware
++ * 7AP. not affected - worked around in hardware
++ * 8AP. worked around in hardware - we get explicit CS errors if not
++ * 9AP. only 'noapic' mode affected. Might generate spurious
++ * interrupts, we log only the first one and count the
++ * rest silently.
++ * 10AP. not affected - worked around in hardware
++ * 11AP. Linux reads the APIC between writes to avoid this, as per
++ * the documentation. Make sure you preserve this as it affects
++ * the C stepping chips too.
++ * 12AP. not affected - worked around in hardware
++ * 13AP. not affected - worked around in hardware
++ * 14AP. we always deassert INIT during bootup
++ * 15AP. not affected - worked around in hardware
++ * 16AP. not affected - worked around in hardware
++ * 17AP. not affected - worked around in hardware
++ * 18AP. not affected - worked around in hardware
++ * 19AP. not affected - worked around in BIOS
++ *
++ * If this sounds worrying believe me these bugs are either ___RARE___,
++ * or are signal timing bugs worked around in hardware and there's
++ * about nothing of note with C stepping upwards.
++ */
++
++DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
++
++/*
++ * the following functions deal with sending IPIs between CPUs.
++ *
++ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
++ */
++
++static inline int __prepare_ICR (unsigned int shortcut, int vector)
++{
++ unsigned int icr = shortcut | APIC_DEST_LOGICAL;
++
++ switch (vector) {
++ default:
++ icr |= APIC_DM_FIXED | vector;
++ break;
++ case NMI_VECTOR:
++ icr |= APIC_DM_NMI;
++ break;
++ }
++ return icr;
++}
++
++static inline int __prepare_ICR2 (unsigned int mask)
++{
++ return SET_APIC_DEST_FIELD(mask);
++}
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++ int irq = per_cpu(ipi_to_irq, cpu)[vector];
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++}
++
++void __send_IPI_shortcut(unsigned int shortcut, int vector)
++{
++ int cpu;
++
++ switch (shortcut) {
++ case APIC_DEST_SELF:
++ __send_IPI_one(smp_processor_id(), vector);
++ break;
++ case APIC_DEST_ALLBUT:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu == smp_processor_id())
++ continue;
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ default:
++ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++ vector);
++ break;
++ }
++}
++
++void fastcall send_IPI_self(int vector)
++{
++ __send_IPI_shortcut(APIC_DEST_SELF, vector);
++}
++
++/*
++ * This is only used on smaller machines.
++ */
++void send_IPI_mask_bitmask(cpumask_t cpumask, int vector)
++{
++ unsigned long mask = cpus_addr(cpumask)[0];
++ unsigned long flags;
++ unsigned int cpu;
++
++ local_irq_save(flags);
++ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
++
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, cpumask)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++
++ local_irq_restore(flags);
++}
++
++void send_IPI_mask_sequence(cpumask_t mask, int vector)
++{
++
++ send_IPI_mask_bitmask(mask, vector);
++}
++
++#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
++
++#if 0 /* XEN */
++/*
++ * Smarter SMP flushing macros.
++ * c/o Linus Torvalds.
++ *
++ * These mean you can really definitely utterly forget about
++ * writing to user space from interrupts. (It's not allowed anyway).
++ *
++ * Optimizations Manfred Spraul <manfred@colorfullife.com>
++ */
++
++static cpumask_t flush_cpumask;
++static struct mm_struct * flush_mm;
++static unsigned long flush_va;
++static DEFINE_SPINLOCK(tlbstate_lock);
++#define FLUSH_ALL 0xffffffff
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context,
++ * instead update mm->cpu_vm_mask.
++ *
++ * We need to reload %cr3 since the page tables may be going
++ * away from under us..
++ */
++static inline void leave_mm (unsigned long cpu)
++{
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++ BUG();
++ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
++ load_cr3(swapper_pg_dir);
++}
++
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * Stop ipi delivery for the old mm. This is not synchronized with
++ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * for the wrong mm, and in the worst case we perform a superfluous
++ * tlb flush.
++ * 1a2) set cpu_tlbstate to TLBSTATE_OK
++ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ * was in lazy tlb mode.
++ * 1a3) update cpu_tlbstate[].active_mm
++ * Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * Now the other cpus will send tlb flush ipis.
++ * 1a4) change cr3.
++ * 1b) thread switch without mm change
++ * cpu_tlbstate[].active_mm is correct, cpu0 already handles
++ * flush ipis.
++ * 1b1) set cpu_tlbstate to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * Atomically set the bit [other cpus will start sending flush ipis],
++ * and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ * runs in kernel space, the cpu could load tlb entries for user space
++ * pages.
++ *
++ * The good news is that cpu_tlbstate is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ */
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ unsigned long cpu;
++
++ cpu = get_cpu();
++
++ if (!cpu_isset(cpu, flush_cpumask))
++ goto out;
++ /*
++ * This was a BUG() but until someone can quote me the
++ * line from the intel manual that guarantees an IPI to
++ * multiple CPUs is retried _only_ on the erroring CPUs
++ * it's staying as a return
++ *
++ * BUG();
++ */
++
++ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
++ if (flush_va == FLUSH_ALL)
++ local_flush_tlb();
++ else
++ __flush_tlb_one(flush_va);
++ } else
++ leave_mm(cpu);
++ }
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, flush_cpumask);
++ smp_mb__after_clear_bit();
++out:
++ put_cpu_no_resched();
++
++ return IRQ_HANDLED;
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++ unsigned long va)
++{
++ /*
++ * A couple of (to be removed) sanity checks:
++ *
++ * - current CPU must not be in mask
++ * - mask must exist :)
++ */
++ BUG_ON(cpus_empty(cpumask));
++ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
++ BUG_ON(!mm);
++
++ /* If a CPU which we ran on has gone down, OK. */
++ cpus_and(cpumask, cpumask, cpu_online_map);
++ if (cpus_empty(cpumask))
++ return;
++
++ /*
++ * I'm not happy about this global shared spinlock in the
++ * MM hot path, but we'll see how contended it is.
++ * Temporarily this turns IRQs off, so that lockups are
++ * detected by the NMI watchdog.
++ */
++ spin_lock(&tlbstate_lock);
++
++ flush_mm = mm;
++ flush_va = va;
++#if NR_CPUS <= BITS_PER_LONG
++ atomic_set_mask(cpumask, &flush_cpumask);
++#else
++ {
++ int k;
++ unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
++ unsigned long *cpu_mask = (unsigned long *)&cpumask;
++ for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
++ atomic_set_mask(cpu_mask[k], &flush_mask[k]);
++ }
++#endif
++ /*
++ * We have to send the IPI only to
++ * CPUs affected.
++ */
++ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
++
++ while (!cpus_empty(flush_cpumask))
++ /* nothing. lockup detection does not belong here */
++ mb();
++
++ flush_mm = NULL;
++ flush_va = 0;
++ spin_unlock(&tlbstate_lock);
++}
++
++void flush_tlb_current_task(void)
++{
++ struct mm_struct *mm = current->mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ local_flush_tlb();
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ preempt_enable();
++}
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if (current->mm)
++ local_flush_tlb();
++ else
++ leave_mm(smp_processor_id());
++ }
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++ preempt_enable();
++}
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if(current->mm)
++ __flush_tlb_one(va);
++ else
++ leave_mm(smp_processor_id());
++ }
++
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, va);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
++
++static void do_flush_tlb_all(void* info)
++{
++ unsigned long cpu = smp_processor_id();
++
++ __flush_tlb_all();
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
++ leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++
++#else
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{ return 0; }
++void flush_tlb_current_task(void)
++{ xen_tlb_flush_mask(&current->mm->cpu_vm_mask); }
++void flush_tlb_mm(struct mm_struct * mm)
++{ xen_tlb_flush_mask(&mm->cpu_vm_mask); }
++void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
++{ xen_invlpg_mask(&vma->vm_mm->cpu_vm_mask, va); }
++EXPORT_SYMBOL(flush_tlb_page);
++void flush_tlb_all(void)
++{ xen_tlb_flush_all(); }
++
++#endif /* XEN */
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++void smp_send_reschedule(int cpu)
++{
++ WARN_ON(cpu_is_offline(cpu));
++ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++ void (*func) (void *info);
++ void *info;
++ atomic_t started;
++ atomic_t finished;
++ int wait;
++};
++
++void lock_ipi_call_lock(void)
++{
++ spin_lock_irq(&call_lock);
++}
++
++void unlock_ipi_call_lock(void)
++{
++ spin_unlock_irq(&call_lock);
++}
++
++static struct call_data_struct *call_data;
++
++/**
++ * smp_call_function(): Run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute <<func>> or have already executed it.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++ int wait)
++{
++ struct call_data_struct data;
++ int cpus;
++
++ /* Holding any lock stops cpus from going down. */
++ spin_lock(&call_lock);
++ cpus = num_online_cpus() - 1;
++ if (!cpus) {
++ spin_unlock(&call_lock);
++ return 0;
++ }
++
++ /* Can deadlock when called with interrupts disabled */
++ WARN_ON(irqs_disabled());
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ mb();
++
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ barrier();
++
++ if (wait)
++ while (atomic_read(&data.finished) != cpus)
++ barrier();
++ spin_unlock(&call_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
++
++static void stop_this_cpu (void * dummy)
++{
++ /*
++ * Remove this CPU:
++ */
++ cpu_clear(smp_processor_id(), cpu_online_map);
++ local_irq_disable();
++#if 0
++ disable_local_APIC();
++#endif
++ if (cpu_data[smp_processor_id()].hlt_works_ok)
++ for(;;) halt();
++ for (;;);
++}
++
++/*
++ * this function calls the 'stop' function on all other CPUs in the system.
++ */
++
++void smp_send_stop(void)
++{
++ smp_call_function(stop_this_cpu, NULL, 1, 0);
++
++ local_irq_disable();
++#if 0
++ disable_local_APIC();
++#endif
++ local_irq_enable();
++}
++
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ return IRQ_HANDLED;
++}
++
++#include <linux/kallsyms.h>
++irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ void (*func) (void *info) = call_data->func;
++ void *info = call_data->info;
++ int wait = call_data->wait;
++
++ /*
++ * Notify initiating CPU that I've grabbed the data and am
++ * about to execute the function
++ */
++ mb();
++ atomic_inc(&call_data->started);
++ /*
++ * At this point the info structure may be out of scope unless wait==1
++ */
++ irq_enter();
++ (*func)(info);
++ irq_exit();
++
++ if (wait) {
++ mb();
++ atomic_inc(&call_data->finished);
++ }
++
++ return IRQ_HANDLED;
++}
++
++int safe_smp_processor_id(void)
++{
++ return smp_processor_id();
++}
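The smp_call_function()/smp_call_function_interrupt() pair above is essentially a rendezvous: the caller publishes call_data, raises the IPI, then spins on the started (and, when wait is set, finished) counters while every target increments them around the callback. A rough userspace analogue of that handshake, using POSIX threads and C11 atomics in place of CPUs and IPIs (the struct and field names loosely mirror the kernel's call_data_struct, the rest is purely illustrative), might look like this:

/* build with: cc -pthread rendezvous.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NCPUS 4

struct call_data {
        void (*func)(void *);
        void *info;
        atomic_int started;
        atomic_int finished;
        int wait;
};

static _Atomic(struct call_data *) call_data_ptr;

static void say_hello(void *info)
{
        printf("handler ran with info=%s\n", (const char *)info);
}

static void *cpu_thread(void *arg)
{
        struct call_data *d;

        (void)arg;
        while ((d = atomic_load(&call_data_ptr)) == NULL)
                ;                                    /* wait for the "IPI" */
        atomic_fetch_add(&d->started, 1);            /* signal: we have grabbed the data */
        d->func(d->info);
        if (d->wait)
                atomic_fetch_add(&d->finished, 1);   /* signal: the function has run */
        return NULL;
}

int main(void)
{
        pthread_t tids[NCPUS - 1];
        struct call_data data = { say_hello, "ping", 0, 0, 1 };
        int cpus = NCPUS - 1, i;

        for (i = 0; i < cpus; i++)
                pthread_create(&tids[i], NULL, cpu_thread, NULL);

        atomic_store(&call_data_ptr, &data);         /* publish, i.e. "send" the IPI */
        while (atomic_load(&data.started) != cpus)
                ;                                    /* every target grabbed the data */
        while (data.wait && atomic_load(&data.finished) != cpus)
                ;                                    /* every target ran the function */
        for (i = 0; i < cpus; i++)
                pthread_join(tids[i], NULL);
        return 0;
}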
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/swiotlb.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/swiotlb.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,716 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * This implementation is a fallback for platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ * David Mosberger-Tang <davidm@hpl.hp.com>
++ * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
++ */
++
++#include <linux/cache.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <asm/io.h>
++#include <asm/pci.h>
++#include <asm/dma.h>
++#include <asm/uaccess.h>
++#include <xen/interface/memory.h>
++
++int swiotlb;
++EXPORT_SYMBOL(swiotlb);
++
++#define OFFSET(val,align) ((unsigned long)((val) & ( (align) - 1)))
++
++#define SG_ENT_PHYS_ADDRESS(sg) (page_to_bus((sg)->page) + (sg)->offset)
++
++/*
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2. What is the appropriate value ?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
++ */
++#define IO_TLB_SEGSIZE 128
++
++/*
++ * log of the size of each IO TLB slab. The number of slabs is command line
++ * controllable.
++ */
++#define IO_TLB_SHIFT 11
++
++int swiotlb_force;
++
++static char *iotlb_virt_start;
++static unsigned long iotlb_nslabs;
++
++/*
++ * Used to do a quick range check in swiotlb_unmap_single and
++ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static unsigned long iotlb_pfn_start, iotlb_pfn_end;
++
++/* Does the given dma address reside within the swiotlb aperture? */
++static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
++{
++ unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
++ return (pfn_valid(pfn)
++ && (pfn >= iotlb_pfn_start)
++ && (pfn < iotlb_pfn_end));
++}
++
++/*
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
++ */
++static unsigned long io_tlb_overflow = 32*1024;
++
++void *io_tlb_overflow_buffer;
++
++/*
++ * This is a free list describing the number of free entries available from
++ * each index
++ */
++static unsigned int *io_tlb_list;
++static unsigned int io_tlb_index;
++
++/*
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
++ */
++static struct phys_addr {
++ struct page *page;
++ unsigned int offset;
++} *io_tlb_orig_addr;
++
++/*
++ * Protect the above data structures in the map and unmap calls
++ */
++static DEFINE_SPINLOCK(io_tlb_lock);
++
++static unsigned int dma_bits;
++static unsigned int __initdata max_dma_bits = 32;
++static int __init
++setup_dma_bits(char *str)
++{
++ max_dma_bits = simple_strtoul(str, NULL, 0);
++ return 0;
++}
++__setup("dma_bits=", setup_dma_bits);
++
++static int __init
++setup_io_tlb_npages(char *str)
++{
++ /* Unlike ia64, the size given is the aperture in megabytes, not 'slabs'! */
++ if (isdigit(*str)) {
++ iotlb_nslabs = simple_strtoul(str, &str, 0) <<
++ (20 - IO_TLB_SHIFT);
++ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++ /* Round up to power of two (xen_create_contiguous_region). */
++ while (iotlb_nslabs & (iotlb_nslabs-1))
++ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
++ }
++ if (*str == ',')
++ ++str;
++ /*
++ * NB. 'force' enables the swiotlb, but doesn't force its use for
++ * every DMA like it does on native Linux. 'off' forcibly disables
++ * use of the swiotlb.
++ */
++ if (!strcmp(str, "force"))
++ swiotlb_force = 1;
++ else if (!strcmp(str, "off"))
++ swiotlb_force = -1;
++ return 1;
++}
++__setup("swiotlb=", setup_io_tlb_npages);
++/* make io_tlb_overflow tunable too? */
++
++/*
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the PCI DMA API.
++ */
++void
++swiotlb_init_with_default_size (size_t default_size)
++{
++ unsigned long i, bytes;
++ int rc;
++
++ if (!iotlb_nslabs) {
++ iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
++ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++ /* Round up to power of two (xen_create_contiguous_region). */
++ while (iotlb_nslabs & (iotlb_nslabs-1))
++ iotlb_nslabs += iotlb_nslabs & ~(iotlb_nslabs-1);
++ }
++
++ bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
++
++ /*
++ * Get IO TLB memory from the low pages
++ */
++ iotlb_virt_start = alloc_bootmem_low_pages(bytes);
++ if (!iotlb_virt_start)
++ panic("Cannot allocate SWIOTLB buffer!\n");
++
++ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
++ for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
++ do {
++ rc = xen_create_contiguous_region(
++ (unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
++ get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++ dma_bits);
++ } while (rc && dma_bits++ < max_dma_bits);
++ if (rc) {
++ if (i == 0)
++ panic("No suitable physical memory available for SWIOTLB buffer!\n"
++ "Use dom0_mem Xen boot parameter to reserve\n"
++ "some DMA memory (e.g., dom0_mem=-128M).\n");
++ iotlb_nslabs = i;
++ i <<= IO_TLB_SHIFT;
++ free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
++ bytes = i;
++ for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
++ unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
++
++ if (bits > dma_bits)
++ dma_bits = bits;
++ }
++ break;
++ }
++ }
++
++ /*
++ * Allocate and initialize the free list array. This array is used
++ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
++ */
++ io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
++ for (i = 0; i < iotlb_nslabs; i++)
++ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++ io_tlb_index = 0;
++ io_tlb_orig_addr = alloc_bootmem(
++ iotlb_nslabs * sizeof(*io_tlb_orig_addr));
++
++ /*
++ * Get the overflow emergency buffer
++ */
++ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++ if (!io_tlb_overflow_buffer)
++ panic("Cannot allocate SWIOTLB overflow buffer!\n");
++
++ do {
++ rc = xen_create_contiguous_region(
++ (unsigned long)io_tlb_overflow_buffer,
++ get_order(io_tlb_overflow),
++ dma_bits);
++ } while (rc && dma_bits++ < max_dma_bits);
++ if (rc)
++ panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
++
++ iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
++ iotlb_pfn_end = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
++
++ printk(KERN_INFO "Software IO TLB enabled: \n"
++ " Aperture: %lu megabytes\n"
++ " Kernel range: %p - %p\n"
++ " Address size: %u bits\n",
++ bytes >> 20,
++ iotlb_virt_start, iotlb_virt_start + bytes,
++ dma_bits);
++}
++
++void
++swiotlb_init(void)
++{
++ long ram_end;
++ size_t defsz = 64 * (1 << 20); /* 64MB default size */
++
++ if (swiotlb_force == 1) {
++ swiotlb = 1;
++ } else if ((swiotlb_force != -1) &&
++ is_running_on_xen() &&
++ is_initial_xendomain()) {
++ /* Domain 0 always has a swiotlb. */
++ ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++ if (ram_end <= 0x7ffff)
++ defsz = 2 * (1 << 20); /* 2MB on systems with <2GB. */
++ swiotlb = 1;
++ }
++
++ if (swiotlb)
++ swiotlb_init_with_default_size(defsz);
++ else
++ printk(KERN_INFO "Software IO TLB disabled\n");
++}
++
++/*
++ * We use __copy_to_user_inatomic to transfer to the host buffer because the
++ * buffer may be mapped read-only (e.g., in blkback driver) but lower-level
++ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
++ * unnecessary copy from the aperture to the host buffer, and a page fault.
++ */
++static void
++__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
++{
++ if (PageHighMem(buffer.page)) {
++ size_t len, bytes;
++ char *dev, *host, *kmp;
++ len = size;
++ while (len != 0) {
++ unsigned long flags;
++
++ if (((bytes = len) + buffer.offset) > PAGE_SIZE)
++ bytes = PAGE_SIZE - buffer.offset;
++ local_irq_save(flags); /* protects KM_BOUNCE_READ */
++ kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
++ dev = dma_addr + size - len;
++ host = kmp + buffer.offset;
++ if (dir == DMA_FROM_DEVICE) {
++ if (__copy_to_user_inatomic(host, dev, bytes))
++ /* inaccessible */;
++ } else
++ memcpy(dev, host, bytes);
++ kunmap_atomic(kmp, KM_BOUNCE_READ);
++ local_irq_restore(flags);
++ len -= bytes;
++ buffer.page++;
++ buffer.offset = 0;
++ }
++ } else {
++ char *host = (char *)phys_to_virt(
++ page_to_pseudophys(buffer.page)) + buffer.offset;
++ if (dir == DMA_FROM_DEVICE) {
++ if (__copy_to_user_inatomic(host, dma_addr, size))
++ /* inaccessible */;
++ } else if (dir == DMA_TO_DEVICE)
++ memcpy(dma_addr, host, size);
++ }
++}
++
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
++{
++ unsigned long flags;
++ char *dma_addr;
++ unsigned int nslots, stride, index, wrap;
++ int i;
++
++ /*
++ * For mappings greater than a page, we limit the stride (and
++ * hence alignment) to a page size.
++ */
++ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ if (size > PAGE_SIZE)
++ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
++ else
++ stride = 1;
++
++ BUG_ON(!nslots);
++
++ /*
++ * Find a suitable number of IO TLB entries that will fit this
++ * request and allocate a buffer from that IO TLB pool.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ wrap = index = ALIGN(io_tlb_index, stride);
++
++ if (index >= iotlb_nslabs)
++ wrap = index = 0;
++
++ do {
++ /*
++ * If we find a slot that indicates we have 'nslots'
++ * number of contiguous buffers, we allocate the
++ * buffers from that slot and mark the entries as '0'
++ * indicating unavailable.
++ */
++ if (io_tlb_list[index] >= nslots) {
++ int count = 0;
++
++ for (i = index; i < (int)(index + nslots); i++)
++ io_tlb_list[i] = 0;
++ for (i = index - 1;
++ (OFFSET(i, IO_TLB_SEGSIZE) !=
++ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++ i--)
++ io_tlb_list[i] = ++count;
++ dma_addr = iotlb_virt_start +
++ (index << IO_TLB_SHIFT);
++
++ /*
++ * Update the indices to avoid searching in
++ * the next round.
++ */
++ io_tlb_index =
++ ((index + nslots) < iotlb_nslabs
++ ? (index + nslots) : 0);
++
++ goto found;
++ }
++ index += stride;
++ if (index >= iotlb_nslabs)
++ index = 0;
++ } while (index != wrap);
++
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++ return NULL;
++ }
++ found:
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++
++ /*
++ * Save away the mapping from the original address to the DMA address.
++ * This is needed when we sync the memory. Then we sync the buffer if
++ * needed.
++ */
++ io_tlb_orig_addr[index] = buffer;
++ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
++
++ return dma_addr;
++}
++
++/*
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ */
++static void
++unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ unsigned long flags;
++ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++ struct phys_addr buffer = io_tlb_orig_addr[index];
++
++ /*
++ * First, sync the memory before unmapping the entry
++ */
++ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
++
++ /*
++ * Return the buffer to the free list by setting the corresponding
++ * entries to indicate the number of contiguous entries available.
++ * While returning the entries to the free list, we merge the entries
++ * with slots below and above the pool being returned.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++ io_tlb_list[index + nslots] : 0);
++ /*
++ * Step 1: return the slots to the free list, merging the
++ * slots with succeeding slots
++ */
++ for (i = index + nslots - 1; i >= index; i--)
++ io_tlb_list[i] = ++count;
++ /*
++ * Step 2: merge the returned slots with the preceding slots,
++ * if available (non zero)
++ */
++ for (i = index - 1;
++ (OFFSET(i, IO_TLB_SEGSIZE) !=
++ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++ i--)
++ io_tlb_list[i] = ++count;
++ }
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++}
++
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++ struct phys_addr buffer = io_tlb_orig_addr[index];
++ BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
++ __sync_single(buffer, dma_addr, size, dir);
++}
++
++static void
++swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++{
++ /*
++ * Ran out of IOMMU space for this operation. This is very bad.
++ * Unfortunately the drivers cannot handle this operation properly
++ * unless they check for pci_dma_mapping_error (most don't).
++ * When the mapping is small enough return a static buffer to limit
++ * the damage, or panic when the transfer is too big.
++ */
++ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
++ "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
++
++ if (size > io_tlb_overflow && do_panic) {
++ if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++ panic("PCI-DMA: Memory would be corrupted\n");
++ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++ panic("PCI-DMA: Random memory would be DMAed\n");
++ }
++}
++
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode. The
++ * PCI address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
++ */
++dma_addr_t
++swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
++{
++ dma_addr_t dev_addr = virt_to_bus(ptr);
++ void *map;
++ struct phys_addr buffer;
++
++ BUG_ON(dir == DMA_NONE);
++
++ /*
++ * If the pointer passed in happens to be in the device's DMA window,
++ * we can safely return the device addr and not worry about bounce
++ * buffering it.
++ */
++ if (!range_straddles_page_boundary(ptr, size) &&
++ !address_needs_mapping(hwdev, dev_addr))
++ return dev_addr;
++
++ /*
++ * Oh well, have to allocate and map a bounce buffer.
++ */
++ buffer.page = virt_to_page(ptr);
++ buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
++ map = map_single(hwdev, buffer, size, dir);
++ if (!map) {
++ swiotlb_full(hwdev, size, dir, 1);
++ map = io_tlb_overflow_buffer;
++ }
++
++ dev_addr = virt_to_bus(map);
++ return dev_addr;
++}
++
++/*
++ * Unmap a single streaming mode DMA translation. The dma_addr and size must
++ * match what was provided for in a previous swiotlb_map_single call. All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++void
++swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
++ int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
++ * call this function before doing so. At the next point you give the PCI dma
++ * address back to the card, you must first perform a
++ * swiotlb_dma_sync_for_device, and then the device again owns the buffer.
++ */
++void
++swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++void
++swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++/*
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above swiotlb_map_single
++ * interface. Here the scatter gather list elements are each tagged with the
++ * appropriate dma address and length. They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ * DMA address/length pairs than there are SG table elements.
++ * (for example via virtual mapping capabilities)
++ * The routine returns the number of addr/length pairs actually
++ * used, at most nents.
++ *
++ * Device ownership issues as mentioned above for swiotlb_map_single are the
++ * same here.
++ */
++int
++swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ struct phys_addr buffer;
++ dma_addr_t dev_addr;
++ char *map;
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++) {
++ dev_addr = SG_ENT_PHYS_ADDRESS(sg);
++ if (address_needs_mapping(hwdev, dev_addr)) {
++ buffer.page = sg->page;
++ buffer.offset = sg->offset;
++ map = map_single(hwdev, buffer, sg->length, dir);
++ if (!map) {
++ /* Don't panic here, we expect map_sg users
++ to do proper error handling. */
++ swiotlb_full(hwdev, sg->length, dir, 0);
++ swiotlb_unmap_sg(hwdev, sg - i, i, dir);
++ sg[0].dma_length = 0;
++ return 0;
++ }
++ sg->dma_address = (dma_addr_t)virt_to_bus(map);
++ } else
++ sg->dma_address = dev_addr;
++ sg->dma_length = sg->length;
++ }
++ return nelems;
++}
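On the scatter-gather side, the comment above leaves the caller's half of the contract implicit. A hedged sketch of the expected usage follows (editorial only, not part of the patch; setup_descriptor() is a hypothetical device-specific helper):

/* Editorial sketch only -- not part of the patch. */
extern void setup_descriptor(dma_addr_t addr, unsigned int len);	/* hypothetical */

static int example_map_sg(struct device *hwdev, struct scatterlist *sgl, int nents)
{
	int i, mapped;

	mapped = swiotlb_map_sg(hwdev, sgl, nents, DMA_TO_DEVICE);
	if (mapped == 0)
		return -ENOMEM;		/* nothing is left mapped on failure */

	for (i = 0; i < mapped; i++)
		/* One descriptor per returned addr/length pair. */
		setup_descriptor(sg_dma_address(&sgl[i]), sg_dma_length(&sgl[i]));

	/* ... after the transfer completes ... */
	swiotlb_unmap_sg(hwdev, sgl, nents, DMA_TO_DEVICE);
	return 0;
}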
++
++/*
++ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
++ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ */
++void
++swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ unmap_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
++ *
++ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
++ */
++void
++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ sync_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++void
++swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (sg->dma_address != SG_ENT_PHYS_ADDRESS(sg))
++ sync_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++#ifdef CONFIG_HIGHMEM
++
++dma_addr_t
++swiotlb_map_page(struct device *hwdev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ struct phys_addr buffer;
++ dma_addr_t dev_addr;
++ char *map;
++
++ dev_addr = page_to_bus(page) + offset;
++ if (address_needs_mapping(hwdev, dev_addr)) {
++ buffer.page = page;
++ buffer.offset = offset;
++ map = map_single(hwdev, buffer, size, direction);
++ if (!map) {
++ swiotlb_full(hwdev, size, direction, 1);
++ map = io_tlb_overflow_buffer;
++ }
++ dev_addr = (dma_addr_t)virt_to_bus(map);
++ }
++
++ return dev_addr;
++}
++
++void
++swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (in_swiotlb_aperture(dma_address))
++ unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
++}
++
++#endif
++
++int
++swiotlb_dma_mapping_error(dma_addr_t dma_addr)
++{
++ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
++}
++
++/*
++ * Return whether the given PCI device DMA address mask can be supported
++ * properly. For example, if your device can only drive the low 24-bits
++ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
++ * this function.
++ */
++int
++swiotlb_dma_supported (struct device *hwdev, u64 mask)
++{
++ return (mask >= ((1UL << dma_bits) - 1));
++}
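To make the 24-bit example in the comment concrete, a probe routine for such a device might check its mask like this (editorial sketch, not part of the patch):

/* Editorial sketch only -- not part of the patch. */
static int example_check_24bit_device(struct device *hwdev)
{
	/* Device can only drive the low 24 address bits while bus mastering. */
	if (!swiotlb_dma_supported(hwdev, 0x00ffffffULL))
		return -EIO;	/* aperture needs more bits than the device can address */
	return 0;
}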
++
++EXPORT_SYMBOL(swiotlb_init);
++EXPORT_SYMBOL(swiotlb_map_single);
++EXPORT_SYMBOL(swiotlb_unmap_single);
++EXPORT_SYMBOL(swiotlb_map_sg);
++EXPORT_SYMBOL(swiotlb_unmap_sg);
++EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_single_for_device);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
++EXPORT_SYMBOL(swiotlb_dma_mapping_error);
++EXPORT_SYMBOL(swiotlb_dma_supported);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/sysenter.c
+--- a/arch/i386/kernel/sysenter.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/sysenter.c Wed Aug 08 16:25:28 2007 -0300
+@@ -23,6 +23,10 @@
+ #include <asm/pgtable.h>
+ #include <asm/unistd.h>
+
++#ifdef CONFIG_XEN
++#include <xen/interface/callback.h>
++#endif
++
+ /*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+@@ -48,6 +52,7 @@ extern asmlinkage void sysenter_entry(vo
+
+ void enable_sep_cpu(void)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ int cpu = get_cpu();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+@@ -62,6 +67,7 @@ void enable_sep_cpu(void)
+ wrmsr(MSR_IA32_SYSENTER_ESP, tss->esp1, 0);
+ wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) sysenter_entry, 0);
+ put_cpu();
++#endif
+ }
+
+ /*
+@@ -76,9 +82,26 @@ int __init sysenter_setup(void)
+ {
+ syscall_page = (void *)get_zeroed_page(GFP_ATOMIC);
+
++#ifdef CONFIG_XEN
++ if (boot_cpu_has(X86_FEATURE_SEP)) {
++ static struct callback_register __initdata sysenter = {
++ .type = CALLBACKTYPE_sysenter,
++ .address = { __KERNEL_CS, (unsigned long)sysenter_entry },
++ };
++
++ if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) < 0)
++ clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++ }
++#endif
++
+ #ifdef CONFIG_COMPAT_VDSO
+ __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_READONLY_EXEC);
+ printk("Compat vDSO mapped to %08lx.\n", __fix_to_virt(FIX_VDSO));
++#else
++ /*
++ * In the non-compat case the ELF coredumping code needs the fixmap:
++ */
++ __set_fixmap(FIX_VDSO, __pa(syscall_page), PAGE_KERNEL_RO);
+ #endif
+
+ if (!boot_cpu_has(X86_FEATURE_SEP)) {
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/time-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/time-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,1091 @@
++/*
++ * linux/arch/i386/kernel/time.c
++ *
++ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
++ *
++ * This file contains the PC-specific time handling details:
++ * reading the RTC at bootup, etc..
++ * 1994-07-02 Alan Modra
++ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
++ * 1995-03-26 Markus Kuhn
++ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
++ * precision CMOS clock update
++ * 1996-05-03 Ingo Molnar
++ * fixed time warps in do_[slow|fast]_gettimeoffset()
++ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
++ * "A Kernel Model for Precision Timekeeping" by Dave Mills
++ * 1998-09-05 (Various)
++ * More robust do_fast_gettimeoffset() algorithm implemented
++ * (works with APM, Cyrix 6x86MX and Centaur C6),
++ * monotonic gettimeofday() with fast_get_timeoffset(),
++ * drift-proof precision TSC calibration on boot
++ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
++ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
++ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
++ * 1998-12-16 Andrea Arcangeli
++ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
++ * because was not accounting lost_ticks.
++ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
++ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
++ * serialize accesses to xtime/lost_ticks).
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/bcd.h>
++#include <linux/efi.h>
++#include <linux/mca.h>
++#include <linux/sysctl.h>
++#include <linux/percpu.h>
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/irq.h>
++#include <asm/msr.h>
++#include <asm/delay.h>
++#include <asm/mpspec.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/timer.h>
++#include <asm/time.h>
++
++#include "mach_time.h"
++
++#include <linux/timex.h>
++
++#include <asm/hpet.h>
++
++#ifdef __i386__
++#include <asm/arch_hooks.h>
++#endif
++
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
++
++#include <asm/i8259.h>
++
++int pit_latch_buggy; /* extern */
++
++#define USEC_PER_TICK (USEC_PER_SEC / HZ)
++#define NSEC_PER_TICK (NSEC_PER_SEC / HZ)
++#define FSEC_PER_TICK (FSEC_PER_SEC / HZ)
++
++#define NS_SCALE 10 /* 2^10, carefully chosen */
++#define US_SCALE 32 /* 2^32, arbitrarily chosen */
++
++unsigned int cpu_khz; /* Detected as we calibrate the TSC */
++EXPORT_SYMBOL(cpu_khz);
++
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
++
++extern struct init_timer_opts timer_tsc_init;
++extern struct timer_opts timer_tsc;
++#define timer_none timer_tsc
++
++/* These are periodically updated in shared_info, and then copied here. */
++struct shadow_time_info {
++ u64 tsc_timestamp; /* TSC at last update of time vals. */
++ u64 system_timestamp; /* Time, in nanosecs, since boot. */
++ u32 tsc_to_nsec_mul;
++ u32 tsc_to_usec_mul;
++ int tsc_shift;
++ u32 version;
++};
++static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
++static struct timespec shadow_tv;
++static u32 shadow_tv_version;
++
++/* Keep track of last time we did processing/updating of jiffies and xtime. */
++static u64 processed_system_time; /* System time (ns) at last processing. */
++static DEFINE_PER_CPU(u64, processed_system_time);
++
++/* How much CPU time was spent blocked and how much was 'stolen'? */
++static DEFINE_PER_CPU(u64, processed_stolen_time);
++static DEFINE_PER_CPU(u64, processed_blocked_time);
++
++/* Current runstate of each CPU (updated automatically by the hypervisor). */
++static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++
++/* Must be signed, as it's compared with s64 quantities which can be -ve. */
++#define NS_PER_TICK (1000000000LL/HZ)
++
++static inline void __normalize_time(time_t *sec, s64 *nsec)
++{
++ while (*nsec >= NSEC_PER_SEC) {
++ (*nsec) -= NSEC_PER_SEC;
++ (*sec)++;
++ }
++ while (*nsec < 0) {
++ (*nsec) += NSEC_PER_SEC;
++ (*sec)--;
++ }
++}
++
++/* Does this guest OS track Xen time, or set its wall clock independently? */
++static int independent_wallclock = 0;
++static int __init __independent_wallclock(char *str)
++{
++ independent_wallclock = 1;
++ return 1;
++}
++__setup("independent_wallclock", __independent_wallclock);
++
++/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
++static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
++static int __init __permitted_clock_jitter(char *str)
++{
++ permitted_clock_jitter = simple_strtoul(str, NULL, 0);
++ return 1;
++}
++__setup("permitted_clock_jitter=", __permitted_clock_jitter);
++
++/*
++ * Scale a 64-bit delta by scaling and multiplying by a 32-bit fraction,
++ * yielding a 64-bit result.
++ */
++static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
++{
++ u64 product;
++ u32 tmp1, tmp2;
++
++ if (shift < 0)
++ delta >>= -shift;
++ else
++ delta <<= shift;
++
++ __asm__ (
++ "mul %5 ; "
++ "mov %4,%%eax ; "
++ "mov %%edx,%4 ; "
++ "mul %5 ; "
++ "xor %5,%5 ; "
++ "add %4,%%eax ; "
++ "adc %5,%%edx ; "
++ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
++ : "a" ((u32)delta), "1" ((u32)(delta >> US_SCALE)), "2" (mul_frac) );
++
++ return product;
++}
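The inline assembly computes a 64x32-bit multiply and keeps the middle 64 bits of the 96-bit product, i.e. ((delta << shift) * mul_frac) >> 32. As an editorial illustration (not part of the patch, and assuming a 64-bit compiler that provides GCC's unsigned __int128), the same calculation in portable C is:

/* Editorial sketch only -- not part of the patch. */
static inline u64 scale_delta_portable(u64 delta, u32 mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;

	/* 64x32 -> 96-bit product, then drop the low 32 fractional bits. */
	return (u64)(((unsigned __int128)delta * mul_frac) >> 32);
}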
++
++void init_cpu_khz(void)
++{
++ u64 __cpu_khz = 1000000ULL << US_SCALE;
++ struct vcpu_time_info *info = &vcpu_info(0)->time;
++ do_div(__cpu_khz, info->tsc_to_system_mul);
++ if (info->tsc_shift < 0)
++ cpu_khz = __cpu_khz << -info->tsc_shift;
++ else
++ cpu_khz = __cpu_khz >> info->tsc_shift;
++}
++
++static u64 get_nsec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++}
++
++static unsigned long get_usec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
++}
++
++static void __update_wallclock(time_t sec, long nsec)
++{
++ long wtm_nsec, xtime_nsec;
++ time_t wtm_sec, xtime_sec;
++ u64 tmp, wc_nsec;
++
++ /* Adjust wall-clock time base based on jiffies ticks. */
++ wc_nsec = processed_system_time;
++ wc_nsec += sec * (u64)NSEC_PER_SEC;
++ wc_nsec += nsec;
++
++ /* Split wallclock base into seconds and nanoseconds. */
++ tmp = wc_nsec;
++ xtime_nsec = do_div(tmp, 1000000000);
++ xtime_sec = (time_t)tmp;
++
++ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
++ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
++
++ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
++ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++
++ ntp_clear();
++}
++
++static void update_wallclock(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ do {
++ shadow_tv_version = s->wc_version;
++ rmb();
++ shadow_tv.tv_sec = s->wc_sec;
++ shadow_tv.tv_nsec = s->wc_nsec;
++ rmb();
++ } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++
++ if (!independent_wallclock)
++ __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
++}
++
++/*
++ * Reads a consistent set of time-base values from Xen, into a shadow data
++ * area.
++ */
++static void get_time_values_from_xen(int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ do {
++ dst->version = src->version;
++ rmb();
++ dst->tsc_timestamp = src->tsc_timestamp;
++ dst->system_timestamp = src->system_time;
++ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
++ dst->tsc_shift = src->tsc_shift;
++ rmb();
++ } while ((src->version & 1) | (dst->version ^ src->version));
++
++ dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++}
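get_time_values_from_xen() (and update_wallclock() above it) uses the same lock-free reader protocol as a seqlock: Xen moves the version field to an odd value before rewriting the record and to the next even value afterwards, so a copy is accepted only when the version was even and unchanged across the snapshot. Stripped of the Xen structures, the pattern looks like this (editorial sketch, not part of the patch):

/* Editorial sketch only -- not part of the patch. */
static inline void example_consistent_read(const volatile u32 *version,
					   void (*snapshot)(void *ctx), void *ctx)
{
	u32 seen;

	do {
		seen = *version;		/* odd => writer in progress */
		rmb();
		snapshot(ctx);			/* copy the guarded fields */
		rmb();
	} while ((*version & 1) | (seen ^ *version));
}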
++
++static inline int time_values_up_to_date(int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ rmb();
++ return (dst->version == src->version);
++}
++
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with. It is required for NMI access to the
++ * CMOS/RTC registers. See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
++
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
++{
++ unsigned char val;
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ val = inb_p(RTC_PORT(1));
++ lock_cmos_suffix(addr);
++ return val;
++}
++EXPORT_SYMBOL(rtc_cmos_read);
++
++void rtc_cmos_write(unsigned char val, unsigned char addr)
++{
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ outb_p(val, RTC_PORT(1));
++ lock_cmos_suffix(addr);
++}
++EXPORT_SYMBOL(rtc_cmos_write);
++
++/*
++ * This version of gettimeofday has microsecond resolution
++ * and better than microsecond precision on fast x86 machines with TSC.
++ */
++void do_gettimeofday(struct timeval *tv)
++{
++ unsigned long seq;
++ unsigned long usec, sec;
++ unsigned long max_ntp_tick;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ u32 local_time_version;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ do {
++ local_time_version = shadow->version;
++ seq = read_seqbegin(&xtime_lock);
++
++ usec = get_usec_offset(shadow);
++
++ /*
++ * If time_adjust is negative then NTP is slowing the clock
++		 * so make sure not to go into the next possible interval.
++		 * Better to lose some accuracy than have time go backwards.
++ */
++ if (unlikely(time_adjust < 0)) {
++ max_ntp_tick = (USEC_PER_SEC / HZ) - tickadj;
++ usec = min(usec, max_ntp_tick);
++ }
++
++ sec = xtime.tv_sec;
++ usec += (xtime.tv_nsec / NSEC_PER_USEC);
++
++ nsec = shadow->system_timestamp - processed_system_time;
++ __normalize_time(&sec, &nsec);
++ usec += (long)nsec / NSEC_PER_USEC;
++
++ if (unlikely(!time_values_up_to_date(cpu))) {
++ /*
++ * We may have blocked for a long time,
++ * rendering our calculations invalid
++ * (e.g. the time delta may have
++ * overflowed). Detect that and recalculate
++ * with fresh values.
++ */
++ get_time_values_from_xen(cpu);
++ continue;
++ }
++ } while (read_seqretry(&xtime_lock, seq) ||
++ (local_time_version != shadow->version));
++
++ put_cpu();
++
++ while (usec >= USEC_PER_SEC) {
++ usec -= USEC_PER_SEC;
++ sec++;
++ }
++
++ tv->tv_sec = sec;
++ tv->tv_usec = usec;
++}
++
++EXPORT_SYMBOL(do_gettimeofday);
++
++int do_settimeofday(struct timespec *tv)
++{
++ time_t sec;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ struct xen_platform_op op;
++
++ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++ return -EINVAL;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ write_seqlock_irq(&xtime_lock);
++
++ /*
++	 * Ensure we haven't been blocked for so long that our time delta has
++	 * overflowed. If that has happened then our shadow time values are
++	 * stale, so retry with fresh ones.
++ */
++ for (;;) {
++ nsec = tv->tv_nsec - get_nsec_offset(shadow);
++ if (time_values_up_to_date(cpu))
++ break;
++ get_time_values_from_xen(cpu);
++ }
++ sec = tv->tv_sec;
++ __normalize_time(&sec, &nsec);
++
++ if (is_initial_xendomain() && !independent_wallclock) {
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = shadow->system_timestamp;
++ HYPERVISOR_platform_op(&op);
++ update_wallclock();
++ } else if (independent_wallclock) {
++ nsec -= shadow->system_timestamp;
++ __normalize_time(&sec, &nsec);
++ __update_wallclock(sec, nsec);
++ }
++
++ write_sequnlock_irq(&xtime_lock);
++
++ put_cpu();
++
++ clock_was_set();
++ return 0;
++}
++
++EXPORT_SYMBOL(do_settimeofday);
++
++static void sync_xen_wallclock(unsigned long dummy);
++static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
++static void sync_xen_wallclock(unsigned long dummy)
++{
++ time_t sec;
++ s64 nsec;
++ struct xen_platform_op op;
++
++ if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
++ return;
++
++ write_seqlock_irq(&xtime_lock);
++
++ sec = xtime.tv_sec;
++ nsec = xtime.tv_nsec;
++ __normalize_time(&sec, &nsec);
++
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = processed_system_time;
++ HYPERVISOR_platform_op(&op);
++
++ update_wallclock();
++
++ write_sequnlock_irq(&xtime_lock);
++
++ /* Once per minute. */
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
++}
++
++static int set_rtc_mmss(unsigned long nowtime)
++{
++ int retval;
++ unsigned long flags;
++
++ if (independent_wallclock || !is_initial_xendomain())
++ return 0;
++
++ /* gets recalled with irq locally disabled */
++ /* XXX - does irqsave resolve this? -johnstul */
++ spin_lock_irqsave(&rtc_lock, flags);
++ retval = set_wallclock(nowtime);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++
++/* monotonic_clock(): returns # of nanoseconds passed since time_init()
++ * Note: This function is required to return accurate
++ * time even in the absence of multiple timer ticks.
++ */
++unsigned long long monotonic_clock(void)
++{
++ int cpu = get_cpu();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ u64 time;
++ u32 local_time_version;
++
++ do {
++ local_time_version = shadow->version;
++ barrier();
++ time = shadow->system_timestamp + get_nsec_offset(shadow);
++ if (!time_values_up_to_date(cpu))
++ get_time_values_from_xen(cpu);
++ barrier();
++ } while (local_time_version != shadow->version);
++
++ put_cpu();
++
++ return time;
++}
++EXPORT_SYMBOL(monotonic_clock);
++
++#ifdef __x86_64__
++unsigned long long sched_clock(void)
++{
++ return monotonic_clock();
++}
++#endif
++
++unsigned long profile_pc(struct pt_regs *regs)
++{
++ unsigned long pc = instruction_pointer(regs);
++
++#ifdef CONFIG_SMP
++ if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++#ifdef CONFIG_FRAME_POINTER
++ return *(unsigned long *)(regs->ebp + 4);
++#else
++ unsigned long *sp;
++ if ((regs->xcs & 3) == 0)
++ sp = (unsigned long *)&regs->esp;
++ else
++ sp = (unsigned long *)regs->esp;
++ /* Return address is either directly at stack pointer
++ or above a saved eflags. Eflags has bits 22-31 zero,
++ kernel addresses don't. */
++ if (sp[0] >> 22)
++ return sp[0];
++ if (sp[1] >> 22)
++ return sp[1];
++#endif
++ }
++#endif
++ return pc;
++}
++EXPORT_SYMBOL(profile_pc);
++
++/*
++ * This is the same as the above, except we _also_ save the current
++ * Time Stamp Counter value at the time of the timer interrupt, so that
++ * we later on can estimate the time of day more exactly.
++ */
++irqreturn_t timer_interrupt(int irq, void *dev_id)
++{
++ s64 delta, delta_cpu, stolen, blocked;
++ u64 sched_time;
++ int i, cpu = smp_processor_id();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++
++ /*
++ * Here we are in the timer irq handler. We just have irqs locally
++ * disabled but we don't know if the timer_bh is running on the other
++	 * CPU. We need to avoid an SMP race with it. NOTE: we don't need
++ * the irq version of write_lock because as just said we have irq
++ * locally disabled. -arca
++ */
++ write_seqlock(&xtime_lock);
++
++ do {
++ get_time_values_from_xen(cpu);
++
++ /* Obtain a consistent snapshot of elapsed wallclock cycles. */
++ delta = delta_cpu =
++ shadow->system_timestamp + get_nsec_offset(shadow);
++ delta -= processed_system_time;
++ delta_cpu -= per_cpu(processed_system_time, cpu);
++
++ /*
++ * Obtain a consistent snapshot of stolen/blocked cycles. We
++ * can use state_entry_time to detect if we get preempted here.
++ */
++ do {
++ sched_time = runstate->state_entry_time;
++ barrier();
++ stolen = runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline] -
++ per_cpu(processed_stolen_time, cpu);
++ blocked = runstate->time[RUNSTATE_blocked] -
++ per_cpu(processed_blocked_time, cpu);
++ barrier();
++ } while (sched_time != runstate->state_entry_time);
++ } while (!time_values_up_to_date(cpu));
++
++ if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
++ unlikely(delta_cpu < -(s64)permitted_clock_jitter))
++ && printk_ratelimit()) {
++ printk("Timer ISR/%d: Time went backwards: "
++ "delta=%lld delta_cpu=%lld shadow=%lld "
++ "off=%lld processed=%lld cpu_processed=%lld\n",
++ cpu, delta, delta_cpu, shadow->system_timestamp,
++ (s64)get_nsec_offset(shadow),
++ processed_system_time,
++ per_cpu(processed_system_time, cpu));
++ for (i = 0; i < num_online_cpus(); i++)
++ printk(" %d: %lld\n", i,
++ per_cpu(processed_system_time, i));
++ }
++
++ /* System-wide jiffy work. */
++ while (delta >= NS_PER_TICK) {
++ delta -= NS_PER_TICK;
++ processed_system_time += NS_PER_TICK;
++ do_timer(1);
++ }
++
++ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
++ update_wallclock();
++ clock_was_set();
++ }
++
++ write_sequnlock(&xtime_lock);
++
++ /*
++ * Account stolen ticks.
++ * HACK: Passing NULL to account_steal_time()
++ * ensures that the ticks are accounted as stolen.
++ */
++ if ((stolen > 0) && (delta_cpu > 0)) {
++ delta_cpu -= stolen;
++ if (unlikely(delta_cpu < 0))
++ stolen += delta_cpu; /* clamp local-time progress */
++ do_div(stolen, NS_PER_TICK);
++ per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
++ account_steal_time(NULL, (cputime_t)stolen);
++ }
++
++ /*
++ * Account blocked ticks.
++ * HACK: Passing idle_task to account_steal_time()
++ * ensures that the ticks are accounted as idle/wait.
++ */
++ if ((blocked > 0) && (delta_cpu > 0)) {
++ delta_cpu -= blocked;
++ if (unlikely(delta_cpu < 0))
++ blocked += delta_cpu; /* clamp local-time progress */
++ do_div(blocked, NS_PER_TICK);
++ per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK;
++ account_steal_time(idle_task(cpu), (cputime_t)blocked);
++ }
++
++ /* Account user/system ticks. */
++ if (delta_cpu > 0) {
++ do_div(delta_cpu, NS_PER_TICK);
++ per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
++ if (user_mode_vm(get_irq_regs()))
++ account_user_time(current, (cputime_t)delta_cpu);
++ else
++ account_system_time(current, HARDIRQ_OFFSET,
++ (cputime_t)delta_cpu);
++ }
++
++ /* Offlined for more than a few seconds? Avoid lockup warnings. */
++ if (stolen > 5*HZ)
++ touch_softlockup_watchdog();
++
++ /* Local timer processing (see update_process_times()). */
++ run_local_timers();
++ if (rcu_pending(cpu))
++ rcu_check_callbacks(cpu, user_mode_vm(get_irq_regs()));
++ scheduler_tick();
++ run_posix_cpu_timers(current);
++// JQ: Why this works on 2.6.16 & 2.6.18 and generates a page
++// fault on 2.6.19 is a mystery to me.
++// profile_tick(CPU_PROFILING);
++
++ return IRQ_HANDLED;
++}
++
++static void init_missing_ticks_accounting(int cpu)
++{
++ struct vcpu_register_runstate_memory_area area;
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++
++ memset(runstate, 0, sizeof(*runstate));
++
++ area.addr.v = runstate;
++ HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++
++ per_cpu(processed_blocked_time, cpu) =
++ runstate->time[RUNSTATE_blocked];
++ per_cpu(processed_stolen_time, cpu) =
++ runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline];
++}
++
++/* not static: needed by APM */
++unsigned long get_cmos_time(void)
++{
++ unsigned long retval;
++ unsigned long flags;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ retval = get_wallclock();
++
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++EXPORT_SYMBOL(get_cmos_time);
++
++static void sync_cmos_clock(unsigned long dummy);
++
++static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++
++static void sync_cmos_clock(unsigned long dummy)
++{
++ struct timeval now, next;
++ int fail = 1;
++
++ /*
++ * If we have an externally synchronized Linux clock, then update
++ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++ * called as close as possible to 500 ms before the new second starts.
++ * This code is run on a timer. If the clock is set, that timer
++ * may not expire at the correct time. Thus, we adjust...
++ */
++ if (!ntp_synced())
++ /*
++ * Not synced, exit, do not restart a timer (if one is
++ * running, let it run out).
++ */
++ return;
++
++ do_gettimeofday(&now);
++ if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
++ now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
++ fail = set_rtc_mmss(now.tv_sec);
++
++ next.tv_usec = USEC_AFTER - now.tv_usec;
++ if (next.tv_usec <= 0)
++ next.tv_usec += USEC_PER_SEC;
++
++ if (!fail)
++ next.tv_sec = 659;
++ else
++ next.tv_sec = 0;
++
++ if (next.tv_usec >= USEC_PER_SEC) {
++ next.tv_sec++;
++ next.tv_usec -= USEC_PER_SEC;
++ }
++ mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
++}
++
++void notify_arch_cmos_timer(void)
++{
++ mod_timer(&sync_cmos_timer, jiffies + 1);
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
++}
++
++static long clock_cmos_diff;
++static unsigned long sleep_start;
++
++static int timer_suspend(struct sys_device *dev, pm_message_t state)
++{
++ /*
++ * Estimate time zone so that set_time can update the clock
++ */
++ unsigned long ctime = get_cmos_time();
++
++ clock_cmos_diff = -ctime;
++ clock_cmos_diff += get_seconds();
++ sleep_start = ctime;
++ return 0;
++}
++
++static int timer_resume(struct sys_device *dev)
++{
++ unsigned long flags;
++ unsigned long sec;
++ unsigned long ctime = get_cmos_time();
++ long sleep_length = (ctime - sleep_start) * HZ;
++
++ if (sleep_length < 0) {
++ printk(KERN_WARNING "CMOS clock skew detected in timer resume!\n");
++ /* The time after the resume must not be earlier than the time
++ * before the suspend or some nasty things will happen
++ */
++ sleep_length = 0;
++ ctime = sleep_start;
++ }
++
++#ifdef CONFIG_HPET_TIMER
++ if (is_hpet_enabled())
++ hpet_reenable();
++#endif
++ sec = ctime + clock_cmos_diff;
++ write_seqlock_irqsave(&xtime_lock, flags);
++ xtime.tv_sec = sec;
++ xtime.tv_nsec = 0;
++ jiffies_64 += sleep_length;
++ write_sequnlock_irqrestore(&xtime_lock, flags);
++ touch_softlockup_watchdog();
++ return 0;
++}
++
++static struct sysdev_class timer_sysclass = {
++ .resume = timer_resume,
++ .suspend = timer_suspend,
++ set_kset_name("timer"),
++};
++
++
++/* XXX this driverfs stuff should probably go elsewhere later -john */
++static struct sys_device device_timer = {
++ .id = 0,
++ .cls = &timer_sysclass,
++};
++
++static int time_init_device(void)
++{
++ int error = sysdev_class_register(&timer_sysclass);
++ if (!error)
++ error = sysdev_register(&device_timer);
++ return error;
++}
++
++device_initcall(time_init_device);
++
++#ifdef CONFIG_HPET_TIMER
++extern void (*late_time_init)(void);
++/* Duplicate of time_init() below, with hpet_enable part added */
++static void __init hpet_time_init(void)
++{
++ xtime.tv_sec = get_cmos_time();
++ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++ set_normalized_timespec(&wall_to_monotonic,
++ -xtime.tv_sec, -xtime.tv_nsec);
++
++ if ((hpet_enable() >= 0) && hpet_use_timer) {
++ printk("Using HPET for base-timer\n");
++ }
++
++ do_time_init();
++}
++#endif
++
++/* Dynamically-mapped IRQ. */
++DEFINE_PER_CPU(int, timer_irq);
++
++extern void (*late_time_init)(void);
++static void setup_cpu0_timer_irq(void)
++{
++ per_cpu(timer_irq, 0) =
++ bind_virq_to_irqhandler(
++ VIRQ_TIMER,
++ 0,
++ timer_interrupt,
++ SA_INTERRUPT,
++ "timer0",
++ NULL);
++ BUG_ON(per_cpu(timer_irq, 0) < 0);
++}
++
++static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
++ .period_ns = NS_PER_TICK
++};
++
++void __init time_init(void)
++{
++#ifdef CONFIG_HPET_TIMER
++ if (is_hpet_capable()) {
++ /*
++ * HPET initialization needs to do memory-mapped io. So, let
++ * us do a late initialization after mem_init().
++ */
++ late_time_init = hpet_time_init;
++ return;
++ }
++#endif
++
++ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
++ &xen_set_periodic_tick);
++
++ get_time_values_from_xen(0);
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++ per_cpu(processed_system_time, 0) = processed_system_time;
++ init_missing_ticks_accounting(0);
++
++ update_wallclock();
++
++ init_cpu_khz();
++ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++ cpu_khz / 1000, cpu_khz % 1000);
++
++ /* Cannot request_irq() until kmem is initialised. */
++ late_time_init = setup_cpu0_timer_irq;
++}
++
++/* Convert jiffies to system time. */
++u64 jiffies_to_st(unsigned long j)
++{
++ unsigned long seq;
++ long delta;
++ u64 st;
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ delta = j - jiffies;
++ if (delta < 1) {
++ /* Triggers in some wrap-around cases, but that's okay:
++ * we just end up with a shorter timeout. */
++ st = processed_system_time + NS_PER_TICK;
++ } else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
++ /* Very long timeout means there is no pending timer.
++ * We indicate this to Xen by passing zero timeout. */
++ st = 0;
++ } else {
++ st = processed_system_time + delta * (u64)NS_PER_TICK;
++ }
++ } while (read_seqretry(&xtime_lock, seq));
++
++ return st;
++}
++EXPORT_SYMBOL(jiffies_to_st);
++
++/*
++ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
++ * These functions are based on implementations from arch/s390/kernel/time.c
++ */
++static void stop_hz_timer(void)
++{
++ struct vcpu_set_singleshot_timer singleshot;
++ unsigned int cpu = smp_processor_id();
++ unsigned long j;
++ int rc;
++
++ cpu_set(cpu, nohz_cpu_mask);
++
++	/*
++	 * See matching smp_mb in rcu_start_batch in rcupdate.c. These mbs
++	 * ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a
++	 * value of rcp->cur that matches rdp->quiescbatch and allows us to
++	 * stop the hz timer then the cpumasks created for subsequent values
++	 * of cur in rcu_start_batch are guaranteed to pick up the updated
++	 * nohz_cpu_mask and so will not depend on this cpu.
++	 */
++
++ smp_mb();
++
++ /* Leave ourselves in tick mode if rcu or softirq or timer pending. */
++ if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
++ (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
++ cpu_clear(cpu, nohz_cpu_mask);
++ j = jiffies + 1;
++ }
++
++ singleshot.timeout_abs_ns = jiffies_to_st(j);
++ singleshot.flags = 0;
++ rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot);
++#if CONFIG_XEN_COMPAT <= 0x030004
++ if (rc) {
++ BUG_ON(rc != -ENOSYS);
++ rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns);
++ }
++#endif
++ BUG_ON(rc);
++}
++
++static void start_hz_timer(void)
++{
++ cpu_clear(smp_processor_id(), nohz_cpu_mask);
++}
++
++void raw_safe_halt(void)
++{
++ stop_hz_timer();
++ /* Blocking includes an implicit local_irq_enable(). */
++ HYPERVISOR_block();
++ start_hz_timer();
++}
++EXPORT_SYMBOL(raw_safe_halt);
++
++void halt(void)
++{
++ if (irqs_disabled())
++ HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
++}
++EXPORT_SYMBOL(halt);
++
++/* No locking required. Interrupts are disabled on all CPUs. */
++void time_resume(void)
++{
++ unsigned int cpu;
++
++ init_cpu_khz();
++
++ for_each_online_cpu(cpu) {
++ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick);
++ get_time_values_from_xen(cpu);
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ }
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++
++ update_wallclock();
++}
++
++#ifdef CONFIG_SMP
++static char timer_name[NR_CPUS][15];
++
++int local_setup_timer(unsigned int cpu)
++{
++ int seq, irq;
++
++ BUG_ON(cpu == 0);
++
++ HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick);
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ /* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ } while (read_seqretry(&xtime_lock, seq));
++
++ sprintf(timer_name[cpu], "timer%d", cpu);
++ irq = bind_virq_to_irqhandler(VIRQ_TIMER,
++ cpu,
++ timer_interrupt,
++ SA_INTERRUPT,
++ timer_name[cpu],
++ NULL);
++ if (irq < 0)
++ return irq;
++ per_cpu(timer_irq, cpu) = irq;
++
++ return 0;
++}
++
++void local_teardown_timer(unsigned int cpu)
++{
++ BUG_ON(cpu == 0);
++ unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
++}
++#endif
++
++/*
++ * /proc/sys/xen: This really belongs in another file. It can stay here for
++ * now however.
++ */
++static ctl_table xen_subtable[] = {
++ {
++ .ctl_name = 1,
++ .procname = "independent_wallclock",
++ .data = &independent_wallclock,
++ .maxlen = sizeof(independent_wallclock),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {
++ .ctl_name = 2,
++ .procname = "permitted_clock_jitter",
++ .data = &permitted_clock_jitter,
++ .maxlen = sizeof(permitted_clock_jitter),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax
++ },
++ { 0 }
++};
++static ctl_table xen_table[] = {
++ {
++ .ctl_name = 123,
++ .procname = "xen",
++ .mode = 0555,
++ .child = xen_subtable},
++ { 0 }
++};
++static int __init xen_sysctl_init(void)
++{
++ (void)register_sysctl_table(xen_table, 0);
++ return 0;
++}
++__initcall(xen_sysctl_init);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/traps-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/traps-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,1132 @@
++/*
++ * linux/arch/i386/traps.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'asm.s'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/highmem.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++#include <linux/uaccess.h>
++#include <linux/nmi.h>
++#include <linux/bug.h>
++
++#ifdef CONFIG_EISA
++#include <linux/ioport.h>
++#include <linux/eisa.h>
++#endif
++
++#ifdef CONFIG_MCA
++#include <linux/mca.h>
++#endif
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/nmi.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/arch_hooks.h>
++#include <asm/kdebug.h>
++#include <asm/stacktrace.h>
++
++#include <linux/module.h>
++
++#include "mach_traps.h"
++
++int panic_on_unrecovered_nmi;
++
++asmlinkage int system_call(void);
++
++/* Do we ignore FPU interrupts ? */
++char ignore_fpu_irq = 0;
++
++#ifndef CONFIG_X86_NO_IDT
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++#endif
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void alignment_check(void);
++#ifndef CONFIG_XEN
++asmlinkage void spurious_interrupt_bug(void);
++#else
++asmlinkage void fixup_4gb_segment(void);
++#endif
++asmlinkage void machine_check(void);
++
++int kstack_depth_to_print = 24;
++ATOMIC_NOTIFIER_HEAD(i386die_chain);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++
++int unregister_die_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++{
++ return p > (void *)tinfo &&
++ p < (void *)tinfo + THREAD_SIZE - 3;
++}
++
++static inline unsigned long print_context_stack(struct thread_info *tinfo,
++ unsigned long *stack, unsigned long ebp,
++ struct stacktrace_ops *ops, void *data)
++{
++ unsigned long addr;
++
++#ifdef CONFIG_FRAME_POINTER
++ while (valid_stack_ptr(tinfo, (void *)ebp)) {
++ unsigned long new_ebp;
++ addr = *(unsigned long *)(ebp + 4);
++ ops->address(data, addr);
++ /*
++ * break out of recursive entries (such as
++ * end_of_stack_stop_unwind_function). Also,
++ * we can never allow a frame pointer to
++ * move downwards!
++ */
++ new_ebp = *(unsigned long *)ebp;
++ if (new_ebp <= ebp)
++ break;
++ ebp = new_ebp;
++ }
++#else
++ while (valid_stack_ptr(tinfo, stack)) {
++ addr = *stack++;
++ if (__kernel_text_address(addr))
++ ops->address(data, addr);
++ }
++#endif
++ return ebp;
++}
++
++#define MSG(msg) ops->warning(data, msg)
++
++void dump_trace(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *stack,
++ struct stacktrace_ops *ops, void *data)
++{
++ unsigned long ebp = 0;
++
++ if (!task)
++ task = current;
++
++ if (!stack) {
++ unsigned long dummy;
++ stack = &dummy;
++ if (task && task != current)
++ stack = (unsigned long *)task->thread.esp;
++ }
++
++#ifdef CONFIG_FRAME_POINTER
++ if (!ebp) {
++ if (task == current) {
++ /* Grab ebp right from our regs */
++ asm ("movl %%ebp, %0" : "=r" (ebp) : );
++ } else {
++ /* ebp is the last reg pushed by switch_to */
++ ebp = *(unsigned long *) task->thread.esp;
++ }
++ }
++#endif
++
++ while (1) {
++ struct thread_info *context;
++ context = (struct thread_info *)
++ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
++ ebp = print_context_stack(context, stack, ebp, ops, data);
++ /* Should be after the line below, but somewhere
++ in early boot context comes out corrupted and we
++ can't reference it -AK */
++ if (ops->stack(data, "IRQ") < 0)
++ break;
++ stack = (unsigned long*)context->previous_esp;
++ if (!stack)
++ break;
++ touch_nmi_watchdog();
++ }
++}
++EXPORT_SYMBOL(dump_trace);
++
++static void
++print_trace_warning_symbol(void *data, char *msg, unsigned long symbol)
++{
++ printk(data);
++ print_symbol(msg, symbol);
++ printk("\n");
++}
++
++static void print_trace_warning(void *data, char *msg)
++{
++ printk("%s%s\n", (char *)data, msg);
++}
++
++static int print_trace_stack(void *data, char *name)
++{
++ return 0;
++}
++
++/*
++ * Print one address/symbol entry per line.
++ */
++static void print_trace_address(void *data, unsigned long addr)
++{
++ printk("%s [<%08lx>] ", (char *)data, addr);
++ print_symbol("%s\n", addr);
++}
++
++static struct stacktrace_ops print_trace_ops = {
++ .warning = print_trace_warning,
++ .warning_symbol = print_trace_warning_symbol,
++ .stack = print_trace_stack,
++ .address = print_trace_address,
++};
++
++static void
++show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long * stack, char *log_lvl)
++{
++ dump_trace(task, regs, stack, &print_trace_ops, log_lvl);
++ printk("%s =======================\n", log_lvl);
++}
++
++void show_trace(struct task_struct *task, struct pt_regs *regs,
++ unsigned long * stack)
++{
++ show_trace_log_lvl(task, regs, stack, "");
++}
++
++static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *esp, char *log_lvl)
++{
++ unsigned long *stack;
++ int i;
++
++ if (esp == NULL) {
++ if (task)
++ esp = (unsigned long*)task->thread.esp;
++ else
++ esp = (unsigned long *)&esp;
++ }
++
++ stack = esp;
++ for(i = 0; i < kstack_depth_to_print; i++) {
++ if (kstack_end(stack))
++ break;
++ if (i && ((i % 8) == 0))
++ printk("\n%s ", log_lvl);
++ printk("%08lx ", *stack++);
++ }
++ printk("\n%sCall Trace:\n", log_lvl);
++ show_trace_log_lvl(task, regs, esp, log_lvl);
++}
++
++void show_stack(struct task_struct *task, unsigned long *esp)
++{
++ printk(" ");
++ show_stack_log_lvl(task, NULL, esp, "");
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++ unsigned long stack;
++
++ show_trace(current, NULL, &stack);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++ int i;
++ int in_kernel = 1;
++ unsigned long esp;
++ unsigned short ss;
++
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode_vm(regs)) {
++ in_kernel = 0;
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ print_modules();
++ printk(KERN_EMERG "CPU: %d\n"
++ KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
++ KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
++ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
++ print_tainted(), regs->eflags, init_utsname()->release,
++ (int)strcspn(init_utsname()->version, " "),
++ init_utsname()->version);
++ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
++ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
++ regs->eax, regs->ebx, regs->ecx, regs->edx);
++ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
++ regs->esi, regs->edi, regs->ebp, esp);
++ printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
++ regs->xds & 0xffff, regs->xes & 0xffff, ss);
++ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
++ TASK_COMM_LEN, current->comm, current->pid,
++ current_thread_info(), current, current->thread_info);
++ /*
++ * When in-kernel, we also print out the stack and code at the
++ * time of the fault..
++ */
++ if (in_kernel) {
++ u8 *eip;
++ int code_bytes = 64;
++ unsigned char c;
++
++ printk("\n" KERN_EMERG "Stack: ");
++ show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
++
++ printk(KERN_EMERG "Code: ");
++
++ eip = (u8 *)regs->eip - 43;
++ if (eip < (u8 *)PAGE_OFFSET ||
++ probe_kernel_address(eip, c)) {
++ /* try starting at EIP */
++ eip = (u8 *)regs->eip;
++ code_bytes = 32;
++ }
++ for (i = 0; i < code_bytes; i++, eip++) {
++ if (eip < (u8 *)PAGE_OFFSET ||
++ probe_kernel_address(eip, c)) {
++ printk(" Bad EIP value.");
++ break;
++ }
++ if (eip == (u8 *)regs->eip)
++ printk("<%02x> ", c);
++ else
++ printk("%02x ", c);
++ }
++ }
++ printk("\n");
++}
++
++int is_valid_bugaddr(unsigned long eip)
++{
++ unsigned short ud2;
++
++ if (eip < PAGE_OFFSET)
++ return 0;
++ if (probe_kernel_address((unsigned short *)eip, ud2))
++ return 0;
++
++ return ud2 == 0x0b0f;
++}
++
++/*
++ * This is gone through when something in the kernel has done something bad and
++ * is about to be terminated.
++ */
++void die(const char * str, struct pt_regs * regs, long err)
++{
++ static struct {
++ spinlock_t lock;
++ u32 lock_owner;
++ int lock_owner_depth;
++ } die = {
++ .lock = __SPIN_LOCK_UNLOCKED(die.lock),
++ .lock_owner = -1,
++ .lock_owner_depth = 0
++ };
++ static int die_counter;
++ unsigned long flags;
++
++ oops_enter();
++
++ if (die.lock_owner != raw_smp_processor_id()) {
++ console_verbose();
++ spin_lock_irqsave(&die.lock, flags);
++ die.lock_owner = smp_processor_id();
++ die.lock_owner_depth = 0;
++ bust_spinlocks(1);
++ }
++ else
++ local_save_flags(flags);
++
++ if (++die.lock_owner_depth < 3) {
++ int nl = 0;
++ unsigned long esp;
++ unsigned short ss;
++
++ report_bug(regs->eip);
++
++ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++ printk(KERN_EMERG "PREEMPT ");
++ nl = 1;
++#endif
++#ifdef CONFIG_SMP
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("SMP ");
++ nl = 1;
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("DEBUG_PAGEALLOC");
++ nl = 1;
++#endif
++ if (nl)
++ printk("\n");
++ if (notify_die(DIE_OOPS, str, regs, err,
++ current->thread.trap_no, SIGSEGV) !=
++ NOTIFY_STOP) {
++ show_registers(regs);
++ /* Executive summary in case the oops scrolled away */
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode(regs)) {
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
++ print_symbol("%s", regs->eip);
++ printk(" SS:ESP %04x:%08lx\n", ss, esp);
++ }
++ else
++ regs = NULL;
++ } else
++ printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
++
++ bust_spinlocks(0);
++ die.lock_owner = -1;
++ spin_unlock_irqrestore(&die.lock, flags);
++
++ if (!regs)
++ return;
++
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++
++ if (in_interrupt())
++ panic("Fatal exception in interrupt");
++
++ if (panic_on_oops)
++ panic("Fatal exception");
++
++ oops_exit();
++ do_exit(SIGSEGV);
++}
++
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
++{
++ if (!user_mode_vm(regs))
++ die(str, regs, err);
++}
++
++static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
++ struct pt_regs * regs, long error_code,
++ siginfo_t *info)
++{
++ struct task_struct *tsk = current;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
++ if (regs->eflags & VM_MASK) {
++ if (vm86)
++ goto vm86_trap;
++ goto trap_signal;
++ }
++
++ if (!user_mode(regs))
++ goto kernel_trap;
++
++ trap_signal: {
++ if (info)
++ force_sig_info(signr, info, tsk);
++ else
++ force_sig(signr, tsk);
++ return;
++ }
++
++ kernel_trap: {
++ if (!fixup_exception(regs))
++ die(str, regs, error_code);
++ return;
++ }
++
++ vm86_trap: {
++ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
++ if (ret) goto trap_signal;
++ return;
++ }
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
++}
++
++#define DO_VM86_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
++}
++
++#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
++}
++
++DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
++#ifndef CONFIG_KPROBES
++DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
++#endif
++DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
++DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
++
++fastcall void __kprobes do_general_protection(struct pt_regs * regs,
++ long error_code)
++{
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
++
++ if (regs->eflags & VM_MASK)
++ goto gp_in_vm86;
++
++ if (!user_mode(regs))
++ goto gp_in_kernel;
++
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
++ force_sig(SIGSEGV, current);
++ return;
++
++gp_in_vm86:
++ local_irq_enable();
++ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
++ return;
++
++gp_in_kernel:
++ if (!fixup_exception(regs)) {
++ if (notify_die(DIE_GPF, "general protection fault", regs,
++ error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ return;
++ die("general protection fault", regs, error_code);
++ }
++}
++
++static __kprobes void
++mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
++ "CPU %d.\n", reason, smp_processor_id());
++ printk(KERN_EMERG "You have some hardware problem, likely on the PCI bus.\n");
++ if (panic_on_unrecovered_nmi)
++ panic("NMI: Not continuing");
++
++ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
++
++ /* Clear and disable the memory parity error line. */
++ clear_mem_error(reason);
++}
++
++static __kprobes void
++io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
++ show_registers(regs);
++
++ /* Re-enable the IOCK line, wait for a few seconds */
++ clear_io_check_error(reason);
++}
++
++static __kprobes void
++unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++#ifdef CONFIG_MCA
++ /* Might actually be able to figure out what the guilty party
++ * is. */
++ if( MCA_bus ) {
++ mca_handle_nmi();
++ return;
++ }
++#endif
++ printk(KERN_EMERG "Uhhuh. NMI received for unknown reason %02x on "
++ "CPU %d.\n", reason, smp_processor_id());
++ printk(KERN_EMERG "Do you have a strange power saving mode enabled?\n");
++ if (panic_on_unrecovered_nmi)
++ panic("NMI: Not continuing");
++
++ printk(KERN_EMERG "Dazed and confused, but trying to continue\n");
++}
++
++static DEFINE_SPINLOCK(nmi_print_lock);
++
++void __kprobes die_nmi(struct pt_regs *regs, const char *msg)
++{
++ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
++ NOTIFY_STOP)
++ return;
++
++ spin_lock(&nmi_print_lock);
++ /*
++	 * We are in trouble anyway, let's at least try
++ * to get a message out.
++ */
++ bust_spinlocks(1);
++ printk(KERN_EMERG "%s", msg);
++ printk(" on CPU%d, eip %08lx, registers:\n",
++ smp_processor_id(), regs->eip);
++ show_registers(regs);
++ console_silent();
++ spin_unlock(&nmi_print_lock);
++ bust_spinlocks(0);
++
++	/* If we are in the kernel we are probably nested up pretty badly
++	 * and might as well get out now while we still can.
++ */
++ if (!user_mode_vm(regs)) {
++ current->thread.trap_no = 2;
++ crash_kexec(regs);
++ }
++
++ do_exit(SIGSEGV);
++}
++
++static __kprobes void default_do_nmi(struct pt_regs * regs)
++{
++ unsigned char reason = 0;
++
++ /* Only the BSP gets external NMIs from the system. */
++ if (!smp_processor_id())
++ reason = get_nmi_reason();
++
++ if (!(reason & 0xc0)) {
++ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++ == NOTIFY_STOP)
++ return;
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Ok, so this is none of the documented NMI sources,
++ * so it must be the NMI watchdog.
++ */
++ if (nmi_watchdog_tick(regs, reason))
++ return;
++ if (!do_nmi_callback(regs, smp_processor_id()))
++#endif
++ unknown_nmi_error(reason, regs);
++
++ return;
++ }
++ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++ return;
++ if (reason & 0x80)
++ mem_parity_error(reason, regs);
++ if (reason & 0x40)
++ io_check_error(reason, regs);
++ /*
++ * Reassert NMI in case it became active meanwhile
++ * as it's edge-triggered.
++ */
++ reassert_nmi();
++}
++
++fastcall __kprobes void do_nmi(struct pt_regs * regs, long error_code)
++{
++ int cpu;
++
++ nmi_enter();
++
++ cpu = smp_processor_id();
++
++ ++nmi_count(cpu);
++
++ default_do_nmi(regs);
++
++ nmi_exit();
++}
++
++#ifdef CONFIG_KPROBES
++fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
++{
++ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
++ == NOTIFY_STOP)
++ return;
++ /* This is an interrupt gate, because kprobes wants interrupts
++ disabled. Normal trap handlers don't. */
++ restore_interrupts(regs);
++ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
++}
++#endif
++
++/*
++ * Our handling of the processor debug registers is non-trivial.
++ * We do not clear them on entry and exit from the kernel. Therefore
++ * it is possible to get a watchpoint trap here from inside the kernel.
++ * However, the code in ./ptrace.c has ensured that the user can
++ * only set watchpoints on userspace addresses. Therefore the in-kernel
++ * watchpoint trap can only occur in code which is reading/writing
++ * from user space. Such code must not hold kernel locks (since it
++ * can equally take a page fault), therefore it is safe to call
++ * force_sig_info even though that claims and releases locks.
++ *
++ * Code in ./signal.c ensures that the debug control register
++ * is restored before we deliver any signal, and therefore that
++ * user code runs with the correct debug control register even though
++ * we clear it here.
++ *
++ * Being careful here means that we don't have to be as careful in a
++ * lot of more complicated places (task switching can be a bit lazy
++ * about restoring all the debug state, and ptrace doesn't have to
++ * find every occurrence of the TF bit that could be saved away even
++ * by user code)
++ */
++fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
++{
++ unsigned int condition;
++ struct task_struct *tsk = current;
++
++ get_debugreg(condition, 6);
++
++ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++ SIGTRAP) == NOTIFY_STOP)
++ return;
++ /* It's safe to allow irq's after DR6 has been saved */
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++
++ /* Mask out spurious debug traps due to lazy DR7 setting */
++ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++ if (!tsk->thread.debugreg[7])
++ goto clear_dr7;
++ }
++
++ if (regs->eflags & VM_MASK)
++ goto debug_vm86;
++
++ /* Save debug status register where ptrace can see it */
++ tsk->thread.debugreg[6] = condition;
++
++ /*
++ * Single-stepping through TF: make sure we ignore any events in
++ * kernel space (but re-enable TF when returning to user mode).
++ */
++ if (condition & DR_STEP) {
++ /*
++ * We already checked v86 mode above, so we can
++ * check for kernel mode by just checking the CPL
++ * of CS.
++ */
++ if (!user_mode(regs))
++ goto clear_TF_reenable;
++ }
++
++ /* Ok, finally something we can handle */
++ send_sigtrap(tsk, regs, error_code);
++
++ /* Disable additional traps. They'll be re-enabled when
++ * the signal is delivered.
++ */
++clear_dr7:
++ set_debugreg(0, 7);
++ return;
++
++debug_vm86:
++ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
++ return;
++
++clear_TF_reenable:
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++ regs->eflags &= ~TF_MASK;
++ return;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++void math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short cwd, swd;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 16;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * (~cwd & swd) will mask out exceptions that are not set to unmasked
++ * status. 0x3f is the exception bits in these regs, 0x200 is the
++ * C1 reg you need in case of a stack fault, 0x040 is the stack
++ * fault bit. We should only be taking one exception at a time,
++ * so if this combination doesn't produce any single exception,
++	 * then we have a bad program that isn't synchronizing its FPU usage
++ * and it will suffer the consequences since we won't be able to
++ * fully reproduce the context of the exception
++ */
++ cwd = get_fpu_cwd(task);
++ swd = get_fpu_swd(task);
++ switch (swd & ~cwd & 0x3f) {
++ case 0x000: /* No unmasked exception */
++ return;
++ default: /* Multiple exceptions */
++ break;
++ case 0x001: /* Invalid Op */
++ /*
++ * swd & 0x240 == 0x040: Stack Underflow
++ * swd & 0x240 == 0x240: Stack Overflow
++ * User must clear the SF bit (0x40) if set
++ */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++{
++ ignore_fpu_irq = 1;
++ math_error((void __user *)regs->eip);
++}
++
++static void simd_math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short mxcsr;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 19;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * The SIMD FPU exceptions are handled a little differently, as there
++ * is only a single status/control register. Thus, to determine which
++ * unmasked exception was caught we must mask the exception mask bits
++ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++ */
++ mxcsr = get_fpu_mxcsr(task);
++ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
++ long error_code)
++{
++ if (cpu_has_xmm) {
++ /* Handle SIMD FPU exceptions on PIII+ processors. */
++ ignore_fpu_irq = 1;
++ simd_math_error((void __user *)regs->eip);
++ } else {
++ /*
++ * Handle strange cache flush from user space exception
++ * in all other cases. This is undocumented behaviour.
++ */
++ if (regs->eflags & VM_MASK) {
++ handle_vm86_fault((struct kernel_vm86_regs *)regs,
++ error_code);
++ return;
++ }
++ current->thread.trap_no = 19;
++ current->thread.error_code = error_code;
++ die_if_kernel("cache flush denied", regs, error_code);
++ force_sig(SIGSEGV, current);
++ }
++}
++
++#ifndef CONFIG_XEN
++fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
++ long error_code)
++{
++#if 0
++ /* No need to warn about this any longer. */
++ printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
++#endif
++}
++
++fastcall unsigned long patch_espfix_desc(unsigned long uesp,
++ unsigned long kesp)
++{
++ int cpu = smp_processor_id();
++ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++ struct desc_struct *gdt = (struct desc_struct *)cpu_gdt_descr->address;
++ unsigned long base = (kesp - uesp) & -THREAD_SIZE;
++ unsigned long new_kesp = kesp - base;
++ unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
++ __u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];
++ /* Set up base for espfix segment */
++ desc &= 0x00f0ff0000000000ULL;
++ desc |= ((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
++ ((((__u64)base) << 32) & 0xff00000000000000ULL) |
++ ((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
++ (lim_pages & 0xffff);
++ *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;
++ return new_kesp;
++}
++#endif
++
++/*
++ * 'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ *
++ * Must be called with kernel preemption disabled (in this case,
++ * local interrupts are disabled at the call-site in entry.S).
++ */
++asmlinkage void math_state_restore(void)
++{
++ struct thread_info *thread = current_thread_info();
++ struct task_struct *tsk = thread->task;
++
++ /* NB. 'clts' is done for us by Xen during virtual trap. */
++ if (!tsk_used_math(tsk))
++ init_fpu(tsk);
++ restore_fpu(tsk);
++ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
++ tsk->fpu_counter++;
++}
++
++#ifndef CONFIG_MATH_EMULATION
++
++asmlinkage void math_emulate(long arg)
++{
++ printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
++ printk(KERN_EMERG "killing %s.\n",current->comm);
++ force_sig(SIGFPE,current);
++ schedule();
++}
++
++#endif /* CONFIG_MATH_EMULATION */
++
++#ifdef CONFIG_X86_F00F_BUG
++void __init trap_init_f00f_bug(void)
++{
++ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
++
++ /*
++ * Update the IDT descriptor and reload the IDT so that
++ * it uses the read-only mapped virtual address.
++ */
++ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ load_idt(&idt_descr);
++}
++#endif
++
++
++/*
++ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
++ * for those that specify <dpl>|4 in the second field.
++ */
++static trap_info_t trap_table[] = {
++ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
++ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
++ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
++ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
++ { 5, 0, __KERNEL_CS, (unsigned long)bounds },
++ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
++ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
++ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
++ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
++ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
++ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
++ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
++ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
++ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
++ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
++ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
++#ifdef CONFIG_X86_MCE
++ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
++#endif
++ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
++ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
++ { 0, 0, 0, 0 }
++};
++
++void __init trap_init(void)
++{
++ HYPERVISOR_set_trap_table(trap_table);
++
++ if (cpu_has_fxsr) {
++ /*
++ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
++ * Generates a compile-time "error: zero width for bit-field" if
++ * the alignment is wrong.
++ */
++ struct fxsrAlignAssert {
++ int _:!(offsetof(struct task_struct,
++ thread.i387.fxsave) & 15);
++ };
++
++ printk(KERN_INFO "Enabling fast FPU save and restore... ");
++ set_in_cr4(X86_CR4_OSFXSR);
++ printk("done.\n");
++ }
++ if (cpu_has_xmm) {
++ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
++ "support... ");
++ set_in_cr4(X86_CR4_OSXMMEXCPT);
++ printk("done.\n");
++ }
++
++ /*
++ * Should be a barrier for any external CPU state.
++ */
++ cpu_init();
++}
++
++void smp_trap_init(trap_info_t *trap_ctxt)
++{
++ trap_info_t *t = trap_table;
++
++ for (t = trap_table; t->address; t++) {
++ trap_ctxt[t->vector].flags = t->flags;
++ trap_ctxt[t->vector].cs = t->cs;
++ trap_ctxt[t->vector].address = t->address;
++ }
++}
++
++static int __init kstack_setup(char *s)
++{
++ kstack_depth_to_print = simple_strtoul(s, NULL, 0);
++ return 1;
++}
++__setup("kstack=", kstack_setup);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/traps.c
+--- a/arch/i386/kernel/traps.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/traps.c Wed Aug 08 16:25:28 2007 -0300
+@@ -402,7 +402,6 @@ void die(const char * str, struct pt_reg
+ unsigned short ss;
+
+ report_bug(regs->eip);
+-
+ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
+ #ifdef CONFIG_PREEMPT
+ printk(KERN_EMERG "PREEMPT ");
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/vm86.c
+--- a/arch/i386/kernel/vm86.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/vm86.c Wed Aug 08 16:25:28 2007 -0300
+@@ -125,7 +125,9 @@ struct pt_regs * FASTCALL(save_v86_state
+ struct pt_regs * FASTCALL(save_v86_state(struct kernel_vm86_regs * regs));
+ struct pt_regs * fastcall save_v86_state(struct kernel_vm86_regs * regs)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ struct pt_regs *ret;
+ unsigned long tmp;
+
+@@ -148,12 +150,16 @@ struct pt_regs * fastcall save_v86_state
+ do_exit(SIGSEGV);
+ }
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ current->thread.esp0 = current->thread.saved_esp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_esp0(tss, &current->thread);
+ current->thread.saved_esp0 = 0;
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ ret = KVM86->regs32;
+
+@@ -279,7 +285,9 @@ out:
+
+ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ /*
+ * make sure the vm86() system call doesn't try to do anything silly
+ */
+@@ -324,12 +332,16 @@ static void do_sys_vm86(struct kernel_vm
+ savesegment(fs, tsk->thread.saved_fs);
+ tsk->thread.saved_gs = info->regs32->xgs;
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ tsk->thread.esp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+ load_esp0(tss, &tsk->thread);
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ tsk->thread.screen_bitmap = info->screen_bitmap;
+ if (info->flags & VM86_SCREEN_BITMAP)
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/vmlinux.lds.S
+--- a/arch/i386/kernel/vmlinux.lds.S Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/kernel/vmlinux.lds.S Wed Aug 08 16:25:28 2007 -0300
+@@ -35,7 +35,13 @@ PHDRS {
+ }
+ SECTIONS
+ {
++/* xen i386 redefineds LOAD_OFFSET to zero on page.h
++ quintela@redhat.com */
++#ifdef CONFIG_XEN
++ . = __PAGE_OFFSET + LOAD_PHYSICAL_ADDR;
++#else
+ . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
++#endif
+ phys_startup_32 = startup_32 - LOAD_OFFSET;
+ /* read-only */
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/kernel/vsyscall-note-xen.S
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/kernel/vsyscall-note-xen.S Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,32 @@
++/*
++ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
++ * Here we can supply some information useful to userland.
++ * First we get the vanilla i386 note that supplies the kernel version info.
++ */
++
++#include "vsyscall-note.S"
++
++/*
++ * Now we add a special note telling glibc's dynamic linker a fake hardware
++ * flavor that it will use to choose the search path for libraries in the
++ * same way it uses real hardware capabilities like "mmx".
++ * We supply "nosegneg" as the fake capability, to indicate that we
++ * do not like negative offsets in instructions using segment overrides,
++ * since we implement those inefficiently. This makes it possible to
++ * install libraries optimized to avoid those access patterns in someplace
++ * like /lib/i686/tls/nosegneg. Note that an /etc/ld.so.conf.d/file
++ * corresponding to the bits here is needed to make ldconfig work right.
++ * It should contain:
++ * hwcap 0 nosegneg
++ * to match the mapping of bit to name that we give here.
++ */
++#define NOTE_KERNELCAP_BEGIN(ncaps, mask) \
++ ASM_ELF_NOTE_BEGIN(".note.kernelcap", "a", "GNU", 2) \
++ .long ncaps, mask
++#define NOTE_KERNELCAP(bit, name) \
++ .byte bit; .asciz name
++#define NOTE_KERNELCAP_END ASM_ELF_NOTE_END
++
++NOTE_KERNELCAP_BEGIN(1, 1)
++NOTE_KERNELCAP(0, "nosegneg")
++NOTE_KERNELCAP_END
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mach-xen/Makefile
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mach-xen/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,5 @@
++#
++# Makefile for the linux kernel.
++#
++
++obj-y := setup.o
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mach-xen/setup.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mach-xen/setup.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,168 @@
++/*
++ * Machine specific setup for generic
++ */
++
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <asm/acpi.h>
++#include <asm/arch_hooks.h>
++#include <asm/e820.h>
++#include <asm/setup.h>
++#include <asm/fixmap.h>
++
++#include <xen/interface/callback.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_HOTPLUG_CPU
++#define DEFAULT_SEND_IPI (1)
++#else
++#define DEFAULT_SEND_IPI (0)
++#endif
++
++int no_broadcast=DEFAULT_SEND_IPI;
++
++static __init int no_ipi_broadcast(char *str)
++{
++ get_option(&str, &no_broadcast);
++ printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
++ "IPI Broadcast");
++ return 1;
++}
++
++__setup("no_ipi_broadcast", no_ipi_broadcast);
++
++static int __init print_ipi_mode(void)
++{
++ printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
++ "Shortcut");
++ return 0;
++}
++
++late_initcall(print_ipi_mode);
++
++/**
++ * machine_specific_memory_setup - Hook for machine specific memory setup.
++ *
++ * Description:
++ * This is included late in kernel/setup.c so that it can make
++ * use of all of the static functions.
++ **/
++
++char * __init machine_specific_memory_setup(void)
++{
++ int rc;
++ struct xen_memory_map memmap;
++ /*
++ * This is rather large for a stack variable but this early in
++	 * the boot process we know we have plenty of slack space.
++ */
++ struct e820entry map[E820MAX];
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, map);
++
++ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++ if ( rc == -ENOSYS ) {
++ memmap.nr_entries = 1;
++ map[0].addr = 0ULL;
++ map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
++ /* 8MB slack (to balance backend allocations). */
++ map[0].size += 8ULL << 20;
++ map[0].type = E820_RAM;
++ rc = 0;
++ }
++ BUG_ON(rc);
++
++ sanitize_e820_map(map, (char *)&memmap.nr_entries);
++
++ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++
++ return "Xen";
++}
++
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++unsigned long *machine_to_phys_mapping;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
++void __init machine_specific_arch_setup(void)
++{
++ int ret;
++ struct xen_machphys_mapping mapping;
++ unsigned long machine_to_phys_nr_ents;
++ struct xen_platform_parameters pp;
++ static struct callback_register __initdata event = {
++ .type = CALLBACKTYPE_event,
++ .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
++ };
++ static struct callback_register __initdata failsafe = {
++ .type = CALLBACKTYPE_failsafe,
++ .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
++ };
++ static struct callback_register __initdata nmi_cb = {
++ .type = CALLBACKTYPE_nmi,
++ .address = { __KERNEL_CS, (unsigned long)nmi },
++ };
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS)
++ ret = HYPERVISOR_set_callbacks(
++ event.address.cs, event.address.eip,
++ failsafe.address.cs, failsafe.address.eip);
++#endif
++ BUG_ON(ret);
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS) {
++ static struct xennmi_callback __initdata cb = {
++ .handler_address = (unsigned long)nmi
++ };
++
++ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ }
++#endif
++
++ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0) {
++ hypervisor_virt_start = pp.virt_start;
++ set_fixaddr_top();
++ }
++
++ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
++ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++ machine_to_phys_nr_ents = mapping.max_mfn + 1;
++ }
++ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
++ machine_to_phys_order++;
++}
++
++/**
++ * pre_setup_arch_hook - hook called prior to any setup_arch() execution
++ *
++ * Description:
++ * generally used to activate any machine specific identification
++ * routines that may be needed before setup_arch() runs. On VISWS
++ * this is used to get the board revision and type.
++ **/
++void __init pre_setup_arch_hook(void)
++{
++ int max_cmdline;
++
++ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++ max_cmdline = COMMAND_LINE_SIZE;
++ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++ /* Save unparsed command line copy for /proc/cmdline */
++ saved_command_line[max_cmdline-1] = '\0';
++}
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mm/Makefile
+--- a/arch/i386/mm/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/mm/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -8,3 +8,11 @@ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpag
+ obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+ obj-$(CONFIG_HIGHMEM) += highmem.o
+ obj-$(CONFIG_BOOT_IOREMAP) += boot_ioremap.o
++
++ifdef CONFIG_XEN
++include $(srctree)/scripts/Makefile.xen
++
++obj-y += hypervisor.o
++
++obj-y := $(call cherrypickxen, $(obj-y))
++endif
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mm/fault-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/fault-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,760 @@
++/*
++ * linux/arch/i386/mm/fault.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++#include <linux/uaccess.h>
++
++#include <asm/system.h>
++#include <asm/desc.h>
++#include <asm/kdebug.h>
++#include <asm/segment.h>
++
++extern void die(const char *,struct pt_regs *,long);
++
++static ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++EXPORT_SYMBOL_GPL(register_page_fault_notifier);
++
++int unregister_page_fault_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++EXPORT_SYMBOL_GPL(unregister_page_fault_notifier);
++
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ struct die_args args = {
++ .regs = regs,
++ .str = str,
++ .err = err,
++ .trapnr = trap,
++ .signr = sig
++ };
++ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++}
++
++/*
++ * Unlock any spinlocks which will prevent us from getting the
++ * message out
++ */
++void bust_spinlocks(int yes)
++{
++ int loglevel_save = console_loglevel;
++
++ if (yes) {
++ oops_in_progress = 1;
++ return;
++ }
++#ifdef CONFIG_VT
++ unblank_screen();
++#endif
++ oops_in_progress = 0;
++ /*
++ * OK, the message is on the console. Now we call printk()
++ * without oops_in_progress set so that printk will give klogd
++ * a poke. Hold onto your hats...
++ */
++ console_loglevel = 15; /* NMI oopser may have shut the console up */
++ printk(" ");
++ console_loglevel = loglevel_save;
++}
++
++/*
++ * Return EIP plus the CS segment base. The segment limit is also
++ * adjusted, clamped to the kernel/user address space (whichever is
++ * appropriate), and returned in *eip_limit.
++ *
++ * The segment is checked, because it might have been changed by another
++ * task between the original faulting instruction and here.
++ *
++ * If CS is no longer a valid code segment, or if EIP is beyond the
++ * limit, or if it is a kernel address when CS is not a kernel segment,
++ * then the returned value will be greater than *eip_limit.
++ *
++ * This is slow, but is very rarely executed.
++ */
++static inline unsigned long get_segment_eip(struct pt_regs *regs,
++ unsigned long *eip_limit)
++{
++ unsigned long eip = regs->eip;
++ unsigned seg = regs->xcs & 0xffff;
++ u32 seg_ar, seg_limit, base, *desc;
++
++ /* Unlikely, but must come before segment checks. */
++ if (unlikely(regs->eflags & VM_MASK)) {
++ base = seg << 4;
++ *eip_limit = base + 0xffff;
++ return base + (eip & 0xffff);
++ }
++
++ /* The standard kernel/user address space limit. */
++ *eip_limit = user_mode(regs) ? USER_DS.seg : KERNEL_DS.seg;
++
++ /* By far the most common cases. */
++ if (likely(SEGMENT_IS_FLAT_CODE(seg)))
++ return eip;
++
++ /* Check the segment exists, is within the current LDT/GDT size,
++ that kernel/user (ring 0..3) has the appropriate privilege,
++ that it's a code segment, and get the limit. */
++ __asm__ ("larl %3,%0; lsll %3,%1"
++ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
++ if ((~seg_ar & 0x9800) || eip > seg_limit) {
++ *eip_limit = 0;
++ return 1; /* So that returned eip > *eip_limit. */
++ }
++
++ /* Get the GDT/LDT descriptor base.
++ When you look for races in this code remember that
++ LDT and other horrors are only used in user space. */
++ if (seg & (1<<2)) {
++ /* Must lock the LDT while reading it. */
++ down(&current->mm->context.sem);
++ desc = current->mm->context.ldt;
++ desc = (void *)desc + (seg & ~7);
++ } else {
++ /* Must disable preemption while reading the GDT. */
++ desc = (u32 *)get_cpu_gdt_table(get_cpu());
++ desc = (void *)desc + (seg & ~7);
++ }
++
++ /* Decode the code segment base from the descriptor */
++ base = get_desc_base((unsigned long *)desc);
++
++ if (seg & (1<<2)) {
++ up(&current->mm->context.sem);
++ } else
++ put_cpu();
++
++ /* Adjust EIP and segment limit, and clamp at the kernel limit.
++ It's legitimate for segments to wrap at 0xffffffff. */
++ seg_limit += base;
++ if (seg_limit < *eip_limit && seg_limit >= base)
++ *eip_limit = seg_limit;
++ return eip + base;
++}
++
++/*
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ */
++static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
++{
++ unsigned long limit;
++ unsigned char *instr = (unsigned char *)get_segment_eip (regs, &limit);
++ int scan_more = 1;
++ int prefetch = 0;
++ int i;
++
++ for (i = 0; scan_more && i < 15; i++) {
++ unsigned char opcode;
++ unsigned char instr_hi;
++ unsigned char instr_lo;
++
++ if (instr > (unsigned char *)limit)
++ break;
++ if (probe_kernel_address(instr, opcode))
++ break;
++
++ instr_hi = opcode & 0xf0;
++ instr_lo = opcode & 0x0f;
++ instr++;
++
++ switch (instr_hi) {
++ case 0x20:
++ case 0x30:
++ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
++ scan_more = ((instr_lo & 7) == 0x6);
++ break;
++
++ case 0x60:
++ /* 0x64 thru 0x67 are valid prefixes in all modes. */
++ scan_more = (instr_lo & 0xC) == 0x4;
++ break;
++ case 0xF0:
++ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
++ scan_more = !instr_lo || (instr_lo>>1) == 1;
++ break;
++ case 0x00:
++ /* Prefetch instruction is 0x0F0D or 0x0F18 */
++ scan_more = 0;
++ if (instr > (unsigned char *)limit)
++ break;
++ if (probe_kernel_address(instr, opcode))
++ break;
++ prefetch = (instr_lo == 0xF) &&
++ (opcode == 0x0D || opcode == 0x18);
++ break;
++ default:
++ scan_more = 0;
++ break;
++ }
++ }
++ return prefetch;
++}
++
++static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++ unsigned long error_code)
++{
++ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 6)) {
++ /* Catch an obscure case of prefetch inside an NX page. */
++ if (nx_enabled && (error_code & 16))
++ return 0;
++ return __is_prefetch(regs, addr);
++ }
++ return 0;
++}
++
++static noinline void force_sig_info_fault(int si_signo, int si_code,
++ unsigned long address, struct task_struct *tsk)
++{
++ siginfo_t info;
++
++ info.si_signo = si_signo;
++ info.si_errno = 0;
++ info.si_code = si_code;
++ info.si_addr = (void __user *)address;
++ force_sig_info(si_signo, &info, tsk);
++}
++
++fastcall void do_invalid_op(struct pt_regs *, unsigned long);
++
++#ifdef CONFIG_X86_PAE
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long *p, page;
++ unsigned long mfn;
++
++ page = read_cr3();
++ p = (unsigned long *)__va(page);
++ p += (address >> 30) * 2;
++ printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
++ if (p[0] & 1) {
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *)__va(page);
++ address &= 0x3fffffff;
++ p += (address >> 21) * 2;
++ printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++#ifdef CONFIG_HIGHPTE
++ if (mfn_to_pfn(mfn) >= highstart_pfn)
++ return;
++#endif
++ if (p[0] & 1) {
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *) __va(page);
++ address &= 0x001fffff;
++ p += (address >> 12) * 2;
++ printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ }
++ }
++}
++#else
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long page;
++
++ page = read_cr3();
++ page = ((unsigned long *) __va(page))[address >> 22];
++ if (oops_may_print())
++ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ /*
++ * We must not directly access the pte in the highpte
++ * case if the page table is located in highmem.
++	 * And let's rather not kmap-atomic the pte, just in case
++ * it's allocated already.
++ */
++#ifdef CONFIG_HIGHPTE
++ if ((page >> PAGE_SHIFT) >= highstart_pfn)
++ return;
++#endif
++ if ((page & 1) && oops_may_print()) {
++ page &= PAGE_MASK;
++ address &= 0x003ff000;
++ page = machine_to_phys(page);
++ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
++ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ }
++}
++#endif
++
++static int spurious_fault(struct pt_regs *regs,
++ unsigned long address,
++ unsigned long error_code)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ /* Reserved-bit violation or user access to kernel space? */
++ if (error_code & 0x0c)
++ return 0;
++
++ pgd = init_mm.pgd + pgd_index(address);
++ if (!pgd_present(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return 0;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return 0;
++ if ((error_code & 0x02) && !pte_write(*pte))
++ return 0;
++#ifdef CONFIG_X86_PAE
++ if ((error_code & 0x10) && (pte_val(*pte) & _PAGE_NX))
++ return 0;
++#endif
++
++ return 1;
++}
++
++static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++{
++ unsigned index = pgd_index(address);
++ pgd_t *pgd_k;
++ pud_t *pud, *pud_k;
++ pmd_t *pmd, *pmd_k;
++
++ pgd += index;
++ pgd_k = init_mm.pgd + index;
++
++ if (!pgd_present(*pgd_k))
++ return NULL;
++
++ /*
++ * set_pgd(pgd, *pgd_k); here would be useless on PAE
++ * and redundant with the set_pmd() on non-PAE. As would
++ * set_pud.
++ */
++
++ pud = pud_offset(pgd, address);
++ pud_k = pud_offset(pgd_k, address);
++ if (!pud_present(*pud_k))
++ return NULL;
++
++ pmd = pmd_offset(pud, address);
++ pmd_k = pmd_offset(pud_k, address);
++ if (!pmd_present(*pmd_k))
++ return NULL;
++ if (!pmd_present(*pmd))
++#ifndef CONFIG_XEN
++ set_pmd(pmd, *pmd_k);
++#else
++ /*
++ * When running on Xen we must launder *pmd_k through
++ * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
++ */
++ set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
++#endif
++ else
++ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ return pmd_k;
++}
++
++/*
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * This assumes no large pages in there.
++ */
++static inline int vmalloc_fault(unsigned long address)
++{
++ unsigned long pgd_paddr;
++ pmd_t *pmd_k;
++ pte_t *pte_k;
++ /*
++ * Synchronize this task's top level page-table
++ * with the 'reference' page table.
++ *
++ * Do _not_ use "current" here. We might be inside
++ * an interrupt in the middle of a task switch..
++ */
++ pgd_paddr = read_cr3();
++ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++ if (!pmd_k)
++ return -1;
++ pte_k = pte_offset_kernel(pmd_k, address);
++ if (!pte_present(*pte_k))
++ return -1;
++ return 0;
++}
++
++/*
++ * This routine handles page faults. It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ *
++ * error_code:
++ * bit 0 == 0 means no page found, 1 means protection fault
++ * bit 1 == 0 means read, 1 means write
++ * bit 2 == 0 means kernel, 1 means user-mode
++ * bit 3 == 1 means use of reserved bit detected
++ * bit 4 == 1 means fault was an instruction fetch
++ */
++fastcall void __kprobes do_page_fault(struct pt_regs *regs,
++ unsigned long error_code)
++{
++ struct task_struct *tsk;
++ struct mm_struct *mm;
++ struct vm_area_struct * vma;
++ unsigned long address;
++ int write, si_code;
++
++ /* get the address */
++ address = read_cr2();
++
++ /* Set the "privileged fault" bit to something sane. */
++ error_code &= ~4;
++ error_code |= (regs->xcs & 2) << 1;
++ if (regs->eflags & X86_EFLAGS_VM)
++ error_code |= 4;
++
++ tsk = current;
++
++ si_code = SEGV_MAPERR;
++
++ /*
++ * We fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * This verifies that the fault happens in kernel space
++ * (error_code & 4) == 0, and that the fault was not a
++ * protection error (error_code & 9) == 0.
++ */
++ if (unlikely(address >= TASK_SIZE)) {
++#ifdef CONFIG_XEN
++ /* Faults in hypervisor area can never be patched up. */
++ if (address >= hypervisor_virt_start)
++ goto bad_area_nosemaphore;
++#endif
++ if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
++ return;
++ /* Can take a spurious fault if mapping changes R/O -> R/W. */
++ if (spurious_fault(regs, address, error_code))
++ return;
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++ /*
++ * Don't take the mm semaphore here. If we fixup a prefetch
++ * fault we could otherwise deadlock.
++ */
++ goto bad_area_nosemaphore;
++ }
++
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++
++ /* It's safe to allow irq's after cr2 has been saved and the vmalloc
++ fault has been handled. */
++ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
++ local_irq_enable();
++
++ mm = tsk->mm;
++
++ /*
++ * If we're in an interrupt, have no user context or are running in an
++ * atomic region then we must not take the fault..
++ */
++ if (in_atomic() || !mm)
++ goto bad_area_nosemaphore;
++
++ /* When running in the kernel we expect faults to occur only to
++ * addresses in user space. All other faults represent errors in the
++	 * kernel and should generate an OOPS. Unfortunately, in the case of an
++ * erroneous fault occurring in a code path which already holds mmap_sem
++ * we will deadlock attempting to validate the fault against the
++ * address space. Luckily the kernel only validly references user
++ * space from well defined areas of code, which are listed in the
++ * exceptions table.
++ *
++ * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++ * Attempt to lock the address space, if we cannot we then validate the
++ * source. If this is invalid we can skip the address space check,
++ * thus avoiding the deadlock.
++ */
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ if ((error_code & 4) == 0 &&
++ !search_exception_tables(regs->eip))
++ goto bad_area_nosemaphore;
++ down_read(&mm->mmap_sem);
++ }
++
++ vma = find_vma(mm, address);
++ if (!vma)
++ goto bad_area;
++ if (vma->vm_start <= address)
++ goto good_area;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto bad_area;
++ if (error_code & 4) {
++ /*
++ * Accessing the stack below %esp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535,$31" pushes
++ * 32 pointers and then decrements %esp by 65535.)
++ */
++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
++ goto bad_area;
++ }
++ if (expand_stack(vma, address))
++ goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++ si_code = SEGV_ACCERR;
++ write = 0;
++ switch (error_code & 3) {
++ default: /* 3: write, present */
++ /* fall through */
++ case 2: /* write, not present */
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ write++;
++ break;
++ case 1: /* read, present */
++ goto bad_area;
++ case 0: /* read, not present */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
++ goto bad_area;
++ }
++
++ survive:
++ /*
++ * If for any reason at all we couldn't handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ switch (handle_mm_fault(mm, vma, address, write)) {
++ case VM_FAULT_MINOR:
++ tsk->min_flt++;
++ break;
++ case VM_FAULT_MAJOR:
++ tsk->maj_flt++;
++ break;
++ case VM_FAULT_SIGBUS:
++ goto do_sigbus;
++ case VM_FAULT_OOM:
++ goto out_of_memory;
++ default:
++ BUG();
++ }
++
++ /*
++ * Did it hit the DOS screen memory VA from vm86 mode?
++ */
++ if (regs->eflags & VM_MASK) {
++ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++ if (bit < 32)
++ tsk->thread.screen_bitmap |= 1 << bit;
++ }
++ up_read(&mm->mmap_sem);
++ return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++ /* User mode accesses just cause a SIGSEGV */
++ if (error_code & 4) {
++ /*
++ * Valid to do another page fault here because this one came
++ * from user space.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ /* Kernel addresses are always protection faults */
++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++ return;
++ }
++
++#ifdef CONFIG_X86_F00F_BUG
++ /*
++ * Pentium F0 0F C7 C8 bug workaround.
++ */
++ if (boot_cpu_data.f00f_bug) {
++ unsigned long nr;
++
++ nr = (address - idt_descr.address) >> 3;
++
++ if (nr == 6) {
++ do_invalid_op(regs, 0);
++ return;
++ }
++ }
++#endif
++
++no_context:
++ /* Are we prepared to handle this kernel fault? */
++ if (fixup_exception(regs))
++ return;
++
++ /*
++ * Valid to do another page fault here, because if this fault
++ * had been triggered by is_prefetch fixup_exception would have
++ * handled it.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++ bust_spinlocks(1);
++
++ if (oops_may_print()) {
++ #ifdef CONFIG_X86_PAE
++ if (error_code & 16) {
++ pte_t *pte = lookup_address(address);
++
++ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
++ printk(KERN_CRIT "kernel tried to execute "
++ "NX-protected page - exploit attempt? "
++ "(uid: %d)\n", current->uid);
++ }
++ #endif
++ if (address < PAGE_SIZE)
++ printk(KERN_ALERT "BUG: unable to handle kernel NULL "
++ "pointer dereference");
++ else
++ printk(KERN_ALERT "BUG: unable to handle kernel paging"
++ " request");
++ printk(" at virtual address %08lx\n",address);
++ printk(KERN_ALERT " printing eip:\n");
++ printk("%08lx\n", regs->eip);
++ }
++ dump_fault_path(address);
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ die("Oops", regs, error_code);
++ bust_spinlocks(0);
++ do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (is_init(tsk)) {
++ yield();
++ down_read(&mm->mmap_sem);
++ goto survive;
++ }
++ printk("VM: killing process %s\n", tsk->comm);
++ if (error_code & 4)
++ do_exit(SIGKILL);
++ goto no_context;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++
++ /* Kernel mode? Handle exceptions or die */
++ if (!(error_code & 4))
++ goto no_context;
++
++ /* User space => ok to do another page fault */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++}
++
++#if !HAVE_SHARED_KERNEL_PMD
++void vmalloc_sync_all(void)
++{
++ /*
++ * Note that races in the updates of insync and start aren't
++ * problematic: insync can only get set bits added, and updates to
++ * start are only improving performance (without affecting correctness
++ * if undone).
++ */
++ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++ static unsigned long start = TASK_SIZE;
++ unsigned long address;
++
++ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
++ for (address = start; address >= TASK_SIZE; address += PGDIR_SIZE) {
++ if (!test_bit(pgd_index(address), insync)) {
++ unsigned long flags;
++ struct page *page;
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ for (page = pgd_list; page; page =
++ (struct page *)page->index)
++ if (!vmalloc_sync_one(page_address(page),
++ address)) {
++ BUG_ON(page != pgd_list);
++ break;
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ if (!page)
++ set_bit(pgd_index(address), insync);
++ }
++ if (address == start && test_bit(pgd_index(address), insync))
++ start = address + PGDIR_SIZE;
++ }
++}
++#endif
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mm/highmem-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/highmem-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,120 @@
++#include <linux/highmem.h>
++#include <linux/module.h>
++
++void *kmap(struct page *page)
++{
++ might_sleep();
++ if (!PageHighMem(page))
++ return page_address(page);
++ return kmap_high(page);
++}
++
++void kunmap(struct page *page)
++{
++ if (in_interrupt())
++ BUG();
++ if (!PageHighMem(page))
++ return;
++ kunmap_high(page);
++}
++
++/*
++ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
++ * no global lock is needed and because the kmap code must perform a global TLB
++ * invalidation when the kmap pool wraps.
++ *
++ * However when holding an atomic kmap it is not legal to sleep, so atomic
++ * kmaps are appropriate for short, tight code paths only.
++ */
++static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++ pagefault_disable();
++ if (!PageHighMem(page))
++ return page_address(page);
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++ if (!pte_none(*(kmap_pte-idx)))
++ BUG();
++ set_pte_at(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++
++ return (void*) vaddr;
++}
++
++void *kmap_atomic(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type, kmap_prot);
++}
++
++/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
++void *kmap_atomic_pte(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type,
++ test_bit(PG_pinned, &page->flags)
++ ? PAGE_KERNEL_RO : kmap_prot);
++}
++
++void kunmap_atomic(void *kvaddr, enum km_type type)
++{
++ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
++ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
++
++ /*
++ * Force other mappings to Oops if they'll try to access this pte
++	 * without first remapping it. Keeping stale mappings around is a bad idea
++ * also, in case the page changes cacheability attributes or becomes
++ * a protected page in a hypervisor.
++ */
++ if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx)) {
++ kpte_clear_flush(kmap_pte-idx, vaddr);
++ __flush_tlb_one(vaddr);
++ } else {
++#ifdef CONFIG_DEBUG_HIGHMEM
++ BUG_ON(vaddr < PAGE_OFFSET);
++ BUG_ON(vaddr >= (unsigned long)high_memory);
++#endif
++ }
++
++ pagefault_enable();
++}
++
++/* This is the same as kmap_atomic() but can map memory that doesn't
++ * have a struct page associated with it.
++ */
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ pagefault_disable();
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
++
++ return (void*) vaddr;
++}
++
++struct page *kmap_atomic_to_page(void *ptr)
++{
++ unsigned long idx, vaddr = (unsigned long)ptr;
++ pte_t *pte;
++
++ if (vaddr < FIXADDR_START)
++ return virt_to_page(ptr);
++
++ idx = virt_to_fix(vaddr);
++ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
++ return pte_page(*pte);
++}
++
++EXPORT_SYMBOL(kmap);
++EXPORT_SYMBOL(kunmap);
++EXPORT_SYMBOL(kmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_pte);
++EXPORT_SYMBOL(kunmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_to_page);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mm/hypervisor.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/hypervisor.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,451 @@
++/******************************************************************************
++ * mm/hypervisor.c
++ *
++ * Update page tables via the hypervisor.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/features.h>
++#include <xen/interface/memory.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <asm/tlbflush.h>
++
++#ifdef CONFIG_X86_64
++#define pmd_val_ma(v) (v).pmd
++#else
++#ifdef CONFIG_X86_PAE
++# define pmd_val_ma(v) ((v).pmd)
++# define pud_val_ma(v) ((v).pgd.pgd)
++#else
++# define pmd_val_ma(v) ((v).pud.pgd.pgd)
++#endif
++#endif
++
++void xen_l1_entry_update(pte_t *ptr, pte_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = pte_val_ma(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = pmd_val_ma(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++#ifdef CONFIG_X86_PAE
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = pud_val_ma(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif
++
++#ifdef CONFIG_X86_64
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = val.pud;
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = val.pgd;
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif /* CONFIG_X86_64 */
++
++void xen_pt_switch(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_new_user_pt(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_USER_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_tlb_flush);
++
++void xen_invlpg(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_LOCAL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_invlpg);
++
++#ifdef CONFIG_SMP
++
++void xen_tlb_flush_all(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_ALL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush_mask(cpumask_t *mask)
++{
++ struct mmuext_op op;
++ if ( cpus_empty(*mask) )
++ return;
++ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_all(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_ALL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
++{
++ struct mmuext_op op;
++ if ( cpus_empty(*mask) )
++ return;
++ op.cmd = MMUEXT_INVLPG_MULTI;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++#endif /* CONFIG_SMP */
++
++void xen_pgd_pin(unsigned long ptr)
++{
++ struct mmuext_op op;
++#ifdef CONFIG_X86_64
++ op.cmd = MMUEXT_PIN_L4_TABLE;
++#elif defined(CONFIG_X86_PAE)
++ op.cmd = MMUEXT_PIN_L3_TABLE;
++#else
++ op.cmd = MMUEXT_PIN_L2_TABLE;
++#endif
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pgd_unpin(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_UNPIN_TABLE;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_set_ldt(unsigned long ptr, unsigned long len)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_SET_LDT;
++ op.arg1.linear_addr = ptr;
++ op.arg2.nr_ents = len;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++/*
++ * Bitmap is indexed by page number. If bit is set, the page is part of a
++ * xen_create_contiguous_region() area of memory.
++ */
++unsigned long *contiguous_bitmap;
++
++static void contiguous_bitmap_set(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] |=
++ ((1UL<<end_off)-1) & -(1UL<<start_off);
++ } else {
++ contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
++ while ( ++curr_idx < end_idx )
++ contiguous_bitmap[curr_idx] = ~0UL;
++ contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
++ }
++}
++
++static void contiguous_bitmap_clear(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] &=
++ -(1UL<<end_off) | ((1UL<<start_off)-1);
++ } else {
++ contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
++ while ( ++curr_idx != end_idx )
++ contiguous_bitmap[curr_idx] = 0;
++ contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++ }
++}
++
++/* Protected by balloon_lock. */
++#define MAX_CONTIG_ORDER 9 /* 2MB */
++static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
++static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
++
++/* Ensure multi-page extents are contiguous in machine memory. */
++int xen_create_contiguous_region(
++ unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++ unsigned long *in_frames = discontig_frames, out_frame;
++ unsigned long frame, i, flags;
++ long rc;
++ int success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ }
++ };
++
++ /*
++ * Currently an auto-translated guest will not perform I/O, nor will
++ * it require PAE page directories below 4GB. Therefore any calls to
++ * this function are redundant and can be ignored.
++ */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 0;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++
++ scrub_pages(vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ /* 1. Zap current PTEs, remembering MFNs. */
++ for (i = 0; i < (1UL<<order); i++) {
++ in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 2. Get a new contiguous memory extent. */
++ out_frame = __pa(vstart) >> PAGE_SHIFT;
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == (1UL << order));
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != (1UL << order))
++ BUG();
++ success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) == 1);
++ if (!success) {
++ /* Couldn't get special memory: fall back to normal. */
++ for (i = 0; i < (1UL<<order); i++)
++ in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.in) != (1UL<<order))
++ BUG();
++ }
++ }
++#endif
++
++ /* 3. Map the new extent in place of old pages. */
++ for (i = 0; i < (1UL<<order); i++) {
++ frame = success ? (out_frame + i) : in_frames[i];
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ if (success)
++ contiguous_bitmap_set(__pa(vstart) >> PAGE_SHIFT,
++ 1UL << order);
++
++ balloon_unlock(flags);
++
++ return success ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
++
++void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++ unsigned long *out_frames = discontig_frames, in_frame;
++ unsigned long frame, i, flags;
++ long rc;
++ int success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ }
++ };
++
++ if (xen_feature(XENFEAT_auto_translated_physmap) ||
++ !test_bit(__pa(vstart) >> PAGE_SHIFT, contiguous_bitmap))
++ return;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return;
++
++ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ scrub_pages(vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ contiguous_bitmap_clear(__pa(vstart) >> PAGE_SHIFT, 1UL << order);
++
++ /* 1. Find start MFN of contiguous extent. */
++ in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
++
++ /* 2. Zap current PTEs. */
++ for (i = 0; i < (1UL<<order); i++) {
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 3. Do the exchange for non-contiguous MFNs. */
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == 1);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != 1)
++ BUG();
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) != (1UL << order))
++ BUG();
++ success = 1;
++ }
++#endif
++
++ /* 4. Map new pages in place of old pages. */
++ for (i = 0; i < (1UL<<order); i++) {
++ frame = success ? out_frames[i] : (in_frame + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ balloon_unlock(flags);
++}
++EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
++
++#ifdef __i386__
++int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
++{
++ __u32 *lp = (__u32 *)((char *)ldt + entry * 8);
++ maddr_t mach_lp = arbitrary_virt_to_machine(lp);
++ return HYPERVISOR_update_descriptor(
++ mach_lp, (u64)entry_a | ((u64)entry_b<<32));
++}
++#endif
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mm/init-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/init-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,871 @@
++/*
++ * linux/arch/i386/mm/init.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#include <linux/module.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/poison.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/proc_fs.h>
++#include <linux/efi.h>
++#include <linux/memory_hotplug.h>
++#include <linux/initrd.h>
++#include <linux/cpumask.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/sections.h>
++#include <asm/hypervisor.h>
++#include <asm/swiotlb.h>
++
++extern unsigned long *contiguous_bitmap;
++
++unsigned int __VMALLOC_RESERVE = 128 << 20;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++unsigned long highstart_pfn, highend_pfn;
++
++static int noinline do_test_wp_bit(void);
++
++/*
++ * Creates a middle page table and puts a pointer to it in the
++ * given global directory entry. This only returns the gd entry
++ * in non-PAE compilation mode, since the middle layer is folded.
++ */
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++#ifdef CONFIG_X86_PAE
++ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
++ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
++ pud = pud_offset(pgd, 0);
++ if (pmd_table != pmd_offset(pud, 0))
++ BUG();
++#else
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++#endif
++
++ return pmd_table;
++}
++
++/*
++ * Create a page table and place a pointer to it in a middle page
++ * directory entry.
++ */
++static pte_t * __init one_page_table_init(pmd_t *pmd)
++{
++ if (pmd_none(*pmd)) {
++ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(page_table,
++ XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++ if (page_table != pte_offset_kernel(pmd, 0))
++ BUG();
++
++ return page_table;
++ }
++
++ return pte_offset_kernel(pmd, 0);
++}
++
++/*
++ * This function initializes a certain range of kernel virtual memory
++ * with new bootmem page tables, everywhere page tables are missing in
++ * the given range.
++ */
++
++/*
++ * NOTE: The pagetables are allocated contiguously in physical space
++ * so we can cache the place of the first one and move around without
++ * checking the pgd every time.
++ */
++static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ int pgd_idx, pmd_idx;
++ unsigned long vaddr;
++
++ vaddr = start;
++ pgd_idx = pgd_index(vaddr);
++ pmd_idx = pmd_index(vaddr);
++ pgd = pgd_base + pgd_idx;
++
++ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
++ if (pgd_none(*pgd))
++ one_md_table_init(pgd);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
++ if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
++ one_page_table_init(pmd);
++
++ vaddr += PMD_SIZE;
++ }
++ pmd_idx = 0;
++ }
++}
++
++static inline int is_kernel_text(unsigned long addr)
++{
++ if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
++ return 1;
++ return 0;
++}
++
++/*
++ * This maps the physical memory to kernel virtual address space, a total
++ * of max_low_pfn pages, by creating page tables starting from address
++ * PAGE_OFFSET.
++ */
++static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
++{
++ unsigned long pfn;
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *pte;
++ int pgd_idx, pmd_idx, pte_ofs;
++
++ unsigned long max_ram_pfn = xen_start_info->nr_pages;
++ if (max_ram_pfn > max_low_pfn)
++ max_ram_pfn = max_low_pfn;
++
++ pgd_idx = pgd_index(PAGE_OFFSET);
++ pgd = pgd_base + pgd_idx;
++ pfn = 0;
++ pmd_idx = pmd_index(PAGE_OFFSET);
++ pte_ofs = pte_index(PAGE_OFFSET);
++
++ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
++#ifdef CONFIG_XEN
++ /*
++		 * Native Linux doesn't have PAE paging enabled yet at this
++		 * point.  When running as a Xen domain we are already in PAE
++		 * mode, thus we can't simply hook an empty
++ * pmd. That would kill the mappings we are currently
++ * using ...
++ */
++ pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
++#else
++ pmd = one_md_table_init(pgd);
++#endif
++ if (pfn >= max_low_pfn)
++ continue;
++ pmd += pmd_idx;
++ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
++ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
++ if (address >= hypervisor_virt_start)
++ continue;
++
++ /* Map with big pages if possible, otherwise create normal page tables. */
++ if (cpu_has_pse) {
++ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
++
++ if (is_kernel_text(address) || is_kernel_text(address2))
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
++ else
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++ pfn += PTRS_PER_PTE;
++ } else {
++ pte = one_page_table_init(pmd);
++
++ pte += pte_ofs;
++ for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
++ /* XEN: Only map initial RAM allocation. */
++ if ((pfn >= max_ram_pfn) || pte_present(*pte))
++ continue;
++ if (is_kernel_text(address))
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
++ else
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
++ }
++ pte_ofs = 0;
++ }
++ }
++ pmd_idx = 0;
++ }
++}
++
++#ifndef CONFIG_XEN
++
++static inline int page_kills_ppro(unsigned long pagenr)
++{
++ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
++ return 1;
++ return 0;
++}
++
++#else
++
++#define page_kills_ppro(p) 0
++
++#endif
++
++int page_is_ram(unsigned long pagenr)
++{
++ int i;
++ unsigned long addr, end;
++
++ if (efi_enabled) {
++ efi_memory_desc_t *md;
++ void *p;
++
++ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++ md = p;
++ if (!is_available_memory(md))
++ continue;
++ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
++
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++
++ if (e820.map[i].type != E820_RAM) /* not usable memory */
++ continue;
++ /*
++ * !!!FIXME!!! Some BIOSen report areas as RAM that
++ * are not. Notably the 640->1Mb area. We need a sanity
++ * check here.
++ */
++ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HIGHMEM
++pte_t *kmap_pte;
++pgprot_t kmap_prot;
++
++#define kmap_get_fixmap_pte(vaddr) \
++ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++
++static void __init kmap_init(void)
++{
++ unsigned long kmap_vstart;
++
++ /* cache the first kmap pte */
++ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
++ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
++
++ kmap_prot = PAGE_KERNEL;
++}
++
++static void __init permanent_kmaps_init(pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ unsigned long vaddr;
++
++ vaddr = PKMAP_BASE;
++ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
++
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ pte = pte_offset_kernel(pmd, vaddr);
++ pkmap_page_table = pte;
++}
++
++static void __meminit free_new_highpage(struct page *page, int pfn)
++{
++ init_page_count(page);
++ if (pfn < xen_start_info->nr_pages)
++ __free_page(page);
++ totalhigh_pages++;
++}
++
++void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
++{
++ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
++ ClearPageReserved(page);
++ free_new_highpage(page, pfn);
++ } else
++ SetPageReserved(page);
++}
++
++static int __meminit add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++{
++ free_new_highpage(page, pfn);
++ totalram_pages++;
++#ifdef CONFIG_FLATMEM
++ max_mapnr = max(pfn, max_mapnr);
++#endif
++ num_physpages++;
++ return 0;
++}
++
++/*
++ * Not currently handling the NUMA case.
++ * Assuming single node and all memory that
++ * has been added dynamically that would be
++ * onlined here is in HIGHMEM
++ */
++void __meminit online_page(struct page *page)
++{
++ ClearPageReserved(page);
++ add_one_highpage_hotplug(page, page_to_pfn(page));
++}
++
++
++#ifdef CONFIG_NUMA
++extern void set_highmem_pages_init(int);
++#else
++static void __init set_highmem_pages_init(int bad_ppro)
++{
++ int pfn;
++ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
++ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
++ totalram_pages += totalhigh_pages;
++}
++#endif /* CONFIG_NUMA */
++
++#else
++#define kmap_init() do { } while (0)
++#define permanent_kmaps_init(pgd_base) do { } while (0)
++#define set_highmem_pages_init(bad_ppro) do { } while (0)
++#endif /* CONFIG_HIGHMEM */
++
++unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
++EXPORT_SYMBOL(__PAGE_KERNEL);
++unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
++
++#ifdef CONFIG_NUMA
++extern void __init remap_numa_kva(void);
++#else
++#define remap_numa_kva() do {} while (0)
++#endif
++
++pgd_t *swapper_pg_dir;
++
++static void __init pagetable_init (void)
++{
++ unsigned long vaddr;
++ pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
++
++ swapper_pg_dir = pgd_base;
++ init_mm.pgd = pgd_base;
++
++ /* Enable PSE if available */
++ if (cpu_has_pse) {
++ set_in_cr4(X86_CR4_PSE);
++ }
++
++ /* Enable PGE if available */
++ if (cpu_has_pge) {
++ set_in_cr4(X86_CR4_PGE);
++ __PAGE_KERNEL |= _PAGE_GLOBAL;
++ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
++ }
++
++ kernel_physical_mapping_init(pgd_base);
++ remap_numa_kva();
++
++ /*
++ * Fixed mappings, only the page table structure has to be
++ * created - mappings will be set by set_fixmap():
++ */
++ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
++ page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
++
++ permanent_kmaps_init(pgd_base);
++}
++
++#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
++/*
++ * Swap suspend & friends need this for resume because things like the intel-agp
++ * driver might have split up a kernel 4MB mapping.
++ */
++char __nosavedata swsusp_pg_dir[PAGE_SIZE]
++ __attribute__ ((aligned (PAGE_SIZE)));
++
++static inline void save_pg_dir(void)
++{
++ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++}
++#else
++static inline void save_pg_dir(void)
++{
++}
++#endif
++
++void zap_low_mappings (void)
++{
++ int i;
++
++ save_pg_dir();
++
++ /*
++ * Zap initial low-memory mappings.
++ *
++ * Note that "pgd_clear()" doesn't do it for
++ * us, because pgd_clear() is a no-op on i386.
++ */
++ for (i = 0; i < USER_PTRS_PER_PGD; i++)
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
++#else
++ set_pgd(swapper_pg_dir+i, __pgd(0));
++#endif
++ flush_tlb_all();
++}
++
++static int disable_nx __initdata = 0;
++u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
++EXPORT_SYMBOL(__supported_pte_mask);
++
++/*
++ * noexec = on|off
++ *
++ * Control non executable mappings.
++ *
++ * on Enable
++ * off Disable
++ */
++static int __init noexec_setup(char *str)
++{
++ if (!str || !strcmp(str, "on")) {
++ if (cpu_has_nx) {
++ __supported_pte_mask |= _PAGE_NX;
++ disable_nx = 0;
++ }
++ } else if (!strcmp(str,"off")) {
++ disable_nx = 1;
++ __supported_pte_mask &= ~_PAGE_NX;
++ } else
++ return -EINVAL;
++
++ return 0;
++}
++early_param("noexec", noexec_setup);
++
++int nx_enabled = 0;
++#ifdef CONFIG_X86_PAE
++
++static void __init set_nx(void)
++{
++ unsigned int v[4], l, h;
++
++ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
++ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++ if ((v[3] & (1 << 20)) && !disable_nx) {
++ rdmsr(MSR_EFER, l, h);
++ l |= EFER_NX;
++ wrmsr(MSR_EFER, l, h);
++ nx_enabled = 1;
++ __supported_pte_mask |= _PAGE_NX;
++ }
++ }
++}
++
++/*
++ * Enables/disables executability of a given kernel page and
++ * returns the previous setting.
++ */
++int __init set_kernel_exec(unsigned long vaddr, int enable)
++{
++ pte_t *pte;
++ int ret = 1;
++
++ if (!nx_enabled)
++ goto out;
++
++ pte = lookup_address(vaddr);
++ BUG_ON(!pte);
++
++ if (!pte_exec_kernel(*pte))
++ ret = 0;
++
++ if (enable)
++ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
++ else
++ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
++ pte_update_defer(&init_mm, vaddr, pte);
++ __flush_tlb_all();
++out:
++ return ret;
++}
++
++#endif
++
++/*
++ * paging_init() sets up the page tables - note that the first 8MB are
++ * already mapped by head.S.
++ *
++ * This routine also unmaps the page at virtual kernel address 0, so
++ * that we can trap those pesky NULL-reference errors in the kernel.
++ */
++void __init paging_init(void)
++{
++ int i;
++
++#ifdef CONFIG_X86_PAE
++ set_nx();
++ if (nx_enabled)
++ printk("NX (Execute Disable) protection: active\n");
++#endif
++
++ pagetable_init();
++
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ /*
++ * We will bail out later - printk doesn't work right now so
++ * the user would just see a hanging kernel.
++ * when running as xen domain we are already in PAE mode at
++ * this point.
++ */
++ if (cpu_has_pae)
++ set_in_cr4(X86_CR4_PAE);
++#endif
++ __flush_tlb_all();
++
++ kmap_init();
++
++ /* Switch to the real shared_info page, and clear the
++ * dummy page. */
++ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++ memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++ /* Setup mapping of lower 1st MB */
++ for (i = 0; i < NR_FIX_ISAMAPS; i++)
++ if (is_initial_xendomain())
++ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++ else
++ __set_fixmap(FIX_ISAMAP_BEGIN - i,
++ virt_to_machine(empty_zero_page),
++ PAGE_KERNEL_RO);
++}
++
++/*
++ * Test if the WP bit works in supervisor mode. It isn't supported on 386's
++ * and also on some strange 486's (NexGen etc.). All 586+'s are OK. This
++ * used to involve black magic jumps to work around some nasty CPU bugs,
++ * but fortunately the switch to using exceptions got rid of all that.
++ */
++
++static void __init test_wp_bit(void)
++{
++ printk("Checking if this processor honours the WP bit even in supervisor mode... ");
++
++ /* Any page-aligned address will do, the test is non-destructive */
++ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
++ boot_cpu_data.wp_works_ok = do_test_wp_bit();
++ clear_fixmap(FIX_WP_TEST);
++
++ if (!boot_cpu_data.wp_works_ok) {
++ printk("No.\n");
++#ifdef CONFIG_X86_WP_WORKS_OK
++ panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
++#endif
++ } else {
++ printk("Ok.\n");
++ }
++}
++
++static struct kcore_list kcore_mem, kcore_vmalloc;
++
++void __init mem_init(void)
++{
++ extern int ppro_with_ram_bug(void);
++ int codesize, reservedpages, datasize, initsize;
++ int tmp;
++ int bad_ppro;
++ unsigned long pfn;
++
++ contiguous_bitmap = alloc_bootmem_low_pages(
++ (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++ BUG_ON(!contiguous_bitmap);
++ memset(contiguous_bitmap, 0, (max_low_pfn + 2*BITS_PER_LONG) >> 3);
++
++#if defined(CONFIG_SWIOTLB)
++ swiotlb_init();
++#endif
++
++#ifdef CONFIG_FLATMEM
++ BUG_ON(!mem_map);
++#endif
++
++ bad_ppro = ppro_with_ram_bug();
++
++#ifdef CONFIG_HIGHMEM
++ /* check that fixmap and pkmap do not overlap */
++ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
++ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
++ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
++ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
++ BUG();
++ }
++#endif
++
++ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
++ VMALLOC_START,VMALLOC_END,MAXMEM);
++ BUG_ON(VMALLOC_START > VMALLOC_END);
++
++ /* this will put all low memory onto the freelists */
++ totalram_pages += free_all_bootmem();
++ /* XEN: init and count low-mem pages outside initial allocation. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
++ ClearPageReserved(pfn_to_page(pfn));
++ init_page_count(pfn_to_page(pfn));
++ totalram_pages++;
++ }
++
++ reservedpages = 0;
++ for (tmp = 0; tmp < max_low_pfn; tmp++)
++ /*
++ * Only count reserved RAM pages
++ */
++ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
++ reservedpages++;
++
++ set_highmem_pages_init(bad_ppro);
++
++ codesize = (unsigned long) &_etext - (unsigned long) &_text;
++ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
++ VMALLOC_END-VMALLOC_START);
++
++ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
++ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++ num_physpages << (PAGE_SHIFT-10),
++ codesize >> 10,
++ reservedpages << (PAGE_SHIFT-10),
++ datasize >> 10,
++ initsize >> 10,
++ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
++ );
++
++#if 1 /* double-sanity-check paranoia */
++ printk("virtual kernel memory layout:\n"
++ " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
++#ifdef CONFIG_HIGHMEM
++ " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n"
++#endif
++ " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n"
++ " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n"
++ " .init : 0x%08lx - 0x%08lx (%4ld kB)\n"
++ " .data : 0x%08lx - 0x%08lx (%4ld kB)\n"
++ " .text : 0x%08lx - 0x%08lx (%4ld kB)\n",
++ FIXADDR_START, FIXADDR_TOP,
++ (FIXADDR_TOP - FIXADDR_START) >> 10,
++
++#ifdef CONFIG_HIGHMEM
++ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
++ (LAST_PKMAP*PAGE_SIZE) >> 10,
++#endif
++
++ VMALLOC_START, VMALLOC_END,
++ (VMALLOC_END - VMALLOC_START) >> 20,
++
++ (unsigned long)__va(0), (unsigned long)high_memory,
++ ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,
++
++ (unsigned long)&__init_begin, (unsigned long)&__init_end,
++ ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10,
++
++ (unsigned long)&_etext, (unsigned long)&_edata,
++ ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
++
++ (unsigned long)&_text, (unsigned long)&_etext,
++ ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
++
++#ifdef CONFIG_HIGHMEM
++ BUG_ON(PKMAP_BASE+LAST_PKMAP*PAGE_SIZE > FIXADDR_START);
++ BUG_ON(VMALLOC_END > PKMAP_BASE);
++#endif
++ BUG_ON(VMALLOC_START > VMALLOC_END);
++ BUG_ON((unsigned long)high_memory > VMALLOC_START);
++#endif /* double-sanity-check paranoia */
++
++#ifdef CONFIG_X86_PAE
++ if (!cpu_has_pae)
++ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
++#endif
++ if (boot_cpu_data.wp_works_ok < 0)
++ test_wp_bit();
++
++ /*
++	 * Subtle. SMP is doing its boot stuff late (because it has to
++ * fork idle threads) - but it also needs low mappings for the
++ * protected-mode entry to work. We zap these entries only after
++ * the WP-bit has been tested.
++ */
++#ifndef CONFIG_SMP
++ zap_low_mappings();
++#endif
++
++ set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
++}
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++int arch_add_memory(int nid, u64 start, u64 size)
++{
++ struct pglist_data *pgdata = NODE_DATA(nid);
++ struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM;
++ unsigned long start_pfn = start >> PAGE_SHIFT;
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++
++ return __add_pages(zone, start_pfn, nr_pages);
++}
++
++int remove_memory(u64 start, u64 size)
++{
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(remove_memory);
++#endif
++
++struct kmem_cache *pgd_cache;
++struct kmem_cache *pmd_cache;
++
++void __init pgtable_cache_init(void)
++{
++ if (PTRS_PER_PMD > 1) {
++ pmd_cache = kmem_cache_create("pmd",
++ PTRS_PER_PMD*sizeof(pmd_t),
++ PTRS_PER_PMD*sizeof(pmd_t),
++ 0,
++ pmd_ctor,
++ NULL);
++ if (!pmd_cache)
++ panic("pgtable_cache_init(): cannot create pmd cache");
++ }
++ pgd_cache = kmem_cache_create("pgd",
++#ifndef CONFIG_XEN
++ PTRS_PER_PGD*sizeof(pgd_t),
++ PTRS_PER_PGD*sizeof(pgd_t),
++#else
++ PAGE_SIZE,
++ PAGE_SIZE,
++#endif
++ 0,
++ pgd_ctor,
++ PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
++ if (!pgd_cache)
++ panic("pgtable_cache_init(): Cannot create pgd cache");
++}
++
++/*
++ * This function cannot be __init, since exceptions don't work in that
++ * section. Put this after the callers, so that it cannot be inlined.
++ */
++static int noinline do_test_wp_bit(void)
++{
++ char tmp_reg;
++ int flag;
++
++ __asm__ __volatile__(
++ " movb %0,%1 \n"
++ "1: movb %1,%0 \n"
++ " xorl %2,%2 \n"
++ "2: \n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4 \n"
++ " .long 1b,2b \n"
++ ".previous \n"
++ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
++ "=q" (tmp_reg),
++ "=r" (flag)
++ :"2" (1)
++ :"memory");
++
++ return flag;
++}
++
++#ifdef CONFIG_DEBUG_RODATA
++
++void mark_rodata_ro(void)
++{
++ unsigned long addr = (unsigned long)__start_rodata;
++
++ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++ change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
++
++ printk("Write protecting the kernel read-only data: %uk\n",
++ (__end_rodata - __start_rodata) >> 10);
++
++ /*
++ * change_page_attr() requires a global_flush_tlb() call after it.
++ * We do this after the printk so that if something went wrong in the
++ * change, the printk gets out at least to give a better debug hint
++ * of who is the culprit.
++ */
++ global_flush_tlb();
++}
++#endif
++
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
++{
++ unsigned long addr;
++
++ for (addr = begin; addr < end; addr += PAGE_SIZE) {
++ ClearPageReserved(virt_to_page(addr));
++ init_page_count(virt_to_page(addr));
++ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
++ free_page(addr);
++ totalram_pages++;
++ }
++ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++}
++
++void free_initmem(void)
++{
++ free_init_pages("unused kernel memory",
++ (unsigned long)(&__init_begin),
++ (unsigned long)(&__init_end));
++}
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++ free_init_pages("initrd memory", start, end);
++}
++#endif
++
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mm/ioremap-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/ioremap-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,443 @@
++/*
++ * arch/i386/mm/ioremap.c
++ *
++ * Re-map IO memory to kernel address space so that we can access it.
++ * This is needed for high PCI addresses that aren't mapped in the
++ * 640k-1MB IO memory area on PC's
++ *
++ * (C) Copyright 1995 1996 Linus Torvalds
++ */
++
++#include <linux/vmalloc.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/io.h>
++#include <asm/fixmap.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++
++#define ISA_START_ADDRESS 0x0
++#define ISA_END_ADDRESS 0x100000
++
++static int direct_remap_area_pte_fn(pte_t *pte,
++ struct page *pmd_page,
++ unsigned long address,
++ void *data)
++{
++ mmu_update_t **v = (mmu_update_t **)data;
++
++ BUG_ON(!pte_none(*pte));
++
++ (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++ (*v)++;
++
++ return 0;
++}
++
++static int __direct_remap_pfn_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ int rc;
++ unsigned long i, start_address;
++ mmu_update_t *u, *v, *w;
++
++ u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
++ if (u == NULL)
++ return -ENOMEM;
++
++ start_address = address;
++
++ flush_cache_all();
++
++ for (i = 0; i < size; i += PAGE_SIZE) {
++ if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
++ /* Flush a full batch after filling in the PTE ptrs. */
++ rc = apply_to_page_range(mm, start_address,
++ address - start_address,
++ direct_remap_area_pte_fn, &w);
++ if (rc)
++ goto out;
++ rc = -EFAULT;
++ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
++ goto out;
++ v = w = u;
++ start_address = address;
++ }
++
++ /*
++ * Fill in the machine address: PTE ptr is done later by
++ * __direct_remap_area_pages().
++ */
++ v->val = pte_val_ma(pfn_pte_ma(mfn, prot));
++
++ mfn++;
++ address += PAGE_SIZE;
++ v++;
++ }
++
++ if (v != u) {
++ /* Final batch. */
++ rc = apply_to_page_range(mm, start_address,
++ address - start_address,
++ direct_remap_area_pte_fn, &w);
++ if (rc)
++ goto out;
++ rc = -EFAULT;
++ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
++ goto out;
++ }
++
++ rc = 0;
++
++ out:
++ flush_tlb_all();
++
++ free_page((unsigned long)u);
++
++ return rc;
++}
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return remap_pfn_range(vma, address, mfn, size, prot);
++
++ if (domid == DOMID_SELF)
++ return -EINVAL;
++
++ vma->vm_flags |= VM_IO | VM_RESERVED;
++
++ vma->vm_mm->context.has_foreign_mappings = 1;
++
++ return __direct_remap_pfn_range(
++ vma->vm_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_remap_pfn_range);
++
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ return __direct_remap_pfn_range(
++ &init_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
++
++static int lookup_pte_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ uint64_t *ptep = (uint64_t *)data;
++ if (ptep)
++ *ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++ return 0;
++}
++
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep)
++{
++ return apply_to_page_range(mm, address, PAGE_SIZE,
++ lookup_pte_fn, ptep);
++}
++
++EXPORT_SYMBOL(create_lookup_pte_addr);
++
++static int noop_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ return 0;
++}
++
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size)
++{
++ return apply_to_page_range(mm, address, size, noop_fn, NULL);
++}
++
++EXPORT_SYMBOL(touch_pte_range);
++
++/*
++ * Does @address reside within a non-highmem page that is local to this virtual
++ * machine (i.e., not an I/O page, nor a memory page belonging to another VM).
++ * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
++ * why this works.
++ */
++static inline int is_local_lowmem(unsigned long address)
++{
++ extern unsigned long max_low_pfn;
++ return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
++}
++
++/*
++ * Generic mapping function (not visible outside):
++ */
++
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
++ *
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
++ */
++void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
++{
++ void __iomem * addr;
++ struct vm_struct * area;
++ unsigned long offset, last_addr;
++ domid_t domid = DOMID_IO;
++
++ /* Don't allow wraparound or zero size */
++ last_addr = phys_addr + size - 1;
++ if (!size || last_addr < phys_addr)
++ return NULL;
++
++ /*
++ * Don't remap the low PCI/ISA area, it's always mapped..
++ */
++ if (is_initial_xendomain() &&
++ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++ return (void __iomem *) isa_bus_to_virt(phys_addr);
++
++ /*
++ * Don't allow anybody to remap normal RAM that we're using..
++ */
++ if (is_local_lowmem(phys_addr)) {
++ char *t_addr, *t_end;
++ struct page *page;
++
++ t_addr = bus_to_virt(phys_addr);
++ t_end = t_addr + (size - 1);
++
++ for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
++ if(!PageReserved(page))
++ return NULL;
++
++ domid = DOMID_SELF;
++ }
++
++ /*
++ * Mappings have to be page-aligned
++ */
++ offset = phys_addr & ~PAGE_MASK;
++ phys_addr &= PAGE_MASK;
++ size = PAGE_ALIGN(last_addr+1) - phys_addr;
++
++ /*
++ * Ok, go for it..
++ */
++ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
++ if (!area)
++ return NULL;
++ area->phys_addr = phys_addr;
++ addr = (void __iomem *) area->addr;
++ flags |= _KERNPG_TABLE;
++ if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
++ phys_addr>>PAGE_SHIFT,
++ size, __pgprot(flags), domid)) {
++ vunmap((void __force *) addr);
++ return NULL;
++ }
++ return (void __iomem *) (offset + (char __iomem *)addr);
++}
++EXPORT_SYMBOL(__ioremap);
++
++/**
++ * ioremap_nocache - map bus memory into CPU space
++ * @offset: bus address of the memory
++ * @size: size of the resource to map
++ *
++ * ioremap_nocache performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address.
++ *
++ * This version of ioremap ensures that the memory is marked uncachable
++ * on the CPU as well as honouring existing caching rules from things like
++ * the PCI bus. Note that there are other caches and buffers on many
++ * busses. In particular driver authors should read up on PCI writes
++ *
++ * It's useful if some control registers are in such an area and
++ * write combining or read caching is not desirable:
++ *
++ * Must be freed with iounmap.
++ */
++
++void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
++{
++ unsigned long last_addr;
++ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
++ if (!p)
++ return p;
++
++ /* Guaranteed to be > phys_addr, as per __ioremap() */
++ last_addr = phys_addr + size - 1;
++
++ if (is_local_lowmem(last_addr)) {
++ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
++ unsigned long npages;
++
++ phys_addr &= PAGE_MASK;
++
++ /* This might overflow and become zero.. */
++ last_addr = PAGE_ALIGN(last_addr);
++
++ /* .. but that's ok, because modulo-2**n arithmetic will make
++ * the page-aligned "last - first" come out right.
++ */
++ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
++
++ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
++ iounmap(p);
++ p = NULL;
++ }
++ global_flush_tlb();
++ }
++
++ return p;
++}
++EXPORT_SYMBOL(ioremap_nocache);
++
++/**
++ * iounmap - Free an IO remapping
++ * @addr: virtual address from ioremap_*
++ *
++ * Caller must ensure there is only one unmapping for the same pointer.
++ */
++void iounmap(volatile void __iomem *addr)
++{
++ struct vm_struct *p, *o;
++
++ if ((void __force *)addr <= high_memory)
++ return;
++
++ /*
++ * __ioremap special-cases the PCI/ISA range by not instantiating a
++ * vm_area and by simply returning an address into the kernel mapping
++ * of ISA space. So handle that here.
++ */
++ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++ return;
++
++ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
++
++ /* Use the vm area unlocked, assuming the caller
++ ensures there isn't another iounmap for the same address
++ in parallel. Reuse of the virtual address is prevented by
++ leaving it in the global lists until we're done with it.
++ cpa takes care of the direct mappings. */
++ read_lock(&vmlist_lock);
++ for (p = vmlist; p; p = p->next) {
++ if (p->addr == addr)
++ break;
++ }
++ read_unlock(&vmlist_lock);
++
++ if (!p) {
++ printk("iounmap: bad address %p\n", addr);
++ dump_stack();
++ return;
++ }
++
++ /* Reset the direct mapping. Can block */
++ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
++ /* p->size includes the guard page, but cpa doesn't like that */
++ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
++ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
++ PAGE_KERNEL);
++ global_flush_tlb();
++ }
++
++ /* Finally remove it */
++ o = remove_vm_area((void *)addr);
++ BUG_ON(p != o || o == NULL);
++ kfree(p);
++}
++EXPORT_SYMBOL(iounmap);
++
++void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
++{
++ unsigned long offset, last_addr;
++ unsigned int nrpages;
++ enum fixed_addresses idx;
++
++ /* Don't allow wraparound or zero size */
++ last_addr = phys_addr + size - 1;
++ if (!size || last_addr < phys_addr)
++ return NULL;
++
++ /*
++ * Don't remap the low PCI/ISA area, it's always mapped..
++ */
++ if (is_initial_xendomain() &&
++ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++ return isa_bus_to_virt(phys_addr);
++
++ /*
++ * Mappings have to be page-aligned
++ */
++ offset = phys_addr & ~PAGE_MASK;
++ phys_addr &= PAGE_MASK;
++ size = PAGE_ALIGN(last_addr) - phys_addr;
++
++ /*
++ * Mappings have to fit in the FIX_BTMAP area.
++ */
++ nrpages = size >> PAGE_SHIFT;
++ if (nrpages > NR_FIX_BTMAPS)
++ return NULL;
++
++ /*
++ * Ok, go for it..
++ */
++ idx = FIX_BTMAP_BEGIN;
++ while (nrpages > 0) {
++ set_fixmap(idx, phys_addr);
++ phys_addr += PAGE_SIZE;
++ --idx;
++ --nrpages;
++ }
++ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
++}
++
++void __init bt_iounmap(void *addr, unsigned long size)
++{
++ unsigned long virt_addr;
++ unsigned long offset;
++ unsigned int nrpages;
++ enum fixed_addresses idx;
++
++ virt_addr = (unsigned long)addr;
++ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
++ return;
++ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++ return;
++ offset = virt_addr & ~PAGE_MASK;
++ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
++
++ idx = FIX_BTMAP_BEGIN;
++ while (nrpages > 0) {
++ clear_fixmap(idx);
++ --idx;
++ --nrpages;
++ }
++}
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mm/pageattr.c
+--- a/arch/i386/mm/pageattr.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/mm/pageattr.c Wed Aug 08 16:25:28 2007 -0300
+@@ -90,7 +90,7 @@ static void set_pmd_pte(pte_t *kpte, uns
+ unsigned long flags;
+
+ set_pte_atomic(kpte, pte); /* change init_mm */
+- if (PTRS_PER_PMD > 1)
++ if (HAVE_SHARED_KERNEL_PMD)
+ return;
+
+ spin_lock_irqsave(&pgd_lock, flags);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/mm/pgtable-xen.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/mm/pgtable-xen.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,748 @@
++/*
++ * linux/arch/i386/mm/pgtable.c
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/highmem.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/spinlock.h>
++#include <linux/module.h>
++
++#include <asm/system.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++
++#include <xen/features.h>
++#include <asm/hypervisor.h>
++
++static void pgd_test_and_unpin(pgd_t *pgd);
++
++void show_mem(void)
++{
++ int total = 0, reserved = 0;
++ int shared = 0, cached = 0;
++ int highmem = 0;
++ struct page *page;
++ pg_data_t *pgdat;
++ unsigned long i;
++ unsigned long flags;
++
++ printk(KERN_INFO "Mem-info:\n");
++ show_free_areas();
++ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++ for_each_online_pgdat(pgdat) {
++ pgdat_resize_lock(pgdat, &flags);
++ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++ page = pgdat_page_nr(pgdat, i);
++ total++;
++ if (PageHighMem(page))
++ highmem++;
++ if (PageReserved(page))
++ reserved++;
++ else if (PageSwapCache(page))
++ cached++;
++ else if (page_count(page))
++ shared += page_count(page) - 1;
++ }
++ pgdat_resize_unlock(pgdat, &flags);
++ }
++ printk(KERN_INFO "%d pages of RAM\n", total);
++ printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
++ printk(KERN_INFO "%d reserved pages\n", reserved);
++ printk(KERN_INFO "%d pages shared\n", shared);
++ printk(KERN_INFO "%d pages swap cached\n", cached);
++
++ printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
++ printk(KERN_INFO "%lu pages writeback\n",
++ global_page_state(NR_WRITEBACK));
++ printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
++ printk(KERN_INFO "%lu pages slab\n",
++ global_page_state(NR_SLAB_RECLAIMABLE) +
++ global_page_state(NR_SLAB_UNRECLAIMABLE));
++ printk(KERN_INFO "%lu pages pagetables\n",
++ global_page_state(NR_PAGETABLE));
++}
++
++/*
++ * Associate a virtual page frame with a given physical page frame
++ * and protection flags for that frame.
++ */
++static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ if (pgd_none(*pgd)) {
++ BUG();
++ return;
++ }
++ pud = pud_offset(pgd, vaddr);
++ if (pud_none(*pud)) {
++ BUG();
++ return;
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ BUG();
++ return;
++ }
++ pte = pte_offset_kernel(pmd, vaddr);
++ if (pgprot_val(flags))
++ /* <pfn,flags> stored as-is, to permit clearing entries */
++ set_pte(pte, pfn_pte(pfn, flags));
++ else
++ pte_clear(&init_mm, vaddr, pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++/*
++ * Associate a virtual page frame with a given physical page frame
++ * and protection flags for that frame.
++ */
++static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
++ pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ if (pgd_none(*pgd)) {
++ BUG();
++ return;
++ }
++ pud = pud_offset(pgd, vaddr);
++ if (pud_none(*pud)) {
++ BUG();
++ return;
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ BUG();
++ return;
++ }
++ pte = pte_offset_kernel(pmd, vaddr);
++ if (pgprot_val(flags))
++ /* <pfn,flags> stored as-is, to permit clearing entries */
++ set_pte(pte, pfn_pte_ma(pfn, flags));
++ else
++ pte_clear(&init_mm, vaddr, pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++/*
++ * Associate a large virtual page frame with a given physical page frame
++ * and protection flags for that frame. pfn is for the base of the page,
++ * vaddr is what the page gets mapped to - both must be properly aligned.
++ * The pmd must already be instantiated. Assumes PAE mode.
++ */
++void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
++ printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
++ return; /* BUG(); */
++ }
++ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
++ printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
++ return; /* BUG(); */
++ }
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ if (pgd_none(*pgd)) {
++ printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
++ return; /* BUG(); */
++ }
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ set_pmd(pmd, pfn_pmd(pfn, flags));
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++static int fixmaps;
++unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
++#ifndef CONFIG_COMPAT_VDSO
++unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
++EXPORT_SYMBOL(__FIXADDR_TOP);
++#endif
++
++void __init set_fixaddr_top(void)
++{
++ BUG_ON(fixmaps > 0);
++ __FIXADDR_TOP = hypervisor_virt_start - 2 * PAGE_SIZE;
++}
++
++void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
++{
++ unsigned long address = __fix_to_virt(idx);
++
++ if (idx >= __end_of_fixed_addresses) {
++ BUG();
++ return;
++ }
++ switch (idx) {
++ case FIX_WP_TEST:
++#ifdef CONFIG_X86_F00F_BUG
++ case FIX_F00F_IDT:
++#endif
++ case FIX_VDSO:
++ set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
++ break;
++ default:
++ set_pte_pfn_ma(address, phys >> PAGE_SHIFT, flags);
++ break;
++ }
++ fixmaps++;
++}
++
++/**
++ * reserve_top_address - reserves a hole in the top of kernel address space
++ * @reserve - size of hole to reserve
++ *
++ * Can be used to relocate the fixmap area and poke a hole in the top
++ * of kernel address space to make room for a hypervisor.
++ */
++void reserve_top_address(unsigned long reserve)
++{
++ BUG_ON(fixmaps > 0);
++#ifdef CONFIG_COMPAT_VDSO
++ BUG_ON(reserve != 0);
++#else
++ __FIXADDR_TOP = -reserve - PAGE_SIZE;
++ __VMALLOC_RESERVE += reserve;
++#endif
++}
++
++pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
++ if (pte)
++ make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
++ return pte;
++}
++
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++ struct page *pte;
++
++#ifdef CONFIG_HIGHPTE
++ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
++#else
++ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++#endif
++ if (pte) {
++ SetPageForeign(pte, pte_free);
++ init_page_count(pte);
++ }
++ return pte;
++}
++
++void pte_free(struct page *pte)
++{
++ unsigned long pfn = page_to_pfn(pte);
++
++ if (!PageHighMem(pte)) {
++ unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);
++
++ if (!pte_write(*virt_to_ptep(va)))
++ if (HYPERVISOR_update_va_mapping(
++ va, pfn_pte(pfn, PAGE_KERNEL), 0))
++ BUG();
++ } else
++ clear_bit(PG_pinned, &pte->flags);
++
++ ClearPageForeign(pte);
++ init_page_count(pte);
++
++ __free_page(pte);
++}
++
++void pmd_ctor(void *pmd, struct kmem_cache *cache, unsigned long flags)
++{
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++}
++
++/*
++ * List of all pgd's needed for non-PAE so it can invalidate entries
++ * in both cached and uncached pgd's; not needed for PAE since the
++ * kernel pmd is shared. If PAE were not to share the pmd a similar
++ * tactic would be needed. This is essentially codepath-based locking
++ * against pageattr.c; it is the unique case in which a valid change
++ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
++ * vmalloc faults work because attached pagetables are never freed.
++ * The locking scheme was chosen on the basis of manfred's
++ * recommendations and having no core impact whatsoever.
++ * -- wli
++ */
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
++
++static inline void pgd_list_add(pgd_t *pgd)
++{
++ struct page *page = virt_to_page(pgd);
++ page->index = (unsigned long)pgd_list;
++ if (pgd_list)
++ set_page_private(pgd_list, (unsigned long)&page->index);
++ pgd_list = page;
++ set_page_private(page, (unsigned long)&pgd_list);
++}
++
++static inline void pgd_list_del(pgd_t *pgd)
++{
++ struct page *next, **pprev, *page = virt_to_page(pgd);
++ next = (struct page *)page->index;
++ pprev = (struct page **)page_private(page);
++ *pprev = next;
++ if (next)
++ set_page_private(next, (unsigned long)pprev);
++}
++
++void pgd_ctor(void *pgd, struct kmem_cache *cache, unsigned long unused)
++{
++ unsigned long flags;
++
++ if (PTRS_PER_PMD > 1) {
++ if (HAVE_SHARED_KERNEL_PMD)
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++ } else {
++ spin_lock_irqsave(&pgd_lock, flags);
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++ pgd_list_add(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ }
++}
++
++/* never called when PTRS_PER_PMD > 1 */
++void pgd_dtor(void *pgd, struct kmem_cache *cache, unsigned long unused)
++{
++ unsigned long flags; /* can be called from interrupt context */
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ pgd_list_del(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ pgd_test_and_unpin(pgd);
++}
++
++pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++ int i;
++ pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
++ pmd_t **pmd;
++ unsigned long flags;
++
++ pgd_test_and_unpin(pgd);
++
++ if (PTRS_PER_PMD == 1 || !pgd)
++ return pgd;
++
++ if (HAVE_SHARED_KERNEL_PMD) {
++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++ if (!pmd)
++ goto out_oom;
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
++ }
++ return pgd;
++ }
++
++ /*
++ * We can race save/restore (if we sleep during a GFP_KERNEL memory
++ * allocation). We therefore store virtual addresses of pmds as they
++ * do not change across save/restore, and poke the machine addresses
++ * into the pgdir under the pgd_lock.
++ */
++ pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
++ if (!pmd) {
++ kmem_cache_free(pgd_cache, pgd);
++ return NULL;
++ }
++
++ /* Allocate pmds, remember virtual addresses. */
++ for (i = 0; i < PTRS_PER_PGD; ++i) {
++ pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++ if (!pmd[i])
++ goto out_oom;
++ }
++
++ spin_lock_irqsave(&pgd_lock, flags);
++
++ /* Protect against save/restore: move below 4GB under pgd_lock. */
++ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
++ int rc = xen_create_contiguous_region(
++ (unsigned long)pgd, 0, 32);
++ if (rc) {
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ goto out_oom;
++ }
++ }
++
++ /* Copy kernel pmd contents and write-protect the new pmds. */
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++ pgd_t *kpgd = pgd_offset_k(v);
++ pud_t *kpud = pud_offset(kpgd, v);
++ pmd_t *kpmd = pmd_offset(kpud, v);
++ memcpy(pmd[i], kpmd, PAGE_SIZE);
++ make_lowmem_page_readonly(
++ pmd[i], XENFEAT_writable_page_tables);
++ }
++
++	/* It is safe to poke machine addresses of pmds under the pgd_lock. */
++ for (i = 0; i < PTRS_PER_PGD; i++)
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));
++
++ /* Ensure this pgd gets picked up and pinned on save/restore. */
++ pgd_list_add(pgd);
++
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ kfree(pmd);
++
++ return pgd;
++
++out_oom:
++ if (HAVE_SHARED_KERNEL_PMD) {
++ for (i--; i >= 0; i--)
++ kmem_cache_free(pmd_cache,
++ (void *)__va(pgd_val(pgd[i])-1));
++ } else {
++ for (i--; i >= 0; i--)
++ kmem_cache_free(pmd_cache, pmd[i]);
++ kfree(pmd);
++ }
++ kmem_cache_free(pgd_cache, pgd);
++ return NULL;
++}
++
++void pgd_free(pgd_t *pgd)
++{
++ int i;
++
++ /*
++ * After this the pgd should not be pinned for the duration of this
++ * function's execution. We should never sleep and thus never race:
++ * 1. User pmds will not become write-protected under our feet due
++ * to a concurrent mm_pin_all().
++ * 2. The machine addresses in PGD entries will not become invalid
++ * due to a concurrent save/restore.
++ */
++ pgd_test_and_unpin(pgd);
++
++ /* in the PAE case user pgd entries are overwritten before usage */
++ if (PTRS_PER_PMD > 1) {
++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ kmem_cache_free(pmd_cache, pmd);
++ }
++
++ if (!HAVE_SHARED_KERNEL_PMD) {
++ unsigned long flags;
++ spin_lock_irqsave(&pgd_lock, flags);
++ pgd_list_del(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ make_lowmem_page_writable(
++ pmd, XENFEAT_writable_page_tables);
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++ kmem_cache_free(pmd_cache, pmd);
++ }
++
++ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
++ xen_destroy_contiguous_region(
++ (unsigned long)pgd, 0);
++ }
++ }
++
++ /* in the non-PAE case, free_pgtables() clears user pgd entries */
++ kmem_cache_free(pgd_cache, pgd);
++}
++
++void make_lowmem_page_readonly(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_wrprotect(*pte), 0);
++ BUG_ON(rc);
++}
++
++void make_lowmem_page_writable(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_mkwrite(*pte), 0);
++ BUG_ON(rc);
++}
++
++void make_page_readonly(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_wrprotect(*pte), 0);
++ if (rc) /* fallback? */
++ xen_l1_entry_update(pte, pte_wrprotect(*pte));
++ if ((unsigned long)va >= (unsigned long)high_memory) {
++ unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++ if (pfn >= highstart_pfn)
++ kmap_flush_unused(); /* flush stale writable kmaps */
++ else
++#endif
++ make_lowmem_page_readonly(
++ phys_to_virt(pfn << PAGE_SHIFT), feature);
++ }
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_mkwrite(*pte), 0);
++ if (rc) /* fallback? */
++ xen_l1_entry_update(pte, pte_mkwrite(*pte));
++ if ((unsigned long)va >= (unsigned long)high_memory) {
++ unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++ if (pfn < highstart_pfn)
++#endif
++ make_lowmem_page_writable(
++ phys_to_virt(pfn << PAGE_SHIFT), feature);
++ }
++}
++
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ make_page_readonly(va, feature);
++ va = (void *)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ make_page_writable(va, feature);
++ va = (void *)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++static inline void pgd_walk_set_prot(struct page *page, pgprot_t flags)
++{
++ unsigned long pfn = page_to_pfn(page);
++ int rc;
++
++ if (PageHighMem(page)) {
++ if (pgprot_val(flags) & _PAGE_RW)
++ clear_bit(PG_pinned, &page->flags);
++ else
++ set_bit(PG_pinned, &page->flags);
++ } else {
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte(pfn, flags), 0);
++ if (rc)
++ BUG();
++ }
++}
++
++static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
++{
++ pgd_t *pgd = pgd_base;
++ pud_t *pud;
++ pmd_t *pmd;
++ int g, u, m, rc;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return;
++
++ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ if (PTRS_PER_PUD > 1) /* not folded */
++ pgd_walk_set_prot(virt_to_page(pud),flags);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ if (PTRS_PER_PMD > 1) /* not folded */
++ pgd_walk_set_prot(virt_to_page(pmd),flags);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ if (pmd_none(*pmd))
++ continue;
++ pgd_walk_set_prot(pmd_page(*pmd),flags);
++ }
++ }
++ }
++
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)pgd_base,
++ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++ UVMF_TLB_FLUSH);
++ if (rc)
++ BUG();
++}
++
++static void __pgd_pin(pgd_t *pgd)
++{
++ pgd_walk(pgd, PAGE_KERNEL_RO);
++ kmap_flush_unused();
++ xen_pgd_pin(__pa(pgd));
++ set_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void __pgd_unpin(pgd_t *pgd)
++{
++ xen_pgd_unpin(__pa(pgd));
++ pgd_walk(pgd, PAGE_KERNEL);
++ clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void pgd_test_and_unpin(pgd_t *pgd)
++{
++ if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
++ __pgd_unpin(pgd);
++}
++
++void mm_pin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++ spin_lock(&mm->page_table_lock);
++ __pgd_pin(mm->pgd);
++ spin_unlock(&mm->page_table_lock);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++ spin_lock(&mm->page_table_lock);
++ __pgd_unpin(mm->pgd);
++ spin_unlock(&mm->page_table_lock);
++}
++
++void mm_pin_all(void)
++{
++ struct page *page;
++ unsigned long flags;
++
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ /*
++ * Allow uninterrupted access to the pgd_list. Also protects
++ * __pgd_pin() by disabling preemption.
++ * All other CPUs must be at a safe point (e.g., in stop_machine
++ * or offlined entirely).
++ */
++ spin_lock_irqsave(&pgd_lock, flags);
++ for (page = pgd_list; page; page = (struct page *)page->index) {
++ if (!test_bit(PG_pinned, &page->flags))
++ __pgd_pin((pgd_t *)page_address(page));
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++}
++
++void _arch_dup_mmap(struct mm_struct *mm)
++{
++ if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
++ mm_pin(mm);
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++ struct task_struct *tsk = current;
++
++ task_lock(tsk);
++
++ /*
++ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
++ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
++ */
++ if (tsk->active_mm == mm) {
++ tsk->active_mm = &init_mm;
++ atomic_inc(&init_mm.mm_count);
++
++ switch_mm(mm, &init_mm, tsk);
++
++ atomic_dec(&mm->mm_count);
++ BUG_ON(atomic_read(&mm->mm_count) == 0);
++ }
++
++ task_unlock(tsk);
++
++ if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
++ (atomic_read(&mm->mm_count) == 1) &&
++ !mm->context.has_foreign_mappings)
++ mm_unpin(mm);
++}
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/oprofile/Makefile
+--- a/arch/i386/oprofile/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/oprofile/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -6,7 +6,14 @@ DRIVER_OBJS = $(addprefix ../../../drive
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
++ifdef CONFIG_XEN
++XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
++ xenoprofile.o)
++oprofile-y := $(DRIVER_OBJS) \
++ $(XENOPROF_COMMON_OBJS) xenoprof.o
++else
+ oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+ oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o \
+ op_model_ppro.o op_model_p4.o
+ oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
++endif
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/oprofile/xenoprof.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/oprofile/xenoprof.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,179 @@
++/**
++ * @file xenoprof.c
++ *
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
++ *
++ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon and Jose Renato Santos for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
++ * x86-specific part
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ */
++
++#include <linux/init.h>
++#include <linux/oprofile.h>
++#include <linux/sched.h>
++#include <asm/pgtable.h>
++
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include <xen/xenoprof.h>
++#include "op_counter.h"
++
++static unsigned int num_events = 0;
++
++void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
++{
++ num_events = init->num_events;
++ /* just in case - make sure we do not overflow event list
++ (i.e. counter_config list) */
++ if (num_events > OP_MAX_COUNTER) {
++ num_events = OP_MAX_COUNTER;
++ init->num_events = num_events;
++ }
++}
++
++void xenoprof_arch_counter(void)
++{
++ int i;
++ struct xenoprof_counter counter;
++
++ for (i=0; i<num_events; i++) {
++ counter.ind = i;
++ counter.count = (uint64_t)counter_config[i].count;
++ counter.enabled = (uint32_t)counter_config[i].enabled;
++ counter.event = (uint32_t)counter_config[i].event;
++ counter.kernel = (uint32_t)counter_config[i].kernel;
++ counter.user = (uint32_t)counter_config[i].user;
++ counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
++ HYPERVISOR_xenoprof_op(XENOPROF_counter,
++ &counter);
++ }
++}
++
++void xenoprof_arch_start(void)
++{
++ /* nothing */
++}
++
++void xenoprof_arch_stop(void)
++{
++ /* nothing */
++}
++
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer * sbuf)
++{
++ if (sbuf->buffer) {
++ vunmap(sbuf->buffer);
++ sbuf->buffer = NULL;
++ }
++}
++
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer * get_buffer,
++ struct xenoprof_shared_buffer * sbuf)
++{
++ int npages, ret;
++ struct vm_struct *area;
++
++ sbuf->buffer = NULL;
++ if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer)) )
++ return ret;
++
++ npages = (get_buffer->bufsize * get_buffer->nbuf - 1) / PAGE_SIZE + 1;
++
++ area = alloc_vm_area(npages * PAGE_SIZE);
++ if (area == NULL)
++ return -ENOMEM;
++
++ if ( (ret = direct_kernel_remap_pfn_range(
++ (unsigned long)area->addr,
++ get_buffer->buf_gmaddr >> PAGE_SHIFT,
++ npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE),
++ DOMID_SELF)) ) {
++ vunmap(area->addr);
++ return ret;
++ }
++
++ sbuf->buffer = area->addr;
++ return ret;
++}
++
++int xenoprof_arch_set_passive(struct xenoprof_passive * pdomain,
++ struct xenoprof_shared_buffer * sbuf)
++{
++ int ret;
++ int npages;
++ struct vm_struct *area;
++ pgprot_t prot = __pgprot(_KERNPG_TABLE);
++
++ sbuf->buffer = NULL;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
++ if (ret)
++ goto out;
++
++ npages = (pdomain->bufsize * pdomain->nbuf - 1) / PAGE_SIZE + 1;
++
++ area = alloc_vm_area(npages * PAGE_SIZE);
++ if (area == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = direct_kernel_remap_pfn_range(
++ (unsigned long)area->addr,
++ pdomain->buf_gmaddr >> PAGE_SHIFT,
++ npages * PAGE_SIZE, prot, DOMID_SELF);
++ if (ret) {
++ vunmap(area->addr);
++ goto out;
++ }
++ sbuf->buffer = area->addr;
++
++out:
++ return ret;
++}
++
++struct op_counter_config counter_config[OP_MAX_COUNTER];
++
++int xenoprof_create_files(struct super_block * sb, struct dentry * root)
++{
++ unsigned int i;
++
++ for (i = 0; i < num_events; ++i) {
++ struct dentry * dir;
++ char buf[2];
++
++ snprintf(buf, 2, "%d", i);
++ dir = oprofilefs_mkdir(sb, root, buf);
++ oprofilefs_create_ulong(sb, dir, "enabled",
++ &counter_config[i].enabled);
++ oprofilefs_create_ulong(sb, dir, "event",
++ &counter_config[i].event);
++ oprofilefs_create_ulong(sb, dir, "count",
++ &counter_config[i].count);
++ oprofilefs_create_ulong(sb, dir, "unit_mask",
++ &counter_config[i].unit_mask);
++ oprofilefs_create_ulong(sb, dir, "kernel",
++ &counter_config[i].kernel);
++ oprofilefs_create_ulong(sb, dir, "user",
++ &counter_config[i].user);
++ }
++
++ return 0;
++}
++
++int __init oprofile_arch_init(struct oprofile_operations * ops)
++{
++ return xenoprofile_init(ops);
++}
++
++void oprofile_arch_exit(void)
++{
++ xenoprofile_exit();
++}
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/pci/Makefile
+--- a/arch/i386/pci/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/pci/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -3,6 +3,10 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o
+ obj-$(CONFIG_PCI_BIOS) += pcbios.o
+ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig.o direct.o
+ obj-$(CONFIG_PCI_DIRECT) += direct.o
++
++# pcifront should be after pcbios.o, mmconfig.o, and direct.o as it should only
++# take over if direct access to the PCI bus is unavailable
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
+
+ pci-y := fixup.o
+ pci-$(CONFIG_ACPI) += acpi.o
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/pci/irq.c
+--- a/arch/i386/pci/irq.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/pci/irq.c Wed Aug 08 16:25:28 2007 -0300
+@@ -94,13 +94,25 @@ static struct irq_routing_table * __init
+ u8 *addr;
+ struct irq_routing_table *rt;
+
++#ifdef CONFIG_XEN
++ if (!is_initial_xendomain())
++ return NULL;
++#endif
+ if (pirq_table_addr) {
++#ifdef CONFIG_XEN
++ rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
++#else
+ rt = pirq_check_routing_table((u8 *) __va(pirq_table_addr));
++#endif
+ if (rt)
+ return rt;
+ printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
+ }
++#ifdef CONFIG_XEN
++ for(addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
++#else
+ for(addr = (u8 *) __va(0xf0000); addr < (u8 *) __va(0x100000); addr += 16) {
++#endif
+ rt = pirq_check_routing_table(addr);
+ if (rt)
+ return rt;
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/pci/pcifront.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/i386/pci/pcifront.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,55 @@
++/*
++ * PCI Frontend Stub - puts some "dummy" functions into the Linux x86 PCI core
++ * to support the Xen PCI Frontend's operation
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <asm/acpi.h>
++#include "pci.h"
++
++static int pcifront_enable_irq(struct pci_dev *dev)
++{
++ u8 irq;
++ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
++ dev->irq = irq;
++
++ return 0;
++}
++
++extern u8 pci_cache_line_size;
++
++static int __init pcifront_x86_stub_init(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ /* Only install our method if we haven't found real hardware already */
++ if (raw_pci_ops)
++ return 0;
++
++ printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
++
++ /* Copied from arch/i386/pci/common.c */
++ pci_cache_line_size = 32 >> 2;
++ if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
++ pci_cache_line_size = 64 >> 2; /* K7 & K8 */
++ else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
++ pci_cache_line_size = 128 >> 2; /* P4 */
++
++ /* On x86, we need to disable the normal IRQ routing table and
++ * just ask the backend
++ */
++ pcibios_enable_irq = pcifront_enable_irq;
++ pcibios_disable_irq = NULL;
++
++#ifdef CONFIG_ACPI
++ /* Keep ACPI out of the picture */
++ acpi_noirq = 1;
++#endif
++
++ return 0;
++}
++
++arch_initcall(pcifront_x86_stub_init);
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/i386/power/Makefile
+--- a/arch/i386/power/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/i386/power/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -1,2 +1,4 @@ obj-$(CONFIG_PM) += cpu.o
+-obj-$(CONFIG_PM) += cpu.o
++obj-$(CONFIG_PM_LEGACY) += cpu.o
++obj-$(CONFIG_SOFTWARE_SUSPEND) += cpu.o
++obj-$(CONFIG_ACPI_SLEEP) += cpu.o
+ obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o suspend.o
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/Kconfig
+--- a/arch/ia64/Kconfig Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/Kconfig Wed Aug 08 16:25:28 2007 -0300
+@@ -66,6 +66,34 @@ config GENERIC_IOMAP
+ config GENERIC_IOMAP
+ bool
+ default y
++
++config XEN
++ bool "Xen hypervisor support"
++ default y
++ help
++	  Enable Xen hypervisor support. The resulting kernel can run
++	  both as a guest OS on Xen and natively on hardware.
++
++config XEN_IA64_VDSO_PARAVIRT
++ bool
++ depends on XEN && !ITANIUM
++ default y
++ help
++ vDSO paravirtualization
++
++config XEN_IA64_EXPOSE_P2M
++ bool "Xen/IA64 exposure p2m table"
++ depends on XEN
++ default y
++ help
++ expose p2m from xen
++
++config XEN_IA64_EXPOSE_P2M_USE_DTR
++ bool "Xen/IA64 map p2m table with dtr"
++ depends on XEN_IA64_EXPOSE_P2M
++ default y
++ help
++ use dtr to map the exposed p2m table
+
+ config SCHED_NO_NO_OMIT_FRAME_POINTER
+ bool
+@@ -500,6 +528,21 @@ config PCI_DOMAINS
+ bool
+ default PCI
+
++config XEN_PCIDEV_FRONTEND
++ bool "Xen PCI Frontend"
++ depends on PCI && XEN
++ default y
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
++config XEN_PCIDEV_FE_DEBUG
++ bool "Xen PCI Frontend Debugging"
++ depends on XEN_PCIDEV_FRONTEND
++ default n
++ help
++ Enables some debug statements within the PCI Frontend.
++
+ source "drivers/pci/pcie/Kconfig"
+
+ source "drivers/pci/Kconfig"
+@@ -572,3 +615,13 @@ source "security/Kconfig"
+ source "security/Kconfig"
+
+ source "crypto/Kconfig"
++
++#
++# override default values of drivers/xen/Kconfig
++#
++if XEN
++config XEN_SMPBOOT
++ default n
++endif
++
++source "drivers/xen/Kconfig"
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/Makefile
+--- a/arch/ia64/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -45,6 +45,12 @@ endif
+ endif
+
+ CFLAGS += $(cflags-y)
++
++cppflags-$(CONFIG_XEN) += \
++ -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION)
++
++CPPFLAGS += $(cppflags-y)
++
+ head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
+
+ libs-y += arch/ia64/lib/
+@@ -55,9 +61,15 @@ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/
+ core-$(CONFIG_IA64_HP_ZX1) += arch/ia64/dig/
+ core-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/dig/
+ core-$(CONFIG_IA64_SGI_SN2) += arch/ia64/sn/
++core-$(CONFIG_XEN) += arch/ia64/xen/
+
+ drivers-$(CONFIG_PCI) += arch/ia64/pci/
++ifneq ($(CONFIG_XEN),y)
+ drivers-$(CONFIG_IA64_HP_SIM) += arch/ia64/hp/sim/
++endif
++ifneq ($(CONFIG_IA64_GENERIC),y)
++drivers-$(CONFIG_XEN) += arch/ia64/hp/sim/
++endif
+ drivers-$(CONFIG_IA64_HP_ZX1) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
+ drivers-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += arch/ia64/hp/common/ arch/ia64/hp/zx1/
+ drivers-$(CONFIG_IA64_GENERIC) += arch/ia64/hp/common/ arch/ia64/hp/zx1/ arch/ia64/hp/sim/ arch/ia64/sn/
+@@ -87,8 +99,8 @@ boot: lib/lib.a vmlinux
+ boot: lib/lib.a vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $@
+
+-install: vmlinux.gz
+- sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) $< System.map "$(INSTALL_PATH)"
++install:
++ -yes | sh $(srctree)/arch/ia64/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
+
+ define archhelp
+ echo '* compressed - Build compressed kernel image'
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/dig/setup.c
+--- a/arch/ia64/dig/setup.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/dig/setup.c Wed Aug 08 16:25:28 2007 -0300
+@@ -23,6 +23,8 @@
+ #include <asm/io.h>
+ #include <asm/machvec.h>
+ #include <asm/system.h>
++
++#include <xen/xencons.h>
+
+ void __init
+ dig_setup (char **cmdline_p)
+@@ -67,4 +69,19 @@ dig_setup (char **cmdline_p)
+ screen_info.orig_video_mode = 3; /* XXX fake */
+ screen_info.orig_video_isVGA = 1; /* XXX fake */
+ screen_info.orig_video_ega_bx = 3; /* XXX fake */
++#ifdef CONFIG_XEN
++ if (!is_running_on_xen() || !is_initial_xendomain())
++ return;
++
++ if (xen_start_info->console.dom0.info_size >=
++ sizeof(struct dom0_vga_console_info)) {
++ const struct dom0_vga_console_info *info =
++ (struct dom0_vga_console_info *)(
++ (char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++ dom0_init_screen_info(info);
++ }
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++#endif
+ }
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/hp/sim/Makefile
+--- a/arch/ia64/hp/sim/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ /dev/null Thu Jan 01 00:00:00 1970 +0000
+@@ -1,16 +0,0 @@
+-#
+-# ia64/platform/hp/sim/Makefile
+-#
+-# Copyright (C) 2002 Hewlett-Packard Co.
+-# David Mosberger-Tang <davidm@hpl.hp.com>
+-# Copyright (C) 1999 Silicon Graphics, Inc.
+-# Copyright (C) Srinivasa Thirumalachar (sprasad@engr.sgi.com)
+-#
+-
+-obj-y := hpsim_irq.o hpsim_setup.o hpsim.o
+-obj-$(CONFIG_IA64_GENERIC) += hpsim_machvec.o
+-
+-obj-$(CONFIG_HP_SIMETH) += simeth.o
+-obj-$(CONFIG_HP_SIMSERIAL) += simserial.o
+-obj-$(CONFIG_HP_SIMSERIAL_CONSOLE) += hpsim_console.o
+-obj-$(CONFIG_HP_SIMSCSI) += simscsi.o
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/asm-offsets.c
+--- a/arch/ia64/kernel/asm-offsets.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/asm-offsets.c Wed Aug 08 16:25:28 2007 -0300
+@@ -268,4 +268,29 @@ void foo(void)
+ DEFINE(IA64_TIME_SOURCE_MMIO64, TIME_SOURCE_MMIO64);
+ DEFINE(IA64_TIME_SOURCE_MMIO32, TIME_SOURCE_MMIO32);
+ DEFINE(IA64_TIMESPEC_TV_NSEC_OFFSET, offsetof (struct timespec, tv_nsec));
++
++#ifdef CONFIG_XEN
++ BLANK();
++
++#define DEFINE_MAPPED_REG_OFS(sym, field) \
++ DEFINE(sym, (XMAPPEDREGS_OFS + offsetof(mapped_regs_t, field)))
++
++ DEFINE_MAPPED_REG_OFS(XSI_PSR_I_ADDR_OFS, interrupt_mask_addr);
++ DEFINE_MAPPED_REG_OFS(XSI_IPSR_OFS, ipsr);
++ DEFINE_MAPPED_REG_OFS(XSI_IIP_OFS, iip);
++ DEFINE_MAPPED_REG_OFS(XSI_IFS_OFS, ifs);
++ DEFINE_MAPPED_REG_OFS(XSI_PRECOVER_IFS_OFS, precover_ifs);
++ DEFINE_MAPPED_REG_OFS(XSI_ISR_OFS, isr);
++ DEFINE_MAPPED_REG_OFS(XSI_IFA_OFS, ifa);
++ DEFINE_MAPPED_REG_OFS(XSI_IIPA_OFS, iipa);
++ DEFINE_MAPPED_REG_OFS(XSI_IIM_OFS, iim);
++ DEFINE_MAPPED_REG_OFS(XSI_IHA_OFS, iha);
++ DEFINE_MAPPED_REG_OFS(XSI_ITIR_OFS, itir);
++ DEFINE_MAPPED_REG_OFS(XSI_PSR_IC_OFS, interrupt_collection_enabled);
++ DEFINE_MAPPED_REG_OFS(XSI_BANKNUM_OFS, banknum);
++ DEFINE_MAPPED_REG_OFS(XSI_BANK0_R16_OFS, bank0_regs[0]);
++ DEFINE_MAPPED_REG_OFS(XSI_BANK1_R16_OFS, bank1_regs[0]);
++ DEFINE_MAPPED_REG_OFS(XSI_B0NATS_OFS, vbnat);
++ DEFINE_MAPPED_REG_OFS(XSI_B1NATS_OFS, vnat);
++#endif /* CONFIG_XEN */
+ }
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/entry.S
+--- a/arch/ia64/kernel/entry.S Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/entry.S Wed Aug 08 16:25:28 2007 -0300
+@@ -180,7 +180,7 @@ END(sys_clone)
+ * called. The code starting at .map relies on this. The rest of the code
+ * doesn't care about the interrupt masking status.
+ */
+-GLOBAL_ENTRY(ia64_switch_to)
++GLOBAL_ENTRY(__ia64_switch_to)
+ .prologue
+ alloc r16=ar.pfs,1,0,0,0
+ DO_SAVE_SWITCH_STACK
+@@ -234,7 +234,7 @@ GLOBAL_ENTRY(ia64_switch_to)
+ ;;
+ srlz.d
+ br.cond.sptk .done
+-END(ia64_switch_to)
++END(__ia64_switch_to)
+
+ /*
+ * Note that interrupts are enabled during save_switch_stack and load_switch_stack. This
+@@ -375,7 +375,7 @@ END(save_switch_stack)
+ * - b7 holds address to return to
+ * - must not touch r8-r11
+ */
+-ENTRY(load_switch_stack)
++GLOBAL_ENTRY(load_switch_stack)
+ .prologue
+ .altrp b7
+
+@@ -510,7 +510,7 @@ END(clone)
+ * because some system calls (such as ia64_execve) directly
+ * manipulate ar.pfs.
+ */
+-GLOBAL_ENTRY(ia64_trace_syscall)
++GLOBAL_ENTRY(__ia64_trace_syscall)
+ PT_REGS_UNWIND_INFO(0)
+ /*
+ * We need to preserve the scratch registers f6-f11 in case the system
+@@ -582,7 +582,7 @@ strace_error:
+ (p6) mov r10=-1
+ (p6) mov r8=r9
+ br.cond.sptk .strace_save_retval
+-END(ia64_trace_syscall)
++END(__ia64_trace_syscall)
+
+ /*
+ * When traced and returning from sigreturn, we invoke syscall_trace but then
+@@ -601,7 +601,7 @@ GLOBAL_ENTRY(ia64_strace_leave_kernel)
+ .ret4: br.cond.sptk ia64_leave_kernel
+ END(ia64_strace_leave_kernel)
+
+-GLOBAL_ENTRY(ia64_ret_from_clone)
++GLOBAL_ENTRY(__ia64_ret_from_clone)
+ PT_REGS_UNWIND_INFO(0)
+ { /*
+ * Some versions of gas generate bad unwind info if the first instruction of a
+@@ -627,7 +627,7 @@ GLOBAL_ENTRY(ia64_ret_from_clone)
+ cmp.ne p6,p0=r2,r0
+ (p6) br.cond.spnt .strace_check_retval
+ ;; // added stop bits to prevent r8 dependency
+-END(ia64_ret_from_clone)
++END(__ia64_ret_from_clone)
+ // fall through
+ GLOBAL_ENTRY(ia64_ret_from_syscall)
+ PT_REGS_UNWIND_INFO(0)
+@@ -635,8 +635,11 @@ GLOBAL_ENTRY(ia64_ret_from_syscall)
+ adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
+ mov r10=r0 // clear error indication in r10
+ (p7) br.cond.spnt handle_syscall_error // handle potential syscall failure
++ ;;
++ // don't fall through, ia64_leave_syscall may be #define'd
++ br.cond.sptk.few ia64_leave_syscall
++ ;;
+ END(ia64_ret_from_syscall)
+- // fall through
+ /*
+ * ia64_leave_syscall(): Same as ia64_leave_kernel, except that it doesn't
+ * need to switch to bank 0 and doesn't restore the scratch registers.
+@@ -681,7 +684,7 @@ END(ia64_ret_from_syscall)
+ * ar.csd: cleared
+ * ar.ssd: cleared
+ */
+-ENTRY(ia64_leave_syscall)
++GLOBAL_ENTRY(__ia64_leave_syscall)
+ PT_REGS_UNWIND_INFO(0)
+ /*
+ * work.need_resched etc. mustn't get changed by this CPU before it returns to
+@@ -789,7 +792,7 @@ ENTRY(ia64_leave_syscall)
+ mov.m ar.ssd=r0 // M2 clear ar.ssd
+ mov f11=f0 // F clear f11
+ br.cond.sptk.many rbs_switch // B
+-END(ia64_leave_syscall)
++END(__ia64_leave_syscall)
+
+ #ifdef CONFIG_IA32_SUPPORT
+ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+@@ -801,10 +804,13 @@ GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
+ st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
+ .mem.offset 8,0
+ st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
++ ;;
++ // don't fall through, ia64_leave_kernel may be #define'd
++ br.cond.sptk.few ia64_leave_kernel
++ ;;
+ END(ia64_ret_from_ia32_execve)
+- // fall through
+ #endif /* CONFIG_IA32_SUPPORT */
+-GLOBAL_ENTRY(ia64_leave_kernel)
++GLOBAL_ENTRY(__ia64_leave_kernel)
+ PT_REGS_UNWIND_INFO(0)
+ /*
+ * work.need_resched etc. mustn't get changed by this CPU before it returns to
+@@ -1135,7 +1141,7 @@ skip_rbs_switch:
+ ld8 r10=[r3]
+ br.cond.sptk.many .work_processed_syscall // re-check
+
+-END(ia64_leave_kernel)
++END(__ia64_leave_kernel)
+
+ ENTRY(handle_syscall_error)
+ /*
+@@ -1175,7 +1181,7 @@ END(ia64_invoke_schedule_tail)
+ * be set up by the caller. We declare 8 input registers so the system call
+ * args get preserved, in case we need to restart a system call.
+ */
+-ENTRY(notify_resume_user)
++GLOBAL_ENTRY(notify_resume_user)
+ .prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(8)
+ alloc loc1=ar.pfs,8,2,3,0 // preserve all eight input regs in case of syscall restart!
+ mov r9=ar.unat
+@@ -1263,7 +1269,7 @@ ENTRY(sys_rt_sigreturn)
+ adds sp=16,sp
+ ;;
+ ld8 r9=[sp] // load new ar.unat
+- mov.sptk b7=r8,ia64_leave_kernel
++ mov.sptk b7=r8,__ia64_leave_kernel
+ ;;
+ mov ar.unat=r9
+ br.many b7
+@@ -1605,8 +1611,8 @@ sys_call_table:
+ data8 sys_ni_syscall // 1295 reserved for ppoll
+ data8 sys_unshare
+ data8 sys_splice
+- data8 sys_set_robust_list
+- data8 sys_get_robust_list
++ data8 sys_ni_syscall // reserved for set_robust_list
++ data8 sys_ni_syscall // reserved for get_robust_list
+ data8 sys_sync_file_range // 1300
+ data8 sys_tee
+ data8 sys_vmsplice
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/fsys.S
+--- a/arch/ia64/kernel/fsys.S Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/fsys.S Wed Aug 08 16:25:28 2007 -0300
+@@ -516,11 +516,34 @@ ENTRY(fsys_fallback_syscall)
+ adds r17=-1024,r15
+ movl r14=sys_call_table
+ ;;
++#ifdef CONFIG_XEN
++ movl r18=running_on_xen;;
++ ld4 r18=[r18];;
++ // p14 = running_on_xen
++ // p15 = !running_on_xen
++ cmp.ne p14,p15=r0,r18
++ ;;
++(p14) movl r18=XSI_PSR_I_ADDR;;
++(p14) ld8 r18=[r18]
++(p14) mov r29=1;;
++(p14) st1 [r18]=r29
++(p15) rsm psr.i
++#else
+ rsm psr.i
++#endif
+ shladd r18=r17,3,r14
+ ;;
+ ld8 r18=[r18] // load normal (heavy-weight) syscall entry-point
++#ifdef CONFIG_XEN
++(p14) mov r27=r8
++(p14) XEN_HYPER_GET_PSR
++ ;;
++(p14) mov r29=r8
++(p14) mov r8=r27
++(p15) mov r29=psr // read psr (12 cyc load latency)
++#else
+ mov r29=psr // read psr (12 cyc load latency)
++#endif
+ mov r27=ar.rsc
+ mov r21=ar.fpsr
+ mov r26=ar.pfs
+@@ -632,7 +655,25 @@ GLOBAL_ENTRY(fsys_bubble_down)
+ mov rp=r14 // I0 set the real return addr
+ and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A
+ ;;
++#ifdef CONFIG_XEN
++ movl r14=running_on_xen;;
++ ld4 r14=[r14];;
++ // p14 = running_on_xen
++ // p15 = !running_on_xen
++ cmp.ne p14,p15=r0,r14
++ ;;
++(p14) movl r28=XSI_PSR_I_ADDR;;
++(p14) ld8 r28=[r28];;
++(p14) adds r28=-1,r28;; // event_pending
++(p14) ld1 r14=[r28];;
++(p14) cmp.ne.unc p13,p14=r14,r0;;
++(p13) XEN_HYPER_SSM_I
++(p14) adds r28=1,r28;; // event_mask
++(p14) st1 [r28]=r0;;
++(p15) ssm psr.i
++#else
+ ssm psr.i // M2 we're on kernel stacks now, reenable irqs
++#endif
+ cmp.eq p8,p0=r3,r0 // A
+ (p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
+
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/gate.S
+--- a/arch/ia64/kernel/gate.S Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/gate.S Wed Aug 08 16:25:28 2007 -0300
+@@ -6,13 +6,15 @@
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+-
+ #include <asm/asmmacro.h>
+ #include <asm/errno.h>
+ #include <asm/asm-offsets.h>
+ #include <asm/sigcontext.h>
+ #include <asm/system.h>
+ #include <asm/unistd.h>
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++# include <asm/privop.h>
++#endif
+
+ /*
+ * We can't easily refer to symbols inside the kernel. To avoid full runtime relocation,
+@@ -31,6 +33,40 @@
+ #define BRL_COND_FSYS_BUBBLE_DOWN(pr) \
+ [1:](pr)brl.cond.sptk 0; \
+ .xdata4 ".data.patch.brl_fsys_bubble_down", 1b-.
++
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++ // The page in which hyperprivop lives must be pinned by ITR.
++ // However vDSO area isn't pinned. So issuing hyperprivop
++ // from vDSO page causes trouble that Kevin pointed out.
++	// After clearing vpsr.ic, the vcpu is preempted and the itlb
++	// is flushed. When the vcpu gets the cpu again, a tlb miss fault
++	// occurs, which results in a nested dtlb fault because vpsr.ic is off.
++ // To avoid such a situation, we jump into the kernel text area
++ // which is pinned, and then issue hyperprivop and return back
++ // to vDSO page.
++ // This is Dan Magenheimer's idea.
++
++ // Currently is_running_on_xen() is defined as running_on_xen.
++	// If is_running_on_xen() ever becomes a real function, this
++	// must be updated accordingly.
++ .section ".data.patch.running_on_xen", "a"
++ .previous
++#define LOAD_RUNNING_ON_XEN(reg) \
++[1:] movl reg=0; \
++ .xdata4 ".data.patch.running_on_xen", 1b-.
++
++ .section ".data.patch.brl_xen_ssm_i_0", "a"
++ .previous
++#define BRL_COND_XEN_SSM_I_0(pr) \
++[1:](pr)brl.cond.sptk 0; \
++ .xdata4 ".data.patch.brl_xen_ssm_i_0", 1b-.
++
++ .section ".data.patch.brl_xen_ssm_i_1", "a"
++ .previous
++#define BRL_COND_XEN_SSM_I_1(pr) \
++[1:](pr)brl.cond.sptk 0; \
++ .xdata4 ".data.patch.brl_xen_ssm_i_1", 1b-.
++#endif
+
+ GLOBAL_ENTRY(__kernel_syscall_via_break)
+ .prologue
+@@ -76,7 +112,42 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
+ epc // B causes split-issue
+ }
+ ;;
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++ // r20 = 1
++ // r22 = &vcpu->vcpu_info->evtchn_upcall_mask
++ // r23 = &vpsr.ic
++ // r24 = &vcpu->vcpu_info->evtchn_upcall_pending
++ // r25 = tmp
++ // r28 = &running_on_xen
++ // r30 = running_on_xen
++ // r31 = tmp
++ // p11 = tmp
++ // p12 = running_on_xen
++ // p13 = !running_on_xen
++ // p14 = tmp
++ // p15 = tmp
++#define isXen p12
++#define isRaw p13
++ LOAD_RUNNING_ON_XEN(r28)
++ movl r22=XSI_PSR_I_ADDR
++ ;;
++ ld8 r22=[r22]
++ ;;
++ movl r23=XSI_PSR_IC
++ adds r24=-1,r22
++ mov r20=1
++ ;;
++ ld4 r30=[r28]
++ ;;
++ cmp.ne isXen,isRaw=r0,r30
++ ;;
++(isRaw) rsm psr.be | psr.i
++(isXen) st1 [r22]=r20
++(isXen) rum psr.be
++ ;;
++#else
+ rsm psr.be | psr.i // M2 (5 cyc to srlz.d)
++#endif
+ LOAD_FSYSCALL_TABLE(r14) // X
+ ;;
+ mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
+@@ -84,7 +155,14 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
+ mov r19=NR_syscalls-1 // A
+ ;;
+ lfetch [r18] // M0|1
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++(isRaw) mov r29=psr
++(isXen) XEN_HYPER_GET_PSR
++ ;;
++(isXen) mov r29=r8
++#else
+ mov r29=psr // M2 (12 cyc)
++#endif
+ // If r17 is a NaT, p6 will be zero
+ cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
+ ;;
+@@ -98,9 +176,21 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
+ ;;
+ nop.m 0
+ (p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++ ;;
++ // p14 = running_on_xen && p8
++ // p15 = !running_on_xen && p8
++(p8) cmp.ne.unc p14,p15=r0,r30
++ ;;
++(p15) ssm psr.i
++ BRL_COND_XEN_SSM_I_0(p14)
++ .global .vdso_ssm_i_0_ret
++.vdso_ssm_i_0_ret:
++#else
+ nop.i 0
+ ;;
+ (p8) ssm psr.i
++#endif
+ (p6) mov b7=r18 // I0
+ (p8) br.dptk.many b7 // B
+
+@@ -121,9 +211,21 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
+ #else
+ BRL_COND_FSYS_BUBBLE_DOWN(p6)
+ #endif
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++(isRaw) ssm psr.i
++ BRL_COND_XEN_SSM_I_1(isXen)
++ .global .vdso_ssm_i_1_ret
++.vdso_ssm_i_1_ret:
++#else
+ ssm psr.i
++#endif
+ mov r10=-1
+ (p10) mov r8=EINVAL
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++ dv_serialize_data // shut up gas warning.
++ // we know xen_hyper_ssm_i_0 or xen_hyper_ssm_i_1
++ // doesn't change p9 and p10
++#endif
+ (p9) mov r8=ENOSYS
+ FSYS_RETURN
+ END(__kernel_syscall_via_epc)
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/gate.lds.S
+--- a/arch/ia64/kernel/gate.lds.S Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/gate.lds.S Wed Aug 08 16:25:28 2007 -0300
+@@ -43,6 +43,20 @@ SECTIONS
+ __start_gate_brl_fsys_bubble_down_patchlist = .;
+ *(.data.patch.brl_fsys_bubble_down)
+ __end_gate_brl_fsys_bubble_down_patchlist = .;
++
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++ __start_gate_running_on_xen_patchlist = .;
++ *(.data.patch.running_on_xen)
++ __end_gate_running_on_xen_patchlist = .;
++
++ __start_gate_brl_xen_ssm_i_0_patchlist = .;
++ *(.data.patch.brl_xen_ssm_i_0)
++ __end_gate_brl_xen_ssm_i_0_patchlist = .;
++
++ __start_gate_brl_xen_ssm_i_1_patchlist = .;
++ *(.data.patch.brl_xen_ssm_i_1)
++ __end_gate_brl_xen_ssm_i_1_patchlist = .;
++#endif
+ } :readable
+ .IA_64.unwind_info : { *(.IA_64.unwind_info*) }
+ .IA_64.unwind : { *(.IA_64.unwind*) } :readable :unwind
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/head.S
+--- a/arch/ia64/kernel/head.S Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/head.S Wed Aug 08 16:25:28 2007 -0300
+@@ -366,6 +366,12 @@ 1: // now we are in virtual mode
+ (isBP) movl r2=ia64_boot_param
+ ;;
+ (isBP) st8 [r2]=r28 // save the address of the boot param area passed by the bootloader
++
++#ifdef CONFIG_XEN
++ // Note: isBP is used by the subprogram.
++ br.call.sptk.many rp=early_xen_setup
++ ;;
++#endif
+
+ #ifdef CONFIG_SMP
+ (isAP) br.call.sptk.many rp=start_secondary
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/iosapic.c
+--- a/arch/ia64/kernel/iosapic.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/iosapic.c Wed Aug 08 16:25:28 2007 -0300
+@@ -159,6 +159,75 @@ static int iosapic_kmalloc_ok;
+ static int iosapic_kmalloc_ok;
+ static LIST_HEAD(free_rte_list);
+
++#ifdef CONFIG_XEN
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++#include <asm/hypervisor.h>
++static inline unsigned int xen_iosapic_read(char __iomem *iosapic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = (unsigned long)iosapic -
++ __IA64_UNCACHED_OFFSET;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = (unsigned long)iosapic -
++ __IA64_UNCACHED_OFFSET;
++ apic_op.reg = reg;
++ apic_op.value = val;
++ HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op);
++}
++
++static inline unsigned int iosapic_read(char __iomem *iosapic, unsigned int reg)
++{
++ if (!is_running_on_xen()) {
++ writel(reg, iosapic + IOSAPIC_REG_SELECT);
++ return readl(iosapic + IOSAPIC_WINDOW);
++ } else
++ return xen_iosapic_read(iosapic, reg);
++}
++
++static inline void iosapic_write(char __iomem *iosapic, unsigned int reg, u32 val)
++{
++ if (!is_running_on_xen()) {
++ writel(reg, iosapic + IOSAPIC_REG_SELECT);
++ writel(val, iosapic + IOSAPIC_WINDOW);
++ } else
++ xen_iosapic_write(iosapic, reg, val);
++}
++
++int xen_assign_irq_vector(int irq)
++{
++ struct physdev_irq irq_op;
++
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op))
++ return -ENOSPC;
++
++ return irq_op.vector;
++}
++
++void xen_free_irq_vector(int vector)
++{
++ struct physdev_irq irq_op;
++
++ irq_op.vector = vector;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_free_irq_vector, &irq_op))
++		printk(KERN_WARNING "%s: xen_free_irq_vector failed, vector=%d\n",
++ __FUNCTION__, vector);
++}
++#endif /* XEN */
++
+ /*
+ * Find an IOSAPIC associated with a GSI
+ */
+@@ -677,6 +746,9 @@ register_intr (unsigned int gsi, int vec
+ iosapic_intr_info[vector].polarity = polarity;
+ iosapic_intr_info[vector].dmode = delivery;
+ iosapic_intr_info[vector].trigger = trigger;
++
++ if (is_running_on_xen())
++ return 0;
+
+ if (trigger == IOSAPIC_EDGE)
+ irq_type = &irq_type_iosapic_edge;
+@@ -1040,6 +1112,9 @@ iosapic_system_init (int system_pcat_com
+ }
+
+ pcat_compat = system_pcat_compat;
++ if (is_running_on_xen())
++ return;
++
+ if (pcat_compat) {
+ /*
+ * Disable the compatibility mode interrupts (8259 style),
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/irq_ia64.c
+--- a/arch/ia64/kernel/irq_ia64.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/irq_ia64.c Wed Aug 08 16:25:28 2007 -0300
+@@ -31,6 +31,10 @@
+ #include <linux/threads.h>
+ #include <linux/bitops.h>
+ #include <linux/irq.h>
++#ifdef CONFIG_XEN
++#include <linux/cpu.h>
++#endif
++
+
+ #include <asm/delay.h>
+ #include <asm/intrinsics.h>
+@@ -70,6 +74,13 @@ assign_irq_vector (int irq)
+ assign_irq_vector (int irq)
+ {
+ int pos, vector;
++
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ extern int xen_assign_irq_vector(int);
++ return xen_assign_irq_vector(irq);
++ }
++#endif
+ again:
+ pos = find_first_zero_bit(ia64_vector_mask, IA64_NUM_DEVICE_VECTORS);
+ vector = IA64_FIRST_DEVICE_VECTOR + pos;
+@@ -88,6 +99,13 @@ free_irq_vector (int vector)
+ if (vector < IA64_FIRST_DEVICE_VECTOR || vector > IA64_LAST_DEVICE_VECTOR)
+ return;
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ extern void xen_free_irq_vector(int);
++ xen_free_irq_vector(vector);
++ return;
++ }
++#endif
+ pos = vector - IA64_FIRST_DEVICE_VECTOR;
+ if (!test_and_clear_bit(pos, ia64_vector_mask))
+ printk(KERN_WARNING "%s: double free!\n", __FUNCTION__);
+@@ -280,11 +298,267 @@ static struct irqaction resched_irqactio
+ };
+ #endif
+
++#ifdef CONFIG_XEN
++#include <xen/evtchn.h>
++#include <xen/interface/callback.h>
++
++static DEFINE_PER_CPU(int, timer_irq) = -1;
++static DEFINE_PER_CPU(int, ipi_irq) = -1;
++static DEFINE_PER_CPU(int, resched_irq) = -1;
++static DEFINE_PER_CPU(int, cmc_irq) = -1;
++static DEFINE_PER_CPU(int, cmcp_irq) = -1;
++static DEFINE_PER_CPU(int, cpep_irq) = -1;
++static char timer_name[NR_CPUS][15];
++static char ipi_name[NR_CPUS][15];
++static char resched_name[NR_CPUS][15];
++static char cmc_name[NR_CPUS][15];
++static char cmcp_name[NR_CPUS][15];
++static char cpep_name[NR_CPUS][15];
++
++struct saved_irq {
++ unsigned int irq;
++ struct irqaction *action;
++};
++/* 16 should be a generous limit, since only a few percpu irqs
++ * are registered early.
++ */
++#define MAX_LATE_IRQ 16
++static struct saved_irq saved_percpu_irqs[MAX_LATE_IRQ];
++static unsigned short late_irq_cnt = 0;
++static unsigned short saved_irq_cnt = 0;
++static int xen_slab_ready = 0;
++
++#ifdef CONFIG_SMP
++/* Dummy stub. Though we may check RESCHEDULE_VECTOR before __do_IRQ,
++ * it ends up issuing several memory accesses on percpu data and
++ * thus adds unnecessary traffic to other paths.
++ */
++static irqreturn_t
++handle_reschedule(int irq, void *dev_id, struct pt_regs *regs)
++{
++
++ return IRQ_HANDLED;
++}
++
++static struct irqaction resched_irqaction = {
++ .handler = handle_reschedule,
++ .flags = SA_INTERRUPT,
++ .name = "RESCHED"
++};
++#endif
++
++/*
++ * This is the xen version of percpu irq registration, which needs to
++ * bind to the xen-specific evtchn sub-system. One trick here is that
++ * the xen evtchn binding interface depends on kmalloc, because the
++ * related port needs to be freed at device/cpu down. So we cache the
++ * registrations on the BSP before the slab allocator is ready and
++ * deal with them at a later point. Instances registered after the
++ * slab is ready are hooked up to xen evtchns immediately.
++ *
++ * FIXME: MCA is not supported so far, and thus the "nomca" boot param
++ * is required.
++ */
++static void
++xen_register_percpu_irq (unsigned int vec, struct irqaction *action, int save)
++{
++ unsigned int cpu = smp_processor_id();
++ irq_desc_t *desc;
++ int irq = 0;
++
++ if (xen_slab_ready) {
++ switch (vec) {
++ case IA64_TIMER_VECTOR:
++ sprintf(timer_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
++ action->handler, action->flags,
++ timer_name[cpu], action->dev_id);
++ per_cpu(timer_irq,cpu) = irq;
++ break;
++ case IA64_IPI_RESCHEDULE:
++ sprintf(resched_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR, cpu,
++ action->handler, action->flags,
++ resched_name[cpu], action->dev_id);
++ per_cpu(resched_irq,cpu) = irq;
++ break;
++ case IA64_IPI_VECTOR:
++ sprintf(ipi_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_ipi_to_irqhandler(IPI_VECTOR, cpu,
++ action->handler, action->flags,
++ ipi_name[cpu], action->dev_id);
++ per_cpu(ipi_irq,cpu) = irq;
++ break;
++ case IA64_CMC_VECTOR:
++ sprintf(cmc_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
++ action->handler,
++ action->flags,
++ cmc_name[cpu],
++ action->dev_id);
++ per_cpu(cmc_irq,cpu) = irq;
++ break;
++ case IA64_CMCP_VECTOR:
++ sprintf(cmcp_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_ipi_to_irqhandler(CMCP_VECTOR, cpu,
++ action->handler,
++ action->flags,
++ cmcp_name[cpu],
++ action->dev_id);
++ per_cpu(cmcp_irq,cpu) = irq;
++ break;
++ case IA64_CPEP_VECTOR:
++ sprintf(cpep_name[cpu], "%s%d", action->name, cpu);
++ irq = bind_ipi_to_irqhandler(CPEP_VECTOR, cpu,
++ action->handler,
++ action->flags,
++ cpep_name[cpu],
++ action->dev_id);
++ per_cpu(cpep_irq,cpu) = irq;
++ break;
++ case IA64_CPE_VECTOR:
++ case IA64_MCA_RENDEZ_VECTOR:
++ case IA64_PERFMON_VECTOR:
++ case IA64_MCA_WAKEUP_VECTOR:
++ case IA64_SPURIOUS_INT_VECTOR:
++ /* No need to complain, these aren't supported. */
++ break;
++ default:
++ printk(KERN_WARNING "Percpu irq %d is unsupported "
++ "by xen!\n", vec);
++ break;
++ }
++ BUG_ON(irq < 0);
++
++ if (irq > 0) {
++ /*
++ * Mark percpu. Without this, migrate_irqs() will
++ * mark the interrupt for migrations and trigger it
++ * on cpu hotplug.
++ */
++ desc = irq_desc + irq;
++ desc->status |= IRQ_PER_CPU;
++ }
++ }
++
++ /* For BSP, we cache registered percpu irqs, and then re-walk
++ * them when initializing APs
++ */
++ if (!cpu && save) {
++ BUG_ON(saved_irq_cnt == MAX_LATE_IRQ);
++ saved_percpu_irqs[saved_irq_cnt].irq = vec;
++ saved_percpu_irqs[saved_irq_cnt].action = action;
++ saved_irq_cnt++;
++ if (!xen_slab_ready)
++ late_irq_cnt++;
++ }
++}
++
++static void
++xen_bind_early_percpu_irq (void)
++{
++ int i;
++
++ xen_slab_ready = 1;
++ /* There's no race when accessing this cached array, since only
++	 * the BSP will take this step, shortly after boot.
++ */
++ for (i = 0; i < late_irq_cnt; i++)
++ xen_register_percpu_irq(saved_percpu_irqs[i].irq,
++ saved_percpu_irqs[i].action, 0);
++}
++
++/* FIXME: There's no obvious point at which to check whether the slab
++ * allocator is ready, so we resort to a hack using a late time hook.
++ */
++extern void (*late_time_init)(void);
++extern char xen_event_callback;
++extern void xen_init_IRQ(void);
++
++#ifdef CONFIG_HOTPLUG_CPU
++static int __devinit
++unbind_evtchn_callback(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (unsigned long)hcpu;
++
++ if (action == CPU_DEAD) {
++ /* Unregister evtchn. */
++ if (per_cpu(cpep_irq,cpu) >= 0) {
++ unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
++ per_cpu(cpep_irq, cpu) = -1;
++ }
++ if (per_cpu(cmcp_irq,cpu) >= 0) {
++ unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
++ per_cpu(cmcp_irq, cpu) = -1;
++ }
++ if (per_cpu(cmc_irq,cpu) >= 0) {
++ unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
++ per_cpu(cmc_irq, cpu) = -1;
++ }
++ if (per_cpu(ipi_irq,cpu) >= 0) {
++ unbind_from_irqhandler (per_cpu(ipi_irq, cpu), NULL);
++ per_cpu(ipi_irq, cpu) = -1;
++ }
++ if (per_cpu(resched_irq,cpu) >= 0) {
++ unbind_from_irqhandler (per_cpu(resched_irq, cpu),
++ NULL);
++ per_cpu(resched_irq, cpu) = -1;
++ }
++ if (per_cpu(timer_irq,cpu) >= 0) {
++ unbind_from_irqhandler (per_cpu(timer_irq, cpu), NULL);
++ per_cpu(timer_irq, cpu) = -1;
++ }
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block unbind_evtchn_notifier = {
++ .notifier_call = unbind_evtchn_callback,
++ .priority = 0
++};
++#endif
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++void xen_smp_intr_init(void)
++{
++#ifdef CONFIG_SMP
++ unsigned int cpu = smp_processor_id();
++ unsigned int i = 0;
++ struct callback_register event = {
++ .type = CALLBACKTYPE_event,
++ .address = (unsigned long)&xen_event_callback,
++ };
++
++ if (cpu == 0) {
++ /* Initialization was already done for boot cpu. */
++#ifdef CONFIG_HOTPLUG_CPU
++ /* Register the notifier only once. */
++ register_cpu_notifier(&unbind_evtchn_notifier);
++#endif
++ return;
++ }
++
++	/* This should piggyback on setting up the vcpu guest context */
++ BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
++
++ for (i = 0; i < saved_irq_cnt; i++)
++ xen_register_percpu_irq(saved_percpu_irqs[i].irq,
++ saved_percpu_irqs[i].action, 0);
++#endif /* CONFIG_SMP */
++}
++#endif /* CONFIG_XEN */
++
+ void
+ register_percpu_irq (ia64_vector vec, struct irqaction *action)
+ {
+ irq_desc_t *desc;
+ unsigned int irq;
++
++#ifdef CONFIG_XEN
++ if (is_running_on_xen())
++ return xen_register_percpu_irq(vec, action, 1);
++#endif
+
+ for (irq = 0; irq < NR_IRQS; ++irq)
+ if (irq_to_vector(irq) == vec) {
+@@ -299,6 +573,21 @@ void __init
+ void __init
+ init_IRQ (void)
+ {
++#ifdef CONFIG_XEN
++ /* Maybe put into platform_irq_init later */
++ if (is_running_on_xen()) {
++ struct callback_register event = {
++ .type = CALLBACKTYPE_event,
++ .address = (unsigned long)&xen_event_callback,
++ };
++ xen_init_IRQ();
++ BUG_ON(HYPERVISOR_callback_op(CALLBACKOP_register, &event));
++ late_time_init = xen_bind_early_percpu_irq;
++#ifdef CONFIG_SMP
++ register_percpu_irq(IA64_IPI_RESCHEDULE, &resched_irqaction);
++#endif /* CONFIG_SMP */
++ }
++#endif /* CONFIG_XEN */
+ register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
+ #ifdef CONFIG_SMP
+ register_percpu_irq(IA64_IPI_VECTOR, &ipi_irqaction);
+@@ -317,6 +606,46 @@ ia64_send_ipi (int cpu, int vector, int
+ unsigned long ipi_data;
+ unsigned long phys_cpu_id;
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ int irq = -1;
++
++#ifdef CONFIG_SMP
++ /* TODO: we need to call vcpu_up here */
++ if (unlikely(vector == ap_wakeup_vector)) {
++ extern void xen_send_ipi (int cpu, int vec);
++ xen_send_ipi (cpu, vector);
++ //vcpu_prepare_and_up(cpu);
++ return;
++ }
++#endif
++
++ switch(vector) {
++ case IA64_IPI_VECTOR:
++ irq = per_cpu(ipi_to_irq, cpu)[IPI_VECTOR];
++ break;
++ case IA64_IPI_RESCHEDULE:
++ irq = per_cpu(ipi_to_irq, cpu)[RESCHEDULE_VECTOR];
++ break;
++ case IA64_CMCP_VECTOR:
++ irq = per_cpu(ipi_to_irq, cpu)[CMCP_VECTOR];
++ break;
++ case IA64_CPEP_VECTOR:
++ irq = per_cpu(ipi_to_irq, cpu)[CPEP_VECTOR];
++ break;
++ default:
++ printk(KERN_WARNING "Unsupported IPI type 0x%x\n",
++ vector);
++ irq = 0;
++ break;
++ }
++
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++ return;
++ }
++#endif /* CONFIG_XEN */
++
+ #ifdef CONFIG_SMP
+ phys_cpu_id = cpu_physical_id(cpu);
+ #else
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/pal.S
+--- a/arch/ia64/kernel/pal.S Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/pal.S Wed Aug 08 16:25:28 2007 -0300
+@@ -16,6 +16,7 @@
+ #include <asm/processor.h>
+
+ .data
++ .globl pal_entry_point
+ pal_entry_point:
+ data8 ia64_pal_default_handler
+ .text
+@@ -86,7 +87,7 @@ 1: mov psr.l = loc3
+ ;;
+ 	srlz.d				// serialize restoration of psr.l
+ br.ret.sptk.many b0
+-END(ia64_pal_call_static)
++END(__ia64_pal_call_static)
+
+ /*
+ * Make a PAL call using the stacked registers calling convention.
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/patch.c
+--- a/arch/ia64/kernel/patch.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/patch.c Wed Aug 08 16:25:28 2007 -0300
+@@ -184,6 +184,69 @@ patch_brl_fsys_bubble_down (unsigned lon
+ ia64_srlz_i();
+ }
+
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++extern char __start_gate_running_on_xen_patchlist[];
++extern char __end_gate_running_on_xen_patchlist[];
++
++void __init
++patch_running_on_xen(unsigned long start, unsigned long end)
++{
++ extern int running_on_xen;
++ s32 *offp = (s32 *)start;
++ u64 ip;
++
++ while (offp < (s32 *)end) {
++ ip = (u64)ia64_imva((char *)offp + *offp);
++ ia64_patch_imm64(ip, (u64)&running_on_xen);
++ ia64_fc((void *)ip);
++ ++offp;
++ }
++ ia64_sync_i();
++ ia64_srlz_i();
++}
++
++static void __init
++patch_brl_symaddr(unsigned long start, unsigned long end,
++ unsigned long symaddr)
++{
++ s32 *offp = (s32 *)start;
++ u64 ip;
++
++ while (offp < (s32 *)end) {
++ ip = (u64)offp + *offp;
++ ia64_patch_imm60((u64)ia64_imva((void *)ip),
++ (u64)(symaddr - (ip & -16)) / 16);
++ ia64_fc((void *)ip);
++ ++offp;
++ }
++ ia64_sync_i();
++ ia64_srlz_i();
++}
++
++#define EXTERN_PATCHLIST(name) \
++ extern char __start_gate_brl_##name##_patchlist[]; \
++ extern char __end_gate_brl_##name##_patchlist[]; \
++ extern char name[]
++
++#define PATCH_BRL_SYMADDR(name) \
++ patch_brl_symaddr((unsigned long)__start_gate_brl_##name##_patchlist, \
++ (unsigned long)__end_gate_brl_##name##_patchlist, \
++ (unsigned long)name)
++
++static void __init
++patch_brl_in_vdso(void)
++{
++ EXTERN_PATCHLIST(xen_ssm_i_0);
++ EXTERN_PATCHLIST(xen_ssm_i_1);
++
++ PATCH_BRL_SYMADDR(xen_ssm_i_0);
++ PATCH_BRL_SYMADDR(xen_ssm_i_1);
++}
++#else
++#define patch_running_on_xen(start, end) do { } while (0)
++#define patch_brl_in_vdso() do { } while (0)
++#endif
++
+ void __init
+ ia64_patch_gate (void)
+ {
+@@ -192,6 +255,10 @@ ia64_patch_gate (void)
+
+ patch_fsyscall_table(START(fsyscall), END(fsyscall));
+ patch_brl_fsys_bubble_down(START(brl_fsys_bubble_down), END(brl_fsys_bubble_down));
++#ifdef CONFIG_XEN
++ patch_running_on_xen(START(running_on_xen), END(running_on_xen));
++ patch_brl_in_vdso();
++#endif
+ ia64_patch_vtop(START(vtop), END(vtop));
+ ia64_patch_mckinley_e9(START(mckinley_e9), END(mckinley_e9));
+ }
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/perfmon.c
+--- a/arch/ia64/kernel/perfmon.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/perfmon.c Wed Aug 08 16:25:28 2007 -0300
+@@ -53,6 +53,28 @@
+ #include <asm/delay.h>
+
+ #ifdef CONFIG_PERFMON
++#ifdef CONFIG_XEN
++//#include <xen/xenoprof.h>
++#include <xen/interface/xenoprof.h>
++
++static int xenoprof_is_primary = 0;
++#define init_xenoprof_primary(is_primary) (xenoprof_is_primary = (is_primary))
++#define is_xenoprof_primary() (xenoprof_is_primary)
++#define XEN_NOT_SUPPORTED_YET \
++ do { \
++ if (is_running_on_xen()) { \
++ printk("%s is not supported yet under xen.\n", \
++ __func__); \
++ return -ENOSYS; \
++ } \
++ } while (0)
++#else
++#define init_xenoprof_primary(is_primary) do { } while (0)
++#define is_xenoprof_primary() (0)
++#define XEN_NOT_SUPPORTED_YET do { } while (0)
++#define HYPERVISOR_perfmon_op(cmd, arg, count) do { } while (0)
++#endif
++
+ /*
+ * perfmon context state
+ */
+@@ -1516,6 +1538,7 @@ pfm_read(struct file *filp, char __user
+ ssize_t ret;
+ unsigned long flags;
+ DECLARE_WAITQUEUE(wait, current);
++ XEN_NOT_SUPPORTED_YET;
+ if (PFM_IS_FILE(filp) == 0) {
+ printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid);
+ return -EINVAL;
+@@ -2114,6 +2137,15 @@ doit:
+ */
+ if (free_possible) pfm_context_free(ctx);
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary()) {
++ int ret = HYPERVISOR_perfmon_op(PFM_DESTROY_CONTEXT,
++ NULL, 0);
++ if (ret)
++ printk("%s:%d PFM_DESTROY_CONTEXT hypercall "
++ "failed\n", __func__, __LINE__);
++ }
++ }
+ return 0;
+ }
+
+@@ -2737,6 +2769,23 @@ pfm_context_create(pfm_context_t *ctx, v
+ */
+ pfm_reset_pmu_state(ctx);
+
++ if (is_running_on_xen()) {
++ /*
++ * kludge to get xenoprof.is_primary.
++		 * XENOPROF_init on ia64 is a nop, so it is safe to call it here.
++ */
++ struct xenoprof_init init;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
++ if (ret)
++ goto buffer_error;
++ init_xenoprof_primary(init.is_primary);
++
++ if (is_xenoprof_primary()) {
++ ret = HYPERVISOR_perfmon_op(PFM_CREATE_CONTEXT, arg, 0);
++ if (ret)
++ goto buffer_error;
++ }
++ }
+ return 0;
+
+ buffer_error:
+@@ -2872,6 +2921,12 @@ pfm_write_pmcs(pfm_context_t *ctx, void
+ pfm_reg_check_t wr_func;
+ #define PFM_CHECK_PMC_PM(x, y, z) ((x)->ctx_fl_system ^ PMC_PM(y, z))
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_WRITE_PMCS,
++ arg, count);
++ return 0;
++ }
+ state = ctx->ctx_state;
+ is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
+ is_system = ctx->ctx_fl_system;
+@@ -3110,6 +3165,12 @@ pfm_write_pmds(pfm_context_t *ctx, void
+ int ret = -EINVAL;
+ pfm_reg_check_t wr_func;
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_WRITE_PMDS,
++ arg, count);
++ return 0;
++ }
+
+ state = ctx->ctx_state;
+ is_loaded = state == PFM_CTX_LOADED ? 1 : 0;
+@@ -3305,6 +3366,7 @@ pfm_read_pmds(pfm_context_t *ctx, void *
+ int is_loaded, is_system, is_counting, expert_mode;
+ int ret = -EINVAL;
+ pfm_reg_check_t rd_func;
++ XEN_NOT_SUPPORTED_YET;
+
+ /*
+ * access is possible when loaded only for
+@@ -3555,6 +3617,7 @@ pfm_restart(pfm_context_t *ctx, void *ar
+ pfm_ovfl_ctrl_t rst_ctrl;
+ int state, is_system;
+ int ret = 0;
++ XEN_NOT_SUPPORTED_YET;
+
+ state = ctx->ctx_state;
+ fmt = ctx->ctx_buf_fmt;
+@@ -3704,6 +3767,7 @@ pfm_debug(pfm_context_t *ctx, void *arg,
+ pfm_debug(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
+ {
+ unsigned int m = *(unsigned int *)arg;
++ XEN_NOT_SUPPORTED_YET;
+
+ pfm_sysctl.debug = m == 0 ? 0 : 1;
+
+@@ -3974,6 +4038,8 @@ pfm_get_features(pfm_context_t *ctx, voi
+ {
+ pfarg_features_t *req = (pfarg_features_t *)arg;
+
++ if (is_running_on_xen())
++ return HYPERVISOR_perfmon_op(PFM_GET_FEATURES, &arg, 0);
+ req->ft_version = PFM_VERSION;
+ return 0;
+ }
+@@ -3984,6 +4050,12 @@ pfm_stop(pfm_context_t *ctx, void *arg,
+ struct pt_regs *tregs;
+ struct task_struct *task = PFM_CTX_TASK(ctx);
+ int state, is_system;
++
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_STOP, NULL, 0);
++ return 0;
++ }
+
+ state = ctx->ctx_state;
+ is_system = ctx->ctx_fl_system;
+@@ -4073,6 +4145,11 @@ pfm_start(pfm_context_t *ctx, void *arg,
+ struct pt_regs *tregs;
+ int state, is_system;
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_START, NULL, 0);
++ return 0;
++ }
+ state = ctx->ctx_state;
+ is_system = ctx->ctx_fl_system;
+
+@@ -4155,6 +4232,7 @@ pfm_get_pmc_reset(pfm_context_t *ctx, vo
+ unsigned int cnum;
+ int i;
+ int ret = -EINVAL;
++ XEN_NOT_SUPPORTED_YET;
+
+ for (i = 0; i < count; i++, req++) {
+
+@@ -4213,6 +4291,11 @@ pfm_context_load(pfm_context_t *ctx, voi
+ int ret = 0;
+ int state, is_system, set_dbregs = 0;
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_LOAD_CONTEXT, arg, 0);
++ return 0;
++ }
+ state = ctx->ctx_state;
+ is_system = ctx->ctx_fl_system;
+ /*
+@@ -4461,6 +4544,12 @@ pfm_context_unload(pfm_context_t *ctx, v
+ int prev_state, is_system;
+ int ret;
+
++ if (is_running_on_xen()) {
++ if (is_xenoprof_primary())
++ return HYPERVISOR_perfmon_op(PFM_UNLOAD_CONTEXT,
++ NULL, 0);
++ return 0;
++ }
+ DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1));
+
+ prev_state = ctx->ctx_state;
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/setup.c
+--- a/arch/ia64/kernel/setup.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/setup.c Wed Aug 08 16:25:28 2007 -0300
+@@ -61,6 +61,12 @@
+ #include <asm/system.h>
+ #include <asm/unistd.h>
+ #include <asm/system.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#include <asm/xen/xencomm.h>
++#include <xen/xencons.h>
++#endif
++#include <linux/dma-mapping.h>
+
+ #if defined(CONFIG_SMP) && (IA64_CPU_SIZE > PAGE_SIZE)
+ # error "struct cpuinfo_ia64 too big!"
+@@ -69,6 +75,36 @@
+ #ifdef CONFIG_SMP
+ unsigned long __per_cpu_offset[NR_CPUS];
+ EXPORT_SYMBOL(__per_cpu_offset);
++#endif
++
++#ifdef CONFIG_XEN
++static void
++xen_panic_hypercall(struct unw_frame_info *info, void *arg)
++{
++ current->thread.ksp = (__u64)info->sw - 16;
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++}
++
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ unw_init_running(xen_panic_hypercall, NULL);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block xen_panic_block = {
++ .notifier_call = xen_panic_event,
++ .next = NULL,
++ .priority = 0 /* try to go last */
++};
++
++void xen_pm_power_off(void)
++{
++ local_irq_disable();
++ HYPERVISOR_shutdown(SHUTDOWN_poweroff);
++}
+ #endif
+
+ extern void ia64_setup_printk_clock(void);
+@@ -177,15 +213,33 @@ filter_rsvd_memory (unsigned long start,
+ return 0;
+ }
+
++static int __init
++rsvd_region_cmp(struct rsvd_region *lhs, struct rsvd_region *rhs)
++{
++ if (lhs->start > rhs->start)
++ return 1;
++ if (lhs->start < rhs->start)
++ return -1;
++
++ if (lhs->end > rhs->end)
++ return 1;
++ if (lhs->end < rhs->end)
++ return -1;
++
++ return 0;
++}
++
+ static void __init
+ sort_regions (struct rsvd_region *rsvd_region, int max)
+ {
++ int num = max;
+ int j;
+
+ /* simple bubble sorting */
+ while (max--) {
+ for (j = 0; j < max; ++j) {
+- if (rsvd_region[j].start > rsvd_region[j+1].start) {
++ if (rsvd_region_cmp(&rsvd_region[j],
++ &rsvd_region[j + 1]) > 0) {
+ struct rsvd_region tmp;
+ tmp = rsvd_region[j];
+ rsvd_region[j] = rsvd_region[j + 1];
+@@ -193,6 +247,36 @@ sort_regions (struct rsvd_region *rsvd_r
+ }
+ }
+ }
++
++ for (j = 0; j < num - 1; j++) {
++ int k;
++ unsigned long start = rsvd_region[j].start;
++ unsigned long end = rsvd_region[j].end;
++ int collapsed;
++
++ for (k = j + 1; k < num; k++) {
++ BUG_ON(start > rsvd_region[k].start);
++ if (end < rsvd_region[k].start) {
++ k--;
++ break;
++ }
++ end = max(end, rsvd_region[k].end);
++ }
++ if (k == num)
++ k--;
++ rsvd_region[j].end = end;
++ collapsed = k - j;
++ num -= collapsed;
++ for (k = j + 1; k < num; k++) {
++ rsvd_region[k] = rsvd_region[k + collapsed];
++ }
++ }
++
++ num_rsvd_regions = num;
++ for (j = 0; j < num; j++) {
++		printk("rsvd_region[%d]: [0x%016lx, 0x%016lx)\n",
++ j, rsvd_region[j].start, rsvd_region[j].end);
++ }
+ }
+
+ /*
+@@ -242,6 +326,14 @@ reserve_memory (void)
+ rsvd_region[n].start = (unsigned long) ia64_imva((void *)KERNEL_START);
+ rsvd_region[n].end = (unsigned long) ia64_imva(_end);
+ n++;
++
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ rsvd_region[n].start = (unsigned long)__va((HYPERVISOR_shared_info->arch.start_info_pfn << PAGE_SHIFT));
++ rsvd_region[n].end = rsvd_region[n].start + PAGE_SIZE;
++ n++;
++ }
++#endif
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+ if (ia64_boot_param->initrd_start) {
+@@ -460,6 +552,19 @@ setup_arch (char **cmdline_p)
+ {
+ unw_init();
+
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ /* Must be done before any hypercall. */
++ xencomm_init();
++
++ setup_xen_features();
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list,
++ &xen_panic_block);
++ pm_power_off = xen_pm_power_off;
++ }
++#endif
++
+ ia64_patch_vtop((u64) __start___vtop_patchlist, (u64) __end___vtop_patchlist);
+
+ *cmdline_p = __va(ia64_boot_param->command_line);
+@@ -538,14 +643,79 @@ setup_arch (char **cmdline_p)
+ conswitchp = &vga_con;
+ # endif
+ }
+-#endif
++#ifdef CONFIG_XEN
++ if (is_running_on_xen()) {
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ xen_start_info = __va(s->arch.start_info_pfn << PAGE_SHIFT);
++
++ printk("Running on Xen! start_info_pfn=0x%lx nr_pages=%ld "
++ "flags=0x%x\n", s->arch.start_info_pfn,
++ xen_start_info->nr_pages, xen_start_info->flags);
++
++ if (!is_initial_xendomain()) {
++#if !defined(CONFIG_VT) || !defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = NULL;
++#endif
++ }
++
++ /*
++ * If a console= is NOT specified, we assume using the
++ * xencons console is desired. By default, this is ttyS0
++ * for dom0 and tty0 for domU.
++ */
++ if (!strstr(*cmdline_p, "console=")) {
++ char *p, *q, name[5];
++ int offset = 0;
++
++ if (is_initial_xendomain())
++ strncpy(name, "ttyS", 4);
++ else
++ strncpy(name, "tty", 3);
++
++ p = strstr(*cmdline_p, "xencons=");
++
++ if (p) {
++ p += 8;
++ if (!strncmp(p, "ttyS", 4)) {
++ strncpy(name, p, 4);
++ p += 4;
++ offset = simple_strtol(p, &q, 10);
++ if (p == q)
++ offset = 0;
++ } else if (!strncmp(p, "tty", 3) ||
++ !strncmp(p, "xvc", 3)) {
++ strncpy(name, p, 3);
++ p += 3;
++ offset = simple_strtol(p, &q, 10);
++ if (p == q)
++ offset = 0;
++ } else if (!strncmp(p, "off", 3))
++ offset = -1;
++ }
++
++ if (offset >= 0)
++ add_preferred_console(name, offset, NULL);
++ }
++ }
++ xencons_early_setup();
++#endif
++#endif
++
+
+ /* enable IA-64 Machine Check Abort Handling unless disabled */
++#ifdef CONFIG_XEN
++ if (is_running_on_xen() && !is_initial_xendomain())
++ nomca = 1;
++#endif
+ if (!nomca)
+ ia64_mca_init();
+
+ platform_setup(cmdline_p);
+ paging_init();
++#ifdef CONFIG_XEN
++ contiguous_bitmap_init(max_pfn);
++#endif
+ }
+
+ /*
+@@ -951,6 +1121,15 @@ cpu_init (void)
+ /* size of physical stacked register partition plus 8 bytes: */
+ __get_cpu_var(ia64_phys_stacked_size_p8) = num_phys_stacked*8 + 8;
+ platform_cpu_init();
++
++#ifdef CONFIG_XEN
++ /* Need to be moved into platform_cpu_init later */
++ if (is_running_on_xen()) {
++ extern void xen_smp_intr_init(void);
++ xen_smp_intr_init();
++ }
++#endif
++
+ pm_idle = default_idle;
+ }
+
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/kernel/time.c
+--- a/arch/ia64/kernel/time.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/kernel/time.c Wed Aug 08 16:25:28 2007 -0300
+@@ -29,6 +29,13 @@
+ #include <asm/sections.h>
+ #include <asm/system.h>
+
++#ifdef CONFIG_XEN
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
++#include <xen/interface/vcpu.h>
++#include <asm/percpu.h>
++#endif
++
+ volatile int time_keeper_id = 0; /* smp_processor_id() of time-keeper */
+
+ #ifdef CONFIG_IA64_DEBUG_IRQ
+@@ -36,6 +43,13 @@ unsigned long last_cli_ip;
+ unsigned long last_cli_ip;
+ EXPORT_SYMBOL(last_cli_ip);
+
++#endif
++
++#ifdef CONFIG_XEN
++DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++DEFINE_PER_CPU(unsigned long, processed_stolen_time);
++DEFINE_PER_CPU(unsigned long, processed_blocked_time);
++#define NS_PER_TICK (1000000000LL/HZ)
+ #endif
+
+ static struct time_interpolator itc_interpolator = {
+@@ -44,10 +58,96 @@ static struct time_interpolator itc_inte
+ .source = TIME_SOURCE_CPU
+ };
+
++#ifdef CONFIG_XEN
++static unsigned long
++consider_steal_time(unsigned long new_itm, struct pt_regs *regs)
++{
++ unsigned long stolen, blocked, sched_time;
++ unsigned long delta_itm = 0, stolentick = 0;
++ int i, cpu = smp_processor_id();
++ struct vcpu_runstate_info *runstate;
++ struct task_struct *p = current;
++
++ runstate = &per_cpu(runstate, smp_processor_id());
++
++ do {
++ sched_time = runstate->state_entry_time;
++ mb();
++ stolen = runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline] -
++ per_cpu(processed_stolen_time, cpu);
++ blocked = runstate->time[RUNSTATE_blocked] -
++ per_cpu(processed_blocked_time, cpu);
++ mb();
++ } while (sched_time != runstate->state_entry_time);
++
++	/*
++	 * Check for the effect of vcpu migration: the ITC value can go
++	 * backwards, which would yield a huge bogus stolen value.  Detect
++	 * that case here and reject the sample.
++	 */
++ if (!time_after_eq(runstate->time[RUNSTATE_blocked],
++ per_cpu(processed_blocked_time, cpu)))
++ blocked = 0;
++
++ if (!time_after_eq(runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline],
++ per_cpu(processed_stolen_time, cpu)))
++ stolen = 0;
++
++ if (!time_after(delta_itm + new_itm, ia64_get_itc()))
++ stolentick = ia64_get_itc() - delta_itm - new_itm;
++
++ do_div(stolentick, NS_PER_TICK);
++ stolentick++;
++
++ do_div(stolen, NS_PER_TICK);
++
++ if (stolen > stolentick)
++ stolen = stolentick;
++
++ stolentick -= stolen;
++ do_div(blocked, NS_PER_TICK);
++
++ if (blocked > stolentick)
++ blocked = stolentick;
++
++ if (stolen > 0 || blocked > 0) {
++ account_steal_time(NULL, jiffies_to_cputime(stolen));
++ account_steal_time(idle_task(cpu), jiffies_to_cputime(blocked));
++ run_local_timers();
++
++ if (rcu_pending(cpu))
++ rcu_check_callbacks(cpu, user_mode(regs));
++
++ scheduler_tick();
++ run_posix_cpu_timers(p);
++ delta_itm += local_cpu_data->itm_delta * (stolen + blocked);
++
++ if (cpu == time_keeper_id) {
++ write_seqlock(&xtime_lock);
++ for(i = 0; i < stolen + blocked; i++)
++ do_timer(regs);
++ local_cpu_data->itm_next = delta_itm + new_itm;
++ write_sequnlock(&xtime_lock);
++ } else {
++ local_cpu_data->itm_next = delta_itm + new_itm;
++ }
++ per_cpu(processed_stolen_time,cpu) += NS_PER_TICK * stolen;
++ per_cpu(processed_blocked_time,cpu) += NS_PER_TICK * blocked;
++ }
++ return delta_itm;
++}
++#else
++#define consider_steal_time(new_itm, regs) (0)
++#endif
++
+ static irqreturn_t
+ timer_interrupt (int irq, void *dev_id)
+ {
+ unsigned long new_itm;
++ unsigned long delta_itm; /* XEN */
+
+ if (unlikely(cpu_is_offline(smp_processor_id()))) {
+ return IRQ_HANDLED;
+@@ -62,6 +162,13 @@ timer_interrupt (int irq, void *dev_id)
+ ia64_get_itc(), new_itm);
+
+ profile_tick(CPU_PROFILING);
++
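++	/*
++	 * Under Xen, account stolen/blocked time first.  If that already
++	 * advanced new_itm past the current ITC, the normal per-tick
++	 * accounting loop below has nothing left to do, so skip it.
++	 */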
++ if (is_running_on_xen()) {
++ delta_itm = consider_steal_time(new_itm, regs);
++ new_itm += delta_itm;
++ if (time_after(new_itm, ia64_get_itc()) && delta_itm)
++ goto skip_process_time_accounting;
++ }
+
+ while (1) {
+ update_process_times(user_mode(get_irq_regs()));
+@@ -91,6 +198,8 @@ timer_interrupt (int irq, void *dev_id)
+ local_irq_enable();
+ local_irq_disable();
+ }
++
++skip_process_time_accounting: /* XEN */
+
+ do {
+ /*
+@@ -146,6 +255,25 @@ static int __init nojitter_setup(char *s
+
+ __setup("nojitter", nojitter_setup);
+
++#ifdef CONFIG_XEN
++/* taken from i386/kernel/time-xen.c */
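++/*
++ * Register this vcpu's runstate area with Xen and record the current
++ * blocked and stolen totals as the baseline for later accounting.
++ */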
++static void init_missing_ticks_accounting(int cpu)
++{
++ struct vcpu_register_runstate_memory_area area;
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++
++ memset(runstate, 0, sizeof(*runstate));
++
++ area.addr.v = runstate;
++ HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++
++ per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
++ per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
++ + runstate->time[RUNSTATE_offline];
++}
++#else
++#define init_missing_ticks_accounting(cpu) do {} while (0)
++#endif
+
+ void __devinit
+ ia64_init_itm (void)
+@@ -229,6 +357,9 @@ ia64_init_itm (void)
+ register_time_interpolator(&itc_interpolator);
+ }
+
++ if (is_running_on_xen())
++ init_missing_ticks_accounting(smp_processor_id());
++
+ /* Setup the CPU local timer tick */
+ ia64_cpu_local_tick();
+ }
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/mm/ioremap.c
+--- a/arch/ia64/mm/ioremap.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/mm/ioremap.c Wed Aug 08 16:25:28 2007 -0300
+@@ -16,6 +16,9 @@ static inline void __iomem *
+ static inline void __iomem *
+ __ioremap (unsigned long offset, unsigned long size)
+ {
++#ifdef CONFIG_XEN
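++	/* Ask Xen for the offset to use for this I/O range. */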
++ offset = HYPERVISOR_ioremap(offset, size);
++#endif
+ return (void __iomem *) (__IA64_UNCACHED_OFFSET | offset);
+ }
+
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/oprofile/Makefile
+--- a/arch/ia64/oprofile/Makefile Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/oprofile/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -8,3 +8,7 @@ DRIVER_OBJS := $(addprefix ../../../driv
+
+ oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+ oprofile-$(CONFIG_PERFMON) += perfmon.o
++ifeq ($(CONFIG_XEN), y)
++oprofile-$(CONFIG_PERFMON) += xenoprof.o \
++ ../../../drivers/xen/xenoprof/xenoprofile.o
++endif
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/oprofile/init.c
+--- a/arch/ia64/oprofile/init.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/oprofile/init.c Wed Aug 08 16:25:28 2007 -0300
+@@ -11,6 +11,7 @@
+ #include <linux/oprofile.h>
+ #include <linux/init.h>
+ #include <linux/errno.h>
++#include "oprofile_perfmon.h"
+
+ extern int perfmon_init(struct oprofile_operations * ops);
+ extern void perfmon_exit(void);
+@@ -19,6 +20,13 @@ int __init oprofile_arch_init(struct opr
+ int __init oprofile_arch_init(struct oprofile_operations * ops)
+ {
+ int ret = -ENODEV;
++
++ if (is_running_on_xen()) {
++ ret = xen_perfmon_init();
++ if (ret)
++ return ret;
++ return xenoprofile_init(ops);
++ }
+
+ #ifdef CONFIG_PERFMON
+ /* perfmon_init() can fail, but we have no way to report it */
+@@ -32,6 +40,12 @@ int __init oprofile_arch_init(struct opr
+
+ void oprofile_arch_exit(void)
+ {
++ if (is_running_on_xen()) {
++ xenoprofile_exit();
++ xen_perfmon_exit();
++ return;
++ }
++
+ #ifdef CONFIG_PERFMON
+ perfmon_exit();
+ #endif
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/oprofile/oprofile_perfmon.h
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/oprofile/oprofile_perfmon.h Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,28 @@
++#ifndef OPROFILE_PERFMON_H
++#define OPROFILE_PERFMON_H
++
++#ifdef CONFIG_PERFMON
++int __perfmon_init(void);
++void __perfmon_exit(void);
++int perfmon_start(void);
++void perfmon_stop(void);
++#else
++#define __perfmon_init() (-ENOSYS)
++#define __perfmon_exit() do {} while (0)
++#endif /* CONFIG_PERFMON */
++
++#ifdef CONFIG_XEN
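++/*
++ * With Xen, xenoprof.c calls the perfmon start/stop/init/exit helpers
++ * directly, so they must not be static; without Xen they remain
++ * file-local to perfmon.c.
++ */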
++#define STATIC_IF_NO_XEN /* nothing */
++#define xen_perfmon_init() __perfmon_init()
++#define xen_perfmon_exit() __perfmon_exit()
++extern int xenoprofile_init(struct oprofile_operations * ops);
++extern void xenoprofile_exit(void);
++#else
++#define STATIC_IF_NO_XEN static
++#define xen_perfmon_init() (-ENOSYS)
++#define xen_perfmon_exit() do {} while (0)
++#define xenoprofile_init() (-ENOSYS)
++#define xenoprofile_exit() do {} while (0)
++#endif /* CONFIG_XEN */
++
++#endif /* OPROFILE_PERFMON_H */
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/oprofile/perfmon.c
+--- a/arch/ia64/oprofile/perfmon.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/oprofile/perfmon.c Wed Aug 08 16:25:28 2007 -0300
+@@ -13,6 +13,7 @@
+ #include <asm/perfmon.h>
+ #include <asm/ptrace.h>
+ #include <asm/errno.h>
++#include "oprofile_perfmon.h"
+
+ static int allow_ints;
+
+@@ -33,14 +34,16 @@ perfmon_handler(struct task_struct *task
+ }
+
+
+-static int perfmon_start(void)
++STATIC_IF_NO_XEN
++int perfmon_start(void)
+ {
+ allow_ints = 1;
+ return 0;
+ }
+
+
+-static void perfmon_stop(void)
++STATIC_IF_NO_XEN
++void perfmon_stop(void)
+ {
+ allow_ints = 0;
+ }
+@@ -75,16 +78,35 @@ static char * get_cpu_type(void)
+
+ static int using_perfmon;
+
++STATIC_IF_NO_XEN
++int __perfmon_init(void)
++{
++ int ret = pfm_register_buffer_fmt(&oprofile_fmt);
++ if (ret)
++ return -ENODEV;
++
++ using_perfmon = 1;
++ return 0;
++}
++
++STATIC_IF_NO_XEN
++void __perfmon_exit(void)
++{
++ if (!using_perfmon)
++ return;
++
++ pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
++}
++
+ int perfmon_init(struct oprofile_operations * ops)
+ {
+- int ret = pfm_register_buffer_fmt(&oprofile_fmt);
++ int ret = __perfmon_init();
+ if (ret)
+ return -ENODEV;
+
+ ops->cpu_type = get_cpu_type();
+ ops->start = perfmon_start;
+ ops->stop = perfmon_stop;
+- using_perfmon = 1;
+ printk(KERN_INFO "oprofile: using perfmon.\n");
+ return 0;
+ }
+@@ -92,8 +114,5 @@ int perfmon_init(struct oprofile_operati
+
+ void perfmon_exit(void)
+ {
+- if (!using_perfmon)
+- return;
+-
+- pfm_unregister_buffer_fmt(oprofile_fmt.fmt_uuid);
++ __perfmon_exit();
+ }
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/oprofile/xenoprof.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/oprofile/xenoprof.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,142 @@
++/******************************************************************************
++ * xenoprof ia64 specific part
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#include <linux/init.h>
++#include <linux/oprofile.h>
++#include <linux/ioport.h>
++
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include <xen/xenoprof.h>
++
++#include "oprofile_perfmon.h"
++
++void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
++{
++ init->num_events = 0; /* perfmon manages. */
++}
++
++void xenoprof_arch_counter(void)
++{
++ /* nothing. perfmon does. */
++}
++
++void xenoprof_arch_start(void)
++{
++ perfmon_start();
++}
++
++void xenoprof_arch_stop(void)
++{
++ perfmon_stop();
++}
++
++/* XXX move them to an appropriate header file. */
++struct resource* xen_ia64_allocate_resource(unsigned long size);
++void xen_ia64_release_resource(struct resource* res);
++void xen_ia64_unmap_resource(struct resource* res);
++
++struct resource*
++xenoprof_ia64_allocate_resource(int32_t max_samples)
++{
++ unsigned long bufsize;
++
++ /* XXX add hypercall to get bufsize? */
++ /* this value is taken from alloc_xenoprof_struct(). */
++#if 0
++ bufsize = NR_CPUS * (sizeof(struct xenoprof_buf) +
++ (max_samples - 1) * sizeof(struct event_log));
++ bufsize = PAGE_ALIGN(bufsize) + PAGE_SIZE;
++#else
++#define MAX_OPROF_SHARED_PAGES 32
++ bufsize = (MAX_OPROF_SHARED_PAGES + 1) * PAGE_SIZE;
++#endif
++ return xen_ia64_allocate_resource(bufsize);
++}
++
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf)
++{
++ if (sbuf->buffer) {
++ xen_ia64_unmap_resource(sbuf->arch.res);
++ sbuf->buffer = NULL;
++ sbuf->arch.res = NULL;
++ }
++}
++
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer,
++ struct xenoprof_shared_buffer* sbuf)
++{
++ int ret;
++ struct resource* res;
++
++ sbuf->buffer = NULL;
++ sbuf->arch.res = NULL;
++
++ res = xenoprof_ia64_allocate_resource(get_buffer->max_samples);
++ if (IS_ERR(res))
++ return PTR_ERR(res);
++
++ get_buffer->buf_gmaddr = res->start;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer);
++ if (ret) {
++ xen_ia64_release_resource(res);
++ return ret;
++ }
++
++ BUG_ON((res->end - res->start + 1) <
++ get_buffer->bufsize * get_buffer->nbuf);
++
++ sbuf->buffer = __va(res->start);
++ sbuf->arch.res = res;
++
++ return ret;
++}
++
++int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain,
++ struct xenoprof_shared_buffer* sbuf)
++{
++ int ret;
++ struct resource* res;
++
++ sbuf->buffer = NULL;
++ sbuf->arch.res = NULL;
++
++ res = xenoprof_ia64_allocate_resource(pdomain->max_samples);
++ if (IS_ERR(res))
++ return PTR_ERR(res);
++
++ pdomain->buf_gmaddr = res->start;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
++ if (ret) {
++ xen_ia64_release_resource(res);
++ return ret;
++ }
++
++ BUG_ON((res->end - res->start + 1) < pdomain->bufsize * pdomain->nbuf);
++
++ sbuf->buffer = __va(res->start);
++ sbuf->arch.res = res;
++
++ return ret;
++}
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/pci/pci.c
+--- a/arch/ia64/pci/pci.c Wed Jul 18 12:23:24 2007 -0300
++++ b/arch/ia64/pci/pci.c Wed Aug 08 16:25:28 2007 -0300
+@@ -609,6 +609,14 @@ pci_mmap_page_range (struct pci_dev *dev
+ else
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
++ if (is_initial_xendomain()) {
++ unsigned long addr = vma->vm_pgoff << PAGE_SHIFT;
++ size_t size = vma->vm_end - vma->vm_start;
++ unsigned long offset = HYPERVISOR_ioremap(addr, size);
++ if (IS_ERR_VALUE(offset))
++ return offset;
++ }
++
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ vma->vm_end - vma->vm_start, vma->vm_page_prot))
+ return -EAGAIN;
+@@ -665,6 +673,14 @@ pci_mmap_legacy_page_range(struct pci_bu
+
+ vma->vm_pgoff += (unsigned long)addr >> PAGE_SHIFT;
+ vma->vm_page_prot = prot;
++
++ if (is_initial_xendomain()) {
++ unsigned long addr = vma->vm_pgoff << PAGE_SHIFT;
++ size_t size = vma->vm_end - vma->vm_start;
++ unsigned long offset = HYPERVISOR_ioremap(addr, size);
++ if (IS_ERR_VALUE(offset))
++ return offset;
++ }
+
+ if (remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
+ size, vma->vm_page_prot))
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/xen/Makefile
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/Makefile Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,9 @@
++#
++# Makefile for Xen components
++#
++
++obj-y := hypercall.o xenivt.o xenentry.o xensetup.o xenpal.o xenhpski.o \
++ hypervisor.o pci-dma-xen.o util.o xencomm.o xcom_hcall.o \
++ xcom_mini.o xcom_privcmd.o mem.o
++
++pci-dma-xen-y := ../../i386/kernel/pci-dma-xen.o
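++# pci-dma-xen is shared with i386; the line above pulls in the i386 object.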
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/xen/hypercall.S
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/hypercall.S Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,170 @@
++/*
++ * Support routines for Xen hypercalls
++ *
++ * Copyright (C) 2005 Dan Magenheimer <dan.magenheimer@hp.com>
++ */
++
++#include <asm/processor.h>
++#include <asm/asmmacro.h>
++
++GLOBAL_ENTRY(xen_get_psr)
++ XEN_HYPER_GET_PSR
++ br.ret.sptk.many rp
++ ;;
++END(xen_get_psr)
++
++GLOBAL_ENTRY(xen_get_ivr)
++ XEN_HYPER_GET_IVR
++ br.ret.sptk.many rp
++ ;;
++END(xen_get_ivr)
++
++GLOBAL_ENTRY(xen_get_tpr)
++ XEN_HYPER_GET_TPR
++ br.ret.sptk.many rp
++ ;;
++END(xen_get_tpr)
++
++GLOBAL_ENTRY(xen_set_tpr)
++ mov r8=r32
++ XEN_HYPER_SET_TPR
++ br.ret.sptk.many rp
++ ;;
++END(xen_set_tpr)
++
++GLOBAL_ENTRY(xen_eoi)
++ mov r8=r32
++ XEN_HYPER_EOI
++ br.ret.sptk.many rp
++ ;;
++END(xen_eoi)
++
++GLOBAL_ENTRY(xen_thash)
++ mov r8=r32
++ XEN_HYPER_THASH
++ br.ret.sptk.many rp
++ ;;
++END(xen_thash)
++
++GLOBAL_ENTRY(xen_set_itm)
++ mov r8=r32
++ XEN_HYPER_SET_ITM
++ br.ret.sptk.many rp
++ ;;
++END(xen_set_itm)
++
++GLOBAL_ENTRY(xen_ptcga)
++ mov r8=r32
++ mov r9=r33
++ XEN_HYPER_PTC_GA
++ br.ret.sptk.many rp
++ ;;
++END(xen_ptcga)
++
++GLOBAL_ENTRY(xen_get_rr)
++ mov r8=r32
++ XEN_HYPER_GET_RR
++ br.ret.sptk.many rp
++ ;;
++END(xen_get_rr)
++
++GLOBAL_ENTRY(xen_set_rr)
++ mov r8=r32
++ mov r9=r33
++ XEN_HYPER_SET_RR
++ br.ret.sptk.many rp
++ ;;
++END(xen_set_rr)
++
++GLOBAL_ENTRY(xen_set_kr)
++ mov r8=r32
++ mov r9=r33
++ XEN_HYPER_SET_KR
++ br.ret.sptk.many rp
++END(xen_set_kr)
++
++GLOBAL_ENTRY(xen_fc)
++ mov r8=r32
++ XEN_HYPER_FC
++ br.ret.sptk.many rp
++END(xen_fc)
++
++GLOBAL_ENTRY(xen_get_cpuid)
++ mov r8=r32
++ XEN_HYPER_GET_CPUID
++ br.ret.sptk.many rp
++END(xen_get_cpuid)
++
++GLOBAL_ENTRY(xen_get_pmd)
++ mov r8=r32
++ XEN_HYPER_GET_PMD
++ br.ret.sptk.many rp
++END(xen_get_pmd)
++
++#ifdef CONFIG_IA32_SUPPORT
++GLOBAL_ENTRY(xen_get_eflag)
++ XEN_HYPER_GET_EFLAG
++ br.ret.sptk.many rp
++END(xen_get_eflag)
++
++// some bits aren't set if pl!=0, see SDM vol1 3.1.8
++GLOBAL_ENTRY(xen_set_eflag)
++ mov r8=r32
++ XEN_HYPER_SET_EFLAG
++ br.ret.sptk.many rp
++END(xen_set_eflag)
++#endif
++
++GLOBAL_ENTRY(xen_send_ipi)
++ mov r14=r32
++ mov r15=r33
++ mov r2=0x400
++ break 0x1000
++ ;;
++ br.ret.sptk.many rp
++ ;;
++END(xen_send_ipi)
++
++#ifdef CONFIG_XEN_IA64_VDSO_PARAVIRT
++// These routines are specialized for the vdso.
++// In fsys mode, call/ret cannot be used.
++
++ // see xen_ssm_i() in privop.h
++ // r22 = &vcpu->vcpu_info->evtchn_upcall_mask
++ // r23 = &vpsr.ic
++ // r24 = &vcpu->vcpu_info->evtchn_upcall_pending
++ // r25 = tmp
++ // r31 = tmp
++ // p11 = tmp
++ // p14 = tmp
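++	//
++	// Emulated "ssm psr.i" for the fsys path: clear the event-channel
++	// upcall mask; if it was set and an upcall is pending, write r20
++	// back to the mask and issue the SSM_I hypercall so Xen delivers
++	// the pending event.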
++#define XEN_SET_PSR_I \
++ ld1 r31=[r22]; \
++ ld1 r25=[r24]; \
++ ;; \
++ st1 [r22]=r0; \
++ cmp.ne.unc p14,p0=r0,r31; \
++ ;; \
++(p14) cmp.ne.unc p11,p0=r0,r25; \
++ ;; \
++(p11) st1 [r22]=r20; \
++(p11) XEN_HYPER_SSM_I;
++
++GLOBAL_ENTRY(xen_ssm_i_0)
++ XEN_SET_PSR_I
++ brl.cond.sptk .vdso_ssm_i_0_ret
++ ;;
++END(xen_ssm_i_0)
++
++GLOBAL_ENTRY(xen_ssm_i_1)
++ XEN_SET_PSR_I
++ brl.cond.sptk .vdso_ssm_i_1_ret
++ ;;
++END(xen_ssm_i_1)
++
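++// Generic hypercall stub: the hypercall number arrives as the sixth
++// argument (r37), is moved to r2, and break 0x1000 traps into Xen;
++// the result comes back in r8 as usual.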
++GLOBAL_ENTRY(__hypercall)
++ mov r2=r37
++ break 0x1000
++ br.ret.sptk.many b0
++ ;;
++END(__hypercall)
++#endif
+diff -r 4a9ef6a03fd9 -r 85b796b085e5 arch/ia64/xen/hypervisor.c
+--- /dev/null Thu Jan 01 00:00:00 1970 +0000
++++ b/arch/ia64/xen/hypervisor.c Wed Aug 08 16:25:28 2007 -0300
+@@ -0,0 +1,1183 @@
++/******************************************************************************
++ * arch/ia64/xen/hypervisor.c
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++//#include <linux/kernel.h>
++#include <linux/spinlock.h>
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/efi.h>
++#include <asm/page.h>
++#include <asm/pgalloc.h>
++#include <asm/meminit.h>
++#include <asm/hypervisor.h>
++#include <asm/hypercall.h>
++#include <xen/interface/memory.h>
++#include <xen/balloon.h>
++
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)XSI_BASE;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++int running_on_xen;
++EXPORT_SYMBOL(running_on_xen);
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++static int p2m_expose_init(void);
++#else
++#define p2m_expose_init() (-ENOSYS)
++#endif
++
++EXPORT_SYMBOL(__hypercall);
++
++//XXX same as i386, x86_64 contiguous_bitmap_set(), contiguous_bitmap_clear()
++// move those to lib/contiguous_bitmap?
++//XXX discontigmem/sparsemem
++
++/*
++ * Bitmap is indexed by page number. If bit is set, the page is part of a
++ * xen_create_contiguous_region() area of memory.
++ */
++unsigned long *contiguous_bitmap;
++
++#ifdef CONFIG_VIRTUAL_MEM_MAP
++/* Following logic is stolen from create_mem_map_table() for virtual memmap */
++static int
++create_contiguous_bitmap(u64 start, u64 end, void *arg)
++{
++ unsigned long address, start_page, end_page;
++ unsigned long bitmap_start, bitmap_end;
++ unsigned char *bitmap;
++ int node;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ bitmap_start = (unsigned long)contiguous_bitmap +
++ ((__pa(start) >> PAGE_SHIFT) >> 3);
++ bitmap_end = (unsigned long)contiguous_bitmap +
++ (((__pa(end) >> PAGE_SHIFT) + 2 * BITS_PER_LONG) >> 3);
++
++ start_page = bitmap_start & PAGE_MASK;
++ end_page = PAGE_ALIGN(bitmap_end);
++ node = paddr_to_nid(__pa(start));
++
++ bitmap = alloc_bootmem_pages_node(NODE_DATA(node),
++ end_page - start_page);
++ BUG_ON(!bitmap);
++ memset(bitmap, 0, end_page - start_page);
++
++ for (address = start_page; address < end_page; address += PAGE_SIZE) {
++ pgd = pgd_offset_k(address);
++ if (pgd_none(*pgd))
++ pgd_populate(&init_mm, pgd,
++ alloc_bootmem_pages_node(NODE_DATA(node),
++ PAGE_SIZE));
++ pud = pud_offset(pgd, address);
++
++ if (pud_none(*pud))
++ pud_populate(&init_mm, pud,
++ alloc_bootmem_pages_node(NODE_DATA(node),
++ PAGE_SIZE));
++ pmd = pmd_offset(pud, address);
++
++ if (pmd_none(*pmd))
++ pmd_populate_kernel(&init_mm, pmd,
++ alloc_bootmem_pages_node
++ (NODE_DATA(node), PAGE_SIZE));
++ pte = pte_offset_kernel(pmd, address);
++
++ if (pte_none(*pte))
++ set_pte(pte,
++ pfn_pte(__pa(bitmap + (address - start_page))
++ >> PAGE_SHIFT, PAGE_KERNEL));
++ }
++ return 0;
++}
++#endif
++
++static void
++__contiguous_bitmap_init(unsigned long size)
++{
++ contiguous_bitmap = alloc_bootmem_pages(size);
++ BUG_ON(!contiguous_bitmap);
++ memset(contiguous_bitmap, 0, size);
++}
++
++void
++contiguous_bitmap_init(unsigned long end_pfn)
++{
++ unsigned long size = (end_pfn + 2 * BITS_PER_LONG) >> 3;
++#ifndef CONFIG_VIRTUAL_MEM_MAP
++ __contiguous_bitmap_init(size);
++#else
++ unsigned long max_gap = 0;
++
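++	/*
++	 * Follow the virtual mem_map policy: if the address space has no
++	 * large hole the bitmap fits in bootmem; otherwise carve it out of
++	 * the vmalloc area and populate it via create_contiguous_bitmap().
++	 */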
++ efi_memmap_walk(find_largest_hole, (u64*)&max_gap);
++ if (max_gap < LARGE_GAP) {
++ __contiguous_bitmap_init(size);
++ } else {
++ unsigned long map_size = PAGE_ALIGN(size);
++ vmalloc_end -= map_size;
++ contiguous_bitmap = (unsigned long*)vmalloc_end;
++ efi_memmap_walk(create_contiguous_bitmap, NULL);
++ }
++#endif
++}
++
++#if 0
++int
++contiguous_bitmap_test(void* p)
++{
++ return test_bit(__pa(p) >> PAGE_SHIFT, contiguous_bitmap);
++}
++#endif
++
++static void contiguous_bitmap_set(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
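++	/*
++	 * -(1UL<<start_off) sets bits [start_off, BITS_PER_LONG) and
++	 * ((1UL<<end_off)-1) sets bits [0, end_off); combined they mark
++	 * exactly the pages of the range within a single bitmap word.
++	 */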
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] |=
++ ((1UL<<end_off)-1) & -(1UL<<start_off);
++ } else {
++ contiguous_bitmap[curr_idx] |= -(1UL<<start_off);
++ while ( ++curr_idx < end_idx )
++ contiguous_bitmap[curr_idx] = ~0UL;
++ contiguous_bitmap[curr_idx] |= (1UL<<end_off)-1;
++ }
++}
++
++static void contiguous_bitmap_clear(
++ unsigned long first_page, unsigned long nr_pages)
++{
++ unsigned long start_off, end_off, curr_idx, end_idx;
++
++ curr_idx = first_page / BITS_PER_LONG;
++ start_off = first_page & (BITS_PER_LONG-1);
++ end_idx = (first_page + nr_pages) / BITS_PER_LONG;
++ end_off = (first_page + nr_pages) & (BITS_PER_LONG-1);
++
++ if (curr_idx == end_idx) {
++ contiguous_bitmap[curr_idx] &=
++ -(1UL<<end_off) | ((1UL<<start_off)-1);
++ } else {
++ contiguous_bitmap[curr_idx] &= (1UL<<start_off)-1;
++ while ( ++curr_idx != end_idx )
++ contiguous_bitmap[curr_idx] = 0;
++ contiguous_bitmap[curr_idx] &= -(1UL<<end_off);
++ }
++}
++
++// __xen_create_contiguous_region(), __xen_destroy_contiguous_region()
++// are based on i386 xen_create_contiguous_region(),
++// xen_destroy_contiguous_region()
++
++/* Protected by balloon_lock. */
++#define MAX_CONTIG_ORDER 7
++static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
++
++/* Ensure multi-page extents are contiguous in machine memory. */
++int
++__xen_create_contiguous_region(unsigned long vstart,
++ unsigned int order, unsigned int address_bits)
++{
++ unsigned long error = 0;
++ unsigned long gphys = __pa(vstart);
++ unsigned long start_gpfn = gphys >> PAGE_SHIFT;
++ unsigned long num_gpfn = 1 << order;
++ unsigned long i;
++ unsigned long flags;
++
++ unsigned long *in_frames = discontig_frames, out_frame;
++ int success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = num_gpfn,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ },
++ .nr_exchanged = 0
++ };
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++
++ scrub_pages(vstart, num_gpfn);
++
++ balloon_lock(flags);
++
++ /* Get a new contiguous memory extent. */
++ for (i = 0; i < num_gpfn; i++) {
++ in_frames[i] = start_gpfn + i;
++ }
++ out_frame = start_gpfn;
++ error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == num_gpfn);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
++ BUG_ON(success && (error != 0));
++ if (unlikely(error == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in);
++ BUG_ON(error != num_gpfn);
++ error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out);
++ if (error != 1) {
++ /* Couldn't get special memory: fall back to normal. */
++ for (i = 0; i < num_gpfn; i++) {
++ in_frames[i] = start_gpfn + i;
++ }
++ error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.in);
++ BUG_ON(error != num_gpfn);
++ success = 0;
++ } else
++ success = 1;
++ }
++ if (success)
++ contiguous_bitmap_set(start_gpfn, num_gpfn);
++#if 0
++ if (success) {
++ unsigned long mfn;
++ unsigned long mfn_prev = ~0UL;
++ for (i = 0; i < num_gpfn; i++) {
++ mfn = pfn_to_mfn_for_dma(start_gpfn + i);
++ if (mfn_prev != ~0UL && mfn != mfn_prev + 1) {
++ xprintk("\n");
++ xprintk("%s:%d order %d "
++ "start 0x%lx bus 0x%lx "
++ "machine 0x%lx\n",
++ __func__, __LINE__, order,
++ vstart, virt_to_bus((void*)vstart),
++ phys_to_machine_for_dma(gphys));
++ xprintk("mfn: ");
++ for (i = 0; i < num_gpfn; i++) {
++ mfn = pfn_to_mfn_for_dma(
++ start_gpfn + i);
++ xprintk("0x%lx ", mfn);
++ }
++ xprintk("\n");
++ break;
++ }
++ mfn_prev = mfn;
++ }
++ }
++#endif
++ balloon_unlock(flags);
++ return success? 0: -ENOMEM;
++}
++
++void
++__xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++ unsigned long flags;
++ unsigned long error = 0;
++ unsigned long start_gpfn = __pa(vstart) >> PAGE_SHIFT;
++ unsigned long num_gpfn = 1UL << order;
++ unsigned long i;
++
++ unsigned long *out_frames = discontig_frames, in_frame;
++ int success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = num_gpfn,
++ .extent_order = 0,
++ .address_bits = 0,
++ .domid = DOMID_SELF
++ },
++ .nr_exchanged = 0
++ };
++
++
++ if (!test_bit(start_gpfn, contiguous_bitmap))
++ return;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return;
++
++ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ scrub_pages(vstart, num_gpfn);
++
++ balloon_lock(flags);
++
++ contiguous_bitmap_clear(start_gpfn, num_gpfn);
++
++ /* Do the exchange for non-contiguous MFNs. */
++ in_frame = start_gpfn;
++ for (i = 0; i < num_gpfn; i++) {
++ out_frames[i] = start_gpfn + i;
++ }
++ error = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == 1);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (error == 0)));
++ BUG_ON(success && (error != 0));
++ if (unlikely(error == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ error = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in);
++ BUG_ON(error != 1);
++
++ error = HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out);
++ BUG_ON(error != num_gpfn);
++ }
++ balloon_unlock(flags);
++}
++
++
++///////////////////////////////////////////////////////////////////////////
++// grant table hack
++// cmd: GNTTABOP_xxx
++
++#include <linux/mm.h>
++#include <xen/interface/xen.h>
++#include <xen/gnttab.h>
++
++static void
++gnttab_map_grant_ref_pre(struct gnttab_map_grant_ref *uop)
++{
++ uint32_t flags;
++
++ flags = uop->flags;
++
++ if (flags & GNTMAP_host_map) {
++ if (flags & GNTMAP_application_map) {
++ xprintd("GNTMAP_application_map is not supported yet: flags 0x%x\n", flags);
++ BUG();
++ }
++ if (flags & GNTMAP_contains_pte) {
++ xprintd("GNTMAP_contains_pte is not supported yet flags 0x%x\n", flags);
++ BUG();
++ }
++ } else if (flags & GNTMAP_device_map) {
++ xprintd("GNTMAP_device_map is not supported yet 0x%x\n", flags);
++ BUG();//XXX not yet. actually this flag is not used.
++ } else {
++ BUG();
++ }
++}
++
++int
++HYPERVISOR_grant_table_op(unsigned int cmd, void *uop, unsigned int count)
++{
++ if (cmd == GNTTABOP_map_grant_ref) {
++ unsigned int i;
++ for (i = 0; i < count; i++) {
++ gnttab_map_grant_ref_pre(
++ (struct gnttab_map_grant_ref*)uop + i);
++ }
++ }
++ return xencomm_mini_hypercall_grant_table_op(cmd, uop, count);
++}
++EXPORT_SYMBOL(HYPERVISOR_grant_table_op);
++
++///////////////////////////////////////////////////////////////////////////
++// foreign mapping
++#include <linux/efi.h>
++#include <asm/meminit.h> // for IA64_GRANULE_SIZE, GRANULEROUND{UP,DOWN}()
++
++static unsigned long privcmd_resource_min = 0;
++// Xen/ia64 currently can handle pseudo physical address bits up to
++// (PAGE_SHIFT * 3)
++static unsigned long privcmd_resource_max = GRANULEROUNDDOWN((1UL << (PAGE_SHIFT * 3)) - 1);
++static unsigned long privcmd_resource_align = IA64_GRANULE_SIZE;
++
++static unsigned long
++md_end_addr(const efi_memory_desc_t *md)
++{
++ return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
++}
++
++#define XEN_IA64_PRIVCMD_LEAST_GAP_SIZE (1024 * 1024 * 1024UL)
++static int
++xen_ia64_privcmd_check_size(unsigned long start, unsigned long end)
++{
++ return (start < end &&
++ (end - start) > XEN_IA64_PRIVCMD_LEAST_GAP_SIZE);
++}
++
++static int __init
++xen_ia64_privcmd_init(void)
++{
++ void *efi_map_start, *efi_map_end, *p;
++ u64 efi_desc_size;
++ efi_memory_desc_t *md;
++ unsigned long tmp_min;
++ unsigned long tmp_max;
++ unsigned long gap_size;
++ unsigned long prev_end;
++
++ if (!is_running_on_xen())
++ return -1;
++
++ efi_map_start = __va(ia64_boot_param->efi_memmap);
++ efi_map_end = efi_map_start + ia64_boot_param->efi_memmap_size;
++ efi_desc_size = ia64_boot_param->efi_memdesc_size;
++
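++
++	// Strategy: prefer the space above the highest EFI-described
++	// address; if that is smaller than XEN_IA64_PRIVCMD_LEAST_GAP_SIZE,
++	// fall back to the largest gap between EFI memory descriptors.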
++	// First, find the highest address already in use by walking to
++	// the last EFI memory descriptor.
++	for (p = efi_map_start; p < efi_map_end; p += efi_desc_size) {
++		// nothing to do; we only want the final value of p
++	}
++ md = p - efi_desc_size;
++ privcmd_resource_min = GRANULEROUNDUP(md_end_addr(md));
++ if (xen_ia64_privcmd_check_size(privcmd_resource_min,
++ privcmd_resource_max)) {
++ goto out;
++ }
++
++	// The highest in-use address leaves too little room;
++	// try to find the largest gap between descriptors instead.
++ tmp_min = privcmd_resource_max;
++ tmp_max = 0;
++ gap_size = 0;
++ prev_end = 0;
++ for (p = efi_map_start;
++ p < efi_map_end - efi_desc_size;
++ p += efi_desc_size) {
++ unsigned long end;
++ efi_memory_desc_t* next;
++ unsigned long next_start;
++
++ md = p;
++ end = md_end_addr(md);
++ if (end > privcmd_resource_max) {
++ break;
++ }
++ if (end < prev_end) {
++			// Work around Xen passing incompletely sorted
++			// memory descriptors, e.g.
++			//   [x, x + length]
++			//   [x, x]
++			// where the order should be the reverse; skip the
++			// out-of-order entry.
++ continue;
++ }
++ next = p + efi_desc_size;
++ next_start = next->phys_addr;
++ if (next_start > privcmd_resource_max) {
++ next_start = privcmd_resource_max;
++ }
++ if (end < next_start && gap_size < (next_start - end)) {
++ tmp_min = end;
++ tmp_max = next_start;
++ gap_size = tmp_max - tmp_min;
++ }
++ prev_end = end;
++ }
++
++ privcmd_resource_min = GRANULEROUNDUP(tmp_min);
++ if (xen_ia64_privcmd_check_size(privcmd_resource_min, tmp_max)) {
++ privcmd_resource_max = tmp_max;
++ goto out;
++ }
++
++ privcmd_resource_min = tmp_min;
++ privcmd_resource_max = tmp_max;
++ if (!xen_ia64_privcmd_check_size(privcmd_resource_min,
++ privcmd_resource_max)) {
++		// No sufficiently large gap was found.  Go ahead anyway
++		// with a warning, hoping that no large region will be
++		// requested.
++ printk(KERN_WARNING "xen privcmd: large enough region for privcmd mmap is not found.\n");
++ }
++
++out:
++ printk(KERN_INFO "xen privcmd uses pseudo physical addr range [0x%lx, 0x%lx] (%ldMB)\n",
++ privcmd_resource_min, privcmd_resource_max,
++ (privcmd_resource_max - privcmd_resource_min) >> 20);
++ BUG_ON(privcmd_resource_min >= privcmd_resource_max);
++
++ // XXX this should be somewhere appropriate
++ (void)p2m_expose_init();
++
++ return 0;
++}
++late_initcall(xen_ia64_privcmd_init);
++
++struct xen_ia64_privcmd_entry {
++ atomic_t map_count;
++#define INVALID_GPFN (~0UL)
++ unsigned long gpfn;
++};
++
++struct xen_ia64_privcmd_range {
++ atomic_t ref_count;
++ unsigned long pgoff; // in PAGE_SIZE
++ struct resource* res;
++
++ unsigned long num_entries;
++ struct xen_ia64_privcmd_entry entries[0];
++};
++
++struct xen_ia64_privcmd_vma {
++ int is_privcmd_mmapped;
++ struct xen_ia64_privcmd_range* range;
++
++ unsigned long num_entries;
++ struct xen_ia64_privcmd_entry* entries;
++};
++
++static void
++xen_ia64_privcmd_init_entry(struct xen_ia64_privcmd_entry* entry)
++{
++ atomic_set(&entry->map_count, 0);
++ entry->gpfn = INVALID_GPFN;
++}
++
++static int
++xen_ia64_privcmd_entry_mmap(struct vm_area_struct* vma,
++ unsigned long addr,
++ struct xen_ia64_privcmd_range* privcmd_range,
++ int i,
++ unsigned long gmfn,
++ pgprot_t prot,
++ domid_t domid)
++{
++ int error = 0;
++ struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++ unsigned long gpfn;
++ unsigned long flags;
++
++ if ((addr & ~PAGE_MASK) != 0 || gmfn == INVALID_MFN) {
++ error = -EINVAL;
++ goto out;
++ }
++
++ if (entry->gpfn != INVALID_GPFN) {
++ error = -EBUSY;
++ goto out;
++ }
++ gpfn = (privcmd_range->res->start >> PAGE_SHIFT) + i;
++
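++	/*
++	 * Assign the foreign gmfn to the pseudo-physical frame reserved for
++	 * this entry, then map that frame into the caller's address space;
++	 * tear the assignment down again if remap_pfn_range() fails.
++	 */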
++ flags = ASSIGN_writable;
++ if (pgprot_val(prot) == PROT_READ) {
++ flags = ASSIGN_readonly;
++ }
++ error = HYPERVISOR_add_physmap_with_gmfn(gpfn, gmfn, flags, domid);
++ if (error != 0) {
++ goto out;
++ }
++
++ prot = vma->vm_page_prot;
++ error = remap_pfn_range(vma, addr, gpfn, 1 << PAGE_SHIFT, prot);
++ if (error != 0) {
++ error = HYPERVISOR_zap_physmap(gpfn, 0);
++ if (error) {
++ BUG();//XXX
++ }
++ } else {
++ atomic_inc(&entry->map_count);
++ entry->gpfn = gpfn;
++ }
++
++out:
++ return error;
++}
++
++static void
++xen_ia64_privcmd_entry_munmap(struct xen_ia64_privcmd_range* privcmd_range,
++ int i)
++{
++ struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++ unsigned long gpfn = entry->gpfn;
++ //gpfn = (privcmd_range->res->start >> PAGE_SHIFT) +
++ // (vma->vm_pgoff - privcmd_range->pgoff);
++ int error;
++
++ error = HYPERVISOR_zap_physmap(gpfn, 0);
++ if (error) {
++ BUG();//XXX
++ }
++ entry->gpfn = INVALID_GPFN;
++}
++
++static void
++xen_ia64_privcmd_entry_open(struct xen_ia64_privcmd_range* privcmd_range,
++ int i)
++{
++ struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++ if (entry->gpfn != INVALID_GPFN) {
++ atomic_inc(&entry->map_count);
++ } else {
++ BUG_ON(atomic_read(&entry->map_count) != 0);
++ }
++}
++
++static void
++xen_ia64_privcmd_entry_close(struct xen_ia64_privcmd_range* privcmd_range,
++ int i)
++{
++ struct xen_ia64_privcmd_entry* entry = &privcmd_range->entries[i];
++ if (entry->gpfn != INVALID_GPFN &&
++ atomic_dec_and_test(&entry->map_count)) {
++ xen_ia64_privcmd_entry_munmap(privcmd_range, i);
++ }
++}
++
++static void xen_ia64_privcmd_vma_open(struct vm_area_struct* vma);
++static void xen_ia64_privcmd_vma_close(struct vm_area_struct* vma);
++
++struct vm_operations_struct xen_ia64_privcmd_vm_ops = {
++ .open = &xen_ia64_privcmd_vma_open,
++ .close = &xen_ia64_privcmd_vma_close,
++};
++
++static void
++__xen_ia64_privcmd_vma_open(struct vm_area_struct* vma,
++ struct xen_ia64_privcmd_vma* privcmd_vma,
++ struct xen_ia64_privcmd_range* privcmd_range)
++{
++ unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
++ unsigned long num_entries = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long i;
++
++ BUG_ON(entry_offset < 0);
++ BUG_ON(entry_offset + num_entries > privcmd_range->num_entries);
++
++ privcmd_vma->range = privcmd_range;
++ privcmd_vma->num_entries = num_entries;
++ privcmd_vma->entries = &privcmd_range->entries[entry_offset];
++ vma->vm_private_data = privcmd_vma;
++ for (i = 0; i < privcmd_vma->num_entries; i++) {
++ xen_ia64_privcmd_entry_open(privcmd_range, entry_offset + i);
++ }
++
++ vma->vm_private_data = privcmd_vma;
++ vma->vm_ops = &xen_ia64_privcmd_vm_ops;
++}
++
++static void
++xen_ia64_privcmd_vma_open(struct vm_area_struct* vma)
++{
++ struct xen_ia64_privcmd_vma* old_privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++ struct xen_ia64_privcmd_vma* privcmd_vma = (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++ struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++
++ atomic_inc(&privcmd_range->ref_count);
++ // vm_op->open() can't fail.
++ privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL | __GFP_NOFAIL);
++ // copy original value if necessary
++ privcmd_vma->is_privcmd_mmapped = old_privcmd_vma->is_privcmd_mmapped;
++
++ __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
++}
++
++static void
++xen_ia64_privcmd_vma_close(struct vm_area_struct* vma)
++{
++ struct xen_ia64_privcmd_vma* privcmd_vma =
++ (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++ struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++ unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
++ unsigned long i;
++
++ for (i = 0; i < privcmd_vma->num_entries; i++) {
++ xen_ia64_privcmd_entry_close(privcmd_range, entry_offset + i);
++ }
++ vma->vm_private_data = NULL;
++ kfree(privcmd_vma);
++
++ if (atomic_dec_and_test(&privcmd_range->ref_count)) {
++#if 1
++ for (i = 0; i < privcmd_range->num_entries; i++) {
++ struct xen_ia64_privcmd_entry* entry =
++ &privcmd_range->entries[i];
++ BUG_ON(atomic_read(&entry->map_count) != 0);
++ BUG_ON(entry->gpfn != INVALID_GPFN);
++ }
++#endif
++ release_resource(privcmd_range->res);
++ kfree(privcmd_range->res);
++ vfree(privcmd_range);
++ }
++}
++
++int
++privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
++{
++ struct xen_ia64_privcmd_vma* privcmd_vma =
++ (struct xen_ia64_privcmd_vma *)vma->vm_private_data;
++ return (xchg(&privcmd_vma->is_privcmd_mmapped, 1) == 0);
++}
++
++int
++privcmd_mmap(struct file * file, struct vm_area_struct * vma)
++{
++ int error;
++ unsigned long size = vma->vm_end - vma->vm_start;
++ unsigned long num_entries = size >> PAGE_SHIFT;
++ struct xen_ia64_privcmd_range* privcmd_range = NULL;
++ struct xen_ia64_privcmd_vma* privcmd_vma = NULL;
++ struct resource* res = NULL;
++ unsigned long i;
++ BUG_ON(!is_running_on_xen());
++
++ BUG_ON(file->private_data != NULL);
++
++ error = -ENOMEM;
++ privcmd_range =
++ vmalloc(sizeof(*privcmd_range) +
++ sizeof(privcmd_range->entries[0]) * num_entries);
++ if (privcmd_range == NULL) {
++ goto out_enomem0;
++ }
++ privcmd_vma = kmalloc(sizeof(*privcmd_vma), GFP_KERNEL);
++ if (privcmd_vma == NULL) {
++ goto out_enomem1;
++ }
++ privcmd_vma->is_privcmd_mmapped = 0;
++
++ res = kzalloc(sizeof(*res), GFP_KERNEL);
++ if (res == NULL) {
++ goto out_enomem1;
++ }
++ res->name = "Xen privcmd mmap";
++ error = allocate_resource(&iomem_resource, res, size,
++ privcmd_resource_min, privcmd_resource_max,
++ privcmd_resource_align, NULL, NULL);
++ if (error) {
++ goto out_enomem1;
++ }
++ privcmd_range->res = res;
++
++ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY | VM_PFNMAP;
++
++ atomic_set(&privcmd_range->ref_count, 1);
++ privcmd_range->pgoff = vma->vm_pgoff;
++ privcmd_range->num_entries = num_entries;
++ for (i = 0; i < privcmd_range->num_entries; i++) {
++ xen_ia64_privcmd_init_entry(&privcmd_range->entries[i]);
++ }
++
++ __xen_ia64_privcmd_vma_open(vma, privcmd_vma, privcmd_range);
++ return 0;
++
++out_enomem1:
++ kfree(res);
++ kfree(privcmd_vma);
++out_enomem0:
++ vfree(privcmd_range);
++ return error;
++}
++
++int
++direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address, // process virtual address
++ unsigned long gmfn, // gmfn, gmfn + 1, ... gmfn + size/PAGE_SIZE
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid) // target domain
++{
++ struct xen_ia64_privcmd_vma* privcmd_vma =
++ (struct xen_ia64_privcmd_vma*)vma->vm_private_data;
++ struct xen_ia64_privcmd_range* privcmd_range = privcmd_vma->range;
++ unsigned long entry_offset = vma->vm_pgoff - privcmd_range->pgoff;
++
++ unsigned long i;
++ unsigned long offset;
++ int error = 0;
++ BUG_ON(!is_running_on_xen());
++
++#if 0
++ if (prot != vm->vm_page_prot) {
++ return -EINVAL;
++ }
++#endif
++
++ i = (address - vma->vm_start) >> PAGE_SHIFT;
++ for (offset = 0; offset < size; offset += PAGE_SIZE) {
++ error = xen_ia64_privcmd_entry_mmap(vma, (address + offset) & PAGE_MASK, privcmd_range, entry_offset + i, gmfn, prot, domid);
++ if (error != 0) {
++ break;
++ }
++
++ i++;
++ gmfn++;
++ }
++
++ return error;
++}
++
++
++/* Called after suspend, to resume time. */
++void
++time_resume(void)
++{
++ extern void ia64_cpu_local_tick(void);
++
++ /* Just trigger a tick. */
++ ia64_cpu_local_tick();
++
++ /* Time interpolator remembers the last timer status. Forget it */
++ time_interpolator_reset();
++}
++
++///////////////////////////////////////////////////////////////////////////
++// expose p2m table
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M
++#include <linux/cpu.h>
++#include <asm/uaccess.h>
++
++int p2m_initialized __read_mostly = 0;
++
++unsigned long p2m_min_low_pfn __read_mostly;
++unsigned long p2m_max_low_pfn __read_mostly;
++unsigned long p2m_convert_min_pfn __read_mostly;
++unsigned long p2m_convert_max_pfn __read_mostly;
++
++static struct resource p2m_resource = {
++ .name = "Xen p2m table",
++ .flags = IORESOURCE_MEM,
++};
++static unsigned long p2m_assign_start_pfn __read_mostly;
++static unsigned long p2m_assign_end_pfn __read_mostly;
++volatile const pte_t* p2m_pte __read_mostly;
++
++#define GRANULE_PFN PTRS_PER_PTE
++static unsigned long p2m_granule_pfn __read_mostly = GRANULE_PFN;
++
++#define ROUNDDOWN(x, y) ((x) & ~((y) - 1))
++#define ROUNDUP(x, y) (((x) + (y) - 1) & ~((y) - 1))
++
++#define P2M_PREFIX "Xen p2m: "
++
++static int xen_ia64_p2m_expose __read_mostly = 1;
++module_param(xen_ia64_p2m_expose, int, 0);
++MODULE_PARM_DESC(xen_ia64_p2m_expose,
++ "enable/disable xen/ia64 p2m exposure optimization\n");
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++static int xen_ia64_p2m_expose_use_dtr __read_mostly = 1;
++module_param(xen_ia64_p2m_expose_use_dtr, int, 0);
++MODULE_PARM_DESC(xen_ia64_p2m_expose_use_dtr,
++ "use/unuse dtr to map exposed p2m table\n");
++
++static const int p2m_page_shifts[] = {
++ _PAGE_SIZE_4K,
++ _PAGE_SIZE_8K,
++ _PAGE_SIZE_16K,
++ _PAGE_SIZE_64K,
++ _PAGE_SIZE_256K,
++ _PAGE_SIZE_1M,
++ _PAGE_SIZE_4M,
++ _PAGE_SIZE_16M,
++ _PAGE_SIZE_64M,
++ _PAGE_SIZE_256M,
++};
++
++struct p2m_itr_arg {
++ unsigned long vaddr;
++ unsigned long pteval;
++ unsigned long log_page_size;
++};
++static struct p2m_itr_arg p2m_itr_arg __read_mostly;
++
++// This should be in asm-ia64/kregs.h
++#define IA64_TR_P2M_TABLE 3
++
++static void
++p2m_itr(void* info)
++{
++ struct p2m_itr_arg* arg = (struct p2m_itr_arg*)info;
++ ia64_itr(0x2, IA64_TR_P2M_TABLE,
++ arg->vaddr, arg->pteval, arg->log_page_size);
++ ia64_srlz_d();
++}
++
++static int
++p2m_expose_dtr_call(struct notifier_block *self,
++ unsigned long event, void* ptr)
++{
++ unsigned int cpu = (unsigned int)(long)ptr;
++ if (event != CPU_ONLINE)
++ return 0;
++	/* Only insert the mapping once the p2m table has been initialized. */
++	if (p2m_initialized && xen_ia64_p2m_expose_use_dtr)
++		smp_call_function_single(cpu, &p2m_itr, &p2m_itr_arg, 1, 1);
++ return 0;
++}
++
++static struct notifier_block p2m_expose_dtr_hotplug_notifier = {
++ .notifier_call = p2m_expose_dtr_call,
++ .next = NULL,
++ .priority = 0
++};
++#endif
++
++static int
++p2m_expose_init(void)
++{
++ unsigned long num_pfn;
++ unsigned long size = 0;
++ unsigned long p2m_size = 0;
++ unsigned long align = ~0UL;
++ int error = 0;
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++ int i;
++ unsigned long page_size;
++ unsigned long log_page_size = 0;
++#endif
++
++ if (!xen_ia64_p2m_expose)
++ return -ENOSYS;
++ if (p2m_initialized)
++ return 0;
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++ error = register_cpu_notifier(&p2m_expose_dtr_hotplug_notifier);
++ if (error < 0)
++ return error;
++#endif
++
++ lock_cpu_hotplug();
++ if (p2m_initialized)
++ goto out;
++
++#ifdef CONFIG_DISCONTIGMEM
++ p2m_min_low_pfn = min_low_pfn;
++ p2m_max_low_pfn = max_low_pfn;
++#else
++ p2m_min_low_pfn = 0;
++ p2m_max_low_pfn = max_pfn;
++#endif
++
++#ifdef CONFIG_XEN_IA64_EXPOSE_P2M_USE_DTR
++ if (xen_ia64_p2m_expose_use_dtr) {
++ unsigned long granule_pfn = 0;
++ p2m_size = p2m_max_low_pfn - p2m_min_low_pfn;
++ for (i = 0;
++ i < sizeof(p2m_page_shifts)/sizeof(p2m_page_shifts[0]);
++ i++) {
++ log_page_size = p2m_page_shifts[i];
++ page_size = 1UL << log_page_size;
++ if (page_size < p2m_size)
++ continue;
++
++ granule_pfn =