path: root/arch/ia64
author	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
committer	Andrea Bastoni <bastoni@cs.unc.edu>	2010-05-30 19:16:45 -0400
commit	ada47b5fe13d89735805b566185f4885f5a3f750 (patch)
tree	644b88f8a71896307d71438e9b3af49126ffb22b /arch/ia64
parent	43e98717ad40a4ae64545b5ba047c7b86aa44f4f (diff)
parent	3280f21d43ee541f97f8cda5792150d2dbec20d5 (diff)
Merge branch 'wip-2.6.34' into old-private-master (archived-private-master)
Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig | 23
-rw-r--r--  arch/ia64/Makefile | 3
-rw-r--r--  arch/ia64/configs/bigsur_defconfig | 2
-rw-r--r--  arch/ia64/configs/generic_defconfig | 2
-rw-r--r--  arch/ia64/configs/gensparse_defconfig | 2
-rw-r--r--  arch/ia64/configs/sim_defconfig | 2
-rw-r--r--  arch/ia64/configs/tiger_defconfig | 1
-rw-r--r--  arch/ia64/configs/xen_domu_defconfig | 2
-rw-r--r--  arch/ia64/configs/zx1_defconfig | 2
-rw-r--r--  arch/ia64/hp/common/aml_nfw.c | 6
-rw-r--r--  arch/ia64/hp/common/sba_iommu.c | 40
-rw-r--r--  arch/ia64/ia32/Makefile | 11
-rw-r--r--  arch/ia64/ia32/audit.c | 42
-rw-r--r--  arch/ia64/ia32/binfmt_elf32.c | 245
-rw-r--r--  arch/ia64/ia32/elfcore32.h | 150
-rw-r--r--  arch/ia64/ia32/ia32_entry.S | 468
-rw-r--r--  arch/ia64/ia32/ia32_ldt.c | 146
-rw-r--r--  arch/ia64/ia32/ia32_signal.c | 1010
-rw-r--r--  arch/ia64/ia32/ia32_support.c | 253
-rw-r--r--  arch/ia64/ia32/ia32_traps.c | 156
-rw-r--r--  arch/ia64/ia32/ia32priv.h | 532
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 2817
-rw-r--r--  arch/ia64/include/asm/acpi.h | 33
-rw-r--r--  arch/ia64/include/asm/asm-offsets.h | 1
-rw-r--r--  arch/ia64/include/asm/bitops.h | 2
-rw-r--r--  arch/ia64/include/asm/cacheflush.h | 1
-rw-r--r--  arch/ia64/include/asm/compat.h | 3
-rw-r--r--  arch/ia64/include/asm/dma-mapping.h | 2
-rw-r--r--  arch/ia64/include/asm/dmi.h | 1
-rw-r--r--  arch/ia64/include/asm/elf.h | 53
-rw-r--r--  arch/ia64/include/asm/ftrace.h | 1
-rw-r--r--  arch/ia64/include/asm/hw_irq.h | 6
-rw-r--r--  arch/ia64/include/asm/ia32.h | 40
-rw-r--r--  arch/ia64/include/asm/io.h | 2
-rw-r--r--  arch/ia64/include/asm/irq.h | 2
-rw-r--r--  arch/ia64/include/asm/kprobes.h | 5
-rw-r--r--  arch/ia64/include/asm/kvm.h | 1
-rw-r--r--  arch/ia64/include/asm/kvm_host.h | 1
-rw-r--r--  arch/ia64/include/asm/mca.h | 5
-rw-r--r--  arch/ia64/include/asm/meminit.h | 2
-rw-r--r--  arch/ia64/include/asm/numa.h | 2
-rw-r--r--  arch/ia64/include/asm/pci.h | 14
-rw-r--r--  arch/ia64/include/asm/percpu.h | 4
-rw-r--r--  arch/ia64/include/asm/perfmon_default_smpl.h | 2
-rw-r--r--  arch/ia64/include/asm/pgtable.h | 5
-rw-r--r--  arch/ia64/include/asm/processor.h | 52
-rw-r--r--  arch/ia64/include/asm/ptrace.h | 4
-rw-r--r--  arch/ia64/include/asm/rwsem.h | 2
-rw-r--r--  arch/ia64/include/asm/scatterlist.h | 24
-rw-r--r--  arch/ia64/include/asm/sn/shubio.h | 2
-rw-r--r--  arch/ia64/include/asm/socket.h | 2
-rw-r--r--  arch/ia64/include/asm/spinlock.h | 76
-rw-r--r--  arch/ia64/include/asm/spinlock_types.h | 8
-rw-r--r--  arch/ia64/include/asm/swiotlb.h | 2
-rw-r--r--  arch/ia64/include/asm/syscall.h | 81
-rw-r--r--  arch/ia64/include/asm/system.h | 11
-rw-r--r--  arch/ia64/include/asm/tlb.h | 2
-rw-r--r--  arch/ia64/include/asm/topology.h | 4
-rw-r--r--  arch/ia64/include/asm/types.h | 5
-rw-r--r--  arch/ia64/include/asm/unistd.h | 17
-rw-r--r--  arch/ia64/include/asm/xen/events.h | 4
-rw-r--r--  arch/ia64/include/asm/xen/hypervisor.h | 28
-rw-r--r--  arch/ia64/kernel/Makefile | 19
-rw-r--r--  arch/ia64/kernel/acpi-ext.c | 1
-rw-r--r--  arch/ia64/kernel/acpi-processor.c | 85
-rw-r--r--  arch/ia64/kernel/acpi.c | 74
-rw-r--r--  arch/ia64/kernel/audit.c | 21
-rw-r--r--  arch/ia64/kernel/cpufreq/acpi-cpufreq.c | 1
-rw-r--r--  arch/ia64/kernel/crash.c | 11
-rw-r--r--  arch/ia64/kernel/efi.c | 1
-rw-r--r--  arch/ia64/kernel/elfcore.c | 80
-rw-r--r--  arch/ia64/kernel/entry.S | 40
-rw-r--r--  arch/ia64/kernel/esi.c | 2
-rw-r--r--  arch/ia64/kernel/head.S | 4
-rw-r--r--  arch/ia64/kernel/ia64_ksyms.c | 4
-rw-r--r--  arch/ia64/kernel/iosapic.c | 7
-rw-r--r--  arch/ia64/kernel/irq.c | 4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 11
-rw-r--r--  arch/ia64/kernel/ivt.S | 114
-rw-r--r--  arch/ia64/kernel/kprobes.c | 2
-rw-r--r--  arch/ia64/kernel/mca.c | 17
-rw-r--r--  arch/ia64/kernel/mca_asm.S | 2
-rw-r--r--  arch/ia64/kernel/mca_drv.c | 1
-rw-r--r--  arch/ia64/kernel/pci-swiotlb.c | 5
-rw-r--r--  arch/ia64/kernel/perfmon.c | 39
-rw-r--r--  arch/ia64/kernel/process.c | 61
-rw-r--r--  arch/ia64/kernel/ptrace.c | 15
-rw-r--r--  arch/ia64/kernel/relocate_kernel.S | 2
-rw-r--r--  arch/ia64/kernel/setup.c | 32
-rw-r--r--  arch/ia64/kernel/signal.c | 54
-rw-r--r--  arch/ia64/kernel/smpboot.c | 5
-rw-r--r--  arch/ia64/kernel/sys_ia64.c | 83
-rw-r--r--  arch/ia64/kernel/time.c | 6
-rw-r--r--  arch/ia64/kernel/topology.c | 3
-rw-r--r--  arch/ia64/kernel/traps.c | 9
-rw-r--r--  arch/ia64/kernel/uncached.c | 2
-rw-r--r--  arch/ia64/kernel/vmlinux.lds.S | 11
-rw-r--r--  arch/ia64/kvm/Kconfig | 2
-rw-r--r--  arch/ia64/kvm/Makefile | 2
-rw-r--r--  arch/ia64/kvm/asm-offsets.c | 1
-rw-r--r--  arch/ia64/kvm/kvm-ia64.c | 80
-rw-r--r--  arch/ia64/kvm/kvm_fw.c | 28
-rw-r--r--  arch/ia64/kvm/mmio.c | 4
-rw-r--r--  arch/ia64/kvm/vcpu.c | 4
-rw-r--r--  arch/ia64/kvm/vcpu.h | 9
-rw-r--r--  arch/ia64/kvm/vmm.c | 4
-rw-r--r--  arch/ia64/kvm/vtlb.c | 2
-rw-r--r--  arch/ia64/mm/contig.c | 99
-rw-r--r--  arch/ia64/mm/discontig.c | 130
-rw-r--r--  arch/ia64/mm/hugetlbpage.c | 1
-rw-r--r--  arch/ia64/mm/init.c | 13
-rw-r--r--  arch/ia64/mm/ioremap.c | 11
-rw-r--r--  arch/ia64/mm/tlb.c | 33
-rw-r--r--  arch/ia64/pci/pci.c | 49
-rw-r--r--  arch/ia64/sn/kernel/bte.c | 1
-rw-r--r--  arch/ia64/sn/kernel/io_acpi_init.c | 3
-rw-r--r--  arch/ia64/sn/kernel/io_common.c | 1
-rw-r--r--  arch/ia64/sn/kernel/io_init.c | 1
-rw-r--r--  arch/ia64/sn/kernel/irq.c | 1
-rw-r--r--  arch/ia64/sn/kernel/msi_sn.c | 1
-rw-r--r--  arch/ia64/sn/kernel/setup.c | 4
-rw-r--r--  arch/ia64/sn/kernel/sn2/sn2_smp.c | 8
-rw-r--r--  arch/ia64/sn/pci/pci_dma.c | 1
-rw-r--r--  arch/ia64/sn/pci/pcibr/pcibr_provider.c | 1
-rw-r--r--  arch/ia64/sn/pci/tioca_provider.c | 20
-rw-r--r--  arch/ia64/sn/pci/tioce_provider.c | 1
-rw-r--r--  arch/ia64/uv/kernel/setup.c | 2
-rw-r--r--  arch/ia64/xen/grant-table.c | 1
-rw-r--r--  arch/ia64/xen/hypercall.S | 5
-rw-r--r--  arch/ia64/xen/irq_xen.c | 131
-rw-r--r--  arch/ia64/xen/time.c | 22
-rw-r--r--  arch/ia64/xen/xen_pv_ops.c | 16
132 files changed, 811 insertions(+), 7136 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 1ee596cd942f..4d4f4188cdf1 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -53,6 +53,9 @@ config MMU
 	bool
 	default y
 
+config NEED_DMA_MAP_STATE
+	def_bool y
+
 config SWIOTLB
 	bool
 
@@ -87,9 +90,6 @@ config GENERIC_TIME_VSYSCALL
 	bool
 	default y
 
-config HAVE_LEGACY_PER_CPU_AREA
-	def_bool y
-
 config HAVE_SETUP_PER_CPU_AREA
 	def_bool y
 
@@ -502,23 +502,6 @@ config ARCH_PROC_KCORE_TEXT
 	def_bool y
 	depends on PROC_KCORE
 
-config IA32_SUPPORT
-	bool "Support for Linux/x86 binaries"
-	help
-	  IA-64 processors can execute IA-32 (X86) instructions.  By
-	  saying Y here, the kernel will include IA-32 system call
-	  emulation support which makes it possible to transparently
-	  run IA-32 Linux binaries on an IA-64 Linux system.
-	  If in doubt, say Y.
-
-config COMPAT
-	bool
-	depends on IA32_SUPPORT
-	default y
-
-config COMPAT_FOR_U64_ALIGNMENT
-	def_bool COMPAT
-
 config IA64_MCA_RECOVERY
 	tristate "MCA recovery from errors other than TLB."
 
diff --git a/arch/ia64/Makefile b/arch/ia64/Makefile
index e7cbaa02cd0b..8ae0d2604ce1 100644
--- a/arch/ia64/Makefile
+++ b/arch/ia64/Makefile
@@ -46,7 +46,6 @@ head-y := arch/ia64/kernel/head.o arch/ia64/kernel/init_task.o
 
 libs-y				+= arch/ia64/lib/
 core-y				+= arch/ia64/kernel/ arch/ia64/mm/
-core-$(CONFIG_IA32_SUPPORT)	+= arch/ia64/ia32/
 core-$(CONFIG_IA64_DIG)		+= arch/ia64/dig/
 core-$(CONFIG_IA64_DIG_VTD)	+= arch/ia64/dig/
 core-$(CONFIG_IA64_GENERIC)	+= arch/ia64/dig/
@@ -103,4 +102,4 @@ archprepare: make_nr_irqs_h FORCE
 PHONY += make_nr_irqs_h FORCE
 
 make_nr_irqs_h: FORCE
-	$(Q)$(MAKE) $(build)=arch/ia64/kernel include/asm-ia64/nr-irqs.h
+	$(Q)$(MAKE) $(build)=arch/ia64/kernel include/generated/nr-irqs.h
diff --git a/arch/ia64/configs/bigsur_defconfig b/arch/ia64/configs/bigsur_defconfig
index ace41096b47b..312b12094a1d 100644
--- a/arch/ia64/configs/bigsur_defconfig
+++ b/arch/ia64/configs/bigsur_defconfig
@@ -131,8 +131,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
 CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 # CONFIG_VIRTUAL_MEM_MAP is not set
-CONFIG_IA32_SUPPORT=y
-CONFIG_COMPAT=y
 # CONFIG_IA64_MCA_RECOVERY is not set
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
diff --git a/arch/ia64/configs/generic_defconfig b/arch/ia64/configs/generic_defconfig
index 75645495c2dd..6a4cc506fb5f 100644
--- a/arch/ia64/configs/generic_defconfig
+++ b/arch/ia64/configs/generic_defconfig
@@ -205,8 +205,6 @@ CONFIG_VIRTUAL_MEM_MAP=y
 CONFIG_HOLES_IN_ZONE=y
 CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
 CONFIG_HAVE_ARCH_NODEDATA_EXTENSION=y
-CONFIG_IA32_SUPPORT=y
-CONFIG_COMPAT=y
 CONFIG_COMPAT_FOR_U64_ALIGNMENT=y
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
diff --git a/arch/ia64/configs/gensparse_defconfig b/arch/ia64/configs/gensparse_defconfig
index e86fbd39c795..2dc185b0f9a3 100644
--- a/arch/ia64/configs/gensparse_defconfig
+++ b/arch/ia64/configs/gensparse_defconfig
@@ -139,8 +139,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
 CONFIG_NUMA=y
 CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
-CONFIG_IA32_SUPPORT=y
-CONFIG_COMPAT=y
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
diff --git a/arch/ia64/configs/sim_defconfig b/arch/ia64/configs/sim_defconfig
index 546a772f438e..21a23cdfd41c 100644
--- a/arch/ia64/configs/sim_defconfig
+++ b/arch/ia64/configs/sim_defconfig
@@ -130,8 +130,6 @@ CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
 CONFIG_ARCH_FLATMEM_ENABLE=y
 CONFIG_ARCH_SPARSEMEM_ENABLE=y
 # CONFIG_VIRTUAL_MEM_MAP is not set
-CONFIG_IA32_SUPPORT=y
-CONFIG_COMPAT=y
 # CONFIG_IA64_MCA_RECOVERY is not set
 # CONFIG_PERFMON is not set
 CONFIG_IA64_PALINFO=m
diff --git a/arch/ia64/configs/tiger_defconfig b/arch/ia64/configs/tiger_defconfig
index c522edf23c62..c5a5ea9d54ae 100644
--- a/arch/ia64/configs/tiger_defconfig
+++ b/arch/ia64/configs/tiger_defconfig
@@ -154,7 +154,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_VIRTUAL_MEM_MAP=y
 CONFIG_HOLES_IN_ZONE=y
-# CONFIG_IA32_SUPPORT is not set
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
diff --git a/arch/ia64/configs/xen_domu_defconfig b/arch/ia64/configs/xen_domu_defconfig
index 0bb0714dc19d..c67eafc4bb38 100644
--- a/arch/ia64/configs/xen_domu_defconfig
+++ b/arch/ia64/configs/xen_domu_defconfig
@@ -200,8 +200,6 @@ CONFIG_ARCH_SPARSEMEM_ENABLE=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_VIRTUAL_MEM_MAP=y
 CONFIG_HOLES_IN_ZONE=y
-# CONFIG_IA32_SUPPORT is not set
-# CONFIG_COMPAT_FOR_U64_ALIGNMENT is not set
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
diff --git a/arch/ia64/configs/zx1_defconfig b/arch/ia64/configs/zx1_defconfig
index 514f0635dafe..3cec65b534c2 100644
--- a/arch/ia64/configs/zx1_defconfig
+++ b/arch/ia64/configs/zx1_defconfig
@@ -150,8 +150,6 @@ CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
 CONFIG_ARCH_POPULATES_NODE_MAP=y
 CONFIG_VIRTUAL_MEM_MAP=y
 CONFIG_HOLES_IN_ZONE=y
-CONFIG_IA32_SUPPORT=y
-CONFIG_COMPAT=y
 CONFIG_IA64_MCA_RECOVERY=y
 CONFIG_PERFMON=y
 CONFIG_IA64_PALINFO=y
diff --git a/arch/ia64/hp/common/aml_nfw.c b/arch/ia64/hp/common/aml_nfw.c
index 4abd2c79bb1d..22078486d35d 100644
--- a/arch/ia64/hp/common/aml_nfw.c
+++ b/arch/ia64/hp/common/aml_nfw.c
@@ -77,7 +77,7 @@ static void aml_nfw_execute(struct ia64_nfw_context *c)
 		  c->arg[4], c->arg[5], c->arg[6], c->arg[7]);
 }
 
-static void aml_nfw_read_arg(u8 *offset, u32 bit_width, acpi_integer *value)
+static void aml_nfw_read_arg(u8 *offset, u32 bit_width, u64 *value)
 {
 	switch (bit_width) {
 	case 8:
@@ -95,7 +95,7 @@ static void aml_nfw_read_arg(u8 *offset, u32 bit_width, acpi_integer *value)
 	}
 }
 
-static void aml_nfw_write_arg(u8 *offset, u32 bit_width, acpi_integer *value)
+static void aml_nfw_write_arg(u8 *offset, u32 bit_width, u64 *value)
 {
 	switch (bit_width) {
 	case 8:
@@ -114,7 +114,7 @@ static void aml_nfw_write_arg(u8 *offset, u32 bit_width, acpi_integer *value)
 }
 
 static acpi_status aml_nfw_handler(u32 function, acpi_physical_address address,
-	u32 bit_width, acpi_integer *value, void *handler_context,
+	u32 bit_width, u64 *value, void *handler_context,
 	void *region_context)
 {
 	struct ia64_nfw_context *context = handler_context;
diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
index 674a8374c6d9..e14c492a8a93 100644
--- a/arch/ia64/hp/common/sba_iommu.c
+++ b/arch/ia64/hp/common/sba_iommu.c
@@ -677,12 +677,19 @@ sba_alloc_range(struct ioc *ioc, struct device *dev, size_t size)
 		spin_unlock_irqrestore(&ioc->saved_lock, flags);
 
 		pide = sba_search_bitmap(ioc, dev, pages_needed, 0);
-		if (unlikely(pide >= (ioc->res_size << 3)))
-			panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-			      ioc->ioc_hpa);
+		if (unlikely(pide >= (ioc->res_size << 3))) {
+			printk(KERN_WARNING "%s: I/O MMU @ %p is"
+			       "out of mapping resources, %u %u %lx\n",
+			       __func__, ioc->ioc_hpa, ioc->res_size,
+			       pages_needed, dma_get_seg_boundary(dev));
+			return -1;
+		}
 #else
-		panic(__FILE__ ": I/O MMU @ %p is out of mapping resources\n",
-		      ioc->ioc_hpa);
+		printk(KERN_WARNING "%s: I/O MMU @ %p is"
+		       "out of mapping resources, %u %u %lx\n",
+		       __func__, ioc->ioc_hpa, ioc->res_size,
+		       pages_needed, dma_get_seg_boundary(dev));
+		return -1;
 #endif
 		}
 	}
@@ -965,6 +972,8 @@ static dma_addr_t sba_map_page(struct device *dev, struct page *page,
 #endif
 
 	pide = sba_alloc_range(ioc, dev, size);
+	if (pide < 0)
+		return 0;
 
 	iovp = (dma_addr_t) pide << iovp_shift;
 
@@ -1320,6 +1329,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 	unsigned long dma_offset, dma_len; /* start/len of DMA stream */
 	int n_mappings = 0;
 	unsigned int max_seg_size = dma_get_max_seg_size(dev);
+	int idx;
 
 	while (nents > 0) {
 		unsigned long vaddr = (unsigned long) sba_sg_address(startsg);
@@ -1381,7 +1391,7 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 #endif
 
 		/*
-		** Not virtually contigous.
+		** Not virtually contiguous.
 		** Terminate prev chunk.
 		** Start a new chunk.
 		**
@@ -1418,16 +1428,22 @@ sba_coalesce_chunks(struct ioc *ioc, struct device *dev,
 		vcontig_sg->dma_length = vcontig_len;
 		dma_len = (dma_len + dma_offset + ~iovp_mask) & iovp_mask;
 		ASSERT(dma_len <= DMA_CHUNK_SIZE);
-		dma_sg->dma_address = (dma_addr_t) (PIDE_FLAG
-			| (sba_alloc_range(ioc, dev, dma_len) << iovp_shift)
-			| dma_offset);
+		idx = sba_alloc_range(ioc, dev, dma_len);
+		if (idx < 0) {
+			dma_sg->dma_length = 0;
+			return -1;
+		}
+		dma_sg->dma_address = (dma_addr_t)(PIDE_FLAG | (idx << iovp_shift)
+						   | dma_offset);
 		n_mappings++;
 	}
 
 	return n_mappings;
 }
 
-
+static void sba_unmap_sg_attrs(struct device *dev, struct scatterlist *sglist,
+			       int nents, enum dma_data_direction dir,
+			       struct dma_attrs *attrs);
 /**
  * sba_map_sg - map Scatter/Gather list
  * @dev: instance of PCI owned by the driver that's asking.
@@ -1493,6 +1509,10 @@ static int sba_map_sg_attrs(struct device *dev, struct scatterlist *sglist,
 	** Access to the virtual address is what forces a two pass algorithm.
 	*/
 	coalesced = sba_coalesce_chunks(ioc, dev, sglist, nents);
+	if (coalesced < 0) {
+		sba_unmap_sg_attrs(dev, sglist, nents, dir, attrs);
+		return 0;
+	}
 
 	/*
 	** Program the I/O Pdir
diff --git a/arch/ia64/ia32/Makefile b/arch/ia64/ia32/Makefile
deleted file mode 100644
index baad8c7699c0..000000000000
--- a/arch/ia64/ia32/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
-#
-# Makefile for the ia32 kernel emulation subsystem.
-#
-
-obj-y := ia32_entry.o sys_ia32.o ia32_signal.o \
-	ia32_support.o ia32_traps.o binfmt_elf32.o ia32_ldt.o
-obj-$(CONFIG_AUDIT) += audit.o
-
-# Don't let GCC uses f16-f31 so that save_ia32_fpstate_live() and
-# restore_ia32_fpstate_live() can be sure the live register contain user-level state.
-CFLAGS_ia32_signal.o += -mfixed-range=f16-f31
diff --git a/arch/ia64/ia32/audit.c b/arch/ia64/ia32/audit.c
deleted file mode 100644
index 5c93ddd1e42d..000000000000
--- a/arch/ia64/ia32/audit.c
+++ /dev/null
@@ -1,42 +0,0 @@
1#include "../../x86/include/asm/unistd_32.h"
2
3unsigned ia32_dir_class[] = {
4#include <asm-generic/audit_dir_write.h>
5~0U
6};
7
8unsigned ia32_chattr_class[] = {
9#include <asm-generic/audit_change_attr.h>
10~0U
11};
12
13unsigned ia32_write_class[] = {
14#include <asm-generic/audit_write.h>
15~0U
16};
17
18unsigned ia32_read_class[] = {
19#include <asm-generic/audit_read.h>
20~0U
21};
22
23unsigned ia32_signal_class[] = {
24#include <asm-generic/audit_signal.h>
25~0U
26};
27
28int ia32_classify_syscall(unsigned syscall)
29{
30 switch(syscall) {
31 case __NR_open:
32 return 2;
33 case __NR_openat:
34 return 3;
35 case __NR_socketcall:
36 return 4;
37 case __NR_execve:
38 return 5;
39 default:
40 return 1;
41 }
42}
diff --git a/arch/ia64/ia32/binfmt_elf32.c b/arch/ia64/ia32/binfmt_elf32.c
deleted file mode 100644
index c69552bf893e..000000000000
--- a/arch/ia64/ia32/binfmt_elf32.c
+++ /dev/null
@@ -1,245 +0,0 @@
-/*
- * IA-32 ELF support.
- *
- * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
- * Copyright (C) 2001 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * 06/16/00	A. Mallick	initialize csd/ssd/tssd/cflg for ia32_load_state
- * 04/13/01	D. Mosberger	dropped saving tssd in ar.k1---it's not needed
- * 09/14/01	D. Mosberger	fixed memory management for gdt/tss page
- */
-
-#include <linux/types.h>
-#include <linux/mm.h>
-#include <linux/security.h>
-
-#include <asm/param.h>
-#include <asm/signal.h>
-
-#include "ia32priv.h"
-#include "elfcore32.h"
-
-/* Override some function names */
-#undef start_thread
-#define start_thread			ia32_start_thread
-#define elf_format			elf32_format
-#define init_elf_binfmt			init_elf32_binfmt
-#define exit_elf_binfmt			exit_elf32_binfmt
-
-#undef CLOCKS_PER_SEC
-#define CLOCKS_PER_SEC	IA32_CLOCKS_PER_SEC
-
-extern void ia64_elf32_init (struct pt_regs *regs);
-
-static void elf32_set_personality (void);
-
-static unsigned long __attribute ((unused))
-randomize_stack_top(unsigned long stack_top);
-
-#define setup_arg_pages(bprm,tos,exec)	ia32_setup_arg_pages(bprm,exec)
-#define elf_map				elf32_map
-
-#undef SET_PERSONALITY
-#define SET_PERSONALITY(ex)	elf32_set_personality()
-
-#define elf_read_implies_exec(ex, have_pt_gnu_stack)	(!(have_pt_gnu_stack))
-
-/* Ugly but avoids duplication */
-#include "../../../fs/binfmt_elf.c"
-
-extern struct page *ia32_shared_page[];
-extern unsigned long *ia32_gdt;
-extern struct page *ia32_gate_page;
-
-int
-ia32_install_shared_page (struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	vmf->page = ia32_shared_page[smp_processor_id()];
-	get_page(vmf->page);
-	return 0;
-}
-
-int
-ia32_install_gate_page (struct vm_area_struct *vma, struct vm_fault *vmf)
-{
-	vmf->page = ia32_gate_page;
-	get_page(vmf->page);
-	return 0;
-}
-
-
-static const struct vm_operations_struct ia32_shared_page_vm_ops = {
-	.fault = ia32_install_shared_page
-};
-
-static const struct vm_operations_struct ia32_gate_page_vm_ops = {
-	.fault = ia32_install_gate_page
-};
-
-void
-ia64_elf32_init (struct pt_regs *regs)
-{
-	struct vm_area_struct *vma;
-
-	/*
-	 * Map GDT below 4GB, where the processor can find it.  We need to map
-	 * it with privilege level 3 because the IVE uses non-privileged accesses to these
-	 * tables.  IA-32 segmentation is used to protect against IA-32 accesses to them.
-	 */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (vma) {
-		vma->vm_mm = current->mm;
-		vma->vm_start = IA32_GDT_OFFSET;
-		vma->vm_end = vma->vm_start + PAGE_SIZE;
-		vma->vm_page_prot = PAGE_SHARED;
-		vma->vm_flags = VM_READ|VM_MAYREAD|VM_RESERVED;
-		vma->vm_ops = &ia32_shared_page_vm_ops;
-		down_write(&current->mm->mmap_sem);
-		{
-			if (insert_vm_struct(current->mm, vma)) {
-				kmem_cache_free(vm_area_cachep, vma);
-				up_write(&current->mm->mmap_sem);
-				BUG();
-			}
-		}
-		up_write(&current->mm->mmap_sem);
-	}
-
-	/*
-	 * When user stack is not executable, push sigreturn code to stack makes
-	 * segmentation fault raised when returning to kernel. So now sigreturn
-	 * code is locked in specific gate page, which is pointed by pretcode
-	 * when setup_frame_ia32
-	 */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (vma) {
-		vma->vm_mm = current->mm;
-		vma->vm_start = IA32_GATE_OFFSET;
-		vma->vm_end = vma->vm_start + PAGE_SIZE;
-		vma->vm_page_prot = PAGE_COPY_EXEC;
-		vma->vm_flags = VM_READ | VM_MAYREAD | VM_EXEC
-				| VM_MAYEXEC | VM_RESERVED;
-		vma->vm_ops = &ia32_gate_page_vm_ops;
-		down_write(&current->mm->mmap_sem);
-		{
-			if (insert_vm_struct(current->mm, vma)) {
-				kmem_cache_free(vm_area_cachep, vma);
-				up_write(&current->mm->mmap_sem);
-				BUG();
-			}
-		}
-		up_write(&current->mm->mmap_sem);
-	}
-
-	/*
-	 * Install LDT as anonymous memory.  This gives us all-zero segment descriptors
-	 * until a task modifies them via modify_ldt().
-	 */
-	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
-	if (vma) {
-		vma->vm_mm = current->mm;
-		vma->vm_start = IA32_LDT_OFFSET;
-		vma->vm_end = vma->vm_start + PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
-		vma->vm_page_prot = PAGE_SHARED;
-		vma->vm_flags = VM_READ|VM_WRITE|VM_MAYREAD|VM_MAYWRITE;
-		down_write(&current->mm->mmap_sem);
-		{
-			if (insert_vm_struct(current->mm, vma)) {
-				kmem_cache_free(vm_area_cachep, vma);
-				up_write(&current->mm->mmap_sem);
-				BUG();
-			}
-		}
-		up_write(&current->mm->mmap_sem);
-	}
-
-	ia64_psr(regs)->ac = 0;		/* turn off alignment checking */
-	regs->loadrs = 0;
-	/*
-	 *  According to the ABI %edx points to an `atexit' handler.  Since we don't have
-	 *  one we'll set it to 0 and initialize all the other registers just to make
-	 *  things more deterministic, ala the i386 implementation.
-	 */
-	regs->r8 = 0;	/* %eax */
-	regs->r11 = 0;	/* %ebx */
-	regs->r9 = 0;	/* %ecx */
-	regs->r10 = 0;	/* %edx */
-	regs->r13 = 0;	/* %ebp */
-	regs->r14 = 0;	/* %esi */
-	regs->r15 = 0;	/* %edi */
-
-	current->thread.eflag = IA32_EFLAG;
-	current->thread.fsr = IA32_FSR_DEFAULT;
-	current->thread.fcr = IA32_FCR_DEFAULT;
-	current->thread.fir = 0;
-	current->thread.fdr = 0;
-
-	/*
-	 * Setup GDTD.  Note: GDTD is the descrambled version of the pseudo-descriptor
-	 * format defined by Figure 3-11 "Pseudo-Descriptor Format" in the IA-32
-	 * architecture manual. Also note that the only fields that are not ignored are
-	 * `base', `limit', 'G', `P' (must be 1) and `S' (must be 0).
-	 */
-	regs->r31 = IA32_SEG_UNSCRAMBLE(IA32_SEG_DESCRIPTOR(IA32_GDT_OFFSET, IA32_PAGE_SIZE - 1,
-							    0, 0, 0, 1, 0, 0, 0));
-	/* Setup the segment selectors */
-	regs->r16 = (__USER_DS << 16) | __USER_DS; /* ES == DS, GS, FS are zero */
-	regs->r17 = (__USER_DS << 16) | __USER_CS; /* SS, CS; ia32_load_state() sets TSS and LDT */
-
-	ia32_load_segment_descriptors(current);
-	ia32_load_state(current);
-}
-
-/*
- * Undo the override of setup_arg_pages() without this ia32_setup_arg_pages()
- * will suffer infinite self recursion.
- */
-#undef setup_arg_pages
-
-int
-ia32_setup_arg_pages (struct linux_binprm *bprm, int executable_stack)
-{
-	int ret;
-
-	ret = setup_arg_pages(bprm, IA32_STACK_TOP, executable_stack);
-	if (!ret) {
-		/*
-		 * Can't do it in ia64_elf32_init(). Needs to be done before
-		 * calls to elf32_map()
-		 */
-		current->thread.ppl = ia32_init_pp_list();
-	}
-
-	return ret;
-}
-
-static void
-elf32_set_personality (void)
-{
-	set_personality(PER_LINUX32);
-	current->thread.map_base = IA32_PAGE_OFFSET/3;
-}
-
-static unsigned long
-elf32_map(struct file *filep, unsigned long addr, struct elf_phdr *eppnt,
-		int prot, int type, unsigned long unused)
-{
-	unsigned long pgoff = (eppnt->p_vaddr) & ~IA32_PAGE_MASK;
-
-	return ia32_do_mmap(filep, (addr & IA32_PAGE_MASK), eppnt->p_filesz + pgoff, prot, type,
-			    eppnt->p_offset - pgoff);
-}
-
-#define cpu_uses_ia32el()	(local_cpu_data->family > 0x1f)
-
-static int __init check_elf32_binfmt(void)
-{
-	if (cpu_uses_ia32el()) {
-		printk("Please use IA-32 EL for executing IA-32 binaries\n");
-		unregister_binfmt(&elf_format);
-	}
-	return 0;
-}
-
-module_init(check_elf32_binfmt)
diff --git a/arch/ia64/ia32/elfcore32.h b/arch/ia64/ia32/elfcore32.h
deleted file mode 100644
index 9a3abf58cea3..000000000000
--- a/arch/ia64/ia32/elfcore32.h
+++ /dev/null
@@ -1,150 +0,0 @@
-/*
- * IA-32 ELF core dump support.
- *
- * Copyright (C) 2003 Arun Sharma <arun.sharma@intel.com>
- *
- * Derived from the x86_64 version
- */
-#ifndef _ELFCORE32_H_
-#define _ELFCORE32_H_
-
-#include <asm/intrinsics.h>
-#include <asm/uaccess.h>
-
-#define USE_ELF_CORE_DUMP 1
-
-/* Override elfcore.h */
-#define _LINUX_ELFCORE_H 1
-typedef unsigned int elf_greg_t;
-
-#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef struct ia32_user_i387_struct elf_fpregset_t;
-typedef struct ia32_user_fxsr_struct elf_fpxregset_t;
-
-struct elf_siginfo
-{
-	int	si_signo;	/* signal number */
-	int	si_code;	/* extra code */
-	int	si_errno;	/* errno */
-};
-
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
-/*
- * Hacks are here since types between compat_timeval (= pair of s32) and
- * ia64-native timeval (= pair of s64) are not compatible, at least a file
- * arch/ia64/ia32/../../../fs/binfmt_elf.c will get warnings from compiler on
- * use of cputime_to_timeval(), which usually an alias of jiffies_to_timeval().
- */
-#define cputime_to_timeval(a,b) \
-	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/NSEC_PER_SEC; } while(0)
-#else
-#define jiffies_to_timeval(a,b) \
-	do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; } while(0)
-#endif
-
-struct elf_prstatus
-{
-	struct elf_siginfo pr_info;	/* Info associated with signal */
-	short	pr_cursig;		/* Current signal */
-	unsigned int pr_sigpend;	/* Set of pending signals */
-	unsigned int pr_sighold;	/* Set of held signals */
-	pid_t	pr_pid;
-	pid_t	pr_ppid;
-	pid_t	pr_pgrp;
-	pid_t	pr_sid;
-	struct compat_timeval pr_utime;	/* User time */
-	struct compat_timeval pr_stime;	/* System time */
-	struct compat_timeval pr_cutime;	/* Cumulative user time */
-	struct compat_timeval pr_cstime;	/* Cumulative system time */
-	elf_gregset_t pr_reg;	/* GP registers */
-	int pr_fpvalid;		/* True if math co-processor being used.  */
-};
-
-#define ELF_PRARGSZ	(80)	/* Number of chars for args */
-
-struct elf_prpsinfo
-{
-	char	pr_state;	/* numeric process state */
-	char	pr_sname;	/* char for pr_state */
-	char	pr_zomb;	/* zombie */
-	char	pr_nice;	/* nice val */
-	unsigned int pr_flag;	/* flags */
-	__u16	pr_uid;
-	__u16	pr_gid;
-	pid_t	pr_pid, pr_ppid, pr_pgrp, pr_sid;
-	/* Lots missing */
-	char	pr_fname[16];	/* filename of executable */
-	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
-};
-
-#define ELF_CORE_COPY_REGS(pr_reg, regs)	\
-	pr_reg[0] = regs->r11;			\
-	pr_reg[1] = regs->r9;			\
-	pr_reg[2] = regs->r10;			\
-	pr_reg[3] = regs->r14;			\
-	pr_reg[4] = regs->r15;			\
-	pr_reg[5] = regs->r13;			\
-	pr_reg[6] = regs->r8;			\
-	pr_reg[7] = regs->r16 & 0xffff;		\
-	pr_reg[8] = (regs->r16 >> 16) & 0xffff;	\
-	pr_reg[9] = (regs->r16 >> 32) & 0xffff;	\
-	pr_reg[10] = (regs->r16 >> 48) & 0xffff;	\
-	pr_reg[11] = regs->r1;			\
-	pr_reg[12] = regs->cr_iip;		\
-	pr_reg[13] = regs->r17 & 0xffff;	\
-	pr_reg[14] = ia64_getreg(_IA64_REG_AR_EFLAG);	\
-	pr_reg[15] = regs->r12;			\
-	pr_reg[16] = (regs->r17 >> 16) & 0xffff;
-
-static inline void elf_core_copy_regs(elf_gregset_t *elfregs,
-				      struct pt_regs *regs)
-{
-	ELF_CORE_COPY_REGS((*elfregs), regs)
-}
-
-static inline int elf_core_copy_task_regs(struct task_struct *t,
-					  elf_gregset_t* elfregs)
-{
-	ELF_CORE_COPY_REGS((*elfregs), task_pt_regs(t));
-	return 1;
-}
-
-static inline int
-elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu)
-{
-	struct ia32_user_i387_struct *fpstate = (void*)fpu;
-	mm_segment_t old_fs;
-
-	if (!tsk_used_math(tsk))
-		return 0;
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	save_ia32_fpstate(tsk, (struct ia32_user_i387_struct __user *) fpstate);
-	set_fs(old_fs);
-
-	return 1;
-}
-
-#define ELF_CORE_COPY_XFPREGS 1
-#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
-static inline int
-elf_core_copy_task_xfpregs(struct task_struct *tsk, elf_fpxregset_t *xfpu)
-{
-	struct ia32_user_fxsr_struct *fpxstate = (void*) xfpu;
-	mm_segment_t old_fs;
-
-	if (!tsk_used_math(tsk))
-		return 0;
-
-	old_fs = get_fs();
-	set_fs(KERNEL_DS);
-	save_ia32_fpxstate(tsk, (struct ia32_user_fxsr_struct __user *) fpxstate);
-	set_fs(old_fs);
-
-	return 1;
-}
-
-#endif /* _ELFCORE32_H_ */
diff --git a/arch/ia64/ia32/ia32_entry.S b/arch/ia64/ia32/ia32_entry.S
deleted file mode 100644
index af9405cd70e5..000000000000
--- a/arch/ia64/ia32/ia32_entry.S
+++ /dev/null
@@ -1,468 +0,0 @@
-#include <asm/asmmacro.h>
-#include <asm/ia32.h>
-#include <asm/asm-offsets.h>
-#include <asm/signal.h>
-#include <asm/thread_info.h>
-
-#include "../kernel/minstate.h"
-
-	/*
-	 * execve() is special because in case of success, we need to
-	 * setup a null register window frame (in case an IA-32 process
-	 * is exec'ing an IA-64 program).
-	 */
-ENTRY(ia32_execve)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(3)
-	alloc loc1=ar.pfs,3,2,4,0
-	mov loc0=rp
-	.body
-	zxt4 out0=in0			// filename
-	;;				// stop bit between alloc and call
-	zxt4 out1=in1			// argv
-	zxt4 out2=in2			// envp
-	add out3=16,sp			// regs
-	br.call.sptk.few rp=sys32_execve
-1:	cmp.ge p6,p0=r8,r0
-	mov ar.pfs=loc1			// restore ar.pfs
-	;;
-(p6)	mov ar.pfs=r0			// clear ar.pfs in case of success
-	sxt4 r8=r8			// return 64-bit result
-	mov rp=loc0
-	br.ret.sptk.few rp
-END(ia32_execve)
-
-ENTRY(ia32_clone)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(5)
-	alloc r16=ar.pfs,5,2,6,0
-	DO_SAVE_SWITCH_STACK
-	mov loc0=rp
-	mov loc1=r16			// save ar.pfs across do_fork
-	.body
-	zxt4 out1=in1			// newsp
-	mov out3=16			// stacksize (compensates for 16-byte scratch area)
-	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
-	mov out0=in0			// out0 = clone_flags
-	zxt4 out4=in2			// out4 = parent_tidptr
-	zxt4 out5=in4			// out5 = child_tidptr
-	br.call.sptk.many rp=do_fork
-.ret0:	.restore sp
-	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
-	mov ar.pfs=loc1
-	mov rp=loc0
-	br.ret.sptk.many rp
-END(ia32_clone)
-
-GLOBAL_ENTRY(ia32_ret_from_clone)
-	PT_REGS_UNWIND_INFO(0)
-{ /*
- * Some versions of gas generate bad unwind info if the first instruction of a
- * procedure doesn't go into the first slot of a bundle.  This is a workaround.
- */
-	nop.m 0
-	nop.i 0
-	/*
-	 * We need to call schedule_tail() to complete the scheduling process.
-	 * Called by ia64_switch_to after do_fork()->copy_thread().  r8 contains the
-	 * address of the previously executing task.
-	 */
-	br.call.sptk.many rp=ia64_invoke_schedule_tail
-}
-.ret1:
-	adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
-	;;
-	ld4 r2=[r2]
-	;;
-	mov r8=0
-	and r2=_TIF_SYSCALL_TRACEAUDIT,r2
-	;;
-	cmp.ne p6,p0=r2,r0
-(p6)	br.cond.spnt .ia32_strace_check_retval
-	;;			// prevent RAW on r8
-END(ia32_ret_from_clone)
-	// fall thrugh
-GLOBAL_ENTRY(ia32_ret_from_syscall)
-	PT_REGS_UNWIND_INFO(0)
-
-	cmp.ge p6,p7=r8,r0	// syscall executed successfully?
-	adds r2=IA64_PT_REGS_R8_OFFSET+16,sp	// r2 = &pt_regs.r8
-	;;
-	alloc r3=ar.pfs,0,0,0,0	// drop the syscall argument frame
-	st8 [r2]=r8		// store return value in slot for r8
-	br.cond.sptk.many ia64_leave_kernel
-END(ia32_ret_from_syscall)
-
-	//
-	// Invoke a system call, but do some tracing before and after the call.
-	// We MUST preserve the current register frame throughout this routine
-	// because some system calls (such as ia64_execve) directly
-	// manipulate ar.pfs.
-	//
-	// Input:
-	//	r8 = syscall number
-	//	b6 = syscall entry point
-	//
-GLOBAL_ENTRY(ia32_trace_syscall)
-	PT_REGS_UNWIND_INFO(0)
-	mov r3=-38
-	adds r2=IA64_PT_REGS_R8_OFFSET+16,sp
-	;;
-	st8 [r2]=r3		// initialize return code to -ENOSYS
-	br.call.sptk.few rp=syscall_trace_enter	// give parent a chance to catch syscall args
-	cmp.lt p6,p0=r8,r0	// check tracehook
-	adds r2=IA64_PT_REGS_R8_OFFSET+16,sp	// r2 = &pt_regs.r8
-	;;
-(p6)	st8.spill [r2]=r8	// store return value in slot for r8
-(p6)	br.spnt.few .ret4
-.ret2:	// Need to reload arguments (they may be changed by the tracing process)
-	adds r2=IA64_PT_REGS_R1_OFFSET+16,sp	// r2 = &pt_regs.r1
-	adds r3=IA64_PT_REGS_R13_OFFSET+16,sp	// r3 = &pt_regs.r13
-	mov r15=IA32_NR_syscalls
-	;;
-	ld4 r8=[r2],IA64_PT_REGS_R9_OFFSET-IA64_PT_REGS_R1_OFFSET
-	movl r16=ia32_syscall_table
-	;;
-	ld4 r33=[r2],8		// r9 == ecx
-	ld4 r37=[r3],16		// r13 == ebp
-	cmp.ltu.unc p6,p7=r8,r15
-	;;
-	ld4 r34=[r2],8		// r10 == edx
-	ld4 r36=[r3],8		// r15 == edi
-(p6)	shladd r16=r8,3,r16	// force ni_syscall if not valid syscall number
-	;;
-	ld8 r16=[r16]
-	;;
-	ld4 r32=[r2],8		// r11 == ebx
-	mov b6=r16
-	ld4 r35=[r3],8		// r14 == esi
-	br.call.sptk.few rp=b6	// do the syscall
-.ia32_strace_check_retval:
-	cmp.lt p6,p0=r8,r0	// syscall failed?
-	adds r2=IA64_PT_REGS_R8_OFFSET+16,sp	// r2 = &pt_regs.r8
-	;;
-	st8.spill [r2]=r8	// store return value in slot for r8
-	br.call.sptk.few rp=syscall_trace_leave	// give parent a chance to catch return value
-.ret4:	alloc r2=ar.pfs,0,0,0,0	// drop the syscall argument frame
-	br.cond.sptk.many ia64_leave_kernel
-END(ia32_trace_syscall)
-
-GLOBAL_ENTRY(sys32_vfork)
-	alloc r16=ar.pfs,2,2,4,0;;
-	mov out0=IA64_CLONE_VFORK|IA64_CLONE_VM|SIGCHLD	// out0 = clone_flags
-	br.cond.sptk.few .fork1			// do the work
-END(sys32_vfork)
-
-GLOBAL_ENTRY(sys32_fork)
-	.prologue ASM_UNW_PRLG_RP|ASM_UNW_PRLG_PFS, ASM_UNW_PRLG_GRSAVE(2)
-	alloc r16=ar.pfs,2,2,4,0
-	mov out0=SIGCHLD			// out0 = clone_flags
-	;;
-.fork1:
-	mov loc0=rp
-	mov loc1=r16				// save ar.pfs across do_fork
-	DO_SAVE_SWITCH_STACK
-
-	.body
-
-	mov out1=0
-	mov out3=0
-	adds out2=IA64_SWITCH_STACK_SIZE+16,sp	// out2 = &regs
-	br.call.sptk.few rp=do_fork
-.ret5:	.restore sp
-	adds sp=IA64_SWITCH_STACK_SIZE,sp	// pop the switch stack
-	mov ar.pfs=loc1
-	mov rp=loc0
-	br.ret.sptk.many rp
-END(sys32_fork)
-
-	.rodata
-	.align 8
-	.globl ia32_syscall_table
-ia32_syscall_table:
-	data8 sys_ni_syscall	  /* 0 - old "setup()" system call*/
-	data8 sys_exit
-	data8 sys32_fork
-	data8 sys_read
-	data8 sys_write
-	data8 compat_sys_open	  /* 5 */
-	data8 sys_close
-	data8 sys32_waitpid
-	data8 sys_creat
-	data8 sys_link
-	data8 sys_unlink	  /* 10 */
-	data8 ia32_execve
-	data8 sys_chdir
-	data8 compat_sys_time
-	data8 sys_mknod
-	data8 sys_chmod		  /* 15 */
-	data8 sys_lchown	  /* 16-bit version */
-	data8 sys_ni_syscall	  /* old break syscall holder */
-	data8 sys_ni_syscall
-	data8 sys32_lseek
-	data8 sys_getpid	  /* 20 */
-	data8 compat_sys_mount
-	data8 sys_oldumount
-	data8 sys_setuid	  /* 16-bit version */
-	data8 sys_getuid	  /* 16-bit version */
-	data8 compat_sys_stime	  /* 25 */
-	data8 compat_sys_ptrace
-	data8 sys32_alarm
-	data8 sys_ni_syscall
-	data8 sys_pause
-	data8 compat_sys_utime	  /* 30 */
-	data8 sys_ni_syscall	  /* old stty syscall holder */
-	data8 sys_ni_syscall	  /* old gtty syscall holder */
-	data8 sys_access
-	data8 sys_nice
-	data8 sys_ni_syscall	  /* 35 */	  /* old ftime syscall holder */
-	data8 sys_sync
-	data8 sys_kill
-	data8 sys_rename
-	data8 sys_mkdir
-	data8 sys_rmdir		  /* 40 */
-	data8 sys_dup
-	data8 sys_ia64_pipe
-	data8 compat_sys_times
-	data8 sys_ni_syscall	  /* old prof syscall holder */
-	data8 sys32_brk		  /* 45 */
-	data8 sys_setgid	  /* 16-bit version */
-	data8 sys_getgid	  /* 16-bit version */
-	data8 sys32_signal
-	data8 sys_geteuid	  /* 16-bit version */
-	data8 sys_getegid	  /* 16-bit version */	  /* 50 */
-	data8 sys_acct
-	data8 sys_umount	  /* recycled never used phys() */
-	data8 sys_ni_syscall	  /* old lock syscall holder */
-	data8 compat_sys_ioctl
-	data8 compat_sys_fcntl	  /* 55 */
-	data8 sys_ni_syscall	  /* old mpx syscall holder */
-	data8 sys_setpgid
-	data8 sys_ni_syscall	  /* old ulimit syscall holder */
-	data8 sys_ni_syscall
-	data8 sys_umask		  /* 60 */
-	data8 sys_chroot
-	data8 compat_sys_ustat
-	data8 sys_dup2
-	data8 sys_getppid
-	data8 sys_getpgrp	  /* 65 */
-	data8 sys_setsid
-	data8 sys32_sigaction
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall
-	data8 sys_setreuid	  /* 16-bit version */	  /* 70 */
-	data8 sys_setregid	  /* 16-bit version */
-	data8 sys32_sigsuspend
-	data8 compat_sys_sigpending
-	data8 sys_sethostname
-	data8 compat_sys_setrlimit	  /* 75 */
-	data8 compat_sys_old_getrlimit
-	data8 compat_sys_getrusage
-	data8 compat_sys_gettimeofday
-	data8 compat_sys_settimeofday
-	data8 sys32_getgroups16	  /* 80 */
-	data8 sys32_setgroups16
-	data8 sys32_old_select
-	data8 sys_symlink
-	data8 sys_ni_syscall
-	data8 sys_readlink	  /* 85 */
-	data8 sys_uselib
-	data8 sys_swapon
-	data8 sys_reboot
-	data8 compat_sys_old_readdir
-	data8 sys32_mmap	  /* 90 */
-	data8 sys32_munmap
-	data8 sys_truncate
-	data8 sys_ftruncate
-	data8 sys_fchmod
-	data8 sys_fchown	  /* 16-bit version */	  /* 95 */
-	data8 sys_getpriority
-	data8 sys_setpriority
-	data8 sys_ni_syscall	  /* old profil syscall holder */
-	data8 compat_sys_statfs
-	data8 compat_sys_fstatfs  /* 100 */
-	data8 sys_ni_syscall	  /* ioperm */
-	data8 compat_sys_socketcall
-	data8 sys_syslog
-	data8 compat_sys_setitimer
-	data8 compat_sys_getitimer	  /* 105 */
-	data8 compat_sys_newstat
-	data8 compat_sys_newlstat
-	data8 compat_sys_newfstat
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall	  /* iopl */	  /* 110 */
-	data8 sys_vhangup
-	data8 sys_ni_syscall	  /* used to be sys_idle */
-	data8 sys_ni_syscall
-	data8 compat_sys_wait4
-	data8 sys_swapoff	  /* 115 */
-	data8 compat_sys_sysinfo
-	data8 sys32_ipc
-	data8 sys_fsync
-	data8 sys32_sigreturn
-	data8 ia32_clone	  /* 120 */
-	data8 sys_setdomainname
-	data8 sys32_newuname
-	data8 sys32_modify_ldt
-	data8 compat_sys_adjtimex
-	data8 sys32_mprotect	  /* 125 */
-	data8 compat_sys_sigprocmask
-	data8 sys_ni_syscall	  /* create_module */
-	data8 sys_ni_syscall	  /* init_module */
-	data8 sys_ni_syscall	  /* delete_module */
-	data8 sys_ni_syscall	  /* get_kernel_syms */	  /* 130 */
-	data8 sys32_quotactl
-	data8 sys_getpgid
-	data8 sys_fchdir
-	data8 sys_ni_syscall	  /* sys_bdflush */
-	data8 sys_sysfs		  /* 135 */
-	data8 sys32_personality
-	data8 sys_ni_syscall	  /* for afs_syscall */
-	data8 sys_setfsuid	  /* 16-bit version */
-	data8 sys_setfsgid	  /* 16-bit version */
-	data8 sys_llseek	  /* 140 */
-	data8 compat_sys_getdents
-	data8 compat_sys_select
-	data8 sys_flock
-	data8 sys32_msync
-	data8 compat_sys_readv	  /* 145 */
-	data8 compat_sys_writev
-	data8 sys_getsid
-	data8 sys_fdatasync
-	data8 sys32_sysctl
-	data8 sys_mlock		  /* 150 */
-	data8 sys_munlock
-	data8 sys_mlockall
-	data8 sys_munlockall
-	data8 sys_sched_setparam
-	data8 sys_sched_getparam  /* 155 */
-	data8 sys_sched_setscheduler
-	data8 sys_sched_getscheduler
-	data8 sys_sched_yield
-	data8 sys_sched_get_priority_max
-	data8 sys_sched_get_priority_min  /* 160 */
-	data8 sys32_sched_rr_get_interval
-	data8 compat_sys_nanosleep
-	data8 sys32_mremap
-	data8 sys_setresuid	  /* 16-bit version */
-	data8 sys32_getresuid16	  /* 16-bit version */	  /* 165 */
-	data8 sys_ni_syscall	  /* vm86 */
-	data8 sys_ni_syscall	  /* sys_query_module */
-	data8 sys_poll
-	data8 sys_ni_syscall	  /* nfsservctl */
-	data8 sys_setresgid	  /* 170 */
-	data8 sys32_getresgid16
-	data8 sys_prctl
-	data8 sys32_rt_sigreturn
-	data8 sys32_rt_sigaction
-	data8 sys32_rt_sigprocmask	  /* 175 */
-	data8 sys_rt_sigpending
-	data8 compat_sys_rt_sigtimedwait
-	data8 sys32_rt_sigqueueinfo
-	data8 compat_sys_rt_sigsuspend
-	data8 sys32_pread	  /* 180 */
-	data8 sys32_pwrite
-	data8 sys_chown		  /* 16-bit version */
-	data8 sys_getcwd
-	data8 sys_capget
-	data8 sys_capset	  /* 185 */
-	data8 sys32_sigaltstack
-	data8 sys32_sendfile
-	data8 sys_ni_syscall	  /* streams1 */
-	data8 sys_ni_syscall	  /* streams2 */
-	data8 sys32_vfork	  /* 190 */
-	data8 compat_sys_getrlimit
-	data8 sys32_mmap2
-	data8 sys32_truncate64
-	data8 sys32_ftruncate64
-	data8 sys32_stat64	  /* 195 */
-	data8 sys32_lstat64
-	data8 sys32_fstat64
-	data8 sys_lchown
-	data8 sys_getuid
-	data8 sys_getgid	  /* 200 */
-	data8 sys_geteuid
-	data8 sys_getegid
-	data8 sys_setreuid
-	data8 sys_setregid
-	data8 sys_getgroups	  /* 205 */
-	data8 sys_setgroups
-	data8 sys_fchown
-	data8 sys_setresuid
-	data8 sys_getresuid
-	data8 sys_setresgid	  /* 210 */
-	data8 sys_getresgid
-	data8 sys_chown
-	data8 sys_setuid
-	data8 sys_setgid
-	data8 sys_setfsuid	  /* 215 */
-	data8 sys_setfsgid
-	data8 sys_pivot_root
-	data8 sys_mincore
-	data8 sys_madvise
-	data8 compat_sys_getdents64	  /* 220 */
-	data8 compat_sys_fcntl64
-	data8 sys_ni_syscall	  /* reserved for TUX */
-	data8 sys_ni_syscall	  /* reserved for Security */
-	data8 sys_gettid
-	data8 sys_readahead	  /* 225 */
-	data8 sys_setxattr
-	data8 sys_lsetxattr
-	data8 sys_fsetxattr
-	data8 sys_getxattr
-	data8 sys_lgetxattr	  /* 230 */
-	data8 sys_fgetxattr
-	data8 sys_listxattr
-	data8 sys_llistxattr
-	data8 sys_flistxattr
-	data8 sys_removexattr	  /* 235 */
-	data8 sys_lremovexattr
-	data8 sys_fremovexattr
-	data8 sys_tkill
-	data8 sys_sendfile64
-	data8 compat_sys_futex	  /* 240 */
-	data8 compat_sys_sched_setaffinity
-	data8 compat_sys_sched_getaffinity
-	data8 sys32_set_thread_area
-	data8 sys32_get_thread_area
-	data8 compat_sys_io_setup	  /* 245 */
-	data8 sys_io_destroy
-	data8 compat_sys_io_getevents
-	data8 compat_sys_io_submit
-	data8 sys_io_cancel
-	data8 sys_fadvise64	  /* 250 */
-	data8 sys_ni_syscall
-	data8 sys_exit_group
-	data8 sys_lookup_dcookie
-	data8 sys_epoll_create
-	data8 sys32_epoll_ctl	  /* 255 */
-	data8 sys32_epoll_wait
-	data8 sys_remap_file_pages
-	data8 sys_set_tid_address
-	data8 compat_sys_timer_create
-	data8 compat_sys_timer_settime	  /* 260 */
-	data8 compat_sys_timer_gettime
-	data8 sys_timer_getoverrun
-	data8 sys_timer_delete
-	data8 compat_sys_clock_settime
-	data8 compat_sys_clock_gettime	  /* 265 */
-	data8 compat_sys_clock_getres
-	data8 compat_sys_clock_nanosleep
-	data8 compat_sys_statfs64
-	data8 compat_sys_fstatfs64
-	data8 sys_tgkill	  /* 270 */
-	data8 compat_sys_utimes
-	data8 sys32_fadvise64_64
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall
-	data8 sys_ni_syscall	  /* 275 */
-	data8 sys_ni_syscall
-	data8 compat_sys_mq_open
-	data8 sys_mq_unlink
-	data8 compat_sys_mq_timedsend
-	data8 compat_sys_mq_timedreceive	  /* 280 */
-	data8 compat_sys_mq_notify
-	data8 compat_sys_mq_getsetattr
-	data8 sys_ni_syscall	  /* reserved for kexec */
-	data8 compat_sys_waitid
-
-	// guard against failures to increase IA32_NR_syscalls
-	.org ia32_syscall_table + 8*IA32_NR_syscalls
diff --git a/arch/ia64/ia32/ia32_ldt.c b/arch/ia64/ia32/ia32_ldt.c
deleted file mode 100644
index 16d51c146849..000000000000
--- a/arch/ia64/ia32/ia32_ldt.c
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright (C) 2001, 2004 Hewlett-Packard Co
- *	David Mosberger-Tang <davidm@hpl.hp.com>
- *
- * Adapted from arch/i386/kernel/ldt.c
- */
-
-#include <linux/errno.h>
-#include <linux/sched.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
-#include <linux/vmalloc.h>
-
-#include <asm/uaccess.h>
-
-#include "ia32priv.h"
-
-/*
- * read_ldt() is not really atomic - this is not a problem since synchronization of reads
- * and writes done to the LDT has to be assured by user-space anyway. Writes are atomic,
- * to protect the security checks done on new descriptors.
- */
-static int
-read_ldt (void __user *ptr, unsigned long bytecount)
-{
-	unsigned long bytes_left, n;
-	char __user *src, *dst;
-	char buf[256];	/* temporary buffer (don't overflow kernel stack!) */
-
-	if (bytecount > IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE)
-		bytecount = IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE;
-
-	bytes_left = bytecount;
-
-	src = (void __user *) IA32_LDT_OFFSET;
-	dst = ptr;
-
-	while (bytes_left) {
-		n = sizeof(buf);
-		if (n > bytes_left)
-			n = bytes_left;
-
-		/*
-		 * We know we're reading valid memory, but we still must guard against
-		 * running out of memory.
-		 */
-		if (__copy_from_user(buf, src, n))
-			return -EFAULT;
-
-		if (copy_to_user(dst, buf, n))
-			return -EFAULT;
-
-		src += n;
-		dst += n;
-		bytes_left -= n;
-	}
-	return bytecount;
-}
-
-static int
-read_default_ldt (void __user * ptr, unsigned long bytecount)
-{
-	unsigned long size;
-	int err;
-
-	/* XXX fix me: should return equivalent of default_ldt[0]  */
-	err = 0;
-	size = 8;
-	if (size > bytecount)
-		size = bytecount;
-
-	err = size;
-	if (clear_user(ptr, size))
-		err = -EFAULT;
-
-	return err;
-}
-
-static int
-write_ldt (void __user * ptr, unsigned long bytecount, int oldmode)
-{
-	struct ia32_user_desc ldt_info;
-	__u64 entry;
-	int ret;
-
-	if (bytecount != sizeof(ldt_info))
-		return -EINVAL;
-	if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
-		return -EFAULT;
-
-	if (ldt_info.entry_number >= IA32_LDT_ENTRIES)
-		return -EINVAL;
-	if (ldt_info.contents == 3) {
-		if (oldmode)
-			return -EINVAL;
-		if (ldt_info.seg_not_present == 0)
-			return -EINVAL;
-	}
-
-	if (ldt_info.base_addr == 0 && ldt_info.limit == 0
-	    && (oldmode || (ldt_info.contents == 0 && ldt_info.read_exec_only == 1
-			    && ldt_info.seg_32bit == 0 && ldt_info.limit_in_pages == 0
-			    && ldt_info.seg_not_present == 1 && ldt_info.useable == 0)))
-		/* allow LDTs to be cleared by the user */
-		entry = 0;
-	else
-		/* we must set the "Accessed" bit as IVE doesn't emulate it */
-		entry = IA32_SEG_DESCRIPTOR(ldt_info.base_addr, ldt_info.limit,
-					    (((ldt_info.read_exec_only ^ 1) << 1)
-					     | (ldt_info.contents << 2)) | 1,
-					    1, 3, ldt_info.seg_not_present ^ 1,
-					    (oldmode ? 0 : ldt_info.useable),
-					    ldt_info.seg_32bit,
-					    ldt_info.limit_in_pages);
-	/*
-	 * Install the new entry.  We know we're accessing valid (mapped) user-level
-	 * memory, but we still need to guard against out-of-memory, hence we must use
-	 * put_user().
-	 */
-	ret = __put_user(entry, (__u64 __user *) IA32_LDT_OFFSET + ldt_info.entry_number);
-	ia32_load_segment_descriptors(current);
-	return ret;
-}
-
-asmlinkage int
-sys32_modify_ldt (int func, unsigned int ptr, unsigned int bytecount)
-{
-	int ret = -ENOSYS;
-
-	switch (func) {
-	case 0:
-		ret = read_ldt(compat_ptr(ptr), bytecount);
-		break;
-	case 1:
-		ret = write_ldt(compat_ptr(ptr), bytecount, 1);
-		break;
-	case 2:
-		ret = read_default_ldt(compat_ptr(ptr), bytecount);
-		break;
-	case 0x11:
-		ret = write_ldt(compat_ptr(ptr), bytecount, 0);
-		break;
-	}
-	return ret;
-}
diff --git a/arch/ia64/ia32/ia32_signal.c b/arch/ia64/ia32/ia32_signal.c
deleted file mode 100644
index b763ca19ef17..000000000000
--- a/arch/ia64/ia32/ia32_signal.c
+++ /dev/null
@@ -1,1010 +0,0 @@
1/*
2 * IA32 Architecture-specific signal handling support.
3 *
4 * Copyright (C) 1999, 2001-2002, 2005 Hewlett-Packard Co
5 * David Mosberger-Tang <davidm@hpl.hp.com>
6 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
7 * Copyright (C) 2000 VA Linux Co
8 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
9 *
10 * Derived from i386 and Alpha versions.
11 */
12
13#include <linux/errno.h>
14#include <linux/kernel.h>
15#include <linux/mm.h>
16#include <linux/personality.h>
17#include <linux/ptrace.h>
18#include <linux/sched.h>
19#include <linux/signal.h>
20#include <linux/smp.h>
21#include <linux/stddef.h>
22#include <linux/syscalls.h>
23#include <linux/unistd.h>
24#include <linux/wait.h>
25#include <linux/compat.h>
26
27#include <asm/intrinsics.h>
28#include <asm/uaccess.h>
29#include <asm/rse.h>
30#include <asm/sigcontext.h>
31
32#include "ia32priv.h"
33
34#include "../kernel/sigframe.h"
35
36#define A(__x) ((unsigned long)(__x))
37
38#define DEBUG_SIG 0
39#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
40
41#define __IA32_NR_sigreturn 119
42#define __IA32_NR_rt_sigreturn 173
43
44struct sigframe_ia32
45{
46 int pretcode;
47 int sig;
48 struct sigcontext_ia32 sc;
49 struct _fpstate_ia32 fpstate;
50 unsigned int extramask[_COMPAT_NSIG_WORDS-1];
51 char retcode[8];
52};
53
54struct rt_sigframe_ia32
55{
56 int pretcode;
57 int sig;
58 int pinfo;
59 int puc;
60 compat_siginfo_t info;
61 struct ucontext_ia32 uc;
62 struct _fpstate_ia32 fpstate;
63 char retcode[8];
64};
65
66int
67copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from)
68{
69 unsigned long tmp;
70 int err;
71
72 if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t)))
73 return -EFAULT;
74
75 err = __get_user(to->si_signo, &from->si_signo);
76 err |= __get_user(to->si_errno, &from->si_errno);
77 err |= __get_user(to->si_code, &from->si_code);
78
79 if (to->si_code < 0)
80 err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
81 else {
82 switch (to->si_code >> 16) {
83 case __SI_CHLD >> 16:
84 err |= __get_user(to->si_utime, &from->si_utime);
85 err |= __get_user(to->si_stime, &from->si_stime);
86 err |= __get_user(to->si_status, &from->si_status);
87 default:
88 err |= __get_user(to->si_pid, &from->si_pid);
89 err |= __get_user(to->si_uid, &from->si_uid);
90 break;
91 case __SI_FAULT >> 16:
92 err |= __get_user(tmp, &from->si_addr);
93 to->si_addr = (void __user *) tmp;
94 break;
95 case __SI_POLL >> 16:
96 err |= __get_user(to->si_band, &from->si_band);
97 err |= __get_user(to->si_fd, &from->si_fd);
98 break;
99 case __SI_RT >> 16: /* This is not generated by the kernel as of now. */
100 case __SI_MESGQ >> 16:
101 err |= __get_user(to->si_pid, &from->si_pid);
102 err |= __get_user(to->si_uid, &from->si_uid);
103 err |= __get_user(to->si_int, &from->si_int);
104 break;
105 }
106 }
107 return err;
108}
109
110int
111copy_siginfo_to_user32 (compat_siginfo_t __user *to, siginfo_t *from)
112{
113 unsigned int addr;
114 int err;
115
116 if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t)))
117 return -EFAULT;
118
119 /* If you change siginfo_t structure, please be sure
120 this code is fixed accordingly.
121 It should never copy any pad contained in the structure
122 to avoid security leaks, but must copy the generic
123 3 ints plus the relevant union member.
124 This routine must convert siginfo from 64bit to 32bit as well
125 at the same time. */
126 err = __put_user(from->si_signo, &to->si_signo);
127 err |= __put_user(from->si_errno, &to->si_errno);
128 err |= __put_user((short)from->si_code, &to->si_code);
129 if (from->si_code < 0)
130 err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE);
131 else {
132 switch (from->si_code >> 16) {
133 case __SI_CHLD >> 16:
134 err |= __put_user(from->si_utime, &to->si_utime);
135 err |= __put_user(from->si_stime, &to->si_stime);
136 err |= __put_user(from->si_status, &to->si_status);
137 default:
138 err |= __put_user(from->si_pid, &to->si_pid);
139 err |= __put_user(from->si_uid, &to->si_uid);
140 break;
141 case __SI_FAULT >> 16:
142 /* avoid type-checking warnings by copying _pad[0] in lieu of si_addr... */
143 err |= __put_user(from->_sifields._pad[0], &to->si_addr);
144 break;
145 case __SI_POLL >> 16:
146 err |= __put_user(from->si_band, &to->si_band);
147 err |= __put_user(from->si_fd, &to->si_fd);
148 break;
149 case __SI_TIMER >> 16:
150 err |= __put_user(from->si_tid, &to->si_tid);
151 err |= __put_user(from->si_overrun, &to->si_overrun);
152 addr = (unsigned long) from->si_ptr;
153 err |= __put_user(addr, &to->si_ptr);
154 break;
155 case __SI_RT >> 16: /* Not generated by the kernel as of now. */
156 case __SI_MESGQ >> 16:
157 err |= __put_user(from->si_uid, &to->si_uid);
158 err |= __put_user(from->si_pid, &to->si_pid);
159 addr = (unsigned long) from->si_ptr;
160 err |= __put_user(addr, &to->si_ptr);
161 break;
162 }
163 }
164 return err;
165}
166
167
168/*
169 * SAVE and RESTORE of ia32 fpstate info, from ia64 current state
170 * Used in exception handler to pass the fpstate to the user, and restore
171 * the fpstate while returning from the exception handler.
172 *
173 * fpstate info and their mapping to IA64 regs:
174 * fpstate REG(BITS) Attribute Comments
175 * cw ar.fcr(0:12) with bits 7 and 6 not used
176 * sw ar.fsr(0:15)
177 * tag ar.fsr(16:31) with odd numbered bits not used
178 * (read returns 0, writes ignored)
179 * ipoff ar.fir(0:31)
180 * cssel ar.fir(32:47)
181 * dataoff ar.fdr(0:31)
182 * datasel ar.fdr(32:47)
183 *
184 * _st[(0+TOS)%8] f8
185 * _st[(1+TOS)%8] f9
186 * _st[(2+TOS)%8] f10
187 * _st[(3+TOS)%8] f11 (f8..f11 from ptregs)
188 * : : : (f12..f15 from live reg)
189 * : : :
190 * _st[(7+TOS)%8] f15 TOS=sw.top(bits11:13)
191 *
192 * status Same as sw RO
193 * magic 0 as X86_FXSR_MAGIC in ia32
194 * mxcsr Bits(7:15)=ar.fcr(39:47)
195 * Bits(0:5) =ar.fsr(32:37) with bit 6 reserved
196 * _xmm[0..7] f16..f31 (live registers)
197 * with _xmm[0]
198 * Bit(64:127)=f17(0:63)
199 * Bit(0:63)=f16(0:63)
200 * All other fields unused...
201 */
202
203static int
204save_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
205{
206 struct task_struct *tsk = current;
207 struct pt_regs *ptp;
208 struct _fpreg_ia32 *fpregp;
209 char buf[32];
210 unsigned long fsr, fcr, fir, fdr;
211 unsigned long new_fsr;
212 unsigned long num128[2];
213 unsigned long mxcsr=0;
214 int fp_tos, fr8_st_map;
215
216 if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
217 return -EFAULT;
218
219 /* Read in fsr, fcr, fir, fdr and copy onto fpstate */
220 fsr = ia64_getreg(_IA64_REG_AR_FSR);
221 fcr = ia64_getreg(_IA64_REG_AR_FCR);
222 fir = ia64_getreg(_IA64_REG_AR_FIR);
223 fdr = ia64_getreg(_IA64_REG_AR_FDR);
224
225	/*
226	 * We need to clear the exception state before calling the signal handler. Clear
227	 * bit 15 and bits 0-7 in the fp status word, similar to what the fnclex
228	 * instruction does.
229	 */
230 new_fsr = fsr & ~0x80ff;
231 ia64_setreg(_IA64_REG_AR_FSR, new_fsr);
232
233 __put_user(fcr & 0xffff, &save->cw);
234 __put_user(fsr & 0xffff, &save->sw);
235 __put_user((fsr>>16) & 0xffff, &save->tag);
236 __put_user(fir, &save->ipoff);
237 __put_user((fir>>32) & 0xffff, &save->cssel);
238 __put_user(fdr, &save->dataoff);
239 __put_user((fdr>>32) & 0xffff, &save->datasel);
240 __put_user(fsr & 0xffff, &save->status);
241
242 mxcsr = ((fcr>>32) & 0xff80) | ((fsr>>32) & 0x3f);
243 __put_user(mxcsr & 0xffff, &save->mxcsr);
244	__put_user(0, &save->magic);	/* X86_FXSR_MAGIC == 0x0000 */
245
246 /*
247 * save f8..f11 from pt_regs
248 * save f12..f15 from live register set
249 */
250	/*
251	 * Find the location where f8 has to go in the fp reg stack. This depends on
252	 * the TOP(11:13) field of sw. The other f regs continue sequentially from
253	 * where f8 maps to.
254	 */
255 fp_tos = (fsr>>11)&0x7;
256 fr8_st_map = (8-fp_tos)&0x7;
257 ptp = task_pt_regs(tsk);
258 fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
259 ia64f2ia32f(fpregp, &ptp->f8);
260 copy_to_user(&save->_st[(0+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
261 ia64f2ia32f(fpregp, &ptp->f9);
262 copy_to_user(&save->_st[(1+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
263 ia64f2ia32f(fpregp, &ptp->f10);
264 copy_to_user(&save->_st[(2+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
265 ia64f2ia32f(fpregp, &ptp->f11);
266 copy_to_user(&save->_st[(3+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
267
268 ia64_stfe(fpregp, 12);
269 copy_to_user(&save->_st[(4+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
270 ia64_stfe(fpregp, 13);
271 copy_to_user(&save->_st[(5+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
272 ia64_stfe(fpregp, 14);
273 copy_to_user(&save->_st[(6+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
274 ia64_stfe(fpregp, 15);
275 copy_to_user(&save->_st[(7+fr8_st_map)&0x7], fpregp, sizeof(struct _fpreg_ia32));
276
277 ia64_stf8(&num128[0], 16);
278 ia64_stf8(&num128[1], 17);
279 copy_to_user(&save->_xmm[0], num128, sizeof(struct _xmmreg_ia32));
280
281 ia64_stf8(&num128[0], 18);
282 ia64_stf8(&num128[1], 19);
283 copy_to_user(&save->_xmm[1], num128, sizeof(struct _xmmreg_ia32));
284
285 ia64_stf8(&num128[0], 20);
286 ia64_stf8(&num128[1], 21);
287 copy_to_user(&save->_xmm[2], num128, sizeof(struct _xmmreg_ia32));
288
289 ia64_stf8(&num128[0], 22);
290 ia64_stf8(&num128[1], 23);
291 copy_to_user(&save->_xmm[3], num128, sizeof(struct _xmmreg_ia32));
292
293 ia64_stf8(&num128[0], 24);
294 ia64_stf8(&num128[1], 25);
295 copy_to_user(&save->_xmm[4], num128, sizeof(struct _xmmreg_ia32));
296
297 ia64_stf8(&num128[0], 26);
298 ia64_stf8(&num128[1], 27);
299 copy_to_user(&save->_xmm[5], num128, sizeof(struct _xmmreg_ia32));
300
301 ia64_stf8(&num128[0], 28);
302 ia64_stf8(&num128[1], 29);
303 copy_to_user(&save->_xmm[6], num128, sizeof(struct _xmmreg_ia32));
304
305 ia64_stf8(&num128[0], 30);
306 ia64_stf8(&num128[1], 31);
307 copy_to_user(&save->_xmm[7], num128, sizeof(struct _xmmreg_ia32));
308 return 0;
309}
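/*
 * A minimal userspace-style sketch (hypothetical, not part of the original
 * file) of the TOS arithmetic used above: f8..f15 always hold x87 stack
 * slots, and fr8_st_map tells which _st[] entry f8 lands in, based solely
 * on the TOP field (bits 11:13) of the status word.
 */
#include <stdio.h>

static void show_st_mapping(unsigned long fsr)
{
	int fp_tos = (fsr >> 11) & 0x7;		/* x87 top-of-stack field */
	int fr8_st_map = (8 - fp_tos) & 0x7;	/* _st[] index that f8 maps to */
	int i;

	for (i = 0; i < 8; i++)
		printf("f%d -> _st[%d]\n", 8 + i, (i + fr8_st_map) & 0x7);
}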
310
311static int
312restore_ia32_fpstate_live (struct _fpstate_ia32 __user *save)
313{
314 struct task_struct *tsk = current;
315 struct pt_regs *ptp;
316 unsigned int lo, hi;
317 unsigned long num128[2];
318 unsigned long num64, mxcsr;
319 struct _fpreg_ia32 *fpregp;
320 char buf[32];
321 unsigned long fsr, fcr, fir, fdr;
322 int fp_tos, fr8_st_map;
323
324 if (!access_ok(VERIFY_READ, save, sizeof(*save)))
325		return -EFAULT;
326
327	/*
328	 * Updating fsr, fcr, fir, fdr.
329	 * Just a bit more complicated than save.
330	 * - Need to make sure that we don't write any value other than the
331	 *   specific fpstate info
332	 * - Need to make sure that the untouched parts of fsr, fdr, fir, fcr
333	 *   remain unchanged while writing.
334	 * So, we do a read, change specific fields and write.
335	 */
336 fsr = ia64_getreg(_IA64_REG_AR_FSR);
337 fcr = ia64_getreg(_IA64_REG_AR_FCR);
338 fir = ia64_getreg(_IA64_REG_AR_FIR);
339 fdr = ia64_getreg(_IA64_REG_AR_FDR);
340
341 __get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
342 /* setting bits 0..5 8..12 with cw and 39..47 from mxcsr */
343 __get_user(lo, (unsigned int __user *)&save->cw);
344 num64 = mxcsr & 0xff10;
345 num64 = (num64 << 32) | (lo & 0x1f3f);
346 fcr = (fcr & (~0xff1000001f3fUL)) | num64;
347
348 /* setting bits 0..31 with sw and tag and 32..37 from mxcsr */
349 __get_user(lo, (unsigned int __user *)&save->sw);
350	/* clear bits 15 and 7 (fsw.b, fsw.es) if no exception flags are set */
351	if (!(lo & 0x7f))
352		lo &= ~0x8080;
353 __get_user(hi, (unsigned int __user *)&save->tag);
354 num64 = mxcsr & 0x3f;
355 num64 = (num64 << 16) | (hi & 0xffff);
356 num64 = (num64 << 16) | (lo & 0xffff);
357 fsr = (fsr & (~0x3fffffffffUL)) | num64;
358
359 /* setting bits 0..47 with cssel and ipoff */
360 __get_user(lo, (unsigned int __user *)&save->ipoff);
361 __get_user(hi, (unsigned int __user *)&save->cssel);
362 num64 = hi & 0xffff;
363 num64 = (num64 << 32) | lo;
364 fir = (fir & (~0xffffffffffffUL)) | num64;
365
366 /* setting bits 0..47 with datasel and dataoff */
367 __get_user(lo, (unsigned int __user *)&save->dataoff);
368 __get_user(hi, (unsigned int __user *)&save->datasel);
369 num64 = hi & 0xffff;
370 num64 = (num64 << 32) | lo;
371 fdr = (fdr & (~0xffffffffffffUL)) | num64;
372
373 ia64_setreg(_IA64_REG_AR_FSR, fsr);
374 ia64_setreg(_IA64_REG_AR_FCR, fcr);
375 ia64_setreg(_IA64_REG_AR_FIR, fir);
376 ia64_setreg(_IA64_REG_AR_FDR, fdr);
377
378 /*
379 * restore f8..f11 onto pt_regs
380 * restore f12..f15 onto live registers
381 */
382	/*
383	 * Find the location where f8 has to go in the fp reg stack. This depends on
384	 * the TOP(11:13) field of sw. The other f regs continue sequentially from
385	 * where f8 maps to.
386	 */
387 fp_tos = (fsr>>11)&0x7;
388 fr8_st_map = (8-fp_tos)&0x7;
389 fpregp = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
390
391 ptp = task_pt_regs(tsk);
392 copy_from_user(fpregp, &save->_st[(0+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
393 ia32f2ia64f(&ptp->f8, fpregp);
394 copy_from_user(fpregp, &save->_st[(1+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
395 ia32f2ia64f(&ptp->f9, fpregp);
396 copy_from_user(fpregp, &save->_st[(2+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
397 ia32f2ia64f(&ptp->f10, fpregp);
398 copy_from_user(fpregp, &save->_st[(3+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
399 ia32f2ia64f(&ptp->f11, fpregp);
400
401 copy_from_user(fpregp, &save->_st[(4+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
402 ia64_ldfe(12, fpregp);
403 copy_from_user(fpregp, &save->_st[(5+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
404 ia64_ldfe(13, fpregp);
405 copy_from_user(fpregp, &save->_st[(6+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
406 ia64_ldfe(14, fpregp);
407 copy_from_user(fpregp, &save->_st[(7+fr8_st_map)&0x7], sizeof(struct _fpreg_ia32));
408 ia64_ldfe(15, fpregp);
409
410 copy_from_user(num128, &save->_xmm[0], sizeof(struct _xmmreg_ia32));
411 ia64_ldf8(16, &num128[0]);
412 ia64_ldf8(17, &num128[1]);
413
414 copy_from_user(num128, &save->_xmm[1], sizeof(struct _xmmreg_ia32));
415 ia64_ldf8(18, &num128[0]);
416 ia64_ldf8(19, &num128[1]);
417
418 copy_from_user(num128, &save->_xmm[2], sizeof(struct _xmmreg_ia32));
419 ia64_ldf8(20, &num128[0]);
420 ia64_ldf8(21, &num128[1]);
421
422 copy_from_user(num128, &save->_xmm[3], sizeof(struct _xmmreg_ia32));
423 ia64_ldf8(22, &num128[0]);
424 ia64_ldf8(23, &num128[1]);
425
426 copy_from_user(num128, &save->_xmm[4], sizeof(struct _xmmreg_ia32));
427 ia64_ldf8(24, &num128[0]);
428 ia64_ldf8(25, &num128[1]);
429
430 copy_from_user(num128, &save->_xmm[5], sizeof(struct _xmmreg_ia32));
431 ia64_ldf8(26, &num128[0]);
432 ia64_ldf8(27, &num128[1]);
433
434 copy_from_user(num128, &save->_xmm[6], sizeof(struct _xmmreg_ia32));
435 ia64_ldf8(28, &num128[0]);
436 ia64_ldf8(29, &num128[1]);
437
438 copy_from_user(num128, &save->_xmm[7], sizeof(struct _xmmreg_ia32));
439 ia64_ldf8(30, &num128[0]);
440 ia64_ldf8(31, &num128[1]);
441 return 0;
442}
443
444static inline void
445sigact_set_handler (struct k_sigaction *sa, unsigned int handler, unsigned int restorer)
446{
447 if (handler + 1 <= 2)
448 /* SIG_DFL, SIG_IGN, or SIG_ERR: must sign-extend to 64-bits */
449 sa->sa.sa_handler = (__sighandler_t) A((int) handler);
450 else
451 sa->sa.sa_handler = (__sighandler_t) (((unsigned long) restorer << 32) | handler);
452}
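/*
 * Sketch of the packing done above (demo function is hypothetical; the
 * IA32_SA_HANDLER()/IA32_SA_RESTORER() macros are from ia32priv.h): the
 * ia32 handler lives in the low 32 bits of sa_handler and the restorer
 * in the high 32 bits.
 */
static void sigact_pack_demo(unsigned int handler, unsigned int restorer)
{
	unsigned long packed = ((unsigned long) restorer << 32) | handler;
	unsigned int h = (unsigned int) (packed & 0xffffffff);	/* IA32_SA_HANDLER */
	unsigned int r = (unsigned int) (packed >> 32);		/* IA32_SA_RESTORER */

	(void) h; (void) r;	/* h == handler, r == restorer */
}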
453
454asmlinkage long
455sys32_sigsuspend (int history0, int history1, old_sigset_t mask)
456{
457 mask &= _BLOCKABLE;
458 spin_lock_irq(&current->sighand->siglock);
459 current->saved_sigmask = current->blocked;
460 siginitset(&current->blocked, mask);
461 recalc_sigpending();
462 spin_unlock_irq(&current->sighand->siglock);
463
464 current->state = TASK_INTERRUPTIBLE;
465 schedule();
466 set_restore_sigmask();
467 return -ERESTARTNOHAND;
468}
469
470asmlinkage long
471sys32_signal (int sig, unsigned int handler)
472{
473 struct k_sigaction new_sa, old_sa;
474 int ret;
475
476 sigact_set_handler(&new_sa, handler, 0);
477 new_sa.sa.sa_flags = SA_ONESHOT | SA_NOMASK;
478 sigemptyset(&new_sa.sa.sa_mask);
479
480 ret = do_sigaction(sig, &new_sa, &old_sa);
481
482 return ret ? ret : IA32_SA_HANDLER(&old_sa);
483}
484
485asmlinkage long
486sys32_rt_sigaction (int sig, struct sigaction32 __user *act,
487 struct sigaction32 __user *oact, unsigned int sigsetsize)
488{
489 struct k_sigaction new_ka, old_ka;
490 unsigned int handler, restorer;
491 int ret;
492
493 /* XXX: Don't preclude handling different sized sigset_t's. */
494 if (sigsetsize != sizeof(compat_sigset_t))
495 return -EINVAL;
496
497 if (act) {
498 ret = get_user(handler, &act->sa_handler);
499 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
500 ret |= get_user(restorer, &act->sa_restorer);
501 ret |= copy_from_user(&new_ka.sa.sa_mask, &act->sa_mask, sizeof(compat_sigset_t));
502 if (ret)
503 return -EFAULT;
504
505 sigact_set_handler(&new_ka, handler, restorer);
506 }
507
508 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
509
510 if (!ret && oact) {
511 ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler);
512 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
513 ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer);
514 ret |= copy_to_user(&oact->sa_mask, &old_ka.sa.sa_mask, sizeof(compat_sigset_t));
515 }
516 return ret;
517}
518
519
520asmlinkage long
521sys32_rt_sigprocmask (int how, compat_sigset_t __user *set, compat_sigset_t __user *oset,
522 unsigned int sigsetsize)
523{
524 mm_segment_t old_fs = get_fs();
525 sigset_t s;
526 long ret;
527
528 if (sigsetsize > sizeof(s))
529 return -EINVAL;
530
531 if (set) {
532 memset(&s, 0, sizeof(s));
533 if (copy_from_user(&s.sig, set, sigsetsize))
534 return -EFAULT;
535 }
536 set_fs(KERNEL_DS);
537 ret = sys_rt_sigprocmask(how,
538 set ? (sigset_t __user *) &s : NULL,
539 oset ? (sigset_t __user *) &s : NULL, sizeof(s));
540 set_fs(old_fs);
541 if (ret)
542 return ret;
543 if (oset) {
544 if (copy_to_user(oset, &s.sig, sigsetsize))
545 return -EFAULT;
546 }
547 return 0;
548}
549
550asmlinkage long
551sys32_rt_sigqueueinfo (int pid, int sig, compat_siginfo_t __user *uinfo)
552{
553 mm_segment_t old_fs = get_fs();
554 siginfo_t info;
555 int ret;
556
557 if (copy_siginfo_from_user32(&info, uinfo))
558 return -EFAULT;
559 set_fs(KERNEL_DS);
560 ret = sys_rt_sigqueueinfo(pid, sig, (siginfo_t __user *) &info);
561 set_fs(old_fs);
562 return ret;
563}
564
565asmlinkage long
566sys32_sigaction (int sig, struct old_sigaction32 __user *act, struct old_sigaction32 __user *oact)
567{
568 struct k_sigaction new_ka, old_ka;
569 unsigned int handler, restorer;
570 int ret;
571
572 if (act) {
573 compat_old_sigset_t mask;
574
575 ret = get_user(handler, &act->sa_handler);
576 ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags);
577 ret |= get_user(restorer, &act->sa_restorer);
578 ret |= get_user(mask, &act->sa_mask);
579 if (ret)
580 return ret;
581
582 sigact_set_handler(&new_ka, handler, restorer);
583 siginitset(&new_ka.sa.sa_mask, mask);
584 }
585
586 ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
587
588 if (!ret && oact) {
589 ret = put_user(IA32_SA_HANDLER(&old_ka), &oact->sa_handler);
590 ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags);
591 ret |= put_user(IA32_SA_RESTORER(&old_ka), &oact->sa_restorer);
592 ret |= put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
593 }
594
595 return ret;
596}
597
598static int
599setup_sigcontext_ia32 (struct sigcontext_ia32 __user *sc, struct _fpstate_ia32 __user *fpstate,
600 struct pt_regs *regs, unsigned long mask)
601{
602 int err = 0;
603 unsigned long flag;
604
605 if (!access_ok(VERIFY_WRITE, sc, sizeof(*sc)))
606 return -EFAULT;
607
608 err |= __put_user((regs->r16 >> 32) & 0xffff, (unsigned int __user *)&sc->fs);
609 err |= __put_user((regs->r16 >> 48) & 0xffff, (unsigned int __user *)&sc->gs);
610 err |= __put_user((regs->r16 >> 16) & 0xffff, (unsigned int __user *)&sc->es);
611 err |= __put_user(regs->r16 & 0xffff, (unsigned int __user *)&sc->ds);
612 err |= __put_user(regs->r15, &sc->edi);
613 err |= __put_user(regs->r14, &sc->esi);
614 err |= __put_user(regs->r13, &sc->ebp);
615 err |= __put_user(regs->r12, &sc->esp);
616 err |= __put_user(regs->r11, &sc->ebx);
617 err |= __put_user(regs->r10, &sc->edx);
618 err |= __put_user(regs->r9, &sc->ecx);
619 err |= __put_user(regs->r8, &sc->eax);
620#if 0
621 err |= __put_user(current->tss.trap_no, &sc->trapno);
622 err |= __put_user(current->tss.error_code, &sc->err);
623#endif
624 err |= __put_user(regs->cr_iip, &sc->eip);
625 err |= __put_user(regs->r17 & 0xffff, (unsigned int __user *)&sc->cs);
626 /*
627 * `eflags' is in an ar register for this context
628 */
629 flag = ia64_getreg(_IA64_REG_AR_EFLAG);
630 err |= __put_user((unsigned int)flag, &sc->eflags);
631 err |= __put_user(regs->r12, &sc->esp_at_signal);
632 err |= __put_user((regs->r17 >> 16) & 0xffff, (unsigned int __user *)&sc->ss);
633
634 if ( save_ia32_fpstate_live(fpstate) < 0 )
635 err = -EFAULT;
636 else
637 err |= __put_user((u32)(u64)fpstate, &sc->fpstate);
638
639#if 0
640 tmp = save_i387(fpstate);
641 if (tmp < 0)
642 err = 1;
643 else
644 err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);
645
646 /* non-iBCS2 extensions.. */
647#endif
648 err |= __put_user(mask, &sc->oldmask);
649#if 0
650 err |= __put_user(current->tss.cr2, &sc->cr2);
651#endif
652 return err;
653}
654
655static int
656restore_sigcontext_ia32 (struct pt_regs *regs, struct sigcontext_ia32 __user *sc, int *peax)
657{
658 unsigned int err = 0;
659
660 /* Always make any pending restarted system calls return -EINTR */
661 current_thread_info()->restart_block.fn = do_no_restart_syscall;
662
663 if (!access_ok(VERIFY_READ, sc, sizeof(*sc)))
664		return -EFAULT;
665
666#define COPY(ia64x, ia32x) err |= __get_user(regs->ia64x, &sc->ia32x)
667
668#define copyseg_gs(tmp) (regs->r16 |= (unsigned long) (tmp) << 48)
669#define copyseg_fs(tmp) (regs->r16 |= (unsigned long) (tmp) << 32)
670#define copyseg_cs(tmp) (regs->r17 |= tmp)
671#define copyseg_ss(tmp) (regs->r17 |= (unsigned long) (tmp) << 16)
672#define copyseg_es(tmp) (regs->r16 |= (unsigned long) (tmp) << 16)
673#define copyseg_ds(tmp) (regs->r16 |= tmp)
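/*
 * The copyseg_* helpers above assume the packing used by
 * setup_sigcontext_ia32(): r16 = ds | (es << 16) | (fs << 32) | (gs << 48)
 * and r17 = cs | (ss << 16).
 */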
674
675#define COPY_SEG(seg) \
676 { \
677 unsigned short tmp; \
678 err |= __get_user(tmp, &sc->seg); \
679 copyseg_##seg(tmp); \
680 }
681#define COPY_SEG_STRICT(seg) \
682 { \
683 unsigned short tmp; \
684 err |= __get_user(tmp, &sc->seg); \
685 copyseg_##seg(tmp|3); \
686 }
687
688 /* To make COPY_SEGs easier, we zero r16, r17 */
689 regs->r16 = 0;
690 regs->r17 = 0;
691
692 COPY_SEG(gs);
693 COPY_SEG(fs);
694 COPY_SEG(es);
695 COPY_SEG(ds);
696 COPY(r15, edi);
697 COPY(r14, esi);
698 COPY(r13, ebp);
699 COPY(r12, esp);
700 COPY(r11, ebx);
701 COPY(r10, edx);
702 COPY(r9, ecx);
703 COPY(cr_iip, eip);
704 COPY_SEG_STRICT(cs);
705 COPY_SEG_STRICT(ss);
706 ia32_load_segment_descriptors(current);
707 {
708 unsigned int tmpflags;
709 unsigned long flag;
710
711 /*
712 * IA32 `eflags' is not part of `pt_regs', it's in an ar register which
713 * is part of the thread context. Fortunately, we are executing in the
714 * IA32 process's context.
715 */
716 err |= __get_user(tmpflags, &sc->eflags);
717 flag = ia64_getreg(_IA64_REG_AR_EFLAG);
718 flag &= ~0x40DD5;
719 flag |= (tmpflags & 0x40DD5);
720 ia64_setreg(_IA64_REG_AR_EFLAG, flag);
721
722 regs->r1 = -1; /* disable syscall checks, r1 is orig_eax */
723 }
724
725 {
726 struct _fpstate_ia32 __user *buf = NULL;
727 u32 fpstate_ptr;
728 err |= get_user(fpstate_ptr, &(sc->fpstate));
729 buf = compat_ptr(fpstate_ptr);
730 if (buf) {
731 err |= restore_ia32_fpstate_live(buf);
732 }
733 }
734
735#if 0
736 {
737 struct _fpstate * buf;
738 err |= __get_user(buf, &sc->fpstate);
739 if (buf) {
740 if (!access_ok(VERIFY_READ, buf, sizeof(*buf)))
741 goto badframe;
742 err |= restore_i387(buf);
743 }
744 }
745#endif
746
747 err |= __get_user(*peax, &sc->eax);
748 return err;
749
750#if 0
751 badframe:
752 return 1;
753#endif
754}
755
756/*
757 * Determine which stack to use.
758 */
759static inline void __user *
760get_sigframe (struct k_sigaction *ka, struct pt_regs * regs, size_t frame_size)
761{
762 unsigned long esp;
763
764	/* Default to using normal stack (truncate off sign-extension of bit 31): */
765 esp = (unsigned int) regs->r12;
766
767 /* This is the X/Open sanctioned signal stack switching. */
768 if (ka->sa.sa_flags & SA_ONSTACK) {
769 int onstack = sas_ss_flags(esp);
770
771 if (onstack == 0)
772 esp = current->sas_ss_sp + current->sas_ss_size;
773 else if (onstack == SS_ONSTACK) {
774 /*
775 * If we are on the alternate signal stack and would
776 * overflow it, don't. Return an always-bogus address
777 * instead so we will die with SIGSEGV.
778 */
779 if (!likely(on_sig_stack(esp - frame_size)))
780 return (void __user *) -1L;
781 }
782 }
783 /* Legacy stack switching not supported */
784
785 esp -= frame_size;
786 /* Align the stack pointer according to the i386 ABI,
787 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
788 esp = ((esp + 4) & -16ul) - 4;
789 return (void __user *) esp;
790}
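/*
 * A quick sanity sketch (hypothetical, userspace-style) of the alignment
 * rule above: after esp = ((esp + 4) & -16ul) - 4, the handler sees
 * ((esp + 4) & 15) == 0 on entry, as the i386 ABI requires.
 */
#include <assert.h>

static void check_sigframe_align(unsigned long esp)
{
	esp = ((esp + 4) & -16ul) - 4;
	assert(((esp + 4) & 15) == 0);
}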
791
792static int
793setup_frame_ia32 (int sig, struct k_sigaction *ka, sigset_t *set, struct pt_regs * regs)
794{
795 struct exec_domain *ed = current_thread_info()->exec_domain;
796 struct sigframe_ia32 __user *frame;
797 int err = 0;
798
799 frame = get_sigframe(ka, regs, sizeof(*frame));
800
801 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
802 goto give_sigsegv;
803
804 err |= __put_user((ed && ed->signal_invmap && sig < 32
805 ? (int)(ed->signal_invmap[sig]) : sig), &frame->sig);
806
807 err |= setup_sigcontext_ia32(&frame->sc, &frame->fpstate, regs, set->sig[0]);
808
809 if (_COMPAT_NSIG_WORDS > 1)
810 err |= __copy_to_user(frame->extramask, (char *) &set->sig + 4,
811 sizeof(frame->extramask));
812
813 /* Set up to return from userspace. If provided, use a stub
814 already in userspace. */
815 if (ka->sa.sa_flags & SA_RESTORER) {
816 unsigned int restorer = IA32_SA_RESTORER(ka);
817 err |= __put_user(restorer, &frame->pretcode);
818 } else {
819 /* Pointing to restorer in ia32 gate page */
820 err |= __put_user(IA32_GATE_OFFSET, &frame->pretcode);
821 }
822
823	/* This is popl %eax ; movl $,%eax ; int $0x80
824	 * and is there for historical reasons only.
825	 * See arch/i386/kernel/signal.c
826	 */
827
828 err |= __put_user(0xb858, (short __user *)(frame->retcode+0));
829 err |= __put_user(__IA32_NR_sigreturn, (int __user *)(frame->retcode+2));
830 err |= __put_user(0x80cd, (short __user *)(frame->retcode+6));
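	/*
	 * Encoding note: stored little-endian, the three stores above lay
	 * down 58 (popl %eax), b8 imm32 (movl $__IA32_NR_sigreturn,%eax),
	 * cd 80 (int $0x80) -- 8 bytes of retcode in total.
	 */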
831
832 if (err)
833 goto give_sigsegv;
834
835 /* Set up registers for signal handler */
836 regs->r12 = (unsigned long) frame;
837 regs->cr_iip = IA32_SA_HANDLER(ka);
838
839 set_fs(USER_DS);
840
841#if 0
842 regs->eflags &= ~TF_MASK;
843#endif
844
845#if 0
846 printk("SIG deliver (%s:%d): sig=%d sp=%p pc=%lx ra=%x\n",
847 current->comm, current->pid, sig, (void *) frame, regs->cr_iip, frame->pretcode);
848#endif
849
850 return 1;
851
852 give_sigsegv:
853 force_sigsegv(sig, current);
854 return 0;
855}
856
857static int
858setup_rt_frame_ia32 (int sig, struct k_sigaction *ka, siginfo_t *info,
859 sigset_t *set, struct pt_regs * regs)
860{
861 struct exec_domain *ed = current_thread_info()->exec_domain;
862 compat_uptr_t pinfo, puc;
863 struct rt_sigframe_ia32 __user *frame;
864 int err = 0;
865
866 frame = get_sigframe(ka, regs, sizeof(*frame));
867
868 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
869 goto give_sigsegv;
870
871 err |= __put_user((ed && ed->signal_invmap
872 && sig < 32 ? ed->signal_invmap[sig] : sig), &frame->sig);
873
874 pinfo = (long __user) &frame->info;
875 puc = (long __user) &frame->uc;
876 err |= __put_user(pinfo, &frame->pinfo);
877 err |= __put_user(puc, &frame->puc);
878 err |= copy_siginfo_to_user32(&frame->info, info);
879
880 /* Create the ucontext. */
881 err |= __put_user(0, &frame->uc.uc_flags);
882 err |= __put_user(0, &frame->uc.uc_link);
883 err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
884 err |= __put_user(sas_ss_flags(regs->r12), &frame->uc.uc_stack.ss_flags);
885 err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
886 err |= setup_sigcontext_ia32(&frame->uc.uc_mcontext, &frame->fpstate, regs, set->sig[0]);
887 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
888 if (err)
889 goto give_sigsegv;
890
891 /* Set up to return from userspace. If provided, use a stub
892 already in userspace. */
893 if (ka->sa.sa_flags & SA_RESTORER) {
894 unsigned int restorer = IA32_SA_RESTORER(ka);
895 err |= __put_user(restorer, &frame->pretcode);
896 } else {
897 /* Pointing to rt_restorer in ia32 gate page */
898 err |= __put_user(IA32_GATE_OFFSET + 8, &frame->pretcode);
899 }
900
901	/* This is movl $,%eax ; int $0x80
902	 * and is there for historical reasons only.
903	 * See arch/i386/kernel/signal.c
904	 */
905
906 err |= __put_user(0xb8, (char __user *)(frame->retcode+0));
907 err |= __put_user(__IA32_NR_rt_sigreturn, (int __user *)(frame->retcode+1));
908 err |= __put_user(0x80cd, (short __user *)(frame->retcode+5));
909
910 if (err)
911 goto give_sigsegv;
912
913 /* Set up registers for signal handler */
914 regs->r12 = (unsigned long) frame;
915 regs->cr_iip = IA32_SA_HANDLER(ka);
916
917 set_fs(USER_DS);
918
919#if 0
920 regs->eflags &= ~TF_MASK;
921#endif
922
923#if 0
924 printk("SIG deliver (%s:%d): sp=%p pc=%lx ra=%x\n",
925 current->comm, current->pid, (void *) frame, regs->cr_iip, frame->pretcode);
926#endif
927
928 return 1;
929
930give_sigsegv:
931 force_sigsegv(sig, current);
932 return 0;
933}
934
935int
936ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
937 sigset_t *set, struct pt_regs *regs)
938{
939 /* Set up the stack frame */
940 if (ka->sa.sa_flags & SA_SIGINFO)
941 return setup_rt_frame_ia32(sig, ka, info, set, regs);
942 else
943 return setup_frame_ia32(sig, ka, set, regs);
944}
945
946asmlinkage long
947sys32_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4, int arg5,
948 int arg6, int arg7, struct pt_regs regs)
949{
950 unsigned long esp = (unsigned int) regs.r12;
951 struct sigframe_ia32 __user *frame = (struct sigframe_ia32 __user *)(esp - 8);
952 sigset_t set;
953 int eax;
954
955 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
956 goto badframe;
957
958 if (__get_user(set.sig[0], &frame->sc.oldmask)
959 || (_COMPAT_NSIG_WORDS > 1 && __copy_from_user((char *) &set.sig + 4, &frame->extramask,
960 sizeof(frame->extramask))))
961 goto badframe;
962
963 sigdelsetmask(&set, ~_BLOCKABLE);
964 spin_lock_irq(&current->sighand->siglock);
965 current->blocked = set;
966 recalc_sigpending();
967 spin_unlock_irq(&current->sighand->siglock);
968
969 if (restore_sigcontext_ia32(&regs, &frame->sc, &eax))
970 goto badframe;
971 return eax;
972
973 badframe:
974 force_sig(SIGSEGV, current);
975 return 0;
976}
977
978asmlinkage long
979sys32_rt_sigreturn (int arg0, int arg1, int arg2, int arg3, int arg4,
980 int arg5, int arg6, int arg7, struct pt_regs regs)
981{
982 unsigned long esp = (unsigned int) regs.r12;
983 struct rt_sigframe_ia32 __user *frame = (struct rt_sigframe_ia32 __user *)(esp - 4);
984 sigset_t set;
985 int eax;
986
987 if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
988 goto badframe;
989 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
990 goto badframe;
991
992 sigdelsetmask(&set, ~_BLOCKABLE);
993 spin_lock_irq(&current->sighand->siglock);
994 current->blocked = set;
995 recalc_sigpending();
996 spin_unlock_irq(&current->sighand->siglock);
997
998 if (restore_sigcontext_ia32(&regs, &frame->uc.uc_mcontext, &eax))
999 goto badframe;
1000
1001 /* It is more difficult to avoid calling this function than to
1002 call it and ignore errors. */
1003 do_sigaltstack((stack_t __user *) &frame->uc.uc_stack, NULL, esp);
1004
1005 return eax;
1006
1007 badframe:
1008 force_sig(SIGSEGV, current);
1009 return 0;
1010}
diff --git a/arch/ia64/ia32/ia32_support.c b/arch/ia64/ia32/ia32_support.c
deleted file mode 100644
index a6965ddafc46..000000000000
--- a/arch/ia64/ia32/ia32_support.c
+++ /dev/null
@@ -1,253 +0,0 @@
1/*
2 * IA32 helper functions
3 *
4 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
5 * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
6 * Copyright (C) 2001-2002 Hewlett-Packard Co
7 * David Mosberger-Tang <davidm@hpl.hp.com>
8 *
9 * 06/16/00 A. Mallick added csd/ssd/tssd for ia32 thread context
10 * 02/19/01 D. Mosberger dropped tssd; it's not needed
11 * 09/14/01 D. Mosberger fixed memory management for gdt/tss page
12 * 09/29/01 D. Mosberger added ia32_load_segment_descriptors()
13 */
14
15#include <linux/kernel.h>
16#include <linux/init.h>
17#include <linux/mm.h>
18#include <linux/sched.h>
19
20#include <asm/intrinsics.h>
21#include <asm/page.h>
22#include <asm/pgtable.h>
23#include <asm/system.h>
24#include <asm/processor.h>
25#include <asm/uaccess.h>
26
27#include "ia32priv.h"
28
29extern int die_if_kernel (char *str, struct pt_regs *regs, long err);
30
31struct page *ia32_shared_page[NR_CPUS];
32unsigned long *ia32_boot_gdt;
33unsigned long *cpu_gdt_table[NR_CPUS];
34struct page *ia32_gate_page;
35
36static unsigned long
37load_desc (u16 selector)
38{
39 unsigned long *table, limit, index;
40
41 if (!selector)
42 return 0;
43 if (selector & IA32_SEGSEL_TI) {
44 table = (unsigned long *) IA32_LDT_OFFSET;
45 limit = IA32_LDT_ENTRIES;
46 } else {
47 table = cpu_gdt_table[smp_processor_id()];
48 limit = IA32_PAGE_SIZE / sizeof(ia32_boot_gdt[0]);
49 }
50 index = selector >> IA32_SEGSEL_INDEX_SHIFT;
51 if (index >= limit)
52 return 0;
53 return IA32_SEG_UNSCRAMBLE(table[index]);
54}
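/*
 * Sketch (hypothetical demo function) of the selector decoding above for
 * __USER_CS (0x23): the TI bit (0x4) is clear, so the GDT is used;
 * index = 0x23 >> 3 = 4; the low two bits (RPL) are 3.
 */
static void decode_selector_demo(unsigned short sel)
{
	int uses_ldt = (sel & IA32_SEGSEL_TI) != 0;		/* table indicator */
	unsigned int index = sel >> IA32_SEGSEL_INDEX_SHIFT;	/* descriptor slot */
	unsigned int rpl = sel & IA32_SEGSEL_RPL;		/* requested priv level */

	(void) uses_ldt; (void) index; (void) rpl;
}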
55
56void
57ia32_load_segment_descriptors (struct task_struct *task)
58{
59 struct pt_regs *regs = task_pt_regs(task);
60
61 /* Setup the segment descriptors */
62 regs->r24 = load_desc(regs->r16 >> 16); /* ESD */
63 regs->r27 = load_desc(regs->r16 >> 0); /* DSD */
64 regs->r28 = load_desc(regs->r16 >> 32); /* FSD */
65 regs->r29 = load_desc(regs->r16 >> 48); /* GSD */
66 regs->ar_csd = load_desc(regs->r17 >> 0); /* CSD */
67 regs->ar_ssd = load_desc(regs->r17 >> 16); /* SSD */
68}
69
70int
71ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
72{
73 struct desc_struct *desc;
74 struct ia32_user_desc info;
75 int idx;
76
77 if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info)))
78 return -EFAULT;
79 if (LDT_empty(&info))
80 return -EINVAL;
81
82 idx = info.entry_number;
83 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
84 return -EINVAL;
85
86 desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
87 desc->a = LDT_entry_a(&info);
88 desc->b = LDT_entry_b(&info);
89
90 /* XXX: can this be done in a cleaner way ? */
91 load_TLS(&child->thread, smp_processor_id());
92 ia32_load_segment_descriptors(child);
93 load_TLS(&current->thread, smp_processor_id());
94
95 return 0;
96}
97
98void
99ia32_save_state (struct task_struct *t)
100{
101 t->thread.eflag = ia64_getreg(_IA64_REG_AR_EFLAG);
102 t->thread.fsr = ia64_getreg(_IA64_REG_AR_FSR);
103 t->thread.fcr = ia64_getreg(_IA64_REG_AR_FCR);
104 t->thread.fir = ia64_getreg(_IA64_REG_AR_FIR);
105 t->thread.fdr = ia64_getreg(_IA64_REG_AR_FDR);
106 ia64_set_kr(IA64_KR_IO_BASE, t->thread.old_iob);
107 ia64_set_kr(IA64_KR_TSSD, t->thread.old_k1);
108}
109
110void
111ia32_load_state (struct task_struct *t)
112{
113 unsigned long eflag, fsr, fcr, fir, fdr, tssd;
114 struct pt_regs *regs = task_pt_regs(t);
115
116 eflag = t->thread.eflag;
117 fsr = t->thread.fsr;
118 fcr = t->thread.fcr;
119 fir = t->thread.fir;
120 fdr = t->thread.fdr;
121 tssd = load_desc(_TSS); /* TSSD */
122
123 ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
124 ia64_setreg(_IA64_REG_AR_FSR, fsr);
125 ia64_setreg(_IA64_REG_AR_FCR, fcr);
126 ia64_setreg(_IA64_REG_AR_FIR, fir);
127 ia64_setreg(_IA64_REG_AR_FDR, fdr);
128 current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
129 current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
130 ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
131 ia64_set_kr(IA64_KR_TSSD, tssd);
132
133 regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
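	/*
	 * Note the packing: bits 0..31 of r17 keep the user's cs | (ss << 16),
	 * while bits 32..47 and 48..63 carry the LDT and TSS selectors.
	 */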
134 regs->r30 = load_desc(_LDT); /* LDTD */
135 load_TLS(&t->thread, smp_processor_id());
136}
137
138/*
139 * Setup IA32 GDT and TSS
140 */
141void
142ia32_gdt_init (void)
143{
144 int cpu = smp_processor_id();
145
146 ia32_shared_page[cpu] = alloc_page(GFP_KERNEL);
147 if (!ia32_shared_page[cpu])
148 panic("failed to allocate ia32_shared_page[%d]\n", cpu);
149
150 cpu_gdt_table[cpu] = page_address(ia32_shared_page[cpu]);
151
152 /* Copy from the boot cpu's GDT */
153 memcpy(cpu_gdt_table[cpu], ia32_boot_gdt, PAGE_SIZE);
154}
155
156
157/*
158 * Setup IA32 GDT and TSS
159 */
160static void
161ia32_boot_gdt_init (void)
162{
163 unsigned long ldt_size;
164
165 ia32_shared_page[0] = alloc_page(GFP_KERNEL);
166 if (!ia32_shared_page[0])
167 panic("failed to allocate ia32_shared_page[0]\n");
168
169 ia32_boot_gdt = page_address(ia32_shared_page[0]);
170 cpu_gdt_table[0] = ia32_boot_gdt;
171
172 /* CS descriptor in IA-32 (scrambled) format */
173 ia32_boot_gdt[__USER_CS >> 3]
174 = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
175 0xb, 1, 3, 1, 1, 1, 1);
176
177 /* DS descriptor in IA-32 (scrambled) format */
178 ia32_boot_gdt[__USER_DS >> 3]
179 = IA32_SEG_DESCRIPTOR(0, (IA32_GATE_END-1) >> IA32_PAGE_SHIFT,
180 0x3, 1, 3, 1, 1, 1, 1);
181
182 ldt_size = PAGE_ALIGN(IA32_LDT_ENTRIES*IA32_LDT_ENTRY_SIZE);
183 ia32_boot_gdt[TSS_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_TSS_OFFSET, 235,
184 0xb, 0, 3, 1, 1, 1, 0);
185 ia32_boot_gdt[LDT_ENTRY] = IA32_SEG_DESCRIPTOR(IA32_LDT_OFFSET, ldt_size - 1,
186 0x2, 0, 3, 1, 1, 1, 0);
187}
188
189static void
190ia32_gate_page_init(void)
191{
192 unsigned long *sr;
193
194 ia32_gate_page = alloc_page(GFP_KERNEL);
195 sr = page_address(ia32_gate_page);
196 /* This is popl %eax ; movl $,%eax ; int $0x80 */
197 *sr++ = 0xb858 | (__IA32_NR_sigreturn << 16) | (0x80cdUL << 48);
198
199 /* This is movl $,%eax ; int $0x80 */
200 *sr = 0xb8 | (__IA32_NR_rt_sigreturn << 8) | (0x80cdUL << 40);
201}
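	/*
	 * Byte-layout note: little-endian, the first constant unpacks to
	 * 58 (popl %eax), b8 <nr32> (movl $__IA32_NR_sigreturn,%eax),
	 * cd 80 (int $0x80); the second drops the leading popl.
	 */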
202
203void
204ia32_mem_init(void)
205{
206 ia32_boot_gdt_init();
207 ia32_gate_page_init();
208}
209
210/*
211 * Handle bad IA32 interrupt via syscall
212 */
213void
214ia32_bad_interrupt (unsigned long int_num, struct pt_regs *regs)
215{
216 siginfo_t siginfo;
217
218 if (die_if_kernel("Bad IA-32 interrupt", regs, int_num))
219 return;
220
221 siginfo.si_signo = SIGTRAP;
222 siginfo.si_errno = int_num; /* XXX is it OK to abuse si_errno like this? */
223 siginfo.si_flags = 0;
224 siginfo.si_isr = 0;
225 siginfo.si_addr = NULL;
226 siginfo.si_imm = 0;
227 siginfo.si_code = TRAP_BRKPT;
228 force_sig_info(SIGTRAP, &siginfo, current);
229}
230
231void
232ia32_cpu_init (void)
233{
234 /* initialize global ia32 state - CR0 and CR4 */
235 ia64_setreg(_IA64_REG_AR_CFLAG, (((ulong) IA32_CR4 << 32) | IA32_CR0));
236}
237
238static int __init
239ia32_init (void)
240{
241#if PAGE_SHIFT > IA32_PAGE_SHIFT
242 {
243 extern struct kmem_cache *ia64_partial_page_cachep;
244
245 ia64_partial_page_cachep = kmem_cache_create("ia64_partial_page_cache",
246 sizeof(struct ia64_partial_page),
247 0, SLAB_PANIC, NULL);
248 }
249#endif
250 return 0;
251}
252
253__initcall(ia32_init);
diff --git a/arch/ia64/ia32/ia32_traps.c b/arch/ia64/ia32/ia32_traps.c
deleted file mode 100644
index e486042672f1..000000000000
--- a/arch/ia64/ia32/ia32_traps.c
+++ /dev/null
@@ -1,156 +0,0 @@
1/*
2 * IA-32 exception handlers
3 *
4 * Copyright (C) 2000 Asit K. Mallick <asit.k.mallick@intel.com>
5 * Copyright (C) 2001-2002 Hewlett-Packard Co
6 * David Mosberger-Tang <davidm@hpl.hp.com>
7 *
8 * 06/16/00 A. Mallick added siginfo for most cases (close to IA32)
9 * 09/29/00 D. Mosberger added ia32_intercept()
10 */
11
12#include <linux/kernel.h>
13#include <linux/sched.h>
14
15#include "ia32priv.h"
16
17#include <asm/intrinsics.h>
18#include <asm/ptrace.h>
19
20int
21ia32_intercept (struct pt_regs *regs, unsigned long isr)
22{
23 switch ((isr >> 16) & 0xff) {
24 case 0: /* Instruction intercept fault */
25 case 4: /* Locked Data reference fault */
26 case 1: /* Gate intercept trap */
27 return -1;
28
29 case 2: /* System flag trap */
30 if (((isr >> 14) & 0x3) >= 2) {
31 /* MOV SS, POP SS instructions */
32 ia64_psr(regs)->id = 1;
33 return 0;
34 } else
35 return -1;
36 }
37 return -1;
38}
39
40int
41ia32_exception (struct pt_regs *regs, unsigned long isr)
42{
43 struct siginfo siginfo;
44
45 /* initialize these fields to avoid leaking kernel bits to user space: */
46 siginfo.si_errno = 0;
47 siginfo.si_flags = 0;
48 siginfo.si_isr = 0;
49 siginfo.si_imm = 0;
50 switch ((isr >> 16) & 0xff) {
51 case 1:
52 case 2:
53 siginfo.si_signo = SIGTRAP;
54 if (isr == 0)
55 siginfo.si_code = TRAP_TRACE;
56 else if (isr & 0x4)
57 siginfo.si_code = TRAP_BRANCH;
58 else
59 siginfo.si_code = TRAP_BRKPT;
60 break;
61
62 case 3:
63 siginfo.si_signo = SIGTRAP;
64 siginfo.si_code = TRAP_BRKPT;
65 break;
66
67 case 0: /* Divide fault */
68 siginfo.si_signo = SIGFPE;
69 siginfo.si_code = FPE_INTDIV;
70 break;
71
72 case 4: /* Overflow */
73 case 5: /* Bounds fault */
74 siginfo.si_signo = SIGFPE;
75 siginfo.si_code = 0;
76 break;
77
78 case 6: /* Invalid Op-code */
79 siginfo.si_signo = SIGILL;
80 siginfo.si_code = ILL_ILLOPN;
81 break;
82
83 case 7: /* FP DNA */
84 case 8: /* Double Fault */
85 case 9: /* Invalid TSS */
86 case 11: /* Segment not present */
87 case 12: /* Stack fault */
88 case 13: /* General Protection Fault */
89 siginfo.si_signo = SIGSEGV;
90 siginfo.si_code = 0;
91 break;
92
93 case 16: /* Pending FP error */
94 {
95 unsigned long fsr, fcr;
96
97 fsr = ia64_getreg(_IA64_REG_AR_FSR);
98 fcr = ia64_getreg(_IA64_REG_AR_FCR);
99
100 siginfo.si_signo = SIGFPE;
101 /*
102 * (~cwd & swd) will mask out exceptions that are not set to unmasked
103 * status. 0x3f is the exception bits in these regs, 0x200 is the
104 * C1 reg you need in case of a stack fault, 0x040 is the stack
105 * fault bit. We should only be taking one exception at a time,
106 * so if this combination doesn't produce any single exception,
107	 * then we have a bad program that isn't synchronizing its FPU usage,
108	 * and it will suffer the consequences, since we won't be able to
109	 * fully reproduce the context of the exception.
110 */
111 siginfo.si_isr = isr;
112 siginfo.si_flags = __ISR_VALID;
113 switch(((~fcr) & (fsr & 0x3f)) | (fsr & 0x240)) {
114 case 0x000:
115 default:
116 siginfo.si_code = 0;
117 break;
118 case 0x001: /* Invalid Op */
119 case 0x040: /* Stack Fault */
120 case 0x240: /* Stack Fault | Direction */
121 siginfo.si_code = FPE_FLTINV;
122 break;
123 case 0x002: /* Denormalize */
124 case 0x010: /* Underflow */
125 siginfo.si_code = FPE_FLTUND;
126 break;
127 case 0x004: /* Zero Divide */
128 siginfo.si_code = FPE_FLTDIV;
129 break;
130 case 0x008: /* Overflow */
131 siginfo.si_code = FPE_FLTOVF;
132 break;
133 case 0x020: /* Precision */
134 siginfo.si_code = FPE_FLTRES;
135 break;
136 }
137
138 break;
139 }
140
141 case 17: /* Alignment check */
142 siginfo.si_signo = SIGSEGV;
143 siginfo.si_code = BUS_ADRALN;
144 break;
145
146 case 19: /* SSE Numeric error */
147 siginfo.si_signo = SIGFPE;
148 siginfo.si_code = 0;
149 break;
150
151 default:
152 return -1;
153 }
154 force_sig_info(siginfo.si_signo, &siginfo, current);
155 return 0;
156}
diff --git a/arch/ia64/ia32/ia32priv.h b/arch/ia64/ia32/ia32priv.h
deleted file mode 100644
index 0f15349c3c6b..000000000000
--- a/arch/ia64/ia32/ia32priv.h
+++ /dev/null
@@ -1,532 +0,0 @@
1#ifndef _ASM_IA64_IA32_PRIV_H
2#define _ASM_IA64_IA32_PRIV_H
3
4
5#include <asm/ia32.h>
6
7#ifdef CONFIG_IA32_SUPPORT
8
9#include <linux/binfmts.h>
10#include <linux/compat.h>
11#include <linux/rbtree.h>
12
13#include <asm/processor.h>
14
15/*
16 * 32 bit structures for IA32 support.
17 */
18
19#define IA32_PAGE_SIZE (1UL << IA32_PAGE_SHIFT)
20#define IA32_PAGE_MASK (~(IA32_PAGE_SIZE - 1))
21#define IA32_PAGE_ALIGN(addr) (((addr) + IA32_PAGE_SIZE - 1) & IA32_PAGE_MASK)
22#define IA32_CLOCKS_PER_SEC 100 /* Cast in stone for IA32 Linux */
23
24/*
25 * partially mapped pages provide precise accounting of which 4k sub pages
26 * are mapped and which ones are not, thereby improving IA-32 compatibility.
27 */
28struct ia64_partial_page {
29 struct ia64_partial_page *next; /* linked list, sorted by address */
30 struct rb_node pp_rb;
31	/* 64K is the largest "normal" page supported by the ia64 ABI, so a
32	 * 64-bit bitmap of 4K subpages should suffice. */
33 unsigned long bitmap;
34 unsigned int base;
35};
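/*
 * For example (assuming a kernel configured with PAGE_SHIFT == 16 and
 * IA32_PAGE_SHIFT == 12): each 64K ia64 page holds 16 4K subpages, so
 * the 64-bit bitmap above comfortably tracks which of them are mapped.
 */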
36
37struct ia64_partial_page_list {
38 struct ia64_partial_page *pp_head; /* list head, points to the lowest
39 * addressed partial page */
40 struct rb_root ppl_rb;
41 struct ia64_partial_page *pp_hint; /* pp_hint->next is the last
42 * accessed partial page */
43 atomic_t pp_count; /* reference count */
44};
45
46#if PAGE_SHIFT > IA32_PAGE_SHIFT
47struct ia64_partial_page_list* ia32_init_pp_list (void);
48#else
49# define ia32_init_pp_list() 0
50#endif
51
52/* sigcontext.h */
53/*
54 * As documented in the iBCS2 standard..
55 *
56 * The first part of "struct _fpstate" is just the
57 * normal i387 hardware setup, the extra "status"
58 * word is used to save the coprocessor status word
59 * before entering the handler.
60 */
61struct _fpreg_ia32 {
62 unsigned short significand[4];
63 unsigned short exponent;
64};
65
66struct _fpxreg_ia32 {
67 unsigned short significand[4];
68 unsigned short exponent;
69 unsigned short padding[3];
70};
71
72struct _xmmreg_ia32 {
73 unsigned int element[4];
74};
75
76
77struct _fpstate_ia32 {
78 unsigned int cw,
79 sw,
80 tag,
81 ipoff,
82 cssel,
83 dataoff,
84 datasel;
85 struct _fpreg_ia32 _st[8];
86 unsigned short status;
87 unsigned short magic; /* 0xffff = regular FPU data only */
88
89 /* FXSR FPU environment */
90 unsigned int _fxsr_env[6]; /* FXSR FPU env is ignored */
91 unsigned int mxcsr;
92 unsigned int reserved;
93 struct _fpxreg_ia32 _fxsr_st[8]; /* FXSR FPU reg data is ignored */
94 struct _xmmreg_ia32 _xmm[8];
95 unsigned int padding[56];
96};
97
98struct sigcontext_ia32 {
99 unsigned short gs, __gsh;
100 unsigned short fs, __fsh;
101 unsigned short es, __esh;
102 unsigned short ds, __dsh;
103 unsigned int edi;
104 unsigned int esi;
105 unsigned int ebp;
106 unsigned int esp;
107 unsigned int ebx;
108 unsigned int edx;
109 unsigned int ecx;
110 unsigned int eax;
111 unsigned int trapno;
112 unsigned int err;
113 unsigned int eip;
114 unsigned short cs, __csh;
115 unsigned int eflags;
116 unsigned int esp_at_signal;
117 unsigned short ss, __ssh;
118 unsigned int fpstate; /* really (struct _fpstate_ia32 *) */
119 unsigned int oldmask;
120 unsigned int cr2;
121};
122
123/* user.h */
124/*
125 * IA32 (Pentium III/4) FXSR, SSE support
126 *
127 * Provide support for the GDB 5.0+ PTRACE_{GET|SET}FPXREGS requests for
128 * interacting with the FXSR-format floating point environment. Floating
129 * point data can be accessed in the regular format in the usual manner,
130 * and both the standard and SIMD floating point data can be accessed via
131 * the new ptrace requests. In either case, changes to the FPU environment
132 * will be reflected in the task's state as expected.
133 */
134struct ia32_user_i387_struct {
135 int cwd;
136 int swd;
137 int twd;
138 int fip;
139 int fcs;
140 int foo;
141 int fos;
142 /* 8*10 bytes for each FP-reg = 80 bytes */
143 struct _fpreg_ia32 st_space[8];
144};
145
146struct ia32_user_fxsr_struct {
147 unsigned short cwd;
148 unsigned short swd;
149 unsigned short twd;
150 unsigned short fop;
151 int fip;
152 int fcs;
153 int foo;
154 int fos;
155 int mxcsr;
156 int reserved;
157 int st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
158 int xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
159 int padding[56];
160};
161
162/* signal.h */
163#define IA32_SET_SA_HANDLER(ka,handler,restorer) \
164 ((ka)->sa.sa_handler = (__sighandler_t) \
165 (((unsigned long)(restorer) << 32) \
166 | ((handler) & 0xffffffff)))
167#define IA32_SA_HANDLER(ka) ((unsigned long) (ka)->sa.sa_handler & 0xffffffff)
168#define IA32_SA_RESTORER(ka) ((unsigned long) (ka)->sa.sa_handler >> 32)
169
170#define __IA32_NR_sigreturn 119
171#define __IA32_NR_rt_sigreturn 173
172
173struct sigaction32 {
174 unsigned int sa_handler; /* Really a pointer, but need to deal with 32 bits */
175 unsigned int sa_flags;
176 unsigned int sa_restorer; /* Another 32 bit pointer */
177 compat_sigset_t sa_mask; /* A 32 bit mask */
178};
179
180struct old_sigaction32 {
181 unsigned int sa_handler; /* Really a pointer, but need to deal
182 with 32 bits */
183 compat_old_sigset_t sa_mask; /* A 32 bit mask */
184 unsigned int sa_flags;
185 unsigned int sa_restorer; /* Another 32 bit pointer */
186};
187
188typedef struct sigaltstack_ia32 {
189 unsigned int ss_sp;
190 int ss_flags;
191 unsigned int ss_size;
192} stack_ia32_t;
193
194struct ucontext_ia32 {
195 unsigned int uc_flags;
196 unsigned int uc_link;
197 stack_ia32_t uc_stack;
198 struct sigcontext_ia32 uc_mcontext;
199 sigset_t uc_sigmask; /* mask last for extensibility */
200};
201
202struct stat64 {
203 unsigned long long st_dev;
204 unsigned char __pad0[4];
205 unsigned int __st_ino;
206 unsigned int st_mode;
207 unsigned int st_nlink;
208 unsigned int st_uid;
209 unsigned int st_gid;
210 unsigned long long st_rdev;
211 unsigned char __pad3[4];
212 unsigned int st_size_lo;
213 unsigned int st_size_hi;
214 unsigned int st_blksize;
215	unsigned int	st_blocks;	/* Number of 512-byte blocks allocated. */
216 unsigned int __pad4; /* future possible st_blocks high bits */
217 unsigned int st_atime;
218 unsigned int st_atime_nsec;
219 unsigned int st_mtime;
220 unsigned int st_mtime_nsec;
221 unsigned int st_ctime;
222 unsigned int st_ctime_nsec;
223 unsigned int st_ino_lo;
224 unsigned int st_ino_hi;
225};
226
227typedef struct compat_siginfo {
228 int si_signo;
229 int si_errno;
230 int si_code;
231
232 union {
233 int _pad[((128/sizeof(int)) - 3)];
234
235 /* kill() */
236 struct {
237 unsigned int _pid; /* sender's pid */
238 unsigned int _uid; /* sender's uid */
239 } _kill;
240
241 /* POSIX.1b timers */
242 struct {
243 compat_timer_t _tid; /* timer id */
244 int _overrun; /* overrun count */
245 char _pad[sizeof(unsigned int) - sizeof(int)];
246 compat_sigval_t _sigval; /* same as below */
247 int _sys_private; /* not to be passed to user */
248 } _timer;
249
250 /* POSIX.1b signals */
251 struct {
252 unsigned int _pid; /* sender's pid */
253 unsigned int _uid; /* sender's uid */
254 compat_sigval_t _sigval;
255 } _rt;
256
257 /* SIGCHLD */
258 struct {
259 unsigned int _pid; /* which child */
260 unsigned int _uid; /* sender's uid */
261 int _status; /* exit code */
262 compat_clock_t _utime;
263 compat_clock_t _stime;
264 } _sigchld;
265
266 /* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
267 struct {
268 unsigned int _addr; /* faulting insn/memory ref. */
269 } _sigfault;
270
271 /* SIGPOLL */
272 struct {
273 int _band; /* POLL_IN, POLL_OUT, POLL_MSG */
274 int _fd;
275 } _sigpoll;
276 } _sifields;
277} compat_siginfo_t;
278
279/*
280 * IA-32 ELF specific definitions for IA-64.
281 */
282
283#define _ASM_IA64_ELF_H /* Don't include elf.h */
284
285#include <linux/sched.h>
286
287/*
288 * This is used to ensure we don't load something for the wrong architecture.
289 */
290#define elf_check_arch(x) ((x)->e_machine == EM_386)
291
292/*
293 * These are used to set parameters in the core dumps.
294 */
295#define ELF_CLASS ELFCLASS32
296#define ELF_DATA ELFDATA2LSB
297#define ELF_ARCH EM_386
298
299#define IA32_STACK_TOP IA32_PAGE_OFFSET
300#define IA32_GATE_OFFSET IA32_PAGE_OFFSET
301#define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
302
303/*
304 * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
305 * access them.
306 */
307#define IA32_GDT_OFFSET (IA32_PAGE_OFFSET + PAGE_SIZE)
308#define IA32_TSS_OFFSET (IA32_PAGE_OFFSET + 2*PAGE_SIZE)
309#define IA32_LDT_OFFSET (IA32_PAGE_OFFSET + 3*PAGE_SIZE)
310
311#define ELF_EXEC_PAGESIZE IA32_PAGE_SIZE
312
313/*
314 * This is the location that an ET_DYN program is loaded if exec'ed.
315 * Typical use of this is to invoke "./ld.so someprog" to test out a
316 * new version of the loader. We need to make sure that it is out of
317 * the way of the program that it will "exec", and that there is
318 * sufficient room for the brk.
319 */
320#define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000)
321
322void ia64_elf32_init(struct pt_regs *regs);
323#define ELF_PLAT_INIT(_r, load_addr) ia64_elf32_init(_r)
324
325/* This macro yields a bitmask that programs can use to figure out
326 what instruction set this CPU supports. */
327#define ELF_HWCAP 0
328
329/* This macro yields a string that ld.so will use to load
330 implementation specific libraries for optimization. Not terribly
331 relevant until we have real hardware to play with... */
332#define ELF_PLATFORM NULL
333
334#ifdef __KERNEL__
335# define SET_PERSONALITY(EX) \
336 (current->personality = PER_LINUX)
337#endif
338
339#define IA32_EFLAG 0x200
340
341/*
342 * IA-32 ELF specific definitions for IA-64.
343 */
344
345#define __USER_CS 0x23
346#define __USER_DS 0x2B
347
348/*
349 * The per-cpu GDT has 32 entries: see <asm-i386/segment.h>
350 */
351#define GDT_ENTRIES 32
352
353#define GDT_SIZE (GDT_ENTRIES * 8)
354
355#define TSS_ENTRY 14
356#define LDT_ENTRY (TSS_ENTRY + 1)
357
358#define IA32_SEGSEL_RPL (0x3 << 0)
359#define IA32_SEGSEL_TI (0x1 << 2)
360#define IA32_SEGSEL_INDEX_SHIFT 3
361
362#define _TSS ((unsigned long) TSS_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
363#define _LDT ((unsigned long) LDT_ENTRY << IA32_SEGSEL_INDEX_SHIFT)
364
365#define IA32_SEG_BASE 16
366#define IA32_SEG_TYPE 40
367#define IA32_SEG_SYS 44
368#define IA32_SEG_DPL 45
369#define IA32_SEG_P 47
370#define IA32_SEG_HIGH_LIMIT 48
371#define IA32_SEG_AVL 52
372#define IA32_SEG_DB 54
373#define IA32_SEG_G 55
374#define IA32_SEG_HIGH_BASE 56
375
376#define IA32_SEG_DESCRIPTOR(base, limit, segtype, nonsysseg, dpl, segpresent, avl, segdb, gran) \
377 (((limit) & 0xffff) \
378 | (((unsigned long) (base) & 0xffffff) << IA32_SEG_BASE) \
379 | ((unsigned long) (segtype) << IA32_SEG_TYPE) \
380 | ((unsigned long) (nonsysseg) << IA32_SEG_SYS) \
381 | ((unsigned long) (dpl) << IA32_SEG_DPL) \
382 | ((unsigned long) (segpresent) << IA32_SEG_P) \
383 | ((((unsigned long) (limit) >> 16) & 0xf) << IA32_SEG_HIGH_LIMIT) \
384 | ((unsigned long) (avl) << IA32_SEG_AVL) \
385 | ((unsigned long) (segdb) << IA32_SEG_DB) \
386 | ((unsigned long) (gran) << IA32_SEG_G) \
387 | ((((unsigned long) (base) >> 24) & 0xff) << IA32_SEG_HIGH_BASE))
388
389#define SEG_LIM 32
390#define SEG_TYPE 52
391#define SEG_SYS 56
392#define SEG_DPL 57
393#define SEG_P 59
394#define SEG_AVL 60
395#define SEG_DB 62
396#define SEG_G 63
397
398/* Unscramble an IA-32 segment descriptor into the IA-64 format. */
399#define IA32_SEG_UNSCRAMBLE(sd) \
400 ( (((sd) >> IA32_SEG_BASE) & 0xffffff) | ((((sd) >> IA32_SEG_HIGH_BASE) & 0xff) << 24) \
401 | ((((sd) & 0xffff) | ((((sd) >> IA32_SEG_HIGH_LIMIT) & 0xf) << 16)) << SEG_LIM) \
402 | ((((sd) >> IA32_SEG_TYPE) & 0xf) << SEG_TYPE) \
403 | ((((sd) >> IA32_SEG_SYS) & 0x1) << SEG_SYS) \
404 | ((((sd) >> IA32_SEG_DPL) & 0x3) << SEG_DPL) \
405 | ((((sd) >> IA32_SEG_P) & 0x1) << SEG_P) \
406 | ((((sd) >> IA32_SEG_AVL) & 0x1) << SEG_AVL) \
407 | ((((sd) >> IA32_SEG_DB) & 0x1) << SEG_DB) \
408 | ((((sd) >> IA32_SEG_G) & 0x1) << SEG_G))
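/*
 * Round-trip sketch (hypothetical values): build a flat user data
 * descriptor with IA32_SEG_DESCRIPTOR() and unscramble it into the
 * ia64 register format consumed by load_desc().
 */
static inline unsigned long seg_scramble_demo(void)
{
	/* base 0, 20-bit limit 0xfffff, type 0x3 (data, r/w), user segment,
	   DPL 3, present, avl, 32-bit, page-granular */
	unsigned long sd = IA32_SEG_DESCRIPTOR(0, 0xfffff, 0x3, 1, 3, 1, 1, 1, 1);

	return IA32_SEG_UNSCRAMBLE(sd);
}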
409
410#define IA32_IOBASE 0x2000000000000000UL /* Virtual address for I/O space */
411
412#define IA32_CR0 0x80000001 /* Enable PG and PE bits */
413#define IA32_CR4 0x600 /* MMXEX and FXSR on */
414
415/*
416 * IA32 floating point control registers starting values
417 */
418
419#define IA32_FSR_DEFAULT 0x55550000 /* set all tag bits */
420#define IA32_FCR_DEFAULT 0x17800000037fUL /* extended precision, all masks */
421
422#define IA32_PTRACE_GETREGS 12
423#define IA32_PTRACE_SETREGS 13
424#define IA32_PTRACE_GETFPREGS 14
425#define IA32_PTRACE_SETFPREGS 15
426#define IA32_PTRACE_GETFPXREGS 18
427#define IA32_PTRACE_SETFPXREGS 19
428
429#define ia32_start_thread(regs,new_ip,new_sp) do { \
430 set_fs(USER_DS); \
431 ia64_psr(regs)->cpl = 3; /* set user mode */ \
432 ia64_psr(regs)->ri = 0; /* clear return slot number */ \
433 ia64_psr(regs)->is = 1; /* IA-32 instruction set */ \
434 regs->cr_iip = new_ip; \
435 regs->ar_rsc = 0xc; /* enforced lazy mode, priv. level 3 */ \
436 regs->ar_rnat = 0; \
437 regs->loadrs = 0; \
438 regs->r12 = new_sp; \
439} while (0)
440
441/*
442 * Local Descriptor Table (LDT) related declarations.
443 */
444
445#define IA32_LDT_ENTRIES 8192 /* Maximum number of LDT entries supported. */
446#define IA32_LDT_ENTRY_SIZE 8 /* The size of each LDT entry. */
447
448#define LDT_entry_a(info) \
449 ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
450
451#define LDT_entry_b(info) \
452 (((info)->base_addr & 0xff000000) | \
453 (((info)->base_addr & 0x00ff0000) >> 16) | \
454 ((info)->limit & 0xf0000) | \
455 (((info)->read_exec_only ^ 1) << 9) | \
456 ((info)->contents << 10) | \
457 (((info)->seg_not_present ^ 1) << 15) | \
458 ((info)->seg_32bit << 22) | \
459 ((info)->limit_in_pages << 23) | \
460 ((info)->useable << 20) | \
461 0x7100)
462
463#define LDT_empty(info) ( \
464 (info)->base_addr == 0 && \
465 (info)->limit == 0 && \
466 (info)->contents == 0 && \
467 (info)->read_exec_only == 1 && \
468 (info)->seg_32bit == 0 && \
469 (info)->limit_in_pages == 0 && \
470 (info)->seg_not_present == 1 && \
471 (info)->useable == 0 )
472
473static inline void
474load_TLS (struct thread_struct *t, unsigned int cpu)
475{
476 extern unsigned long *cpu_gdt_table[NR_CPUS];
477
478 memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 0, &t->tls_array[0], sizeof(long));
479 memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 1, &t->tls_array[1], sizeof(long));
480 memcpy(cpu_gdt_table[cpu] + GDT_ENTRY_TLS_MIN + 2, &t->tls_array[2], sizeof(long));
481}
482
483struct ia32_user_desc {
484 unsigned int entry_number;
485 unsigned int base_addr;
486 unsigned int limit;
487 unsigned int seg_32bit:1;
488 unsigned int contents:2;
489 unsigned int read_exec_only:1;
490 unsigned int limit_in_pages:1;
491 unsigned int seg_not_present:1;
492 unsigned int useable:1;
493};
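/*
 * Sketch (hypothetical request) of how LDT_entry_a()/LDT_entry_b() pack
 * an ia32_user_desc into the two 32-bit halves of a descriptor:
 */
static inline void ldt_pack_demo(void)
{
	struct ia32_user_desc info = {
		.entry_number	 = 0,		/* caller-chosen slot */
		.base_addr	 = 0,
		.limit		 = 0xfffff,
		.seg_32bit	 = 1,
		.contents	 = 0,		/* data segment */
		.read_exec_only	 = 0,
		.limit_in_pages	 = 1,
		.seg_not_present = 0,
		.useable	 = 1,
	};
	unsigned int lo = LDT_entry_a(&info);	/* base[15:0] << 16 | limit[15:0] */
	unsigned int hi = LDT_entry_b(&info);	/* base[31:16], flags, limit[19:16] */

	(void) lo; (void) hi;
}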
494
495struct linux_binprm;
496
497extern void ia32_init_addr_space (struct pt_regs *regs);
498extern int ia32_setup_arg_pages (struct linux_binprm *bprm, int exec_stack);
499extern unsigned long ia32_do_mmap (struct file *, unsigned long, unsigned long, int, int, loff_t);
500extern void ia32_load_segment_descriptors (struct task_struct *task);
501
502#define ia32f2ia64f(dst,src) \
503do { \
504 ia64_ldfe(6,src); \
505 ia64_stop(); \
506 ia64_stf_spill(dst, 6); \
507} while(0)
508
509#define ia64f2ia32f(dst,src) \
510do { \
511 ia64_ldf_fill(6, src); \
512 ia64_stop(); \
513 ia64_stfe(dst, 6); \
514} while(0)
515
516struct user_regs_struct32 {
517 __u32 ebx, ecx, edx, esi, edi, ebp, eax;
518 unsigned short ds, __ds, es, __es;
519 unsigned short fs, __fs, gs, __gs;
520 __u32 orig_eax, eip;
521 unsigned short cs, __cs;
522 __u32 eflags, esp;
523 unsigned short ss, __ss;
524};
525
526/* Prototypes for use in elfcore32.h */
527extern int save_ia32_fpstate (struct task_struct *, struct ia32_user_i387_struct __user *);
528extern int save_ia32_fpxstate (struct task_struct *, struct ia32_user_fxsr_struct __user *);
529
530#endif /* !CONFIG_IA32_SUPPORT */
531
532#endif /* _ASM_IA64_IA32_PRIV_H */
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
deleted file mode 100644
index 625ed8f76fce..000000000000
--- a/arch/ia64/ia32/sys_ia32.c
+++ /dev/null
@@ -1,2817 +0,0 @@
1/*
2 * sys_ia32.c: Conversion between 32bit and 64bit native syscalls. Derived from sys_sparc32.c.
3 *
4 * Copyright (C) 2000 VA Linux Co
5 * Copyright (C) 2000 Don Dugger <n0ano@valinux.com>
6 * Copyright (C) 1999 Arun Sharma <arun.sharma@intel.com>
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
9 * Copyright (C) 2000-2003, 2005 Hewlett-Packard Co
10 * David Mosberger-Tang <davidm@hpl.hp.com>
11 * Copyright (C) 2004 Gordon Jin <gordon.jin@intel.com>
12 *
13 * These routines maintain argument size conversion between 32-bit and 64-bit
14 * environments.
15 */
16
17#include <linux/kernel.h>
18#include <linux/syscalls.h>
19#include <linux/sysctl.h>
20#include <linux/sched.h>
21#include <linux/fs.h>
22#include <linux/file.h>
23#include <linux/signal.h>
24#include <linux/resource.h>
25#include <linux/times.h>
26#include <linux/utsname.h>
27#include <linux/smp.h>
28#include <linux/smp_lock.h>
29#include <linux/sem.h>
30#include <linux/msg.h>
31#include <linux/mm.h>
32#include <linux/shm.h>
33#include <linux/slab.h>
34#include <linux/uio.h>
35#include <linux/socket.h>
36#include <linux/quota.h>
37#include <linux/poll.h>
38#include <linux/eventpoll.h>
39#include <linux/personality.h>
40#include <linux/ptrace.h>
41#include <linux/regset.h>
42#include <linux/stat.h>
43#include <linux/ipc.h>
44#include <linux/capability.h>
45#include <linux/compat.h>
46#include <linux/vfs.h>
47#include <linux/mman.h>
48#include <linux/mutex.h>
49
50#include <asm/intrinsics.h>
51#include <asm/types.h>
52#include <asm/uaccess.h>
53#include <asm/unistd.h>
54
55#include "ia32priv.h"
56
57#include <net/scm.h>
58#include <net/sock.h>
59
60#define DEBUG 0
61
62#if DEBUG
63# define DBG(fmt...) printk(KERN_DEBUG fmt)
64#else
65# define DBG(fmt...)
66#endif
67
68#define ROUND_UP(x,a) ((__typeof__(x))(((unsigned long)(x) + ((a) - 1)) & ~((a) - 1)))
69
70#define OFFSET4K(a) ((a) & 0xfff)
71#define PAGE_START(addr) ((addr) & PAGE_MASK)
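/* For example (hypothetical values): ROUND_UP(0x1001, 0x1000) == 0x2000,
   OFFSET4K(0x1234) == 0x234, and PAGE_START() masks an address down to
   the start of its page. */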
72#define MINSIGSTKSZ_IA32 2048
73
74#define high2lowuid(uid) ((uid) > 65535 ? 65534 : (uid))
75#define high2lowgid(gid) ((gid) > 65535 ? 65534 : (gid))
76
77/*
78 * Anything that modifies or inspects ia32 user virtual memory must hold this semaphore
79 * while doing so.
80 */
81/* XXX make per-mm: */
82static DEFINE_MUTEX(ia32_mmap_mutex);
83
84asmlinkage long
85sys32_execve (char __user *name, compat_uptr_t __user *argv, compat_uptr_t __user *envp,
86 struct pt_regs *regs)
87{
88 long error;
89 char *filename;
90 unsigned long old_map_base, old_task_size, tssd;
91
92 filename = getname(name);
93 error = PTR_ERR(filename);
94 if (IS_ERR(filename))
95 return error;
96
97 old_map_base = current->thread.map_base;
98 old_task_size = current->thread.task_size;
99 tssd = ia64_get_kr(IA64_KR_TSSD);
100
101 /* we may be exec'ing a 64-bit process: reset map base, task-size, and io-base: */
102 current->thread.map_base = DEFAULT_MAP_BASE;
103 current->thread.task_size = DEFAULT_TASK_SIZE;
104 ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
105 ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
106
107 error = compat_do_execve(filename, argv, envp, regs);
108 putname(filename);
109
110 if (error < 0) {
111 /* oops, execve failed, switch back to old values... */
112 ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
113 ia64_set_kr(IA64_KR_TSSD, tssd);
114 current->thread.map_base = old_map_base;
115 current->thread.task_size = old_task_size;
116 }
117
118 return error;
119}
120
121
122#if PAGE_SHIFT > IA32_PAGE_SHIFT
123
124
125static int
126get_page_prot (struct vm_area_struct *vma, unsigned long addr)
127{
128 int prot = 0;
129
130 if (!vma || vma->vm_start > addr)
131 return 0;
132
133 if (vma->vm_flags & VM_READ)
134 prot |= PROT_READ;
135 if (vma->vm_flags & VM_WRITE)
136 prot |= PROT_WRITE;
137 if (vma->vm_flags & VM_EXEC)
138 prot |= PROT_EXEC;
139 return prot;
140}
141
142/*
143 * Map a subpage by creating an anonymous page that contains the union of the old page and
144 * the subpage.
145 */
146static unsigned long
147mmap_subpage (struct file *file, unsigned long start, unsigned long end, int prot, int flags,
148 loff_t off)
149{
150 void *page = NULL;
151 struct inode *inode;
152 unsigned long ret = 0;
153 struct vm_area_struct *vma = find_vma(current->mm, start);
154 int old_prot = get_page_prot(vma, start);
155
156 DBG("mmap_subpage(file=%p,start=0x%lx,end=0x%lx,prot=%x,flags=%x,off=0x%llx)\n",
157 file, start, end, prot, flags, off);
158
159
160 /* Optimize the case where the old mmap and the new mmap are both anonymous */
161 if ((old_prot & PROT_WRITE) && (flags & MAP_ANONYMOUS) && !vma->vm_file) {
162 if (clear_user((void __user *) start, end - start)) {
163 ret = -EFAULT;
164 goto out;
165 }
166 goto skip_mmap;
167 }
168
169 page = (void *) get_zeroed_page(GFP_KERNEL);
170 if (!page)
171 return -ENOMEM;
172
173 if (old_prot)
174 copy_from_user(page, (void __user *) PAGE_START(start), PAGE_SIZE);
175
176 down_write(&current->mm->mmap_sem);
177 {
178 ret = do_mmap(NULL, PAGE_START(start), PAGE_SIZE, prot | PROT_WRITE,
179 flags | MAP_FIXED | MAP_ANONYMOUS, 0);
180 }
181 up_write(&current->mm->mmap_sem);
182
183 if (IS_ERR((void *) ret))
184 goto out;
185
186 if (old_prot) {
187 /* copy back the old page contents. */
188 if (offset_in_page(start))
189 copy_to_user((void __user *) PAGE_START(start), page,
190 offset_in_page(start));
191 if (offset_in_page(end))
192 copy_to_user((void __user *) end, page + offset_in_page(end),
193 PAGE_SIZE - offset_in_page(end));
194 }
195
196 if (!(flags & MAP_ANONYMOUS)) {
197 /* read the file contents */
198 inode = file->f_path.dentry->d_inode;
199 if (!inode->i_fop || !file->f_op->read
200 || ((*file->f_op->read)(file, (char __user *) start, end - start, &off) < 0))
201 {
202 ret = -EINVAL;
203 goto out;
204 }
205 }
206
207 skip_mmap:
208 if (!(prot & PROT_WRITE))
209 ret = sys_mprotect(PAGE_START(start), PAGE_SIZE, prot | old_prot);
210 out:
211 if (page)
212 free_page((unsigned long) page);
213 return ret;
214}
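/*
 * A worked example of the subpage case above (an illustrative sketch, again
 * assuming 16KB kernel pages): an ia32 mmap of [0x11000, 0x13000) only
 * partially covers the kernel page [0x10000, 0x14000), so mmap_subpage()
 * snapshots the old page, remaps it anonymously, and restores the untouched
 * head and tail around the requested subrange:
 */
#if 0
unsigned long start = 0x11000, end = 0x13000;
/* head restored: offset_in_page(start) == 0x1000 bytes at 0x10000 */
/* tail restored: PAGE_SIZE - offset_in_page(end) == 0x1000 bytes at 0x13000 */
#endif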
215
216/* SLAB cache for ia64_partial_page structures */
217struct kmem_cache *ia64_partial_page_cachep;
218
219/*
220 * Initialize an ia64_partial_page_list.
221 * Returns NULL if the kmalloc fails.
222 */
223struct ia64_partial_page_list*
224ia32_init_pp_list(void)
225{
226 struct ia64_partial_page_list *p;
227
228 if ((p = kmalloc(sizeof(*p), GFP_KERNEL)) == NULL)
229 return p;
230 p->pp_head = NULL;
231 p->ppl_rb = RB_ROOT;
232 p->pp_hint = NULL;
233 atomic_set(&p->pp_count, 1);
234 return p;
235}
236
237/*
238 * Search for the partial page with @start in partial page list @ppl.
239 * If the partial page is found, return it.
240 * Else, return NULL and provide @pprev, @rb_link, @rb_parent for
241 * use by a later __ia32_insert_pp().
242 */
243static struct ia64_partial_page *
244__ia32_find_pp(struct ia64_partial_page_list *ppl, unsigned int start,
245 struct ia64_partial_page **pprev, struct rb_node ***rb_link,
246 struct rb_node **rb_parent)
247{
248 struct ia64_partial_page *pp;
249 struct rb_node **__rb_link, *__rb_parent, *rb_prev;
250
251 pp = ppl->pp_hint;
252 if (pp && pp->base == start)
253 return pp;
254
255 __rb_link = &ppl->ppl_rb.rb_node;
256 rb_prev = __rb_parent = NULL;
257
258 while (*__rb_link) {
259 __rb_parent = *__rb_link;
260 pp = rb_entry(__rb_parent, struct ia64_partial_page, pp_rb);
261
262 if (pp->base == start) {
263 ppl->pp_hint = pp;
264 return pp;
265 } else if (pp->base < start) {
266 rb_prev = __rb_parent;
267 __rb_link = &__rb_parent->rb_right;
268 } else {
269 __rb_link = &__rb_parent->rb_left;
270 }
271 }
272
273 *rb_link = __rb_link;
274 *rb_parent = __rb_parent;
275 *pprev = NULL;
276 if (rb_prev)
277 *pprev = rb_entry(rb_prev, struct ia64_partial_page, pp_rb);
278 return NULL;
279}
280
281/*
282 * insert @pp into @ppl.
283 */
284static void
285__ia32_insert_pp(struct ia64_partial_page_list *ppl,
286 struct ia64_partial_page *pp, struct ia64_partial_page *prev,
287 struct rb_node **rb_link, struct rb_node *rb_parent)
288{
289 /* link list */
290 if (prev) {
291 pp->next = prev->next;
292 prev->next = pp;
293 } else {
294 ppl->pp_head = pp;
295 if (rb_parent)
296 pp->next = rb_entry(rb_parent,
297 struct ia64_partial_page, pp_rb);
298 else
299 pp->next = NULL;
300 }
301
302 /* link rb */
303 rb_link_node(&pp->pp_rb, rb_parent, rb_link);
304 rb_insert_color(&pp->pp_rb, &ppl->ppl_rb);
305
306 ppl->pp_hint = pp;
307}
308
309/*
310 * delete @pp from partial page list @ppl.
311 */
312static void
313__ia32_delete_pp(struct ia64_partial_page_list *ppl,
314 struct ia64_partial_page *pp, struct ia64_partial_page *prev)
315{
316 if (prev) {
317 prev->next = pp->next;
318 if (ppl->pp_hint == pp)
319 ppl->pp_hint = prev;
320 } else {
321 ppl->pp_head = pp->next;
322 if (ppl->pp_hint == pp)
323 ppl->pp_hint = pp->next;
324 }
325 rb_erase(&pp->pp_rb, &ppl->ppl_rb);
326 kmem_cache_free(ia64_partial_page_cachep, pp);
327}
328
329static struct ia64_partial_page *
330__pp_prev(struct ia64_partial_page *pp)
331{
332 struct rb_node *prev = rb_prev(&pp->pp_rb);
333 if (prev)
334 return rb_entry(prev, struct ia64_partial_page, pp_rb);
335 else
336 return NULL;
337}
338
339/*
340 * Delete partial pages with address between @start and @end.
341 * @start and @end are page aligned.
342 */
343static void
344__ia32_delete_pp_range(unsigned int start, unsigned int end)
345{
346 struct ia64_partial_page *pp, *prev;
347 struct rb_node **rb_link, *rb_parent;
348
349 if (start >= end)
350 return;
351
352 pp = __ia32_find_pp(current->thread.ppl, start, &prev,
353 &rb_link, &rb_parent);
354 if (pp)
355 prev = __pp_prev(pp);
356 else {
357 if (prev)
358 pp = prev->next;
359 else
360 pp = current->thread.ppl->pp_head;
361 }
362
363 while (pp && pp->base < end) {
364 struct ia64_partial_page *tmp = pp->next;
365 __ia32_delete_pp(current->thread.ppl, pp, prev);
366 pp = tmp;
367 }
368}
369
370/*
371 * Set the range between @start and @end in bitmap.
372 * @start and @end should be IA32 page aligned and in the same IA64 page.
373 */
374static int
375__ia32_set_pp(unsigned int start, unsigned int end, int flags)
376{
377 struct ia64_partial_page *pp, *prev;
378 struct rb_node ** rb_link, *rb_parent;
379 unsigned int pstart, start_bit, end_bit, i;
380
381 pstart = PAGE_START(start);
382 start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
383 end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
384 if (end_bit == 0)
385 end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
386 pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
387 &rb_link, &rb_parent);
388 if (pp) {
389 for (i = start_bit; i < end_bit; i++)
390 set_bit(i, &pp->bitmap);
391 /*
392 * Check: if this partial page has been set to a full page,
393 * then delete it.
394 */
395 if (find_first_zero_bit(&pp->bitmap, sizeof(pp->bitmap)*8) >=
396 PAGE_SIZE/IA32_PAGE_SIZE) {
397 __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
398 }
399 return 0;
400 }
401
402 /*
403 * MAP_FIXED may lead to overlapping mmap.
404 * In this case, the requested mmap area may already be mapped as a full
405 * page. So check the vma before adding a new partial page.
406 */
407 if (flags & MAP_FIXED) {
408 struct vm_area_struct *vma = find_vma(current->mm, pstart);
409 if (vma && vma->vm_start <= pstart)
410 return 0;
411 }
412
413	/* allocate a new ia64_partial_page */
414 pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
415 if (!pp)
416 return -ENOMEM;
417 pp->base = pstart;
418 pp->bitmap = 0;
419 for (i=start_bit; i<end_bit; i++)
420 set_bit(i, &(pp->bitmap));
421 pp->next = NULL;
422 __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
423 return 0;
424}
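/*
 * A minimal sketch of the bit arithmetic in __ia32_set_pp() (illustrative,
 * assuming 16KB kernel pages, i.e. four 4KB IA32 pages per bitmap): setting
 * [0x8005000, 0x8007000) marks bits 1 and 2 of the partial page based at
 * 0x8004000.
 */
#if 0
unsigned int start = 0x8005000, end = 0x8007000;
/* pstart    = PAGE_START(start)                    = 0x8004000 */
/* start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE = 1         */
/* end_bit   = (end   % PAGE_SIZE) / IA32_PAGE_SIZE = 3         */
/* resulting bitmap: 0b0110 -- IA32 pages 1 and 2 are mapped    */
#endif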
425
426/*
427 * @start and @end should be IA32 page aligned, but don't need to be in the
428 * same IA64 page. Split @start and @end to make sure they're in the same IA64
429 * page, then call __ia32_set_pp().
430 */
431static void
432ia32_set_pp(unsigned int start, unsigned int end, int flags)
433{
434 down_write(&current->mm->mmap_sem);
435 if (flags & MAP_FIXED) {
436 /*
437 * MAP_FIXED may lead to overlapping mmap. When this happens,
438 * a series of complete IA64 pages results in deletion of
439 * old partial pages in that range.
440 */
441 __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
442 }
443
444 if (end < PAGE_ALIGN(start)) {
445 __ia32_set_pp(start, end, flags);
446 } else {
447 if (offset_in_page(start))
448 __ia32_set_pp(start, PAGE_ALIGN(start), flags);
449 if (offset_in_page(end))
450 __ia32_set_pp(PAGE_START(end), end, flags);
451 }
452 up_write(&current->mm->mmap_sem);
453}
454
455/*
456 * Unset the range between @start and @end in bitmap.
457 * @start and @end should be IA32 page aligned and in the same IA64 page.
458 * After doing that, if the bitmap is 0, then free the page and return 1;
459 * else return 0.
460 * If the partial page is not found in the list, then:
461 * if the vma exists, the full page is converted to a partial page;
462 * else return -ENOMEM.
463 */
464static int
465__ia32_unset_pp(unsigned int start, unsigned int end)
466{
467 struct ia64_partial_page *pp, *prev;
468 struct rb_node ** rb_link, *rb_parent;
469 unsigned int pstart, start_bit, end_bit, i;
470 struct vm_area_struct *vma;
471
472 pstart = PAGE_START(start);
473 start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
474 end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
475 if (end_bit == 0)
476 end_bit = PAGE_SIZE / IA32_PAGE_SIZE;
477
478 pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
479 &rb_link, &rb_parent);
480 if (pp) {
481 for (i = start_bit; i < end_bit; i++)
482 clear_bit(i, &pp->bitmap);
483 if (pp->bitmap == 0) {
484 __ia32_delete_pp(current->thread.ppl, pp, __pp_prev(pp));
485 return 1;
486 }
487 return 0;
488 }
489
490 vma = find_vma(current->mm, pstart);
491 if (!vma || vma->vm_start > pstart) {
492 return -ENOMEM;
493 }
494
495	/* allocate a new ia64_partial_page */
496 pp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
497 if (!pp)
498 return -ENOMEM;
499 pp->base = pstart;
500 pp->bitmap = 0;
501 for (i = 0; i < start_bit; i++)
502 set_bit(i, &(pp->bitmap));
503 for (i = end_bit; i < PAGE_SIZE / IA32_PAGE_SIZE; i++)
504 set_bit(i, &(pp->bitmap));
505 pp->next = NULL;
506 __ia32_insert_pp(current->thread.ppl, pp, prev, rb_link, rb_parent);
507 return 0;
508}
509
510/*
511 * Delete pp between PAGE_ALIGN(start) and PAGE_START(end) by calling
512 * __ia32_delete_pp_range(). Unset possible partial pages by calling
513 * __ia32_unset_pp().
514 * For the return value, see __ia32_unset_pp().
515 */
516static int
517ia32_unset_pp(unsigned int *startp, unsigned int *endp)
518{
519 unsigned int start = *startp, end = *endp;
520 int ret = 0;
521
522 down_write(&current->mm->mmap_sem);
523
524 __ia32_delete_pp_range(PAGE_ALIGN(start), PAGE_START(end));
525
526 if (end < PAGE_ALIGN(start)) {
527 ret = __ia32_unset_pp(start, end);
528 if (ret == 1) {
529 *startp = PAGE_START(start);
530 *endp = PAGE_ALIGN(end);
531 }
532 if (ret == 0) {
533 /* to shortcut sys_munmap() in sys32_munmap() */
534 *startp = PAGE_START(start);
535 *endp = PAGE_START(end);
536 }
537 } else {
538 if (offset_in_page(start)) {
539 ret = __ia32_unset_pp(start, PAGE_ALIGN(start));
540 if (ret == 1)
541 *startp = PAGE_START(start);
542 if (ret == 0)
543 *startp = PAGE_ALIGN(start);
544 if (ret < 0)
545 goto out;
546 }
547 if (offset_in_page(end)) {
548 ret = __ia32_unset_pp(PAGE_START(end), end);
549 if (ret == 1)
550 *endp = PAGE_ALIGN(end);
551 if (ret == 0)
552 *endp = PAGE_START(end);
553 }
554 }
555
556 out:
557 up_write(&current->mm->mmap_sem);
558 return ret;
559}
560
561/*
562 * Compare the range between @start and @end with bitmap in partial page.
563 * @start and @end should be IA32 page aligned and in the same IA64 page.
564 */
565static int
566__ia32_compare_pp(unsigned int start, unsigned int end)
567{
568 struct ia64_partial_page *pp, *prev;
569 struct rb_node ** rb_link, *rb_parent;
570 unsigned int pstart, start_bit, end_bit, size;
571 unsigned int first_bit, next_zero_bit; /* the first range in bitmap */
572
573 pstart = PAGE_START(start);
574
575 pp = __ia32_find_pp(current->thread.ppl, pstart, &prev,
576 &rb_link, &rb_parent);
577 if (!pp)
578 return 1;
579
580 start_bit = (start % PAGE_SIZE) / IA32_PAGE_SIZE;
581 end_bit = (end % PAGE_SIZE) / IA32_PAGE_SIZE;
582 size = sizeof(pp->bitmap) * 8;
583 first_bit = find_first_bit(&pp->bitmap, size);
584 next_zero_bit = find_next_zero_bit(&pp->bitmap, size, first_bit);
585 if ((start_bit < first_bit) || (end_bit > next_zero_bit)) {
586 /* exceeds the first range in bitmap */
587 return -ENOMEM;
588 } else if ((start_bit == first_bit) && (end_bit == next_zero_bit)) {
589 first_bit = find_next_bit(&pp->bitmap, size, next_zero_bit);
590 if ((next_zero_bit < first_bit) && (first_bit < size))
591 return 1; /* has next range */
592 else
593 return 0; /* no next range */
594 } else
595 return 1;
596}
597
598/*
599 * @start and @end should be IA32 page aligned, but don't need to be in the
600 * same IA64 page. Split @start and @end to make sure they're in the same IA64
601 * page, then call __ia32_compare_pp().
602 *
603 * Take this as an example: the range is the 1st and 2nd 4K pages.
604 * Return 0 if they fit the bitmap exactly, i.e. bitmap = 00000011;
605 * Return 1 if the range doesn't cover the whole bitmap, e.g. bitmap = 00001111;
606 * Return -ENOMEM if the range exceeds the bitmap, e.g. bitmap = 00000001 or
607 * bitmap = 00000101.
608 */
609static int
610ia32_compare_pp(unsigned int *startp, unsigned int *endp)
611{
612 unsigned int start = *startp, end = *endp;
613 int retval = 0;
614
615 down_write(&current->mm->mmap_sem);
616
617 if (end < PAGE_ALIGN(start)) {
618 retval = __ia32_compare_pp(start, end);
619 if (retval == 0) {
620 *startp = PAGE_START(start);
621 *endp = PAGE_ALIGN(end);
622 }
623 } else {
624 if (offset_in_page(start)) {
625 retval = __ia32_compare_pp(start,
626 PAGE_ALIGN(start));
627 if (retval == 0)
628 *startp = PAGE_START(start);
629 if (retval < 0)
630 goto out;
631 }
632 if (offset_in_page(end)) {
633 retval = __ia32_compare_pp(PAGE_START(end), end);
634 if (retval == 0)
635 *endp = PAGE_ALIGN(end);
636 }
637 }
638
639 out:
640 up_write(&current->mm->mmap_sem);
641 return retval;
642}
643
644static void
645__ia32_drop_pp_list(struct ia64_partial_page_list *ppl)
646{
647 struct ia64_partial_page *pp = ppl->pp_head;
648
649 while (pp) {
650 struct ia64_partial_page *next = pp->next;
651 kmem_cache_free(ia64_partial_page_cachep, pp);
652 pp = next;
653 }
654
655 kfree(ppl);
656}
657
658void
659ia32_drop_ia64_partial_page_list(struct task_struct *task)
660{
661 struct ia64_partial_page_list* ppl = task->thread.ppl;
662
663 if (ppl && atomic_dec_and_test(&ppl->pp_count))
664 __ia32_drop_pp_list(ppl);
665}
666
667/*
668 * Copy current->thread.ppl to ppl (already initialized).
669 */
670static int
671__ia32_copy_pp_list(struct ia64_partial_page_list *ppl)
672{
673 struct ia64_partial_page *pp, *tmp, *prev;
674 struct rb_node **rb_link, *rb_parent;
675
676 ppl->pp_head = NULL;
677 ppl->pp_hint = NULL;
678 ppl->ppl_rb = RB_ROOT;
679 rb_link = &ppl->ppl_rb.rb_node;
680 rb_parent = NULL;
681 prev = NULL;
682
683 for (pp = current->thread.ppl->pp_head; pp; pp = pp->next) {
684 tmp = kmem_cache_alloc(ia64_partial_page_cachep, GFP_KERNEL);
685 if (!tmp)
686 return -ENOMEM;
687 *tmp = *pp;
688 __ia32_insert_pp(ppl, tmp, prev, rb_link, rb_parent);
689 prev = tmp;
690 rb_link = &tmp->pp_rb.rb_right;
691 rb_parent = &tmp->pp_rb;
692 }
693 return 0;
694}
695
696int
697ia32_copy_ia64_partial_page_list(struct task_struct *p,
698 unsigned long clone_flags)
699{
700 int retval = 0;
701
702 if (clone_flags & CLONE_VM) {
703 atomic_inc(&current->thread.ppl->pp_count);
704 p->thread.ppl = current->thread.ppl;
705 } else {
706 p->thread.ppl = ia32_init_pp_list();
707 if (!p->thread.ppl)
708 return -ENOMEM;
709 down_write(&current->mm->mmap_sem);
710 {
711 retval = __ia32_copy_pp_list(p->thread.ppl);
712 }
713 up_write(&current->mm->mmap_sem);
714 }
715
716 return retval;
717}
718
719static unsigned long
720emulate_mmap (struct file *file, unsigned long start, unsigned long len, int prot, int flags,
721 loff_t off)
722{
723 unsigned long tmp, end, pend, pstart, ret, is_congruent, fudge = 0;
724 struct inode *inode;
725 loff_t poff;
726
727 end = start + len;
728 pstart = PAGE_START(start);
729 pend = PAGE_ALIGN(end);
730
731 if (flags & MAP_FIXED) {
732 ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
733 if (start > pstart) {
734 if (flags & MAP_SHARED)
735 printk(KERN_INFO
736 "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
737 current->comm, task_pid_nr(current), start);
738 ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
739 off);
740 if (IS_ERR((void *) ret))
741 return ret;
742 pstart += PAGE_SIZE;
743 if (pstart >= pend)
744 goto out; /* done */
745 }
746 if (end < pend) {
747 if (flags & MAP_SHARED)
748 printk(KERN_INFO
749 "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
750 current->comm, task_pid_nr(current), end);
751 ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
752 (off + len) - offset_in_page(end));
753 if (IS_ERR((void *) ret))
754 return ret;
755 pend -= PAGE_SIZE;
756 if (pstart >= pend)
757 goto out; /* done */
758 }
759 } else {
760 /*
761 * If a start address was specified, use it if the entire rounded out area
762 * is available.
763 */
764 if (start && !pstart)
765 fudge = 1; /* handle case of mapping to range (0,PAGE_SIZE) */
766 tmp = arch_get_unmapped_area(file, pstart - fudge, pend - pstart, 0, flags);
767 if (tmp != pstart) {
768 pstart = tmp;
769 start = pstart + offset_in_page(off); /* make start congruent with off */
770 end = start + len;
771 pend = PAGE_ALIGN(end);
772 }
773 }
774
775 poff = off + (pstart - start); /* note: (pstart - start) may be negative */
776 is_congruent = (flags & MAP_ANONYMOUS) || (offset_in_page(poff) == 0);
777
778 if ((flags & MAP_SHARED) && !is_congruent)
779 printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
780 "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off);
781
782 DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
783 is_congruent ? "congruent" : "not congruent", poff);
784
785 down_write(&current->mm->mmap_sem);
786 {
787 if (!(flags & MAP_ANONYMOUS) && is_congruent)
788 ret = do_mmap(file, pstart, pend - pstart, prot, flags | MAP_FIXED, poff);
789 else
790 ret = do_mmap(NULL, pstart, pend - pstart,
791 prot | ((flags & MAP_ANONYMOUS) ? 0 : PROT_WRITE),
792 flags | MAP_FIXED | MAP_ANONYMOUS, 0);
793 }
794 up_write(&current->mm->mmap_sem);
795
796 if (IS_ERR((void *) ret))
797 return ret;
798
799 if (!is_congruent) {
800 /* read the file contents */
801 inode = file->f_path.dentry->d_inode;
802 if (!inode->i_fop || !file->f_op->read
803 || ((*file->f_op->read)(file, (char __user *) pstart, pend - pstart, &poff)
804 < 0))
805 {
806 sys_munmap(pstart, pend - pstart);
807 return -EINVAL;
808 }
809 if (!(prot & PROT_WRITE) && sys_mprotect(pstart, pend - pstart, prot) < 0)
810 return -EINVAL;
811 }
812
813 if (!(flags & MAP_FIXED))
814 ia32_set_pp((unsigned int)start, (unsigned int)end, flags);
815out:
816 return start;
817}
818
819#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
820
821static inline unsigned int
822get_prot32 (unsigned int prot)
823{
824 if (prot & PROT_WRITE)
825		/* on x86, PROT_WRITE implies PROT_READ which implies PROT_EXEC */
826 prot |= PROT_READ | PROT_WRITE | PROT_EXEC;
827 else if (prot & (PROT_READ | PROT_EXEC))
828 /* on x86, there is no distinction between PROT_READ and PROT_EXEC */
829 prot |= (PROT_READ | PROT_EXEC);
830
831 return prot;
832}
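/*
 * A minimal sketch of the widening that get_prot32() performs to mirror
 * x86 page-protection semantics (illustrative only):
 */
#if 0
static void __example_get_prot32(void)
{
	BUG_ON(get_prot32(PROT_WRITE) != (PROT_READ | PROT_WRITE | PROT_EXEC));
	BUG_ON(get_prot32(PROT_READ) != (PROT_READ | PROT_EXEC));
	BUG_ON(get_prot32(PROT_NONE) != PROT_NONE);
}
#endif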
833
834unsigned long
835ia32_do_mmap (struct file *file, unsigned long addr, unsigned long len, int prot, int flags,
836 loff_t offset)
837{
838 DBG("ia32_do_mmap(file=%p,addr=0x%lx,len=0x%lx,prot=%x,flags=%x,offset=0x%llx)\n",
839 file, addr, len, prot, flags, offset);
840
841 if (file && (!file->f_op || !file->f_op->mmap))
842 return -ENODEV;
843
844 len = IA32_PAGE_ALIGN(len);
845 if (len == 0)
846 return addr;
847
848 if (len > IA32_PAGE_OFFSET || addr > IA32_PAGE_OFFSET - len)
849 {
850 if (flags & MAP_FIXED)
851 return -ENOMEM;
852 else
853 return -EINVAL;
854 }
855
856 if (OFFSET4K(offset))
857 return -EINVAL;
858
859 prot = get_prot32(prot);
860
861#if PAGE_SHIFT > IA32_PAGE_SHIFT
862 mutex_lock(&ia32_mmap_mutex);
863 {
864 addr = emulate_mmap(file, addr, len, prot, flags, offset);
865 }
866 mutex_unlock(&ia32_mmap_mutex);
867#else
868 down_write(&current->mm->mmap_sem);
869 {
870 addr = do_mmap(file, addr, len, prot, flags, offset);
871 }
872 up_write(&current->mm->mmap_sem);
873#endif
874 DBG("ia32_do_mmap: returning 0x%lx\n", addr);
875 return addr;
876}
877
878/*
879 * Linux/i386 was once unable to handle more than 4 system call parameters, so these
880 * system calls used a memory block for parameter passing.
881 */
882
883struct mmap_arg_struct {
884 unsigned int addr;
885 unsigned int len;
886 unsigned int prot;
887 unsigned int flags;
888 unsigned int fd;
889 unsigned int offset;
890};
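/*
 * A hypothetical ia32 user-space sketch of the old-style call: all six
 * arguments travel through one struct whose address is the sole syscall
 * parameter. The syscall number 90 (old_mmap on i386) is shown for
 * illustration only.
 */
#if 0
struct mmap_arg_struct a = {
	.addr = 0, .len = 4096, .prot = PROT_READ | PROT_WRITE,
	.flags = MAP_PRIVATE | MAP_ANONYMOUS, .fd = (unsigned int) -1,
	.offset = 0,
};
/* void *p = (void *) syscall(90, &a); */
#endif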
891
892asmlinkage long
893sys32_mmap (struct mmap_arg_struct __user *arg)
894{
895 struct mmap_arg_struct a;
896 struct file *file = NULL;
897 unsigned long addr;
898 int flags;
899
900 if (copy_from_user(&a, arg, sizeof(a)))
901 return -EFAULT;
902
903 if (OFFSET4K(a.offset))
904 return -EINVAL;
905
906 flags = a.flags;
907
908 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
909 if (!(flags & MAP_ANONYMOUS)) {
910 file = fget(a.fd);
911 if (!file)
912 return -EBADF;
913 }
914
915 addr = ia32_do_mmap(file, a.addr, a.len, a.prot, flags, a.offset);
916
917 if (file)
918 fput(file);
919 return addr;
920}
921
922asmlinkage long
923sys32_mmap2 (unsigned int addr, unsigned int len, unsigned int prot, unsigned int flags,
924 unsigned int fd, unsigned int pgoff)
925{
926 struct file *file = NULL;
927 unsigned long retval;
928
929 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
930 if (!(flags & MAP_ANONYMOUS)) {
931 file = fget(fd);
932 if (!file)
933 return -EBADF;
934 }
935
936 retval = ia32_do_mmap(file, addr, len, prot, flags,
937 (unsigned long) pgoff << IA32_PAGE_SHIFT);
938
939 if (file)
940 fput(file);
941 return retval;
942}
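/*
 * A minimal sketch (illustrative only): mmap2 passes the file offset in
 * 4KB units, so 32-bit callers can address file offsets beyond 4GB.
 */
#if 0
unsigned int pgoff = 0x100000;	/* one million 4KB pages */
loff_t offset = (unsigned long) pgoff << IA32_PAGE_SHIFT;	/* 0x100000000 == 4GB */
#endif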
943
944asmlinkage long
945sys32_munmap (unsigned int start, unsigned int len)
946{
947 unsigned int end = start + len;
948 long ret;
949
950#if PAGE_SHIFT <= IA32_PAGE_SHIFT
951 ret = sys_munmap(start, end - start);
952#else
953 if (OFFSET4K(start))
954 return -EINVAL;
955
956 end = IA32_PAGE_ALIGN(end);
957 if (start >= end)
958 return -EINVAL;
959
960 ret = ia32_unset_pp(&start, &end);
961 if (ret < 0)
962 return ret;
963
964 if (start >= end)
965 return 0;
966
967 mutex_lock(&ia32_mmap_mutex);
968 ret = sys_munmap(start, end - start);
969 mutex_unlock(&ia32_mmap_mutex);
970#endif
971 return ret;
972}
973
974#if PAGE_SHIFT > IA32_PAGE_SHIFT
975
976/*
977 * When mprotect()ing a partial page, we set the permission to the union of the old
978 * settings and the new settings. In other words, it's only possible to make access to a
979 * partial page less restrictive.
980 */
981static long
982mprotect_subpage (unsigned long address, int new_prot)
983{
984 int old_prot;
985 struct vm_area_struct *vma;
986
987 if (new_prot == PROT_NONE)
988 return 0; /* optimize case where nothing changes... */
989 vma = find_vma(current->mm, address);
990 old_prot = get_page_prot(vma, address);
991 return sys_mprotect(address, PAGE_SIZE, new_prot | old_prot);
992}
993
994#endif /* PAGE_SHIFT > IA32_PAGE_SHIFT */
995
996asmlinkage long
997sys32_mprotect (unsigned int start, unsigned int len, int prot)
998{
999 unsigned int end = start + len;
1000#if PAGE_SHIFT > IA32_PAGE_SHIFT
1001 long retval = 0;
1002#endif
1003
1004 prot = get_prot32(prot);
1005
1006#if PAGE_SHIFT <= IA32_PAGE_SHIFT
1007 return sys_mprotect(start, end - start, prot);
1008#else
1009 if (OFFSET4K(start))
1010 return -EINVAL;
1011
1012 end = IA32_PAGE_ALIGN(end);
1013 if (end < start)
1014 return -EINVAL;
1015
1016 retval = ia32_compare_pp(&start, &end);
1017
1018 if (retval < 0)
1019 return retval;
1020
1021 mutex_lock(&ia32_mmap_mutex);
1022 {
1023 if (offset_in_page(start)) {
1024 /* start address is 4KB aligned but not page aligned. */
1025 retval = mprotect_subpage(PAGE_START(start), prot);
1026 if (retval < 0)
1027 goto out;
1028
1029 start = PAGE_ALIGN(start);
1030 if (start >= end)
1031 goto out; /* retval is already zero... */
1032 }
1033
1034 if (offset_in_page(end)) {
1035 /* end address is 4KB aligned but not page aligned. */
1036 retval = mprotect_subpage(PAGE_START(end), prot);
1037 if (retval < 0)
1038 goto out;
1039
1040 end = PAGE_START(end);
1041 }
1042 retval = sys_mprotect(start, end - start, prot);
1043 }
1044 out:
1045 mutex_unlock(&ia32_mmap_mutex);
1046 return retval;
1047#endif
1048}
1049
1050asmlinkage long
1051sys32_mremap (unsigned int addr, unsigned int old_len, unsigned int new_len,
1052 unsigned int flags, unsigned int new_addr)
1053{
1054 long ret;
1055
1056#if PAGE_SHIFT <= IA32_PAGE_SHIFT
1057 ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
1058#else
1059 unsigned int old_end, new_end;
1060
1061 if (OFFSET4K(addr))
1062 return -EINVAL;
1063
1064 old_len = IA32_PAGE_ALIGN(old_len);
1065 new_len = IA32_PAGE_ALIGN(new_len);
1066 old_end = addr + old_len;
1067 new_end = addr + new_len;
1068
1069 if (!new_len)
1070 return -EINVAL;
1071
1072 if ((flags & MREMAP_FIXED) && (OFFSET4K(new_addr)))
1073 return -EINVAL;
1074
1075 if (old_len >= new_len) {
1076 ret = sys32_munmap(addr + new_len, old_len - new_len);
1077 if (ret && old_len != new_len)
1078 return ret;
1079 ret = addr;
1080 if (!(flags & MREMAP_FIXED) || (new_addr == addr))
1081 return ret;
1082 old_len = new_len;
1083 }
1084
1085 addr = PAGE_START(addr);
1086 old_len = PAGE_ALIGN(old_end) - addr;
1087 new_len = PAGE_ALIGN(new_end) - addr;
1088
1089 mutex_lock(&ia32_mmap_mutex);
1090 ret = sys_mremap(addr, old_len, new_len, flags, new_addr);
1091 mutex_unlock(&ia32_mmap_mutex);
1092
1093 if ((ret >= 0) && (old_len < new_len)) {
1094 /* mremap expanded successfully */
1095 ia32_set_pp(old_end, new_end, flags);
1096 }
1097#endif
1098 return ret;
1099}
1100
1101asmlinkage unsigned long
1102sys32_alarm (unsigned int seconds)
1103{
1104 return alarm_setitimer(seconds);
1105}
1106
1107struct sel_arg_struct {
1108 unsigned int n;
1109 unsigned int inp;
1110 unsigned int outp;
1111 unsigned int exp;
1112 unsigned int tvp;
1113};
1114
1115asmlinkage long
1116sys32_old_select (struct sel_arg_struct __user *arg)
1117{
1118 struct sel_arg_struct a;
1119
1120 if (copy_from_user(&a, arg, sizeof(a)))
1121 return -EFAULT;
1122 return compat_sys_select(a.n, compat_ptr(a.inp), compat_ptr(a.outp),
1123 compat_ptr(a.exp), compat_ptr(a.tvp));
1124}
1125
1126#define SEMOP 1
1127#define SEMGET 2
1128#define SEMCTL 3
1129#define SEMTIMEDOP 4
1130#define MSGSND 11
1131#define MSGRCV 12
1132#define MSGGET 13
1133#define MSGCTL 14
1134#define SHMAT 21
1135#define SHMDT 22
1136#define SHMGET 23
1137#define SHMCTL 24
1138
1139asmlinkage long
1140sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
1141{
1142 int version;
1143
1144 version = call >> 16; /* hack for backward compatibility */
1145 call &= 0xffff;
1146
1147 switch (call) {
1148 case SEMTIMEDOP:
1149 if (fifth)
1150 return compat_sys_semtimedop(first, compat_ptr(ptr),
1151 second, compat_ptr(fifth));
1152 /* else fall through for normal semop() */
1153 case SEMOP:
1154 /* struct sembuf is the same on 32 and 64bit :)) */
1155 return sys_semtimedop(first, compat_ptr(ptr), second,
1156 NULL);
1157 case SEMGET:
1158 return sys_semget(first, second, third);
1159 case SEMCTL:
1160 return compat_sys_semctl(first, second, third, compat_ptr(ptr));
1161
1162 case MSGSND:
1163 return compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
1164 case MSGRCV:
1165 return compat_sys_msgrcv(first, second, fifth, third, version, compat_ptr(ptr));
1166 case MSGGET:
1167 return sys_msgget((key_t) first, second);
1168 case MSGCTL:
1169 return compat_sys_msgctl(first, second, compat_ptr(ptr));
1170
1171 case SHMAT:
1172 return compat_sys_shmat(first, second, third, version, compat_ptr(ptr));
1173 break;
1174 case SHMDT:
1175 return sys_shmdt(compat_ptr(ptr));
1176 case SHMGET:
1177 return sys_shmget(first, (unsigned)second, third);
1178 case SHMCTL:
1179 return compat_sys_shmctl(first, second, compat_ptr(ptr));
1180
1181 default:
1182 return -ENOSYS;
1183 }
1184 return -EINVAL;
1185}
1186
1187asmlinkage long
1188compat_sys_wait4 (compat_pid_t pid, compat_uint_t * stat_addr, int options,
1189 struct compat_rusage *ru);
1190
1191asmlinkage long
1192sys32_waitpid (int pid, unsigned int *stat_addr, int options)
1193{
1194 return compat_sys_wait4(pid, stat_addr, options, NULL);
1195}
1196
1197/*
1198 * The order in which registers are stored in the ptrace regs structure
1199 */
1200#define PT_EBX 0
1201#define PT_ECX 1
1202#define PT_EDX 2
1203#define PT_ESI 3
1204#define PT_EDI 4
1205#define PT_EBP 5
1206#define PT_EAX 6
1207#define PT_DS 7
1208#define PT_ES 8
1209#define PT_FS 9
1210#define PT_GS 10
1211#define PT_ORIG_EAX 11
1212#define PT_EIP 12
1213#define PT_CS 13
1214#define PT_EFL 14
1215#define PT_UESP 15
1216#define PT_SS 16
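/*
 * A hypothetical debugger-side sketch: with the offsets above, peeking the
 * child's EAX (which getreg() below maps to the ia64 r8 register) would
 * look like this from a 32-bit tracer (PTRACE_PEEKUSER is the user-space
 * name for PTRACE_PEEKUSR):
 */
#if 0
long eax = ptrace(PTRACE_PEEKUSER, pid, (void *) (PT_EAX * sizeof(int)), NULL);
#endif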
1217
1218static unsigned int
1219getreg (struct task_struct *child, int regno)
1220{
1221 struct pt_regs *child_regs;
1222
1223 child_regs = task_pt_regs(child);
1224 switch (regno / sizeof(int)) {
1225 case PT_EBX: return child_regs->r11;
1226 case PT_ECX: return child_regs->r9;
1227 case PT_EDX: return child_regs->r10;
1228 case PT_ESI: return child_regs->r14;
1229 case PT_EDI: return child_regs->r15;
1230 case PT_EBP: return child_regs->r13;
1231 case PT_EAX: return child_regs->r8;
1232 case PT_ORIG_EAX: return child_regs->r1; /* see dispatch_to_ia32_handler() */
1233 case PT_EIP: return child_regs->cr_iip;
1234 case PT_UESP: return child_regs->r12;
1235 case PT_EFL: return child->thread.eflag;
1236 case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
1237 return __USER_DS;
1238 case PT_CS: return __USER_CS;
1239 default:
1240 printk(KERN_ERR "ia32.getreg(): unknown register %d\n", regno);
1241 break;
1242 }
1243 return 0;
1244}
1245
1246static void
1247putreg (struct task_struct *child, int regno, unsigned int value)
1248{
1249 struct pt_regs *child_regs;
1250
1251 child_regs = task_pt_regs(child);
1252 switch (regno / sizeof(int)) {
1253 case PT_EBX: child_regs->r11 = value; break;
1254 case PT_ECX: child_regs->r9 = value; break;
1255 case PT_EDX: child_regs->r10 = value; break;
1256 case PT_ESI: child_regs->r14 = value; break;
1257 case PT_EDI: child_regs->r15 = value; break;
1258 case PT_EBP: child_regs->r13 = value; break;
1259 case PT_EAX: child_regs->r8 = value; break;
1260 case PT_ORIG_EAX: child_regs->r1 = value; break;
1261 case PT_EIP: child_regs->cr_iip = value; break;
1262 case PT_UESP: child_regs->r12 = value; break;
1263 case PT_EFL: child->thread.eflag = value; break;
1264 case PT_DS: case PT_ES: case PT_FS: case PT_GS: case PT_SS:
1265 if (value != __USER_DS)
1266 printk(KERN_ERR
1267 "ia32.putreg: attempt to set invalid segment register %d = %x\n",
1268 regno, value);
1269 break;
1270 case PT_CS:
1271 if (value != __USER_CS)
1272 printk(KERN_ERR
1273 "ia32.putreg: attempt to set invalid segment register %d = %x\n",
1274 regno, value);
1275 break;
1276 default:
1277 printk(KERN_ERR "ia32.putreg: unknown register %d\n", regno);
1278 break;
1279 }
1280}
1281
1282static void
1283put_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
1284 struct switch_stack *swp, int tos)
1285{
1286 struct _fpreg_ia32 *f;
1287 char buf[32];
1288
1289 f = (struct _fpreg_ia32 *)(((unsigned long)buf + 15) & ~15);
1290 if ((regno += tos) >= 8)
1291 regno -= 8;
1292 switch (regno) {
1293 case 0:
1294 ia64f2ia32f(f, &ptp->f8);
1295 break;
1296 case 1:
1297 ia64f2ia32f(f, &ptp->f9);
1298 break;
1299 case 2:
1300 ia64f2ia32f(f, &ptp->f10);
1301 break;
1302 case 3:
1303 ia64f2ia32f(f, &ptp->f11);
1304 break;
1305 case 4:
1306 case 5:
1307 case 6:
1308 case 7:
1309 ia64f2ia32f(f, &swp->f12 + (regno - 4));
1310 break;
1311 }
1312 copy_to_user(reg, f, sizeof(*reg));
1313}
1314
1315static void
1316get_fpreg (int regno, struct _fpreg_ia32 __user *reg, struct pt_regs *ptp,
1317 struct switch_stack *swp, int tos)
1318{
1319
1320 if ((regno += tos) >= 8)
1321 regno -= 8;
1322 switch (regno) {
1323 case 0:
1324 copy_from_user(&ptp->f8, reg, sizeof(*reg));
1325 break;
1326 case 1:
1327 copy_from_user(&ptp->f9, reg, sizeof(*reg));
1328 break;
1329 case 2:
1330 copy_from_user(&ptp->f10, reg, sizeof(*reg));
1331 break;
1332 case 3:
1333 copy_from_user(&ptp->f11, reg, sizeof(*reg));
1334 break;
1335 case 4:
1336 case 5:
1337 case 6:
1338 case 7:
1339 copy_from_user(&swp->f12 + (regno - 4), reg, sizeof(*reg));
1340 break;
1341 }
1342 return;
1343}
1344
1345int
1346save_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
1347{
1348 struct switch_stack *swp;
1349 struct pt_regs *ptp;
1350 int i, tos;
1351
1352 if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
1353 return -EFAULT;
1354
1355 __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
1356 __put_user(tsk->thread.fsr & 0xffff, &save->swd);
1357 __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
1358 __put_user(tsk->thread.fir, &save->fip);
1359 __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
1360 __put_user(tsk->thread.fdr, &save->foo);
1361 __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
1362
1363 /*
1364 * Stack frames start with 16 bytes of temp space
1365 */
1366 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1367 ptp = task_pt_regs(tsk);
1368 tos = (tsk->thread.fsr >> 11) & 7;
1369 for (i = 0; i < 8; i++)
1370 put_fpreg(i, &save->st_space[i], ptp, swp, tos);
1371 return 0;
1372}
1373
1374static int
1375restore_ia32_fpstate (struct task_struct *tsk, struct ia32_user_i387_struct __user *save)
1376{
1377 struct switch_stack *swp;
1378 struct pt_regs *ptp;
1379 int i, tos;
1380 unsigned int fsrlo, fsrhi, num32;
1381
1382 if (!access_ok(VERIFY_READ, save, sizeof(*save)))
1383 return(-EFAULT);
1384
1385 __get_user(num32, (unsigned int __user *)&save->cwd);
1386 tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
1387 __get_user(fsrlo, (unsigned int __user *)&save->swd);
1388 __get_user(fsrhi, (unsigned int __user *)&save->twd);
1389 num32 = (fsrhi << 16) | fsrlo;
1390 tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
1391 __get_user(num32, (unsigned int __user *)&save->fip);
1392 tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
1393 __get_user(num32, (unsigned int __user *)&save->foo);
1394 tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
1395
1396 /*
1397 * Stack frames start with 16 bytes of temp space
1398 */
1399 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1400 ptp = task_pt_regs(tsk);
1401 tos = (tsk->thread.fsr >> 11) & 7;
1402 for (i = 0; i < 8; i++)
1403 get_fpreg(i, &save->st_space[i], ptp, swp, tos);
1404 return 0;
1405}
1406
1407int
1408save_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
1409{
1410 struct switch_stack *swp;
1411 struct pt_regs *ptp;
1412 int i, tos;
1413 unsigned long mxcsr=0;
1414 unsigned long num128[2];
1415
1416 if (!access_ok(VERIFY_WRITE, save, sizeof(*save)))
1417 return -EFAULT;
1418
1419 __put_user(tsk->thread.fcr & 0xffff, &save->cwd);
1420 __put_user(tsk->thread.fsr & 0xffff, &save->swd);
1421 __put_user((tsk->thread.fsr>>16) & 0xffff, &save->twd);
1422 __put_user(tsk->thread.fir, &save->fip);
1423 __put_user((tsk->thread.fir>>32) & 0xffff, &save->fcs);
1424 __put_user(tsk->thread.fdr, &save->foo);
1425 __put_user((tsk->thread.fdr>>32) & 0xffff, &save->fos);
1426
1427 /*
1428 * Stack frames start with 16 bytes of temp space
1429 */
1430 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1431 ptp = task_pt_regs(tsk);
1432 tos = (tsk->thread.fsr >> 11) & 7;
1433 for (i = 0; i < 8; i++)
1434 put_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
1435
1436 mxcsr = ((tsk->thread.fcr>>32) & 0xff80) | ((tsk->thread.fsr>>32) & 0x3f);
1437 __put_user(mxcsr & 0xffff, &save->mxcsr);
1438 for (i = 0; i < 8; i++) {
1439 memcpy(&(num128[0]), &(swp->f16) + i*2, sizeof(unsigned long));
1440 memcpy(&(num128[1]), &(swp->f17) + i*2, sizeof(unsigned long));
1441 copy_to_user(&save->xmm_space[0] + 4*i, num128, sizeof(struct _xmmreg_ia32));
1442 }
1443 return 0;
1444}
1445
1446static int
1447restore_ia32_fpxstate (struct task_struct *tsk, struct ia32_user_fxsr_struct __user *save)
1448{
1449 struct switch_stack *swp;
1450 struct pt_regs *ptp;
1451 int i, tos;
1452 unsigned int fsrlo, fsrhi, num32;
1453 int mxcsr;
1454 unsigned long num64;
1455 unsigned long num128[2];
1456
1457 if (!access_ok(VERIFY_READ, save, sizeof(*save)))
1458 return(-EFAULT);
1459
1460 __get_user(num32, (unsigned int __user *)&save->cwd);
1461 tsk->thread.fcr = (tsk->thread.fcr & (~0x1f3f)) | (num32 & 0x1f3f);
1462 __get_user(fsrlo, (unsigned int __user *)&save->swd);
1463 __get_user(fsrhi, (unsigned int __user *)&save->twd);
1464 num32 = (fsrhi << 16) | fsrlo;
1465 tsk->thread.fsr = (tsk->thread.fsr & (~0xffffffff)) | num32;
1466 __get_user(num32, (unsigned int __user *)&save->fip);
1467 tsk->thread.fir = (tsk->thread.fir & (~0xffffffff)) | num32;
1468 __get_user(num32, (unsigned int __user *)&save->foo);
1469 tsk->thread.fdr = (tsk->thread.fdr & (~0xffffffff)) | num32;
1470
1471 /*
1472 * Stack frames start with 16 bytes of temp space
1473 */
1474 swp = (struct switch_stack *)(tsk->thread.ksp + 16);
1475 ptp = task_pt_regs(tsk);
1476 tos = (tsk->thread.fsr >> 11) & 7;
1477 for (i = 0; i < 8; i++)
1478 get_fpreg(i, (struct _fpreg_ia32 __user *)&save->st_space[4*i], ptp, swp, tos);
1479
1480 __get_user(mxcsr, (unsigned int __user *)&save->mxcsr);
1481 num64 = mxcsr & 0xff10;
1482 tsk->thread.fcr = (tsk->thread.fcr & (~0xff1000000000UL)) | (num64<<32);
1483 num64 = mxcsr & 0x3f;
1484 tsk->thread.fsr = (tsk->thread.fsr & (~0x3f00000000UL)) | (num64<<32);
1485
1486 for (i = 0; i < 8; i++) {
1487 copy_from_user(num128, &save->xmm_space[0] + 4*i, sizeof(struct _xmmreg_ia32));
1488 memcpy(&(swp->f16) + i*2, &(num128[0]), sizeof(unsigned long));
1489 memcpy(&(swp->f17) + i*2, &(num128[1]), sizeof(unsigned long));
1490 }
1491 return 0;
1492}
1493
1494long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
1495 compat_ulong_t caddr, compat_ulong_t cdata)
1496{
1497 unsigned long addr = caddr;
1498 unsigned long data = cdata;
1499 unsigned int tmp;
1500 long i, ret;
1501
1502 switch (request) {
1503 case PTRACE_PEEKUSR: /* read word at addr in USER area */
1504 ret = -EIO;
1505 if ((addr & 3) || addr > 17*sizeof(int))
1506 break;
1507
1508 tmp = getreg(child, addr);
1509 if (!put_user(tmp, (unsigned int __user *) compat_ptr(data)))
1510 ret = 0;
1511 break;
1512
1513 case PTRACE_POKEUSR: /* write word at addr in USER area */
1514 ret = -EIO;
1515 if ((addr & 3) || addr > 17*sizeof(int))
1516 break;
1517
1518 putreg(child, addr, data);
1519 ret = 0;
1520 break;
1521
1522 case IA32_PTRACE_GETREGS:
1523 if (!access_ok(VERIFY_WRITE, compat_ptr(data), 17*sizeof(int))) {
1524 ret = -EIO;
1525 break;
1526 }
1527 for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
1528 put_user(getreg(child, i), (unsigned int __user *) compat_ptr(data));
1529 data += sizeof(int);
1530 }
1531 ret = 0;
1532 break;
1533
1534 case IA32_PTRACE_SETREGS:
1535 if (!access_ok(VERIFY_READ, compat_ptr(data), 17*sizeof(int))) {
1536 ret = -EIO;
1537 break;
1538 }
1539 for (i = 0; i < (int) (17*sizeof(int)); i += sizeof(int) ) {
1540 get_user(tmp, (unsigned int __user *) compat_ptr(data));
1541 putreg(child, i, tmp);
1542 data += sizeof(int);
1543 }
1544 ret = 0;
1545 break;
1546
1547 case IA32_PTRACE_GETFPREGS:
1548 ret = save_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
1549 compat_ptr(data));
1550 break;
1551
1552 case IA32_PTRACE_GETFPXREGS:
1553 ret = save_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
1554 compat_ptr(data));
1555 break;
1556
1557 case IA32_PTRACE_SETFPREGS:
1558 ret = restore_ia32_fpstate(child, (struct ia32_user_i387_struct __user *)
1559 compat_ptr(data));
1560 break;
1561
1562 case IA32_PTRACE_SETFPXREGS:
1563 ret = restore_ia32_fpxstate(child, (struct ia32_user_fxsr_struct __user *)
1564 compat_ptr(data));
1565 break;
1566
1567 default:
1568 return compat_ptrace_request(child, request, caddr, cdata);
1569 }
1570 return ret;
1571}
1572
1573typedef struct {
1574 unsigned int ss_sp;
1575 unsigned int ss_flags;
1576 unsigned int ss_size;
1577} ia32_stack_t;
1578
1579asmlinkage long
1580sys32_sigaltstack (ia32_stack_t __user *uss32, ia32_stack_t __user *uoss32,
1581 long arg2, long arg3, long arg4, long arg5, long arg6,
1582 long arg7, struct pt_regs pt)
1583{
1584 stack_t uss, uoss;
1585 ia32_stack_t buf32;
1586 int ret;
1587 mm_segment_t old_fs = get_fs();
1588
1589 if (uss32) {
1590 if (copy_from_user(&buf32, uss32, sizeof(ia32_stack_t)))
1591 return -EFAULT;
1592 uss.ss_sp = (void __user *) (long) buf32.ss_sp;
1593 uss.ss_flags = buf32.ss_flags;
1594 /* MINSIGSTKSZ is different for ia32 vs ia64. We lie here to pass the
1595	   check and set it to the user-requested value later */
1596 if ((buf32.ss_flags != SS_DISABLE) && (buf32.ss_size < MINSIGSTKSZ_IA32)) {
1597 ret = -ENOMEM;
1598 goto out;
1599 }
1600 uss.ss_size = MINSIGSTKSZ;
1601 }
1602 set_fs(KERNEL_DS);
1603 ret = do_sigaltstack(uss32 ? (stack_t __user *) &uss : NULL,
1604 (stack_t __user *) &uoss, pt.r12);
1605	if (uss32)
		current->sas_ss_size = buf32.ss_size;	/* buf32 is only valid when a new stack was supplied */
1606 set_fs(old_fs);
1607out:
1608 if (ret < 0)
1609 return(ret);
1610 if (uoss32) {
1611 buf32.ss_sp = (long __user) uoss.ss_sp;
1612 buf32.ss_flags = uoss.ss_flags;
1613 buf32.ss_size = uoss.ss_size;
1614 if (copy_to_user(uoss32, &buf32, sizeof(ia32_stack_t)))
1615 return -EFAULT;
1616 }
1617 return ret;
1618}
1619
1620asmlinkage int
1621sys32_msync (unsigned int start, unsigned int len, int flags)
1622{
1623 unsigned int addr;
1624
1625 if (OFFSET4K(start))
1626 return -EINVAL;
1627 addr = PAGE_START(start);
1628 return sys_msync(addr, len + (start - addr), flags);
1629}
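/*
 * A worked example (illustrative, assuming 16KB kernel pages): an ia32
 * msync(0x11000, 0x2000) is widened so the kernel sees a page-aligned
 * range.
 */
#if 0
/* start = 0x11000, len = 0x2000               */
/* addr  = PAGE_START(start) = 0x10000         */
/* sys_msync(0x10000, 0x2000 + 0x1000, flags)  */
#endif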
1630
1631struct sysctl32 {
1632 unsigned int name;
1633 int nlen;
1634 unsigned int oldval;
1635 unsigned int oldlenp;
1636 unsigned int newval;
1637 unsigned int newlen;
1638 unsigned int __unused[4];
1639};
1640
1641#ifdef CONFIG_SYSCTL_SYSCALL
1642asmlinkage long
1643sys32_sysctl (struct sysctl32 __user *args)
1644{
1645 struct sysctl32 a32;
1646 mm_segment_t old_fs = get_fs ();
1647 void __user *oldvalp, *newvalp;
1648 size_t oldlen;
1649 int __user *namep;
1650 long ret;
1651
1652 if (copy_from_user(&a32, args, sizeof(a32)))
1653 return -EFAULT;
1654
1655 /*
1656 * We need to pre-validate these because we have to disable address checking
1657 * before calling do_sysctl() because of OLDLEN but we can't run the risk of the
1658 * user specifying bad addresses here. Well, since we're dealing with 32 bit
1659 * addresses, we KNOW that access_ok() will always succeed, so this is an
1660 * expensive NOP, but so what...
1661 */
1662 namep = (int __user *) compat_ptr(a32.name);
1663 oldvalp = compat_ptr(a32.oldval);
1664 newvalp = compat_ptr(a32.newval);
1665
1666 if ((oldvalp && get_user(oldlen, (int __user *) compat_ptr(a32.oldlenp)))
1667 || !access_ok(VERIFY_WRITE, namep, 0)
1668 || !access_ok(VERIFY_WRITE, oldvalp, 0)
1669 || !access_ok(VERIFY_WRITE, newvalp, 0))
1670 return -EFAULT;
1671
1672 set_fs(KERNEL_DS);
1673 lock_kernel();
1674 ret = do_sysctl(namep, a32.nlen, oldvalp, (size_t __user *) &oldlen,
1675 newvalp, (size_t) a32.newlen);
1676 unlock_kernel();
1677 set_fs(old_fs);
1678
1679 if (oldvalp && put_user (oldlen, (int __user *) compat_ptr(a32.oldlenp)))
1680 return -EFAULT;
1681
1682 return ret;
1683}
1684#endif
1685
1686asmlinkage long
1687sys32_newuname (struct new_utsname __user *name)
1688{
1689 int ret = sys_newuname(name);
1690
1691 if (!ret)
1692 if (copy_to_user(name->machine, "i686\0\0\0", 8))
1693 ret = -EFAULT;
1694 return ret;
1695}
1696
1697asmlinkage long
1698sys32_getresuid16 (u16 __user *ruid, u16 __user *euid, u16 __user *suid)
1699{
1700 uid_t a, b, c;
1701 int ret;
1702 mm_segment_t old_fs = get_fs();
1703
1704 set_fs(KERNEL_DS);
1705 ret = sys_getresuid((uid_t __user *) &a, (uid_t __user *) &b, (uid_t __user *) &c);
1706 set_fs(old_fs);
1707
1708 if (put_user(a, ruid) || put_user(b, euid) || put_user(c, suid))
1709 return -EFAULT;
1710 return ret;
1711}
1712
1713asmlinkage long
1714sys32_getresgid16 (u16 __user *rgid, u16 __user *egid, u16 __user *sgid)
1715{
1716 gid_t a, b, c;
1717 int ret;
1718 mm_segment_t old_fs = get_fs();
1719
1720 set_fs(KERNEL_DS);
1721 ret = sys_getresgid((gid_t __user *) &a, (gid_t __user *) &b, (gid_t __user *) &c);
1722 set_fs(old_fs);
1723
1724 if (ret)
1725 return ret;
1726
1727 return put_user(a, rgid) | put_user(b, egid) | put_user(c, sgid);
1728}
1729
1730asmlinkage long
1731sys32_lseek (unsigned int fd, int offset, unsigned int whence)
1732{
1733 /* Sign-extension of "offset" is important here... */
1734 return sys_lseek(fd, offset, whence);
1735}
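/*
 * A minimal sketch of why the "int offset" declaration matters: a 32-bit
 * caller seeking backwards passes a negative offset, which must
 * sign-extend into the 64-bit argument.
 */
#if 0
int off32 = -4;
loff_t good = off32;			/* -4: seek 4 bytes backwards */
loff_t bad = (unsigned int) off32;	/* 4294967292: far past the intent */
#endif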
1736
1737static int
1738groups16_to_user(short __user *grouplist, struct group_info *group_info)
1739{
1740 int i;
1741 short group;
1742
1743 for (i = 0; i < group_info->ngroups; i++) {
1744 group = (short)GROUP_AT(group_info, i);
1745 if (put_user(group, grouplist+i))
1746 return -EFAULT;
1747 }
1748
1749 return 0;
1750}
1751
1752static int
1753groups16_from_user(struct group_info *group_info, short __user *grouplist)
1754{
1755 int i;
1756 short group;
1757
1758 for (i = 0; i < group_info->ngroups; i++) {
1759 if (get_user(group, grouplist+i))
1760 return -EFAULT;
1761 GROUP_AT(group_info, i) = (gid_t)group;
1762 }
1763
1764 return 0;
1765}
1766
1767asmlinkage long
1768sys32_getgroups16 (int gidsetsize, short __user *grouplist)
1769{
1770 const struct cred *cred = current_cred();
1771 int i;
1772
1773 if (gidsetsize < 0)
1774 return -EINVAL;
1775
1776 i = cred->group_info->ngroups;
1777 if (gidsetsize) {
1778 if (i > gidsetsize) {
1779 i = -EINVAL;
1780 goto out;
1781 }
1782 if (groups16_to_user(grouplist, cred->group_info)) {
1783 i = -EFAULT;
1784 goto out;
1785 }
1786 }
1787out:
1788 return i;
1789}
1790
1791asmlinkage long
1792sys32_setgroups16 (int gidsetsize, short __user *grouplist)
1793{
1794 struct group_info *group_info;
1795 int retval;
1796
1797 if (!capable(CAP_SETGID))
1798 return -EPERM;
1799 if ((unsigned)gidsetsize > NGROUPS_MAX)
1800 return -EINVAL;
1801
1802 group_info = groups_alloc(gidsetsize);
1803 if (!group_info)
1804 return -ENOMEM;
1805 retval = groups16_from_user(group_info, grouplist);
1806 if (retval) {
1807 put_group_info(group_info);
1808 return retval;
1809 }
1810
1811 retval = set_current_groups(group_info);
1812 put_group_info(group_info);
1813
1814 return retval;
1815}
1816
1817asmlinkage long
1818sys32_truncate64 (unsigned int path, unsigned int len_lo, unsigned int len_hi)
1819{
1820 return sys_truncate(compat_ptr(path), ((unsigned long) len_hi << 32) | len_lo);
1821}
1822
1823asmlinkage long
1824sys32_ftruncate64 (int fd, unsigned int len_lo, unsigned int len_hi)
1825{
1826 return sys_ftruncate(fd, ((unsigned long) len_hi << 32) | len_lo);
1827}
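/*
 * A minimal sketch of the lo/hi splitting used by truncate64/ftruncate64
 * (and by pread/pwrite below): the ia32 ABI delivers a 64-bit length as
 * two 32-bit halves.
 */
#if 0
unsigned int len_lo = 0x80000000, len_hi = 0x1;
loff_t len = ((unsigned long) len_hi << 32) | len_lo;	/* 0x180000000 == 6GB */
#endif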
1828
1829static int
1830putstat64 (struct stat64 __user *ubuf, struct kstat *kbuf)
1831{
1832 int err;
1833 u64 hdev;
1834
1835 if (clear_user(ubuf, sizeof(*ubuf)))
1836 return -EFAULT;
1837
1838 hdev = huge_encode_dev(kbuf->dev);
1839 err = __put_user(hdev, (u32 __user*)&ubuf->st_dev);
1840 err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_dev) + 1);
1841 err |= __put_user(kbuf->ino, &ubuf->__st_ino);
1842 err |= __put_user(kbuf->ino, &ubuf->st_ino_lo);
1843 err |= __put_user(kbuf->ino >> 32, &ubuf->st_ino_hi);
1844 err |= __put_user(kbuf->mode, &ubuf->st_mode);
1845 err |= __put_user(kbuf->nlink, &ubuf->st_nlink);
1846 err |= __put_user(kbuf->uid, &ubuf->st_uid);
1847 err |= __put_user(kbuf->gid, &ubuf->st_gid);
1848 hdev = huge_encode_dev(kbuf->rdev);
1849	err |= __put_user(hdev, (u32 __user*)&ubuf->st_rdev);
1850 err |= __put_user(hdev >> 32, ((u32 __user*)&ubuf->st_rdev) + 1);
1851 err |= __put_user(kbuf->size, &ubuf->st_size_lo);
1852 err |= __put_user((kbuf->size >> 32), &ubuf->st_size_hi);
1853 err |= __put_user(kbuf->atime.tv_sec, &ubuf->st_atime);
1854 err |= __put_user(kbuf->atime.tv_nsec, &ubuf->st_atime_nsec);
1855 err |= __put_user(kbuf->mtime.tv_sec, &ubuf->st_mtime);
1856 err |= __put_user(kbuf->mtime.tv_nsec, &ubuf->st_mtime_nsec);
1857 err |= __put_user(kbuf->ctime.tv_sec, &ubuf->st_ctime);
1858 err |= __put_user(kbuf->ctime.tv_nsec, &ubuf->st_ctime_nsec);
1859 err |= __put_user(kbuf->blksize, &ubuf->st_blksize);
1860 err |= __put_user(kbuf->blocks, &ubuf->st_blocks);
1861 return err;
1862}
1863
1864asmlinkage long
1865sys32_stat64 (char __user *filename, struct stat64 __user *statbuf)
1866{
1867 struct kstat s;
1868 long ret = vfs_stat(filename, &s);
1869 if (!ret)
1870 ret = putstat64(statbuf, &s);
1871 return ret;
1872}
1873
1874asmlinkage long
1875sys32_lstat64 (char __user *filename, struct stat64 __user *statbuf)
1876{
1877 struct kstat s;
1878 long ret = vfs_lstat(filename, &s);
1879 if (!ret)
1880 ret = putstat64(statbuf, &s);
1881 return ret;
1882}
1883
1884asmlinkage long
1885sys32_fstat64 (unsigned int fd, struct stat64 __user *statbuf)
1886{
1887 struct kstat s;
1888 long ret = vfs_fstat(fd, &s);
1889 if (!ret)
1890 ret = putstat64(statbuf, &s);
1891 return ret;
1892}
1893
1894asmlinkage long
1895sys32_sched_rr_get_interval (pid_t pid, struct compat_timespec __user *interval)
1896{
1897 mm_segment_t old_fs = get_fs();
1898 struct timespec t;
1899 long ret;
1900
1901 set_fs(KERNEL_DS);
1902 ret = sys_sched_rr_get_interval(pid, (struct timespec __user *) &t);
1903 set_fs(old_fs);
1904 if (put_compat_timespec(&t, interval))
1905 return -EFAULT;
1906 return ret;
1907}
1908
1909asmlinkage long
1910sys32_pread (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
1911{
1912 return sys_pread64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
1913}
1914
1915asmlinkage long
1916sys32_pwrite (unsigned int fd, void __user *buf, unsigned int count, u32 pos_lo, u32 pos_hi)
1917{
1918 return sys_pwrite64(fd, buf, count, ((unsigned long) pos_hi << 32) | pos_lo);
1919}
1920
1921asmlinkage long
1922sys32_sendfile (int out_fd, int in_fd, int __user *offset, unsigned int count)
1923{
1924 mm_segment_t old_fs = get_fs();
1925 long ret;
1926 off_t of;
1927
1928 if (offset && get_user(of, offset))
1929 return -EFAULT;
1930
1931 set_fs(KERNEL_DS);
1932 ret = sys_sendfile(out_fd, in_fd, offset ? (off_t __user *) &of : NULL, count);
1933 set_fs(old_fs);
1934
1935 if (offset && put_user(of, offset))
1936 return -EFAULT;
1937
1938 return ret;
1939}
1940
1941asmlinkage long
1942sys32_personality (unsigned int personality)
1943{
1944 long ret;
1945
1946 if (current->personality == PER_LINUX32 && personality == PER_LINUX)
1947 personality = PER_LINUX32;
1948 ret = sys_personality(personality);
1949 if (ret == PER_LINUX32)
1950 ret = PER_LINUX;
1951 return ret;
1952}
1953
1954asmlinkage unsigned long
1955sys32_brk (unsigned int brk)
1956{
1957 unsigned long ret, obrk;
1958 struct mm_struct *mm = current->mm;
1959
1960 obrk = mm->brk;
1961 ret = sys_brk(brk);
1962 if (ret < obrk)
1963 clear_user(compat_ptr(ret), PAGE_ALIGN(ret) - ret);
1964 return ret;
1965}
1966
1967/* Structure for ia32 emulation on ia64 */
1968struct epoll_event32
1969{
1970 u32 events;
1971 u32 data[2];
1972};
1973
1974asmlinkage long
1975sys32_epoll_ctl(int epfd, int op, int fd, struct epoll_event32 __user *event)
1976{
1977 mm_segment_t old_fs = get_fs();
1978 struct epoll_event event64;
1979 int error;
1980 u32 data_halfword;
1981
1982 if (!access_ok(VERIFY_READ, event, sizeof(struct epoll_event32)))
1983 return -EFAULT;
1984
1985 __get_user(event64.events, &event->events);
1986 __get_user(data_halfword, &event->data[0]);
1987 event64.data = data_halfword;
1988 __get_user(data_halfword, &event->data[1]);
1989 event64.data |= (u64)data_halfword << 32;
1990
1991 set_fs(KERNEL_DS);
1992 error = sys_epoll_ctl(epfd, op, fd, (struct epoll_event __user *) &event64);
1993 set_fs(old_fs);
1994
1995 return error;
1996}
1997
1998asmlinkage long
1999sys32_epoll_wait(int epfd, struct epoll_event32 __user * events, int maxevents,
2000 int timeout)
2001{
2002 struct epoll_event *events64 = NULL;
2003 mm_segment_t old_fs = get_fs();
2004 int numevents, size;
2005 int evt_idx;
2006 int do_free_pages = 0;
2007
2008 if (maxevents <= 0) {
2009 return -EINVAL;
2010 }
2011
2012 /* Verify that the area passed by the user is writeable */
2013 if (!access_ok(VERIFY_WRITE, events, maxevents * sizeof(struct epoll_event32)))
2014 return -EFAULT;
2015
2016 /*
2017 * Allocate space for the intermediate copy. If the space needed
2018 * is large enough to cause kmalloc to fail, then try again with
2019 * __get_free_pages.
2020 */
2021 size = maxevents * sizeof(struct epoll_event);
2022 events64 = kmalloc(size, GFP_KERNEL);
2023 if (events64 == NULL) {
2024 events64 = (struct epoll_event *)
2025 __get_free_pages(GFP_KERNEL, get_order(size));
2026 if (events64 == NULL)
2027 return -ENOMEM;
2028 do_free_pages = 1;
2029 }
2030
2031 /* Do the system call */
2032 set_fs(KERNEL_DS); /* copy_to/from_user should work on kernel mem*/
2033 numevents = sys_epoll_wait(epfd, (struct epoll_event __user *) events64,
2034 maxevents, timeout);
2035 set_fs(old_fs);
2036
2037 /* Don't modify userspace memory if we're returning an error */
2038 if (numevents > 0) {
2039 /* Translate the 64-bit structures back into the 32-bit
2040 structures */
2041 for (evt_idx = 0; evt_idx < numevents; evt_idx++) {
2042 __put_user(events64[evt_idx].events,
2043 &events[evt_idx].events);
2044 __put_user((u32)events64[evt_idx].data,
2045 &events[evt_idx].data[0]);
2046 __put_user((u32)(events64[evt_idx].data >> 32),
2047 &events[evt_idx].data[1]);
2048 }
2049 }
2050
2051 if (do_free_pages)
2052 free_pages((unsigned long) events64, get_order(size));
2053 else
2054 kfree(events64);
2055 return numevents;
2056}
2057
2058/*
2059 * Get an as-yet-unused TLS descriptor index.
2060 */
2061static int
2062get_free_idx (void)
2063{
2064 struct thread_struct *t = &current->thread;
2065 int idx;
2066
2067 for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
2068 if (desc_empty(t->tls_array + idx))
2069 return idx + GDT_ENTRY_TLS_MIN;
2070 return -ESRCH;
2071}
2072
2073static void set_tls_desc(struct task_struct *p, int idx,
2074 const struct ia32_user_desc *info, int n)
2075{
2076 struct thread_struct *t = &p->thread;
2077 struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
2078 int cpu;
2079
2080 /*
2081 * We must not get preempted while modifying the TLS.
2082 */
2083 cpu = get_cpu();
2084
2085 while (n-- > 0) {
2086 if (LDT_empty(info)) {
2087 desc->a = 0;
2088 desc->b = 0;
2089 } else {
2090 desc->a = LDT_entry_a(info);
2091 desc->b = LDT_entry_b(info);
2092 }
2093
2094 ++info;
2095 ++desc;
2096 }
2097
2098 if (t == &current->thread)
2099 load_TLS(t, cpu);
2100
2101 put_cpu();
2102}
2103
2104/*
2105 * Set a given TLS descriptor:
2106 */
2107asmlinkage int
2108sys32_set_thread_area (struct ia32_user_desc __user *u_info)
2109{
2110 struct ia32_user_desc info;
2111 int idx;
2112
2113 if (copy_from_user(&info, u_info, sizeof(info)))
2114 return -EFAULT;
2115 idx = info.entry_number;
2116
2117 /*
2118 * index -1 means the kernel should try to find and allocate an empty descriptor:
2119 */
2120 if (idx == -1) {
2121 idx = get_free_idx();
2122 if (idx < 0)
2123 return idx;
2124 if (put_user(idx, &u_info->entry_number))
2125 return -EFAULT;
2126 }
2127
2128 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2129 return -EINVAL;
2130
2131 set_tls_desc(current, idx, &info, 1);
2132 return 0;
2133}
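/*
 * Editor's sketch of the matching user-space convention (hypothetical
 * caller, not part of this file): entry_number == -1 asks the kernel to
 * pick a free TLS slot and write the chosen index back into u_info.
 */
#if 0	/* illustrative 32-bit user-space code */
static int alloc_tls_slot(struct ia32_user_desc *ud)
{
	ud->entry_number = -1;		/* "find me a free descriptor" */
	if (syscall(__NR_set_thread_area, ud))
		return -1;
	return ud->entry_number;	/* now within GDT_ENTRY_TLS_MIN..MAX */
}
#endif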
2134
2135/*
2136 * Get the current Thread-Local Storage area:
2137 */
2138
2139#define GET_BASE(desc) ( \
2140 (((desc)->a >> 16) & 0x0000ffff) | \
2141 (((desc)->b << 16) & 0x00ff0000) | \
2142 ( (desc)->b & 0xff000000) )
2143
2144#define GET_LIMIT(desc) ( \
2145 ((desc)->a & 0x0ffff) | \
2146 ((desc)->b & 0xf0000) )
2147
2148#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
2149#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
2150#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
2151#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
2152#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
2153#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
2154
2155static void fill_user_desc(struct ia32_user_desc *info, int idx,
2156 const struct desc_struct *desc)
2157{
2158 info->entry_number = idx;
2159 info->base_addr = GET_BASE(desc);
2160 info->limit = GET_LIMIT(desc);
2161 info->seg_32bit = GET_32BIT(desc);
2162 info->contents = GET_CONTENTS(desc);
2163 info->read_exec_only = !GET_WRITABLE(desc);
2164 info->limit_in_pages = GET_LIMIT_PAGES(desc);
2165 info->seg_not_present = !GET_PRESENT(desc);
2166 info->useable = GET_USEABLE(desc);
2167}
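/*
 * Worked example (editor's addition): with desc->a == 0x56780000 and
 * desc->b == 0x12000034, GET_BASE() reassembles the scattered base fields:
 *	((a >> 16) & 0x0000ffff) == 0x00005678
 *	((b << 16) & 0x00ff0000) == 0x00340000
 *	( b        & 0xff000000) == 0x12000000
 * i.e. the linear base address 0x12345678 of the i386 descriptor layout.
 */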
2168
2169asmlinkage int
2170sys32_get_thread_area (struct ia32_user_desc __user *u_info)
2171{
2172 struct ia32_user_desc info;
2173 struct desc_struct *desc;
2174 int idx;
2175
2176 if (get_user(idx, &u_info->entry_number))
2177 return -EFAULT;
2178 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
2179 return -EINVAL;
2180
2181 desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
2182 fill_user_desc(&info, idx, desc);
2183
2184 if (copy_to_user(u_info, &info, sizeof(info)))
2185 return -EFAULT;
2186 return 0;
2187}
2188
2189struct regset_get {
2190 void *kbuf;
2191 void __user *ubuf;
2192};
2193
2194struct regset_set {
2195 const void *kbuf;
2196 const void __user *ubuf;
2197};
2198
2199struct regset_getset {
2200 struct task_struct *target;
2201 const struct user_regset *regset;
2202 union {
2203 struct regset_get get;
2204 struct regset_set set;
2205 } u;
2206 unsigned int pos;
2207 unsigned int count;
2208 int ret;
2209};
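/*
 * Editor's sketch of the streaming pattern used below (semantics assumed
 * from the call sites): each helper converts one byte range of the regset
 * image and hands it to user_regset_copyout(), which advances dst->pos and
 * shrinks dst->count so consecutive calls fill one destination buffer:
 *
 *	dst->ret = user_regset_copyout(&dst->pos, &dst->count,
 *				       &dst->u.get.kbuf, &dst->u.get.ubuf,
 *				       buf, start_offset, end_offset);
 */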
2210
2211static void getfpreg(struct task_struct *task, int regno, int *val)
2212{
2213 switch (regno / sizeof(int)) {
2214 case 0:
2215 *val = task->thread.fcr & 0xffff;
2216 break;
2217 case 1:
2218 *val = task->thread.fsr & 0xffff;
2219 break;
2220 case 2:
2221 *val = (task->thread.fsr>>16) & 0xffff;
2222 break;
2223 case 3:
2224 *val = task->thread.fir;
2225 break;
2226 case 4:
2227 *val = (task->thread.fir>>32) & 0xffff;
2228 break;
2229 case 5:
2230 *val = task->thread.fdr;
2231 break;
2232 case 6:
2233 *val = (task->thread.fdr >> 32) & 0xffff;
2234 break;
2235 }
2236}
2237
2238static void setfpreg(struct task_struct *task, int regno, int val)
2239{
2240 switch (regno / sizeof(int)) {
2241 case 0:
2242 task->thread.fcr = (task->thread.fcr & (~0x1f3f))
2243 | (val & 0x1f3f);
2244 break;
2245 case 1:
2246 task->thread.fsr = (task->thread.fsr & (~0xffff)) | val;
2247 break;
2248 case 2:
2249 task->thread.fsr = (task->thread.fsr & (~0xffff0000))
2250 | (val << 16);
2251 break;
2252 case 3:
2253 task->thread.fir = (task->thread.fir & (~0xffffffff)) | val;
2254 break;
2255 case 5:
2256 task->thread.fdr = (task->thread.fdr & (~0xffffffff)) | val;
2257 break;
2258 }
2259}
2260
2261static void access_fpreg_ia32(int regno, void *reg,
2262 struct pt_regs *pt, struct switch_stack *sw,
2263 int tos, int write)
2264{
2265 void *f;
2266
2267 if ((regno += tos) >= 8)
2268 regno -= 8;
2269 if (regno < 4)
2270 f = &pt->f8 + regno;
2271 else if (regno <= 7)
2272 f = &sw->f12 + (regno - 4);
2273 else {
2274		printk(KERN_ERR "regno must be less than 8\n");
2275 return;
2276 }
2277
2278 if (write)
2279 memcpy(f, reg, sizeof(struct _fpreg_ia32));
2280 else
2281 memcpy(reg, f, sizeof(struct _fpreg_ia32));
2282}
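/*
 * Editor's example of the top-of-stack arithmetic above: with tos == 5,
 * ia32 st(4) maps to physical slot (4 + 5) % 8 == 1, i.e. pt->f8 + 1;
 * slots 0-3 live in pt_regs (f8-f11) and slots 4-7 in switch_stack
 * (f12-f15).
 */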
2283
2284static void do_fpregs_get(struct unw_frame_info *info, void *arg)
2285{
2286 struct regset_getset *dst = arg;
2287 struct task_struct *task = dst->target;
2288 struct pt_regs *pt;
2289 int start, end, tos;
2290 char buf[80];
2291
2292 if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2293 return;
2294 if (dst->pos < 7 * sizeof(int)) {
2295 end = min((dst->pos + dst->count),
2296 (unsigned int)(7 * sizeof(int)));
2297 for (start = dst->pos; start < end; start += sizeof(int))
2298 getfpreg(task, start, (int *)(buf + start));
2299 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2300 &dst->u.get.kbuf, &dst->u.get.ubuf, buf,
2301 0, 7 * sizeof(int));
2302 if (dst->ret || dst->count == 0)
2303 return;
2304 }
2305 if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
2306 pt = task_pt_regs(task);
2307 tos = (task->thread.fsr >> 11) & 7;
2308 end = min(dst->pos + dst->count,
2309 (unsigned int)(sizeof(struct ia32_user_i387_struct)));
2310 start = (dst->pos - 7 * sizeof(int)) /
2311 sizeof(struct _fpreg_ia32);
2312 end = (end - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
2313 for (; start < end; start++)
2314 access_fpreg_ia32(start,
2315 (struct _fpreg_ia32 *)buf + start,
2316 pt, info->sw, tos, 0);
2317 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2318 &dst->u.get.kbuf, &dst->u.get.ubuf,
2319 buf, 7 * sizeof(int),
2320 sizeof(struct ia32_user_i387_struct));
2321 if (dst->ret || dst->count == 0)
2322 return;
2323 }
2324}
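/*
 * Layout sketch (editor's addition): the ia32_user_i387_struct image is
 * assumed here to be 7 control words (28 bytes) followed by eight 10-byte
 * st(i) slots, so for dst->pos == 48 the loop above resumes at
 * (48 - 28) / sizeof(struct _fpreg_ia32) == st(2).
 */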
2325
2326static void do_fpregs_set(struct unw_frame_info *info, void *arg)
2327{
2328 struct regset_getset *dst = arg;
2329 struct task_struct *task = dst->target;
2330 struct pt_regs *pt;
2331 char buf[80];
2332 int end, start, tos;
2333
2334 if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2335 return;
2336
2337 if (dst->pos < 7 * sizeof(int)) {
2338 start = dst->pos;
2339 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2340 &dst->u.set.kbuf, &dst->u.set.ubuf, buf,
2341 0, 7 * sizeof(int));
2342 if (dst->ret)
2343 return;
2344 for (; start < dst->pos; start += sizeof(int))
2345 setfpreg(task, start, *((int *)(buf + start)));
2346 if (dst->count == 0)
2347 return;
2348 }
2349 if (dst->pos < sizeof(struct ia32_user_i387_struct)) {
2350 start = (dst->pos - 7 * sizeof(int)) /
2351 sizeof(struct _fpreg_ia32);
2352 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2353 &dst->u.set.kbuf, &dst->u.set.ubuf,
2354 buf, 7 * sizeof(int),
2355 sizeof(struct ia32_user_i387_struct));
2356 if (dst->ret)
2357 return;
2358 pt = task_pt_regs(task);
2359 tos = (task->thread.fsr >> 11) & 7;
2360 end = (dst->pos - 7 * sizeof(int)) / sizeof(struct _fpreg_ia32);
2361 for (; start < end; start++)
2362 access_fpreg_ia32(start,
2363 (struct _fpreg_ia32 *)buf + start,
2364 pt, info->sw, tos, 1);
2365 if (dst->count == 0)
2366 return;
2367 }
2368}
2369
2370#define OFFSET(member) ((int)(offsetof(struct ia32_user_fxsr_struct, member)))
2371static void getfpxreg(struct task_struct *task, int start, int end, char *buf)
2372{
2373 int min_val;
2374
2375 min_val = min(end, OFFSET(fop));
2376 while (start < min_val) {
2377 if (start == OFFSET(cwd))
2378 *((short *)buf) = task->thread.fcr & 0xffff;
2379 else if (start == OFFSET(swd))
2380 *((short *)buf) = task->thread.fsr & 0xffff;
2381 else if (start == OFFSET(twd))
2382 *((short *)buf) = (task->thread.fsr>>16) & 0xffff;
2383 buf += 2;
2384 start += 2;
2385 }
2386 /* skip fop element */
2387 if (start == OFFSET(fop)) {
2388 start += 2;
2389 buf += 2;
2390 }
2391 while (start < end) {
2392 if (start == OFFSET(fip))
2393 *((int *)buf) = task->thread.fir;
2394 else if (start == OFFSET(fcs))
2395 *((int *)buf) = (task->thread.fir>>32) & 0xffff;
2396 else if (start == OFFSET(foo))
2397 *((int *)buf) = task->thread.fdr;
2398 else if (start == OFFSET(fos))
2399 *((int *)buf) = (task->thread.fdr>>32) & 0xffff;
2400 else if (start == OFFSET(mxcsr))
2401 *((int *)buf) = ((task->thread.fcr>>32) & 0xff80)
2402 | ((task->thread.fsr>>32) & 0x3f);
2403 buf += 4;
2404 start += 4;
2405 }
2406}
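/*
 * Editor's illustration of the mxcsr synthesis above: with
 * (fcr >> 32) == 0x1f80 (all exceptions masked) and (fsr >> 32) == 0x0001,
 * the value read back is (0x1f80 & 0xff80) | (0x0001 & 0x3f) == 0x1f81.
 */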
2407
2408static void setfpxreg(struct task_struct *task, int start, int end, char *buf)
2409{
2410 int min_val, num32;
2411 short num;
2412 unsigned long num64;
2413
2414 min_val = min(end, OFFSET(fop));
2415 while (start < min_val) {
2416 num = *((short *)buf);
2417 if (start == OFFSET(cwd)) {
2418 task->thread.fcr = (task->thread.fcr & (~0x1f3f))
2419 | (num & 0x1f3f);
2420 } else if (start == OFFSET(swd)) {
2421 task->thread.fsr = (task->thread.fsr & (~0xffff)) | num;
2422 } else if (start == OFFSET(twd)) {
2423 task->thread.fsr = (task->thread.fsr & (~0xffff0000))
2424 | (((int)num) << 16);
2425 }
2426 buf += 2;
2427 start += 2;
2428 }
2429 /* skip fop element */
2430 if (start == OFFSET(fop)) {
2431 start += 2;
2432 buf += 2;
2433 }
2434 while (start < end) {
2435 num32 = *((int *)buf);
2436 if (start == OFFSET(fip))
2437 task->thread.fir = (task->thread.fir & (~0xffffffff))
2438 | num32;
2439 else if (start == OFFSET(foo))
2440 task->thread.fdr = (task->thread.fdr & (~0xffffffff))
2441 | num32;
2442 else if (start == OFFSET(mxcsr)) {
2443 num64 = num32 & 0xff10;
2444 task->thread.fcr = (task->thread.fcr &
2445 (~0xff1000000000UL)) | (num64<<32);
2446 num64 = num32 & 0x3f;
2447 task->thread.fsr = (task->thread.fsr &
2448 (~0x3f00000000UL)) | (num64<<32);
2449 }
2450 buf += 4;
2451 start += 4;
2452 }
2453}
2454
2455static void do_fpxregs_get(struct unw_frame_info *info, void *arg)
2456{
2457 struct regset_getset *dst = arg;
2458 struct task_struct *task = dst->target;
2459 struct pt_regs *pt;
2460 char buf[128];
2461 int start, end, tos;
2462
2463 if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2464 return;
2465 if (dst->pos < OFFSET(st_space[0])) {
2466 end = min(dst->pos + dst->count, (unsigned int)32);
2467 getfpxreg(task, dst->pos, end, buf);
2468 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2469 &dst->u.get.kbuf, &dst->u.get.ubuf, buf,
2470 0, OFFSET(st_space[0]));
2471 if (dst->ret || dst->count == 0)
2472 return;
2473 }
2474 if (dst->pos < OFFSET(xmm_space[0])) {
2475 pt = task_pt_regs(task);
2476 tos = (task->thread.fsr >> 11) & 7;
2477 end = min(dst->pos + dst->count,
2478 (unsigned int)OFFSET(xmm_space[0]));
2479 start = (dst->pos - OFFSET(st_space[0])) / 16;
2480 end = (end - OFFSET(st_space[0])) / 16;
2481 for (; start < end; start++)
2482 access_fpreg_ia32(start, buf + 16 * start, pt,
2483 info->sw, tos, 0);
2484 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2485 &dst->u.get.kbuf, &dst->u.get.ubuf,
2486 buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
2487 if (dst->ret || dst->count == 0)
2488 return;
2489 }
2490 if (dst->pos < OFFSET(padding[0]))
2491 dst->ret = user_regset_copyout(&dst->pos, &dst->count,
2492 &dst->u.get.kbuf, &dst->u.get.ubuf,
2493 &info->sw->f16, OFFSET(xmm_space[0]),
2494 OFFSET(padding[0]));
2495}
2496
2497static void do_fpxregs_set(struct unw_frame_info *info, void *arg)
2498{
2499 struct regset_getset *dst = arg;
2500 struct task_struct *task = dst->target;
2501 char buf[128];
2502 int start, end;
2503
2504 if (dst->count == 0 || unw_unwind_to_user(info) < 0)
2505 return;
2506
2507 if (dst->pos < OFFSET(st_space[0])) {
2508 start = dst->pos;
2509 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2510 &dst->u.set.kbuf, &dst->u.set.ubuf,
2511 buf, 0, OFFSET(st_space[0]));
2512 if (dst->ret)
2513 return;
2514 setfpxreg(task, start, dst->pos, buf);
2515 if (dst->count == 0)
2516 return;
2517 }
2518 if (dst->pos < OFFSET(xmm_space[0])) {
2519 struct pt_regs *pt;
2520 int tos;
2521 pt = task_pt_regs(task);
2522 tos = (task->thread.fsr >> 11) & 7;
2523 start = (dst->pos - OFFSET(st_space[0])) / 16;
2524 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2525 &dst->u.set.kbuf, &dst->u.set.ubuf,
2526 buf, OFFSET(st_space[0]), OFFSET(xmm_space[0]));
2527 if (dst->ret)
2528 return;
2529 end = (dst->pos - OFFSET(st_space[0])) / 16;
2530 for (; start < end; start++)
2531 access_fpreg_ia32(start, buf + 16 * start, pt, info->sw,
2532 tos, 1);
2533 if (dst->count == 0)
2534 return;
2535 }
2536 if (dst->pos < OFFSET(padding[0]))
2537 dst->ret = user_regset_copyin(&dst->pos, &dst->count,
2538 &dst->u.set.kbuf, &dst->u.set.ubuf,
2539 &info->sw->f16, OFFSET(xmm_space[0]),
2540 OFFSET(padding[0]));
2541}
2542#undef OFFSET
2543
2544static int do_regset_call(void (*call)(struct unw_frame_info *, void *),
2545 struct task_struct *target,
2546 const struct user_regset *regset,
2547 unsigned int pos, unsigned int count,
2548 const void *kbuf, const void __user *ubuf)
2549{
2550 struct regset_getset info = { .target = target, .regset = regset,
2551 .pos = pos, .count = count,
2552 .u.set = { .kbuf = kbuf, .ubuf = ubuf },
2553 .ret = 0 };
2554
2555 if (target == current)
2556 unw_init_running(call, &info);
2557 else {
2558 struct unw_frame_info ufi;
2559 memset(&ufi, 0, sizeof(ufi));
2560 unw_init_from_blocked_task(&ufi, target);
2561 (*call)(&ufi, &info);
2562 }
2563
2564 return info.ret;
2565}
2566
2567static int ia32_fpregs_get(struct task_struct *target,
2568 const struct user_regset *regset,
2569 unsigned int pos, unsigned int count,
2570 void *kbuf, void __user *ubuf)
2571{
2572 return do_regset_call(do_fpregs_get, target, regset, pos, count,
2573 kbuf, ubuf);
2574}
2575
2576static int ia32_fpregs_set(struct task_struct *target,
2577 const struct user_regset *regset,
2578 unsigned int pos, unsigned int count,
2579 const void *kbuf, const void __user *ubuf)
2580{
2581 return do_regset_call(do_fpregs_set, target, regset, pos, count,
2582 kbuf, ubuf);
2583}
2584
2585static int ia32_fpxregs_get(struct task_struct *target,
2586 const struct user_regset *regset,
2587 unsigned int pos, unsigned int count,
2588 void *kbuf, void __user *ubuf)
2589{
2590 return do_regset_call(do_fpxregs_get, target, regset, pos, count,
2591 kbuf, ubuf);
2592}
2593
2594static int ia32_fpxregs_set(struct task_struct *target,
2595 const struct user_regset *regset,
2596 unsigned int pos, unsigned int count,
2597 const void *kbuf, const void __user *ubuf)
2598{
2599 return do_regset_call(do_fpxregs_set, target, regset, pos, count,
2600 kbuf, ubuf);
2601}
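/*
 * Usage sketch (editor's addition, hypothetical caller): the four wrappers
 * above expose the unwound register state through the regset API, e.g.
 * reading the full i387 image of a stopped tracee into a kernel buffer:
 *
 *	struct ia32_user_i387_struct img;
 *	int err = ia32_fpregs_get(child, regset, 0, sizeof(img), &img, NULL);
 */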
2602
2603static int ia32_genregs_get(struct task_struct *target,
2604 const struct user_regset *regset,
2605 unsigned int pos, unsigned int count,
2606 void *kbuf, void __user *ubuf)
2607{
2608 if (kbuf) {
2609 u32 *kp = kbuf;
2610 while (count > 0) {
2611 *kp++ = getreg(target, pos);
2612 pos += 4;
2613 count -= 4;
2614 }
2615 } else {
2616 u32 __user *up = ubuf;
2617 while (count > 0) {
2618 if (__put_user(getreg(target, pos), up++))
2619 return -EFAULT;
2620 pos += 4;
2621 count -= 4;
2622 }
2623 }
2624 return 0;
2625}
2626
2627static int ia32_genregs_set(struct task_struct *target,
2628 const struct user_regset *regset,
2629 unsigned int pos, unsigned int count,
2630 const void *kbuf, const void __user *ubuf)
2631{
2632 int ret = 0;
2633
2634 if (kbuf) {
2635 const u32 *kp = kbuf;
2636 while (!ret && count > 0) {
2637 putreg(target, pos, *kp++);
2638 pos += 4;
2639 count -= 4;
2640 }
2641 } else {
2642 const u32 __user *up = ubuf;
2643 u32 val;
2644 while (!ret && count > 0) {
2645 ret = __get_user(val, up++);
2646 if (!ret)
2647 putreg(target, pos, val);
2648 pos += 4;
2649 count -= 4;
2650 }
2651 }
2652 return ret;
2653}
2654
2655static int ia32_tls_active(struct task_struct *target,
2656 const struct user_regset *regset)
2657{
2658 struct thread_struct *t = &target->thread;
2659 int n = GDT_ENTRY_TLS_ENTRIES;
2660	while (n > 0 && desc_empty(&t->tls_array[n - 1]))
2661 --n;
2662 return n;
2663}
2664
2665static int ia32_tls_get(struct task_struct *target,
2666 const struct user_regset *regset, unsigned int pos,
2667 unsigned int count, void *kbuf, void __user *ubuf)
2668{
2669 const struct desc_struct *tls;
2670
2671 if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
2672 (pos % sizeof(struct ia32_user_desc)) != 0 ||
2673 (count % sizeof(struct ia32_user_desc)) != 0)
2674 return -EINVAL;
2675
2676 pos /= sizeof(struct ia32_user_desc);
2677 count /= sizeof(struct ia32_user_desc);
2678
2679 tls = &target->thread.tls_array[pos];
2680
2681 if (kbuf) {
2682 struct ia32_user_desc *info = kbuf;
2683 while (count-- > 0)
2684 fill_user_desc(info++, GDT_ENTRY_TLS_MIN + pos++,
2685 tls++);
2686 } else {
2687 struct ia32_user_desc __user *u_info = ubuf;
2688 while (count-- > 0) {
2689 struct ia32_user_desc info;
2690 fill_user_desc(&info, GDT_ENTRY_TLS_MIN + pos++, tls++);
2691 if (__copy_to_user(u_info++, &info, sizeof(info)))
2692 return -EFAULT;
2693 }
2694 }
2695
2696 return 0;
2697}
2698
2699static int ia32_tls_set(struct task_struct *target,
2700 const struct user_regset *regset, unsigned int pos,
2701 unsigned int count, const void *kbuf, const void __user *ubuf)
2702{
2703 struct ia32_user_desc infobuf[GDT_ENTRY_TLS_ENTRIES];
2704 const struct ia32_user_desc *info;
2705
2706 if (pos > GDT_ENTRY_TLS_ENTRIES * sizeof(struct ia32_user_desc) ||
2707 (pos % sizeof(struct ia32_user_desc)) != 0 ||
2708 (count % sizeof(struct ia32_user_desc)) != 0)
2709 return -EINVAL;
2710
2711 if (kbuf)
2712 info = kbuf;
2713 else if (__copy_from_user(infobuf, ubuf, count))
2714 return -EFAULT;
2715 else
2716 info = infobuf;
2717
2718 set_tls_desc(target,
2719 GDT_ENTRY_TLS_MIN + (pos / sizeof(struct ia32_user_desc)),
2720 info, count / sizeof(struct ia32_user_desc));
2721
2722 return 0;
2723}
2724
2725/*
2726 * This should match arch/i386/kernel/ptrace.c:native_regsets.
2727 * XXX ioperm? vm86?
2728 */
2729static const struct user_regset ia32_regsets[] = {
2730 {
2731 .core_note_type = NT_PRSTATUS,
2732 .n = sizeof(struct user_regs_struct32)/4,
2733 .size = 4, .align = 4,
2734 .get = ia32_genregs_get, .set = ia32_genregs_set
2735 },
2736 {
2737 .core_note_type = NT_PRFPREG,
2738 .n = sizeof(struct ia32_user_i387_struct) / 4,
2739 .size = 4, .align = 4,
2740 .get = ia32_fpregs_get, .set = ia32_fpregs_set
2741 },
2742 {
2743 .core_note_type = NT_PRXFPREG,
2744 .n = sizeof(struct ia32_user_fxsr_struct) / 4,
2745 .size = 4, .align = 4,
2746 .get = ia32_fpxregs_get, .set = ia32_fpxregs_set
2747 },
2748 {
2749 .core_note_type = NT_386_TLS,
2750 .n = GDT_ENTRY_TLS_ENTRIES,
2751 .bias = GDT_ENTRY_TLS_MIN,
2752 .size = sizeof(struct ia32_user_desc),
2753 .align = sizeof(struct ia32_user_desc),
2754 .active = ia32_tls_active,
2755 .get = ia32_tls_get, .set = ia32_tls_set,
2756 },
2757};
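/*
 * Sizing example (editor's addition, assuming the classic i386 register
 * layout): .n counts 4-byte members, e.g. NT_PRSTATUS covers the 17 words
 * of user_regs_struct32 (ebx..gs, orig_eax, eip, cs, eflags, esp, ss).
 */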
2758
2759const struct user_regset_view user_ia32_view = {
2760 .name = "i386", .e_machine = EM_386,
2761 .regsets = ia32_regsets, .n = ARRAY_SIZE(ia32_regsets)
2762};
2763
2764long sys32_fadvise64_64(int fd, __u32 offset_low, __u32 offset_high,
2765 __u32 len_low, __u32 len_high, int advice)
2766{
2767 return sys_fadvise64_64(fd,
2768 (((u64)offset_high)<<32) | offset_low,
2769 (((u64)len_high)<<32) | len_low,
2770 advice);
2771}
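/*
 * Editor's example of the reassembly above: offset_high == 0x1 and
 * offset_low == 0x80000000 combine to ((u64)0x1 << 32) | 0x80000000 ==
 * 0x180000000, a 6 GiB offset that cannot travel in one 32-bit register.
 */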
2772
2773#ifdef NOTYET /* UNTESTED FOR IA64 FROM HERE DOWN */
2774
2775asmlinkage long sys32_setreuid(compat_uid_t ruid, compat_uid_t euid)
2776{
2777 uid_t sruid, seuid;
2778
2779 sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
2780 seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
2781 return sys_setreuid(sruid, seuid);
2782}
2783
2784asmlinkage long
2785sys32_setresuid(compat_uid_t ruid, compat_uid_t euid,
2786 compat_uid_t suid)
2787{
2788 uid_t sruid, seuid, ssuid;
2789
2790 sruid = (ruid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)ruid);
2791 seuid = (euid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)euid);
2792 ssuid = (suid == (compat_uid_t)-1) ? ((uid_t)-1) : ((uid_t)suid);
2793 return sys_setresuid(sruid, seuid, ssuid);
2794}
2795
2796asmlinkage long
2797sys32_setregid(compat_gid_t rgid, compat_gid_t egid)
2798{
2799 gid_t srgid, segid;
2800
2801 srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
2802 segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
2803 return sys_setregid(srgid, segid);
2804}
2805
2806asmlinkage long
2807sys32_setresgid(compat_gid_t rgid, compat_gid_t egid,
2808 compat_gid_t sgid)
2809{
2810 gid_t srgid, segid, ssgid;
2811
2812 srgid = (rgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)rgid);
2813 segid = (egid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)egid);
2814 ssgid = (sgid == (compat_gid_t)-1) ? ((gid_t)-1) : ((gid_t)sgid);
2815 return sys_setresgid(srgid, segid, ssgid);
2816}
2817#endif /* NOTYET */
diff --git a/arch/ia64/include/asm/acpi.h b/arch/ia64/include/asm/acpi.h
index 91df9686a0da..21adbd7f90f8 100644
--- a/arch/ia64/include/asm/acpi.h
+++ b/arch/ia64/include/asm/acpi.h
@@ -94,11 +94,38 @@ ia64_acpi_release_global_lock (unsigned int *lock)
 #define acpi_noirq 0	/* ACPI always enabled on IA64 */
 #define acpi_pci_disabled 0	/* ACPI PCI always enabled on IA64 */
 #define acpi_strict 1	/* no ACPI spec workarounds on IA64 */
+#define acpi_ht 0	/* no HT-only mode on IA64 */
 #endif
 #define acpi_processor_cstate_check(x) (x) /* no idle limits on IA64 :) */
 static inline void disable_acpi(void) { }
+static inline void pci_acpi_crs_quirks(void) { }
 
+#ifdef CONFIG_IA64_GENERIC
 const char *acpi_get_sysname (void);
+#else
+static inline const char *acpi_get_sysname (void)
+{
+# if defined (CONFIG_IA64_HP_SIM)
+	return "hpsim";
+# elif defined (CONFIG_IA64_HP_ZX1)
+	return "hpzx1";
+# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
+	return "hpzx1_swiotlb";
+# elif defined (CONFIG_IA64_SGI_SN2)
+	return "sn2";
+# elif defined (CONFIG_IA64_SGI_UV)
+	return "uv";
+# elif defined (CONFIG_IA64_DIG)
+	return "dig";
+# elif defined (CONFIG_IA64_XEN_GUEST)
+	return "xen";
+# elif defined(CONFIG_IA64_DIG_VTD)
+	return "dig_vtd";
+# else
+# error Unknown platform. Fix acpi.c.
+# endif
+}
+#endif
 int acpi_request_vector (u32 int_type);
 int acpi_gsi_to_irq (u32 gsi, unsigned int *irq);
 
@@ -132,6 +159,12 @@ extern int __devinitdata pxm_to_nid_map[MAX_PXM_DOMAINS];
 extern int __initdata nid_to_pxm_map[MAX_NUMNODES];
 #endif
 
+static inline bool arch_has_acpi_pdc(void) { return true; }
+static inline void arch_acpi_set_pdc_bits(u32 *buf)
+{
+	buf[2] |= ACPI_PDC_EST_CAPABILITY_SMP;
+}
+
 #define acpi_unlazy_tlb(x)
 
 #ifdef CONFIG_ACPI_NUMA
diff --git a/arch/ia64/include/asm/asm-offsets.h b/arch/ia64/include/asm/asm-offsets.h
new file mode 100644
index 000000000000..d370ee36a182
--- /dev/null
+++ b/arch/ia64/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/ia64/include/asm/bitops.h b/arch/ia64/include/asm/bitops.h
index 57a2787bc9fb..6ebc229a1c51 100644
--- a/arch/ia64/include/asm/bitops.h
+++ b/arch/ia64/include/asm/bitops.h
@@ -127,7 +127,7 @@ clear_bit_unlock (int nr, volatile void *addr)
  * @addr: Address to start counting from
  *
  * Similarly to clear_bit_unlock, the implementation uses a store
- * with release semantics. See also __raw_spin_unlock().
+ * with release semantics. See also arch_spin_unlock().
  */
 static __inline__ void
 __clear_bit_unlock(int nr, void *addr)
diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
index c8ce2719fee8..429eefc93ee7 100644
--- a/arch/ia64/include/asm/cacheflush.h
+++ b/arch/ia64/include/asm/cacheflush.h
@@ -25,6 +25,7 @@
 #define flush_cache_vmap(start, end)	do { } while (0)
 #define flush_cache_vunmap(start, end)	do { } while (0)
 
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page) \
 do { \
 	clear_bit(PG_arch_1, &(page)->flags); \
diff --git a/arch/ia64/include/asm/compat.h b/arch/ia64/include/asm/compat.h
index dfcf75b8426d..f90edc85b509 100644
--- a/arch/ia64/include/asm/compat.h
+++ b/arch/ia64/include/asm/compat.h
@@ -5,7 +5,8 @@
  */
 #include <linux/types.h>
 
 #define COMPAT_USER_HZ	100
+#define COMPAT_UTS_MACHINE	"i686\0\0\0"
 
 typedef u32	compat_size_t;
 typedef s32	compat_ssize_t;
diff --git a/arch/ia64/include/asm/dma-mapping.h b/arch/ia64/include/asm/dma-mapping.h
index 8d3c79cd81e7..7d09a09cdaad 100644
--- a/arch/ia64/include/asm/dma-mapping.h
+++ b/arch/ia64/include/asm/dma-mapping.h
@@ -73,7 +73,7 @@ static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
 	if (!dev->dma_mask)
 		return 0;
 
-	return addr + size <= *dev->dma_mask;
+	return addr + size - 1 <= *dev->dma_mask;
 }
 
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
diff --git a/arch/ia64/include/asm/dmi.h b/arch/ia64/include/asm/dmi.h
index 00eb1b130b63..1ed4c8fedb83 100644
--- a/arch/ia64/include/asm/dmi.h
+++ b/arch/ia64/include/asm/dmi.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_DMI_H
 #define _ASM_DMI_H 1
 
+#include <linux/slab.h>
 #include <asm/io.h>
 
 /* Use normal IO mappings for DMI */
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 86eddee029cb..b5298eb09adb 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -25,7 +25,6 @@
 #define ELF_DATA	ELFDATA2LSB
 #define ELF_ARCH	EM_IA_64
 
-#define USE_ELF_CORE_DUMP
 #define CORE_DUMP_USE_REGSET
 
 /* Least-significant four bits of ELF header's e_flags are OS-specific. The bits are
@@ -202,7 +201,9 @@ extern void ia64_elf_core_copy_regs (struct pt_regs *src, elf_gregset_t dst);
    relevant until we have real hardware to play with... */
 #define ELF_PLATFORM	NULL
 
-#define SET_PERSONALITY(ex)	set_personality(PER_LINUX)
+#define SET_PERSONALITY(ex) \
+	set_personality((current->personality & ~PER_MASK) | PER_LINUX)
+
 #define elf_read_implies_exec(ex, executable_stack) \
 	((executable_stack!=EXSTACK_DISABLE_X) && ((ex).e_flags & EF_IA_64_LINUX_EXECUTABLE_STACK) != 0)
 
@@ -218,54 +219,6 @@ do { \
 	NEW_AUX_ENT(AT_SYSINFO_EHDR, (unsigned long) GATE_EHDR); \
 } while (0)
 
-
-/*
- * These macros parameterize elf_core_dump in fs/binfmt_elf.c to write out
- * extra segments containing the gate DSO contents. Dumping its
- * contents makes post-mortem fully interpretable later without matching up
- * the same kernel and hardware config to see what PC values meant.
- * Dumping its extra ELF program headers includes all the other information
- * a debugger needs to easily find how the gate DSO was being used.
- */
-#define ELF_CORE_EXTRA_PHDRS (GATE_EHDR->e_phnum)
-#define ELF_CORE_WRITE_EXTRA_PHDRS \
-do { \
-	const struct elf_phdr *const gate_phdrs = \
-		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
-	int i; \
-	Elf64_Off ofs = 0; \
-	for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
-		struct elf_phdr phdr = gate_phdrs[i]; \
-		if (phdr.p_type == PT_LOAD) { \
-			phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz); \
-			phdr.p_filesz = phdr.p_memsz; \
-			if (ofs == 0) { \
-				ofs = phdr.p_offset = offset; \
-				offset += phdr.p_filesz; \
-			} \
-			else \
-				phdr.p_offset = ofs; \
-		} \
-		else \
-			phdr.p_offset += ofs; \
-		phdr.p_paddr = 0; /* match other core phdrs */ \
-		DUMP_WRITE(&phdr, sizeof(phdr)); \
-	} \
-} while (0)
-#define ELF_CORE_WRITE_EXTRA_DATA \
-do { \
-	const struct elf_phdr *const gate_phdrs = \
-		(const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff); \
-	int i; \
-	for (i = 0; i < GATE_EHDR->e_phnum; ++i) { \
-		if (gate_phdrs[i].p_type == PT_LOAD) { \
-			DUMP_WRITE((void *) gate_phdrs[i].p_vaddr, \
-				   PAGE_ALIGN(gate_phdrs[i].p_memsz)); \
-			break; \
-		} \
-	} \
-} while (0)
-
 /*
  * format for entries in the Global Offset Table
  */
diff --git a/arch/ia64/include/asm/ftrace.h b/arch/ia64/include/asm/ftrace.h
index d20db3c2a656..fbd1a2470cae 100644
--- a/arch/ia64/include/asm/ftrace.h
+++ b/arch/ia64/include/asm/ftrace.h
@@ -8,7 +8,6 @@
 extern void _mcount(unsigned long pfs, unsigned long r1, unsigned long b0, unsigned long r0);
 #define mcount _mcount
 
-#include <asm/kprobes.h>
 /* In IA64, MCOUNT_ADDR is set in link time, so it's not a constant at compile time */
 #define MCOUNT_ADDR (((struct fnptr *)mcount)->ip)
 #define FTRACE_ADDR (((struct fnptr *)ftrace_caller)->ip)
diff --git a/arch/ia64/include/asm/hw_irq.h b/arch/ia64/include/asm/hw_irq.h
index 91619b31dbf5..bf2e37493e04 100644
--- a/arch/ia64/include/asm/hw_irq.h
+++ b/arch/ia64/include/asm/hw_irq.h
@@ -59,7 +59,13 @@ typedef u16 ia64_vector;
 extern int ia64_first_device_vector;
 extern int ia64_last_device_vector;
 
+#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined (CONFIG_IA64_DIG))
+/* Reserve the lower priority vector than device vectors for "move IRQ" IPI */
+#define IA64_IRQ_MOVE_VECTOR		0x30	/* "move IRQ" IPI */
+#define IA64_DEF_FIRST_DEVICE_VECTOR	0x31
+#else
 #define IA64_DEF_FIRST_DEVICE_VECTOR	0x30
+#endif
 #define IA64_DEF_LAST_DEVICE_VECTOR	0xe7
 #define IA64_FIRST_DEVICE_VECTOR	ia64_first_device_vector
 #define IA64_LAST_DEVICE_VECTOR		ia64_last_device_vector
diff --git a/arch/ia64/include/asm/ia32.h b/arch/ia64/include/asm/ia32.h
deleted file mode 100644
index 2390ee145aa1..000000000000
--- a/arch/ia64/include/asm/ia32.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#ifndef _ASM_IA64_IA32_H
-#define _ASM_IA64_IA32_H
-
-
-#include <asm/ptrace.h>
-#include <asm/signal.h>
-
-#define IA32_NR_syscalls	285	/* length of syscall table */
-#define IA32_PAGE_SHIFT		12	/* 4KB pages */
-
-#ifndef __ASSEMBLY__
-
-# ifdef CONFIG_IA32_SUPPORT
-
-#define IA32_PAGE_OFFSET	0xc0000000
-
-extern void ia32_cpu_init (void);
-extern void ia32_mem_init (void);
-extern void ia32_gdt_init (void);
-extern int ia32_exception (struct pt_regs *regs, unsigned long isr);
-extern int ia32_intercept (struct pt_regs *regs, unsigned long isr);
-extern int ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs);
-
-# endif /* !CONFIG_IA32_SUPPORT */
-
-/* Declare this unconditionally, so we don't get warnings for unreachable code. */
-extern int ia32_setup_frame1 (int sig, struct k_sigaction *ka, siginfo_t *info,
-			      sigset_t *set, struct pt_regs *regs);
-#if PAGE_SHIFT > IA32_PAGE_SHIFT
-extern int ia32_copy_ia64_partial_page_list(struct task_struct *,
-					    unsigned long);
-extern void ia32_drop_ia64_partial_page_list(struct task_struct *);
-#else
-# define ia32_copy_ia64_partial_page_list(a1, a2)	0
-# define ia32_drop_ia64_partial_page_list(a1)	do { ; } while (0)
-#endif
-
-#endif /* !__ASSEMBLY__ */
-
-#endif /* _ASM_IA64_IA32_H */
diff --git a/arch/ia64/include/asm/io.h b/arch/ia64/include/asm/io.h
index 0d9d16e2d949..cc8335eb3110 100644
--- a/arch/ia64/include/asm/io.h
+++ b/arch/ia64/include/asm/io.h
@@ -424,6 +424,8 @@ __writeq (unsigned long val, volatile void __iomem *addr)
 extern void __iomem * ioremap(unsigned long offset, unsigned long size);
 extern void __iomem * ioremap_nocache (unsigned long offset, unsigned long size);
 extern void iounmap (volatile void __iomem *addr);
+extern void __iomem * early_ioremap (unsigned long phys_addr, unsigned long size);
+extern void early_iounmap (volatile void __iomem *addr, unsigned long size);
 
 /*
  * String version of IO memory access ops:
diff --git a/arch/ia64/include/asm/irq.h b/arch/ia64/include/asm/irq.h
index 5282546cdf82..91b920fd7d53 100644
--- a/arch/ia64/include/asm/irq.h
+++ b/arch/ia64/include/asm/irq.h
@@ -13,7 +13,7 @@
 
 #include <linux/types.h>
 #include <linux/cpumask.h>
-#include <asm-ia64/nr-irqs.h>
+#include <generated/nr-irqs.h>
 
 static __inline__ int
 irq_canonicalize (int irq)
diff --git a/arch/ia64/include/asm/kprobes.h b/arch/ia64/include/asm/kprobes.h
index dbf83fb28db3..d5505d6f2382 100644
--- a/arch/ia64/include/asm/kprobes.h
+++ b/arch/ia64/include/asm/kprobes.h
@@ -103,11 +103,6 @@ typedef struct kprobe_opcode {
 	bundle_t bundle;
 } kprobe_opcode_t;
 
-struct fnptr {
-	unsigned long ip;
-	unsigned long gp;
-};
-
 /* Architecture specific copy of original instruction*/
 struct arch_specific_insn {
 	/* copy of the instruction to be emulated */
diff --git a/arch/ia64/include/asm/kvm.h b/arch/ia64/include/asm/kvm.h
index 18a7e49abbc5..bc90c75adf67 100644
--- a/arch/ia64/include/asm/kvm.h
+++ b/arch/ia64/include/asm/kvm.h
@@ -60,6 +60,7 @@ struct kvm_ioapic_state {
 #define KVM_IRQCHIP_PIC_MASTER   0
 #define KVM_IRQCHIP_PIC_SLAVE    1
 #define KVM_IRQCHIP_IOAPIC       2
+#define KVM_NR_IRQCHIPS          3
 
 #define KVM_CONTEXT_SIZE 8*1024
 
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index d9b6325a9328..a362e67e0ca6 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -475,7 +475,6 @@ struct kvm_arch {
 	struct list_head assigned_dev_head;
 	struct iommu_domain *iommu_domain;
 	int iommu_flags;
-	struct hlist_head irq_ack_notifier_list;
 
 	unsigned long irq_sources_bitmap;
 	unsigned long irq_states[KVM_IOAPIC_NUM_PINS];
diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h
index c171cdf0a789..43f96ab18fa0 100644
--- a/arch/ia64/include/asm/mca.h
+++ b/arch/ia64/include/asm/mca.h
@@ -106,6 +106,11 @@ struct ia64_sal_os_state {
 	unsigned long os_status;	/* OS status to SAL, enum below */
 	unsigned long context;		/* 0 if return to same context
 					   1 if return to new context */
+
+	/* I-resources */
+	unsigned long iip;
+	unsigned long ipsr;
+	unsigned long ifs;
 };
 
 enum {
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 688a812c017d..61c7b1750b16 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -61,7 +61,7 @@ extern int register_active_ranges(u64 start, u64 len, int nid);
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define LARGE_GAP	0x40000000 /* Use virtual mem map if hole is > than this */
-  extern unsigned long vmalloc_end;
+  extern unsigned long VMALLOC_END;
   extern struct page *vmem_map;
   extern int find_largest_hole(u64 start, u64 end, void *arg);
   extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
diff --git a/arch/ia64/include/asm/numa.h b/arch/ia64/include/asm/numa.h
index 3499ff57bf42..6a8a27cfae3e 100644
--- a/arch/ia64/include/asm/numa.h
+++ b/arch/ia64/include/asm/numa.h
@@ -22,8 +22,6 @@
 
 #include <asm/mmzone.h>
 
-#define NUMA_NO_NODE	-1
-
 extern u16 cpu_to_node_map[NR_CPUS] __cacheline_aligned;
 extern cpumask_t node_to_cpu_mask[MAX_NUMNODES] __cacheline_aligned;
 extern pg_data_t *pgdat_list[MAX_NUMNODES];
diff --git a/arch/ia64/include/asm/pci.h b/arch/ia64/include/asm/pci.h
index 55281aabe5f2..73b5f785e70c 100644
--- a/arch/ia64/include/asm/pci.h
+++ b/arch/ia64/include/asm/pci.h
@@ -56,20 +56,6 @@ pcibios_penalize_isa_irq (int irq, int active)
 
 #include <asm-generic/pci-dma-compat.h>
 
-/* pci_unmap_{single,page} is not a nop, thus... */
-#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)	\
-	dma_addr_t ADDR_NAME;
-#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)	\
-	__u32 LEN_NAME;
-#define pci_unmap_addr(PTR, ADDR_NAME)	\
-	((PTR)->ADDR_NAME)
-#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL)	\
-	(((PTR)->ADDR_NAME) = (VAL))
-#define pci_unmap_len(PTR, LEN_NAME)	\
-	((PTR)->LEN_NAME)
-#define pci_unmap_len_set(PTR, LEN_NAME, VAL)	\
-	(((PTR)->LEN_NAME) = (VAL))
-
 #ifdef CONFIG_PCI
 static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 					enum pci_dma_burst_strategy *strat,
diff --git a/arch/ia64/include/asm/percpu.h b/arch/ia64/include/asm/percpu.h
index 30cf46534dd2..f7c00a5e0e2b 100644
--- a/arch/ia64/include/asm/percpu.h
+++ b/arch/ia64/include/asm/percpu.h
@@ -9,7 +9,7 @@
 #define PERCPU_ENOUGH_ROOM PERCPU_PAGE_SIZE
 
 #ifdef __ASSEMBLY__
-# define THIS_CPU(var)	(per_cpu__##var)  /* use this to mark accesses to per-CPU variables... */
+# define THIS_CPU(var)	(var)  /* use this to mark accesses to per-CPU variables... */
 #else /* !__ASSEMBLY__ */
 
 
@@ -39,7 +39,7 @@ extern void *per_cpu_init(void);
  * On the positive side, using __ia64_per_cpu_var() instead of __get_cpu_var() is slightly
  * more efficient.
  */
-#define __ia64_per_cpu_var(var)	per_cpu__##var
+#define __ia64_per_cpu_var(var)	var
 
 #include <asm-generic/percpu.h>
 
diff --git a/arch/ia64/include/asm/perfmon_default_smpl.h b/arch/ia64/include/asm/perfmon_default_smpl.h
index 48822c0811d8..74724b24c2b7 100644
--- a/arch/ia64/include/asm/perfmon_default_smpl.h
+++ b/arch/ia64/include/asm/perfmon_default_smpl.h
@@ -67,7 +67,7 @@ typedef struct {
 	unsigned long	ip;	/* where did the overflow interrupt happened */
 	unsigned long	tstamp;	/* ar.itc when entering perfmon intr. handler */
 
-	unsigned short	cpu;	/* cpu on which the overfow occured */
+	unsigned short	cpu;	/* cpu on which the overflow occured */
 	unsigned short	set;	/* event set active when overflow ocurred */
 	int		tgid;	/* thread group id (for NPTL, this is getpid()) */
 } pfm_default_smpl_entry_t;
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 8840a690d1e7..c3286f42e501 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -228,8 +228,7 @@ ia64_phys_addr_valid (unsigned long addr)
 #define VMALLOC_START	(RGN_BASE(RGN_GATE) + 0x200000000UL)
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 # define VMALLOC_END_INIT	(RGN_BASE(RGN_GATE) + (1UL << (4*PAGE_SHIFT - 9)))
-# define VMALLOC_END	vmalloc_end
-  extern unsigned long vmalloc_end;
+extern unsigned long VMALLOC_END;
 #else
 #if defined(CONFIG_SPARSEMEM) && defined(CONFIG_SPARSEMEM_VMEMMAP)
 /* SPARSEMEM_VMEMMAP uses half of vmalloc... */
@@ -463,7 +462,7 @@ pte_same (pte_t a, pte_t b)
 	return pte_val(a) == pte_val(b);
 }
 
-#define update_mmu_cache(vma, address, pte) do { } while (0)
+#define update_mmu_cache(vma, address, ptep) do { } while (0)
 
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init (void);
diff --git a/arch/ia64/include/asm/processor.h b/arch/ia64/include/asm/processor.h
index 3eaeedf1aef2..348e44d08ce3 100644
--- a/arch/ia64/include/asm/processor.h
+++ b/arch/ia64/include/asm/processor.h
@@ -229,7 +229,7 @@ struct cpuinfo_ia64 {
 #endif
 };
 
-DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
+DECLARE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
 
 /*
  * The "local" data variable. It refers to the per-CPU data of the currently executing
@@ -237,8 +237,8 @@ DECLARE_PER_CPU(struct cpuinfo_ia64, cpu_info);
  * Do not use the address of local_cpu_data, since it will be different from
  * cpu_data(smp_processor_id())!
  */
-#define local_cpu_data	(&__ia64_per_cpu_var(cpu_info))
-#define cpu_data(cpu)	(&per_cpu(cpu_info, cpu))
+#define local_cpu_data	(&__ia64_per_cpu_var(ia64_cpu_info))
+#define cpu_data(cpu)	(&per_cpu(ia64_cpu_info, cpu))
 
 extern void print_cpu_info (struct cpuinfo_ia64 *);
 
@@ -270,23 +270,6 @@ typedef struct {
 				(int __user *) (addr));		\
 })
 
-#ifdef CONFIG_IA32_SUPPORT
-struct desc_struct {
-	unsigned int a, b;
-};
-
-#define desc_empty(desc)		(!((desc)->a | (desc)->b))
-#define desc_equal(desc1, desc2)	(((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
-
-#define GDT_ENTRY_TLS_ENTRIES	3
-#define GDT_ENTRY_TLS_MIN	6
-#define GDT_ENTRY_TLS_MAX	(GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
-
-#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
-
-struct ia64_partial_page_list;
-#endif
-
 struct thread_struct {
 	__u32 flags;			/* various thread flags (see IA64_THREAD_*) */
 	/* writing on_ustack is performance-critical, so it's worth spending 8 bits on it... */
@@ -298,29 +281,6 @@ struct thread_struct {
 	__u64 rbs_bot;			/* the base address for the RBS */
 	int last_fph_cpu;		/* CPU that may hold the contents of f32-f127 */
 
-#ifdef CONFIG_IA32_SUPPORT
-	__u64 eflag;			/* IA32 EFLAGS reg */
-	__u64 fsr;			/* IA32 floating pt status reg */
-	__u64 fcr;			/* IA32 floating pt control reg */
-	__u64 fir;			/* IA32 fp except. instr. reg */
-	__u64 fdr;			/* IA32 fp except. data reg */
-	__u64 old_k1;			/* old value of ar.k1 */
-	__u64 old_iob;			/* old IOBase value */
-	struct ia64_partial_page_list *ppl; /* partial page list for 4K page size issue */
-	/* cached TLS descriptors. */
-	struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
-
-# define INIT_THREAD_IA32	.eflag =	0,			\
-				.fsr =		0,			\
-				.fcr =		0x17800000037fULL,	\
-				.fir =		0,			\
-				.fdr =		0,			\
-				.old_k1 =	0,			\
-				.old_iob =	0,			\
-				.ppl =		NULL,
-#else
-# define INIT_THREAD_IA32
-#endif /* CONFIG_IA32_SUPPORT */
 #ifdef CONFIG_PERFMON
 	void *pfm_context;		     /* pointer to detailed PMU context */
 	unsigned long pfm_needs_checking;    /* when >0, pending perfmon work on kernel exit */
@@ -342,7 +302,6 @@ struct thread_struct {
 	.rbs_bot =	STACK_TOP - DEFAULT_USER_STACK_SIZE,	\
 	.task_size =	DEFAULT_TASK_SIZE,			\
 	.last_fph_cpu =  -1,					\
-	INIT_THREAD_IA32					\
 	INIT_THREAD_PM						\
 	.dbr =		{0, },					\
 	.ibr =		{0, },					\
@@ -485,11 +444,6 @@ extern void __ia64_load_fpu (struct ia64_fpreg *fph);
 extern void ia64_save_debug_regs (unsigned long *save_area);
 extern void ia64_load_debug_regs (unsigned long *save_area);
 
-#ifdef CONFIG_IA32_SUPPORT
-extern void ia32_save_state (struct task_struct *task);
-extern void ia32_load_state (struct task_struct *task);
-#endif
-
 #define ia64_fph_enable()	do { ia64_rsm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
 #define ia64_fph_disable()	do { ia64_ssm(IA64_PSR_DFH); ia64_srlz_d(); } while (0)
 
diff --git a/arch/ia64/include/asm/ptrace.h b/arch/ia64/include/asm/ptrace.h
index 14055c636adf..7ae9c3f15a1c 100644
--- a/arch/ia64/include/asm/ptrace.h
+++ b/arch/ia64/include/asm/ptrace.h
@@ -319,11 +319,7 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
   ptrace_attach_sync_user_rbs(child)
 
   #define arch_has_single_step()  (1)
-  extern void user_enable_single_step(struct task_struct *);
-  extern void user_disable_single_step(struct task_struct *);
-
   #define arch_has_block_step()   (1)
-  extern void user_enable_block_step(struct task_struct *);
 
 #endif /* !__KERNEL__ */
 
diff --git a/arch/ia64/include/asm/rwsem.h b/arch/ia64/include/asm/rwsem.h
index fbee74b15782..e8762688e8e3 100644
--- a/arch/ia64/include/asm/rwsem.h
+++ b/arch/ia64/include/asm/rwsem.h
@@ -47,7 +47,7 @@ struct rw_semaphore {
 #define RWSEM_ACTIVE_WRITE_BIAS	(RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
 
 #define __RWSEM_INITIALIZER(name) \
-	{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, \
+	{ RWSEM_UNLOCKED_VALUE, __SPIN_LOCK_UNLOCKED((name).wait_lock), \
 	  LIST_HEAD_INIT((name).wait_list) }
 
 #define DECLARE_RWSEM(name) \
diff --git a/arch/ia64/include/asm/scatterlist.h b/arch/ia64/include/asm/scatterlist.h
index d6f57874041d..d8e98961dec7 100644
--- a/arch/ia64/include/asm/scatterlist.h
+++ b/arch/ia64/include/asm/scatterlist.h
@@ -2,25 +2,6 @@
 #define _ASM_IA64_SCATTERLIST_H
 
 /*
- * Modified 1998-1999, 2001-2002, 2004
- *	David Mosberger-Tang <davidm@hpl.hp.com>, Hewlett-Packard Co
- */
-
-#include <asm/types.h>
-
-struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
-	unsigned long sg_magic;
-#endif
-	unsigned long page_link;
-	unsigned int offset;
-	unsigned int length;	/* buffer length */
-
-	dma_addr_t dma_address;
-	unsigned int dma_length;
-};
-
-/*
  * It used to be that ISA_DMA_THRESHOLD had something to do with the
  * DMA-limits of ISA-devices. Nowadays, its only remaining use (apart
  * from the aha1542.c driver, which isn't 64-bit clean anyhow) is to
@@ -30,9 +11,6 @@ struct scatterlist {
  */
 #define ISA_DMA_THRESHOLD	0xffffffff
 
-#define sg_dma_len(sg)		((sg)->dma_length)
-#define sg_dma_address(sg)	((sg)->dma_address)
-
-#define ARCH_HAS_SG_CHAIN
+#include <asm-generic/scatterlist.h>
 
 #endif /* _ASM_IA64_SCATTERLIST_H */
diff --git a/arch/ia64/include/asm/sn/shubio.h b/arch/ia64/include/asm/sn/shubio.h
index 22a6f18a5313..6052422a22b3 100644
--- a/arch/ia64/include/asm/sn/shubio.h
+++ b/arch/ia64/include/asm/sn/shubio.h
@@ -3289,7 +3289,7 @@ typedef ii_icrb0_e_u_t icrbe_t;
 #define IIO_IIDSR_LVL_SHIFT	0
 #define IIO_IIDSR_LVL_MASK	0x000000ff
 
-/* Xtalk timeout threshhold register (IIO_IXTT) */
+/* Xtalk timeout threshold register (IIO_IXTT) */
 #define IXTT_RRSP_TO_SHFT	55	/* read response timeout */
 #define IXTT_RRSP_TO_MASK	(0x1FULL << IXTT_RRSP_TO_SHFT)
 #define IXTT_RRSP_PS_SHFT	32	/* read responsed TO prescalar */
diff --git a/arch/ia64/include/asm/socket.h b/arch/ia64/include/asm/socket.h
index 0b0d5ff062e5..51427eaa51ba 100644
--- a/arch/ia64/include/asm/socket.h
+++ b/arch/ia64/include/asm/socket.h
@@ -69,4 +69,6 @@
 #define SO_PROTOCOL	38
 #define SO_DOMAIN	39
 
+#define SO_RXQ_OVFL	40
+
 #endif /* _ASM_IA64_SOCKET_H */
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 239ecdc9516d..1a91c9121d17 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -17,7 +17,7 @@
 #include <asm/intrinsics.h>
 #include <asm/system.h>
 
-#define __raw_spin_lock_init(x)	((x)->lock = 0)
+#define arch_spin_lock_init(x)	((x)->lock = 0)
 
 /*
  * Ticket locks are conceptually two parts, one indicating the current head of
@@ -38,7 +38,7 @@
 #define TICKET_BITS	15
 #define TICKET_MASK	((1 << TICKET_BITS) - 1)
 
-static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_lock(arch_spinlock_t *lock)
 {
 	int	*p = (int *)&lock->lock, ticket, serve;
 
@@ -58,7 +58,7 @@ static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
 	}
 }
 
-static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int __ticket_spin_trylock(arch_spinlock_t *lock)
 {
 	int tmp = ACCESS_ONCE(lock->lock);
 
@@ -67,7 +67,7 @@ static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
 	return 0;
 }
 
-static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 {
 	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
@@ -75,7 +75,7 @@ static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
 	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
 }
 
-static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
+static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	int	*p = (int *)&lock->lock, ticket;
 
@@ -89,64 +89,64 @@ static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
 	}
 }
 
-static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_locked(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & TICKET_MASK);
 }
 
-static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
+static inline int __ticket_spin_is_contended(arch_spinlock_t *lock)
 {
 	long tmp = ACCESS_ONCE(lock->lock);
 
 	return ((tmp - (tmp >> TICKET_SHIFT)) & TICKET_MASK) > 1;
 }
 
-static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
+static inline int arch_spin_is_locked(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_locked(lock);
 }
 
-static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
+static inline int arch_spin_is_contended(arch_spinlock_t *lock)
 {
 	return __ticket_spin_is_contended(lock);
 }
-#define __raw_spin_is_contended	__raw_spin_is_contended
+#define arch_spin_is_contended	arch_spin_is_contended
 
-static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_lock(arch_spinlock_t *lock)
 {
 	__ticket_spin_lock(lock);
 }
 
-static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
+static __always_inline int arch_spin_trylock(arch_spinlock_t *lock)
 {
 	return __ticket_spin_trylock(lock);
 }
 
-static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
+static __always_inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock(lock);
 }
 
-static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
+static __always_inline void arch_spin_lock_flags(arch_spinlock_t *lock,
 						  unsigned long flags)
 {
-	__raw_spin_lock(lock);
+	arch_spin_lock(lock);
 }
 
-static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
 {
 	__ticket_spin_unlock_wait(lock);
 }
 
-#define __raw_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
-#define __raw_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
+#define arch_read_can_lock(rw)		(*(volatile int *)(rw) >= 0)
+#define arch_write_can_lock(rw)	(*(volatile int *)(rw) == 0)
 
 #ifdef ASM_SUPPORTED
 
 static __always_inline void
-__raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
+arch_read_lock_flags(arch_rwlock_t *lock, unsigned long flags)
 {
 	__asm__ __volatile__ (
 		"tbit.nz p6, p0 = %1,%2\n"
@@ -169,15 +169,15 @@ __raw_read_lock_flags(raw_rwlock_t *lock, unsigned long flags)
169 : "p6", "p7", "r2", "memory"); 169 : "p6", "p7", "r2", "memory");
170} 170}
171 171
172#define __raw_read_lock(lock) __raw_read_lock_flags(lock, 0) 172#define arch_read_lock(lock) arch_read_lock_flags(lock, 0)
173 173
174#else /* !ASM_SUPPORTED */ 174#else /* !ASM_SUPPORTED */
175 175
176#define __raw_read_lock_flags(rw, flags) __raw_read_lock(rw) 176#define arch_read_lock_flags(rw, flags) arch_read_lock(rw)
177 177
178#define __raw_read_lock(rw) \ 178#define arch_read_lock(rw) \
179do { \ 179do { \
180 raw_rwlock_t *__read_lock_ptr = (rw); \ 180 arch_rwlock_t *__read_lock_ptr = (rw); \
181 \ 181 \
182 while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \ 182 while (unlikely(ia64_fetchadd(1, (int *) __read_lock_ptr, acq) < 0)) { \
183 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ 183 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
@@ -188,16 +188,16 @@ do { \
188 188
189#endif /* !ASM_SUPPORTED */ 189#endif /* !ASM_SUPPORTED */
190 190
191#define __raw_read_unlock(rw) \ 191#define arch_read_unlock(rw) \
192do { \ 192do { \
193 raw_rwlock_t *__read_lock_ptr = (rw); \ 193 arch_rwlock_t *__read_lock_ptr = (rw); \
194 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \ 194 ia64_fetchadd(-1, (int *) __read_lock_ptr, rel); \
195} while (0) 195} while (0)
196 196
197#ifdef ASM_SUPPORTED 197#ifdef ASM_SUPPORTED
198 198
199static __always_inline void 199static __always_inline void
200__raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags) 200arch_write_lock_flags(arch_rwlock_t *lock, unsigned long flags)
201{ 201{
202 __asm__ __volatile__ ( 202 __asm__ __volatile__ (
203 "tbit.nz p6, p0 = %1, %2\n" 203 "tbit.nz p6, p0 = %1, %2\n"
@@ -221,9 +221,9 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
221 : "ar.ccv", "p6", "p7", "r2", "r29", "memory"); 221 : "ar.ccv", "p6", "p7", "r2", "r29", "memory");
222} 222}
223 223
224#define __raw_write_lock(rw) __raw_write_lock_flags(rw, 0) 224#define arch_write_lock(rw) arch_write_lock_flags(rw, 0)
225 225
226#define __raw_write_trylock(rw) \ 226#define arch_write_trylock(rw) \
227({ \ 227({ \
228 register long result; \ 228 register long result; \
229 \ 229 \
@@ -235,7 +235,7 @@ __raw_write_lock_flags(raw_rwlock_t *lock, unsigned long flags)
235 (result == 0); \ 235 (result == 0); \
236}) 236})
237 237
238static inline void __raw_write_unlock(raw_rwlock_t *x) 238static inline void arch_write_unlock(arch_rwlock_t *x)
239{ 239{
240 u8 *y = (u8 *)x; 240 u8 *y = (u8 *)x;
241 barrier(); 241 barrier();
@@ -244,9 +244,9 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
244 244
245#else /* !ASM_SUPPORTED */ 245#else /* !ASM_SUPPORTED */
246 246
247#define __raw_write_lock_flags(l, flags) __raw_write_lock(l) 247#define arch_write_lock_flags(l, flags) arch_write_lock(l)
248 248
249#define __raw_write_lock(l) \ 249#define arch_write_lock(l) \
250({ \ 250({ \
251 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \ 251 __u64 ia64_val, ia64_set_val = ia64_dep_mi(-1, 0, 31, 1); \
252 __u32 *ia64_write_lock_ptr = (__u32 *) (l); \ 252 __u32 *ia64_write_lock_ptr = (__u32 *) (l); \
@@ -257,7 +257,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
257 } while (ia64_val); \ 257 } while (ia64_val); \
258}) 258})
259 259
260#define __raw_write_trylock(rw) \ 260#define arch_write_trylock(rw) \
261({ \ 261({ \
262 __u64 ia64_val; \ 262 __u64 ia64_val; \
263 __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \ 263 __u64 ia64_set_val = ia64_dep_mi(-1, 0, 31,1); \
@@ -265,7 +265,7 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
265 (ia64_val == 0); \ 265 (ia64_val == 0); \
266}) 266})
267 267
268static inline void __raw_write_unlock(raw_rwlock_t *x) 268static inline void arch_write_unlock(arch_rwlock_t *x)
269{ 269{
270 barrier(); 270 barrier();
271 x->write_lock = 0; 271 x->write_lock = 0;
@@ -273,10 +273,10 @@ static inline void __raw_write_unlock(raw_rwlock_t *x)
273 273
274#endif /* !ASM_SUPPORTED */ 274#endif /* !ASM_SUPPORTED */
275 275
276static inline int __raw_read_trylock(raw_rwlock_t *x) 276static inline int arch_read_trylock(arch_rwlock_t *x)
277{ 277{
278 union { 278 union {
279 raw_rwlock_t lock; 279 arch_rwlock_t lock;
280 __u32 word; 280 __u32 word;
281 } old, new; 281 } old, new;
282 old.lock = new.lock = *x; 282 old.lock = new.lock = *x;
@@ -285,8 +285,8 @@ static inline int __raw_read_trylock(raw_rwlock_t *x)
285 return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word; 285 return (u32)ia64_cmpxchg4_acq((__u32 *)(x), new.word, old.word) == old.word;
286} 286}
287 287
288#define _raw_spin_relax(lock) cpu_relax() 288#define arch_spin_relax(lock) cpu_relax()
289#define _raw_read_relax(lock) cpu_relax() 289#define arch_read_relax(lock) cpu_relax()
290#define _raw_write_relax(lock) cpu_relax() 290#define arch_write_relax(lock) cpu_relax()
291 291
292#endif /* _ASM_IA64_SPINLOCK_H */ 292#endif /* _ASM_IA64_SPINLOCK_H */
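
Note on the spinlock.h hunks above: this is the ia64 side of the 2.6.33 core locking rename, moving the arch-private entry points from the __raw_*/raw_* namespace to arch_* (the types follow in the spinlock_types.h hunk below). The ticket encoding itself is untouched: one word carries both the "next ticket" and "now serving" fields. A minimal user-space model of the two predicates, assuming the header's usual constants (TICKET_SHIFT 17 and a 15-bit TICKET_MASK, defined earlier in the file and not shown in this hunk):

	#include <stdio.h>

	/* Assumed from the top of spinlock.h (not part of this hunk):
	 * low 15 bits hold "next ticket", bits 17+ hold "now serving". */
	#define TICKET_SHIFT 17
	#define TICKET_MASK  ((1 << 15) - 1)

	/* Locked while the next-ticket field differs from now-serving. */
	static int model_is_locked(long v)
	{
		return !!(((v >> TICKET_SHIFT) ^ v) & TICKET_MASK);
	}

	/* Contended once more than one ticket separates the two fields. */
	static int model_is_contended(long v)
	{
		return ((v - (v >> TICKET_SHIFT)) & TICKET_MASK) > 1;
	}

	int main(void)
	{
		/* 0 = free; 1 = one holder; 2 = one holder plus one waiter */
		printf("%d %d %d\n", model_is_locked(0), model_is_locked(1),
		       model_is_contended(2));	/* prints: 0 1 1 */
		return 0;
	}
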
diff --git a/arch/ia64/include/asm/spinlock_types.h b/arch/ia64/include/asm/spinlock_types.h
index 474e46f1ab4a..e2b42a52a6d3 100644
--- a/arch/ia64/include/asm/spinlock_types.h
+++ b/arch/ia64/include/asm/spinlock_types.h
@@ -7,15 +7,15 @@
7 7
8typedef struct { 8typedef struct {
9 volatile unsigned int lock; 9 volatile unsigned int lock;
10} raw_spinlock_t; 10} arch_spinlock_t;
11 11
12#define __RAW_SPIN_LOCK_UNLOCKED { 0 } 12#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
13 13
14typedef struct { 14typedef struct {
15 volatile unsigned int read_counter : 31; 15 volatile unsigned int read_counter : 31;
16 volatile unsigned int write_lock : 1; 16 volatile unsigned int write_lock : 1;
17} raw_rwlock_t; 17} arch_rwlock_t;
18 18
19#define __RAW_RW_LOCK_UNLOCKED { 0, 0 } 19#define __ARCH_RW_LOCK_UNLOCKED { 0, 0 }
20 20
21#endif 21#endif
diff --git a/arch/ia64/include/asm/swiotlb.h b/arch/ia64/include/asm/swiotlb.h
index dcbaea7ce128..f0acde68aaea 100644
--- a/arch/ia64/include/asm/swiotlb.h
+++ b/arch/ia64/include/asm/swiotlb.h
@@ -4,8 +4,6 @@
4#include <linux/dma-mapping.h> 4#include <linux/dma-mapping.h>
5#include <linux/swiotlb.h> 5#include <linux/swiotlb.h>
6 6
7extern int swiotlb_force;
8
9#ifdef CONFIG_SWIOTLB 7#ifdef CONFIG_SWIOTLB
10extern int swiotlb; 8extern int swiotlb;
11extern void pci_swiotlb_init(void); 9extern void pci_swiotlb_init(void);
diff --git a/arch/ia64/include/asm/syscall.h b/arch/ia64/include/asm/syscall.h
index 2f758a42f94b..a7ff1c6ab068 100644
--- a/arch/ia64/include/asm/syscall.h
+++ b/arch/ia64/include/asm/syscall.h
@@ -22,33 +22,18 @@ static inline long syscall_get_nr(struct task_struct *task,
22 if ((long)regs->cr_ifs < 0) /* Not a syscall */ 22 if ((long)regs->cr_ifs < 0) /* Not a syscall */
23 return -1; 23 return -1;
24 24
25#ifdef CONFIG_IA32_SUPPORT
26 if (IS_IA32_PROCESS(regs))
27 return regs->r1;
28#endif
29
30 return regs->r15; 25 return regs->r15;
31} 26}
32 27
33static inline void syscall_rollback(struct task_struct *task, 28static inline void syscall_rollback(struct task_struct *task,
34 struct pt_regs *regs) 29 struct pt_regs *regs)
35{ 30{
36#ifdef CONFIG_IA32_SUPPORT
37 if (IS_IA32_PROCESS(regs))
38 regs->r8 = regs->r1;
39#endif
40
41 /* do nothing */ 31 /* do nothing */
42} 32}
43 33
44static inline long syscall_get_error(struct task_struct *task, 34static inline long syscall_get_error(struct task_struct *task,
45 struct pt_regs *regs) 35 struct pt_regs *regs)
46{ 36{
47#ifdef CONFIG_IA32_SUPPORT
48 if (IS_IA32_PROCESS(regs))
49 return regs->r8;
50#endif
51
52 return regs->r10 == -1 ? regs->r8:0; 37 return regs->r10 == -1 ? regs->r8:0;
53} 38}
54 39
@@ -62,13 +47,6 @@ static inline void syscall_set_return_value(struct task_struct *task,
62 struct pt_regs *regs, 47 struct pt_regs *regs,
63 int error, long val) 48 int error, long val)
64{ 49{
65#ifdef CONFIG_IA32_SUPPORT
66 if (IS_IA32_PROCESS(regs)) {
67 regs->r8 = (long) error ? error : val;
68 return;
69 }
70#endif
71
72 if (error) { 50 if (error) {
73 /* error < 0, but ia64 uses > 0 return value */ 51 /* error < 0, but ia64 uses > 0 return value */
74 regs->r8 = -error; 52 regs->r8 = -error;
@@ -89,37 +67,6 @@ static inline void syscall_get_arguments(struct task_struct *task,
89{ 67{
90 BUG_ON(i + n > 6); 68 BUG_ON(i + n > 6);
91 69
92#ifdef CONFIG_IA32_SUPPORT
93 if (IS_IA32_PROCESS(regs)) {
94 switch (i + n) {
95 case 6:
96 if (!n--) break;
97 *args++ = regs->r13;
98 case 5:
99 if (!n--) break;
100 *args++ = regs->r15;
101 case 4:
102 if (!n--) break;
103 *args++ = regs->r14;
104 case 3:
105 if (!n--) break;
106 *args++ = regs->r10;
107 case 2:
108 if (!n--) break;
109 *args++ = regs->r9;
110 case 1:
111 if (!n--) break;
112 *args++ = regs->r11;
113 case 0:
114 if (!n--) break;
115 default:
116 BUG();
117 break;
118 }
119
120 return;
121 }
122#endif
123 ia64_syscall_get_set_arguments(task, regs, i, n, args, 0); 70 ia64_syscall_get_set_arguments(task, regs, i, n, args, 0);
124} 71}
125 72
@@ -130,34 +77,6 @@ static inline void syscall_set_arguments(struct task_struct *task,
130{ 77{
131 BUG_ON(i + n > 6); 78 BUG_ON(i + n > 6);
132 79
133#ifdef CONFIG_IA32_SUPPORT
134 if (IS_IA32_PROCESS(regs)) {
135 switch (i + n) {
136 case 6:
137 if (!n--) break;
138 regs->r13 = *args++;
139 case 5:
140 if (!n--) break;
141 regs->r15 = *args++;
142 case 4:
143 if (!n--) break;
144 regs->r14 = *args++;
145 case 3:
146 if (!n--) break;
147 regs->r10 = *args++;
148 case 2:
149 if (!n--) break;
150 regs->r9 = *args++;
151 case 1:
152 if (!n--) break;
153 regs->r11 = *args++;
154 case 0:
155 if (!n--) break;
156 }
157
158 return;
159 }
160#endif
161 ia64_syscall_get_set_arguments(task, regs, i, n, args, 1); 80 ia64_syscall_get_set_arguments(task, regs, i, n, args, 1);
162} 81}
163#endif /* _ASM_SYSCALL_H */ 82#endif /* _ASM_SYSCALL_H */
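
With the IA-32 branches gone, the syscall.h helpers above reduce to the native ia64 convention: a system call flags failure by setting r10 to -1 and leaving the (positive) errno in r8; on success r10 is 0 and r8 holds the return value. A user-space model of both directions, with the struct and function names invented for illustration:

	/* Model of the ia64 syscall return convention used by the helpers above. */
	struct model_regs { long r8, r10; };

	static long model_get_error(const struct model_regs *regs)
	{
		return regs->r10 == -1 ? regs->r8 : 0;
	}

	static void model_set_return_value(struct model_regs *regs,
					   int error, long val)
	{
		if (error) {
			regs->r8 = -error;	/* error < 0; ia64 stores it positive */
			regs->r10 = -1;
		} else {
			regs->r8 = val;
			regs->r10 = 0;
		}
	}
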
diff --git a/arch/ia64/include/asm/system.h b/arch/ia64/include/asm/system.h
index 927a381c20ca..9f342a574ce8 100644
--- a/arch/ia64/include/asm/system.h
+++ b/arch/ia64/include/asm/system.h
@@ -191,15 +191,6 @@ do { \
191 191
192#ifdef __KERNEL__ 192#ifdef __KERNEL__
193 193
194#ifdef CONFIG_IA32_SUPPORT
195# define IS_IA32_PROCESS(regs) (ia64_psr(regs)->is != 0)
196#else
197# define IS_IA32_PROCESS(regs) 0
198struct task_struct;
199static inline void ia32_save_state(struct task_struct *t __attribute__((unused))){}
200static inline void ia32_load_state(struct task_struct *t __attribute__((unused))){}
201#endif
202
203/* 194/*
204 * Context switch from one thread to another. If the two threads have 195 * Context switch from one thread to another. If the two threads have
205 * different address spaces, schedule() has already taken care of 196 * different address spaces, schedule() has already taken care of
@@ -233,7 +224,7 @@ extern void ia64_account_on_switch (struct task_struct *prev, struct task_struct
233 224
234#define IA64_HAS_EXTRA_STATE(t) \ 225#define IA64_HAS_EXTRA_STATE(t) \
235 ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \ 226 ((t)->thread.flags & (IA64_THREAD_DBG_VALID|IA64_THREAD_PM_VALID) \
236 || IS_IA32_PROCESS(task_pt_regs(t)) || PERFMON_IS_SYSWIDE()) 227 || PERFMON_IS_SYSWIDE())
237 228
238#define __switch_to(prev,next,last) do { \ 229#define __switch_to(prev,next,last) do { \
239 IA64_ACCOUNT_ON_SWITCH(prev, next); \ 230 IA64_ACCOUNT_ON_SWITCH(prev, next); \
diff --git a/arch/ia64/include/asm/tlb.h b/arch/ia64/include/asm/tlb.h
index 85d965cb19a0..23cce999eb1c 100644
--- a/arch/ia64/include/asm/tlb.h
+++ b/arch/ia64/include/asm/tlb.h
@@ -74,7 +74,7 @@ struct ia64_tr_entry {
74extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size); 74extern int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size);
75extern void ia64_ptr_entry(u64 target_mask, int slot); 75extern void ia64_ptr_entry(u64 target_mask, int slot);
76 76
77extern struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX]; 77extern struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
78 78
79/* 79/*
80 region register macros 80 region register macros
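
The tlb.h hunk above replaces the large static __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX] array with an array of per-CPU pointers that are allocated on demand; the mca.c hunk further down gains the matching NULL check. A hypothetical accessor sketching the layout this implies (one block of 2 * IA64_TR_ALLOC_MAX entries per CPU, itr entries followed by dtr entries, inferred from the mca.c pointer arithmetic below):

	/* Hypothetical helper, layout inferred from the mca.c hunk below. */
	static struct ia64_tr_entry *tr_entry(int cpu,
					      int iord /* 1 = itr, 2 = dtr */,
					      int slot)
	{
		if (!ia64_idtrs[cpu])	/* never allocated on this CPU */
			return NULL;
		return ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX + slot;
	}
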
diff --git a/arch/ia64/include/asm/topology.h b/arch/ia64/include/asm/topology.h
index 3ddb4e709dba..d323071d0f91 100644
--- a/arch/ia64/include/asm/topology.h
+++ b/arch/ia64/include/asm/topology.h
@@ -33,7 +33,9 @@
33/* 33/*
34 * Returns a bitmask of CPUs on Node 'node'. 34 * Returns a bitmask of CPUs on Node 'node'.
35 */ 35 */
36#define cpumask_of_node(node) (&node_to_cpu_mask[node]) 36#define cpumask_of_node(node) ((node) == -1 ? \
37 cpu_all_mask : \
38 &node_to_cpu_mask[node])
37 39
38/* 40/*
39 * Returns the number of the node containing Node 'nid'. 41 * Returns the number of the node containing Node 'nid'.
diff --git a/arch/ia64/include/asm/types.h b/arch/ia64/include/asm/types.h
index bcd260e597de..93773fd37be0 100644
--- a/arch/ia64/include/asm/types.h
+++ b/arch/ia64/include/asm/types.h
@@ -35,6 +35,11 @@ typedef unsigned int umode_t;
35 */ 35 */
36# ifdef __KERNEL__ 36# ifdef __KERNEL__
37 37
38struct fnptr {
39 unsigned long ip;
40 unsigned long gp;
41};
42
38/* DMA addresses are 64-bits wide, in general. */ 43/* DMA addresses are 64-bits wide, in general. */
39typedef u64 dma_addr_t; 44typedef u64 dma_addr_t;
40 45
diff --git a/arch/ia64/include/asm/unistd.h b/arch/ia64/include/asm/unistd.h
index 5a5347f5c4e4..bb8b0fff32b3 100644
--- a/arch/ia64/include/asm/unistd.h
+++ b/arch/ia64/include/asm/unistd.h
@@ -311,11 +311,12 @@
311#define __NR_preadv 1319 311#define __NR_preadv 1319
312#define __NR_pwritev 1320 312#define __NR_pwritev 1320
313#define __NR_rt_tgsigqueueinfo 1321 313#define __NR_rt_tgsigqueueinfo 1321
314#define __NR_recvmmsg 1322
314 315
315#ifdef __KERNEL__ 316#ifdef __KERNEL__
316 317
317 318
318#define NR_syscalls 298 /* length of syscall table */ 319#define NR_syscalls 299 /* length of syscall table */
319 320
320/* 321/*
321 * The following defines stop scripts/checksyscalls.sh from complaining about 322 * The following defines stop scripts/checksyscalls.sh from complaining about
@@ -334,20 +335,6 @@
334#define __ARCH_WANT_SYS_RT_SIGACTION 335#define __ARCH_WANT_SYS_RT_SIGACTION
335#define __ARCH_WANT_SYS_RT_SIGSUSPEND 336#define __ARCH_WANT_SYS_RT_SIGSUSPEND
336 337
337#ifdef CONFIG_IA32_SUPPORT
338# define __ARCH_WANT_SYS_FADVISE64
339# define __ARCH_WANT_SYS_GETPGRP
340# define __ARCH_WANT_SYS_LLSEEK
341# define __ARCH_WANT_SYS_NICE
342# define __ARCH_WANT_SYS_OLD_GETRLIMIT
343# define __ARCH_WANT_SYS_OLDUMOUNT
344# define __ARCH_WANT_SYS_PAUSE
345# define __ARCH_WANT_SYS_SIGPENDING
346# define __ARCH_WANT_SYS_SIGPROCMASK
347# define __ARCH_WANT_COMPAT_SYS_RT_SIGSUSPEND
348# define __ARCH_WANT_COMPAT_SYS_TIME
349#endif
350
351#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER) 338#if !defined(__ASSEMBLY__) && !defined(ASSEMBLER)
352 339
353#include <linux/types.h> 340#include <linux/types.h>
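
The new __NR_recvmmsg slot also explains the table-length bump: ia64 syscall numbers start at 1024 (an assumption stated here, not part of this hunk), so a highest number of 1322 means 1322 - 1024 + 1 = 299 entries, the new NR_syscalls. The matching "data8 sys_recvmmsg" appears in the entry.S hunk further down, where the .org guard enforces exactly this length. A compile-time check of the arithmetic:

	#define IA64_FIRST_SYSCALL 1024		/* assumed ia64 syscall base */
	#define NR_RECVMMSG        1322

	_Static_assert(NR_RECVMMSG - IA64_FIRST_SYSCALL + 1 == 299,
		       "NR_syscalls must cover the highest syscall number");
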
diff --git a/arch/ia64/include/asm/xen/events.h b/arch/ia64/include/asm/xen/events.h
index b8370c8b6198..baa74c82aa71 100644
--- a/arch/ia64/include/asm/xen/events.h
+++ b/arch/ia64/include/asm/xen/events.h
@@ -36,10 +36,6 @@ static inline int xen_irqs_disabled(struct pt_regs *regs)
36 return !(ia64_psr(regs)->i); 36 return !(ia64_psr(regs)->i);
37} 37}
38 38
39static inline void handle_irq(int irq, struct pt_regs *regs)
40{
41 __do_IRQ(irq);
42}
43#define irq_ctx_init(cpu) do { } while (0) 39#define irq_ctx_init(cpu) do { } while (0)
44 40
45#endif /* _ASM_IA64_XEN_EVENTS_H */ 41#endif /* _ASM_IA64_XEN_EVENTS_H */
diff --git a/arch/ia64/include/asm/xen/hypervisor.h b/arch/ia64/include/asm/xen/hypervisor.h
index 88afb54501e4..67455c2ed2b1 100644
--- a/arch/ia64/include/asm/xen/hypervisor.h
+++ b/arch/ia64/include/asm/xen/hypervisor.h
@@ -37,35 +37,9 @@
37#include <xen/interface/xen.h> 37#include <xen/interface/xen.h>
38#include <xen/interface/version.h> /* to compile feature.c */ 38#include <xen/interface/version.h> /* to compile feature.c */
 39#include <xen/features.h> /* to compile xen-netfront.c */ 39#include <xen/features.h> /* to compile xen-netfront.c */
40#include <xen/xen.h>
40#include <asm/xen/hypercall.h> 41#include <asm/xen/hypercall.h>
41 42
42/* xen_domain_type is set before executing any C code by early_xen_setup */
43enum xen_domain_type {
44 XEN_NATIVE, /* running on bare hardware */
45 XEN_PV_DOMAIN, /* running in a PV domain */
46 XEN_HVM_DOMAIN, /* running in a Xen hvm domain*/
47};
48
49#ifdef CONFIG_XEN
50extern enum xen_domain_type xen_domain_type;
51#else
52#define xen_domain_type XEN_NATIVE
53#endif
54
55#define xen_domain() (xen_domain_type != XEN_NATIVE)
56#define xen_pv_domain() (xen_domain() && \
57 xen_domain_type == XEN_PV_DOMAIN)
58#define xen_hvm_domain() (xen_domain() && \
59 xen_domain_type == XEN_HVM_DOMAIN)
60
61#ifdef CONFIG_XEN_DOM0
62#define xen_initial_domain() (xen_pv_domain() && \
63 (xen_start_info->flags & SIF_INITDOMAIN))
64#else
65#define xen_initial_domain() (0)
66#endif
67
68
69#ifdef CONFIG_XEN 43#ifdef CONFIG_XEN
70extern struct shared_info *HYPERVISOR_shared_info; 44extern struct shared_info *HYPERVISOR_shared_info;
71extern struct start_info *xen_start_info; 45extern struct start_info *xen_start_info;
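
The domain-type plumbing is not deleted outright here: it moves to the shared <xen/xen.h> header this file now includes, ending ia64's private copy of definitions x86 already had. Roughly what that header is expected to provide, reconstructed from the lines removed above:

	/* Sketch of the <xen/xen.h> contents assumed by this include swap. */
	enum xen_domain_type {
		XEN_NATIVE,	/* running on bare hardware */
		XEN_PV_DOMAIN,	/* running in a PV domain */
		XEN_HVM_DOMAIN,	/* running in a Xen HVM domain */
	};

	extern enum xen_domain_type xen_domain_type;

	#define xen_domain()	 (xen_domain_type != XEN_NATIVE)
	#define xen_pv_domain()	 (xen_domain() && \
				  xen_domain_type == XEN_PV_DOMAIN)
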
diff --git a/arch/ia64/kernel/Makefile b/arch/ia64/kernel/Makefile
index 6b7edcab0cb5..db10b1e378b0 100644
--- a/arch/ia64/kernel/Makefile
+++ b/arch/ia64/kernel/Makefile
@@ -8,19 +8,13 @@ endif
8 8
9extra-y := head.o init_task.o vmlinux.lds 9extra-y := head.o init_task.o vmlinux.lds
10 10
11obj-y := acpi.o entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \ 11obj-y := entry.o efi.o efi_stub.o gate-data.o fsys.o ia64_ksyms.o irq.o irq_ia64.o \
12 irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \ 12 irq_lsapic.o ivt.o machvec.o pal.o paravirt_patchlist.o patch.o process.o perfmon.o ptrace.o sal.o \
13 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \ 13 salinfo.o setup.o signal.o sys_ia64.o time.o traps.o unaligned.o \
14 unwind.o mca.o mca_asm.o topology.o dma-mapping.o 14 unwind.o mca.o mca_asm.o topology.o dma-mapping.o
15 15
16obj-$(CONFIG_ACPI) += acpi.o acpi-ext.o
16obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o 17obj-$(CONFIG_IA64_BRL_EMU) += brl_emu.o
17obj-$(CONFIG_IA64_GENERIC) += acpi-ext.o
18obj-$(CONFIG_IA64_HP_ZX1) += acpi-ext.o
19obj-$(CONFIG_IA64_HP_ZX1_SWIOTLB) += acpi-ext.o
20
21ifneq ($(CONFIG_ACPI_PROCESSOR),)
22obj-y += acpi-processor.o
23endif
24 18
25obj-$(CONFIG_IA64_PALINFO) += palinfo.o 19obj-$(CONFIG_IA64_PALINFO) += palinfo.o
26obj-$(CONFIG_IOSAPIC) += iosapic.o 20obj-$(CONFIG_IOSAPIC) += iosapic.o
@@ -51,6 +45,8 @@ endif
51obj-$(CONFIG_DMAR) += pci-dma.o 45obj-$(CONFIG_DMAR) += pci-dma.o
52obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o 46obj-$(CONFIG_SWIOTLB) += pci-swiotlb.o
53 47
48obj-$(CONFIG_BINFMT_ELF) += elfcore.o
49
54# fp_emulate() expects f2-f5,f16-f31 to contain the user-level state. 50# fp_emulate() expects f2-f5,f16-f31 to contain the user-level state.
55CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31 51CFLAGS_traps.o += -mfixed-range=f2-f5,f16-f31
56 52
@@ -81,17 +77,14 @@ define cmd_nr_irqs
81endef 77endef
82 78
83# We use internal kbuild rules to avoid the "is up to date" message from make 79# We use internal kbuild rules to avoid the "is up to date" message from make
84arch/$(SRCARCH)/kernel/nr-irqs.s: $(srctree)/arch/$(SRCARCH)/kernel/nr-irqs.c \ 80arch/$(SRCARCH)/kernel/nr-irqs.s: arch/$(SRCARCH)/kernel/nr-irqs.c
85 $(wildcard $(srctree)/include/asm-ia64/*/irq.h)
86 $(Q)mkdir -p $(dir $@) 81 $(Q)mkdir -p $(dir $@)
87 $(call if_changed_dep,cc_s_c) 82 $(call if_changed_dep,cc_s_c)
88 83
89include/asm-ia64/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s 84include/generated/nr-irqs.h: arch/$(SRCARCH)/kernel/nr-irqs.s
90 $(Q)mkdir -p $(dir $@) 85 $(Q)mkdir -p $(dir $@)
91 $(call cmd,nr_irqs) 86 $(call cmd,nr_irqs)
92 87
93clean-files += $(objtree)/include/asm-ia64/nr-irqs.h
94
95# 88#
96# native ivt.S, entry.S and fsys.S 89# native ivt.S, entry.S and fsys.S
97# 90#
diff --git a/arch/ia64/kernel/acpi-ext.c b/arch/ia64/kernel/acpi-ext.c
index b7515bc808a8..8b9318d311a0 100644
--- a/arch/ia64/kernel/acpi-ext.c
+++ b/arch/ia64/kernel/acpi-ext.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/types.h> 12#include <linux/types.h>
13#include <linux/slab.h>
13#include <linux/acpi.h> 14#include <linux/acpi.h>
14 15
15#include <asm/acpi-ext.h> 16#include <asm/acpi-ext.h>
diff --git a/arch/ia64/kernel/acpi-processor.c b/arch/ia64/kernel/acpi-processor.c
deleted file mode 100644
index dbda7bde6112..000000000000
--- a/arch/ia64/kernel/acpi-processor.c
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 * arch/ia64/kernel/acpi-processor.c
3 *
4 * Copyright (C) 2005 Intel Corporation
5 * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
6 * - Added _PDC for platforms with Intel CPUs
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/init.h>
12#include <linux/acpi.h>
13
14#include <acpi/processor.h>
15#include <asm/acpi.h>
16
17static void init_intel_pdc(struct acpi_processor *pr)
18{
19 struct acpi_object_list *obj_list;
20 union acpi_object *obj;
21 u32 *buf;
22
23 /* allocate and initialize pdc. It will be used later. */
24 obj_list = kmalloc(sizeof(struct acpi_object_list), GFP_KERNEL);
25 if (!obj_list) {
26 printk(KERN_ERR "Memory allocation error\n");
27 return;
28 }
29
30 obj = kmalloc(sizeof(union acpi_object), GFP_KERNEL);
31 if (!obj) {
32 printk(KERN_ERR "Memory allocation error\n");
33 kfree(obj_list);
34 return;
35 }
36
37 buf = kmalloc(12, GFP_KERNEL);
38 if (!buf) {
39 printk(KERN_ERR "Memory allocation error\n");
40 kfree(obj);
41 kfree(obj_list);
42 return;
43 }
44
45 buf[0] = ACPI_PDC_REVISION_ID;
46 buf[1] = 1;
47 buf[2] = ACPI_PDC_EST_CAPABILITY_SMP;
48 /*
49 * The default of PDC_SMP_T_SWCOORD bit is set for IA64 cpu so
50 * that OSPM is capable of native ACPI throttling software
51 * coordination using BIOS supplied _TSD info.
52 */
53 buf[2] |= ACPI_PDC_SMP_T_SWCOORD;
54
55 obj->type = ACPI_TYPE_BUFFER;
56 obj->buffer.length = 12;
57 obj->buffer.pointer = (u8 *) buf;
58 obj_list->count = 1;
59 obj_list->pointer = obj;
60 pr->pdc = obj_list;
61
62 return;
63}
64
65/* Initialize _PDC data based on the CPU vendor */
66void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
67{
68 pr->pdc = NULL;
69 init_intel_pdc(pr);
70 return;
71}
72
73EXPORT_SYMBOL(arch_acpi_processor_init_pdc);
74
75void arch_acpi_processor_cleanup_pdc(struct acpi_processor *pr)
76{
77 if (pr->pdc) {
78 kfree(pr->pdc->pointer->buffer.pointer);
79 kfree(pr->pdc->pointer);
80 kfree(pr->pdc);
81 pr->pdc = NULL;
82 }
83}
84
85EXPORT_SYMBOL(arch_acpi_processor_cleanup_pdc);
diff --git a/arch/ia64/kernel/acpi.c b/arch/ia64/kernel/acpi.c
index baec6f00f7f3..4d1a7e9314cf 100644
--- a/arch/ia64/kernel/acpi.c
+++ b/arch/ia64/kernel/acpi.c
@@ -44,6 +44,8 @@
44#include <linux/efi.h> 44#include <linux/efi.h>
45#include <linux/mmzone.h> 45#include <linux/mmzone.h>
46#include <linux/nodemask.h> 46#include <linux/nodemask.h>
47#include <linux/slab.h>
48#include <acpi/processor.h>
47#include <asm/io.h> 49#include <asm/io.h>
48#include <asm/iosapic.h> 50#include <asm/iosapic.h>
49#include <asm/machvec.h> 51#include <asm/machvec.h>
@@ -60,11 +62,6 @@
60 62
61#define PREFIX "ACPI: " 63#define PREFIX "ACPI: "
62 64
63void (*pm_idle) (void);
64EXPORT_SYMBOL(pm_idle);
65void (*pm_power_off) (void);
66EXPORT_SYMBOL(pm_power_off);
67
68u32 acpi_rsdt_forced; 65u32 acpi_rsdt_forced;
69unsigned int acpi_cpei_override; 66unsigned int acpi_cpei_override;
70unsigned int acpi_cpei_phys_cpuid; 67unsigned int acpi_cpei_phys_cpuid;
@@ -83,12 +80,10 @@ static unsigned long __init acpi_find_rsdp(void)
83 "v1.0/r0.71 tables no longer supported\n"); 80 "v1.0/r0.71 tables no longer supported\n");
84 return rsdp_phys; 81 return rsdp_phys;
85} 82}
86#endif
87 83
88const char __init * 84const char __init *
89acpi_get_sysname(void) 85acpi_get_sysname(void)
90{ 86{
91#ifdef CONFIG_IA64_GENERIC
92 unsigned long rsdp_phys; 87 unsigned long rsdp_phys;
93 struct acpi_table_rsdp *rsdp; 88 struct acpi_table_rsdp *rsdp;
94 struct acpi_table_xsdt *xsdt; 89 struct acpi_table_xsdt *xsdt;
@@ -143,30 +138,8 @@ acpi_get_sysname(void)
143#endif 138#endif
144 139
145 return "dig"; 140 return "dig";
146#else
147# if defined (CONFIG_IA64_HP_SIM)
148 return "hpsim";
149# elif defined (CONFIG_IA64_HP_ZX1)
150 return "hpzx1";
151# elif defined (CONFIG_IA64_HP_ZX1_SWIOTLB)
152 return "hpzx1_swiotlb";
153# elif defined (CONFIG_IA64_SGI_SN2)
154 return "sn2";
155# elif defined (CONFIG_IA64_SGI_UV)
156 return "uv";
157# elif defined (CONFIG_IA64_DIG)
158 return "dig";
159# elif defined (CONFIG_IA64_XEN_GUEST)
160 return "xen";
161# elif defined(CONFIG_IA64_DIG_VTD)
162 return "dig_vtd";
163# else
164# error Unknown platform. Fix acpi.c.
165# endif
166#endif
167} 141}
168 142#endif /* CONFIG_IA64_GENERIC */
169#ifdef CONFIG_ACPI
170 143
171#define ACPI_MAX_PLATFORM_INTERRUPTS 256 144#define ACPI_MAX_PLATFORM_INTERRUPTS 256
172 145
@@ -702,11 +675,23 @@ int __init early_acpi_boot_init(void)
702 printk(KERN_ERR PREFIX 675 printk(KERN_ERR PREFIX
703 "Error parsing MADT - no LAPIC entries\n"); 676 "Error parsing MADT - no LAPIC entries\n");
704 677
678#ifdef CONFIG_SMP
679 if (available_cpus == 0) {
680 printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
681 printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
682 smp_boot_data.cpu_phys_id[available_cpus] =
683 hard_smp_processor_id();
684 available_cpus = 1; /* We've got at least one of these, no? */
685 }
686 smp_boot_data.cpu_count = available_cpus;
687#endif
688 /* Make boot-up look pretty */
689 printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
690 total_cpus);
691
705 return 0; 692 return 0;
706} 693}
707 694
708
709
710int __init acpi_boot_init(void) 695int __init acpi_boot_init(void)
711{ 696{
712 697
@@ -769,18 +754,8 @@ int __init acpi_boot_init(void)
769 if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt)) 754 if (acpi_table_parse(ACPI_SIG_FADT, acpi_parse_fadt))
770 printk(KERN_ERR PREFIX "Can't find FADT\n"); 755 printk(KERN_ERR PREFIX "Can't find FADT\n");
771 756
757#ifdef CONFIG_ACPI_NUMA
772#ifdef CONFIG_SMP 758#ifdef CONFIG_SMP
773 if (available_cpus == 0) {
774 printk(KERN_INFO "ACPI: Found 0 CPUS; assuming 1\n");
775 printk(KERN_INFO "CPU 0 (0x%04x)", hard_smp_processor_id());
776 smp_boot_data.cpu_phys_id[available_cpus] =
777 hard_smp_processor_id();
778 available_cpus = 1; /* We've got at least one of these, no? */
779 }
780 smp_boot_data.cpu_count = available_cpus;
781
782 smp_build_cpu_map();
783# ifdef CONFIG_ACPI_NUMA
784 if (srat_num_cpus == 0) { 759 if (srat_num_cpus == 0) {
785 int cpu, i = 1; 760 int cpu, i = 1;
786 for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++) 761 for (cpu = 0; cpu < smp_boot_data.cpu_count; cpu++)
@@ -789,14 +764,9 @@ int __init acpi_boot_init(void)
789 node_cpuid[i++].phys_id = 764 node_cpuid[i++].phys_id =
790 smp_boot_data.cpu_phys_id[cpu]; 765 smp_boot_data.cpu_phys_id[cpu];
791 } 766 }
792# endif
793#endif 767#endif
794#ifdef CONFIG_ACPI_NUMA
795 build_cpu_to_node_map(); 768 build_cpu_to_node_map();
796#endif 769#endif
797 /* Make boot-up look pretty */
798 printk(KERN_INFO "%d CPUs available, %d CPUs total\n", available_cpus,
799 total_cpus);
800 return 0; 770 return 0;
801} 771}
802 772
@@ -884,8 +854,8 @@ __init void prefill_possible_map(void)
884 854
885 possible = available_cpus + additional_cpus; 855 possible = available_cpus + additional_cpus;
886 856
887 if (possible > NR_CPUS) 857 if (possible > nr_cpu_ids)
888 possible = NR_CPUS; 858 possible = nr_cpu_ids;
889 859
890 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n", 860 printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
891 possible, max((possible - available_cpus), 0)); 861 possible, max((possible - available_cpus), 0));
@@ -939,6 +909,8 @@ int acpi_map_lsapic(acpi_handle handle, int *pcpu)
939 cpu_set(cpu, cpu_present_map); 909 cpu_set(cpu, cpu_present_map);
940 ia64_cpu_to_sapicid[cpu] = physid; 910 ia64_cpu_to_sapicid[cpu] = physid;
941 911
912 acpi_processor_set_pdc(handle);
913
942 *pcpu = cpu; 914 *pcpu = cpu;
943 return (0); 915 return (0);
944} 916}
@@ -1063,5 +1035,3 @@ void acpi_restore_state_mem(void) {}
1063 * do_suspend_lowlevel() 1035 * do_suspend_lowlevel()
1064 */ 1036 */
1065void do_suspend_lowlevel(void) {} 1037void do_suspend_lowlevel(void) {}
1066
1067#endif /* CONFIG_ACPI */
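
The acpi.c churn above has three independent parts: pm_idle/pm_power_off move out of arch code, acpi_get_sysname() is compiled only for CONFIG_IA64_GENERIC (non-generic kernels no longer need the per-platform #elif chain here), and the "found 0 CPUs" fallback runs earlier, in early_acpi_boot_init(). Hotplug sizing also switches from the compile-time NR_CPUS cap to the runtime nr_cpu_ids limit; a minimal model of that clamp:

	/* sketch: nr_cpu_ids (runtime limit) replaces NR_CPUS (compile-time cap) */
	static int clamp_possible(int available, int additional, int nr_cpu_ids)
	{
		int possible = available + additional;

		return possible > nr_cpu_ids ? nr_cpu_ids : possible;
	}
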
diff --git a/arch/ia64/kernel/audit.c b/arch/ia64/kernel/audit.c
index f3802ae89b10..96a9d18ff4c4 100644
--- a/arch/ia64/kernel/audit.c
+++ b/arch/ia64/kernel/audit.c
@@ -30,20 +30,11 @@ static unsigned signal_class[] = {
30 30
31int audit_classify_arch(int arch) 31int audit_classify_arch(int arch)
32{ 32{
33#ifdef CONFIG_IA32_SUPPORT
34 if (arch == AUDIT_ARCH_I386)
35 return 1;
36#endif
37 return 0; 33 return 0;
38} 34}
39 35
40int audit_classify_syscall(int abi, unsigned syscall) 36int audit_classify_syscall(int abi, unsigned syscall)
41{ 37{
42#ifdef CONFIG_IA32_SUPPORT
43 extern int ia32_classify_syscall(unsigned);
44 if (abi == AUDIT_ARCH_I386)
45 return ia32_classify_syscall(syscall);
46#endif
47 switch(syscall) { 38 switch(syscall) {
48 case __NR_open: 39 case __NR_open:
49 return 2; 40 return 2;
@@ -58,18 +49,6 @@ int audit_classify_syscall(int abi, unsigned syscall)
58 49
59static int __init audit_classes_init(void) 50static int __init audit_classes_init(void)
60{ 51{
61#ifdef CONFIG_IA32_SUPPORT
62 extern __u32 ia32_dir_class[];
63 extern __u32 ia32_write_class[];
64 extern __u32 ia32_read_class[];
65 extern __u32 ia32_chattr_class[];
66 extern __u32 ia32_signal_class[];
67 audit_register_class(AUDIT_CLASS_WRITE_32, ia32_write_class);
68 audit_register_class(AUDIT_CLASS_READ_32, ia32_read_class);
69 audit_register_class(AUDIT_CLASS_DIR_WRITE_32, ia32_dir_class);
70 audit_register_class(AUDIT_CLASS_CHATTR_32, ia32_chattr_class);
71 audit_register_class(AUDIT_CLASS_SIGNAL_32, ia32_signal_class);
72#endif
73 audit_register_class(AUDIT_CLASS_WRITE, write_class); 52 audit_register_class(AUDIT_CLASS_WRITE, write_class);
74 audit_register_class(AUDIT_CLASS_READ, read_class); 53 audit_register_class(AUDIT_CLASS_READ, read_class);
75 audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); 54 audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class);
diff --git a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
index 7b435451b3dc..b0b4e6e710f2 100644
--- a/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
+++ b/arch/ia64/kernel/cpufreq/acpi-cpufreq.c
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/slab.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/init.h> 15#include <linux/init.h>
15#include <linux/cpufreq.h> 16#include <linux/cpufreq.h>
diff --git a/arch/ia64/kernel/crash.c b/arch/ia64/kernel/crash.c
index 6631a9dfafdc..b942f4032d7a 100644
--- a/arch/ia64/kernel/crash.c
+++ b/arch/ia64/kernel/crash.c
@@ -239,32 +239,29 @@ kdump_init_notifier(struct notifier_block *self, unsigned long val, void *data)
239#ifdef CONFIG_SYSCTL 239#ifdef CONFIG_SYSCTL
240static ctl_table kdump_ctl_table[] = { 240static ctl_table kdump_ctl_table[] = {
241 { 241 {
242 .ctl_name = CTL_UNNUMBERED,
243 .procname = "kdump_on_init", 242 .procname = "kdump_on_init",
244 .data = &kdump_on_init, 243 .data = &kdump_on_init,
245 .maxlen = sizeof(int), 244 .maxlen = sizeof(int),
246 .mode = 0644, 245 .mode = 0644,
247 .proc_handler = &proc_dointvec, 246 .proc_handler = proc_dointvec,
248 }, 247 },
249 { 248 {
250 .ctl_name = CTL_UNNUMBERED,
251 .procname = "kdump_on_fatal_mca", 249 .procname = "kdump_on_fatal_mca",
252 .data = &kdump_on_fatal_mca, 250 .data = &kdump_on_fatal_mca,
253 .maxlen = sizeof(int), 251 .maxlen = sizeof(int),
254 .mode = 0644, 252 .mode = 0644,
255 .proc_handler = &proc_dointvec, 253 .proc_handler = proc_dointvec,
256 }, 254 },
257 { .ctl_name = 0 } 255 { }
258}; 256};
259 257
260static ctl_table sys_table[] = { 258static ctl_table sys_table[] = {
261 { 259 {
262 .ctl_name = CTL_KERN,
263 .procname = "kernel", 260 .procname = "kernel",
264 .mode = 0555, 261 .mode = 0555,
265 .child = kdump_ctl_table, 262 .child = kdump_ctl_table,
266 }, 263 },
267 { .ctl_name = 0 } 264 { }
268}; 265};
269#endif 266#endif
270 267
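
This crash.c hunk is part of the tree-wide sysctl cleanup in this window: binary sysctl numbers went away, so .ctl_name disappears from ctl_table, handlers lose the address-of ampersand, and the terminator becomes an empty initializer. The resulting entry shape, restated as a standalone sketch of the first entry above:

	/* Post-cleanup ctl_table shape: no .ctl_name, plain handler pointer,
	 * empty terminating entry. */
	static struct ctl_table kdump_sketch[] = {
		{
			.procname	= "kdump_on_init",
			.data		= &kdump_on_init,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
		{ }
	};
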
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index c745d0aeb6e0..a0f001928502 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -26,6 +26,7 @@
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/init.h> 27#include <linux/init.h>
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/slab.h>
29#include <linux/time.h> 30#include <linux/time.h>
30#include <linux/efi.h> 31#include <linux/efi.h>
31#include <linux/kexec.h> 32#include <linux/kexec.h>
diff --git a/arch/ia64/kernel/elfcore.c b/arch/ia64/kernel/elfcore.c
new file mode 100644
index 000000000000..bac1639bc320
--- /dev/null
+++ b/arch/ia64/kernel/elfcore.c
@@ -0,0 +1,80 @@
1#include <linux/elf.h>
2#include <linux/coredump.h>
3#include <linux/fs.h>
4#include <linux/mm.h>
5
6#include <asm/elf.h>
7
8
9Elf64_Half elf_core_extra_phdrs(void)
10{
11 return GATE_EHDR->e_phnum;
12}
13
14int elf_core_write_extra_phdrs(struct file *file, loff_t offset, size_t *size,
15 unsigned long limit)
16{
17 const struct elf_phdr *const gate_phdrs =
18 (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
19 int i;
20 Elf64_Off ofs = 0;
21
22 for (i = 0; i < GATE_EHDR->e_phnum; ++i) {
23 struct elf_phdr phdr = gate_phdrs[i];
24
25 if (phdr.p_type == PT_LOAD) {
26 phdr.p_memsz = PAGE_ALIGN(phdr.p_memsz);
27 phdr.p_filesz = phdr.p_memsz;
28 if (ofs == 0) {
29 ofs = phdr.p_offset = offset;
30 offset += phdr.p_filesz;
31 } else {
32 phdr.p_offset = ofs;
33 }
34 } else {
35 phdr.p_offset += ofs;
36 }
37 phdr.p_paddr = 0; /* match other core phdrs */
38 *size += sizeof(phdr);
39 if (*size > limit || !dump_write(file, &phdr, sizeof(phdr)))
40 return 0;
41 }
42 return 1;
43}
44
45int elf_core_write_extra_data(struct file *file, size_t *size,
46 unsigned long limit)
47{
48 const struct elf_phdr *const gate_phdrs =
49 (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
50 int i;
51
52 for (i = 0; i < GATE_EHDR->e_phnum; ++i) {
53 if (gate_phdrs[i].p_type == PT_LOAD) {
54 void *addr = (void *)gate_phdrs[i].p_vaddr;
55 size_t memsz = PAGE_ALIGN(gate_phdrs[i].p_memsz);
56
57 *size += memsz;
58 if (*size > limit || !dump_write(file, addr, memsz))
59 return 0;
60 break;
61 }
62 }
63 return 1;
64}
65
66size_t elf_core_extra_data_size(void)
67{
68 const struct elf_phdr *const gate_phdrs =
69 (const struct elf_phdr *) (GATE_ADDR + GATE_EHDR->e_phoff);
70 int i;
71 size_t size = 0;
72
73 for (i = 0; i < GATE_EHDR->e_phnum; ++i) {
74 if (gate_phdrs[i].p_type == PT_LOAD) {
75 size += PAGE_ALIGN(gate_phdrs[i].p_memsz);
76 break;
77 }
78 }
79 return size;
80}
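
The new elfcore.c supplies the arch hooks the generic ELF core dumper calls (built when CONFIG_BINFMT_ELF is set, per the Makefile hunk above) to append the ia64 gate page, the kernel-provided DSO, to core files: one extra program header per gate phdr plus the PT_LOAD data itself. The two writer hooks share one bounded-write pattern, sketched here as a hypothetical helper that is not in the file:

	/* Pattern used above: account the bytes first, then fail if the
	 * core-file limit would be exceeded or the write itself fails. */
	static int bounded_write(struct file *file, const void *buf, size_t len,
				 size_t *size, unsigned long limit)
	{
		*size += len;
		return *size <= limit && dump_write(file, buf, len);
	}
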
diff --git a/arch/ia64/kernel/entry.S b/arch/ia64/kernel/entry.S
index d0e7d37017b4..9a260b317d8d 100644
--- a/arch/ia64/kernel/entry.S
+++ b/arch/ia64/kernel/entry.S
@@ -71,15 +71,6 @@ ENTRY(ia64_execve)
71 add out3=16,sp // regs 71 add out3=16,sp // regs
72 br.call.sptk.many rp=sys_execve 72 br.call.sptk.many rp=sys_execve
73.ret0: 73.ret0:
74#ifdef CONFIG_IA32_SUPPORT
75 /*
76 * Check if we're returning to ia32 mode. If so, we need to restore ia32 registers
77 * from pt_regs.
78 */
79 adds r16=PT(CR_IPSR)+16,sp
80 ;;
81 ld8 r16=[r16]
82#endif
83 cmp4.ge p6,p7=r8,r0 74 cmp4.ge p6,p7=r8,r0
84 mov ar.pfs=loc1 // restore ar.pfs 75 mov ar.pfs=loc1 // restore ar.pfs
85 sxt4 r8=r8 // return 64-bit result 76 sxt4 r8=r8 // return 64-bit result
@@ -108,12 +99,6 @@ ENTRY(ia64_execve)
108 ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0 99 ldf.fill f23=[sp]; ldf.fill f24=[sp]; mov f25=f0
109 ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0 100 ldf.fill f26=[sp]; ldf.fill f27=[sp]; mov f28=f0
110 ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0 101 ldf.fill f29=[sp]; ldf.fill f30=[sp]; mov f31=f0
111#ifdef CONFIG_IA32_SUPPORT
112 tbit.nz p6,p0=r16, IA64_PSR_IS_BIT
113 movl loc0=ia64_ret_from_ia32_execve
114 ;;
115(p6) mov rp=loc0
116#endif
117 br.ret.sptk.many rp 102 br.ret.sptk.many rp
118END(ia64_execve) 103END(ia64_execve)
119 104
@@ -848,30 +833,6 @@ __paravirt_work_processed_syscall:
848 br.cond.sptk.many rbs_switch // B 833 br.cond.sptk.many rbs_switch // B
849END(__paravirt_leave_syscall) 834END(__paravirt_leave_syscall)
850 835
851#ifdef __IA64_ASM_PARAVIRTUALIZED_NATIVE
852#ifdef CONFIG_IA32_SUPPORT
853GLOBAL_ENTRY(ia64_ret_from_ia32_execve)
854 PT_REGS_UNWIND_INFO(0)
855 adds r2=PT(R8)+16,sp // r2 = &pt_regs.r8
856 adds r3=PT(R10)+16,sp // r3 = &pt_regs.r10
857 ;;
858 .mem.offset 0,0
859 st8.spill [r2]=r8 // store return value in slot for r8 and set unat bit
860 .mem.offset 8,0
861 st8.spill [r3]=r0 // clear error indication in slot for r10 and set unat bit
862#ifdef CONFIG_PARAVIRT
863 ;;
864 // don't fall through, ia64_leave_kernel may be #define'd
865 br.cond.sptk.few ia64_leave_kernel
866 ;;
867#endif /* CONFIG_PARAVIRT */
868END(ia64_ret_from_ia32_execve)
869#ifndef CONFIG_PARAVIRT
870 // fall through
871#endif
872#endif /* CONFIG_IA32_SUPPORT */
873#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
874
875GLOBAL_ENTRY(__paravirt_leave_kernel) 836GLOBAL_ENTRY(__paravirt_leave_kernel)
876 PT_REGS_UNWIND_INFO(0) 837 PT_REGS_UNWIND_INFO(0)
877 /* 838 /*
@@ -1806,6 +1767,7 @@ sys_call_table:
1806 data8 sys_preadv 1767 data8 sys_preadv
1807 data8 sys_pwritev // 1320 1768 data8 sys_pwritev // 1320
1808 data8 sys_rt_tgsigqueueinfo 1769 data8 sys_rt_tgsigqueueinfo
1770 data8 sys_recvmmsg
1809 1771
1810 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls 1772 .org sys_call_table + 8*NR_syscalls // guard against failures to increase NR_syscalls
1811#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */ 1773#endif /* __IA64_ASM_PARAVIRTUALIZED_NATIVE */
diff --git a/arch/ia64/kernel/esi.c b/arch/ia64/kernel/esi.c
index d5764a3d74af..b091111270cb 100644
--- a/arch/ia64/kernel/esi.c
+++ b/arch/ia64/kernel/esi.c
@@ -84,7 +84,7 @@ static int __init esi_init (void)
84 case ESI_DESC_ENTRY_POINT: 84 case ESI_DESC_ENTRY_POINT:
85 break; 85 break;
86 default: 86 default:
87 printk(KERN_WARNING "Unkown table type %d found in " 87 printk(KERN_WARNING "Unknown table type %d found in "
88 "ESI table, ignoring rest of table\n", *p); 88 "ESI table, ignoring rest of table\n", *p);
89 return -ENODEV; 89 return -ENODEV;
90 } 90 }
diff --git a/arch/ia64/kernel/head.S b/arch/ia64/kernel/head.S
index 696eff28a0c4..17a9fba38930 100644
--- a/arch/ia64/kernel/head.S
+++ b/arch/ia64/kernel/head.S
@@ -1051,7 +1051,7 @@ END(ia64_delay_loop)
1051 * intermediate precision so that we can produce a full 64-bit result. 1051 * intermediate precision so that we can produce a full 64-bit result.
1052 */ 1052 */
1053GLOBAL_ENTRY(ia64_native_sched_clock) 1053GLOBAL_ENTRY(ia64_native_sched_clock)
1054 addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 1054 addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
1055 mov.m r9=ar.itc // fetch cycle-counter (35 cyc) 1055 mov.m r9=ar.itc // fetch cycle-counter (35 cyc)
1056 ;; 1056 ;;
1057 ldf8 f8=[r8] 1057 ldf8 f8=[r8]
@@ -1077,7 +1077,7 @@ sched_clock = ia64_native_sched_clock
1077#ifdef CONFIG_VIRT_CPU_ACCOUNTING 1077#ifdef CONFIG_VIRT_CPU_ACCOUNTING
1078GLOBAL_ENTRY(cycle_to_cputime) 1078GLOBAL_ENTRY(cycle_to_cputime)
1079 alloc r16=ar.pfs,1,0,0,0 1079 alloc r16=ar.pfs,1,0,0,0
1080 addl r8=THIS_CPU(cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0 1080 addl r8=THIS_CPU(ia64_cpu_info) + IA64_CPUINFO_NSEC_PER_CYC_OFFSET,r0
1081 ;; 1081 ;;
1082 ldf8 f8=[r8] 1082 ldf8 f8=[r8]
1083 ;; 1083 ;;
diff --git a/arch/ia64/kernel/ia64_ksyms.c b/arch/ia64/kernel/ia64_ksyms.c
index 14d39e300627..7f4a0ed24152 100644
--- a/arch/ia64/kernel/ia64_ksyms.c
+++ b/arch/ia64/kernel/ia64_ksyms.c
@@ -30,9 +30,9 @@ EXPORT_SYMBOL(max_low_pfn); /* defined by bootmem.c, but not exported by generic
30#endif 30#endif
31 31
32#include <asm/processor.h> 32#include <asm/processor.h>
33EXPORT_SYMBOL(per_cpu__cpu_info); 33EXPORT_SYMBOL(ia64_cpu_info);
34#ifdef CONFIG_SMP 34#ifdef CONFIG_SMP
35EXPORT_SYMBOL(per_cpu__local_per_cpu_offset); 35EXPORT_SYMBOL(local_per_cpu_offset);
36#endif 36#endif
37 37
38#include <asm/uaccess.h> 38#include <asm/uaccess.h>
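
Two renames meet in this ia64_ksyms.c hunk: the per_cpu__ symbol prefix was dropped from per-CPU variables tree-wide, and ia64's cpu_info per-CPU variable became ia64_cpu_info (the head.S and mca_asm.S hunks nearby follow suit). Exports therefore use the plain names; a sketch of the definition side, assuming the usual percpu macros (the variable is defined elsewhere, in setup.c in this tree):

	/* Sketch: per-CPU object defined and exported under its plain name. */
	DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
	EXPORT_SYMBOL(ia64_cpu_info);
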
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index dab4d393908c..7ded76658d2d 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -86,6 +86,7 @@
86#include <linux/kernel.h> 86#include <linux/kernel.h>
87#include <linux/list.h> 87#include <linux/list.h>
88#include <linux/pci.h> 88#include <linux/pci.h>
89#include <linux/slab.h>
89#include <linux/smp.h> 90#include <linux/smp.h>
90#include <linux/string.h> 91#include <linux/string.h>
91#include <linux/bootmem.h> 92#include <linux/bootmem.h>
@@ -793,12 +794,12 @@ iosapic_register_intr (unsigned int gsi,
793 goto unlock_iosapic_lock; 794 goto unlock_iosapic_lock;
794 } 795 }
795 796
796 spin_lock(&irq_desc[irq].lock); 797 raw_spin_lock(&irq_desc[irq].lock);
797 dest = get_target_cpu(gsi, irq); 798 dest = get_target_cpu(gsi, irq);
798 dmode = choose_dmode(); 799 dmode = choose_dmode();
799 err = register_intr(gsi, irq, dmode, polarity, trigger); 800 err = register_intr(gsi, irq, dmode, polarity, trigger);
800 if (err < 0) { 801 if (err < 0) {
801 spin_unlock(&irq_desc[irq].lock); 802 raw_spin_unlock(&irq_desc[irq].lock);
802 irq = err; 803 irq = err;
803 goto unlock_iosapic_lock; 804 goto unlock_iosapic_lock;
804 } 805 }
@@ -817,7 +818,7 @@ iosapic_register_intr (unsigned int gsi,
817 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"), 818 (polarity == IOSAPIC_POL_HIGH ? "high" : "low"),
818 cpu_logical_id(dest), dest, irq_to_vector(irq)); 819 cpu_logical_id(dest), dest, irq_to_vector(irq));
819 820
820 spin_unlock(&irq_desc[irq].lock); 821 raw_spin_unlock(&irq_desc[irq].lock);
821 unlock_iosapic_lock: 822 unlock_iosapic_lock:
822 spin_unlock_irqrestore(&iosapic_lock, flags); 823 spin_unlock_irqrestore(&iosapic_lock, flags);
823 return irq; 824 return irq;
diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c
index 7d8951229e7c..94ee9d067cbd 100644
--- a/arch/ia64/kernel/irq.c
+++ b/arch/ia64/kernel/irq.c
@@ -71,7 +71,7 @@ int show_interrupts(struct seq_file *p, void *v)
71 } 71 }
72 72
73 if (i < NR_IRQS) { 73 if (i < NR_IRQS) {
74 spin_lock_irqsave(&irq_desc[i].lock, flags); 74 raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
75 action = irq_desc[i].action; 75 action = irq_desc[i].action;
76 if (!action) 76 if (!action)
77 goto skip; 77 goto skip;
@@ -91,7 +91,7 @@ int show_interrupts(struct seq_file *p, void *v)
91 91
92 seq_putc(p, '\n'); 92 seq_putc(p, '\n');
93skip: 93skip:
94 spin_unlock_irqrestore(&irq_desc[i].lock, flags); 94 raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
95 } else if (i == NR_IRQS) 95 } else if (i == NR_IRQS)
96 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count)); 96 seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
97 return 0; 97 return 0;
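
Another tree-wide change surfaces in the iosapic.c and irq.c hunks above: irq_desc[].lock became a raw_spinlock_t (exempt from sleeping-lock conversion on RT), so every accessor switches to the raw_spin_* API while the critical sections stay the same. A sketch of the calling pattern, with the helper name invented:

	/* Hypothetical helper showing the unchanged locking pattern. */
	static void with_desc_locked(unsigned int irq)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&irq_desc[irq].lock, flags);
		/* inspect or reprogram the descriptor here */
		raw_spin_unlock_irqrestore(&irq_desc[irq].lock, flags);
	}
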
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index dd9d7b54f1a1..640479304ac0 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -22,7 +22,6 @@
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/ioport.h> 23#include <linux/ioport.h>
24#include <linux/kernel_stat.h> 24#include <linux/kernel_stat.h>
25#include <linux/slab.h>
26#include <linux/ptrace.h> 25#include <linux/ptrace.h>
27#include <linux/random.h> /* for rand_initialize_irq() */ 26#include <linux/random.h> /* for rand_initialize_irq() */
28#include <linux/signal.h> 27#include <linux/signal.h>
@@ -260,7 +259,6 @@ void __setup_vector_irq(int cpu)
260} 259}
261 260
262#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)) 261#if defined(CONFIG_SMP) && (defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG))
263#define IA64_IRQ_MOVE_VECTOR IA64_DEF_FIRST_DEVICE_VECTOR
264 262
265static enum vector_domain_type { 263static enum vector_domain_type {
266 VECTOR_DOMAIN_NONE, 264 VECTOR_DOMAIN_NONE,
@@ -345,7 +343,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
345 343
346 desc = irq_desc + irq; 344 desc = irq_desc + irq;
347 cfg = irq_cfg + irq; 345 cfg = irq_cfg + irq;
348 spin_lock(&desc->lock); 346 raw_spin_lock(&desc->lock);
349 if (!cfg->move_cleanup_count) 347 if (!cfg->move_cleanup_count)
350 goto unlock; 348 goto unlock;
351 349
@@ -358,7 +356,7 @@ static irqreturn_t smp_irq_move_cleanup_interrupt(int irq, void *dev_id)
358 spin_unlock_irqrestore(&vector_lock, flags); 356 spin_unlock_irqrestore(&vector_lock, flags);
359 cfg->move_cleanup_count--; 357 cfg->move_cleanup_count--;
360 unlock: 358 unlock:
361 spin_unlock(&desc->lock); 359 raw_spin_unlock(&desc->lock);
362 } 360 }
363 return IRQ_HANDLED; 361 return IRQ_HANDLED;
364} 362}
@@ -659,11 +657,8 @@ init_IRQ (void)
659 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL); 657 register_percpu_irq(IA64_SPURIOUS_INT_VECTOR, NULL);
660#ifdef CONFIG_SMP 658#ifdef CONFIG_SMP
661#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG) 659#if defined(CONFIG_IA64_GENERIC) || defined(CONFIG_IA64_DIG)
662 if (vector_domain_type != VECTOR_DOMAIN_NONE) { 660 if (vector_domain_type != VECTOR_DOMAIN_NONE)
663 BUG_ON(IA64_FIRST_DEVICE_VECTOR != IA64_IRQ_MOVE_VECTOR);
664 IA64_FIRST_DEVICE_VECTOR++;
665 register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction); 661 register_percpu_irq(IA64_IRQ_MOVE_VECTOR, &irq_move_irqaction);
666 }
667#endif 662#endif
668#endif 663#endif
669#ifdef CONFIG_PERFMON 664#ifdef CONFIG_PERFMON
diff --git a/arch/ia64/kernel/ivt.S b/arch/ia64/kernel/ivt.S
index ec9a5fdfa1b9..179fd122e837 100644
--- a/arch/ia64/kernel/ivt.S
+++ b/arch/ia64/kernel/ivt.S
@@ -49,7 +49,6 @@
49 49
50#include <asm/asmmacro.h> 50#include <asm/asmmacro.h>
51#include <asm/break.h> 51#include <asm/break.h>
52#include <asm/ia32.h>
53#include <asm/kregs.h> 52#include <asm/kregs.h>
54#include <asm/asm-offsets.h> 53#include <asm/asm-offsets.h>
55#include <asm/pgtable.h> 54#include <asm/pgtable.h>
@@ -1386,28 +1385,6 @@ END(ia32_exception)
1386// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71) 1385// 0x6a00 Entry 46 (size 16 bundles) IA-32 Intercept (30,31,59,70,71)
1387ENTRY(ia32_intercept) 1386ENTRY(ia32_intercept)
1388 DBG_FAULT(46) 1387 DBG_FAULT(46)
1389#ifdef CONFIG_IA32_SUPPORT
1390 mov r31=pr
1391 MOV_FROM_ISR(r16)
1392 ;;
1393 extr.u r17=r16,16,8 // get ISR.code
1394 mov r18=ar.eflag
1395 MOV_FROM_IIM(r19) // old eflag value
1396 ;;
1397 cmp.ne p6,p0=2,r17
1398(p6) br.cond.spnt 1f // not a system flag fault
1399 xor r16=r18,r19
1400 ;;
1401 extr.u r17=r16,18,1 // get the eflags.ac bit
1402 ;;
1403 cmp.eq p6,p0=0,r17
1404(p6) br.cond.spnt 1f // eflags.ac bit didn't change
1405 ;;
1406 mov pr=r31,-1 // restore predicate registers
1407 RFI
1408
14091:
1410#endif // CONFIG_IA32_SUPPORT
1411 FAULT(46) 1388 FAULT(46)
1412END(ia32_intercept) 1389END(ia32_intercept)
1413 1390
@@ -1416,12 +1393,7 @@ END(ia32_intercept)
1416// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74) 1393// 0x6b00 Entry 47 (size 16 bundles) IA-32 Interrupt (74)
1417ENTRY(ia32_interrupt) 1394ENTRY(ia32_interrupt)
1418 DBG_FAULT(47) 1395 DBG_FAULT(47)
1419#ifdef CONFIG_IA32_SUPPORT
1420 mov r31=pr
1421 br.sptk.many dispatch_to_ia32_handler
1422#else
1423 FAULT(47) 1396 FAULT(47)
1424#endif
1425END(ia32_interrupt) 1397END(ia32_interrupt)
1426 1398
1427 .org ia64_ivt+0x6c00 1399 .org ia64_ivt+0x6c00
@@ -1715,89 +1687,3 @@ ENTRY(dispatch_illegal_op_fault)
1715(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel 1687(p6) br.call.dpnt.many b6=b6 // call returns to ia64_leave_kernel
1716 br.sptk.many ia64_leave_kernel 1688 br.sptk.many ia64_leave_kernel
1717END(dispatch_illegal_op_fault) 1689END(dispatch_illegal_op_fault)
1718
1719#ifdef CONFIG_IA32_SUPPORT
1720
1721 /*
1722 * There is no particular reason for this code to be here, other than that
1723 * there happens to be space here that would go unused otherwise. If this
1724 * fault ever gets "unreserved", simply moved the following code to a more
1725 * suitable spot...
1726 */
1727
1728 // IA32 interrupt entry point
1729
1730ENTRY(dispatch_to_ia32_handler)
1731 SAVE_MIN
1732 ;;
1733 MOV_FROM_ISR(r14)
1734 SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(r3, r24)
1735 // guarantee that interruption collection is on
1736 ;;
1737 SSM_PSR_I(p15, p15, r3)
1738 adds r3=8,r2 // Base pointer for SAVE_REST
1739 ;;
1740 SAVE_REST
1741 ;;
1742 mov r15=0x80
1743 shr r14=r14,16 // Get interrupt number
1744 ;;
1745 cmp.ne p6,p0=r14,r15
1746(p6) br.call.dpnt.many b6=non_ia32_syscall
1747
1748 adds r14=IA64_PT_REGS_R8_OFFSET + 16,sp // 16 byte hole per SW conventions
1749 adds r15=IA64_PT_REGS_R1_OFFSET + 16,sp
1750 ;;
1751 cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
1752 ld8 r8=[r14] // get r8
1753 ;;
1754 st8 [r15]=r8 // save original EAX in r1 (IA32 procs don't use the GP)
1755 ;;
1756 alloc r15=ar.pfs,0,0,6,0 // must first in an insn group
1757 ;;
1758 ld4 r8=[r14],8 // r8 == eax (syscall number)
1759 mov r15=IA32_NR_syscalls
1760 ;;
1761 cmp.ltu.unc p6,p7=r8,r15
1762 ld4 out1=[r14],8 // r9 == ecx
1763 ;;
1764 ld4 out2=[r14],8 // r10 == edx
1765 ;;
1766 ld4 out0=[r14] // r11 == ebx
1767 adds r14=(IA64_PT_REGS_R13_OFFSET) + 16,sp
1768 ;;
1769 ld4 out5=[r14],PT(R14)-PT(R13) // r13 == ebp
1770 ;;
1771 ld4 out3=[r14],PT(R15)-PT(R14) // r14 == esi
1772 adds r2=TI_FLAGS+IA64_TASK_SIZE,r13
1773 ;;
1774 ld4 out4=[r14] // r15 == edi
1775 movl r16=ia32_syscall_table
1776 ;;
1777(p6) shladd r16=r8,3,r16 // force ni_syscall if not valid syscall number
1778 ld4 r2=[r2] // r2 = current_thread_info()->flags
1779 ;;
1780 ld8 r16=[r16]
1781 and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
1782 ;;
1783 mov b6=r16
1784 movl r15=ia32_ret_from_syscall
1785 cmp.eq p8,p0=r2,r0
1786 ;;
1787 mov rp=r15
1788(p8) br.call.sptk.many b6=b6
1789 br.cond.sptk ia32_trace_syscall
1790
1791non_ia32_syscall:
1792 alloc r15=ar.pfs,0,0,2,0
1793 mov out0=r14 // interrupt #
1794 add out1=16,sp // pointer to pt_regs
1795 ;; // avoid WAW on CFM
1796 br.call.sptk.many rp=ia32_bad_interrupt
1797.ret1: movl r15=ia64_leave_kernel
1798 ;;
1799 mov rp=r15
1800 br.ret.sptk.many rp
1801END(dispatch_to_ia32_handler)
1802
1803#endif /* CONFIG_IA32_SUPPORT */
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 9adac441ac9b..7026b29e277a 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -870,7 +870,7 @@ static int __kprobes pre_kprobes_handler(struct die_args *args)
870 return 1; 870 return 1;
871 871
872ss_probe: 872ss_probe:
873#if !defined(CONFIG_PREEMPT) || defined(CONFIG_FREEZER) 873#if !defined(CONFIG_PREEMPT)
874 if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) { 874 if (p->ainsn.inst_flag == INST_FLAG_BOOSTABLE && !p->post_handler) {
875 /* Boost up -- we can execute copied instructions directly */ 875 /* Boost up -- we can execute copied instructions directly */
876 ia64_psr(regs)->ri = p->ainsn.slot; 876 ia64_psr(regs)->ri = p->ainsn.slot;
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 496ac7a99488..a0220dc5ff42 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -85,6 +85,7 @@
85#include <linux/cpumask.h> 85#include <linux/cpumask.h>
86#include <linux/kdebug.h> 86#include <linux/kdebug.h>
87#include <linux/cpu.h> 87#include <linux/cpu.h>
88#include <linux/gfp.h>
88 89
89#include <asm/delay.h> 90#include <asm/delay.h>
90#include <asm/machvec.h> 91#include <asm/machvec.h>
@@ -888,9 +889,10 @@ ia64_mca_modify_comm(const struct task_struct *previous_current)
888} 889}
889 890
890static void 891static void
891finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms, 892finish_pt_regs(struct pt_regs *regs, struct ia64_sal_os_state *sos,
892 unsigned long *nat) 893 unsigned long *nat)
893{ 894{
895 const pal_min_state_area_t *ms = sos->pal_min_state;
894 const u64 *bank; 896 const u64 *bank;
895 897
896 /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use 898 /* If ipsr.ic then use pmsa_{iip,ipsr,ifs}, else use
@@ -904,6 +906,10 @@ finish_pt_regs(struct pt_regs *regs, const pal_min_state_area_t *ms,
904 regs->cr_iip = ms->pmsa_xip; 906 regs->cr_iip = ms->pmsa_xip;
905 regs->cr_ipsr = ms->pmsa_xpsr; 907 regs->cr_ipsr = ms->pmsa_xpsr;
906 regs->cr_ifs = ms->pmsa_xfs; 908 regs->cr_ifs = ms->pmsa_xfs;
909
910 sos->iip = ms->pmsa_iip;
911 sos->ipsr = ms->pmsa_ipsr;
912 sos->ifs = ms->pmsa_ifs;
907 } 913 }
908 regs->pr = ms->pmsa_pr; 914 regs->pr = ms->pmsa_pr;
909 regs->b0 = ms->pmsa_br0; 915 regs->b0 = ms->pmsa_br0;
@@ -1079,7 +1085,7 @@ ia64_mca_modify_original_stack(struct pt_regs *regs,
1079 memcpy(old_regs, regs, sizeof(*regs)); 1085 memcpy(old_regs, regs, sizeof(*regs));
1080 old_regs->loadrs = loadrs; 1086 old_regs->loadrs = loadrs;
1081 old_unat = old_regs->ar_unat; 1087 old_unat = old_regs->ar_unat;
1082 finish_pt_regs(old_regs, ms, &old_unat); 1088 finish_pt_regs(old_regs, sos, &old_unat);
1083 1089
1084 /* Next stack a struct switch_stack. mca_asm.S built a partial 1090 /* Next stack a struct switch_stack. mca_asm.S built a partial
1085 * switch_stack, copy it and fill in the blanks using pt_regs and 1091 * switch_stack, copy it and fill in the blanks using pt_regs and
@@ -1150,7 +1156,7 @@ no_mod:
1150 mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n", 1156 mprintk(KERN_INFO "cpu %d, %s %s, original stack not modified\n",
1151 smp_processor_id(), type, msg); 1157 smp_processor_id(), type, msg);
1152 old_unat = regs->ar_unat; 1158 old_unat = regs->ar_unat;
1153 finish_pt_regs(regs, ms, &old_unat); 1159 finish_pt_regs(regs, sos, &old_unat);
1154 return previous_current; 1160 return previous_current;
1155} 1161}
1156 1162
@@ -1220,9 +1226,12 @@ static void mca_insert_tr(u64 iord)
1220 unsigned long psr; 1226 unsigned long psr;
1221 int cpu = smp_processor_id(); 1227 int cpu = smp_processor_id();
1222 1228
1229 if (!ia64_idtrs[cpu])
1230 return;
1231
1223 psr = ia64_clear_ic(); 1232 psr = ia64_clear_ic();
1224 for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) { 1233 for (i = IA64_TR_ALLOC_BASE; i < IA64_TR_ALLOC_MAX; i++) {
1225 p = &__per_cpu_idtrs[cpu][iord-1][i]; 1234 p = ia64_idtrs[cpu] + (iord - 1) * IA64_TR_ALLOC_MAX;
1226 if (p->pte & 0x1) { 1235 if (p->pte & 0x1) {
1227 old_rr = ia64_get_rr(p->ifa); 1236 old_rr = ia64_get_rr(p->ifa);
1228 if (old_rr != p->rr) { 1237 if (old_rr != p->rr) {
diff --git a/arch/ia64/kernel/mca_asm.S b/arch/ia64/kernel/mca_asm.S
index 7461d2573d41..d5bdf9de36b6 100644
--- a/arch/ia64/kernel/mca_asm.S
+++ b/arch/ia64/kernel/mca_asm.S
@@ -59,7 +59,7 @@
59ia64_do_tlb_purge: 59ia64_do_tlb_purge:
60#define O(member) IA64_CPUINFO_##member##_OFFSET 60#define O(member) IA64_CPUINFO_##member##_OFFSET
61 61
62 GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 62 GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
63 ;; 63 ;;
64 addl r17=O(PTCE_STRIDE),r2 64 addl r17=O(PTCE_STRIDE),r2
65 addl r2=O(PTCE_BASE),r2 65 addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index f94aaa86933f..09b4d6828c45 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -22,6 +22,7 @@
22#include <linux/smp.h> 22#include <linux/smp.h>
23#include <linux/workqueue.h> 23#include <linux/workqueue.h>
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/slab.h>
25 26
26#include <asm/delay.h> 27#include <asm/delay.h>
27#include <asm/machvec.h> 28#include <asm/machvec.h>
diff --git a/arch/ia64/kernel/pci-swiotlb.c b/arch/ia64/kernel/pci-swiotlb.c
index 285aae8431c6..3095654f9ab3 100644
--- a/arch/ia64/kernel/pci-swiotlb.c
+++ b/arch/ia64/kernel/pci-swiotlb.c
@@ -1,6 +1,7 @@
1/* Glue code to lib/swiotlb.c */ 1/* Glue code to lib/swiotlb.c */
2 2
3#include <linux/pci.h> 3#include <linux/pci.h>
4#include <linux/gfp.h>
4#include <linux/cache.h> 5#include <linux/cache.h>
5#include <linux/module.h> 6#include <linux/module.h>
6#include <linux/dma-mapping.h> 7#include <linux/dma-mapping.h>
@@ -41,7 +42,7 @@ struct dma_map_ops swiotlb_dma_ops = {
41void __init swiotlb_dma_init(void) 42void __init swiotlb_dma_init(void)
42{ 43{
43 dma_ops = &swiotlb_dma_ops; 44 dma_ops = &swiotlb_dma_ops;
44 swiotlb_init(); 45 swiotlb_init(1);
45} 46}
46 47
47void __init pci_swiotlb_init(void) 48void __init pci_swiotlb_init(void)
@@ -51,7 +52,7 @@ void __init pci_swiotlb_init(void)
51 swiotlb = 1; 52 swiotlb = 1;
52 printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n"); 53 printk(KERN_INFO "PCI-DMA: Re-initialize machine vector.\n");
53 machvec_init("dig"); 54 machvec_init("dig");
54 swiotlb_init(); 55 swiotlb_init(1);
55 dma_ops = &swiotlb_dma_ops; 56 dma_ops = &swiotlb_dma_ops;
56#else 57#else
57 panic("Unable to find Intel IOMMU"); 58 panic("Unable to find Intel IOMMU");
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f1782705b1f7..ab985f785c14 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -41,6 +41,7 @@
41#include <linux/rcupdate.h> 41#include <linux/rcupdate.h>
42#include <linux/completion.h> 42#include <linux/completion.h>
43#include <linux/tracehook.h> 43#include <linux/tracehook.h>
44#include <linux/slab.h>
44 45
45#include <asm/errno.h> 46#include <asm/errno.h>
46#include <asm/intrinsics.h> 47#include <asm/intrinsics.h>
@@ -522,42 +523,37 @@ EXPORT_SYMBOL(pfm_sysctl);
522 523
523static ctl_table pfm_ctl_table[]={ 524static ctl_table pfm_ctl_table[]={
524 { 525 {
525 .ctl_name = CTL_UNNUMBERED,
526 .procname = "debug", 526 .procname = "debug",
527 .data = &pfm_sysctl.debug, 527 .data = &pfm_sysctl.debug,
528 .maxlen = sizeof(int), 528 .maxlen = sizeof(int),
529 .mode = 0666, 529 .mode = 0666,
530 .proc_handler = &proc_dointvec, 530 .proc_handler = proc_dointvec,
531 }, 531 },
532 { 532 {
533 .ctl_name = CTL_UNNUMBERED,
534 .procname = "debug_ovfl", 533 .procname = "debug_ovfl",
535 .data = &pfm_sysctl.debug_ovfl, 534 .data = &pfm_sysctl.debug_ovfl,
536 .maxlen = sizeof(int), 535 .maxlen = sizeof(int),
537 .mode = 0666, 536 .mode = 0666,
538 .proc_handler = &proc_dointvec, 537 .proc_handler = proc_dointvec,
539 }, 538 },
540 { 539 {
541 .ctl_name = CTL_UNNUMBERED,
542 .procname = "fastctxsw", 540 .procname = "fastctxsw",
543 .data = &pfm_sysctl.fastctxsw, 541 .data = &pfm_sysctl.fastctxsw,
544 .maxlen = sizeof(int), 542 .maxlen = sizeof(int),
545 .mode = 0600, 543 .mode = 0600,
546 .proc_handler = &proc_dointvec, 544 .proc_handler = proc_dointvec,
547 }, 545 },
548 { 546 {
549 .ctl_name = CTL_UNNUMBERED,
550 .procname = "expert_mode", 547 .procname = "expert_mode",
551 .data = &pfm_sysctl.expert_mode, 548 .data = &pfm_sysctl.expert_mode,
552 .maxlen = sizeof(int), 549 .maxlen = sizeof(int),
553 .mode = 0600, 550 .mode = 0600,
554 .proc_handler = &proc_dointvec, 551 .proc_handler = proc_dointvec,
555 }, 552 },
556 {} 553 {}
557}; 554};
558static ctl_table pfm_sysctl_dir[] = { 555static ctl_table pfm_sysctl_dir[] = {
559 { 556 {
560 .ctl_name = CTL_UNNUMBERED,
561 .procname = "perfmon", 557 .procname = "perfmon",
562 .mode = 0555, 558 .mode = 0555,
563 .child = pfm_ctl_table, 559 .child = pfm_ctl_table,
@@ -566,7 +562,6 @@ static ctl_table pfm_sysctl_dir[] = {
566}; 562};
567static ctl_table pfm_sysctl_root[] = { 563static ctl_table pfm_sysctl_root[] = {
568 { 564 {
569 .ctl_name = CTL_KERN,
570 .procname = "kernel", 565 .procname = "kernel",
571 .mode = 0555, 566 .mode = 0555,
572 .child = pfm_sysctl_dir, 567 .child = pfm_sysctl_dir,
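The deletions above are one mechanical change repeated: the binary-sysctl .ctl_name field was removed in this kernel series, so tables now match entries purely by .procname, and the redundant address-of on the handler goes away at the same time. A minimal entry in the new style (the "example" names are hypothetical):

    static int example_debug;

    static ctl_table example_table[] = {
            {
                    .procname       = "debug",              /* matched by name only */
                    .data           = &example_debug,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec,        /* plain function name */
            },
            {}                                              /* empty entry terminates */
    };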
@@ -2206,7 +2201,7 @@ pfm_alloc_file(pfm_context_t *ctx)
2206{ 2201{
2207 struct file *file; 2202 struct file *file;
2208 struct inode *inode; 2203 struct inode *inode;
2209 struct dentry *dentry; 2204 struct path path;
2210 char name[32]; 2205 char name[32];
2211 struct qstr this; 2206 struct qstr this;
2212 2207
@@ -2231,18 +2226,19 @@ pfm_alloc_file(pfm_context_t *ctx)
2231 /* 2226 /*
2232 * allocate a new dcache entry 2227 * allocate a new dcache entry
2233 */ 2228 */
2234 dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this); 2229 path.dentry = d_alloc(pfmfs_mnt->mnt_sb->s_root, &this);
2235 if (!dentry) { 2230 if (!path.dentry) {
2236 iput(inode); 2231 iput(inode);
2237 return ERR_PTR(-ENOMEM); 2232 return ERR_PTR(-ENOMEM);
2238 } 2233 }
2234 path.mnt = mntget(pfmfs_mnt);
2239 2235
2240 dentry->d_op = &pfmfs_dentry_operations; 2236 path.dentry->d_op = &pfmfs_dentry_operations;
2241 d_add(dentry, inode); 2237 d_add(path.dentry, inode);
2242 2238
2243 file = alloc_file(pfmfs_mnt, dentry, FMODE_READ, &pfm_file_ops); 2239 file = alloc_file(&path, FMODE_READ, &pfm_file_ops);
2244 if (!file) { 2240 if (!file) {
2245 dput(dentry); 2241 path_put(&path);
2246 return ERR_PTR(-ENFILE); 2242 return ERR_PTR(-ENFILE);
2247 } 2243 }
2248 2244
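This hunk tracks the VFS change that made alloc_file() take a struct path, consuming one reference on both its dentry and its vfsmount; hence the new mntget() before the call and the single path_put() on the failure path. A stripped-down sketch of the pattern, with hypothetical names and abbreviated error handling:

    static struct file *example_alloc(struct vfsmount *mnt, struct inode *inode,
                                      struct qstr *name,
                                      const struct file_operations *fops)
    {
            struct path path;
            struct file *file;

            path.dentry = d_alloc(mnt->mnt_sb->s_root, name);
            if (!path.dentry) {
                    iput(inode);
                    return ERR_PTR(-ENOMEM);
            }
            path.mnt = mntget(mnt);         /* alloc_file() takes over this ref */

            d_add(path.dentry, inode);
            file = alloc_file(&path, FMODE_READ, fops);
            if (!file) {
                    path_put(&path);        /* drops dentry and mnt in one go */
                    return ERR_PTR(-ENFILE);
            }
            return file;
    }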
@@ -2298,7 +2294,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
2298 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur) 2294 * if ((mm->total_vm << PAGE_SHIFT) + len> task->rlim[RLIMIT_AS].rlim_cur)
2299 * return -ENOMEM; 2295 * return -ENOMEM;
2300 */ 2296 */
2301 if (size > task->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) 2297 if (size > task_rlimit(task, RLIMIT_MEMLOCK))
2302 return -ENOMEM; 2298 return -ENOMEM;
2303 2299
2304 /* 2300 /*
@@ -2320,6 +2316,7 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
2320 DPRINT(("Cannot allocate vma\n")); 2316 DPRINT(("Cannot allocate vma\n"));
2321 goto error_kmem; 2317 goto error_kmem;
2322 } 2318 }
2319 INIT_LIST_HEAD(&vma->anon_vma_chain);
2323 2320
2324 /* 2321 /*
2325 * partially initialize the vma for the sampling buffer 2322 * partially initialize the vma for the sampling buffer
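The added INIT_LIST_HEAD() follows the anon_vma rework in this series: every vm_area_struct now carries an anon_vma_chain list, and code that hand-allocates a vma instead of going through the normal mmap/fork paths must initialize it before the vma is linked anywhere. Sketch of the obligatory sequence (hypothetical helper):

    static struct vm_area_struct *example_alloc_vma(struct mm_struct *mm)
    {
            struct vm_area_struct *vma;

            vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
            if (!vma)
                    return NULL;
            INIT_LIST_HEAD(&vma->anon_vma_chain);   /* mandatory before linking */
            vma->vm_mm = mm;
            /* ... set vm_start/vm_end/vm_flags, then insert_vm_struct() ... */
            return vma;
    }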
@@ -2718,7 +2715,7 @@ pfm_context_create(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
2718 goto buffer_error; 2715 goto buffer_error;
2719 } 2716 }
2720 2717
2721 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d \n", 2718 DPRINT(("ctx=%p flags=0x%x system=%d notify_block=%d excl_idle=%d no_msg=%d ctx_fd=%d\n",
2722 ctx, 2719 ctx,
2723 ctx_flags, 2720 ctx_flags,
2724 ctx->ctx_fl_system, 2721 ctx->ctx_fl_system,
@@ -3523,7 +3520,7 @@ pfm_use_debug_registers(struct task_struct *task)
3523 * IA64_THREAD_DBG_VALID set. This indicates a task which was 3520 * IA64_THREAD_DBG_VALID set. This indicates a task which was
3524 * able to use the debug registers for debugging purposes via 3521 * able to use the debug registers for debugging purposes via
3525 * ptrace(). Therefore we know it was not using them for 3522 * ptrace(). Therefore we know it was not using them for
3526 * perfmormance monitoring, so we only decrement the number 3523 * performance monitoring, so we only decrement the number
3527 * of "ptraced" debug register users to keep the count up to date 3524 * of "ptraced" debug register users to keep the count up to date
3528 */ 3525 */
3529int 3526int
@@ -3682,7 +3679,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3682 * "self-monitoring". 3679 * "self-monitoring".
3683 */ 3680 */
3684 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { 3681 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3685 DPRINT(("unblocking [%d] \n", task_pid_nr(task))); 3682 DPRINT(("unblocking [%d]\n", task_pid_nr(task)));
3686 complete(&ctx->ctx_restart_done); 3683 complete(&ctx->ctx_restart_done);
3687 } else { 3684 } else {
3688 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task))); 3685 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 9bcec9945c12..53f1648c8b81 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -15,11 +15,11 @@
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/slab.h>
18#include <linux/module.h> 19#include <linux/module.h>
19#include <linux/notifier.h> 20#include <linux/notifier.h>
20#include <linux/personality.h> 21#include <linux/personality.h>
21#include <linux/sched.h> 22#include <linux/sched.h>
22#include <linux/slab.h>
23#include <linux/stddef.h> 23#include <linux/stddef.h>
24#include <linux/thread_info.h> 24#include <linux/thread_info.h>
25#include <linux/unistd.h> 25#include <linux/unistd.h>
@@ -33,7 +33,6 @@
33#include <asm/cpu.h> 33#include <asm/cpu.h>
34#include <asm/delay.h> 34#include <asm/delay.h>
35#include <asm/elf.h> 35#include <asm/elf.h>
36#include <asm/ia32.h>
37#include <asm/irq.h> 36#include <asm/irq.h>
38#include <asm/kexec.h> 37#include <asm/kexec.h>
39#include <asm/pgalloc.h> 38#include <asm/pgalloc.h>
@@ -60,6 +59,10 @@ unsigned long idle_halt;
60EXPORT_SYMBOL(idle_halt); 59EXPORT_SYMBOL(idle_halt);
61unsigned long idle_nomwait; 60unsigned long idle_nomwait;
62EXPORT_SYMBOL(idle_nomwait); 61EXPORT_SYMBOL(idle_nomwait);
62void (*pm_idle) (void);
63EXPORT_SYMBOL(pm_idle);
64void (*pm_power_off) (void);
65EXPORT_SYMBOL(pm_power_off);
63 66
64void 67void
65ia64_do_show_stack (struct unw_frame_info *info, void *arg) 68ia64_do_show_stack (struct unw_frame_info *info, void *arg)
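pm_idle and pm_power_off stop being provided by shared code in this series, so ia64 defines and exports its own copies here. Platform or ACPI code overrides them in the usual way; a hypothetical consumer:

    static void example_platform_off(void)
    {
            /* ask firmware to cut power; placeholder body */
            for (;;)
                    cpu_relax();
    }

    static int __init example_platform_init(void)
    {
            pm_power_off = example_platform_off;    /* used by machine_power_off() */
            return 0;
    }
    arch_initcall(example_platform_init);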
@@ -358,11 +361,6 @@ ia64_save_extra (struct task_struct *task)
358 if (info & PFM_CPUINFO_SYST_WIDE) 361 if (info & PFM_CPUINFO_SYST_WIDE)
359 pfm_syst_wide_update_task(task, info, 0); 362 pfm_syst_wide_update_task(task, info, 0);
360#endif 363#endif
361
362#ifdef CONFIG_IA32_SUPPORT
363 if (IS_IA32_PROCESS(task_pt_regs(task)))
364 ia32_save_state(task);
365#endif
366} 364}
367 365
368void 366void
@@ -383,11 +381,6 @@ ia64_load_extra (struct task_struct *task)
383 if (info & PFM_CPUINFO_SYST_WIDE) 381 if (info & PFM_CPUINFO_SYST_WIDE)
384 pfm_syst_wide_update_task(task, info, 1); 382 pfm_syst_wide_update_task(task, info, 1);
385#endif 383#endif
386
387#ifdef CONFIG_IA32_SUPPORT
388 if (IS_IA32_PROCESS(task_pt_regs(task)))
389 ia32_load_state(task);
390#endif
391} 384}
392 385
393/* 386/*
@@ -426,7 +419,7 @@ copy_thread(unsigned long clone_flags,
426 unsigned long user_stack_base, unsigned long user_stack_size, 419 unsigned long user_stack_base, unsigned long user_stack_size,
427 struct task_struct *p, struct pt_regs *regs) 420 struct task_struct *p, struct pt_regs *regs)
428{ 421{
429 extern char ia64_ret_from_clone, ia32_ret_from_clone; 422 extern char ia64_ret_from_clone;
430 struct switch_stack *child_stack, *stack; 423 struct switch_stack *child_stack, *stack;
431 unsigned long rbs, child_rbs, rbs_size; 424 unsigned long rbs, child_rbs, rbs_size;
432 struct pt_regs *child_ptregs; 425 struct pt_regs *child_ptregs;
@@ -457,7 +450,7 @@ copy_thread(unsigned long clone_flags,
457 memcpy((void *) child_rbs, (void *) rbs, rbs_size); 450 memcpy((void *) child_rbs, (void *) rbs, rbs_size);
458 451
459 if (likely(user_mode(child_ptregs))) { 452 if (likely(user_mode(child_ptregs))) {
460 if ((clone_flags & CLONE_SETTLS) && !IS_IA32_PROCESS(regs)) 453 if (clone_flags & CLONE_SETTLS)
461 child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */ 454 child_ptregs->r13 = regs->r16; /* see sys_clone2() in entry.S */
462 if (user_stack_base) { 455 if (user_stack_base) {
463 child_ptregs->r12 = user_stack_base + user_stack_size - 16; 456 child_ptregs->r12 = user_stack_base + user_stack_size - 16;
@@ -477,10 +470,7 @@ copy_thread(unsigned long clone_flags,
477 child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */ 470 child_ptregs->r13 = (unsigned long) p; /* set `current' pointer */
478 } 471 }
479 child_stack->ar_bspstore = child_rbs + rbs_size; 472 child_stack->ar_bspstore = child_rbs + rbs_size;
480 if (IS_IA32_PROCESS(regs)) 473 child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
481 child_stack->b0 = (unsigned long) &ia32_ret_from_clone;
482 else
483 child_stack->b0 = (unsigned long) &ia64_ret_from_clone;
484 474
485 /* copy parts of thread_struct: */ 475 /* copy parts of thread_struct: */
486 p->thread.ksp = (unsigned long) child_stack - 16; 476 p->thread.ksp = (unsigned long) child_stack - 16;
@@ -515,22 +505,6 @@ copy_thread(unsigned long clone_flags,
515 p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR) 505 p->thread.flags = ((current->thread.flags & ~THREAD_FLAGS_TO_CLEAR)
516 | THREAD_FLAGS_TO_SET); 506 | THREAD_FLAGS_TO_SET);
517 ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */ 507 ia64_drop_fpu(p); /* don't pick up stale state from a CPU's fph */
518#ifdef CONFIG_IA32_SUPPORT
519 /*
520 * If we're cloning an IA32 task then save the IA32 extra
521 * state from the current task to the new task
522 */
523 if (IS_IA32_PROCESS(task_pt_regs(current))) {
524 ia32_save_state(p);
525 if (clone_flags & CLONE_SETTLS)
526 retval = ia32_clone_tls(p, child_ptregs);
527
528 /* Copy partially mapped page list */
529 if (!retval)
530 retval = ia32_copy_ia64_partial_page_list(p,
531 clone_flags);
532 }
533#endif
534 508
535#ifdef CONFIG_PERFMON 509#ifdef CONFIG_PERFMON
536 if (current->thread.pfm_context) 510 if (current->thread.pfm_context)
@@ -704,15 +678,6 @@ EXPORT_SYMBOL(kernel_thread);
704int 678int
705kernel_thread_helper (int (*fn)(void *), void *arg) 679kernel_thread_helper (int (*fn)(void *), void *arg)
706{ 680{
707#ifdef CONFIG_IA32_SUPPORT
708 if (IS_IA32_PROCESS(task_pt_regs(current))) {
709 /* A kernel thread is always a 64-bit process. */
710 current->thread.map_base = DEFAULT_MAP_BASE;
711 current->thread.task_size = DEFAULT_TASK_SIZE;
712 ia64_set_kr(IA64_KR_IO_BASE, current->thread.old_iob);
713 ia64_set_kr(IA64_KR_TSSD, current->thread.old_k1);
714 }
715#endif
716 return (*fn)(arg); 681 return (*fn)(arg);
717} 682}
718 683
@@ -725,14 +690,6 @@ flush_thread (void)
725 /* drop floating-point and debug-register state if it exists: */ 690 /* drop floating-point and debug-register state if it exists: */
726 current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID); 691 current->thread.flags &= ~(IA64_THREAD_FPH_VALID | IA64_THREAD_DBG_VALID);
727 ia64_drop_fpu(current); 692 ia64_drop_fpu(current);
728#ifdef CONFIG_IA32_SUPPORT
729 if (IS_IA32_PROCESS(task_pt_regs(current))) {
730 ia32_drop_ia64_partial_page_list(current);
731 current->thread.task_size = IA32_PAGE_OFFSET;
732 set_fs(USER_DS);
733 memset(current->thread.tls_array, 0, sizeof(current->thread.tls_array));
734 }
735#endif
736} 693}
737 694
738/* 695/*
@@ -753,8 +710,6 @@ exit_thread (void)
753 if (current->thread.flags & IA64_THREAD_DBG_VALID) 710 if (current->thread.flags & IA64_THREAD_DBG_VALID)
754 pfm_release_debug_registers(current); 711 pfm_release_debug_registers(current);
755#endif 712#endif
756 if (IS_IA32_PROCESS(task_pt_regs(current)))
757 ia32_drop_ia64_partial_page_list(current);
758} 713}
759 714
760unsigned long 715unsigned long
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 9daa87fdb018..0dec7f702448 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -11,7 +11,6 @@
11 */ 11 */
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/slab.h>
15#include <linux/mm.h> 14#include <linux/mm.h>
16#include <linux/errno.h> 15#include <linux/errno.h>
17#include <linux/ptrace.h> 16#include <linux/ptrace.h>
@@ -1250,13 +1249,8 @@ syscall_trace_enter (long arg0, long arg1, long arg2, long arg3,
1250 long syscall; 1249 long syscall;
1251 int arch; 1250 int arch;
1252 1251
1253 if (IS_IA32_PROCESS(&regs)) { 1252 syscall = regs.r15;
1254 syscall = regs.r1; 1253 arch = AUDIT_ARCH_IA64;
1255 arch = AUDIT_ARCH_I386;
1256 } else {
1257 syscall = regs.r15;
1258 arch = AUDIT_ARCH_IA64;
1259 }
1260 1254
1261 audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3); 1255 audit_syscall_entry(arch, syscall, arg0, arg1, arg2, arg3);
1262 } 1256 }
@@ -2172,11 +2166,6 @@ static const struct user_regset_view user_ia64_view = {
2172 2166
2173const struct user_regset_view *task_user_regset_view(struct task_struct *tsk) 2167const struct user_regset_view *task_user_regset_view(struct task_struct *tsk)
2174{ 2168{
2175#ifdef CONFIG_IA32_SUPPORT
2176 extern const struct user_regset_view user_ia32_view;
2177 if (IS_IA32_PROCESS(task_pt_regs(tsk)))
2178 return &user_ia32_view;
2179#endif
2180 return &user_ia64_view; 2169 return &user_ia64_view;
2181} 2170}
2182 2171
diff --git a/arch/ia64/kernel/relocate_kernel.S b/arch/ia64/kernel/relocate_kernel.S
index 32f6fc131fbe..c370e02f0061 100644
--- a/arch/ia64/kernel/relocate_kernel.S
+++ b/arch/ia64/kernel/relocate_kernel.S
@@ -61,7 +61,7 @@ GLOBAL_ENTRY(relocate_new_kernel)
61 61
62 // purge all TC entries 62 // purge all TC entries
63#define O(member) IA64_CPUINFO_##member##_OFFSET 63#define O(member) IA64_CPUINFO_##member##_OFFSET
64 GET_THIS_PADDR(r2, cpu_info) // load phys addr of cpu_info into r2 64 GET_THIS_PADDR(r2, ia64_cpu_info) // load phys addr of cpu_info into r2
65 ;; 65 ;;
66 addl r17=O(PTCE_STRIDE),r2 66 addl r17=O(PTCE_STRIDE),r2
67 addl r2=O(PTCE_BASE),r2 67 addl r2=O(PTCE_BASE),r2
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 1de86c96801d..41ae6a596b50 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -46,7 +46,6 @@
46#include <linux/kexec.h> 46#include <linux/kexec.h>
47#include <linux/crash_dump.h> 47#include <linux/crash_dump.h>
48 48
49#include <asm/ia32.h>
50#include <asm/machvec.h> 49#include <asm/machvec.h>
51#include <asm/mca.h> 50#include <asm/mca.h>
52#include <asm/meminit.h> 51#include <asm/meminit.h>
@@ -74,7 +73,7 @@ unsigned long __per_cpu_offset[NR_CPUS];
74EXPORT_SYMBOL(__per_cpu_offset); 73EXPORT_SYMBOL(__per_cpu_offset);
75#endif 74#endif
76 75
77DEFINE_PER_CPU(struct cpuinfo_ia64, cpu_info); 76DEFINE_PER_CPU(struct cpuinfo_ia64, ia64_cpu_info);
78DEFINE_PER_CPU(unsigned long, local_per_cpu_offset); 77DEFINE_PER_CPU(unsigned long, local_per_cpu_offset);
79unsigned long ia64_cycles_per_usec; 78unsigned long ia64_cycles_per_usec;
80struct ia64_boot_param *ia64_boot_param; 79struct ia64_boot_param *ia64_boot_param;
@@ -566,19 +565,18 @@ setup_arch (char **cmdline_p)
566 early_acpi_boot_init(); 565 early_acpi_boot_init();
567# ifdef CONFIG_ACPI_NUMA 566# ifdef CONFIG_ACPI_NUMA
568 acpi_numa_init(); 567 acpi_numa_init();
569#ifdef CONFIG_ACPI_HOTPLUG_CPU 568# ifdef CONFIG_ACPI_HOTPLUG_CPU
570 prefill_possible_map(); 569 prefill_possible_map();
571#endif 570# endif
572 per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ? 571 per_cpu_scan_finalize((cpus_weight(early_cpu_possible_map) == 0 ?
573 32 : cpus_weight(early_cpu_possible_map)), 572 32 : cpus_weight(early_cpu_possible_map)),
574 additional_cpus > 0 ? additional_cpus : 0); 573 additional_cpus > 0 ? additional_cpus : 0);
575# endif 574# endif
576#else
577# ifdef CONFIG_SMP
578 smp_build_cpu_map(); /* happens, e.g., with the Ski simulator */
579# endif
580#endif /* CONFIG_ACPI_BOOT */ 575#endif /* CONFIG_ACPI_BOOT */
581 576
577#ifdef CONFIG_SMP
578 smp_build_cpu_map();
579#endif
582 find_memory(); 580 find_memory();
583 581
584 /* process SAL system table: */ 582 /* process SAL system table: */
@@ -856,18 +854,6 @@ identify_cpu (struct cpuinfo_ia64 *c)
856} 854}
857 855
858/* 856/*
859 * In UP configuration, setup_per_cpu_areas() is defined in
860 * include/linux/percpu.h
861 */
862#ifdef CONFIG_SMP
863void __init
864setup_per_cpu_areas (void)
865{
866 /* start_kernel() requires this... */
867}
868#endif
869
870/*
871 * Do the following calculations: 857 * Do the following calculations:
872 * 858 *
873 * 1. the max. cache line size. 859 * 1. the max. cache line size.
@@ -980,7 +966,7 @@ cpu_init (void)
980 * depends on the data returned by identify_cpu(). We break the dependency by 966 * depends on the data returned by identify_cpu(). We break the dependency by
981 * accessing cpu_data() through the canonical per-CPU address. 967 * accessing cpu_data() through the canonical per-CPU address.
982 */ 968 */
983 cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(cpu_info) - __per_cpu_start); 969 cpu_info = cpu_data + ((char *) &__ia64_per_cpu_var(ia64_cpu_info) - __per_cpu_start);
984 identify_cpu(cpu_info); 970 identify_cpu(cpu_info);
985 971
986#ifdef CONFIG_MCKINLEY 972#ifdef CONFIG_MCKINLEY
@@ -1029,10 +1015,6 @@ cpu_init (void)
1029 ia64_mmu_init(ia64_imva(cpu_data)); 1015 ia64_mmu_init(ia64_imva(cpu_data));
1030 ia64_mca_cpu_init(ia64_imva(cpu_data)); 1016 ia64_mca_cpu_init(ia64_imva(cpu_data));
1031 1017
1032#ifdef CONFIG_IA32_SUPPORT
1033 ia32_cpu_init();
1034#endif
1035
1036 /* Clear ITC to eliminate sched_clock() overflows in human time. */ 1018 /* Clear ITC to eliminate sched_clock() overflows in human time. */
1037 ia64_set_itc(0); 1019 ia64_set_itc(0);
1038 1020
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index e1821ca4c7df..7bdafc8788bd 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -21,7 +21,6 @@
21#include <linux/unistd.h> 21#include <linux/unistd.h>
22#include <linux/wait.h> 22#include <linux/wait.h>
23 23
24#include <asm/ia32.h>
25#include <asm/intrinsics.h> 24#include <asm/intrinsics.h>
26#include <asm/uaccess.h> 25#include <asm/uaccess.h>
27#include <asm/rse.h> 26#include <asm/rse.h>
@@ -425,14 +424,8 @@ static long
425handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, 424handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset,
426 struct sigscratch *scr) 425 struct sigscratch *scr)
427{ 426{
428 if (IS_IA32_PROCESS(&scr->pt)) { 427 if (!setup_frame(sig, ka, info, oldset, scr))
429 /* send signal to IA-32 process */ 428 return 0;
430 if (!ia32_setup_frame1(sig, ka, info, oldset, &scr->pt))
431 return 0;
432 } else
433 /* send signal to IA-64 process */
434 if (!setup_frame(sig, ka, info, oldset, scr))
435 return 0;
436 429
437 spin_lock_irq(&current->sighand->siglock); 430 spin_lock_irq(&current->sighand->siglock);
438 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask); 431 sigorsets(&current->blocked, &current->blocked, &ka->sa.sa_mask);
@@ -462,7 +455,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
462 siginfo_t info; 455 siginfo_t info;
463 long restart = in_syscall; 456 long restart = in_syscall;
464 long errno = scr->pt.r8; 457 long errno = scr->pt.r8;
465# define ERR_CODE(c) (IS_IA32_PROCESS(&scr->pt) ? -(c) : (c))
466 458
467 /* 459 /*
468 * In the ia64_leave_kernel code path, we want the common case to go fast, which 460 * In the ia64_leave_kernel code path, we want the common case to go fast, which
@@ -490,14 +482,7 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
490 * inferior call), thus it's important to check for restarting _after_ 482 * inferior call), thus it's important to check for restarting _after_
491 * get_signal_to_deliver(). 483 * get_signal_to_deliver().
492 */ 484 */
493 if (IS_IA32_PROCESS(&scr->pt)) { 485 if ((long) scr->pt.r10 != -1)
494 if (in_syscall) {
495 if (errno >= 0)
496 restart = 0;
497 else
498 errno = -errno;
499 }
500 } else if ((long) scr->pt.r10 != -1)
501 /* 486 /*
502 * A system call has to be restarted only if one of the error codes 487 * A system call has to be restarted only if one of the error codes
503 * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10 488 * ERESTARTNOHAND, ERESTARTSYS, or ERESTARTNOINTR is returned. If r10
@@ -513,22 +498,18 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
513 switch (errno) { 498 switch (errno) {
514 case ERESTART_RESTARTBLOCK: 499 case ERESTART_RESTARTBLOCK:
515 case ERESTARTNOHAND: 500 case ERESTARTNOHAND:
516 scr->pt.r8 = ERR_CODE(EINTR); 501 scr->pt.r8 = EINTR;
517 /* note: scr->pt.r10 is already -1 */ 502 /* note: scr->pt.r10 is already -1 */
518 break; 503 break;
519 504
520 case ERESTARTSYS: 505 case ERESTARTSYS:
521 if ((ka.sa.sa_flags & SA_RESTART) == 0) { 506 if ((ka.sa.sa_flags & SA_RESTART) == 0) {
522 scr->pt.r8 = ERR_CODE(EINTR); 507 scr->pt.r8 = EINTR;
523 /* note: scr->pt.r10 is already -1 */ 508 /* note: scr->pt.r10 is already -1 */
524 break; 509 break;
525 } 510 }
526 case ERESTARTNOINTR: 511 case ERESTARTNOINTR:
527 if (IS_IA32_PROCESS(&scr->pt)) { 512 ia64_decrement_ip(&scr->pt);
528 scr->pt.r8 = scr->pt.r1;
529 scr->pt.cr_iip -= 2;
530 } else
531 ia64_decrement_ip(&scr->pt);
532 restart = 0; /* don't restart twice if handle_signal() fails... */ 513 restart = 0; /* don't restart twice if handle_signal() fails... */
533 } 514 }
534 } 515 }
@@ -555,21 +536,14 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
555 if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR 536 if (errno == ERESTARTNOHAND || errno == ERESTARTSYS || errno == ERESTARTNOINTR
556 || errno == ERESTART_RESTARTBLOCK) 537 || errno == ERESTART_RESTARTBLOCK)
557 { 538 {
558 if (IS_IA32_PROCESS(&scr->pt)) { 539 /*
559 scr->pt.r8 = scr->pt.r1; 540 * Note: the syscall number is in r15 which is saved in
560 scr->pt.cr_iip -= 2; 541 * pt_regs so all we need to do here is adjust ip so that
561 if (errno == ERESTART_RESTARTBLOCK) 542 * the "break" instruction gets re-executed.
562 scr->pt.r8 = 0; /* x86 version of __NR_restart_syscall */ 543 */
563 } else { 544 ia64_decrement_ip(&scr->pt);
564 /* 545 if (errno == ERESTART_RESTARTBLOCK)
565 * Note: the syscall number is in r15 which is saved in 546 scr->pt.r15 = __NR_restart_syscall;
566 * pt_regs so all we need to do here is adjust ip so that
567 * the "break" instruction gets re-executed.
568 */
569 ia64_decrement_ip(&scr->pt);
570 if (errno == ERESTART_RESTARTBLOCK)
571 scr->pt.r15 = __NR_restart_syscall;
572 }
573 } 547 }
574 } 548 }
575 549
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index de100aa7ff03..e5230b2ff2c5 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -44,7 +44,6 @@
44#include <asm/cache.h> 44#include <asm/cache.h>
45#include <asm/current.h> 45#include <asm/current.h>
46#include <asm/delay.h> 46#include <asm/delay.h>
47#include <asm/ia32.h>
48#include <asm/io.h> 47#include <asm/io.h>
49#include <asm/irq.h> 48#include <asm/irq.h>
50#include <asm/machvec.h> 49#include <asm/machvec.h>
@@ -443,10 +442,6 @@ smp_callin (void)
443 calibrate_delay(); 442 calibrate_delay();
444 local_cpu_data->loops_per_jiffy = loops_per_jiffy; 443 local_cpu_data->loops_per_jiffy = loops_per_jiffy;
445 444
446#ifdef CONFIG_IA32_SUPPORT
447 ia32_gdt_init();
448#endif
449
450 /* 445 /*
451 * Allow the master to continue. 446 * Allow the master to continue.
452 */ 447 */
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 92ed83f34036..609d50056a6c 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -100,51 +100,7 @@ sys_getpagesize (void)
100asmlinkage unsigned long 100asmlinkage unsigned long
101ia64_brk (unsigned long brk) 101ia64_brk (unsigned long brk)
102{ 102{
103 unsigned long rlim, retval, newbrk, oldbrk; 103 unsigned long retval = sys_brk(brk);
104 struct mm_struct *mm = current->mm;
105
106 /*
107 * Most of this replicates the code in sys_brk() except for an additional safety
108 * check and the clearing of r8. However, we can't call sys_brk() because we need
109 * to acquire the mmap_sem before we can do the test...
110 */
111 down_write(&mm->mmap_sem);
112
113 if (brk < mm->end_code)
114 goto out;
115 newbrk = PAGE_ALIGN(brk);
116 oldbrk = PAGE_ALIGN(mm->brk);
117 if (oldbrk == newbrk)
118 goto set_brk;
119
120 /* Always allow shrinking brk. */
121 if (brk <= mm->brk) {
122 if (!do_munmap(mm, newbrk, oldbrk-newbrk))
123 goto set_brk;
124 goto out;
125 }
126
127 /* Check against unimplemented/unmapped addresses: */
128 if ((newbrk - oldbrk) > RGN_MAP_LIMIT || REGION_OFFSET(newbrk) > RGN_MAP_LIMIT)
129 goto out;
130
131 /* Check against rlimit.. */
132 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
133 if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
134 goto out;
135
136 /* Check against existing mmap mappings. */
137 if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
138 goto out;
139
140 /* Ok, looks good - let it rip. */
141 if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
142 goto out;
143set_brk:
144 mm->brk = brk;
145out:
146 retval = mm->brk;
147 up_write(&mm->mmap_sem);
148 force_successful_syscall_return(); 104 force_successful_syscall_return();
149 return retval; 105 return retval;
150} 106}
@@ -185,39 +141,6 @@ int ia64_mmap_check(unsigned long addr, unsigned long len,
185 return 0; 141 return 0;
186} 142}
187 143
188static inline unsigned long
189do_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, unsigned long pgoff)
190{
191 struct file *file = NULL;
192
193 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
194 if (!(flags & MAP_ANONYMOUS)) {
195 file = fget(fd);
196 if (!file)
197 return -EBADF;
198
199 if (!file->f_op || !file->f_op->mmap) {
200 addr = -ENODEV;
201 goto out;
202 }
203 }
204
205 /* Careful about overflows.. */
206 len = PAGE_ALIGN(len);
207 if (!len || len > TASK_SIZE) {
208 addr = -EINVAL;
209 goto out;
210 }
211
212 down_write(&current->mm->mmap_sem);
213 addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
214 up_write(&current->mm->mmap_sem);
215
216out: if (file)
217 fput(file);
218 return addr;
219}
220
221/* 144/*
222 * mmap2() is like mmap() except that the offset is expressed in units 145 * mmap2() is like mmap() except that the offset is expressed in units
223 * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces 146 * of PAGE_SIZE (instead of bytes). This allows to mmap2() (pieces
@@ -226,7 +149,7 @@ out: if (file)
226asmlinkage unsigned long 149asmlinkage unsigned long
227sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff) 150sys_mmap2 (unsigned long addr, unsigned long len, int prot, int flags, int fd, long pgoff)
228{ 151{
229 addr = do_mmap2(addr, len, prot, flags, fd, pgoff); 152 addr = sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
230 if (!IS_ERR((void *) addr)) 153 if (!IS_ERR((void *) addr))
231 force_successful_syscall_return(); 154 force_successful_syscall_return();
232 return addr; 155 return addr;
@@ -238,7 +161,7 @@ sys_mmap (unsigned long addr, unsigned long len, int prot, int flags, int fd, lo
238 if (offset_in_page(off) != 0) 161 if (offset_in_page(off) != 0)
239 return -EINVAL; 162 return -EINVAL;
240 163
241 addr = do_mmap2(addr, len, prot, flags, fd, off >> PAGE_SHIFT); 164 addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
242 if (!IS_ERR((void *) addr)) 165 if (!IS_ERR((void *) addr))
243 force_successful_syscall_return(); 166 force_successful_syscall_return();
244 return addr; 167 return addr;
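Both wrappers now funnel into the generic sys_mmap_pgoff() added in this series, so the local do_mmap2() copy of the fget/PAGE_ALIGN/mmap_sem dance can go; what remains is pure argument conversion plus the ia64-specific r8 handling. A sketch of the byte-offset entry point:

    /* force_successful_syscall_return() clears the error indication so a
     * large mapping address is not mistaken for a negative errno. */
    asmlinkage unsigned long
    example_mmap(unsigned long addr, unsigned long len, int prot,
                 int flags, int fd, long off)
    {
            if (offset_in_page(off))        /* byte offset must be page aligned */
                    return -EINVAL;

            addr = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
            if (!IS_ERR((void *) addr))
                    force_successful_syscall_return();
            return addr;
    }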
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 4990495d7531..47a192781b0a 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -61,7 +61,7 @@ unsigned long long sched_clock(void)
61 61
62#ifdef CONFIG_PARAVIRT 62#ifdef CONFIG_PARAVIRT
63static void 63static void
64paravirt_clocksource_resume(void) 64paravirt_clocksource_resume(struct clocksource *cs)
65{ 65{
66 if (pv_time_ops.clocksource_resume) 66 if (pv_time_ops.clocksource_resume)
67 pv_time_ops.clocksource_resume(); 67 pv_time_ops.clocksource_resume();
@@ -473,7 +473,7 @@ void update_vsyscall_tz(void)
473{ 473{
474} 474}
475 475
476void update_vsyscall(struct timespec *wall, struct clocksource *c) 476void update_vsyscall(struct timespec *wall, struct clocksource *c, u32 mult)
477{ 477{
478 unsigned long flags; 478 unsigned long flags;
479 479
@@ -481,7 +481,7 @@ void update_vsyscall(struct timespec *wall, struct clocksource *c)
481 481
482 /* copy fsyscall clock data */ 482 /* copy fsyscall clock data */
483 fsyscall_gtod_data.clk_mask = c->mask; 483 fsyscall_gtod_data.clk_mask = c->mask;
484 fsyscall_gtod_data.clk_mult = c->mult; 484 fsyscall_gtod_data.clk_mult = mult;
485 fsyscall_gtod_data.clk_shift = c->shift; 485 fsyscall_gtod_data.clk_shift = c->shift;
486 fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio; 486 fsyscall_gtod_data.clk_fsys_mmio = c->fsys_mmio;
487 fsyscall_gtod_data.clk_cycle_last = c->cycle_last; 487 fsyscall_gtod_data.clk_cycle_last = c->cycle_last;
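Both hunks track timekeeping interface changes: clocksource resume callbacks now receive the clocksource being resumed, and update_vsyscall() is handed the NTP-adjusted multiplier explicitly instead of reading c->mult, so the fsyscall data must copy the argument. A condensed sketch of the new contract:

    /* 'mult' (not c->mult) carries the corrected multiplier for this update. */
    void example_update_vsyscall(struct timespec *wall, struct clocksource *c,
                                 u32 mult)
    {
            unsigned long flags;

            write_seqlock_irqsave(&fsyscall_gtod_data.lock, flags);
            fsyscall_gtod_data.clk_mult = mult;
            /* ... copy mask, shift, cycle_last and wall time as before ... */
            write_sequnlock_irqrestore(&fsyscall_gtod_data.lock, flags);
    }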
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index 8f060352e129..28f299de2903 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -17,6 +17,7 @@
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/node.h> 19#include <linux/node.h>
20#include <linux/slab.h>
20#include <linux/init.h> 21#include <linux/init.h>
21#include <linux/bootmem.h> 22#include <linux/bootmem.h>
22#include <linux/nodemask.h> 23#include <linux/nodemask.h>
@@ -282,7 +283,7 @@ static ssize_t cache_show(struct kobject * kobj, struct attribute * attr, char *
282 return ret; 283 return ret;
283} 284}
284 285
285static struct sysfs_ops cache_sysfs_ops = { 286static const struct sysfs_ops cache_sysfs_ops = {
286 .show = cache_show 287 .show = cache_show
287}; 288};
288 289
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index f0cda765e681..fd80e70018a9 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -19,7 +19,6 @@
19#include <linux/kdebug.h> 19#include <linux/kdebug.h>
20 20
21#include <asm/fpswa.h> 21#include <asm/fpswa.h>
22#include <asm/ia32.h>
23#include <asm/intrinsics.h> 22#include <asm/intrinsics.h>
24#include <asm/processor.h> 23#include <asm/processor.h>
25#include <asm/uaccess.h> 24#include <asm/uaccess.h>
@@ -626,10 +625,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
626 break; 625 break;
627 626
628 case 45: 627 case 45:
629#ifdef CONFIG_IA32_SUPPORT
630 if (ia32_exception(&regs, isr) == 0)
631 return;
632#endif
633 printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n"); 628 printk(KERN_ERR "Unexpected IA-32 exception (Trap 45)\n");
634 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n", 629 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx\n",
635 iip, ifa, isr); 630 iip, ifa, isr);
@@ -637,10 +632,6 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
637 break; 632 break;
638 633
639 case 46: 634 case 46:
640#ifdef CONFIG_IA32_SUPPORT
641 if (ia32_intercept(&regs, isr) == 0)
642 return;
643#endif
644 printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n"); 635 printk(KERN_ERR "Unexpected IA-32 intercept trap (Trap 46)\n");
645 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n", 636 printk(KERN_ERR " iip - 0x%lx, ifa - 0x%lx, isr - 0x%lx, iim - 0x%lx\n",
646 iip, ifa, isr, iim); 637 iip, ifa, isr, iim);
diff --git a/arch/ia64/kernel/uncached.c b/arch/ia64/kernel/uncached.c
index a595823582d9..c4696d217ce0 100644
--- a/arch/ia64/kernel/uncached.c
+++ b/arch/ia64/kernel/uncached.c
@@ -18,9 +18,9 @@
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/errno.h> 19#include <linux/errno.h>
20#include <linux/string.h> 20#include <linux/string.h>
21#include <linux/slab.h>
22#include <linux/efi.h> 21#include <linux/efi.h>
23#include <linux/genalloc.h> 22#include <linux/genalloc.h>
23#include <linux/gfp.h>
24#include <asm/page.h> 24#include <asm/page.h>
25#include <asm/pal.h> 25#include <asm/pal.h>
26#include <asm/system.h> 26#include <asm/system.h>
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0a0c77b2c988..1295ba327f6f 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -166,6 +166,12 @@ SECTIONS
166 } 166 }
167#endif 167#endif
168 168
169#ifdef CONFIG_SMP
170 . = ALIGN(PERCPU_PAGE_SIZE);
171 __cpu0_per_cpu = .;
172 . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
173#endif
174
169 . = ALIGN(PAGE_SIZE); 175 . = ALIGN(PAGE_SIZE);
170 __init_end = .; 176 __init_end = .;
171 177
@@ -198,11 +204,6 @@ SECTIONS
198 data : { } :data 204 data : { } :data
199 .data : AT(ADDR(.data) - LOAD_OFFSET) 205 .data : AT(ADDR(.data) - LOAD_OFFSET)
200 { 206 {
201#ifdef CONFIG_SMP
202 . = ALIGN(PERCPU_PAGE_SIZE);
203 __cpu0_per_cpu = .;
204 . = . + PERCPU_PAGE_SIZE; /* cpu0 per-cpu space */
205#endif
206 INIT_TASK_DATA(PAGE_SIZE) 207 INIT_TASK_DATA(PAGE_SIZE)
207 CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES) 208 CACHELINE_ALIGNED_DATA(SMP_CACHE_BYTES)
208 READ_MOSTLY_DATA(SMP_CACHE_BYTES) 209 READ_MOSTLY_DATA(SMP_CACHE_BYTES)
diff --git a/arch/ia64/kvm/Kconfig b/arch/ia64/kvm/Kconfig
index ef3e7be29caf..fa4d1e59deb0 100644
--- a/arch/ia64/kvm/Kconfig
+++ b/arch/ia64/kvm/Kconfig
@@ -26,6 +26,7 @@ config KVM
26 select ANON_INODES 26 select ANON_INODES
27 select HAVE_KVM_IRQCHIP 27 select HAVE_KVM_IRQCHIP
28 select KVM_APIC_ARCHITECTURE 28 select KVM_APIC_ARCHITECTURE
29 select KVM_MMIO
29 ---help--- 30 ---help---
30 Support hosting fully virtualized guest machines using hardware 31 Support hosting fully virtualized guest machines using hardware
31 virtualization extensions. You will need a fairly recent 32 virtualization extensions. You will need a fairly recent
@@ -47,6 +48,7 @@ config KVM_INTEL
47 Provides support for KVM on Itanium 2 processors equipped with the VT 48 Provides support for KVM on Itanium 2 processors equipped with the VT
48 extensions. 49 extensions.
49 50
51source drivers/vhost/Kconfig
50source drivers/virtio/Kconfig 52source drivers/virtio/Kconfig
51 53
52endif # VIRTUALIZATION 54endif # VIRTUALIZATION
diff --git a/arch/ia64/kvm/Makefile b/arch/ia64/kvm/Makefile
index 0bb99b732908..1089b3e918ac 100644
--- a/arch/ia64/kvm/Makefile
+++ b/arch/ia64/kvm/Makefile
@@ -49,7 +49,7 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
49EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/ 49EXTRA_AFLAGS += -Ivirt/kvm -Iarch/ia64/kvm/
50 50
51common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \ 51common-objs = $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
52 coalesced_mmio.o irq_comm.o) 52 coalesced_mmio.o irq_comm.o assigned-dev.o)
53 53
54ifeq ($(CONFIG_IOMMU_API),y) 54ifeq ($(CONFIG_IOMMU_API),y)
55common-objs += $(addprefix ../../../virt/kvm/, iommu.o) 55common-objs += $(addprefix ../../../virt/kvm/, iommu.o)
diff --git a/arch/ia64/kvm/asm-offsets.c b/arch/ia64/kvm/asm-offsets.c
index 0c3564a7a033..9324c875caf5 100644
--- a/arch/ia64/kvm/asm-offsets.c
+++ b/arch/ia64/kvm/asm-offsets.c
@@ -22,7 +22,6 @@
22 * 22 *
23 */ 23 */
24 24
25#include <linux/autoconf.h>
26#include <linux/kvm_host.h> 25#include <linux/kvm_host.h>
27#include <linux/kbuild.h> 26#include <linux/kbuild.h>
28 27
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 0ad09f05efa9..7f3c0a2e60cd 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -23,8 +23,8 @@
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/errno.h> 24#include <linux/errno.h>
25#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/gfp.h>
27#include <linux/fs.h> 26#include <linux/fs.h>
27#include <linux/slab.h>
28#include <linux/smp.h> 28#include <linux/smp.h>
29#include <linux/kvm_host.h> 29#include <linux/kvm_host.h>
30#include <linux/kvm.h> 30#include <linux/kvm.h>
@@ -124,7 +124,7 @@ long ia64_pal_vp_create(u64 *vpd, u64 *host_iva, u64 *opt_handler)
124 124
125static DEFINE_SPINLOCK(vp_lock); 125static DEFINE_SPINLOCK(vp_lock);
126 126
127void kvm_arch_hardware_enable(void *garbage) 127int kvm_arch_hardware_enable(void *garbage)
128{ 128{
129 long status; 129 long status;
130 long tmp_base; 130 long tmp_base;
@@ -137,7 +137,7 @@ void kvm_arch_hardware_enable(void *garbage)
137 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT); 137 slot = ia64_itr_entry(0x3, KVM_VMM_BASE, pte, KVM_VMM_SHIFT);
138 local_irq_restore(saved_psr); 138 local_irq_restore(saved_psr);
139 if (slot < 0) 139 if (slot < 0)
140 return; 140 return -EINVAL;
141 141
142 spin_lock(&vp_lock); 142 spin_lock(&vp_lock);
143 status = ia64_pal_vp_init_env(kvm_vsa_base ? 143 status = ia64_pal_vp_init_env(kvm_vsa_base ?
@@ -145,7 +145,7 @@ void kvm_arch_hardware_enable(void *garbage)
145 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base); 145 __pa(kvm_vm_buffer), KVM_VM_BUFFER_BASE, &tmp_base);
146 if (status != 0) { 146 if (status != 0) {
147 printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n"); 147 printk(KERN_WARNING"kvm: Failed to Enable VT Support!!!!\n");
148 return ; 148 return -EINVAL;
149 } 149 }
150 150
151 if (!kvm_vsa_base) { 151 if (!kvm_vsa_base) {
@@ -154,6 +154,8 @@ void kvm_arch_hardware_enable(void *garbage)
154 } 154 }
155 spin_unlock(&vp_lock); 155 spin_unlock(&vp_lock);
156 ia64_ptr_entry(0x3, slot); 156 ia64_ptr_entry(0x3, slot);
157
158 return 0;
157} 159}
158 160
159void kvm_arch_hardware_disable(void *garbage) 161void kvm_arch_hardware_disable(void *garbage)
@@ -239,10 +241,10 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
239 return 0; 241 return 0;
240mmio: 242mmio:
241 if (p->dir) 243 if (p->dir)
242 r = kvm_io_bus_read(&vcpu->kvm->mmio_bus, p->addr, 244 r = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, p->addr,
243 p->size, &p->data); 245 p->size, &p->data);
244 else 246 else
245 r = kvm_io_bus_write(&vcpu->kvm->mmio_bus, p->addr, 247 r = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, p->addr,
246 p->size, &p->data); 248 p->size, &p->data);
247 if (r) 249 if (r)
248 printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr); 250 printk(KERN_ERR"kvm: No iodevice found! addr:%lx\n", p->addr);
@@ -634,12 +636,9 @@ static void kvm_vcpu_post_transition(struct kvm_vcpu *vcpu)
634static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) 636static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
635{ 637{
636 union context *host_ctx, *guest_ctx; 638 union context *host_ctx, *guest_ctx;
637 int r; 639 int r, idx;
638 640
639 /* 641 idx = srcu_read_lock(&vcpu->kvm->srcu);
640 * down_read() may sleep and return with interrupts enabled
641 */
642 down_read(&vcpu->kvm->slots_lock);
643 642
644again: 643again:
645 if (signal_pending(current)) { 644 if (signal_pending(current)) {
@@ -661,7 +660,7 @@ again:
661 if (r < 0) 660 if (r < 0)
662 goto vcpu_run_fail; 661 goto vcpu_run_fail;
663 662
664 up_read(&vcpu->kvm->slots_lock); 663 srcu_read_unlock(&vcpu->kvm->srcu, idx);
665 kvm_guest_enter(); 664 kvm_guest_enter();
666 665
667 /* 666 /*
@@ -685,7 +684,7 @@ again:
685 kvm_guest_exit(); 684 kvm_guest_exit();
686 preempt_enable(); 685 preempt_enable();
687 686
688 down_read(&vcpu->kvm->slots_lock); 687 idx = srcu_read_lock(&vcpu->kvm->srcu);
689 688
690 r = kvm_handle_exit(kvm_run, vcpu); 689 r = kvm_handle_exit(kvm_run, vcpu);
691 690
@@ -695,10 +694,10 @@ again:
695 } 694 }
696 695
697out: 696out:
698 up_read(&vcpu->kvm->slots_lock); 697 srcu_read_unlock(&vcpu->kvm->srcu, idx);
699 if (r > 0) { 698 if (r > 0) {
700 kvm_resched(vcpu); 699 kvm_resched(vcpu);
701 down_read(&vcpu->kvm->slots_lock); 700 idx = srcu_read_lock(&vcpu->kvm->srcu);
702 goto again; 701 goto again;
703 } 702 }
704 703
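The slots_lock rwsem around the vcpu loop gives way to KVM's SRCU: readers of the memslot array enter an SRCU read-side critical section, which may sleep and is far cheaper than down_read(), and must hand the index returned by srcu_read_lock() back to srcu_read_unlock(). The reader-side pattern in isolation:

    /* kvm->srcu protects kvm->memslots; writers publish a new array and
     * synchronize_srcu() before freeing the old one. */
    static int example_slots_reader(struct kvm *kvm)
    {
            struct kvm_memslots *slots;
            int idx, n;

            idx = srcu_read_lock(&kvm->srcu);
            slots = rcu_dereference(kvm->memslots);
            n = slots->nmemslots;
            srcu_read_unlock(&kvm->srcu, idx);      /* must pass idx back */
            return n;
    }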
@@ -851,8 +850,7 @@ static int kvm_vm_ioctl_get_irqchip(struct kvm *kvm,
851 r = 0; 850 r = 0;
852 switch (chip->chip_id) { 851 switch (chip->chip_id) {
853 case KVM_IRQCHIP_IOAPIC: 852 case KVM_IRQCHIP_IOAPIC:
854 memcpy(&chip->chip.ioapic, ioapic_irqchip(kvm), 853 r = kvm_get_ioapic(kvm, &chip->chip.ioapic);
855 sizeof(struct kvm_ioapic_state));
856 break; 854 break;
857 default: 855 default:
858 r = -EINVAL; 856 r = -EINVAL;
@@ -868,9 +866,7 @@ static int kvm_vm_ioctl_set_irqchip(struct kvm *kvm, struct kvm_irqchip *chip)
868 r = 0; 866 r = 0;
869 switch (chip->chip_id) { 867 switch (chip->chip_id) {
870 case KVM_IRQCHIP_IOAPIC: 868 case KVM_IRQCHIP_IOAPIC:
871 memcpy(ioapic_irqchip(kvm), 869 r = kvm_set_ioapic(kvm, &chip->chip.ioapic);
872 &chip->chip.ioapic,
873 sizeof(struct kvm_ioapic_state));
874 break; 870 break;
875 default: 871 default:
876 r = -EINVAL; 872 r = -EINVAL;
@@ -944,7 +940,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
944{ 940{
945 struct kvm *kvm = filp->private_data; 941 struct kvm *kvm = filp->private_data;
946 void __user *argp = (void __user *)arg; 942 void __user *argp = (void __user *)arg;
947 int r = -EINVAL; 943 int r = -ENOTTY;
948 944
949 switch (ioctl) { 945 switch (ioctl) {
950 case KVM_SET_MEMORY_REGION: { 946 case KVM_SET_MEMORY_REGION: {
@@ -972,7 +968,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
972 goto out; 968 goto out;
973 r = kvm_setup_default_irq_routing(kvm); 969 r = kvm_setup_default_irq_routing(kvm);
974 if (r) { 970 if (r) {
975 kfree(kvm->arch.vioapic); 971 kvm_ioapic_destroy(kvm);
976 goto out; 972 goto out;
977 } 973 }
978 break; 974 break;
@@ -985,10 +981,8 @@ long kvm_arch_vm_ioctl(struct file *filp,
985 goto out; 981 goto out;
986 if (irqchip_in_kernel(kvm)) { 982 if (irqchip_in_kernel(kvm)) {
987 __s32 status; 983 __s32 status;
988 mutex_lock(&kvm->irq_lock);
989 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 984 status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
990 irq_event.irq, irq_event.level); 985 irq_event.irq, irq_event.level);
991 mutex_unlock(&kvm->irq_lock);
992 if (ioctl == KVM_IRQ_LINE_STATUS) { 986 if (ioctl == KVM_IRQ_LINE_STATUS) {
993 irq_event.status = status; 987 irq_event.status = status;
994 if (copy_to_user(argp, &irq_event, 988 if (copy_to_user(argp, &irq_event,
@@ -1380,12 +1374,14 @@ static void free_kvm(struct kvm *kvm)
1380 1374
1381static void kvm_release_vm_pages(struct kvm *kvm) 1375static void kvm_release_vm_pages(struct kvm *kvm)
1382{ 1376{
1377 struct kvm_memslots *slots;
1383 struct kvm_memory_slot *memslot; 1378 struct kvm_memory_slot *memslot;
1384 int i, j; 1379 int i, j;
1385 unsigned long base_gfn; 1380 unsigned long base_gfn;
1386 1381
1387 for (i = 0; i < kvm->nmemslots; i++) { 1382 slots = rcu_dereference(kvm->memslots);
1388 memslot = &kvm->memslots[i]; 1383 for (i = 0; i < slots->nmemslots; i++) {
1384 memslot = &slots->memslots[i];
1389 base_gfn = memslot->base_gfn; 1385 base_gfn = memslot->base_gfn;
1390 1386
1391 for (j = 0; j < memslot->npages; j++) { 1387 for (j = 0; j < memslot->npages; j++) {
@@ -1408,6 +1404,7 @@ void kvm_arch_destroy_vm(struct kvm *kvm)
1408 kfree(kvm->arch.vioapic); 1404 kfree(kvm->arch.vioapic);
1409 kvm_release_vm_pages(kvm); 1405 kvm_release_vm_pages(kvm);
1410 kvm_free_physmem(kvm); 1406 kvm_free_physmem(kvm);
1407 cleanup_srcu_struct(&kvm->srcu);
1411 free_kvm(kvm); 1408 free_kvm(kvm);
1412} 1409}
1413 1410
@@ -1579,15 +1576,15 @@ out:
1579 return r; 1576 return r;
1580} 1577}
1581 1578
1582int kvm_arch_set_memory_region(struct kvm *kvm, 1579int kvm_arch_prepare_memory_region(struct kvm *kvm,
1583 struct kvm_userspace_memory_region *mem, 1580 struct kvm_memory_slot *memslot,
1584 struct kvm_memory_slot old, 1581 struct kvm_memory_slot old,
1582 struct kvm_userspace_memory_region *mem,
1585 int user_alloc) 1583 int user_alloc)
1586{ 1584{
1587 unsigned long i; 1585 unsigned long i;
1588 unsigned long pfn; 1586 unsigned long pfn;
1589 int npages = mem->memory_size >> PAGE_SHIFT; 1587 int npages = memslot->npages;
1590 struct kvm_memory_slot *memslot = &kvm->memslots[mem->slot];
1591 unsigned long base_gfn = memslot->base_gfn; 1588 unsigned long base_gfn = memslot->base_gfn;
1592 1589
1593 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT)) 1590 if (base_gfn + npages > (KVM_MAX_MEM_SIZE >> PAGE_SHIFT))
@@ -1611,6 +1608,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
1611 return 0; 1608 return 0;
1612} 1609}
1613 1610
1611void kvm_arch_commit_memory_region(struct kvm *kvm,
1612 struct kvm_userspace_memory_region *mem,
1613 struct kvm_memory_slot old,
1614 int user_alloc)
1615{
1616 return;
1617}
1618
1614void kvm_arch_flush_shadow(struct kvm *kvm) 1619void kvm_arch_flush_shadow(struct kvm *kvm)
1615{ 1620{
1616 kvm_flush_remote_tlbs(kvm); 1621 kvm_flush_remote_tlbs(kvm);
@@ -1797,7 +1802,8 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1797{ 1802{
1798 struct kvm_memory_slot *memslot; 1803 struct kvm_memory_slot *memslot;
1799 int r, i; 1804 int r, i;
1800 long n, base; 1805 long base;
1806 unsigned long n;
1801 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base + 1807 unsigned long *dirty_bitmap = (unsigned long *)(kvm->arch.vm_base +
1802 offsetof(struct kvm_vm_data, kvm_mem_dirty_log)); 1808 offsetof(struct kvm_vm_data, kvm_mem_dirty_log));
1803 1809
@@ -1805,12 +1811,12 @@ static int kvm_ia64_sync_dirty_log(struct kvm *kvm,
1805 if (log->slot >= KVM_MEMORY_SLOTS) 1811 if (log->slot >= KVM_MEMORY_SLOTS)
1806 goto out; 1812 goto out;
1807 1813
1808 memslot = &kvm->memslots[log->slot]; 1814 memslot = &kvm->memslots->memslots[log->slot];
1809 r = -ENOENT; 1815 r = -ENOENT;
1810 if (!memslot->dirty_bitmap) 1816 if (!memslot->dirty_bitmap)
1811 goto out; 1817 goto out;
1812 1818
1813 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; 1819 n = kvm_dirty_bitmap_bytes(memslot);
1814 base = memslot->base_gfn / BITS_PER_LONG; 1820 base = memslot->base_gfn / BITS_PER_LONG;
1815 1821
1816 for (i = 0; i < n/sizeof(long); ++i) { 1822 for (i = 0; i < n/sizeof(long); ++i) {
@@ -1826,10 +1832,11 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1826 struct kvm_dirty_log *log) 1832 struct kvm_dirty_log *log)
1827{ 1833{
1828 int r; 1834 int r;
1829 int n; 1835 unsigned long n;
1830 struct kvm_memory_slot *memslot; 1836 struct kvm_memory_slot *memslot;
1831 int is_dirty = 0; 1837 int is_dirty = 0;
1832 1838
1839 mutex_lock(&kvm->slots_lock);
1833 spin_lock(&kvm->arch.dirty_log_lock); 1840 spin_lock(&kvm->arch.dirty_log_lock);
1834 1841
1835 r = kvm_ia64_sync_dirty_log(kvm, log); 1842 r = kvm_ia64_sync_dirty_log(kvm, log);
@@ -1843,12 +1850,13 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1843 /* If nothing is dirty, don't bother messing with page tables. */ 1850 /* If nothing is dirty, don't bother messing with page tables. */
1844 if (is_dirty) { 1851 if (is_dirty) {
1845 kvm_flush_remote_tlbs(kvm); 1852 kvm_flush_remote_tlbs(kvm);
1846 memslot = &kvm->memslots[log->slot]; 1853 memslot = &kvm->memslots->memslots[log->slot];
1847 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8; 1854 n = kvm_dirty_bitmap_bytes(memslot);
1848 memset(memslot->dirty_bitmap, 0, n); 1855 memset(memslot->dirty_bitmap, 0, n);
1849 } 1856 }
1850 r = 0; 1857 r = 0;
1851out: 1858out:
1859 mutex_unlock(&kvm->slots_lock);
1852 spin_unlock(&kvm->arch.dirty_log_lock); 1860 spin_unlock(&kvm->arch.dirty_log_lock);
1853 return r; 1861 return r;
1854} 1862}
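kvm_dirty_bitmap_bytes() centralizes the bitmap-size computation both hunks had open-coded; for this tree it is equivalent to the expression it replaces:

    /* Approximate shape of the generic helper. */
    static inline unsigned long example_dirty_bitmap_bytes(struct kvm_memory_slot *s)
    {
            return ALIGN(s->npages, BITS_PER_LONG) / 8;
    }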
diff --git a/arch/ia64/kvm/kvm_fw.c b/arch/ia64/kvm/kvm_fw.c
index e4b82319881d..cb548ee9fcae 100644
--- a/arch/ia64/kvm/kvm_fw.c
+++ b/arch/ia64/kvm/kvm_fw.c
@@ -75,7 +75,7 @@ static void set_pal_result(struct kvm_vcpu *vcpu,
75 struct exit_ctl_data *p; 75 struct exit_ctl_data *p;
76 76
77 p = kvm_get_exit_data(vcpu); 77 p = kvm_get_exit_data(vcpu);
78 if (p && p->exit_reason == EXIT_REASON_PAL_CALL) { 78 if (p->exit_reason == EXIT_REASON_PAL_CALL) {
79 p->u.pal_data.ret = result; 79 p->u.pal_data.ret = result;
80 return ; 80 return ;
81 } 81 }
@@ -87,7 +87,7 @@ static void set_sal_result(struct kvm_vcpu *vcpu,
87 struct exit_ctl_data *p; 87 struct exit_ctl_data *p;
88 88
89 p = kvm_get_exit_data(vcpu); 89 p = kvm_get_exit_data(vcpu);
90 if (p && p->exit_reason == EXIT_REASON_SAL_CALL) { 90 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
91 p->u.sal_data.ret = result; 91 p->u.sal_data.ret = result;
92 return ; 92 return ;
93 } 93 }
@@ -322,7 +322,7 @@ static u64 kvm_get_pal_call_index(struct kvm_vcpu *vcpu)
322 struct exit_ctl_data *p; 322 struct exit_ctl_data *p;
323 323
324 p = kvm_get_exit_data(vcpu); 324 p = kvm_get_exit_data(vcpu);
325 if (p && (p->exit_reason == EXIT_REASON_PAL_CALL)) 325 if (p->exit_reason == EXIT_REASON_PAL_CALL)
326 index = p->u.pal_data.gr28; 326 index = p->u.pal_data.gr28;
327 327
328 return index; 328 return index;
@@ -646,18 +646,16 @@ static void kvm_get_sal_call_data(struct kvm_vcpu *vcpu, u64 *in0, u64 *in1,
646 646
647 p = kvm_get_exit_data(vcpu); 647 p = kvm_get_exit_data(vcpu);
648 648
649 if (p) { 649 if (p->exit_reason == EXIT_REASON_SAL_CALL) {
650 if (p->exit_reason == EXIT_REASON_SAL_CALL) { 650 *in0 = p->u.sal_data.in0;
651 *in0 = p->u.sal_data.in0; 651 *in1 = p->u.sal_data.in1;
652 *in1 = p->u.sal_data.in1; 652 *in2 = p->u.sal_data.in2;
653 *in2 = p->u.sal_data.in2; 653 *in3 = p->u.sal_data.in3;
654 *in3 = p->u.sal_data.in3; 654 *in4 = p->u.sal_data.in4;
655 *in4 = p->u.sal_data.in4; 655 *in5 = p->u.sal_data.in5;
656 *in5 = p->u.sal_data.in5; 656 *in6 = p->u.sal_data.in6;
657 *in6 = p->u.sal_data.in6; 657 *in7 = p->u.sal_data.in7;
658 *in7 = p->u.sal_data.in7; 658 return ;
659 return ;
660 }
661 } 659 }
662 *in0 = 0; 660 *in0 = 0;
663} 661}
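The dropped NULL tests are safe because kvm_get_exit_data() returns the address of a field embedded in the vcpu itself, which can never be NULL; callers only need to check exit_reason. In outline:

    /* Why the checks could go: the exit data lives inside the vcpu. */
    static struct exit_ctl_data *example_get_exit_data(struct kvm_vcpu *vcpu)
    {
            return &vcpu->arch.exit_data;
    }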
diff --git a/arch/ia64/kvm/mmio.c b/arch/ia64/kvm/mmio.c
index 9bf55afd08d0..fb8f9f59a1ed 100644
--- a/arch/ia64/kvm/mmio.c
+++ b/arch/ia64/kvm/mmio.c
@@ -316,8 +316,8 @@ void emulate_io_inst(struct kvm_vcpu *vcpu, u64 padr, u64 ma)
316 return; 316 return;
317 } else { 317 } else {
318 inst_type = -1; 318 inst_type = -1;
319 panic_vm(vcpu, "Unsupported MMIO access instruction! \ 319 panic_vm(vcpu, "Unsupported MMIO access instruction! "
320 Bundle[0]=0x%lx, Bundle[1]=0x%lx\n", 320 "Bundle[0]=0x%lx, Bundle[1]=0x%lx\n",
321 bundle.i64[0], bundle.i64[1]); 321 bundle.i64[0], bundle.i64[1]);
322 } 322 }
323 323
diff --git a/arch/ia64/kvm/vcpu.c b/arch/ia64/kvm/vcpu.c
index dce75b70cdd5..958815c9787d 100644
--- a/arch/ia64/kvm/vcpu.c
+++ b/arch/ia64/kvm/vcpu.c
@@ -1639,8 +1639,8 @@ void vcpu_set_psr(struct kvm_vcpu *vcpu, unsigned long val)
1639 * Otherwise panic 1639 * Otherwise panic
1640 */ 1640 */
1641 if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM)) 1641 if (val & (IA64_PSR_PK | IA64_PSR_IS | IA64_PSR_VM))
1642 panic_vm(vcpu, "Only support guests with vpsr.pk =0 \ 1642 panic_vm(vcpu, "Only support guests with vpsr.pk =0 "
1643 & vpsr.is=0\n"); 1643 "& vpsr.is=0\n");
1644 1644
1645 /* 1645 /*
1646 * For those IA64_PSR bits: id/da/dd/ss/ed/ia 1646 * For those IA64_PSR bits: id/da/dd/ss/ed/ia
diff --git a/arch/ia64/kvm/vcpu.h b/arch/ia64/kvm/vcpu.h
index 360724d3ae69..988911b4cc7a 100644
--- a/arch/ia64/kvm/vcpu.h
+++ b/arch/ia64/kvm/vcpu.h
@@ -388,6 +388,9 @@ static inline u64 __gpfn_is_io(u64 gpfn)
388#define _vmm_raw_spin_lock(x) do {}while(0) 388#define _vmm_raw_spin_lock(x) do {}while(0)
389#define _vmm_raw_spin_unlock(x) do {}while(0) 389#define _vmm_raw_spin_unlock(x) do {}while(0)
390#else 390#else
391typedef struct {
392 volatile unsigned int lock;
393} vmm_spinlock_t;
391#define _vmm_raw_spin_lock(x) \ 394#define _vmm_raw_spin_lock(x) \
392 do { \ 395 do { \
393 __u32 *ia64_spinlock_ptr = (__u32 *) (x); \ 396 __u32 *ia64_spinlock_ptr = (__u32 *) (x); \
@@ -405,12 +408,12 @@ static inline u64 __gpfn_is_io(u64 gpfn)
405 408
406#define _vmm_raw_spin_unlock(x) \ 409#define _vmm_raw_spin_unlock(x) \
407 do { barrier(); \ 410 do { barrier(); \
408 ((spinlock_t *)x)->raw_lock.lock = 0; } \ 411 ((vmm_spinlock_t *)x)->lock = 0; } \
409while (0) 412while (0)
410#endif 413#endif
411 414
412void vmm_spin_lock(spinlock_t *lock); 415void vmm_spin_lock(vmm_spinlock_t *lock);
413void vmm_spin_unlock(spinlock_t *lock); 416void vmm_spin_unlock(vmm_spinlock_t *lock);
414enum { 417enum {
415 I_TLB = 1, 418 I_TLB = 1,
416 D_TLB = 2 419 D_TLB = 2
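The VMM runs in its own protected environment and pokes the lock word directly, which broke once spinlock_t's raw_lock became an arch-private ticket-lock type; the fix is a private one-word lock whose layout matches what the hand-rolled VMM code always assumed. The unlock really is a plain store:

    typedef struct {
            volatile unsigned int lock;     /* 0 means free */
    } vmm_spinlock_t;

    static inline void example_vmm_unlock(vmm_spinlock_t *l)
    {
            barrier();      /* keep the critical section before the release */
            l->lock = 0;    /* plain store releases the lock */
    }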
diff --git a/arch/ia64/kvm/vmm.c b/arch/ia64/kvm/vmm.c
index f4b4c899bb6c..7a62f75778c5 100644
--- a/arch/ia64/kvm/vmm.c
+++ b/arch/ia64/kvm/vmm.c
@@ -60,12 +60,12 @@ static void __exit kvm_vmm_exit(void)
60 return ; 60 return ;
61} 61}
62 62
63void vmm_spin_lock(spinlock_t *lock) 63void vmm_spin_lock(vmm_spinlock_t *lock)
64{ 64{
65 _vmm_raw_spin_lock(lock); 65 _vmm_raw_spin_lock(lock);
66} 66}
67 67
68void vmm_spin_unlock(spinlock_t *lock) 68void vmm_spin_unlock(vmm_spinlock_t *lock)
69{ 69{
70 _vmm_raw_spin_unlock(lock); 70 _vmm_raw_spin_unlock(lock);
71} 71}
diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c
index 20b3852f7a6e..4332f7ee5203 100644
--- a/arch/ia64/kvm/vtlb.c
+++ b/arch/ia64/kvm/vtlb.c
@@ -182,7 +182,7 @@ void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
182{ 182{
183 u64 i, dirty_pages = 1; 183 u64 i, dirty_pages = 1;
184 u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT; 184 u64 base_gfn = (pte&_PAGE_PPN_MASK) >> PAGE_SHIFT;
185 spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa); 185 vmm_spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
186 void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE; 186 void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;
187 187
188 dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT; 188 dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index 2f724d2bf299..54bf54059811 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -154,38 +154,99 @@ static void *cpu_data;
 void * __cpuinit
 per_cpu_init (void)
 {
-	int cpu;
-	static int first_time=1;
+	static bool first_time = true;
+	void *cpu0_data = __cpu0_per_cpu;
+	unsigned int cpu;
+
+	if (!first_time)
+		goto skip;
+	first_time = false;
 
 	/*
-	 * get_free_pages() cannot be used before cpu_init() done.  BSP
-	 * allocates "NR_CPUS" pages for all CPUs to avoid that AP calls
-	 * get_zeroed_page().
+	 * get_free_pages() cannot be used before cpu_init() done.
+	 * BSP allocates PERCPU_PAGE_SIZE bytes for all possible CPUs
+	 * to avoid that AP calls get_zeroed_page().
 	 */
-	if (first_time) {
-		void *cpu0_data = __cpu0_per_cpu;
+	for_each_possible_cpu(cpu) {
+		void *src = cpu == 0 ? cpu0_data : __phys_per_cpu_start;
 
-		first_time=0;
+		memcpy(cpu_data, src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)cpu_data - __per_cpu_start;
+		per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
 
-		__per_cpu_offset[0] = (char *) cpu0_data - __per_cpu_start;
-		per_cpu(local_per_cpu_offset, 0) = __per_cpu_offset[0];
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that the percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA, __pa(cpu_data) -
+				    (unsigned long)__per_cpu_start);
 
-		for (cpu = 1; cpu < NR_CPUS; cpu++) {
-			memcpy(cpu_data, __phys_per_cpu_start, __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char *) cpu_data - __per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-			per_cpu(local_per_cpu_offset, cpu) = __per_cpu_offset[cpu];
-		}
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
+skip:
 	return __per_cpu_start + __per_cpu_offset[smp_processor_id()];
 }
 
 static inline void
 alloc_per_cpu_data(void)
 {
-	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * NR_CPUS-1,
+	cpu_data = __alloc_bootmem(PERCPU_PAGE_SIZE * num_possible_cpus(),
 				   PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
 }
+
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init
+setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *gi;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int rc;
+
+	ai = pcpu_alloc_alloc_info(1, num_possible_cpus());
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	gi = &ai->groups[0];
+
+	/* units are assigned consecutively to possible cpus */
+	for_each_possible_cpu(cpu)
+		gi->cpu_map[gi->nr_units++] = cpu;
+
+	/* set parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size = static_size;
+	ai->reserved_size = reserved_size;
+	ai->dyn_size = dyn_size;
+	ai->unit_size = PERCPU_PAGE_SIZE;
+	ai->atom_size = PAGE_SIZE;
+	ai->alloc_size = PERCPU_PAGE_SIZE;
+
+	rc = pcpu_setup_first_chunk(ai, __per_cpu_start + __per_cpu_offset[0]);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
 #else
 #define alloc_per_cpu_data() do { } while (0)
 #endif /* CONFIG_SMP */
@@ -270,8 +331,8 @@ paging_init (void)
 
 	map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
-	vmalloc_end -= map_size;
-	vmem_map = (struct page *) vmalloc_end;
+	VMALLOC_END -= map_size;
+	vmem_map = (struct page *) VMALLOC_END;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 
 	/*
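
In the new setup_per_cpu_areas() above, every CPU gets exactly one PERCPU_PAGE_SIZE unit, which must hold the static per-cpu image, the module reserve, and whatever dynamic space is left over; the panic fires when the first two alone exceed the unit. A standalone sketch of that bookkeeping follows (all sizes here are illustrative stand-ins, not the real linker or config values):

    #include <stdio.h>

    /* Sketch of the unit-size split done by setup_per_cpu_areas():
     * one PERCPU_PAGE_SIZE block per cpu, carved into the static
     * per-cpu image, a reserve for modules, and dynamic space. */
    int main(void)
    {
        long unit_size = 64 * 1024;     /* stand-in for PERCPU_PAGE_SIZE */
        long static_size = 23 * 1024;   /* __per_cpu_end - __per_cpu_start */
        long reserved_size = 8 * 1024;  /* stand-in for PERCPU_MODULE_RESERVE */
        long dyn_size = unit_size - static_size - reserved_size;

        if (dyn_size < 0) {
            fprintf(stderr, "percpu area overflow\n");
            return 1;
        }
        printf("static=%ld reserved=%ld dyn=%ld\n",
               static_size, reserved_size, dyn_size);
        return 0;
    }
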
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d85ba98d9008..61620323bb60 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -22,6 +22,7 @@
 #include <linux/acpi.h>
 #include <linux/efi.h>
 #include <linux/nodemask.h>
+#include <linux/slab.h>
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/meminit.h>
@@ -143,22 +144,120 @@ static void *per_cpu_node_setup(void *cpu_data, int node)
 	int cpu;
 
 	for_each_possible_early_cpu(cpu) {
-		if (cpu == 0) {
-			void *cpu0_data = __cpu0_per_cpu;
-			__per_cpu_offset[cpu] = (char*)cpu0_data -
-				__per_cpu_start;
-		} else if (node == node_cpuid[cpu].nid) {
-			memcpy(__va(cpu_data), __phys_per_cpu_start,
-			       __per_cpu_end - __per_cpu_start);
-			__per_cpu_offset[cpu] = (char*)__va(cpu_data) -
-				__per_cpu_start;
-			cpu_data += PERCPU_PAGE_SIZE;
-		}
+		void *src = cpu == 0 ? __cpu0_per_cpu : __phys_per_cpu_start;
+
+		if (node != node_cpuid[cpu].nid)
+			continue;
+
+		memcpy(__va(cpu_data), src, __per_cpu_end - __per_cpu_start);
+		__per_cpu_offset[cpu] = (char *)__va(cpu_data) -
+			__per_cpu_start;
+
+		/*
+		 * percpu area for cpu0 is moved from the __init area
+		 * which is setup by head.S and used till this point.
+		 * Update ar.k3.  This move ensures that the percpu
+		 * area for cpu0 is on the correct node and its
+		 * virtual address isn't insanely far from other
+		 * percpu areas which is important for congruent
+		 * percpu allocator.
+		 */
+		if (cpu == 0)
+			ia64_set_kr(IA64_KR_PER_CPU_DATA,
+				    (unsigned long)cpu_data -
+				    (unsigned long)__per_cpu_start);
+
+		cpu_data += PERCPU_PAGE_SIZE;
 	}
 #endif
 	return cpu_data;
 }
 
+#ifdef CONFIG_SMP
+/**
+ * setup_per_cpu_areas - setup percpu areas
+ *
+ * Arch code has already allocated and initialized percpu areas.  All
+ * this function has to do is to teach the determined layout to the
+ * dynamic percpu allocator, which happens to be more complex than
+ * creating whole new ones using helpers.
+ */
+void __init setup_per_cpu_areas(void)
+{
+	struct pcpu_alloc_info *ai;
+	struct pcpu_group_info *uninitialized_var(gi);
+	unsigned int *cpu_map;
+	void *base;
+	unsigned long base_offset;
+	unsigned int cpu;
+	ssize_t static_size, reserved_size, dyn_size;
+	int node, prev_node, unit, nr_units, rc;
+
+	ai = pcpu_alloc_alloc_info(MAX_NUMNODES, nr_cpu_ids);
+	if (!ai)
+		panic("failed to allocate pcpu_alloc_info");
+	cpu_map = ai->groups[0].cpu_map;
+
+	/* determine base */
+	base = (void *)ULONG_MAX;
+	for_each_possible_cpu(cpu)
+		base = min(base,
+			   (void *)(__per_cpu_offset[cpu] + __per_cpu_start));
+	base_offset = (void *)__per_cpu_start - base;
+
+	/* build cpu_map, units are grouped by node */
+	unit = 0;
+	for_each_node(node)
+		for_each_possible_cpu(cpu)
+			if (node == node_cpuid[cpu].nid)
+				cpu_map[unit++] = cpu;
+	nr_units = unit;
+
+	/* set basic parameters */
+	static_size = __per_cpu_end - __per_cpu_start;
+	reserved_size = PERCPU_MODULE_RESERVE;
+	dyn_size = PERCPU_PAGE_SIZE - static_size - reserved_size;
+	if (dyn_size < 0)
+		panic("percpu area overflow static=%zd reserved=%zd\n",
+		      static_size, reserved_size);
+
+	ai->static_size = static_size;
+	ai->reserved_size = reserved_size;
+	ai->dyn_size = dyn_size;
+	ai->unit_size = PERCPU_PAGE_SIZE;
+	ai->atom_size = PAGE_SIZE;
+	ai->alloc_size = PERCPU_PAGE_SIZE;
+
+	/*
+	 * CPUs are put into groups according to node.  Walk cpu_map
+	 * and create new groups at node boundaries.
+	 */
+	prev_node = -1;
+	ai->nr_groups = 0;
+	for (unit = 0; unit < nr_units; unit++) {
+		cpu = cpu_map[unit];
+		node = node_cpuid[cpu].nid;
+
+		if (node == prev_node) {
+			gi->nr_units++;
+			continue;
+		}
+		prev_node = node;
+
+		gi = &ai->groups[ai->nr_groups++];
+		gi->nr_units = 1;
+		gi->base_offset = __per_cpu_offset[cpu] + base_offset;
+		gi->cpu_map = &cpu_map[unit];
+	}
+
+	rc = pcpu_setup_first_chunk(ai, base);
+	if (rc)
+		panic("failed to setup percpu area (err=%d)", rc);
+
+	pcpu_free_alloc_info(ai);
+}
+#endif
+
 /**
  * fill_pernode - initialize pernode data.
  * @node: the node id.
@@ -352,7 +451,8 @@ static void __init initialize_pernode_data(void)
 	/* Set the node_data pointer for each per-cpu struct */
 	for_each_possible_early_cpu(cpu) {
 		node = node_cpuid[cpu].nid;
-		per_cpu(cpu_info, cpu).node_data = mem_data[node].node_data;
+		per_cpu(ia64_cpu_info, cpu).node_data =
+			mem_data[node].node_data;
 	}
 #else
 	{
@@ -360,7 +460,7 @@ static void __init initialize_pernode_data(void)
 		cpu = 0;
 		node = node_cpuid[cpu].nid;
 		cpu0_cpu_info = (struct cpuinfo_ia64 *)(__phys_per_cpu_start +
-			((char *)&per_cpu__cpu_info - __per_cpu_start));
+			((char *)&ia64_cpu_info - __per_cpu_start));
 		cpu0_cpu_info->node_data = mem_data[node].node_data;
 	}
 #endif /* CONFIG_SMP */
@@ -666,9 +766,9 @@ void __init paging_init(void)
 	sparse_init();
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	vmalloc_end -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+	VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
 		sizeof(struct page));
-	vmem_map = (struct page *) vmalloc_end;
+	vmem_map = (struct page *) VMALLOC_END;
 	efi_memmap_walk(create_mem_map_page_table, NULL);
 	printk("Virtual mem_map starts at 0x%p\n", vmem_map);
 #endif
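
The NUMA variant of setup_per_cpu_areas() above relies on cpu_map being filled in node order, so a group boundary is simply the point where the node id changes between consecutive units. A standalone sketch of that boundary detection (the cpu-to-node table below is made up for illustration):

    #include <stdio.h>

    /* Sketch of the node-boundary grouping: units are sorted by
     * node, and a new pcpu group opens whenever the node changes. */
    int main(void)
    {
        int cpu_node[] = { 0, 0, 1, 1, 1, 2 };  /* like node_cpuid[cpu].nid */
        int nr = sizeof(cpu_node) / sizeof(cpu_node[0]);
        int prev_node = -1, groups = 0;

        for (int unit = 0; unit < nr; unit++) {
            if (cpu_node[unit] == prev_node)
                continue;           /* same group, it just grows */
            prev_node = cpu_node[unit];
            groups++;               /* node changed: open a new group */
            printf("group %d starts at unit %d (node %d)\n",
                   groups, unit, prev_node);
        }
        return 0;
    }
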
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index b0f615759e97..1841ee7e65f9 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -14,7 +14,6 @@
 #include <linux/hugetlb.h>
 #include <linux/pagemap.h>
 #include <linux/module.h>
-#include <linux/slab.h>
 #include <linux/sysctl.h>
 #include <linux/log2.h>
 #include <asm/mman.h>
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 1857766a63c1..ed41759efcac 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -22,7 +22,6 @@
 #include <linux/kexec.h>
 
 #include <asm/dma.h>
-#include <asm/ia32.h>
 #include <asm/io.h>
 #include <asm/machvec.h>
 #include <asm/numa.h>
@@ -44,8 +43,8 @@ extern void ia64_tlb_init (void);
 unsigned long MAX_DMA_ADDRESS = PAGE_OFFSET + 0x100000000UL;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-unsigned long vmalloc_end = VMALLOC_END_INIT;
-EXPORT_SYMBOL(vmalloc_end);
+unsigned long VMALLOC_END = VMALLOC_END_INIT;
+EXPORT_SYMBOL(VMALLOC_END);
 struct page *vmem_map;
 EXPORT_SYMBOL(vmem_map);
 #endif
@@ -91,7 +90,7 @@ dma_mark_clean(void *addr, size_t size)
 inline void
 ia64_set_rbs_bot (void)
 {
-	unsigned long stack_size = current->signal->rlim[RLIMIT_STACK].rlim_max & -16;
+	unsigned long stack_size = rlimit_max(RLIMIT_STACK) & -16;
 
 	if (stack_size > MAX_USER_STACK_SIZE)
 		stack_size = MAX_USER_STACK_SIZE;
@@ -118,6 +117,7 @@ ia64_init_addr_space (void)
 	 */
 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (vma) {
+		INIT_LIST_HEAD(&vma->anon_vma_chain);
 		vma->vm_mm = current->mm;
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
@@ -136,6 +136,7 @@ ia64_init_addr_space (void)
 	if (!(current->personality & MMAP_PAGE_ZERO)) {
 		vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 		if (vma) {
+			INIT_LIST_HEAD(&vma->anon_vma_chain);
 			vma->vm_mm = current->mm;
 			vma->vm_end = PAGE_SIZE;
 			vma->vm_page_prot = __pgprot(pgprot_val(PAGE_READONLY) | _PAGE_MA_NAT);
@@ -668,10 +669,6 @@ mem_init (void)
 		fsyscall_table[i] = sys_call_table[i] | 1;
 	}
 	setup_gate();
-
-#ifdef CONFIG_IA32_SUPPORT
-	ia32_mem_init();
-#endif
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
diff --git a/arch/ia64/mm/ioremap.c b/arch/ia64/mm/ioremap.c
index 2a140627dfd6..3dccdd8eb275 100644
--- a/arch/ia64/mm/ioremap.c
+++ b/arch/ia64/mm/ioremap.c
@@ -22,6 +22,12 @@ __ioremap (unsigned long phys_addr)
 }
 
 void __iomem *
+early_ioremap (unsigned long phys_addr, unsigned long size)
+{
+	return __ioremap(phys_addr);
+}
+
+void __iomem *
 ioremap (unsigned long phys_addr, unsigned long size)
 {
 	void __iomem *addr;
@@ -102,6 +108,11 @@ ioremap_nocache (unsigned long phys_addr, unsigned long size)
 EXPORT_SYMBOL(ioremap_nocache);
 
 void
+early_iounmap (volatile void __iomem *addr, unsigned long size)
+{
+}
+
+void
 iounmap (volatile void __iomem *addr)
 {
 	if (REGION_NUMBER(addr) == RGN_GATE)
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index ee09d261f2e6..5dfd916e9ea6 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -22,6 +22,7 @@
 #include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/bootmem.h>
+#include <linux/slab.h>
 
 #include <asm/delay.h>
 #include <asm/mmu_context.h>
@@ -48,7 +49,7 @@ DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
 DEFINE_PER_CPU(u8, ia64_tr_num);  /*Number of TR slots in current processor*/
 DEFINE_PER_CPU(u8, ia64_tr_used); /*Max Slot number used by kernel*/
 
-struct ia64_tr_entry __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX];
+struct ia64_tr_entry *ia64_idtrs[NR_CPUS];
 
 /*
  * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
@@ -429,10 +430,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	struct ia64_tr_entry *p;
 	int cpu = smp_processor_id();
 
+	if (!ia64_idtrs[cpu]) {
+		ia64_idtrs[cpu] = kmalloc(2 * IA64_TR_ALLOC_MAX *
+				sizeof (struct ia64_tr_entry), GFP_KERNEL);
+		if (!ia64_idtrs[cpu])
+			return -ENOMEM;
+	}
 	r = -EINVAL;
 	/*Check overlap with existing TR entries*/
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][0];
+		p = ia64_idtrs[cpu];
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -444,7 +451,7 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 		}
 	}
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][0];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX;
 		for (i = IA64_TR_ALLOC_BASE; i <= per_cpu(ia64_tr_used, cpu);
 								i++, p++) {
 			if (p->pte & 0x1)
@@ -459,16 +466,16 @@ int ia64_itr_entry(u64 target_mask, u64 va, u64 pte, u64 log_size)
 	for (i = IA64_TR_ALLOC_BASE; i < per_cpu(ia64_tr_num, cpu); i++) {
 		switch (target_mask & 0x3) {
 		case 1:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 2:
-			if (!(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		case 3:
-			if (!(__per_cpu_idtrs[cpu][0][i].pte & 0x1) &&
-			    !(__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+			if (!((ia64_idtrs[cpu] + i)->pte & 0x1) &&
+			    !((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 				goto found;
 			continue;
 		default:
@@ -488,7 +495,7 @@ found:
 	if (target_mask & 0x1) {
 		ia64_itr(0x1, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][0][i];
+		p = ia64_idtrs[cpu] + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -497,7 +504,7 @@ found:
 	if (target_mask & 0x2) {
 		ia64_itr(0x2, i, va, pte, log_size);
 		ia64_srlz_i();
-		p = &__per_cpu_idtrs[cpu][1][i];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i;
 		p->ifa = va;
 		p->pte = pte;
 		p->itir = log_size << 2;
@@ -528,7 +535,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 		return;
 
 	if (target_mask & 0x1) {
-		p = &__per_cpu_idtrs[cpu][0][slot];
+		p = ia64_idtrs[cpu] + slot;
 		if ((p->pte&0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x1, p->ifa, p->itir>>2);
@@ -537,7 +544,7 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	if (target_mask & 0x2) {
-		p = &__per_cpu_idtrs[cpu][1][slot];
+		p = ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + slot;
 		if ((p->pte & 0x1) && is_tr_overlap(p, p->ifa, p->itir>>2)) {
 			p->pte = 0;
 			ia64_ptr(0x2, p->ifa, p->itir>>2);
@@ -546,8 +553,8 @@ void ia64_ptr_entry(u64 target_mask, int slot)
 	}
 
 	for (i = per_cpu(ia64_tr_used, cpu); i >= IA64_TR_ALLOC_BASE; i--) {
-		if ((__per_cpu_idtrs[cpu][0][i].pte & 0x1) ||
-		    (__per_cpu_idtrs[cpu][1][i].pte & 0x1))
+		if (((ia64_idtrs[cpu] + i)->pte & 0x1) ||
+		    ((ia64_idtrs[cpu] + IA64_TR_ALLOC_MAX + i)->pte & 0x1))
 			break;
 	}
 	per_cpu(ia64_tr_used, cpu) = i;
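
The tlb.c changes above replace the static __per_cpu_idtrs[NR_CPUS][2][IA64_TR_ALLOC_MAX] matrix with a per-cpu pointer that is kmalloc'ed on first use in ia64_itr_entry(), so the memory is only paid for on CPUs that actually register translation registers. The flattened layout keeps the old two rows: instruction TRs occupy the first IA64_TR_ALLOC_MAX entries, data TRs the second. A sketch of that indexing convention, with stand-in types and sizes:

    /* TR_MAX and struct tr_entry stand in for IA64_TR_ALLOC_MAX and
     * struct ia64_tr_entry; only the indexing scheme is the point. */
    #define TR_MAX 8

    struct tr_entry { unsigned long pte; };

    static struct tr_entry *itr_slot(struct tr_entry *base, int i)
    {
        return base + i;            /* row 0: instruction TRs */
    }

    static struct tr_entry *dtr_slot(struct tr_entry *base, int i)
    {
        return base + TR_MAX + i;   /* row 1: data TRs */
    }
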
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index c0fca2c1c858..64aff520b899 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -131,6 +131,7 @@ alloc_pci_controller (int seg)
 }
 
 struct pci_root_info {
+	struct acpi_device *bridge;
 	struct pci_controller *controller;
 	char *name;
 };
@@ -297,9 +298,20 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
 	window->offset = offset;
 
 	if (insert_resource(root, &window->resource)) {
-		printk(KERN_ERR "alloc 0x%llx-0x%llx from %s for %s failed\n",
-			window->resource.start, window->resource.end,
-			root->name, info->name);
+		dev_err(&info->bridge->dev,
+			"can't allocate host bridge window %pR\n",
+			&window->resource);
+	} else {
+		if (offset)
+			dev_info(&info->bridge->dev, "host bridge window %pR "
+				 "(PCI address [%#llx-%#llx])\n",
+				 &window->resource,
+				 window->resource.start - offset,
+				 window->resource.end - offset);
+		else
+			dev_info(&info->bridge->dev,
+				 "host bridge window %pR\n",
+				 &window->resource);
 	}
 
 	return AE_OK;
@@ -308,9 +320,9 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
 static void __devinit
 pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
 {
-	int i, j;
+	int i;
 
-	j = 0;
+	pci_bus_remove_resources(bus);
 	for (i = 0; i < ctrl->windows; i++) {
 		struct resource *res = &ctrl->window[i].resource;
 		/* HP's firmware has a hack to work around a Windows bug.
@@ -318,12 +330,7 @@ pcibios_setup_root_windows(struct pci_bus *bus, struct pci_controller *ctrl)
 		if ((res->flags & IORESOURCE_MEM) &&
 		    (res->end - res->start < 16))
 			continue;
-		if (j >= PCI_BUS_NUM_RESOURCES) {
-			printk("Ignoring range [%#llx-%#llx] (%lx)\n",
-					res->start, res->end, res->flags);
-			continue;
-		}
-		bus->resource[j++] = res;
+		pci_bus_add_resource(bus, res, 0);
 	}
 }
 
@@ -364,6 +371,7 @@ pci_acpi_scan_root(struct acpi_device *device, int domain, int bus)
 		goto out3;
 
 	sprintf(name, "PCI Bus %04x:%02x", domain, bus);
+	info.bridge = device;
 	info.controller = controller;
 	info.name = name;
 	acpi_walk_resources(device->handle, METHOD_NAME__CRS,
@@ -438,13 +446,12 @@ EXPORT_SYMBOL(pcibios_bus_to_resource);
 static int __devinit is_valid_resource(struct pci_dev *dev, int idx)
 {
 	unsigned int i, type_mask = IORESOURCE_IO | IORESOURCE_MEM;
-	struct resource *devr = &dev->resource[idx];
+	struct resource *devr = &dev->resource[idx], *busr;
 
 	if (!dev->bus)
 		return 0;
-	for (i=0; i<PCI_BUS_NUM_RESOURCES; i++) {
-		struct resource *busr = dev->bus->resource[i];
 
+	pci_bus_for_each_resource(dev->bus, busr, i) {
 		if (!busr || ((busr->flags ^ devr->flags) & type_mask))
 			continue;
 		if ((devr->start) && (devr->start >= busr->start) &&
@@ -533,10 +540,11 @@ pcibios_disable_device (struct pci_dev *dev)
 	acpi_pci_irq_disable(dev);
 }
 
-void
-pcibios_align_resource (void *data, struct resource *res,
+resource_size_t
+pcibios_align_resource (void *data, const struct resource *res,
 			resource_size_t size, resource_size_t align)
 {
+	return res->start;
 }
 
 /*
@@ -720,9 +728,6 @@ int ia64_pci_legacy_write(struct pci_bus *bus, u16 port, u32 val, u8 size)
 	return ret;
 }
 
-/* It's defined in drivers/pci/pci.c */
-extern u8 pci_cache_line_size;
-
 /**
  * set_pci_cacheline_size - determine cacheline size for PCI devices
  *
@@ -731,7 +736,7 @@ extern u8 pci_cache_line_size;
  *
  * Code mostly taken from arch/ia64/kernel/palinfo.c:cache_info().
  */
-static void __init set_pci_cacheline_size(void)
+static void __init set_pci_dfl_cacheline_size(void)
 {
 	unsigned long levels, unique_caches;
 	long status;
@@ -751,7 +756,7 @@ static void __init set_pci_cacheline_size(void)
751 "(status=%ld)\n", __func__, status); 756 "(status=%ld)\n", __func__, status);
752 return; 757 return;
753 } 758 }
754 pci_cache_line_size = (1 << cci.pcci_line_size) / 4; 759 pci_dfl_cache_line_size = (1 << cci.pcci_line_size) / 4;
755} 760}
756 761
757u64 ia64_dma_get_required_mask(struct device *dev) 762u64 ia64_dma_get_required_mask(struct device *dev)
@@ -782,7 +787,7 @@ EXPORT_SYMBOL_GPL(dma_get_required_mask);
 
 static int __init pcibios_init(void)
 {
-	set_pci_cacheline_size();
+	set_pci_dfl_cacheline_size();
 	return 0;
 }
 
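
Several of the pci.c hunks above follow the core PCI move from a fixed bus->resource[PCI_BUS_NUM_RESOURCES] array to pci_bus_add_resource()/pci_bus_for_each_resource(), which is why the "Ignoring range" overflow branch could be dropped: a list of windows has no fixed slot count. A toy model of that difference, with invented types:

    #include <stdio.h>
    #include <stdlib.h>

    /* Toy model of why the fixed resource array went away: a linked
     * list of host bridge windows has no slot limit, so no window
     * ever has to be silently ignored. */
    struct window {
        unsigned long start, end;
        struct window *next;
    };

    static struct window *add_window(struct window *head,
                                     unsigned long start, unsigned long end)
    {
        struct window *w = malloc(sizeof(*w));

        if (!w)
            exit(1);
        w->start = start;
        w->end = end;
        w->next = head;             /* no array slot to run out of */
        return w;
    }

    int main(void)
    {
        struct window *head = NULL, *next;

        head = add_window(head, 0x1000, 0x1fff);
        head = add_window(head, 0x2000, 0x2fff);
        for (struct window *w = head; w; w = w->next)
            printf("window [%#lx-%#lx]\n", w->start, w->end);
        for (struct window *w = head; w; w = next) {
            next = w->next;
            free(w);
        }
        return 0;
    }
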
diff --git a/arch/ia64/sn/kernel/bte.c b/arch/ia64/sn/kernel/bte.c
index c6d6b62db66c..cad775a1a157 100644
--- a/arch/ia64/sn/kernel/bte.c
+++ b/arch/ia64/sn/kernel/bte.c
@@ -19,6 +19,7 @@
 #include <linux/bootmem.h>
 #include <linux/string.h>
 #include <linux/sched.h>
+#include <linux/slab.h>
 
 #include <asm/sn/bte.h>
 
diff --git a/arch/ia64/sn/kernel/io_acpi_init.c b/arch/ia64/sn/kernel/io_acpi_init.c
index fd50ff94302b..8cdcb173a138 100644
--- a/arch/ia64/sn/kernel/io_acpi_init.c
+++ b/arch/ia64/sn/kernel/io_acpi_init.c
@@ -13,6 +13,7 @@
 #include <asm/sn/sn_sal.h>
 #include "xtalk/hubdev.h"
 #include <linux/acpi.h>
+#include <linux/slab.h>
 
 
 /*
@@ -390,7 +391,7 @@ sn_acpi_get_pcidev_info(struct pci_dev *dev, struct pcidev_info **pcidev_info,
 	pcidev_match.handle = NULL;
 
 	acpi_walk_namespace(ACPI_TYPE_DEVICE, rootbus_handle, ACPI_UINT32_MAX,
-			    find_matching_device, &pcidev_match, NULL);
+			    find_matching_device, NULL, &pcidev_match, NULL);
 
 	if (!pcidev_match.handle) {
 		printk(KERN_ERR
diff --git a/arch/ia64/sn/kernel/io_common.c b/arch/ia64/sn/kernel/io_common.c
index 308e6595110e..4433dd019d3c 100644
--- a/arch/ia64/sn/kernel/io_common.c
+++ b/arch/ia64/sn/kernel/io_common.c
@@ -7,6 +7,7 @@
  */
 
 #include <linux/bootmem.h>
+#include <linux/slab.h>
 #include <asm/sn/types.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/sn_feature_sets.h>
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index ee774c366a06..98079f29d9a9 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -6,6 +6,7 @@
  * Copyright (C) 1992 - 1997, 2000-2006 Silicon Graphics, Inc.  All rights reserved.
  */
 
+#include <linux/slab.h>
 #include <asm/sn/types.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/io.h>
diff --git a/arch/ia64/sn/kernel/irq.c b/arch/ia64/sn/kernel/irq.c
index 40d6eeda1c4b..13c15d968098 100644
--- a/arch/ia64/sn/kernel/irq.c
+++ b/arch/ia64/sn/kernel/irq.c
@@ -12,6 +12,7 @@
 #include <linux/spinlock.h>
 #include <linux/init.h>
 #include <linux/rculist.h>
+#include <linux/slab.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/arch.h>
 #include <asm/sn/intr.h>
diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c
index fbbfb9701201..ebfdd6a9ae1a 100644
--- a/arch/ia64/sn/kernel/msi_sn.c
+++ b/arch/ia64/sn/kernel/msi_sn.c
@@ -11,6 +11,7 @@
 #include <linux/pci.h>
 #include <linux/cpumask.h>
 #include <linux/msi.h>
+#include <linux/slab.h>
 
 #include <asm/sn/addrs.h>
 #include <asm/sn/intr.h>
diff --git a/arch/ia64/sn/kernel/setup.c b/arch/ia64/sn/kernel/setup.c
index ece1bf994499..d00dfc180021 100644
--- a/arch/ia64/sn/kernel/setup.c
+++ b/arch/ia64/sn/kernel/setup.c
@@ -71,7 +71,7 @@ EXPORT_SYMBOL(sn_rtc_cycles_per_second);
 DEFINE_PER_CPU(struct sn_hub_info_s, __sn_hub_info);
 EXPORT_PER_CPU_SYMBOL(__sn_hub_info);
 
-DEFINE_PER_CPU(short [MAX_COMPACT_NODES], __sn_cnodeid_to_nasid);
+DEFINE_PER_CPU(short, __sn_cnodeid_to_nasid[MAX_COMPACT_NODES]);
 EXPORT_PER_CPU_SYMBOL(__sn_cnodeid_to_nasid);
 
 DEFINE_PER_CPU(struct nodepda_s *, __sn_nodepda);
@@ -241,7 +241,7 @@ static void __cpuinit sn_check_for_wars(void)
  * Note: This stuff is duped here because Altix requires the PCDP to
  * locate a usable VGA device due to lack of proper ACPI support. Structures
  * could be used from drivers/firmware/pcdp.h, but it was decided that moving
- * this file to a more public location just for Altix use was undesireable.
+ * this file to a more public location just for Altix use was undesirable.
  */
 
 struct hcdp_uart_desc {
diff --git a/arch/ia64/sn/kernel/sn2/sn2_smp.c b/arch/ia64/sn/kernel/sn2/sn2_smp.c
index 1176506b2bae..e884ba4e031d 100644
--- a/arch/ia64/sn/kernel/sn2/sn2_smp.c
+++ b/arch/ia64/sn/kernel/sn2/sn2_smp.c
@@ -496,13 +496,13 @@ static int sn2_ptc_seq_show(struct seq_file *file, void *data)
 		seq_printf(file, "cpu %d %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld %ld\n", cpu, stat->ptc_l,
 				stat->change_rid, stat->shub_ptc_flushes, stat->nodes_flushed,
 				stat->deadlocks,
-				1000 * stat->lock_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec,
-				1000 * stat->shub_itc_clocks_max / per_cpu(cpu_info, cpu).cyc_per_usec,
+				1000 * stat->lock_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
+				1000 * stat->shub_itc_clocks_max / per_cpu(ia64_cpu_info, cpu).cyc_per_usec,
 				stat->shub_ptc_flushes_not_my_mm,
 				stat->deadlocks2,
 				stat->shub_ipi_flushes,
-				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(cpu_info, cpu).cyc_per_usec);
+				1000 * stat->shub_ipi_flushes_itc_clocks / per_cpu(ia64_cpu_info, cpu).cyc_per_usec);
 	}
 	return 0;
 }
diff --git a/arch/ia64/sn/pci/pci_dma.c b/arch/ia64/sn/pci/pci_dma.c
index 98b684928e12..a9d310de57da 100644
--- a/arch/ia64/sn/pci/pci_dma.c
+++ b/arch/ia64/sn/pci/pci_dma.c
@@ -9,6 +9,7 @@
  * a description of how these routines should be used.
  */
 
+#include <linux/gfp.h>
 #include <linux/module.h>
 #include <linux/dma-mapping.h>
 #include <asm/dma.h>
diff --git a/arch/ia64/sn/pci/pcibr/pcibr_provider.c b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
index d13e5a22a558..3cb5cf377644 100644
--- a/arch/ia64/sn/pci/pcibr/pcibr_provider.c
+++ b/arch/ia64/sn/pci/pcibr/pcibr_provider.c
@@ -8,6 +8,7 @@
 
 #include <linux/interrupt.h>
 #include <linux/types.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/geo.h>
diff --git a/arch/ia64/sn/pci/tioca_provider.c b/arch/ia64/sn/pci/tioca_provider.c
index 35b2a27d2e77..4d4536e3b6f3 100644
--- a/arch/ia64/sn/pci/tioca_provider.c
+++ b/arch/ia64/sn/pci/tioca_provider.c
@@ -9,6 +9,8 @@
 #include <linux/types.h>
 #include <linux/interrupt.h>
 #include <linux/pci.h>
+#include <linux/bitmap.h>
+#include <linux/slab.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/addrs.h>
 #include <asm/sn/io.h>
@@ -369,7 +371,7 @@ tioca_dma_d48(struct pci_dev *pdev, u64 paddr)
 static dma_addr_t
 tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
 {
-	int i, ps, ps_shift, entry, entries, mapsize, last_entry;
+	int ps, ps_shift, entry, entries, mapsize;
 	u64 xio_addr, end_xio_addr;
 	struct tioca_common *tioca_common;
 	struct tioca_kernel *tioca_kern;
@@ -410,23 +412,13 @@ tioca_dma_mapped(struct pci_dev *pdev, unsigned long paddr, size_t req_size)
 	map = tioca_kern->ca_pcigart_pagemap;
 	mapsize = tioca_kern->ca_pcigart_entries;
 
-	entry = find_first_zero_bit(map, mapsize);
-	while (entry < mapsize) {
-		last_entry = find_next_bit(map, mapsize, entry);
-
-		if (last_entry - entry >= entries)
-			break;
-
-		entry = find_next_zero_bit(map, mapsize, last_entry);
-	}
-
-	if (entry > mapsize) {
+	entry = bitmap_find_next_zero_area(map, mapsize, 0, entries, 0);
+	if (entry >= mapsize) {
 		kfree(ca_dmamap);
 		goto map_return;
 	}
 
-	for (i = 0; i < entries; i++)
-		set_bit(entry + i, map);
+	bitmap_set(map, entry, entries);
 
 	bus_addr = tioca_kern->ca_pciap_base + (entry * ps);
 
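
bitmap_find_next_zero_area() collapses the open-coded GART scan above into one call: it returns the start index of the first run of `entries` clear bits, or a value >= mapsize when no run fits (note the old `entry > mapsize` test was also off by one), and bitmap_set() then marks the run allocated. A userspace sketch of the same contract, one byte per bit for clarity (the kernel packs bits into unsigned longs):

    #include <stdio.h>

    /* Return the first index of a run of 'need' clear slots, or a
     * value >= size when no such run exists; callers check >= size. */
    static int find_zero_area(const unsigned char *map, int size, int need)
    {
        int run = 0;

        for (int i = 0; i < size; i++) {
            run = map[i] ? 0 : run + 1;
            if (run == need)
                return i - need + 1;    /* start of the run */
        }
        return size;                    /* no room */
    }

    int main(void)
    {
        unsigned char map[] = { 1, 0, 0, 1, 0, 0, 0, 1 };

        printf("3 free slots at %d\n", find_zero_area(map, 8, 3)); /* -> 4 */
        return 0;
    }
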
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 012f3b82ee55..27faba035f3a 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -8,6 +8,7 @@
 
 #include <linux/types.h>
 #include <linux/interrupt.h>
+#include <linux/slab.h>
 #include <linux/pci.h>
 #include <asm/sn/sn_sal.h>
 #include <asm/sn/addrs.h>
diff --git a/arch/ia64/uv/kernel/setup.c b/arch/ia64/uv/kernel/setup.c
index 7a5ae633198b..f1490657bafc 100644
--- a/arch/ia64/uv/kernel/setup.c
+++ b/arch/ia64/uv/kernel/setup.c
@@ -104,7 +104,7 @@ void __init uv_setup(char **cmdline_p)
 		uv_cpu_hub_info(cpu)->lowmem_remap_top =
 			lowmem_redir_base + lowmem_redir_size;
 		uv_cpu_hub_info(cpu)->m_val = m_val;
-		uv_cpu_hub_info(cpu)->n_val = m_val;
+		uv_cpu_hub_info(cpu)->n_val = n_val;
 		uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) -1;
 		uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
 		uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
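
The one-character uv_setup() change above is a straight bug fix: n_val was seeded from m_val, and the mask computations immediately below depend on the two field widths being distinct. A standalone sketch of the derivation (the widths here are illustrative, not real hardware values):

    #include <stdio.h>

    /* Sketch of why the n_val typo mattered: pnode_mask and gpa_mask
     * derive from the field widths, so seeding n_val from m_val
     * corrupts both whenever the widths differ. */
    int main(void)
    {
        int m_val = 40, n_val = 4;      /* illustrative field widths */

        unsigned long pnode_mask = (1UL << n_val) - 1;
        unsigned long gpa_mask = (1UL << (m_val + n_val)) - 1;

        printf("pnode_mask=%#lx gpa_mask=%#lx\n", pnode_mask, gpa_mask);
        return 0;
    }
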
diff --git a/arch/ia64/xen/grant-table.c b/arch/ia64/xen/grant-table.c
index 777dd9a9108b..48cca37625eb 100644
--- a/arch/ia64/xen/grant-table.c
+++ b/arch/ia64/xen/grant-table.c
@@ -22,6 +22,7 @@
 
 #include <linux/module.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
 #include <linux/mm.h>
 
 #include <xen/interface/xen.h>
diff --git a/arch/ia64/xen/hypercall.S b/arch/ia64/xen/hypercall.S
index e32dae444dd6..08847aa12583 100644
--- a/arch/ia64/xen/hypercall.S
+++ b/arch/ia64/xen/hypercall.S
@@ -58,11 +58,6 @@ __HCALL2(xen_ptcga, HYPERPRIVOP_PTC_GA)
 __HCALL2(xen_set_rr, HYPERPRIVOP_SET_RR)
 __HCALL2(xen_set_kr, HYPERPRIVOP_SET_KR)
 
-#ifdef CONFIG_IA32_SUPPORT
-__HCALL0(xen_get_eflag, HYPERPRIVOP_GET_EFLAG)
-__HCALL1(xen_set_eflag, HYPERPRIVOP_SET_EFLAG)	// refer SDM vol1 3.1.8
-#endif /* CONFIG_IA32_SUPPORT */
-
 GLOBAL_ENTRY(xen_set_rr0_to_rr4)
 	mov r8=r32
 	mov r9=r33
diff --git a/arch/ia64/xen/irq_xen.c b/arch/ia64/xen/irq_xen.c
index f042e192d2fe..a3fb7cf9ae1d 100644
--- a/arch/ia64/xen/irq_xen.c
+++ b/arch/ia64/xen/irq_xen.c
@@ -63,19 +63,19 @@ xen_free_irq_vector(int vector)
 }
 
 
-static DEFINE_PER_CPU(int, timer_irq) = -1;
-static DEFINE_PER_CPU(int, ipi_irq) = -1;
-static DEFINE_PER_CPU(int, resched_irq) = -1;
-static DEFINE_PER_CPU(int, cmc_irq) = -1;
-static DEFINE_PER_CPU(int, cmcp_irq) = -1;
-static DEFINE_PER_CPU(int, cpep_irq) = -1;
+static DEFINE_PER_CPU(int, xen_timer_irq) = -1;
+static DEFINE_PER_CPU(int, xen_ipi_irq) = -1;
+static DEFINE_PER_CPU(int, xen_resched_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmc_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cmcp_irq) = -1;
+static DEFINE_PER_CPU(int, xen_cpep_irq) = -1;
 #define NAME_SIZE	15
-static DEFINE_PER_CPU(char[NAME_SIZE], timer_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], ipi_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], resched_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmc_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cmcp_name);
-static DEFINE_PER_CPU(char[NAME_SIZE], cpep_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_timer_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_ipi_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_resched_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmc_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cmcp_name);
+static DEFINE_PER_CPU(char[NAME_SIZE], xen_cpep_name);
 #undef NAME_SIZE
 
 struct saved_irq {
@@ -144,64 +144,64 @@ __xen_register_percpu_irq(unsigned int cpu, unsigned int vec,
 	if (xen_slab_ready) {
 		switch (vec) {
 		case IA64_TIMER_VECTOR:
-			snprintf(per_cpu(timer_name, cpu),
-				 sizeof(per_cpu(timer_name, cpu)),
+			snprintf(per_cpu(xen_timer_name, cpu),
+				 sizeof(per_cpu(xen_timer_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_ITC, cpu,
 				action->handler, action->flags,
-				per_cpu(timer_name, cpu), action->dev_id);
-			per_cpu(timer_irq, cpu) = irq;
+				per_cpu(xen_timer_name, cpu), action->dev_id);
+			per_cpu(xen_timer_irq, cpu) = irq;
 			break;
 		case IA64_IPI_RESCHEDULE:
-			snprintf(per_cpu(resched_name, cpu),
-				 sizeof(per_cpu(resched_name, cpu)),
+			snprintf(per_cpu(xen_resched_name, cpu),
+				 sizeof(per_cpu(xen_resched_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(resched_name, cpu), action->dev_id);
-			per_cpu(resched_irq, cpu) = irq;
+				per_cpu(xen_resched_name, cpu), action->dev_id);
+			per_cpu(xen_resched_irq, cpu) = irq;
 			break;
 		case IA64_IPI_VECTOR:
-			snprintf(per_cpu(ipi_name, cpu),
-				 sizeof(per_cpu(ipi_name, cpu)),
+			snprintf(per_cpu(xen_ipi_name, cpu),
+				 sizeof(per_cpu(xen_ipi_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_IPI_VECTOR, cpu,
 				action->handler, action->flags,
-				per_cpu(ipi_name, cpu), action->dev_id);
-			per_cpu(ipi_irq, cpu) = irq;
+				per_cpu(xen_ipi_name, cpu), action->dev_id);
+			per_cpu(xen_ipi_irq, cpu) = irq;
 			break;
 		case IA64_CMC_VECTOR:
-			snprintf(per_cpu(cmc_name, cpu),
-				 sizeof(per_cpu(cmc_name, cpu)),
+			snprintf(per_cpu(xen_cmc_name, cpu),
+				 sizeof(per_cpu(xen_cmc_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_virq_to_irqhandler(VIRQ_MCA_CMC, cpu,
 						      action->handler,
 						      action->flags,
-						      per_cpu(cmc_name, cpu),
+						      per_cpu(xen_cmc_name, cpu),
 						      action->dev_id);
-			per_cpu(cmc_irq, cpu) = irq;
+			per_cpu(xen_cmc_irq, cpu) = irq;
 			break;
 		case IA64_CMCP_VECTOR:
-			snprintf(per_cpu(cmcp_name, cpu),
-				 sizeof(per_cpu(cmcp_name, cpu)),
+			snprintf(per_cpu(xen_cmcp_name, cpu),
+				 sizeof(per_cpu(xen_cmcp_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CMCP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cmcp_name, cpu),
+						     per_cpu(xen_cmcp_name, cpu),
 						     action->dev_id);
-			per_cpu(cmcp_irq, cpu) = irq;
+			per_cpu(xen_cmcp_irq, cpu) = irq;
 			break;
 		case IA64_CPEP_VECTOR:
-			snprintf(per_cpu(cpep_name, cpu),
-				 sizeof(per_cpu(cpep_name, cpu)),
+			snprintf(per_cpu(xen_cpep_name, cpu),
+				 sizeof(per_cpu(xen_cpep_name, cpu)),
 				 "%s%d", action->name, cpu);
 			irq = bind_ipi_to_irqhandler(XEN_CPEP_VECTOR, cpu,
 						     action->handler,
 						     action->flags,
-						     per_cpu(cpep_name, cpu),
+						     per_cpu(xen_cpep_name, cpu),
 						     action->dev_id);
-			per_cpu(cpep_irq, cpu) = irq;
+			per_cpu(xen_cpep_irq, cpu) = irq;
 			break;
 		case IA64_CPE_VECTOR:
 		case IA64_MCA_RENDEZ_VECTOR:
@@ -275,30 +275,33 @@ unbind_evtchn_callback(struct notifier_block *nfb,
 
 	if (action == CPU_DEAD) {
 		/* Unregister evtchn. */
-		if (per_cpu(cpep_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cpep_irq, cpu), NULL);
-			per_cpu(cpep_irq, cpu) = -1;
+		if (per_cpu(xen_cpep_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cpep_irq, cpu),
+					       NULL);
+			per_cpu(xen_cpep_irq, cpu) = -1;
 		}
-		if (per_cpu(cmcp_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmcp_irq, cpu), NULL);
-			per_cpu(cmcp_irq, cpu) = -1;
+		if (per_cpu(xen_cmcp_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmcp_irq, cpu),
+					       NULL);
+			per_cpu(xen_cmcp_irq, cpu) = -1;
 		}
-		if (per_cpu(cmc_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(cmc_irq, cpu), NULL);
-			per_cpu(cmc_irq, cpu) = -1;
+		if (per_cpu(xen_cmc_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_cmc_irq, cpu), NULL);
+			per_cpu(xen_cmc_irq, cpu) = -1;
 		}
-		if (per_cpu(ipi_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(ipi_irq, cpu), NULL);
-			per_cpu(ipi_irq, cpu) = -1;
+		if (per_cpu(xen_ipi_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_ipi_irq, cpu), NULL);
+			per_cpu(xen_ipi_irq, cpu) = -1;
 		}
-		if (per_cpu(resched_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(resched_irq, cpu),
+		if (per_cpu(xen_resched_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu),
 					       NULL);
-			per_cpu(resched_irq, cpu) = -1;
+			per_cpu(xen_resched_irq, cpu) = -1;
 		}
-		if (per_cpu(timer_irq, cpu) >= 0) {
-			unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
-			per_cpu(timer_irq, cpu) = -1;
+		if (per_cpu(xen_timer_irq, cpu) >= 0) {
+			unbind_from_irqhandler(per_cpu(xen_timer_irq, cpu),
+					       NULL);
+			per_cpu(xen_timer_irq, cpu) = -1;
 		}
 	}
 	return NOTIFY_OK;
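
The wholesale timer_irq -> xen_timer_irq renames above (and the matching ones in time.c below) track the removal of the per_cpu__ symbol prefix: a per-cpu variable is now an ordinary linker symbol, so a static per-cpu timer_irq would clash with any other global of that name elsewhere in the kernel. A small sketch of the idea that the declared name is now the symbol itself (DEFINE_PER_CPU_DEMO and per_cpu_demo are invented for illustration):

    #include <stdio.h>

    /* Invented stand-in for DEFINE_PER_CPU: the declared name is the
     * global symbol, so it must be unique program-wide -- hence the
     * xen_ prefix on the real variables. */
    #define NR_DEMO_CPUS 4
    #define DEFINE_PER_CPU_DEMO(type, name) type name[NR_DEMO_CPUS]
    #define per_cpu_demo(name, cpu) ((name)[(cpu)])

    DEFINE_PER_CPU_DEMO(int, xen_timer_irq);  /* emits symbol "xen_timer_irq" */

    int main(void)
    {
        for (int cpu = 0; cpu < NR_DEMO_CPUS; cpu++)
            per_cpu_demo(xen_timer_irq, cpu) = -1;  /* "not bound yet" */
        printf("cpu0: %d\n", per_cpu_demo(xen_timer_irq, 0));
        return 0;
    }
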
diff --git a/arch/ia64/xen/time.c b/arch/ia64/xen/time.c
index dbeadb9c8e20..c1c544513e8d 100644
--- a/arch/ia64/xen/time.c
+++ b/arch/ia64/xen/time.c
@@ -34,15 +34,15 @@
 
 #include "../kernel/fsyscall_gtod_data.h"
 
-DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
-DEFINE_PER_CPU(unsigned long, processed_stolen_time);
-DEFINE_PER_CPU(unsigned long, processed_blocked_time);
+static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);
+static DEFINE_PER_CPU(unsigned long, xen_stolen_time);
+static DEFINE_PER_CPU(unsigned long, xen_blocked_time);
 
 /* taken from i386/kernel/time-xen.c */
 static void xen_init_missing_ticks_accounting(int cpu)
 {
 	struct vcpu_register_runstate_memory_area area;
-	struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
+	struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
 	int rc;
 
 	memset(runstate, 0, sizeof(*runstate));
@@ -52,8 +52,8 @@ static void xen_init_missing_ticks_accounting(int cpu)
 			   &area);
 	WARN_ON(rc && rc != -ENOSYS);
 
-	per_cpu(processed_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
-	per_cpu(processed_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
+	per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
+	per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable]
 					    + runstate->time[RUNSTATE_offline];
 }
 
@@ -68,7 +68,7 @@ static void get_runstate_snapshot(struct vcpu_runstate_info *res)
 
 	BUG_ON(preemptible());
 
-	state = &__get_cpu_var(runstate);
+	state = &__get_cpu_var(xen_runstate);
 
 	/*
 	 * The runstate info is always updated by the hypervisor on
@@ -103,12 +103,12 @@ consider_steal_time(unsigned long new_itm)
 	 * This function just checks and reject this effect.
 	 */
 	if (!time_after_eq(runstate.time[RUNSTATE_blocked],
-			   per_cpu(processed_blocked_time, cpu)))
+			   per_cpu(xen_blocked_time, cpu)))
 		blocked = 0;
 
 	if (!time_after_eq(runstate.time[RUNSTATE_runnable] +
 			   runstate.time[RUNSTATE_offline],
-			   per_cpu(processed_stolen_time, cpu)))
+			   per_cpu(xen_stolen_time, cpu)))
 		stolen = 0;
 
 	if (!time_after(delta_itm + new_itm, ia64_get_itc()))
@@ -147,8 +147,8 @@ consider_steal_time(unsigned long new_itm)
 		} else {
 			local_cpu_data->itm_next = delta_itm + new_itm;
 		}
-		per_cpu(processed_stolen_time, cpu) += NS_PER_TICK * stolen;
-		per_cpu(processed_blocked_time, cpu) += NS_PER_TICK * blocked;
+		per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
+		per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
 	}
 	return delta_itm;
 }
diff --git a/arch/ia64/xen/xen_pv_ops.c b/arch/ia64/xen/xen_pv_ops.c
index 5e2270a999fa..8adc6a14272a 100644
--- a/arch/ia64/xen/xen_pv_ops.c
+++ b/arch/ia64/xen/xen_pv_ops.c
@@ -301,11 +301,6 @@ static void xen_setreg(int regnum, unsigned long val)
 	case _IA64_REG_AR_KR0 ... _IA64_REG_AR_KR7:
 		xen_set_kr(regnum - _IA64_REG_AR_KR0, val);
 		break;
-#ifdef CONFIG_IA32_SUPPORT
-	case _IA64_REG_AR_EFLAG:
-		xen_set_eflag(val);
-		break;
-#endif
 	case _IA64_REG_AR_ITC:
 		xen_set_itc(val);
 		break;
@@ -332,11 +327,6 @@ static unsigned long xen_getreg(int regnum)
 	case _IA64_REG_PSR:
 		res = xen_get_psr();
 		break;
-#ifdef CONFIG_IA32_SUPPORT
-	case _IA64_REG_AR_EFLAG:
-		res = xen_get_eflag();
-		break;
-#endif
 	case _IA64_REG_AR_ITC:
 		res = xen_get_itc();
 		break;
@@ -710,9 +700,6 @@ extern unsigned long xen_getreg(int regnum);
 
 __DEFINE_FUNC(getreg,
 	      __DEFINE_GET_REG(PSR, PSR)
-#ifdef CONFIG_IA32_SUPPORT
-	      __DEFINE_GET_REG(AR_EFLAG, EFLAG)
-#endif
 
 	      /* get_itc */
 	      "mov r2 = " __stringify(_IA64_REG_AR_ITC) "\n"
@@ -789,9 +776,6 @@ __DEFINE_FUNC(setreg,
789 ";;\n" 776 ";;\n"
790 "(p6) br.cond.spnt xen_set_itc\n" 777 "(p6) br.cond.spnt xen_set_itc\n"
791 778
792#ifdef CONFIG_IA32_SUPPORT
793 __DEFINE_SET_REG(AR_EFLAG, SET_EFLAG)
794#endif
795 __DEFINE_SET_REG(CR_TPR, SET_TPR) 779 __DEFINE_SET_REG(CR_TPR, SET_TPR)
796 __DEFINE_SET_REG(CR_EOI, EOI) 780 __DEFINE_SET_REG(CR_EOI, EOI)
797 781