Diffstat (limited to 'arch')
-rw-r--r--  arch/arm/mach-davinci/board-evm.c | 6
-rw-r--r--  arch/arm/mach-davinci/clock.c | 5
-rw-r--r--  arch/arm/mach-davinci/usb.c | 1
-rw-r--r--  arch/arm/mach-rpc/riscpc.c | 6
-rw-r--r--  arch/ia64/Kconfig | 11
-rw-r--r--  arch/ia64/kernel/iosapic.c | 2
-rw-r--r--  arch/ia64/kernel/unwind.c | 2
-rw-r--r--  arch/mips/Kconfig | 9
-rw-r--r--  arch/mips/alchemy/common/time.c | 6
-rw-r--r--  arch/mips/include/asm/seccomp.h | 1
-rw-r--r--  arch/mips/kernel/irq.c | 1
-rw-r--r--  arch/mips/kernel/linux32.c | 69
-rw-r--r--  arch/mips/kernel/scall32-o32.S | 4
-rw-r--r--  arch/mips/kernel/scall64-64.S | 2
-rw-r--r--  arch/mips/kernel/scall64-n32.S | 28
-rw-r--r--  arch/mips/kernel/scall64-o32.S | 40
-rw-r--r--  arch/mips/kernel/signal.c | 5
-rw-r--r--  arch/mips/kernel/signal32.c | 28
-rw-r--r--  arch/mips/kernel/syscall.c | 26
-rw-r--r--  arch/mips/mm/cache.c | 5
-rw-r--r--  arch/powerpc/include/asm/compat.h | 5
-rw-r--r--  arch/powerpc/include/asm/seccomp.h | 4
-rw-r--r--  arch/powerpc/kernel/align.c | 29
-rw-r--r--  arch/powerpc/lib/copyuser_64.S | 38
-rw-r--r--  arch/powerpc/lib/memcpy_64.S | 26
-rw-r--r--  arch/powerpc/sysdev/ppc4xx_pci.c | 17
-rw-r--r--  arch/sh/boards/board-ap325rxa.c | 53
-rw-r--r--  arch/sh/kernel/cpu/sh2a/clock-sh7201.c | 4
-rw-r--r--  arch/sparc/include/asm/compat.h | 5
-rw-r--r--  arch/sparc/include/asm/seccomp.h | 6
-rw-r--r--  arch/sparc/kernel/chmc.c | 1
-rw-r--r--  arch/x86/Kconfig | 5
-rw-r--r--  arch/x86/boot/compressed/Makefile | 21
-rw-r--r--  arch/x86/boot/compressed/misc.c | 118
-rw-r--r--  arch/x86/configs/i386_defconfig | 6
-rw-r--r--  arch/x86/configs/x86_64_defconfig | 6
-rw-r--r--  arch/x86/include/asm/apic.h | 26
-rw-r--r--  arch/x86/include/asm/boot.h | 16
-rw-r--r--  arch/x86/include/asm/fixmap.h | 149
-rw-r--r--  arch/x86/include/asm/fixmap_32.h | 115
-rw-r--r--  arch/x86/include/asm/fixmap_64.h | 79
-rw-r--r--  arch/x86/include/asm/iomap.h | 3
-rw-r--r--  arch/x86/include/asm/numa_32.h | 6
-rw-r--r--  arch/x86/include/asm/pat.h | 3
-rw-r--r--  arch/x86/include/asm/processor.h | 6
-rw-r--r--  arch/x86/include/asm/seccomp_32.h | 6
-rw-r--r--  arch/x86/include/asm/seccomp_64.h | 8
-rw-r--r--  arch/x86/include/asm/setup.h | 7
-rw-r--r--  arch/x86/include/asm/system.h | 3
-rw-r--r--  arch/x86/include/asm/uaccess_64.h | 26
-rw-r--r--  arch/x86/include/asm/uv/uv.h | 3
-rw-r--r--  arch/x86/kernel/Makefile | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 6
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c | 2
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c | 41
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c | 221
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c | 12
-rw-r--r--  arch/x86/kernel/apic/probe_32.c | 15
-rw-r--r--  arch/x86/kernel/apic/probe_64.c | 3
-rw-r--r--  arch/x86/kernel/apic/summit_32.c | 102
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c | 1
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c | 1
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c | 44
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 20
-rw-r--r--  arch/x86/kernel/e820.c | 3
-rw-r--r--  arch/x86/kernel/ioport.c | 11
-rw-r--r--  arch/x86/kernel/process.c | 191
-rw-r--r--  arch/x86/kernel/process_32.c | 190
-rw-r--r--  arch/x86/kernel/process_64.c | 188
-rw-r--r--  arch/x86/kernel/ptrace.c | 2
-rw-r--r--  arch/x86/kernel/setup.c | 16
-rw-r--r--  arch/x86/kernel/signal.c | 117
-rw-r--r--  arch/x86/kernel/smpboot.c | 19
-rw-r--r--  arch/x86/kernel/traps.c | 46
-rw-r--r--  arch/x86/kernel/vsmp_64.c | 12
-rw-r--r--  arch/x86/mm/Makefile | 2
-rw-r--r--  arch/x86/mm/highmem_32.c | 34
-rw-r--r--  arch/x86/mm/init.c | 49
-rw-r--r--  arch/x86/mm/init_32.c | 61
-rw-r--r--  arch/x86/mm/init_64.c | 39
-rw-r--r--  arch/x86/mm/iomap_32.c | 11
-rw-r--r--  arch/x86/mm/memtest.c | 156
-rw-r--r--  arch/x86/mm/numa_32.c | 26
-rw-r--r--  arch/x86/mm/pat.c | 48
-rw-r--r--  arch/x86/mm/pgtable.c | 18
-rw-r--r--  arch/x86/mm/pgtable_32.c | 18
-rw-r--r--  arch/x86/oprofile/op_model_ppro.c | 14
-rw-r--r--  arch/x86/xen/enlighten.c | 3
88 files changed, 1218 insertions, 1583 deletions
diff --git a/arch/arm/mach-davinci/board-evm.c b/arch/arm/mach-davinci/board-evm.c
index a957d239a683..38b6a9ce2a93 100644
--- a/arch/arm/mach-davinci/board-evm.c
+++ b/arch/arm/mach-davinci/board-evm.c
@@ -311,6 +311,9 @@ evm_u35_setup(struct i2c_client *client, int gpio, unsigned ngpio, void *c)
 	gpio_request(gpio + 7, "nCF_SEL");
 	gpio_direction_output(gpio + 7, 1);
 
+	/* irlml6401 sustains over 3A, switches 5V in under 8 msec */
+	setup_usb(500, 8);
+
 	return 0;
 }
 
@@ -417,9 +420,6 @@ static __init void davinci_evm_init(void)
 	platform_add_devices(davinci_evm_devices,
 			     ARRAY_SIZE(davinci_evm_devices));
 	evm_init_i2c();
-
-	/* irlml6401 sustains over 3A, switches 5V in under 8 msec */
-	setup_usb(500, 8);
 }
 
 static __init void davinci_evm_irq_init(void)
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 28f6dbc95bd7..abb92b7eca0c 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -231,6 +231,11 @@ static struct clk davinci_clks[] = {
 		.lpsc = DAVINCI_LPSC_GPIO,
 	},
 	{
+		.name = "usb",
+		.rate = &commonrate,
+		.lpsc = DAVINCI_LPSC_USB,
+	},
+	{
 		.name = "AEMIFCLK",
 		.rate = &commonrate,
 		.lpsc = DAVINCI_LPSC_AEMIF,
diff --git a/arch/arm/mach-davinci/usb.c b/arch/arm/mach-davinci/usb.c
index 867ead2559ad..69680784448a 100644
--- a/arch/arm/mach-davinci/usb.c
+++ b/arch/arm/mach-davinci/usb.c
@@ -47,6 +47,7 @@ static struct musb_hdrc_platform_data usb_data = {
 #elif defined(CONFIG_USB_MUSB_HOST)
 	.mode		= MUSB_HOST,
 #endif
+	.clock		= "usb",
 	.config		= &musb_config,
 };
 
diff --git a/arch/arm/mach-rpc/riscpc.c b/arch/arm/mach-rpc/riscpc.c
index e88d417736af..c7fc01e9d1f6 100644
--- a/arch/arm/mach-rpc/riscpc.c
+++ b/arch/arm/mach-rpc/riscpc.c
@@ -19,6 +19,7 @@
 #include <linux/serial_8250.h>
 #include <linux/ata_platform.h>
 #include <linux/io.h>
+#include <linux/i2c.h>
 
 #include <asm/elf.h>
 #include <asm/mach-types.h>
@@ -201,8 +202,13 @@ static struct platform_device *devs[] __initdata = {
 	&pata_device,
 };
 
+static struct i2c_board_info i2c_rtc = {
+	I2C_BOARD_INFO("pcf8583", 0x50)
+};
+
 static int __init rpc_init(void)
 {
+	i2c_register_board_info(0, &i2c_rtc, 1);
 	return platform_add_devices(devs, ARRAY_SIZE(devs));
 }
 
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 4eb45c012498..153e727a6e8e 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -638,6 +638,17 @@ config DMAR
 	  and include PCI device scope covered by these DMA
 	  remapping devices.
 
+config DMAR_DEFAULT_ON
+	def_bool y
+	prompt "Enable DMA Remapping Devices by default"
+	depends on DMAR
+	help
+	  Selecting this option will enable a DMAR device at boot time if
+	  one is found. If this option is not selected, DMAR support can
+	  be enabled by passing intel_iommu=on to the kernel. It is
+	  recommended you say N here while the DMAR code remains
+	  experimental.
+
 endmenu
 
 endif
diff --git a/arch/ia64/kernel/iosapic.c b/arch/ia64/kernel/iosapic.c
index 006ad366a454..166e0d839fa0 100644
--- a/arch/ia64/kernel/iosapic.c
+++ b/arch/ia64/kernel/iosapic.c
@@ -507,7 +507,7 @@ static int iosapic_find_sharable_irq(unsigned long trigger, unsigned long pol)
 	if (trigger == IOSAPIC_EDGE)
 		return -EINVAL;
 
-	for (i = 0; i <= NR_IRQS; i++) {
+	for (i = 0; i < NR_IRQS; i++) {
 		info = &iosapic_intr_info[i];
 		if (info->trigger == trigger && info->polarity == pol &&
 		    (info->dmode == IOSAPIC_FIXED ||
diff --git a/arch/ia64/kernel/unwind.c b/arch/ia64/kernel/unwind.c
index 67810b77d998..b6c0e63a0bf6 100644
--- a/arch/ia64/kernel/unwind.c
+++ b/arch/ia64/kernel/unwind.c
@@ -2149,7 +2149,7 @@ unw_remove_unwind_table (void *handle)
 
 	/* next, remove hash table entries for this table */
 
-	for (index = 0; index <= UNW_HASH_SIZE; ++index) {
+	for (index = 0; index < UNW_HASH_SIZE; ++index) {
 		tmp = unw.cache + unw.hash[index];
 		if (unw.hash[index] >= UNW_CACHE_SIZE
 		    || tmp->ip < table->start || tmp->ip >= table->end)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 600eef3f3ac7..e61465a18c7e 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -603,7 +603,7 @@ config CAVIUM_OCTEON_SIMULATOR
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_HIGHMEM
-	select CPU_CAVIUM_OCTEON
+	select SYS_HAS_CPU_CAVIUM_OCTEON
 	help
 	  The Octeon simulator is software performance model of the Cavium
 	  Octeon Processor. It supports simulating Octeon processors on x86
@@ -618,7 +618,7 @@ config CAVIUM_OCTEON_REFERENCE_BOARD
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_HIGHMEM
 	select SYS_HAS_EARLY_PRINTK
-	select CPU_CAVIUM_OCTEON
+	select SYS_HAS_CPU_CAVIUM_OCTEON
 	select SWAP_IO_SPACE
 	help
 	  This option supports all of the Octeon reference boards from Cavium
@@ -1234,6 +1234,7 @@ config CPU_SB1
 
 config CPU_CAVIUM_OCTEON
 	bool "Cavium Octeon processor"
+	depends on SYS_HAS_CPU_CAVIUM_OCTEON
 	select IRQ_CPU
 	select IRQ_CPU_OCTEON
 	select CPU_HAS_PREFETCH
@@ -1314,6 +1315,9 @@ config SYS_HAS_CPU_RM9000
 config SYS_HAS_CPU_SB1
 	bool
 
+config SYS_HAS_CPU_CAVIUM_OCTEON
+	bool
+
 #
 # CPU may reorder R->R, R->W, W->R, W->W
 # Reordering beyond LL and SC is handled in WEAK_REORDERING_BEYOND_LLSC
@@ -1387,6 +1391,7 @@ config 32BIT
 config 64BIT
 	bool "64-bit kernel"
 	depends on CPU_SUPPORTS_64BIT_KERNEL && SYS_SUPPORTS_64BIT_KERNEL
+	select HAVE_SYSCALL_WRAPPERS
 	help
 	  Select this option if you want to build a 64-bit kernel.
 
diff --git a/arch/mips/alchemy/common/time.c b/arch/mips/alchemy/common/time.c
index 6fd441d16af5..f58d4ffb8945 100644
--- a/arch/mips/alchemy/common/time.c
+++ b/arch/mips/alchemy/common/time.c
@@ -118,7 +118,7 @@ void __init plat_time_init(void)
 	 * setup counter 1 (RTC) to tick at full speed
 	 */
 	t = 0xffffff;
-	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && t--)
+	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_T1S) && --t)
 		asm volatile ("nop");
 	if (!t)
 		goto cntr_err;
@@ -127,7 +127,7 @@ void __init plat_time_init(void)
 	au_sync();
 
 	t = 0xffffff;
-	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--)
+	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
 		asm volatile ("nop");
 	if (!t)
 		goto cntr_err;
@@ -135,7 +135,7 @@ void __init plat_time_init(void)
 	au_sync();
 
 	t = 0xffffff;
-	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && t--)
+	while ((au_readl(SYS_COUNTER_CNTRL) & SYS_CNTRL_C1S) && --t)
 		asm volatile ("nop");
 	if (!t)
 		goto cntr_err;
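The three hunks above switch the timeout counter from post- to pre-decrement. With "t--" the loop only exits once t has already been decremented past zero, so after a timeout t ends up wrapped to a non-zero value and the following "if (!t)" check never fires; with "--t" the loop exits with t == 0 and the error path is taken. A small standalone C sketch of the difference (still_busy() is a stand-in for the hardware status test, here always true so the timeout path must be taken):

#include <stdio.h>

static int still_busy(void)
{
	return 1;	/* the "device" never becomes ready */
}

int main(void)
{
	unsigned int t;

	t = 3;
	while (still_busy() && t--)
		;	/* post-decrement: exits with t wrapped to UINT_MAX */
	printf("post-decrement: t = %u, timeout %sdetected\n",
	       t, !t ? "" : "NOT ");

	t = 3;
	while (still_busy() && --t)
		;	/* pre-decrement: exits with t == 0 */
	printf("pre-decrement:  t = %u, timeout %sdetected\n",
	       t, !t ? "" : "NOT ");
	return 0;
}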
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index 36ed44070256..a6772e9507f5 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -1,6 +1,5 @@
 #ifndef __ASM_SECCOMP_H
 
-#include <linux/thread_info.h>
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index a0ff2b66e22b..4b4007b3083a 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -111,7 +111,6 @@ int show_interrupts(struct seq_file *p, void *v)
 		seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
 	seq_printf(p, " %14s", irq_desc[i].chip->name);
-	seq_printf(p, "-%-8s", irq_desc[i].name);
 	seq_printf(p, "  %s", action->name);
 
 	for (action=action->next; action; action = action->next)
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index aa2c55e3b55f..2f8452b404c7 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/binfmts.h>
 #include <linux/security.h>
+#include <linux/syscalls.h>
 #include <linux/compat.h>
 #include <linux/vfs.h>
 #include <linux/ipc.h>
@@ -63,9 +64,9 @@
 #define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
 #endif
 
-asmlinkage unsigned long
-sys32_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
-	unsigned long flags, unsigned long fd, unsigned long pgoff)
+SYSCALL_DEFINE6(32_mmap2, unsigned long, addr, unsigned long, len,
+	unsigned long, prot, unsigned long, flags, unsigned long, fd,
+	unsigned long, pgoff)
 {
 	struct file * file = NULL;
 	unsigned long error;
@@ -121,21 +122,21 @@ struct rlimit32 {
 	int	rlim_max;
 };
 
-asmlinkage long sys32_truncate64(const char __user * path,
-	unsigned long __dummy, int a2, int a3)
+SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
+	unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
 {
 	return sys_truncate(path, merge_64(a2, a3));
 }
 
-asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long __dummy,
-	int a2, int a3)
+SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
+	unsigned long, a2, unsigned long, a3)
 {
 	return sys_ftruncate(fd, merge_64(a2, a3));
 }
 
-asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high,
-	unsigned int offset_low, loff_t __user * result,
-	unsigned int origin)
+SYSCALL_DEFINE5(32_llseek, unsigned long, fd, unsigned long, offset_high,
+	unsigned long, offset_low, loff_t __user *, result,
+	unsigned long, origin)
 {
 	return sys_llseek(fd, offset_high, offset_low, result, origin);
 }
@@ -144,20 +145,20 @@ asmlinkage int sys32_llseek(unsigned int fd, unsigned int offset_high,
    lseek back to original location.  They fail just like lseek does on
    non-seekable files.  */
 
-asmlinkage ssize_t sys32_pread(unsigned int fd, char __user * buf,
-	size_t count, u32 unused, u64 a4, u64 a5)
+SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
+	unsigned long, unused, unsigned long, a4, unsigned long, a5)
 {
 	return sys_pread64(fd, buf, count, merge_64(a4, a5));
 }
 
-asmlinkage ssize_t sys32_pwrite(unsigned int fd, const char __user * buf,
-	size_t count, u32 unused, u64 a4, u64 a5)
+SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
+	size_t, count, u32, unused, u64, a4, u64, a5)
 {
 	return sys_pwrite64(fd, buf, count, merge_64(a4, a5));
 }
 
-asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
-	struct compat_timespec __user *interval)
+SYSCALL_DEFINE2(32_sched_rr_get_interval, compat_pid_t, pid,
+	struct compat_timespec __user *, interval)
 {
 	struct timespec t;
 	int ret;
@@ -174,8 +175,8 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
 
 #ifdef CONFIG_SYSVIPC
 
-asmlinkage long
-sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+SYSCALL_DEFINE6(32_ipc, u32, call, long, first, long, second, long, third,
+	unsigned long, ptr, unsigned long, fifth)
 {
 	int version, err;
 
@@ -233,8 +234,8 @@ sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 
 #else
 
-asmlinkage long
-sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
+SYSCALL_DEFINE6(32_ipc, u32, call, int, first, int, second, int, third,
+	u32, ptr, u32 fifth)
 {
 	return -ENOSYS;
 }
@@ -242,7 +243,7 @@ sys32_ipc(u32 call, int first, int second, int third, u32 ptr, u32 fifth)
 #endif /* CONFIG_SYSVIPC */
 
 #ifdef CONFIG_MIPS32_N32
-asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
+SYSCALL_DEFINE4(n32_semctl, int, semid, int, semnum, int, cmd, u32, arg)
 {
 	/* compat_sys_semctl expects a pointer to union semun */
 	u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
@@ -251,13 +252,14 @@ asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
 	return compat_sys_semctl(semid, semnum, cmd, uptr);
 }
 
-asmlinkage long sysn32_msgsnd(int msqid, u32 msgp, unsigned msgsz, int msgflg)
+SYSCALL_DEFINE4(n32_msgsnd, int, msqid, u32, msgp, unsigned int, msgsz,
+	int, msgflg)
 {
 	return compat_sys_msgsnd(msqid, msgsz, msgflg, compat_ptr(msgp));
 }
 
-asmlinkage long sysn32_msgrcv(int msqid, u32 msgp, size_t msgsz, int msgtyp,
-	int msgflg)
+SYSCALL_DEFINE5(n32_msgrcv, int, msqid, u32, msgp, size_t, msgsz,
+	int, msgtyp, int, msgflg)
 {
 	return compat_sys_msgrcv(msqid, msgsz, msgtyp, msgflg, IPC_64,
 			compat_ptr(msgp));
@@ -277,7 +279,7 @@ struct sysctl_args32
 
 #ifdef CONFIG_SYSCTL_SYSCALL
 
-asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
+SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args)
 {
 	struct sysctl_args32 tmp;
 	int error;
@@ -316,9 +318,16 @@ asmlinkage long sys32_sysctl(struct sysctl_args32 __user *args)
 	return error;
 }
 
+#else
+
+SYSCALL_DEFINE1(32_sysctl, struct sysctl_args32 __user *, args)
+{
+	return -ENOSYS;
+}
+
 #endif /* CONFIG_SYSCTL_SYSCALL */
 
-asmlinkage long sys32_newuname(struct new_utsname __user * name)
+SYSCALL_DEFINE1(32_newuname, struct new_utsname __user *, name)
 {
 	int ret = 0;
 
@@ -334,7 +343,7 @@ asmlinkage long sys32_newuname(struct new_utsname __user * name)
 	return ret;
 }
 
-asmlinkage int sys32_personality(unsigned long personality)
+SYSCALL_DEFINE1(32_personality, unsigned long, personality)
 {
 	int ret;
 	personality &= 0xffffffff;
@@ -357,7 +366,7 @@ struct ustat32 {
 
 extern asmlinkage long sys_ustat(dev_t dev, struct ustat __user * ubuf);
 
-asmlinkage int sys32_ustat(dev_t dev, struct ustat32 __user * ubuf32)
+SYSCALL_DEFINE2(32_ustat, dev_t, dev, struct ustat32 __user *, ubuf32)
 {
 	int err;
 	struct ustat tmp;
@@ -381,8 +390,8 @@ out:
 	return err;
 }
 
-asmlinkage int sys32_sendfile(int out_fd, int in_fd, compat_off_t __user *offset,
-	s32 count)
+SYSCALL_DEFINE4(32_sendfile, long, out_fd, long, in_fd,
+	compat_off_t __user *, offset, s32, count)
 {
 	mm_segment_t old_fs = get_fs();
 	int ret;
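The conversions above, together with the "select HAVE_SYSCALL_WRAPPERS" added to the 64BIT option in the MIPS Kconfig hunk earlier, route each syscall through a generated wrapper that receives every argument as a full register-width long and only then narrows it to the declared type, so stale upper halves of 64-bit registers cannot leak into the handler. Roughly what SYSCALL_DEFINE1(32_personality, unsigned long, personality) expands to with wrappers enabled, as a simplified standalone sketch and not the verbatim output of include/linux/syscalls.h:

#define asmlinkage	/* calling-convention attribute, irrelevant for the sketch */

static inline long SYSC_32_personality(unsigned long personality);

/* Generated wrapper: takes the raw register value as a long, casts it to
 * the declared parameter type, then calls the real body. */
asmlinkage long SyS_32_personality(long personality)
{
	return SYSC_32_personality((unsigned long) personality);
}

/* The braces following SYSCALL_DEFINE1(...) become the body of this
 * static inline function. */
static inline long SYSC_32_personality(unsigned long personality)
{
	return (long) (personality & 0xffffffff);
}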
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 51d1ba415b90..9ab70c3b5be6 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -399,7 +399,7 @@ einval: li v0, -ENOSYS
 	sys	sys_swapon		2
 	sys	sys_reboot		3
 	sys	sys_old_readdir		3
-	sys	old_mmap		6	/* 4090 */
+	sys	sys_mips_mmap		6	/* 4090 */
 	sys	sys_munmap		2
 	sys	sys_truncate		2
 	sys	sys_ftruncate		2
@@ -519,7 +519,7 @@ einval: li v0, -ENOSYS
 	sys	sys_sendfile		4
 	sys	sys_ni_syscall		0
 	sys	sys_ni_syscall		0
-	sys	sys_mmap2		6	/* 4210 */
+	sys	sys_mips_mmap2		6	/* 4210 */
 	sys	sys_truncate64		4
 	sys	sys_ftruncate64		4
 	sys	sys_stat64		2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index a9e171618994..9b4698667154 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -207,7 +207,7 @@ sys_call_table:
 	PTR	sys_newlstat
 	PTR	sys_poll
 	PTR	sys_lseek
-	PTR	old_mmap
+	PTR	sys_mips_mmap
 	PTR	sys_mprotect			/* 5010 */
 	PTR	sys_munmap
 	PTR	sys_brk
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 30f3b6317a83..7438e92f8a01 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -129,12 +129,12 @@ EXPORT(sysn32_call_table)
 	PTR	sys_newlstat
 	PTR	sys_poll
 	PTR	sys_lseek
-	PTR	old_mmap
+	PTR	sys_mips_mmap
 	PTR	sys_mprotect			/* 6010 */
 	PTR	sys_munmap
 	PTR	sys_brk
-	PTR	sys32_rt_sigaction
-	PTR	sys32_rt_sigprocmask
+	PTR	sys_32_rt_sigaction
+	PTR	sys_32_rt_sigprocmask
 	PTR	compat_sys_ioctl		/* 6015 */
 	PTR	sys_pread64
 	PTR	sys_pwrite64
@@ -159,7 +159,7 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_setitimer
 	PTR	sys_alarm
 	PTR	sys_getpid
-	PTR	sys32_sendfile
+	PTR	sys_32_sendfile
 	PTR	sys_socket			/* 6040 */
 	PTR	sys_connect
 	PTR	sys_accept
@@ -181,14 +181,14 @@ EXPORT(sysn32_call_table)
 	PTR	sys_exit
 	PTR	compat_sys_wait4
 	PTR	sys_kill			/* 6060 */
-	PTR	sys32_newuname
+	PTR	sys_32_newuname
 	PTR	sys_semget
 	PTR	sys_semop
-	PTR	sysn32_semctl
+	PTR	sys_n32_semctl
 	PTR	sys_shmdt			/* 6065 */
 	PTR	sys_msgget
-	PTR	sysn32_msgsnd
-	PTR	sysn32_msgrcv
+	PTR	sys_n32_msgsnd
+	PTR	sys_n32_msgrcv
 	PTR	compat_sys_msgctl
 	PTR	compat_sys_fcntl		/* 6070 */
 	PTR	sys_flock
@@ -245,15 +245,15 @@ EXPORT(sysn32_call_table)
 	PTR	sys_getsid
 	PTR	sys_capget
 	PTR	sys_capset
-	PTR	sys32_rt_sigpending		/* 6125 */
+	PTR	sys_32_rt_sigpending		/* 6125 */
 	PTR	compat_sys_rt_sigtimedwait
-	PTR	sys32_rt_sigqueueinfo
+	PTR	sys_32_rt_sigqueueinfo
 	PTR	sysn32_rt_sigsuspend
 	PTR	sys32_sigaltstack
 	PTR	compat_sys_utime		/* 6130 */
 	PTR	sys_mknod
-	PTR	sys32_personality
-	PTR	sys32_ustat
+	PTR	sys_32_personality
+	PTR	sys_32_ustat
 	PTR	compat_sys_statfs
 	PTR	compat_sys_fstatfs		/* 6135 */
 	PTR	sys_sysfs
@@ -265,14 +265,14 @@ EXPORT(sysn32_call_table)
 	PTR	sys_sched_getscheduler
 	PTR	sys_sched_get_priority_max
 	PTR	sys_sched_get_priority_min
-	PTR	sys32_sched_rr_get_interval	/* 6145 */
+	PTR	sys_32_sched_rr_get_interval	/* 6145 */
 	PTR	sys_mlock
 	PTR	sys_munlock
 	PTR	sys_mlockall
 	PTR	sys_munlockall
 	PTR	sys_vhangup			/* 6150 */
 	PTR	sys_pivot_root
-	PTR	sys32_sysctl
+	PTR	sys_32_sysctl
 	PTR	sys_prctl
 	PTR	compat_sys_adjtimex
 	PTR	compat_sys_setrlimit		/* 6155 */
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index fefef4af8595..b0fef4ff9827 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -265,12 +265,12 @@ sys_call_table:
 	PTR	sys_olduname
 	PTR	sys_umask			/* 4060 */
 	PTR	sys_chroot
-	PTR	sys32_ustat
+	PTR	sys_32_ustat
 	PTR	sys_dup2
 	PTR	sys_getppid
 	PTR	sys_getpgrp			/* 4065 */
 	PTR	sys_setsid
-	PTR	sys32_sigaction
+	PTR	sys_32_sigaction
 	PTR	sys_sgetmask
 	PTR	sys_ssetmask
 	PTR	sys_setreuid			/* 4070 */
@@ -293,7 +293,7 @@ sys_call_table:
 	PTR	sys_swapon
 	PTR	sys_reboot
 	PTR	compat_sys_old_readdir
-	PTR	old_mmap			/* 4090 */
+	PTR	sys_mips_mmap			/* 4090 */
 	PTR	sys_munmap
 	PTR	sys_truncate
 	PTR	sys_ftruncate
@@ -320,12 +320,12 @@ sys_call_table:
 	PTR	compat_sys_wait4
 	PTR	sys_swapoff			/* 4115 */
 	PTR	compat_sys_sysinfo
-	PTR	sys32_ipc
+	PTR	sys_32_ipc
 	PTR	sys_fsync
 	PTR	sys32_sigreturn
 	PTR	sys32_clone			/* 4120 */
 	PTR	sys_setdomainname
-	PTR	sys32_newuname
+	PTR	sys_32_newuname
 	PTR	sys_ni_syscall			/* sys_modify_ldt */
 	PTR	compat_sys_adjtimex
 	PTR	sys_mprotect			/* 4125 */
@@ -339,11 +339,11 @@ sys_call_table:
 	PTR	sys_fchdir
 	PTR	sys_bdflush
 	PTR	sys_sysfs			/* 4135 */
-	PTR	sys32_personality
+	PTR	sys_32_personality
 	PTR	sys_ni_syscall			/* for afs_syscall */
 	PTR	sys_setfsuid
 	PTR	sys_setfsgid
-	PTR	sys32_llseek			/* 4140 */
+	PTR	sys_32_llseek			/* 4140 */
 	PTR	compat_sys_getdents
 	PTR	compat_sys_select
 	PTR	sys_flock
@@ -356,7 +356,7 @@ sys_call_table:
 	PTR	sys_ni_syscall			/* 4150 */
 	PTR	sys_getsid
 	PTR	sys_fdatasync
-	PTR	sys32_sysctl
+	PTR	sys_32_sysctl
 	PTR	sys_mlock
 	PTR	sys_munlock			/* 4155 */
 	PTR	sys_mlockall
@@ -368,7 +368,7 @@ sys_call_table:
 	PTR	sys_sched_yield
 	PTR	sys_sched_get_priority_max
 	PTR	sys_sched_get_priority_min
-	PTR	sys32_sched_rr_get_interval	/* 4165 */
+	PTR	sys_32_sched_rr_get_interval	/* 4165 */
 	PTR	compat_sys_nanosleep
 	PTR	sys_mremap
 	PTR	sys_accept
@@ -397,25 +397,25 @@ sys_call_table:
 	PTR	sys_getresgid
 	PTR	sys_prctl
 	PTR	sys32_rt_sigreturn
-	PTR	sys32_rt_sigaction
-	PTR	sys32_rt_sigprocmask		/* 4195 */
-	PTR	sys32_rt_sigpending
+	PTR	sys_32_rt_sigaction
+	PTR	sys_32_rt_sigprocmask		/* 4195 */
+	PTR	sys_32_rt_sigpending
 	PTR	compat_sys_rt_sigtimedwait
-	PTR	sys32_rt_sigqueueinfo
+	PTR	sys_32_rt_sigqueueinfo
 	PTR	sys32_rt_sigsuspend
-	PTR	sys32_pread			/* 4200 */
-	PTR	sys32_pwrite
+	PTR	sys_32_pread			/* 4200 */
+	PTR	sys_32_pwrite
 	PTR	sys_chown
 	PTR	sys_getcwd
 	PTR	sys_capget
 	PTR	sys_capset			/* 4205 */
 	PTR	sys32_sigaltstack
-	PTR	sys32_sendfile
+	PTR	sys_32_sendfile
 	PTR	sys_ni_syscall
 	PTR	sys_ni_syscall
-	PTR	sys32_mmap2			/* 4210 */
-	PTR	sys32_truncate64
-	PTR	sys32_ftruncate64
+	PTR	sys_mips_mmap2			/* 4210 */
+	PTR	sys_32_truncate64
+	PTR	sys_32_ftruncate64
 	PTR	sys_newstat
 	PTR	sys_newlstat
 	PTR	sys_newfstat			/* 4215 */
@@ -481,7 +481,7 @@ sys_call_table:
 	PTR	compat_sys_mq_notify		/* 4275 */
 	PTR	compat_sys_mq_getsetattr
 	PTR	sys_ni_syscall			/* sys_vserver */
-	PTR	sys32_waitid
+	PTR	sys_32_waitid
 	PTR	sys_ni_syscall			/* available, was setaltroot */
 	PTR	sys_add_key			/* 4280 */
 	PTR	sys_request_key
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index a4e106c56ab5..830c5ef9932b 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -19,6 +19,7 @@
 #include <linux/ptrace.h>
 #include <linux/unistd.h>
 #include <linux/compiler.h>
+#include <linux/syscalls.h>
 #include <linux/uaccess.h>
 
 #include <asm/abi.h>
@@ -338,8 +339,8 @@ asmlinkage int sys_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 }
 
 #ifdef CONFIG_TRAD_SIGNALS
-asmlinkage int sys_sigaction(int sig, const struct sigaction __user *act,
-	struct sigaction __user *oact)
+SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
+	struct sigaction __user *, oact)
 {
 	struct k_sigaction new_ka, old_ka;
 	int ret;
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 652709b353ad..2e74075ac0ca 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -349,8 +349,8 @@ asmlinkage int sys32_rt_sigsuspend(nabi_no_regargs struct pt_regs regs)
 	return -ERESTARTNOHAND;
 }
 
-asmlinkage int sys32_sigaction(int sig, const struct sigaction32 __user *act,
-	struct sigaction32 __user *oact)
+SYSCALL_DEFINE3(32_sigaction, long, sig, const struct sigaction32 __user *, act,
+	struct sigaction32 __user *, oact)
 {
 	struct k_sigaction new_ka, old_ka;
 	int ret;
@@ -704,9 +704,9 @@ struct mips_abi mips_abi_32 = {
 	.restart	= __NR_O32_restart_syscall
 };
 
-asmlinkage int sys32_rt_sigaction(int sig, const struct sigaction32 __user *act,
-	struct sigaction32 __user *oact,
-	unsigned int sigsetsize)
+SYSCALL_DEFINE4(32_rt_sigaction, int, sig,
+	const struct sigaction32 __user *, act,
+	struct sigaction32 __user *, oact, unsigned int, sigsetsize)
 {
 	struct k_sigaction new_sa, old_sa;
 	int ret = -EINVAL;
@@ -748,8 +748,8 @@ out:
 	return ret;
 }
 
-asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
-	compat_sigset_t __user *oset, unsigned int sigsetsize)
+SYSCALL_DEFINE4(32_rt_sigprocmask, int, how, compat_sigset_t __user *, set,
+	compat_sigset_t __user *, oset, unsigned int, sigsetsize)
 {
 	sigset_t old_set, new_set;
 	int ret;
@@ -770,8 +770,8 @@ asmlinkage int sys32_rt_sigprocmask(int how, compat_sigset_t __user *set,
 	return ret;
 }
 
-asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset,
-	unsigned int sigsetsize)
+SYSCALL_DEFINE2(32_rt_sigpending, compat_sigset_t __user *, uset,
+	unsigned int, sigsetsize)
 {
 	int ret;
 	sigset_t set;
@@ -787,7 +787,8 @@ asmlinkage int sys32_rt_sigpending(compat_sigset_t __user *uset,
 	return ret;
 }
 
-asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *uinfo)
+SYSCALL_DEFINE3(32_rt_sigqueueinfo, int, pid, int, sig,
+	compat_siginfo_t __user *, uinfo)
 {
 	siginfo_t info;
 	int ret;
@@ -802,10 +803,9 @@ asmlinkage int sys32_rt_sigqueueinfo(int pid, int sig, compat_siginfo_t __user *
 	return ret;
 }
 
-asmlinkage long
-sys32_waitid(int which, compat_pid_t pid,
-	     compat_siginfo_t __user *uinfo, int options,
-	     struct compat_rusage __user *uru)
+SYSCALL_DEFINE5(32_waitid, int, which, compat_pid_t, pid,
+	compat_siginfo_t __user *, uinfo, int, options,
+	struct compat_rusage __user *, uru)
 {
 	siginfo_t info;
 	struct rusage ru;
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 37970d9b2186..8cf384644040 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -152,9 +152,9 @@ out:
 	return error;
 }
 
-asmlinkage unsigned long
-old_mmap(unsigned long addr, unsigned long len, int prot,
-	 int flags, int fd, off_t offset)
+SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
+	unsigned long, prot, unsigned long, flags, unsigned long,
+	fd, off_t, offset)
 {
 	unsigned long result;
 
@@ -168,9 +168,9 @@ out:
 	return result;
 }
 
-asmlinkage unsigned long
-sys_mmap2(unsigned long addr, unsigned long len, unsigned long prot,
-	unsigned long flags, unsigned long fd, unsigned long pgoff)
+SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
+	unsigned long, prot, unsigned long, flags, unsigned long, fd,
+	unsigned long, pgoff)
 {
 	if (pgoff & (~PAGE_MASK >> 12))
 		return -EINVAL;
@@ -240,7 +240,7 @@ out:
 /*
  * Compacrapability ...
  */
-asmlinkage int sys_uname(struct old_utsname __user * name)
+SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
 {
 	if (name && !copy_to_user(name, utsname(), sizeof (*name)))
 		return 0;
@@ -250,7 +250,7 @@ asmlinkage int sys_uname(struct old_utsname __user * name)
 /*
  * Compacrapability ...
  */
-asmlinkage int sys_olduname(struct oldold_utsname __user * name)
+SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
 {
 	int error;
 
@@ -279,7 +279,7 @@ asmlinkage int sys_olduname(struct oldold_utsname __user * name)
 	return error;
 }
 
-asmlinkage int sys_set_thread_area(unsigned long addr)
+SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
 {
 	struct thread_info *ti = task_thread_info(current);
 
@@ -290,7 +290,7 @@ asmlinkage int sys_set_thread_area(unsigned long addr)
 	return 0;
 }
 
-asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
+asmlinkage int _sys_sysmips(long cmd, long arg1, long arg2, long arg3)
 {
 	switch (cmd) {
 	case MIPS_ATOMIC_SET:
@@ -325,8 +325,8 @@ asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
  *
  * This is really horribly ugly.
  */
-asmlinkage int sys_ipc(unsigned int call, int first, int second,
-	unsigned long third, void __user *ptr, long fifth)
+SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, int, second,
+	unsigned long, third, void __user *, ptr, long, fifth)
 {
 	int version, ret;
 
@@ -411,7 +411,7 @@ asmlinkage int sys_ipc(unsigned int call, int first, int second,
 /*
  * No implemented yet ...
  */
-asmlinkage int sys_cachectl(char *addr, int nbytes, int op)
+SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
 {
 	return -ENOSYS;
 }
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 98ad0a82c29e..694d51f523d1 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/module.h>
 #include <linux/sched.h>
+#include <linux/syscalls.h>
 #include <linux/mm.h>
 
 #include <asm/cacheflush.h>
@@ -58,8 +59,8 @@ EXPORT_SYMBOL(_dma_cache_wback_inv);
  * We could optimize the case where the cache argument is not BCACHE but
  * that seems very atypical use ...
  */
-asmlinkage int sys_cacheflush(unsigned long addr,
-	unsigned long bytes, unsigned int cache)
+SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
+	unsigned int, cache)
 {
 	if (bytes == 0)
 		return 0;
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h
index d811a8cd7b58..4774c2f92232 100644
--- a/arch/powerpc/include/asm/compat.h
+++ b/arch/powerpc/include/asm/compat.h
@@ -210,5 +210,10 @@ struct compat_shmid64_ds {
 	compat_ulong_t __unused6;
 };
 
+static inline int is_compat_task(void)
+{
+	return test_thread_flag(TIF_32BIT);
+}
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_COMPAT_H */
diff --git a/arch/powerpc/include/asm/seccomp.h b/arch/powerpc/include/asm/seccomp.h
index 853765eb1f65..00c1d9133cfe 100644
--- a/arch/powerpc/include/asm/seccomp.h
+++ b/arch/powerpc/include/asm/seccomp.h
@@ -1,10 +1,6 @@
 #ifndef _ASM_POWERPC_SECCOMP_H
 #define _ASM_POWERPC_SECCOMP_H
 
-#ifdef __KERNEL__
-#include <linux/thread_info.h>
-#endif
-
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index ada06924a423..73cb6a3229ae 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -367,27 +367,24 @@ static int emulate_multiple(struct pt_regs *regs, unsigned char __user *addr,
 static int emulate_fp_pair(unsigned char __user *addr, unsigned int reg,
 			   unsigned int flags)
 {
-	char *ptr = (char *) &current->thread.TS_FPR(reg);
-	int i, ret;
+	char *ptr0 = (char *) &current->thread.TS_FPR(reg);
+	char *ptr1 = (char *) &current->thread.TS_FPR(reg+1);
+	int i, ret, sw = 0;
 
 	if (!(flags & F))
 		return 0;
 	if (reg & 1)
 		return 0;	/* invalid form: FRS/FRT must be even */
-	if (!(flags & SW)) {
-		/* not byte-swapped - easy */
-		if (!(flags & ST))
-			ret = __copy_from_user(ptr, addr, 16);
-		else
-			ret = __copy_to_user(addr, ptr, 16);
-	} else {
-		/* each FPR value is byte-swapped separately */
-		ret = 0;
-		for (i = 0; i < 16; ++i) {
-			if (!(flags & ST))
-				ret |= __get_user(ptr[i^7], addr + i);
-			else
-				ret |= __put_user(ptr[i^7], addr + i);
+	if (flags & SW)
+		sw = 7;
+	ret = 0;
+	for (i = 0; i < 8; ++i) {
+		if (!(flags & ST)) {
+			ret |= __get_user(ptr0[i^sw], addr + i);
+			ret |= __get_user(ptr1[i^sw], addr + i + 8);
+		} else {
+			ret |= __put_user(ptr0[i^sw], addr + i);
+			ret |= __put_user(ptr1[i^sw], addr + i + 8);
 		}
 	}
 	if (ret)
diff --git a/arch/powerpc/lib/copyuser_64.S b/arch/powerpc/lib/copyuser_64.S
index 70693a5c12a1..693b14a778fa 100644
--- a/arch/powerpc/lib/copyuser_64.S
+++ b/arch/powerpc/lib/copyuser_64.S
@@ -62,18 +62,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 72:	std	r8,8(r3)
 	beq+	3f
 	addi	r3,r3,16
-23:	ld	r9,8(r4)
 .Ldo_tail:
 	bf	cr7*4+1,1f
-	rotldi	r9,r9,32
+23:	lwz	r9,8(r4)
+	addi	r4,r4,4
 73:	stw	r9,0(r3)
 	addi	r3,r3,4
 1:	bf	cr7*4+2,2f
-	rotldi	r9,r9,16
+44:	lhz	r9,8(r4)
+	addi	r4,r4,2
 74:	sth	r9,0(r3)
 	addi	r3,r3,2
 2:	bf	cr7*4+3,3f
-	rotldi	r9,r9,8
+45:	lbz	r9,8(r4)
 75:	stb	r9,0(r3)
 3:	li	r3,0
 	blr
@@ -141,11 +142,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 6:	cmpwi	cr1,r5,8
 	addi	r3,r3,32
 	sld	r9,r9,r10
-	ble	cr1,.Ldo_tail
+	ble	cr1,7f
 34:	ld	r0,8(r4)
 	srd	r7,r0,r11
 	or	r9,r7,r9
-	b	.Ldo_tail
+7:
+	bf	cr7*4+1,1f
+	rotldi	r9,r9,32
+94:	stw	r9,0(r3)
+	addi	r3,r3,4
+1:	bf	cr7*4+2,2f
+	rotldi	r9,r9,16
+95:	sth	r9,0(r3)
+	addi	r3,r3,2
+2:	bf	cr7*4+3,3f
+	rotldi	r9,r9,8
+96:	stb	r9,0(r3)
+3:	li	r3,0
+	blr
 
 .Ldst_unaligned:
 	PPC_MTOCRF	0x01,r6		/* put #bytes to 8B bdry into cr7 */
@@ -218,7 +232,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 121:
 132:
 	addi	r3,r3,8
-123:
 134:
 135:
 138:
@@ -226,6 +239,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 140:
 141:
 142:
+123:
+144:
+145:
 
 /*
  * here we have had a fault on a load and r3 points to the first
@@ -309,6 +325,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 187:
 188:
 189:
+194:
+195:
+196:
 1:
 	ld	r6,-24(r1)
 	ld	r5,-8(r1)
@@ -329,7 +348,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	.llong	72b,172b
 	.llong	23b,123b
 	.llong	73b,173b
+	.llong	44b,144b
 	.llong	74b,174b
+	.llong	45b,145b
 	.llong	75b,175b
 	.llong	24b,124b
 	.llong	25b,125b
@@ -347,6 +368,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	.llong	79b,179b
 	.llong	80b,180b
 	.llong	34b,134b
+	.llong	94b,194b
+	.llong	95b,195b
+	.llong	96b,196b
 	.llong	35b,135b
 	.llong	81b,181b
 	.llong	36b,136b
diff --git a/arch/powerpc/lib/memcpy_64.S b/arch/powerpc/lib/memcpy_64.S
index fe2d34e5332d..e178922b2c21 100644
--- a/arch/powerpc/lib/memcpy_64.S
+++ b/arch/powerpc/lib/memcpy_64.S
@@ -53,18 +53,19 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 3:	std	r8,8(r3)
 	beq	3f
 	addi	r3,r3,16
-	ld	r9,8(r4)
 .Ldo_tail:
 	bf	cr7*4+1,1f
-	rotldi	r9,r9,32
+	lwz	r9,8(r4)
+	addi	r4,r4,4
 	stw	r9,0(r3)
 	addi	r3,r3,4
 1:	bf	cr7*4+2,2f
-	rotldi	r9,r9,16
+	lhz	r9,8(r4)
+	addi	r4,r4,2
 	sth	r9,0(r3)
 	addi	r3,r3,2
 2:	bf	cr7*4+3,3f
-	rotldi	r9,r9,8
+	lbz	r9,8(r4)
 	stb	r9,0(r3)
 3:	ld	r3,48(r1)	/* return dest pointer */
 	blr
@@ -133,11 +134,24 @@ END_FTR_SECTION_IFCLR(CPU_FTR_UNALIGNED_LD_STD)
 	cmpwi	cr1,r5,8
 	addi	r3,r3,32
 	sld	r9,r9,r10
-	ble	cr1,.Ldo_tail
+	ble	cr1,6f
 	ld	r0,8(r4)
 	srd	r7,r0,r11
 	or	r9,r7,r9
-	b	.Ldo_tail
+6:
+	bf	cr7*4+1,1f
+	rotldi	r9,r9,32
+	stw	r9,0(r3)
+	addi	r3,r3,4
+1:	bf	cr7*4+2,2f
+	rotldi	r9,r9,16
+	sth	r9,0(r3)
+	addi	r3,r3,2
+2:	bf	cr7*4+3,3f
+	rotldi	r9,r9,8
+	stb	r9,0(r3)
+3:	ld	r3,48(r1)	/* return dest pointer */
+	blr
 
 .Ldst_unaligned:
 	PPC_MTOCRF	0x01,r6		# put #bytes to 8B bdry into cr7
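The copyuser_64.S and memcpy_64.S changes above share one idea: the old tail code fetched a full doubleword (ld r9,8(r4)) even when fewer than eight bytes remained, so it could read past the end of the source buffer and, in the user-copy case, fault on the following page; the reworked tail loads only as wide as what is actually left (lwz/lhz/lbz), keeping the rotldi path only where a full doubleword has already been fetched safely. A C analogue of the reworked tail, offered as a hedged sketch rather than a transcription of the assembly:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Copy the final 0-7 bytes using loads no wider than what remains,
 * instead of reading a whole 8-byte word and using only part of it. */
static void copy_tail(unsigned char *dst, const unsigned char *src, size_t n)
{
	if (n & 4) {
		uint32_t w;
		memcpy(&w, src, 4);	/* 4-byte load, like lwz */
		memcpy(dst, &w, 4);
		src += 4;
		dst += 4;
	}
	if (n & 2) {
		uint16_t h;
		memcpy(&h, src, 2);	/* 2-byte load, like lhz */
		memcpy(dst, &h, 2);
		src += 2;
		dst += 2;
	}
	if (n & 1)
		*dst = *src;		/* 1-byte load, like lbz */
}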
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 77fae5f64f2e..5558d932b4d5 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -204,6 +204,23 @@ static int __init ppc4xx_setup_one_pci_PMM(struct pci_controller *hose,
 {
 	u32 ma, pcila, pciha;
 
+	/* Hack warning ! The "old" PCI 2.x cell only let us configure the low
+	 * 32-bit of incoming PLB addresses. The top 4 bits of the 36-bit
+	 * address are actually hard wired to a value that appears to depend
+	 * on the specific SoC. For example, it's 0 on 440EP and 1 on 440EPx.
+	 *
+	 * The trick here is we just crop those top bits and ignore them when
+	 * programming the chip. That means the device-tree has to be right
+	 * for the specific part used (we don't print a warning if it's wrong
+	 * but on the other hand, you'll crash quickly enough), but at least
+	 * this code should work whatever the hard coded value is
+	 */
+	plb_addr &= 0xffffffffull;
+
+	/* Note: Due to the above hack, the test below doesn't actually test
+	 * if you address is above 4G, but it tests that address and
+	 * (address + size) are both contained in the same 4G
+	 */
 	if ((plb_addr + size) > 0xffffffffull || !is_power_of_2(size) ||
 	    size < 0x1000 || (plb_addr & (size - 1)) != 0) {
 		printk(KERN_WARNING "%s: Resource out of range\n",
diff --git a/arch/sh/boards/board-ap325rxa.c b/arch/sh/boards/board-ap325rxa.c
index 7c35787d29b4..72da416f6162 100644
--- a/arch/sh/boards/board-ap325rxa.c
+++ b/arch/sh/boards/board-ap325rxa.c
@@ -22,7 +22,6 @@
 #include <linux/gpio.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/spi_gpio.h>
-#include <media/ov772x.h>
 #include <media/soc_camera_platform.h>
 #include <media/sh_mobile_ceu.h>
 #include <video/sh_mobile_lcdc.h>
@@ -224,7 +223,6 @@ static void camera_power(int val)
 }
 
 #ifdef CONFIG_I2C
-/* support for the old ncm03j camera */
 static unsigned char camera_ncm03j_magic[] =
 {
 	0x87, 0x00, 0x88, 0x08, 0x89, 0x01, 0x8A, 0xE8,
@@ -245,23 +243,6 @@ static unsigned char camera_ncm03j_magic[] =
 	0x63, 0xD4, 0x64, 0xEA, 0xD6, 0x0F,
 };
 
-static int camera_probe(void)
-{
-	struct i2c_adapter *a = i2c_get_adapter(0);
-	struct i2c_msg msg;
-	int ret;
-
-	camera_power(1);
-	msg.addr = 0x6e;
-	msg.buf = camera_ncm03j_magic;
-	msg.len = 2;
-	msg.flags = 0;
-	ret = i2c_transfer(a, &msg, 1);
-	camera_power(0);
-
-	return ret;
-}
-
 static int camera_set_capture(struct soc_camera_platform_info *info,
 			      int enable)
 {
@@ -313,35 +294,8 @@ static struct platform_device camera_device = {
 		.platform_data	= &camera_info,
 	},
 };
-
-static int __init camera_setup(void)
-{
-	if (camera_probe() > 0)
-		platform_device_register(&camera_device);
-
-	return 0;
-}
-late_initcall(camera_setup);
-
 #endif /* CONFIG_I2C */
 
-static int ov7725_power(struct device *dev, int mode)
-{
-	camera_power(0);
-	if (mode)
-		camera_power(1);
-
-	return 0;
-}
-
-static struct ov772x_camera_info ov7725_info = {
-	.buswidth  = SOCAM_DATAWIDTH_8,
-	.flags = OV772X_FLAG_VFLIP | OV772X_FLAG_HFLIP,
-	.link = {
-		.power  = ov7725_power,
-	},
-};
-
 static struct sh_mobile_ceu_info sh_mobile_ceu_info = {
 	.flags = SOCAM_PCLK_SAMPLE_RISING | SOCAM_HSYNC_ACTIVE_HIGH |
 		SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_MASTER | SOCAM_DATAWIDTH_8,
@@ -392,6 +346,9 @@ static struct platform_device *ap325rxa_devices[] __initdata = {
 	&ap325rxa_nor_flash_device,
 	&lcdc_device,
 	&ceu_device,
+#ifdef CONFIG_I2C
+	&camera_device,
+#endif
 	&nand_flash_device,
 	&sdcard_cn3_device,
 };
@@ -400,10 +357,6 @@ static struct i2c_board_info __initdata ap325rxa_i2c_devices[] = {
 	{
 		I2C_BOARD_INFO("pcf8563", 0x51),
 	},
-	{
-		I2C_BOARD_INFO("ov772x", 0x21),
-		.platform_data = &ov7725_info,
-	},
 };
 
 static struct spi_board_info ap325rxa_spi_devices[] = {
diff --git a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
index 020a96fe961a..4a5e59732334 100644
--- a/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
+++ b/arch/sh/kernel/cpu/sh2a/clock-sh7201.c
@@ -18,8 +18,8 @@
 #include <asm/freq.h>
 #include <asm/io.h>
 
-const static int pll1rate[]={1,2,3,4,6,8};
-const static int pfc_divisors[]={1,2,3,4,6,8,12};
+static const int pll1rate[]={1,2,3,4,6,8};
+static const int pfc_divisors[]={1,2,3,4,6,8,12};
 #define ifc_divisors pfc_divisors
 
 #if (CONFIG_SH_CLK_MD == 0)
diff --git a/arch/sparc/include/asm/compat.h b/arch/sparc/include/asm/compat.h
index f260b58f5ce9..0e706257918f 100644
--- a/arch/sparc/include/asm/compat.h
+++ b/arch/sparc/include/asm/compat.h
@@ -240,4 +240,9 @@ struct compat_shmid64_ds {
 	unsigned int	__unused2;
 };
 
+static inline int is_compat_task(void)
+{
+	return test_thread_flag(TIF_32BIT);
+}
+
 #endif /* _ASM_SPARC64_COMPAT_H */
diff --git a/arch/sparc/include/asm/seccomp.h b/arch/sparc/include/asm/seccomp.h
index 7fcd9968192b..adca1bce41d4 100644
--- a/arch/sparc/include/asm/seccomp.h
+++ b/arch/sparc/include/asm/seccomp.h
@@ -1,11 +1,5 @@
 #ifndef _ASM_SECCOMP_H
 
-#include <linux/thread_info.h> /* already defines TIF_32BIT */
-
-#ifndef TIF_32BIT
-#error "unexpected TIF_32BIT on sparc64"
-#endif
-
 #include <linux/unistd.h>
 
 #define __NR_seccomp_read __NR_read
diff --git a/arch/sparc/kernel/chmc.c b/arch/sparc/kernel/chmc.c
index 3b9f4d6e14a9..e1a9598e2a4d 100644
--- a/arch/sparc/kernel/chmc.c
+++ b/arch/sparc/kernel/chmc.c
@@ -306,6 +306,7 @@ static int jbusmc_print_dimm(int syndrome_code,
 		buf[1] = '?';
 		buf[2] = '?';
 		buf[3] = '\0';
+		return 0;
 	}
 	p = dp->controller;
 	prop = &p->layout;
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 8015641478bd..f5cef3fbf9a5 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -40,6 +40,9 @@ config X86
 	select HAVE_GENERIC_DMA_COHERENT if X86_32
 	select HAVE_EFFICIENT_UNALIGNED_ACCESS
 	select USER_STACKTRACE_SUPPORT
+	select HAVE_KERNEL_GZIP
+	select HAVE_KERNEL_BZIP2
+	select HAVE_KERNEL_LZMA
 
 config ARCH_DEFCONFIG
 	string
@@ -1825,7 +1828,7 @@ config DMAR
 	  remapping devices.
 
 config DMAR_DEFAULT_ON
-	def_bool n
+	def_bool y
 	prompt "Enable DMA Remapping Devices by default"
 	depends on DMAR
 	help
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 1771c804e02f..3ca4c194b8e5 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -4,7 +4,7 @@
4# create a compressed vmlinux image from the original vmlinux 4# create a compressed vmlinux image from the original vmlinux
5# 5#
6 6
7targets := vmlinux vmlinux.bin vmlinux.bin.gz head_$(BITS).o misc.o piggy.o 7targets := vmlinux vmlinux.bin vmlinux.bin.gz vmlinux.bin.bz2 vmlinux.bin.lzma head_$(BITS).o misc.o piggy.o
8 8
9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2 9KBUILD_CFLAGS := -m$(BITS) -D__KERNEL__ $(LINUX_INCLUDE) -O2
10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC 10KBUILD_CFLAGS += -fno-strict-aliasing -fPIC
@@ -47,18 +47,35 @@ ifeq ($(CONFIG_X86_32),y)
47ifdef CONFIG_RELOCATABLE 47ifdef CONFIG_RELOCATABLE
48$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE 48$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin.all FORCE
49 $(call if_changed,gzip) 49 $(call if_changed,gzip)
50$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin.all FORCE
51 $(call if_changed,bzip2)
52$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin.all FORCE
53 $(call if_changed,lzma)
50else 54else
51$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 55$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
52 $(call if_changed,gzip) 56 $(call if_changed,gzip)
57$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
58 $(call if_changed,bzip2)
59$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
60 $(call if_changed,lzma)
53endif 61endif
54LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T 62LDFLAGS_piggy.o := -r --format binary --oformat elf32-i386 -T
55 63
56else 64else
65
57$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE 66$(obj)/vmlinux.bin.gz: $(obj)/vmlinux.bin FORCE
58 $(call if_changed,gzip) 67 $(call if_changed,gzip)
68$(obj)/vmlinux.bin.bz2: $(obj)/vmlinux.bin FORCE
69 $(call if_changed,bzip2)
70$(obj)/vmlinux.bin.lzma: $(obj)/vmlinux.bin FORCE
71 $(call if_changed,lzma)
59 72
60LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T 73LDFLAGS_piggy.o := -r --format binary --oformat elf64-x86-64 -T
61endif 74endif
62 75
63$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.gz FORCE 76suffix_$(CONFIG_KERNEL_GZIP) = gz
77suffix_$(CONFIG_KERNEL_BZIP2) = bz2
78suffix_$(CONFIG_KERNEL_LZMA) = lzma
79
80$(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix_y) FORCE
64 $(call if_changed,ld) 81 $(call if_changed,ld)
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index da062216948a..e45be73684ff 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -116,71 +116,13 @@
116/* 116/*
117 * gzip declarations 117 * gzip declarations
118 */ 118 */
119
120#define OF(args) args
121#define STATIC static 119#define STATIC static
122 120
123#undef memset 121#undef memset
124#undef memcpy 122#undef memcpy
125#define memzero(s, n) memset((s), 0, (n)) 123#define memzero(s, n) memset((s), 0, (n))
126 124
127typedef unsigned char uch;
128typedef unsigned short ush;
129typedef unsigned long ulg;
130
131/*
132 * Window size must be at least 32k, and a power of two.
133 * We don't actually have a window just a huge output buffer,
134 * so we report a 2G window size, as that should always be
135 * larger than our output buffer:
136 */
137#define WSIZE 0x80000000
138
139/* Input buffer: */
140static unsigned char *inbuf;
141
142/* Sliding window buffer (and final output buffer): */
143static unsigned char *window;
144
145/* Valid bytes in inbuf: */
146static unsigned insize;
147
148/* Index of next byte to be processed in inbuf: */
149static unsigned inptr;
150
151/* Bytes in output buffer: */
152static unsigned outcnt;
153
154/* gzip flag byte */
155#define ASCII_FLAG 0x01 /* bit 0 set: file probably ASCII text */
156#define CONTINUATION 0x02 /* bit 1 set: continuation of multi-part gz file */
157#define EXTRA_FIELD 0x04 /* bit 2 set: extra field present */
158#define ORIG_NAME 0x08 /* bit 3 set: original file name present */
159#define COMMENT 0x10 /* bit 4 set: file comment present */
160#define ENCRYPTED 0x20 /* bit 5 set: file is encrypted */
161#define RESERVED 0xC0 /* bit 6, 7: reserved */
162
163#define get_byte() (inptr < insize ? inbuf[inptr++] : fill_inbuf())
164
165/* Diagnostic functions */
166#ifdef DEBUG
167# define Assert(cond, msg) do { if (!(cond)) error(msg); } while (0)
168# define Trace(x) do { fprintf x; } while (0)
169# define Tracev(x) do { if (verbose) fprintf x ; } while (0)
170# define Tracevv(x) do { if (verbose > 1) fprintf x ; } while (0)
171# define Tracec(c, x) do { if (verbose && (c)) fprintf x ; } while (0)
172# define Tracecv(c, x) do { if (verbose > 1 && (c)) fprintf x ; } while (0)
173#else
174# define Assert(cond, msg)
175# define Trace(x)
176# define Tracev(x)
177# define Tracevv(x)
178# define Tracec(c, x)
179# define Tracecv(c, x)
180#endif
181 125
182static int fill_inbuf(void);
183static void flush_window(void);
184static void error(char *m); 126static void error(char *m);
185 127
186/* 128/*
@@ -189,13 +131,8 @@ static void error(char *m);
189static struct boot_params *real_mode; /* Pointer to real-mode data */ 131static struct boot_params *real_mode; /* Pointer to real-mode data */
190static int quiet; 132static int quiet;
191 133
192extern unsigned char input_data[];
193extern int input_len;
194
195static long bytes_out;
196
197static void *memset(void *s, int c, unsigned n); 134static void *memset(void *s, int c, unsigned n);
198static void *memcpy(void *dest, const void *src, unsigned n); 135void *memcpy(void *dest, const void *src, unsigned n);
199 136
200static void __putstr(int, const char *); 137static void __putstr(int, const char *);
201#define putstr(__x) __putstr(0, __x) 138#define putstr(__x) __putstr(0, __x)
@@ -213,7 +150,17 @@ static char *vidmem;
213static int vidport; 150static int vidport;
214static int lines, cols; 151static int lines, cols;
215 152
216#include "../../../../lib/inflate.c" 153#ifdef CONFIG_KERNEL_GZIP
154#include "../../../../lib/decompress_inflate.c"
155#endif
156
157#ifdef CONFIG_KERNEL_BZIP2
158#include "../../../../lib/decompress_bunzip2.c"
159#endif
160
161#ifdef CONFIG_KERNEL_LZMA
162#include "../../../../lib/decompress_unlzma.c"
163#endif
217 164
218static void scroll(void) 165static void scroll(void)
219{ 166{
@@ -282,7 +229,7 @@ static void *memset(void *s, int c, unsigned n)
282 return s; 229 return s;
283} 230}
284 231
285static void *memcpy(void *dest, const void *src, unsigned n) 232void *memcpy(void *dest, const void *src, unsigned n)
286{ 233{
287 int i; 234 int i;
288 const char *s = src; 235 const char *s = src;
@@ -293,38 +240,6 @@ static void *memcpy(void *dest, const void *src, unsigned n)
293 return dest; 240 return dest;
294} 241}
295 242
296/* ===========================================================================
297 * Fill the input buffer. This is called only when the buffer is empty
298 * and at least one byte is really needed.
299 */
300static int fill_inbuf(void)
301{
302 error("ran out of input data");
303 return 0;
304}
305
306/* ===========================================================================
307 * Write the output window window[0..outcnt-1] and update crc and bytes_out.
308 * (Used for the decompressed data only.)
309 */
310static void flush_window(void)
311{
312 /* With my window equal to my output buffer
313 * I only need to compute the crc here.
314 */
315 unsigned long c = crc; /* temporary variable */
316 unsigned n;
317 unsigned char *in, ch;
318
319 in = window;
320 for (n = 0; n < outcnt; n++) {
321 ch = *in++;
322 c = crc_32_tab[((int)c ^ ch) & 0xff] ^ (c >> 8);
323 }
324 crc = c;
325 bytes_out += (unsigned long)outcnt;
326 outcnt = 0;
327}
328 243
329static void error(char *x) 244static void error(char *x)
330{ 245{
@@ -407,12 +322,8 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
407 lines = real_mode->screen_info.orig_video_lines; 322 lines = real_mode->screen_info.orig_video_lines;
408 cols = real_mode->screen_info.orig_video_cols; 323 cols = real_mode->screen_info.orig_video_cols;
409 324
410 window = output; /* Output buffer (Normally at 1M) */
411 free_mem_ptr = heap; /* Heap */ 325 free_mem_ptr = heap; /* Heap */
412 free_mem_end_ptr = heap + BOOT_HEAP_SIZE; 326 free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
413 inbuf = input_data; /* Input buffer */
414 insize = input_len;
415 inptr = 0;
416 327
417#ifdef CONFIG_X86_64 328#ifdef CONFIG_X86_64
418 if ((unsigned long)output & (__KERNEL_ALIGN - 1)) 329 if ((unsigned long)output & (__KERNEL_ALIGN - 1))
@@ -430,10 +341,9 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
430#endif 341#endif
431#endif 342#endif
432 343
433 makecrc();
434 if (!quiet) 344 if (!quiet)
435 putstr("\nDecompressing Linux... "); 345 putstr("\nDecompressing Linux... ");
436 gunzip(); 346 decompress(input_data, input_len, NULL, NULL, output, NULL, error);
437 parse_elf(output); 347 parse_elf(output);
438 if (!quiet) 348 if (!quiet)
439 putstr("done.\nBooting the kernel.\n"); 349 putstr("done.\nBooting the kernel.\n");
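
Editor's note: together with the Kconfig and Makefile hunks above, this converts the x86 boot stub from the hard-wired inflate.c gunzip to the shared lib/decompress_*() code. Exactly one decompressor is built in (the suffix_$(CONFIG_KERNEL_*) lines resolve to a single suffix_y, since only the selected option is "y"), and each one exports the same decompress() entry point, so the call in decompress_kernel() never names a format. The standalone sketch below (ordinary userspace C with made-up USE_* macros, not the kernel's actual build wiring) shows the same "several implementations, one entry point, chosen at compile time" shape.

/*
 * Hypothetical userspace sketch of the pattern misc.c now follows: compile
 * exactly one implementation, all exposing the same entry point, so the
 * caller stays format-agnostic.  USE_GZIP/USE_BZIP2 are made-up macros.
 */
#include <stdio.h>

#if defined(USE_GZIP)
static const char *decompress_impl(void) { return "gzip"; }
#elif defined(USE_BZIP2)
static const char *decompress_impl(void) { return "bzip2"; }
#else
static const char *decompress_impl(void) { return "lzma"; }
#endif

int main(void)
{
        /* The call site never changes, whichever block was compiled in. */
        printf("decompressing with %s\n", decompress_impl());
        return 0;
}
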
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig
index 5c023f6f652c..235b81d0f6f2 100644
--- a/arch/x86/configs/i386_defconfig
+++ b/arch/x86/configs/i386_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc4 3# Linux kernel version: 2.6.29-rc4
4# Thu Feb 12 12:57:57 2009 4# Tue Feb 24 15:50:58 2009
5# 5#
6# CONFIG_64BIT is not set 6# CONFIG_64BIT is not set
7CONFIG_X86_32=y 7CONFIG_X86_32=y
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
266CONFIG_X86_LOCAL_APIC=y 266CONFIG_X86_LOCAL_APIC=y
267CONFIG_X86_IO_APIC=y 267CONFIG_X86_IO_APIC=y
268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y 268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
269# CONFIG_X86_MCE is not set 269CONFIG_X86_MCE=y
270CONFIG_X86_MCE_NONFATAL=y
271CONFIG_X86_MCE_P4THERMAL=y
270CONFIG_VM86=y 272CONFIG_VM86=y
271# CONFIG_TOSHIBA is not set 273# CONFIG_TOSHIBA is not set
272# CONFIG_I8K is not set 274# CONFIG_I8K is not set
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig
index 4157cc4a2bde..9fe5d212ab4c 100644
--- a/arch/x86/configs/x86_64_defconfig
+++ b/arch/x86/configs/x86_64_defconfig
@@ -1,7 +1,7 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.29-rc4 3# Linux kernel version: 2.6.29-rc4
4# Thu Feb 12 12:57:29 2009 4# Tue Feb 24 15:44:16 2009
5# 5#
6CONFIG_64BIT=y 6CONFIG_64BIT=y
7# CONFIG_X86_32 is not set 7# CONFIG_X86_32 is not set
@@ -266,7 +266,9 @@ CONFIG_PREEMPT_VOLUNTARY=y
266CONFIG_X86_LOCAL_APIC=y 266CONFIG_X86_LOCAL_APIC=y
267CONFIG_X86_IO_APIC=y 267CONFIG_X86_IO_APIC=y
268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y 268CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y
269# CONFIG_X86_MCE is not set 269CONFIG_X86_MCE=y
270CONFIG_X86_MCE_INTEL=y
271CONFIG_X86_MCE_AMD=y
270# CONFIG_I8K is not set 272# CONFIG_I8K is not set
271CONFIG_MICROCODE=y 273CONFIG_MICROCODE=y
272CONFIG_MICROCODE_INTEL=y 274CONFIG_MICROCODE_INTEL=y
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index a6208dc74633..4ef949c1972e 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -75,7 +75,14 @@ static inline void default_inquire_remote_apic(int apicid)
75#define setup_secondary_clock setup_secondary_APIC_clock 75#define setup_secondary_clock setup_secondary_APIC_clock
76#endif 76#endif
77 77
78#ifdef CONFIG_X86_VSMP
78extern int is_vsmp_box(void); 79extern int is_vsmp_box(void);
80#else
81static inline int is_vsmp_box(void)
82{
83 return 0;
84}
85#endif
79extern void xapic_wait_icr_idle(void); 86extern void xapic_wait_icr_idle(void);
80extern u32 safe_xapic_wait_icr_idle(void); 87extern u32 safe_xapic_wait_icr_idle(void);
81extern void xapic_icr_write(u32, u32); 88extern void xapic_icr_write(u32, u32);
@@ -306,7 +313,7 @@ struct apic {
306 void (*send_IPI_self)(int vector); 313 void (*send_IPI_self)(int vector);
307 314
308 /* wakeup_secondary_cpu */ 315 /* wakeup_secondary_cpu */
309 int (*wakeup_cpu)(int apicid, unsigned long start_eip); 316 int (*wakeup_secondary_cpu)(int apicid, unsigned long start_eip);
310 317
311 int trampoline_phys_low; 318 int trampoline_phys_low;
312 int trampoline_phys_high; 319 int trampoline_phys_high;
@@ -324,8 +331,21 @@ struct apic {
324 u32 (*safe_wait_icr_idle)(void); 331 u32 (*safe_wait_icr_idle)(void);
325}; 332};
326 333
334/*
335 * Pointer to the local APIC driver in use on this system (there's
336 * always just one such driver in use - the kernel decides via an
337 * early probing process which one it picks - and then sticks to it):
338 */
327extern struct apic *apic; 339extern struct apic *apic;
328 340
341/*
342 * APIC functionality to boot other CPUs - only used on SMP:
343 */
344#ifdef CONFIG_SMP
345extern atomic_t init_deasserted;
346extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
347#endif
348
329static inline u32 apic_read(u32 reg) 349static inline u32 apic_read(u32 reg)
330{ 350{
331 return apic->read(reg); 351 return apic->read(reg);
@@ -384,9 +404,7 @@ static inline unsigned default_get_apic_id(unsigned long x)
384#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467 404#define DEFAULT_TRAMPOLINE_PHYS_LOW 0x467
385#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469 405#define DEFAULT_TRAMPOLINE_PHYS_HIGH 0x469
386 406
387#ifdef CONFIG_X86_32 407#ifdef CONFIG_X86_64
388extern void es7000_update_apic_to_cluster(void);
389#else
390extern struct apic apic_flat; 408extern struct apic apic_flat;
391extern struct apic apic_physflat; 409extern struct apic apic_physflat;
392extern struct apic apic_x2apic_cluster; 410extern struct apic apic_x2apic_cluster;
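
Editor's note: is_vsmp_box() here (and vsmp_init() in the setup.h hunk further down) now follow the usual "real declaration when the option is on, empty static inline otherwise" idiom, which is what lets the kernel/Makefile hunk later in this diff build vsmp_64.o only for CONFIG_X86_VSMP while callers stay free of #ifdefs. A generic sketch of the idiom follows; CONFIG_MYFEATURE and myfeature_init() are hypothetical names, not from the patch.

/*
 * Generic sketch of the stub idiom used for is_vsmp_box()/vsmp_init().
 * CONFIG_MYFEATURE and myfeature_init() are made up for illustration.
 */
#ifdef CONFIG_MYFEATURE
extern void myfeature_init(void);              /* real version, built only when =y */
#else
static inline void myfeature_init(void) { }    /* compiles away entirely */
#endif

/* Callers need no #ifdef of their own: */
static inline void platform_setup(void)
{
        myfeature_init();
}
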
diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
index dd61616cb73d..6526cf08b0e4 100644
--- a/arch/x86/include/asm/boot.h
+++ b/arch/x86/include/asm/boot.h
@@ -10,17 +10,31 @@
10#define EXTENDED_VGA 0xfffe /* 80x50 mode */ 10#define EXTENDED_VGA 0xfffe /* 80x50 mode */
11#define ASK_VGA 0xfffd /* ask for it at bootup */ 11#define ASK_VGA 0xfffd /* ask for it at bootup */
12 12
13#ifdef __KERNEL__
14
13/* Physical address where kernel should be loaded. */ 15/* Physical address where kernel should be loaded. */
14#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ 16#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
15 + (CONFIG_PHYSICAL_ALIGN - 1)) \ 17 + (CONFIG_PHYSICAL_ALIGN - 1)) \
16 & ~(CONFIG_PHYSICAL_ALIGN - 1)) 18 & ~(CONFIG_PHYSICAL_ALIGN - 1))
17 19
20#ifdef CONFIG_KERNEL_BZIP2
21#define BOOT_HEAP_SIZE 0x400000
22#else /* !CONFIG_KERNEL_BZIP2 */
23
18#ifdef CONFIG_X86_64 24#ifdef CONFIG_X86_64
19#define BOOT_HEAP_SIZE 0x7000 25#define BOOT_HEAP_SIZE 0x7000
20#define BOOT_STACK_SIZE 0x4000
21#else 26#else
22#define BOOT_HEAP_SIZE 0x4000 27#define BOOT_HEAP_SIZE 0x4000
28#endif
29
30#endif /* !CONFIG_KERNEL_BZIP2 */
31
32#ifdef CONFIG_X86_64
33#define BOOT_STACK_SIZE 0x4000
34#else
23#define BOOT_STACK_SIZE 0x1000 35#define BOOT_STACK_SIZE 0x1000
24#endif 36#endif
25 37
38#endif /* __KERNEL__ */
39
26#endif /* _ASM_X86_BOOT_H */ 40#endif /* _ASM_X86_BOOT_H */
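
Editor's note: the heap sizing is the interesting part of this hunk: bzip2 needs far more scratch memory than gzip or LZMA, so BOOT_HEAP_SIZE jumps from a few pages to 4 MiB whenever CONFIG_KERNEL_BZIP2 is selected. A quick arithmetic check of the constants quoted above (plain C, only printing the values):

/* Sanity check of the BOOT_HEAP_SIZE constants quoted in the hunk above. */
#include <stdio.h>

int main(void)
{
        unsigned int bzip2  = 0x400000; /* CONFIG_KERNEL_BZIP2 */
        unsigned int x86_64 = 0x7000;   /* gzip/lzma, 64-bit   */
        unsigned int x86_32 = 0x4000;   /* gzip/lzma, 32-bit   */

        printf("bzip2 : %#x = %u KiB\n", bzip2,  bzip2  >> 10); /* 4096 KiB */
        printf("64-bit: %#x = %u KiB\n", x86_64, x86_64 >> 10); /*   28 KiB */
        printf("32-bit: %#x = %u KiB\n", x86_32, x86_32 >> 10); /*   16 KiB */
        return 0;
}
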
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 23696d44a0af..dca8f03da5b2 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -1,11 +1,155 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 * x86_32 and x86_64 integration by Gustavo F. Padovan, February 2009
12 */
13
1#ifndef _ASM_X86_FIXMAP_H 14#ifndef _ASM_X86_FIXMAP_H
2#define _ASM_X86_FIXMAP_H 15#define _ASM_X86_FIXMAP_H
3 16
17#ifndef __ASSEMBLY__
18#include <linux/kernel.h>
19#include <asm/acpi.h>
20#include <asm/apicdef.h>
21#include <asm/page.h>
22#ifdef CONFIG_X86_32
23#include <linux/threads.h>
24#include <asm/kmap_types.h>
25#else
26#include <asm/vsyscall.h>
27#ifdef CONFIG_EFI
28#include <asm/efi.h>
29#endif
30#endif
31
32/*
33 * We can't declare FIXADDR_TOP as variable for x86_64 because vsyscall
34 * uses fixmaps that relies on FIXADDR_TOP for proper address calculation.
35 * Because of this, FIXADDR_TOP x86 integration was left as later work.
36 */
37#ifdef CONFIG_X86_32
38/* used by vmalloc.c, vsyscall.lds.S.
39 *
40 * Leave one empty page between vmalloc'ed areas and
41 * the start of the fixmap.
42 */
43extern unsigned long __FIXADDR_TOP;
44#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
45
46#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
47#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
48#else
49#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
50
51/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
52#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
53#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
54#endif
55
56
57/*
58 * Here we define all the compile-time 'special' virtual
59 * addresses. The point is to have a constant address at
60 * compile time, but to set the physical address only
61 * in the boot process.
62 * for x86_32: We allocate these special addresses
63 * from the end of virtual memory (0xfffff000) backwards.
64 * Also this lets us do fail-safe vmalloc(), we
65 * can guarantee that these special addresses and
66 * vmalloc()-ed addresses never overlap.
67 *
68 * These 'compile-time allocated' memory buffers are
69 * fixed-size 4k pages (or larger if used with an increment
70 * higher than 1). Use set_fixmap(idx,phys) to associate
71 * physical memory with fixmap indices.
72 *
73 * TLB entries of such buffers will not be flushed across
74 * task switches.
75 */
76enum fixed_addresses {
4#ifdef CONFIG_X86_32 77#ifdef CONFIG_X86_32
5# include "fixmap_32.h" 78 FIX_HOLE,
79 FIX_VDSO,
6#else 80#else
7# include "fixmap_64.h" 81 VSYSCALL_LAST_PAGE,
82 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
83 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
84 VSYSCALL_HPET,
8#endif 85#endif
86 FIX_DBGP_BASE,
87 FIX_EARLYCON_MEM_BASE,
88#ifdef CONFIG_X86_LOCAL_APIC
89 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
90#endif
91#ifdef CONFIG_X86_IO_APIC
92 FIX_IO_APIC_BASE_0,
93 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
94#endif
95#ifdef CONFIG_X86_64
96#ifdef CONFIG_EFI
97 FIX_EFI_IO_MAP_LAST_PAGE,
98 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
99 + MAX_EFI_IO_PAGES - 1,
100#endif
101#endif
102#ifdef CONFIG_X86_VISWS_APIC
103 FIX_CO_CPU, /* Cobalt timer */
104 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
105 FIX_LI_PCIA, /* Lithium PCI Bridge A */
106 FIX_LI_PCIB, /* Lithium PCI Bridge B */
107#endif
108#ifdef CONFIG_X86_F00F_BUG
109 FIX_F00F_IDT, /* Virtual mapping for IDT */
110#endif
111#ifdef CONFIG_X86_CYCLONE_TIMER
112 FIX_CYCLONE_TIMER, /*cyclone timer register*/
113#endif
114#ifdef CONFIG_X86_32
115 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
116 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
117#ifdef CONFIG_PCI_MMCONFIG
118 FIX_PCIE_MCFG,
119#endif
120#endif
121#ifdef CONFIG_PARAVIRT
122 FIX_PARAVIRT_BOOTMAP,
123#endif
124 __end_of_permanent_fixed_addresses,
125#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
126 FIX_OHCI1394_BASE,
127#endif
128 /*
129 * 256 temporary boot-time mappings, used by early_ioremap(),
130 * before ioremap() is functional.
131 *
132 * We round it up to the next 256 pages boundary so that we
133 * can have a single pgd entry and a single pte table:
134 */
135#define NR_FIX_BTMAPS 64
136#define FIX_BTMAPS_SLOTS 4
137 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
138 (__end_of_permanent_fixed_addresses & 255),
139 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
140#ifdef CONFIG_X86_32
141 FIX_WP_TEST,
142#endif
143 __end_of_fixed_addresses
144};
145
146
147extern void reserve_top_address(unsigned long reserve);
148
149#define FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
150#define FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
151#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
152#define FIXADDR_BOOT_START (FIXADDR_TOP - FIXADDR_BOOT_SIZE)
9 153
10extern int fixmaps_set; 154extern int fixmaps_set;
11 155
@@ -69,4 +213,5 @@ static inline unsigned long virt_to_fix(const unsigned long vaddr)
69 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 213 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
70 return __virt_to_fix(vaddr); 214 return __virt_to_fix(vaddr);
71} 215}
216#endif /* !__ASSEMBLY__ */
72#endif /* _ASM_X86_FIXMAP_H */ 217#endif /* _ASM_X86_FIXMAP_H */
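
Editor's note: this merge folds fixmap_32.h and fixmap_64.h (both deleted below) into one header; the address arithmetic itself is unchanged: each fixed_addresses slot owns one page, counted downward from FIXADDR_TOP. The toy userspace model below paraphrases the __fix_to_virt() formula with an illustrative FIXADDR_TOP value, so treat the exact constants as assumptions rather than the kernel's.

/*
 * Toy model of the fixmap arithmetic: slots count one page each, downward
 * from FIXADDR_TOP.  The constants here are illustrative, not the kernel's.
 */
#include <stdio.h>

#define PAGE_SHIFT      12
#define FIXADDR_TOP     0xfffff000UL    /* x86_32-style example value */

enum fixed_addresses { FIX_HOLE, FIX_VDSO, FIX_APIC_BASE, __end_of_fixed_addresses };

#define __fix_to_virt(x)  (FIXADDR_TOP - ((unsigned long)(x) << PAGE_SHIFT))

int main(void)
{
        printf("FIX_VDSO      -> %#lx\n", __fix_to_virt(FIX_VDSO));
        printf("FIX_APIC_BASE -> %#lx\n", __fix_to_virt(FIX_APIC_BASE));
        printf("FIXADDR_START -> %#lx\n",
               FIXADDR_TOP - ((unsigned long)__end_of_fixed_addresses << PAGE_SHIFT));
        return 0;
}
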
diff --git a/arch/x86/include/asm/fixmap_32.h b/arch/x86/include/asm/fixmap_32.h
deleted file mode 100644
index 047d9bab2b31..000000000000
--- a/arch/x86/include/asm/fixmap_32.h
+++ /dev/null
@@ -1,115 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 *
10 * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
11 */
12
13#ifndef _ASM_X86_FIXMAP_32_H
14#define _ASM_X86_FIXMAP_32_H
15
16
17/* used by vmalloc.c, vsyscall.lds.S.
18 *
19 * Leave one empty page between vmalloc'ed areas and
20 * the start of the fixmap.
21 */
22extern unsigned long __FIXADDR_TOP;
23#define FIXADDR_USER_START __fix_to_virt(FIX_VDSO)
24#define FIXADDR_USER_END __fix_to_virt(FIX_VDSO - 1)
25
26#ifndef __ASSEMBLY__
27#include <linux/kernel.h>
28#include <asm/acpi.h>
29#include <asm/apicdef.h>
30#include <asm/page.h>
31#include <linux/threads.h>
32#include <asm/kmap_types.h>
33
34/*
35 * Here we define all the compile-time 'special' virtual
36 * addresses. The point is to have a constant address at
37 * compile time, but to set the physical address only
38 * in the boot process. We allocate these special addresses
39 * from the end of virtual memory (0xfffff000) backwards.
40 * Also this lets us do fail-safe vmalloc(), we
41 * can guarantee that these special addresses and
42 * vmalloc()-ed addresses never overlap.
43 *
44 * these 'compile-time allocated' memory buffers are
45 * fixed-size 4k pages. (or larger if used with an increment
46 * highger than 1) use fixmap_set(idx,phys) to associate
47 * physical memory with fixmap indices.
48 *
49 * TLB entries of such buffers will not be flushed across
50 * task switches.
51 */
52enum fixed_addresses {
53 FIX_HOLE,
54 FIX_VDSO,
55 FIX_DBGP_BASE,
56 FIX_EARLYCON_MEM_BASE,
57#ifdef CONFIG_X86_LOCAL_APIC
58 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
59#endif
60#ifdef CONFIG_X86_IO_APIC
61 FIX_IO_APIC_BASE_0,
62 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
63#endif
64#ifdef CONFIG_X86_VISWS_APIC
65 FIX_CO_CPU, /* Cobalt timer */
66 FIX_CO_APIC, /* Cobalt APIC Redirection Table */
67 FIX_LI_PCIA, /* Lithium PCI Bridge A */
68 FIX_LI_PCIB, /* Lithium PCI Bridge B */
69#endif
70#ifdef CONFIG_X86_F00F_BUG
71 FIX_F00F_IDT, /* Virtual mapping for IDT */
72#endif
73#ifdef CONFIG_X86_CYCLONE_TIMER
74 FIX_CYCLONE_TIMER, /*cyclone timer register*/
75#endif
76 FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
77 FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
78#ifdef CONFIG_PCI_MMCONFIG
79 FIX_PCIE_MCFG,
80#endif
81#ifdef CONFIG_PARAVIRT
82 FIX_PARAVIRT_BOOTMAP,
83#endif
84 __end_of_permanent_fixed_addresses,
85 /*
86 * 256 temporary boot-time mappings, used by early_ioremap(),
87 * before ioremap() is functional.
88 *
89 * We round it up to the next 256 pages boundary so that we
90 * can have a single pgd entry and a single pte table:
91 */
92#define NR_FIX_BTMAPS 64
93#define FIX_BTMAPS_SLOTS 4
94 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
95 (__end_of_permanent_fixed_addresses & 255),
96 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
97 FIX_WP_TEST,
98#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
99 FIX_OHCI1394_BASE,
100#endif
101 __end_of_fixed_addresses
102};
103
104extern void reserve_top_address(unsigned long reserve);
105
106
107#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
108
109#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
110#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
111#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
112#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
113
114#endif /* !__ASSEMBLY__ */
115#endif /* _ASM_X86_FIXMAP_32_H */
diff --git a/arch/x86/include/asm/fixmap_64.h b/arch/x86/include/asm/fixmap_64.h
deleted file mode 100644
index 298d9ba3faeb..000000000000
--- a/arch/x86/include/asm/fixmap_64.h
+++ /dev/null
@@ -1,79 +0,0 @@
1/*
2 * fixmap.h: compile-time virtual memory allocation
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 1998 Ingo Molnar
9 */
10
11#ifndef _ASM_X86_FIXMAP_64_H
12#define _ASM_X86_FIXMAP_64_H
13
14#include <linux/kernel.h>
15#include <asm/acpi.h>
16#include <asm/apicdef.h>
17#include <asm/page.h>
18#include <asm/vsyscall.h>
19#include <asm/efi.h>
20
21/*
22 * Here we define all the compile-time 'special' virtual
23 * addresses. The point is to have a constant address at
24 * compile time, but to set the physical address only
25 * in the boot process.
26 *
27 * These 'compile-time allocated' memory buffers are
28 * fixed-size 4k pages (or larger if used with an increment
29 * higher than 1). Use set_fixmap(idx,phys) to associate
30 * physical memory with fixmap indices.
31 *
32 * TLB entries of such buffers will not be flushed across
33 * task switches.
34 */
35
36enum fixed_addresses {
37 VSYSCALL_LAST_PAGE,
38 VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE
39 + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
40 VSYSCALL_HPET,
41 FIX_DBGP_BASE,
42 FIX_EARLYCON_MEM_BASE,
43 FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
44 FIX_IO_APIC_BASE_0,
45 FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS - 1,
46 FIX_EFI_IO_MAP_LAST_PAGE,
47 FIX_EFI_IO_MAP_FIRST_PAGE = FIX_EFI_IO_MAP_LAST_PAGE
48 + MAX_EFI_IO_PAGES - 1,
49#ifdef CONFIG_PARAVIRT
50 FIX_PARAVIRT_BOOTMAP,
51#endif
52 __end_of_permanent_fixed_addresses,
53#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
54 FIX_OHCI1394_BASE,
55#endif
56 /*
57 * 256 temporary boot-time mappings, used by early_ioremap(),
58 * before ioremap() is functional.
59 *
60 * We round it up to the next 256 pages boundary so that we
61 * can have a single pgd entry and a single pte table:
62 */
63#define NR_FIX_BTMAPS 64
64#define FIX_BTMAPS_SLOTS 4
65 FIX_BTMAP_END = __end_of_permanent_fixed_addresses + 256 -
66 (__end_of_permanent_fixed_addresses & 255),
67 FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS*FIX_BTMAPS_SLOTS - 1,
68 __end_of_fixed_addresses
69};
70
71#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
72#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
73#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
74
75/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
76#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
77#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
78
79#endif /* _ASM_X86_FIXMAP_64_H */
diff --git a/arch/x86/include/asm/iomap.h b/arch/x86/include/asm/iomap.h
index c1f06289b14b..86af26091d6c 100644
--- a/arch/x86/include/asm/iomap.h
+++ b/arch/x86/include/asm/iomap.h
@@ -23,6 +23,9 @@
23#include <asm/pgtable.h> 23#include <asm/pgtable.h>
24#include <asm/tlbflush.h> 24#include <asm/tlbflush.h>
25 25
26int
27is_io_mapping_possible(resource_size_t base, unsigned long size);
28
26void * 29void *
27iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot); 30iomap_atomic_prot_pfn(unsigned long pfn, enum km_type type, pgprot_t prot);
28 31
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index e9f5db796244..a37229011b56 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -4,8 +4,12 @@
4extern int pxm_to_nid(int pxm); 4extern int pxm_to_nid(int pxm);
5extern void numa_remove_cpu(int cpu); 5extern void numa_remove_cpu(int cpu);
6 6
7#ifdef CONFIG_NUMA 7#ifdef CONFIG_HIGHMEM
8extern void set_highmem_pages_init(void); 8extern void set_highmem_pages_init(void);
9#else
10static inline void set_highmem_pages_init(void)
11{
12}
9#endif 13#endif
10 14
11#endif /* _ASM_X86_NUMA_32_H */ 15#endif /* _ASM_X86_NUMA_32_H */
diff --git a/arch/x86/include/asm/pat.h b/arch/x86/include/asm/pat.h
index 9709fdff6615..b0e70056838e 100644
--- a/arch/x86/include/asm/pat.h
+++ b/arch/x86/include/asm/pat.h
@@ -15,4 +15,7 @@ extern int reserve_memtype(u64 start, u64 end,
15 unsigned long req_type, unsigned long *ret_type); 15 unsigned long req_type, unsigned long *ret_type);
16extern int free_memtype(u64 start, u64 end); 16extern int free_memtype(u64 start, u64 end);
17 17
18extern int kernel_map_sync_memtype(u64 base, unsigned long size,
19 unsigned long flag);
20
18#endif /* _ASM_X86_PAT_H */ 21#endif /* _ASM_X86_PAT_H */
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index c7a98f738210..76139506c3e4 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -248,7 +248,6 @@ struct x86_hw_tss {
248#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long)) 248#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
249#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap) 249#define IO_BITMAP_OFFSET offsetof(struct tss_struct, io_bitmap)
250#define INVALID_IO_BITMAP_OFFSET 0x8000 250#define INVALID_IO_BITMAP_OFFSET 0x8000
251#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
252 251
253struct tss_struct { 252struct tss_struct {
254 /* 253 /*
@@ -263,11 +262,6 @@ struct tss_struct {
263 * be within the limit. 262 * be within the limit.
264 */ 263 */
265 unsigned long io_bitmap[IO_BITMAP_LONGS + 1]; 264 unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
266 /*
267 * Cache the current maximum and the last task that used the bitmap:
268 */
269 unsigned long io_bitmap_max;
270 struct thread_struct *io_bitmap_owner;
271 265
272 /* 266 /*
273 * .. and then another 0x100 bytes for the emergency kernel stack: 267 * .. and then another 0x100 bytes for the emergency kernel stack:
diff --git a/arch/x86/include/asm/seccomp_32.h b/arch/x86/include/asm/seccomp_32.h
index a6ad87b352c4..b811d6f5780c 100644
--- a/arch/x86/include/asm/seccomp_32.h
+++ b/arch/x86/include/asm/seccomp_32.h
@@ -1,12 +1,6 @@
1#ifndef _ASM_X86_SECCOMP_32_H 1#ifndef _ASM_X86_SECCOMP_32_H
2#define _ASM_X86_SECCOMP_32_H 2#define _ASM_X86_SECCOMP_32_H
3 3
4#include <linux/thread_info.h>
5
6#ifdef TIF_32BIT
7#error "unexpected TIF_32BIT on i386"
8#endif
9
10#include <linux/unistd.h> 4#include <linux/unistd.h>
11 5
12#define __NR_seccomp_read __NR_read 6#define __NR_seccomp_read __NR_read
diff --git a/arch/x86/include/asm/seccomp_64.h b/arch/x86/include/asm/seccomp_64.h
index 4171bb794e9e..84ec1bd161a5 100644
--- a/arch/x86/include/asm/seccomp_64.h
+++ b/arch/x86/include/asm/seccomp_64.h
@@ -1,14 +1,6 @@
1#ifndef _ASM_X86_SECCOMP_64_H 1#ifndef _ASM_X86_SECCOMP_64_H
2#define _ASM_X86_SECCOMP_64_H 2#define _ASM_X86_SECCOMP_64_H
3 3
4#include <linux/thread_info.h>
5
6#ifdef TIF_32BIT
7#error "unexpected TIF_32BIT on x86_64"
8#else
9#define TIF_32BIT TIF_IA32
10#endif
11
12#include <linux/unistd.h> 4#include <linux/unistd.h>
13#include <asm/ia32_unistd.h> 5#include <asm/ia32_unistd.h>
14 6
diff --git a/arch/x86/include/asm/setup.h b/arch/x86/include/asm/setup.h
index 66801cb72f69..05c6f6b11fd5 100644
--- a/arch/x86/include/asm/setup.h
+++ b/arch/x86/include/asm/setup.h
@@ -31,7 +31,6 @@ struct x86_quirks {
31 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable, 31 void (*smp_read_mpc_oem)(struct mpc_oemtable *oemtable,
32 unsigned short oemsize); 32 unsigned short oemsize);
33 int (*setup_ioapic_ids)(void); 33 int (*setup_ioapic_ids)(void);
34 int (*update_apic)(void);
35}; 34};
36 35
37extern void x86_quirk_pre_intr_init(void); 36extern void x86_quirk_pre_intr_init(void);
@@ -65,7 +64,11 @@ extern void x86_quirk_time_init(void);
65#include <asm/bootparam.h> 64#include <asm/bootparam.h>
66 65
67/* Interrupt control for vSMPowered x86_64 systems */ 66/* Interrupt control for vSMPowered x86_64 systems */
67#ifdef CONFIG_X86_VSMP
68void vsmp_init(void); 68void vsmp_init(void);
69#else
70static inline void vsmp_init(void) { }
71#endif
69 72
70void setup_bios_corruption_check(void); 73void setup_bios_corruption_check(void);
71 74
@@ -77,8 +80,6 @@ static inline void visws_early_detect(void) { }
77static inline int is_visws_box(void) { return 0; } 80static inline int is_visws_box(void) { return 0; }
78#endif 81#endif
79 82
80extern int wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip);
81extern int wakeup_secondary_cpu_via_init(int apicid, unsigned long start_eip);
82extern struct x86_quirks *x86_quirks; 83extern struct x86_quirks *x86_quirks;
83extern unsigned long saved_video_mode; 84extern unsigned long saved_video_mode;
84 85
diff --git a/arch/x86/include/asm/system.h b/arch/x86/include/asm/system.h
index c00bfdbdd456..643c59b4bc6e 100644
--- a/arch/x86/include/asm/system.h
+++ b/arch/x86/include/asm/system.h
@@ -20,6 +20,9 @@
20struct task_struct; /* one of the stranger aspects of C forward declarations */ 20struct task_struct; /* one of the stranger aspects of C forward declarations */
21struct task_struct *__switch_to(struct task_struct *prev, 21struct task_struct *__switch_to(struct task_struct *prev,
22 struct task_struct *next); 22 struct task_struct *next);
23struct tss_struct;
24void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
25 struct tss_struct *tss);
23 26
24#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
25 28
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index 987a2c10fe20..8cc687326eb8 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -188,30 +188,18 @@ __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
188extern long __copy_user_nocache(void *dst, const void __user *src, 188extern long __copy_user_nocache(void *dst, const void __user *src,
189 unsigned size, int zerorest); 189 unsigned size, int zerorest);
190 190
191static inline int __copy_from_user_nocache(void *dst, const void __user *src, 191static inline int
192 unsigned size) 192__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
193{ 193{
194 might_sleep(); 194 might_sleep();
195 /* 195 return __copy_user_nocache(dst, src, size, 1);
196 * In practice this limit means that large file write()s
197 * which get chunked to 4K copies get handled via
198 * non-temporal stores here. Smaller writes get handled
199 * via regular __copy_from_user():
200 */
201 if (likely(size >= PAGE_SIZE))
202 return __copy_user_nocache(dst, src, size, 1);
203 else
204 return __copy_from_user(dst, src, size);
205} 196}
206 197
207static inline int __copy_from_user_inatomic_nocache(void *dst, 198static inline int
208 const void __user *src, 199__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
209 unsigned size) 200 unsigned size)
210{ 201{
211 if (likely(size >= PAGE_SIZE)) 202 return __copy_user_nocache(dst, src, size, 0);
212 return __copy_user_nocache(dst, src, size, 0);
213 else
214 return __copy_from_user_inatomic(dst, src, size);
215} 203}
216 204
217unsigned long 205unsigned long
diff --git a/arch/x86/include/asm/uv/uv.h b/arch/x86/include/asm/uv/uv.h
index 8242bf965812..c0a01b5d985b 100644
--- a/arch/x86/include/asm/uv/uv.h
+++ b/arch/x86/include/asm/uv/uv.h
@@ -12,7 +12,6 @@ extern enum uv_system_type get_uv_system_type(void);
12extern int is_uv_system(void); 12extern int is_uv_system(void);
13extern void uv_cpu_init(void); 13extern void uv_cpu_init(void);
14extern void uv_system_init(void); 14extern void uv_system_init(void);
15extern int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip);
16extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask, 15extern const struct cpumask *uv_flush_tlb_others(const struct cpumask *cpumask,
17 struct mm_struct *mm, 16 struct mm_struct *mm,
18 unsigned long va, 17 unsigned long va,
@@ -24,8 +23,6 @@ static inline enum uv_system_type get_uv_system_type(void) { return UV_NONE; }
24static inline int is_uv_system(void) { return 0; } 23static inline int is_uv_system(void) { return 0; }
25static inline void uv_cpu_init(void) { } 24static inline void uv_cpu_init(void) { }
26static inline void uv_system_init(void) { } 25static inline void uv_system_init(void) { }
27static inline int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip)
28{ return 1; }
29static inline const struct cpumask * 26static inline const struct cpumask *
30uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm, 27uv_flush_tlb_others(const struct cpumask *cpumask, struct mm_struct *mm,
31 unsigned long va, unsigned int cpu) 28 unsigned long va, unsigned int cpu)
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index de5657c039e9..95f216bbfaf1 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -70,7 +70,7 @@ obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
70obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o 70obj-$(CONFIG_KEXEC) += machine_kexec_$(BITS).o
71obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o 71obj-$(CONFIG_KEXEC) += relocate_kernel_$(BITS).o crash.o
72obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o 72obj-$(CONFIG_CRASH_DUMP) += crash_dump_$(BITS).o
73obj-y += vsmp_64.o 73obj-$(CONFIG_X86_VSMP) += vsmp_64.o
74obj-$(CONFIG_KPROBES) += kprobes.o 74obj-$(CONFIG_KPROBES) += kprobes.o
75obj-$(CONFIG_MODULES) += module_$(BITS).o 75obj-$(CONFIG_MODULES) += module_$(BITS).o
76obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o 76obj-$(CONFIG_EFI) += efi.o efi_$(BITS).o efi_stub_$(BITS).o
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a84ac7b570e6..6907b8e85d52 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -498,12 +498,12 @@ void *text_poke_early(void *addr, const void *opcode, size_t len)
498 */ 498 */
499void *__kprobes text_poke(void *addr, const void *opcode, size_t len) 499void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
500{ 500{
501 unsigned long flags;
502 char *vaddr; 501 char *vaddr;
503 int nr_pages = 2; 502 int nr_pages = 2;
504 struct page *pages[2]; 503 struct page *pages[2];
505 int i; 504 int i;
506 505
506 might_sleep();
507 if (!core_kernel_text((unsigned long)addr)) { 507 if (!core_kernel_text((unsigned long)addr)) {
508 pages[0] = vmalloc_to_page(addr); 508 pages[0] = vmalloc_to_page(addr);
509 pages[1] = vmalloc_to_page(addr + PAGE_SIZE); 509 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
@@ -517,9 +517,9 @@ void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
517 nr_pages = 1; 517 nr_pages = 1;
518 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL); 518 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
519 BUG_ON(!vaddr); 519 BUG_ON(!vaddr);
520 local_irq_save(flags); 520 local_irq_disable();
521 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); 521 memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
522 local_irq_restore(flags); 522 local_irq_enable();
523 vunmap(vaddr); 523 vunmap(vaddr);
524 sync_core(); 524 sync_core();
525 /* Could also do a CLFLUSH here to speed up CPU recovery; but 525 /* Could also do a CLFLUSH here to speed up CPU recovery; but
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 3b002995e145..f933822dba18 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -222,7 +222,6 @@ struct apic apic_flat = {
222 .send_IPI_all = flat_send_IPI_all, 222 .send_IPI_all = flat_send_IPI_all,
223 .send_IPI_self = apic_send_IPI_self, 223 .send_IPI_self = apic_send_IPI_self,
224 224
225 .wakeup_cpu = NULL,
226 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 225 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
227 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 226 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
228 .wait_for_init_deassert = NULL, 227 .wait_for_init_deassert = NULL,
@@ -373,7 +372,6 @@ struct apic apic_physflat = {
373 .send_IPI_all = physflat_send_IPI_all, 372 .send_IPI_all = physflat_send_IPI_all,
374 .send_IPI_self = apic_send_IPI_self, 373 .send_IPI_self = apic_send_IPI_self,
375 374
376 .wakeup_cpu = NULL,
377 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 375 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
378 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 376 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
379 .wait_for_init_deassert = NULL, 377 .wait_for_init_deassert = NULL,
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index 0b1093394fdf..d806ecaa948f 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -16,17 +16,17 @@
16#include <asm/apic.h> 16#include <asm/apic.h>
17#include <asm/ipi.h> 17#include <asm/ipi.h>
18 18
19static inline unsigned bigsmp_get_apic_id(unsigned long x) 19static unsigned bigsmp_get_apic_id(unsigned long x)
20{ 20{
21 return (x >> 24) & 0xFF; 21 return (x >> 24) & 0xFF;
22} 22}
23 23
24static inline int bigsmp_apic_id_registered(void) 24static int bigsmp_apic_id_registered(void)
25{ 25{
26 return 1; 26 return 1;
27} 27}
28 28
29static inline const cpumask_t *bigsmp_target_cpus(void) 29static const cpumask_t *bigsmp_target_cpus(void)
30{ 30{
31#ifdef CONFIG_SMP 31#ifdef CONFIG_SMP
32 return &cpu_online_map; 32 return &cpu_online_map;
@@ -35,13 +35,12 @@ static inline const cpumask_t *bigsmp_target_cpus(void)
35#endif 35#endif
36} 36}
37 37
38static inline unsigned long 38static unsigned long bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
39bigsmp_check_apicid_used(physid_mask_t bitmap, int apicid)
40{ 39{
41 return 0; 40 return 0;
42} 41}
43 42
44static inline unsigned long bigsmp_check_apicid_present(int bit) 43static unsigned long bigsmp_check_apicid_present(int bit)
45{ 44{
46 return 1; 45 return 1;
47} 46}
@@ -64,7 +63,7 @@ static inline unsigned long calculate_ldr(int cpu)
64 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel 63 * an APIC. See e.g. "AP-388 82489DX User's Manual" (Intel
65 * document number 292116). So here it goes... 64 * document number 292116). So here it goes...
66 */ 65 */
67static inline void bigsmp_init_apic_ldr(void) 66static void bigsmp_init_apic_ldr(void)
68{ 67{
69 unsigned long val; 68 unsigned long val;
70 int cpu = smp_processor_id(); 69 int cpu = smp_processor_id();
@@ -74,19 +73,19 @@ static inline void bigsmp_init_apic_ldr(void)
74 apic_write(APIC_LDR, val); 73 apic_write(APIC_LDR, val);
75} 74}
76 75
77static inline void bigsmp_setup_apic_routing(void) 76static void bigsmp_setup_apic_routing(void)
78{ 77{
79 printk(KERN_INFO 78 printk(KERN_INFO
80 "Enabling APIC mode: Physflat. Using %d I/O APICs\n", 79 "Enabling APIC mode: Physflat. Using %d I/O APICs\n",
81 nr_ioapics); 80 nr_ioapics);
82} 81}
83 82
84static inline int bigsmp_apicid_to_node(int logical_apicid) 83static int bigsmp_apicid_to_node(int logical_apicid)
85{ 84{
86 return apicid_2_node[hard_smp_processor_id()]; 85 return apicid_2_node[hard_smp_processor_id()];
87} 86}
88 87
89static inline int bigsmp_cpu_present_to_apicid(int mps_cpu) 88static int bigsmp_cpu_present_to_apicid(int mps_cpu)
90{ 89{
91 if (mps_cpu < nr_cpu_ids) 90 if (mps_cpu < nr_cpu_ids)
92 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu); 91 return (int) per_cpu(x86_bios_cpu_apicid, mps_cpu);
@@ -94,7 +93,7 @@ static inline int bigsmp_cpu_present_to_apicid(int mps_cpu)
94 return BAD_APICID; 93 return BAD_APICID;
95} 94}
96 95
97static inline physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid) 96static physid_mask_t bigsmp_apicid_to_cpu_present(int phys_apicid)
98{ 97{
99 return physid_mask_of_physid(phys_apicid); 98 return physid_mask_of_physid(phys_apicid);
100} 99}
@@ -107,29 +106,24 @@ static inline int bigsmp_cpu_to_logical_apicid(int cpu)
107 return cpu_physical_id(cpu); 106 return cpu_physical_id(cpu);
108} 107}
109 108
110static inline physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map) 109static physid_mask_t bigsmp_ioapic_phys_id_map(physid_mask_t phys_map)
111{ 110{
112 /* For clustered we don't have a good way to do this yet - hack */ 111 /* For clustered we don't have a good way to do this yet - hack */
113 return physids_promote(0xFFL); 112 return physids_promote(0xFFL);
114} 113}
115 114
116static inline void bigsmp_setup_portio_remap(void) 115static int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
117{
118}
119
120static inline int bigsmp_check_phys_apicid_present(int boot_cpu_physical_apicid)
121{ 116{
122 return 1; 117 return 1;
123} 118}
124 119
125/* As we are using single CPU as destination, pick only one CPU here */ 120/* As we are using single CPU as destination, pick only one CPU here */
126static inline unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask) 121static unsigned int bigsmp_cpu_mask_to_apicid(const cpumask_t *cpumask)
127{ 122{
128 return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask)); 123 return bigsmp_cpu_to_logical_apicid(first_cpu(*cpumask));
129} 124}
130 125
131static inline unsigned int 126static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
132bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
133 const struct cpumask *andmask) 127 const struct cpumask *andmask)
134{ 128{
135 int cpu; 129 int cpu;
@@ -148,7 +142,7 @@ bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
148 return BAD_APICID; 142 return BAD_APICID;
149} 143}
150 144
151static inline int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb) 145static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
152{ 146{
153 return cpuid_apic >> index_msb; 147 return cpuid_apic >> index_msb;
154} 148}
@@ -158,12 +152,12 @@ static inline void bigsmp_send_IPI_mask(const struct cpumask *mask, int vector)
158 default_send_IPI_mask_sequence_phys(mask, vector); 152 default_send_IPI_mask_sequence_phys(mask, vector);
159} 153}
160 154
161static inline void bigsmp_send_IPI_allbutself(int vector) 155static void bigsmp_send_IPI_allbutself(int vector)
162{ 156{
163 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector); 157 default_send_IPI_mask_allbutself_phys(cpu_online_mask, vector);
164} 158}
165 159
166static inline void bigsmp_send_IPI_all(int vector) 160static void bigsmp_send_IPI_all(int vector)
167{ 161{
168 bigsmp_send_IPI_mask(cpu_online_mask, vector); 162 bigsmp_send_IPI_mask(cpu_online_mask, vector);
169} 163}
@@ -256,7 +250,6 @@ struct apic apic_bigsmp = {
256 .send_IPI_all = bigsmp_send_IPI_all, 250 .send_IPI_all = bigsmp_send_IPI_all,
257 .send_IPI_self = default_send_IPI_self, 251 .send_IPI_self = default_send_IPI_self,
258 252
259 .wakeup_cpu = NULL,
260 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 253 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
261 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 254 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
262 255
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 320f2d2e4e54..19588f2770ee 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -163,22 +163,17 @@ static int wakeup_secondary_cpu_via_mip(int cpu, unsigned long eip)
163 return 0; 163 return 0;
164} 164}
165 165
166static int __init es7000_update_apic(void) 166static int es7000_apic_is_cluster(void)
167{ 167{
168 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
169
170 /* MPENTIUMIII */ 168 /* MPENTIUMIII */
171 if (boot_cpu_data.x86 == 6 && 169 if (boot_cpu_data.x86 == 6 &&
172 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11)) { 170 (boot_cpu_data.x86_model >= 7 || boot_cpu_data.x86_model <= 11))
173 es7000_update_apic_to_cluster(); 171 return 1;
174 apic->wait_for_init_deassert = NULL;
175 apic->wakeup_cpu = wakeup_secondary_cpu_via_mip;
176 }
177 172
178 return 0; 173 return 0;
179} 174}
180 175
181static void __init setup_unisys(void) 176static void setup_unisys(void)
182{ 177{
183 /* 178 /*
184 * Determine the generation of the ES7000 currently running. 179 * Determine the generation of the ES7000 currently running.
@@ -192,14 +187,12 @@ static void __init setup_unisys(void)
192 else 187 else
193 es7000_plat = ES7000_CLASSIC; 188 es7000_plat = ES7000_CLASSIC;
194 ioapic_renumber_irq = es7000_rename_gsi; 189 ioapic_renumber_irq = es7000_rename_gsi;
195
196 x86_quirks->update_apic = es7000_update_apic;
197} 190}
198 191
199/* 192/*
200 * Parse the OEM Table: 193 * Parse the OEM Table:
201 */ 194 */
202static int __init parse_unisys_oem(char *oemptr) 195static int parse_unisys_oem(char *oemptr)
203{ 196{
204 int i; 197 int i;
205 int success = 0; 198 int success = 0;
@@ -261,7 +254,7 @@ static int __init parse_unisys_oem(char *oemptr)
261} 254}
262 255
263#ifdef CONFIG_ACPI 256#ifdef CONFIG_ACPI
264static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr) 257static int find_unisys_acpi_oem_table(unsigned long *oem_addr)
265{ 258{
266 struct acpi_table_header *header = NULL; 259 struct acpi_table_header *header = NULL;
267 struct es7000_oem_table *table; 260 struct es7000_oem_table *table;
@@ -292,7 +285,7 @@ static int __init find_unisys_acpi_oem_table(unsigned long *oem_addr)
292 return 0; 285 return 0;
293} 286}
294 287
295static void __init unmap_unisys_acpi_oem_table(unsigned long oem_addr) 288static void unmap_unisys_acpi_oem_table(unsigned long oem_addr)
296{ 289{
297 if (!oem_addr) 290 if (!oem_addr)
298 return; 291 return;
@@ -310,8 +303,10 @@ static int es7000_check_dsdt(void)
310 return 0; 303 return 0;
311} 304}
312 305
306static int es7000_acpi_ret;
307
313/* Hook from generic ACPI tables.c */ 308/* Hook from generic ACPI tables.c */
314static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 309static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
315{ 310{
316 unsigned long oem_addr = 0; 311 unsigned long oem_addr = 0;
317 int check_dsdt; 312 int check_dsdt;
@@ -332,10 +327,26 @@ static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
332 */ 327 */
333 unmap_unisys_acpi_oem_table(oem_addr); 328 unmap_unisys_acpi_oem_table(oem_addr);
334 } 329 }
335 return ret; 330
331 es7000_acpi_ret = ret;
332
333 return ret && !es7000_apic_is_cluster();
336} 334}
335
336static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
337{
338 int ret = es7000_acpi_ret;
339
340 return ret && es7000_apic_is_cluster();
341}
342
337#else /* !CONFIG_ACPI: */ 343#else /* !CONFIG_ACPI: */
338static int __init es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 344static int es7000_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
345{
346 return 0;
347}
348
349static int es7000_acpi_madt_oem_check_cluster(char *oem_id, char *oem_table_id)
339{ 350{
340 return 0; 351 return 0;
341} 352}
@@ -349,8 +360,7 @@ static void es7000_spin(int n)
349 rep_nop(); 360 rep_nop();
350} 361}
351 362
352static int __init 363static int es7000_mip_write(struct mip_reg *mip_reg)
353es7000_mip_write(struct mip_reg *mip_reg)
354{ 364{
355 int status = 0; 365 int status = 0;
356 int spin; 366 int spin;
@@ -383,7 +393,7 @@ es7000_mip_write(struct mip_reg *mip_reg)
383 return status; 393 return status;
384} 394}
385 395
386static void __init es7000_enable_apic_mode(void) 396static void es7000_enable_apic_mode(void)
387{ 397{
388 struct mip_reg es7000_mip_reg; 398 struct mip_reg es7000_mip_reg;
389 int mip_status; 399 int mip_status;
@@ -416,11 +426,8 @@ static void es7000_vector_allocation_domain(int cpu, cpumask_t *retmask)
416 426
417static void es7000_wait_for_init_deassert(atomic_t *deassert) 427static void es7000_wait_for_init_deassert(atomic_t *deassert)
418{ 428{
419#ifndef CONFIG_ES7000_CLUSTERED_APIC
420 while (!atomic_read(deassert)) 429 while (!atomic_read(deassert))
421 cpu_relax(); 430 cpu_relax();
422#endif
423 return;
424} 431}
425 432
426static unsigned int es7000_get_apic_id(unsigned long x) 433static unsigned int es7000_get_apic_id(unsigned long x)
@@ -565,72 +572,24 @@ static int es7000_check_phys_apicid_present(int cpu_physical_apicid)
565 return 1; 572 return 1;
566} 573}
567 574
568static unsigned int
569es7000_cpu_mask_to_apicid_cluster(const struct cpumask *cpumask)
570{
571 int cpus_found = 0;
572 int num_bits_set;
573 int apicid;
574 int cpu;
575
576 num_bits_set = cpumask_weight(cpumask);
577 /* Return id to all */
578 if (num_bits_set == nr_cpu_ids)
579 return 0xFF;
580 /*
581 * The cpus in the mask must all be on the apic cluster. If are not
582 * on the same apicid cluster return default value of target_cpus():
583 */
584 cpu = cpumask_first(cpumask);
585 apicid = es7000_cpu_to_logical_apicid(cpu);
586
587 while (cpus_found < num_bits_set) {
588 if (cpumask_test_cpu(cpu, cpumask)) {
589 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
590
591 if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
592 WARN(1, "Not a valid mask!");
593
594 return 0xFF;
595 }
596 apicid = new_apicid;
597 cpus_found++;
598 }
599 cpu++;
600 }
601 return apicid;
602}
603
604static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask) 575static unsigned int es7000_cpu_mask_to_apicid(const cpumask_t *cpumask)
605{ 576{
606 int cpus_found = 0; 577 unsigned int round = 0;
607 int num_bits_set; 578 int cpu, uninitialized_var(apicid);
608 int apicid;
609 int cpu;
610 579
611 num_bits_set = cpus_weight(*cpumask);
612 /* Return id to all */
613 if (num_bits_set == nr_cpu_ids)
614 return es7000_cpu_to_logical_apicid(0);
615 /* 580 /*
616 * The cpus in the mask must all be on the apic cluster. If are not 581 * The cpus in the mask must all be on the apic cluster.
617 * on the same apicid cluster return default value of target_cpus():
618 */ 582 */
619 cpu = first_cpu(*cpumask); 583 for_each_cpu(cpu, cpumask) {
620 apicid = es7000_cpu_to_logical_apicid(cpu); 584 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
621 while (cpus_found < num_bits_set) {
622 if (cpu_isset(cpu, *cpumask)) {
623 int new_apicid = es7000_cpu_to_logical_apicid(cpu);
624 585
625 if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) { 586 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
626 printk("%s: Not a valid mask!\n", __func__); 587 WARN(1, "Not a valid mask!");
627 588
628 return es7000_cpu_to_logical_apicid(0); 589 return BAD_APICID;
629 }
630 apicid = new_apicid;
631 cpus_found++;
632 } 590 }
633 cpu++; 591 apicid = new_apicid;
592 round++;
634 } 593 }
635 return apicid; 594 return apicid;
636} 595}
@@ -659,37 +618,103 @@ static int es7000_phys_pkg_id(int cpuid_apic, int index_msb)
659 return cpuid_apic >> index_msb; 618 return cpuid_apic >> index_msb;
660} 619}
661 620
662void __init es7000_update_apic_to_cluster(void)
663{
664 apic->target_cpus = target_cpus_cluster;
665 apic->irq_delivery_mode = dest_LowestPrio;
666 /* logical delivery broadcast to all procs: */
667 apic->irq_dest_mode = 1;
668
669 apic->init_apic_ldr = es7000_init_apic_ldr_cluster;
670
671 apic->cpu_mask_to_apicid = es7000_cpu_mask_to_apicid_cluster;
672}
673
674static int probe_es7000(void) 621static int probe_es7000(void)
675{ 622{
676 /* probed later in mptable/ACPI hooks */ 623 /* probed later in mptable/ACPI hooks */
677 return 0; 624 return 0;
678} 625}
679 626
680static __init int 627static int es7000_mps_ret;
681es7000_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) 628static int es7000_mps_oem_check(struct mpc_table *mpc, char *oem,
629 char *productid)
682{ 630{
631 int ret = 0;
632
683 if (mpc->oemptr) { 633 if (mpc->oemptr) {
684 struct mpc_oemtable *oem_table = 634 struct mpc_oemtable *oem_table =
685 (struct mpc_oemtable *)mpc->oemptr; 635 (struct mpc_oemtable *)mpc->oemptr;
686 636
687 if (!strncmp(oem, "UNISYS", 6)) 637 if (!strncmp(oem, "UNISYS", 6))
688 return parse_unisys_oem((char *)oem_table); 638 ret = parse_unisys_oem((char *)oem_table);
689 } 639 }
690 return 0; 640
641 es7000_mps_ret = ret;
642
643 return ret && !es7000_apic_is_cluster();
691} 644}
692 645
646static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
647 char *productid)
648{
649 int ret = es7000_mps_ret;
650
651 return ret && es7000_apic_is_cluster();
652}
653
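
The two mps_oem_check variants above are the selection predicates for the now-split drivers: parse_unisys_oem()'s result is cached in es7000_mps_ret, and each driver claims the platform only when its delivery mode matches, so the generic probe loop in probe_32.c (further down in this diff) ends up with exactly one ES7000 entry. The sketch below is a user-space model of that mutually exclusive probing; all names and the probe-array shape are illustrative, not the kernel's.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in state: what parse_unisys_oem() found and which mode the box wants. */
static int  mps_ret;          /* like es7000_mps_ret              */
static bool is_cluster;       /* like es7000_apic_is_cluster()    */

static bool check_es7000(void)         { return mps_ret && !is_cluster; }
static bool check_es7000_cluster(void) { return mps_ret &&  is_cluster; }
static bool check_default(void)        { return true; }   /* must be last */

struct fake_apic {
	const char *name;
	bool (*mps_oem_check)(void);
};

static struct fake_apic probe[] = {
	{ "es7000",         check_es7000 },
	{ "es7000-cluster", check_es7000_cluster },
	{ "default",        check_default },
};

int main(void)
{
	mps_ret    = 1;      /* OEM table parsed successfully  */
	is_cluster = true;   /* firmware asked for cluster mode */

	for (unsigned i = 0; i < sizeof(probe) / sizeof(probe[0]); i++) {
		if (probe[i].mps_oem_check()) {
			printf("Switched to APIC driver `%s'.\n", probe[i].name);
			break;
		}
	}
	return 0;
}
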
654struct apic apic_es7000_cluster = {
655
656 .name = "es7000",
657 .probe = probe_es7000,
658 .acpi_madt_oem_check = es7000_acpi_madt_oem_check_cluster,
659 .apic_id_registered = es7000_apic_id_registered,
660
661 .irq_delivery_mode = dest_LowestPrio,
662 /* logical delivery broadcast to all procs: */
663 .irq_dest_mode = 1,
664
665 .target_cpus = target_cpus_cluster,
666 .disable_esr = 1,
667 .dest_logical = 0,
668 .check_apicid_used = es7000_check_apicid_used,
669 .check_apicid_present = es7000_check_apicid_present,
670
671 .vector_allocation_domain = es7000_vector_allocation_domain,
672 .init_apic_ldr = es7000_init_apic_ldr_cluster,
673
674 .ioapic_phys_id_map = es7000_ioapic_phys_id_map,
675 .setup_apic_routing = es7000_setup_apic_routing,
676 .multi_timer_check = NULL,
677 .apicid_to_node = es7000_apicid_to_node,
678 .cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
679 .cpu_present_to_apicid = es7000_cpu_present_to_apicid,
680 .apicid_to_cpu_present = es7000_apicid_to_cpu_present,
681 .setup_portio_remap = NULL,
682 .check_phys_apicid_present = es7000_check_phys_apicid_present,
683 .enable_apic_mode = es7000_enable_apic_mode,
684 .phys_pkg_id = es7000_phys_pkg_id,
685 .mps_oem_check = es7000_mps_oem_check_cluster,
686
687 .get_apic_id = es7000_get_apic_id,
688 .set_apic_id = NULL,
689 .apic_id_mask = 0xFF << 24,
690
691 .cpu_mask_to_apicid = es7000_cpu_mask_to_apicid,
692 .cpu_mask_to_apicid_and = es7000_cpu_mask_to_apicid_and,
693
694 .send_IPI_mask = es7000_send_IPI_mask,
695 .send_IPI_mask_allbutself = NULL,
696 .send_IPI_allbutself = es7000_send_IPI_allbutself,
697 .send_IPI_all = es7000_send_IPI_all,
698 .send_IPI_self = default_send_IPI_self,
699
700 .wakeup_secondary_cpu = wakeup_secondary_cpu_via_mip,
701
702 .trampoline_phys_low = 0x467,
703 .trampoline_phys_high = 0x469,
704
705 .wait_for_init_deassert = NULL,
706
707 /* Nothing to do for most platforms, since cleared by the INIT cycle: */
708 .smp_callin_clear_local_apic = NULL,
709 .inquire_remote_apic = default_inquire_remote_apic,
710
711 .read = native_apic_mem_read,
712 .write = native_apic_mem_write,
713 .icr_read = native_apic_icr_read,
714 .icr_write = native_apic_icr_write,
715 .wait_icr_idle = native_apic_wait_icr_idle,
716 .safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
717};
693 718
694struct apic apic_es7000 = { 719struct apic apic_es7000 = {
695 720
@@ -737,8 +762,6 @@ struct apic apic_es7000 = {
737 .send_IPI_all = es7000_send_IPI_all, 762 .send_IPI_all = es7000_send_IPI_all,
738 .send_IPI_self = default_send_IPI_self, 763 .send_IPI_self = default_send_IPI_self,
739 764
740 .wakeup_cpu = NULL,
741
742 .trampoline_phys_low = 0x467, 765 .trampoline_phys_low = 0x467,
743 .trampoline_phys_high = 0x469, 766 .trampoline_phys_high = 0x469,
744 767
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index d9d6d61eed82..ba2fc6465534 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -69,7 +69,7 @@ struct mpc_trans {
69/* x86_quirks member */ 69/* x86_quirks member */
70static int mpc_record; 70static int mpc_record;
71 71
72static __cpuinitdata struct mpc_trans *translation_table[MAX_MPC_ENTRY]; 72static struct mpc_trans *translation_table[MAX_MPC_ENTRY];
73 73
74int mp_bus_id_to_node[MAX_MP_BUSSES]; 74int mp_bus_id_to_node[MAX_MP_BUSSES];
75int mp_bus_id_to_local[MAX_MP_BUSSES]; 75int mp_bus_id_to_local[MAX_MP_BUSSES];
@@ -256,13 +256,6 @@ static int __init numaq_setup_ioapic_ids(void)
256 return 1; 256 return 1;
257} 257}
258 258
259static int __init numaq_update_apic(void)
260{
261 apic->wakeup_cpu = wakeup_secondary_cpu_via_nmi;
262
263 return 0;
264}
265
266static struct x86_quirks numaq_x86_quirks __initdata = { 259static struct x86_quirks numaq_x86_quirks __initdata = {
267 .arch_pre_time_init = numaq_pre_time_init, 260 .arch_pre_time_init = numaq_pre_time_init,
268 .arch_time_init = NULL, 261 .arch_time_init = NULL,
@@ -278,7 +271,6 @@ static struct x86_quirks numaq_x86_quirks __initdata = {
278 .mpc_oem_pci_bus = mpc_oem_pci_bus, 271 .mpc_oem_pci_bus = mpc_oem_pci_bus,
279 .smp_read_mpc_oem = smp_read_mpc_oem, 272 .smp_read_mpc_oem = smp_read_mpc_oem,
280 .setup_ioapic_ids = numaq_setup_ioapic_ids, 273 .setup_ioapic_ids = numaq_setup_ioapic_ids,
281 .update_apic = numaq_update_apic,
282}; 274};
283 275
284static __init void early_check_numaq(void) 276static __init void early_check_numaq(void)
@@ -546,7 +538,7 @@ struct apic apic_numaq = {
546 .send_IPI_all = numaq_send_IPI_all, 538 .send_IPI_all = numaq_send_IPI_all,
547 .send_IPI_self = default_send_IPI_self, 539 .send_IPI_self = default_send_IPI_self,
548 540
549 .wakeup_cpu = NULL, 541 .wakeup_secondary_cpu = wakeup_secondary_cpu_via_nmi,
550 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW, 542 .trampoline_phys_low = NUMAQ_TRAMPOLINE_PHYS_LOW,
551 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH, 543 .trampoline_phys_high = NUMAQ_TRAMPOLINE_PHYS_HIGH,
552 544
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 3a730fa574bb..141c99a1c264 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -138,7 +138,6 @@ struct apic apic_default = {
138 .send_IPI_all = default_send_IPI_all, 138 .send_IPI_all = default_send_IPI_all,
139 .send_IPI_self = default_send_IPI_self, 139 .send_IPI_self = default_send_IPI_self,
140 140
141 .wakeup_cpu = NULL,
142 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 141 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
143 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 142 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
144 143
@@ -159,6 +158,7 @@ extern struct apic apic_numaq;
159extern struct apic apic_summit; 158extern struct apic apic_summit;
160extern struct apic apic_bigsmp; 159extern struct apic apic_bigsmp;
161extern struct apic apic_es7000; 160extern struct apic apic_es7000;
161extern struct apic apic_es7000_cluster;
162extern struct apic apic_default; 162extern struct apic apic_default;
163 163
164struct apic *apic = &apic_default; 164struct apic *apic = &apic_default;
@@ -176,6 +176,7 @@ static struct apic *apic_probe[] __initdata = {
176#endif 176#endif
177#ifdef CONFIG_X86_ES7000 177#ifdef CONFIG_X86_ES7000
178 &apic_es7000, 178 &apic_es7000,
179 &apic_es7000_cluster,
179#endif 180#endif
180 &apic_default, /* must be last */ 181 &apic_default, /* must be last */
181 NULL, 182 NULL,
@@ -197,9 +198,6 @@ static int __init parse_apic(char *arg)
197 } 198 }
198 } 199 }
199 200
200 if (x86_quirks->update_apic)
201 x86_quirks->update_apic();
202
203 /* Parsed again by __setup for debug/verbose */ 201 /* Parsed again by __setup for debug/verbose */
204 return 0; 202 return 0;
205} 203}
@@ -218,8 +216,6 @@ void __init generic_bigsmp_probe(void)
218 if (!cmdline_apic && apic == &apic_default) { 216 if (!cmdline_apic && apic == &apic_default) {
219 if (apic_bigsmp.probe()) { 217 if (apic_bigsmp.probe()) {
220 apic = &apic_bigsmp; 218 apic = &apic_bigsmp;
221 if (x86_quirks->update_apic)
222 x86_quirks->update_apic();
223 printk(KERN_INFO "Overriding APIC driver with %s\n", 219 printk(KERN_INFO "Overriding APIC driver with %s\n",
224 apic->name); 220 apic->name);
225 } 221 }
@@ -240,9 +236,6 @@ void __init generic_apic_probe(void)
240 /* Not visible without early console */ 236 /* Not visible without early console */
241 if (!apic_probe[i]) 237 if (!apic_probe[i])
242 panic("Didn't find an APIC driver"); 238 panic("Didn't find an APIC driver");
243
244 if (x86_quirks->update_apic)
245 x86_quirks->update_apic();
246 } 239 }
247 printk(KERN_INFO "Using APIC driver %s\n", apic->name); 240 printk(KERN_INFO "Using APIC driver %s\n", apic->name);
248} 241}
@@ -262,8 +255,6 @@ generic_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
262 255
263 if (!cmdline_apic) { 256 if (!cmdline_apic) {
264 apic = apic_probe[i]; 257 apic = apic_probe[i];
265 if (x86_quirks->update_apic)
266 x86_quirks->update_apic();
267 printk(KERN_INFO "Switched to APIC driver `%s'.\n", 258 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
268 apic->name); 259 apic->name);
269 } 260 }
@@ -284,8 +275,6 @@ int __init default_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
284 275
285 if (!cmdline_apic) { 276 if (!cmdline_apic) {
286 apic = apic_probe[i]; 277 apic = apic_probe[i];
287 if (x86_quirks->update_apic)
288 x86_quirks->update_apic();
289 printk(KERN_INFO "Switched to APIC driver `%s'.\n", 278 printk(KERN_INFO "Switched to APIC driver `%s'.\n",
290 apic->name); 279 apic->name);
291 } 280 }
diff --git a/arch/x86/kernel/apic/probe_64.c b/arch/x86/kernel/apic/probe_64.c
index e7c163661c77..8d7748efe6a8 100644
--- a/arch/x86/kernel/apic/probe_64.c
+++ b/arch/x86/kernel/apic/probe_64.c
@@ -68,9 +68,6 @@ void __init default_setup_apic_routing(void)
68 apic = &apic_physflat; 68 apic = &apic_physflat;
69 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name); 69 printk(KERN_INFO "Setting APIC routing to %s\n", apic->name);
70 } 70 }
71
72 if (x86_quirks->update_apic)
73 x86_quirks->update_apic();
74} 71}
75 72
76/* Same for both flat and physical. */ 73/* Same for both flat and physical. */
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index cfe7b09015d8..aac52fa873ff 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -48,7 +48,7 @@
48#include <linux/gfp.h> 48#include <linux/gfp.h>
49#include <linux/smp.h> 49#include <linux/smp.h>
50 50
51static inline unsigned summit_get_apic_id(unsigned long x) 51static unsigned summit_get_apic_id(unsigned long x)
52{ 52{
53 return (x >> 24) & 0xFF; 53 return (x >> 24) & 0xFF;
54} 54}
@@ -58,7 +58,7 @@ static inline void summit_send_IPI_mask(const cpumask_t *mask, int vector)
58 default_send_IPI_mask_sequence_logical(mask, vector); 58 default_send_IPI_mask_sequence_logical(mask, vector);
59} 59}
60 60
61static inline void summit_send_IPI_allbutself(int vector) 61static void summit_send_IPI_allbutself(int vector)
62{ 62{
63 cpumask_t mask = cpu_online_map; 63 cpumask_t mask = cpu_online_map;
64 cpu_clear(smp_processor_id(), mask); 64 cpu_clear(smp_processor_id(), mask);
@@ -67,7 +67,7 @@ static inline void summit_send_IPI_allbutself(int vector)
67 summit_send_IPI_mask(&mask, vector); 67 summit_send_IPI_mask(&mask, vector);
68} 68}
69 69
70static inline void summit_send_IPI_all(int vector) 70static void summit_send_IPI_all(int vector)
71{ 71{
72 summit_send_IPI_mask(&cpu_online_map, vector); 72 summit_send_IPI_mask(&cpu_online_map, vector);
73} 73}
@@ -77,13 +77,13 @@ static inline void summit_send_IPI_all(int vector)
77extern int use_cyclone; 77extern int use_cyclone;
78 78
79#ifdef CONFIG_X86_SUMMIT_NUMA 79#ifdef CONFIG_X86_SUMMIT_NUMA
80extern void setup_summit(void); 80static void setup_summit(void);
81#else 81#else
82#define setup_summit() {} 82static inline void setup_summit(void) {}
83#endif 83#endif
84 84
85static inline int 85static int summit_mps_oem_check(struct mpc_table *mpc, char *oem,
86summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid) 86 char *productid)
87{ 87{
88 if (!strncmp(oem, "IBM ENSW", 8) && 88 if (!strncmp(oem, "IBM ENSW", 8) &&
89 (!strncmp(productid, "VIGIL SMP", 9) 89 (!strncmp(productid, "VIGIL SMP", 9)
@@ -98,7 +98,7 @@ summit_mps_oem_check(struct mpc_table *mpc, char *oem, char *productid)
98} 98}
99 99
100/* Hook from generic ACPI tables.c */ 100/* Hook from generic ACPI tables.c */
101static inline int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id) 101static int summit_acpi_madt_oem_check(char *oem_id, char *oem_table_id)
102{ 102{
103 if (!strncmp(oem_id, "IBM", 3) && 103 if (!strncmp(oem_id, "IBM", 3) &&
104 (!strncmp(oem_table_id, "SERVIGIL", 8) 104 (!strncmp(oem_table_id, "SERVIGIL", 8)
@@ -186,7 +186,7 @@ static inline int is_WPEG(struct rio_detail *rio){
186 186
187#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER) 187#define SUMMIT_APIC_DFR_VALUE (APIC_DFR_CLUSTER)
188 188
189static inline const cpumask_t *summit_target_cpus(void) 189static const cpumask_t *summit_target_cpus(void)
190{ 190{
191 /* CPU_MASK_ALL (0xff) has undefined behaviour with 191 /* CPU_MASK_ALL (0xff) has undefined behaviour with
192 * dest_LowestPrio mode logical clustered apic interrupt routing 192 * dest_LowestPrio mode logical clustered apic interrupt routing
@@ -195,19 +195,18 @@ static inline const cpumask_t *summit_target_cpus(void)
195 return &cpumask_of_cpu(0); 195 return &cpumask_of_cpu(0);
196} 196}
197 197
198static inline unsigned long 198static unsigned long summit_check_apicid_used(physid_mask_t bitmap, int apicid)
199summit_check_apicid_used(physid_mask_t bitmap, int apicid)
200{ 199{
201 return 0; 200 return 0;
202} 201}
203 202
204/* we don't use the phys_cpu_present_map to indicate apicid presence */ 203/* we don't use the phys_cpu_present_map to indicate apicid presence */
205static inline unsigned long summit_check_apicid_present(int bit) 204static unsigned long summit_check_apicid_present(int bit)
206{ 205{
207 return 1; 206 return 1;
208} 207}
209 208
210static inline void summit_init_apic_ldr(void) 209static void summit_init_apic_ldr(void)
211{ 210{
212 unsigned long val, id; 211 unsigned long val, id;
213 int count = 0; 212 int count = 0;
@@ -234,18 +233,18 @@ static inline void summit_init_apic_ldr(void)
234 apic_write(APIC_LDR, val); 233 apic_write(APIC_LDR, val);
235} 234}
236 235
237static inline int summit_apic_id_registered(void) 236static int summit_apic_id_registered(void)
238{ 237{
239 return 1; 238 return 1;
240} 239}
241 240
242static inline void summit_setup_apic_routing(void) 241static void summit_setup_apic_routing(void)
243{ 242{
244 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n", 243 printk("Enabling APIC mode: Summit. Using %d I/O APICs\n",
245 nr_ioapics); 244 nr_ioapics);
246} 245}
247 246
248static inline int summit_apicid_to_node(int logical_apicid) 247static int summit_apicid_to_node(int logical_apicid)
249{ 248{
250#ifdef CONFIG_SMP 249#ifdef CONFIG_SMP
251 return apicid_2_node[hard_smp_processor_id()]; 250 return apicid_2_node[hard_smp_processor_id()];
@@ -266,7 +265,7 @@ static inline int summit_cpu_to_logical_apicid(int cpu)
266#endif 265#endif
267} 266}
268 267
269static inline int summit_cpu_present_to_apicid(int mps_cpu) 268static int summit_cpu_present_to_apicid(int mps_cpu)
270{ 269{
271 if (mps_cpu < nr_cpu_ids) 270 if (mps_cpu < nr_cpu_ids)
272 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu); 271 return (int)per_cpu(x86_bios_cpu_apicid, mps_cpu);
@@ -274,64 +273,44 @@ static inline int summit_cpu_present_to_apicid(int mps_cpu)
274 return BAD_APICID; 273 return BAD_APICID;
275} 274}
276 275
277static inline physid_mask_t 276static physid_mask_t summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
278summit_ioapic_phys_id_map(physid_mask_t phys_id_map)
279{ 277{
280 /* For clustered we don't have a good way to do this yet - hack */ 278 /* For clustered we don't have a good way to do this yet - hack */
281 return physids_promote(0x0F); 279 return physids_promote(0x0F);
282} 280}
283 281
284static inline physid_mask_t summit_apicid_to_cpu_present(int apicid) 282static physid_mask_t summit_apicid_to_cpu_present(int apicid)
285{ 283{
286 return physid_mask_of_physid(0); 284 return physid_mask_of_physid(0);
287} 285}
288 286
289static inline void summit_setup_portio_remap(void) 287static int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
290{
291}
292
293static inline int summit_check_phys_apicid_present(int boot_cpu_physical_apicid)
294{ 288{
295 return 1; 289 return 1;
296} 290}
297 291
298static inline unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask) 292static unsigned int summit_cpu_mask_to_apicid(const cpumask_t *cpumask)
299{ 293{
300 int cpus_found = 0; 294 unsigned int round = 0;
301 int num_bits_set; 295 int cpu, apicid = 0;
302 int apicid; 296
303 int cpu;
304
305 num_bits_set = cpus_weight(*cpumask);
306 /* Return id to all */
307 if (num_bits_set >= nr_cpu_ids)
308 return 0xFF;
309 /* 297 /*
310 * The cpus in the mask must all be on the apic cluster. If are not 298 * The cpus in the mask must all be on the apic cluster.
311 * on the same apicid cluster return default value of target_cpus():
312 */ 299 */
313 cpu = first_cpu(*cpumask); 300 for_each_cpu(cpu, cpumask) {
314 apicid = summit_cpu_to_logical_apicid(cpu); 301 int new_apicid = summit_cpu_to_logical_apicid(cpu);
315 302
316 while (cpus_found < num_bits_set) { 303 if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
317 if (cpu_isset(cpu, *cpumask)) { 304 printk("%s: Not a valid mask!\n", __func__);
318 int new_apicid = summit_cpu_to_logical_apicid(cpu); 305 return BAD_APICID;
319
320 if (APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
321 printk ("%s: Not a valid mask!\n", __func__);
322
323 return 0xFF;
324 }
325 apicid = apicid | new_apicid;
326 cpus_found++;
327 } 306 }
328 cpu++; 307 apicid |= new_apicid;
308 round++;
329 } 309 }
330 return apicid; 310 return apicid;
331} 311}
332 312
333static inline unsigned int 313static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
334summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
335 const struct cpumask *andmask) 314 const struct cpumask *andmask)
336{ 315{
337 int apicid = summit_cpu_to_logical_apicid(0); 316 int apicid = summit_cpu_to_logical_apicid(0);
@@ -356,7 +335,7 @@ summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
356 * 335 *
357 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID. 336 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
358 */ 337 */
359static inline int summit_phys_pkg_id(int cpuid_apic, int index_msb) 338static int summit_phys_pkg_id(int cpuid_apic, int index_msb)
360{ 339{
361 return hard_smp_processor_id() >> index_msb; 340 return hard_smp_processor_id() >> index_msb;
362} 341}
@@ -381,15 +360,15 @@ static void summit_vector_allocation_domain(int cpu, cpumask_t *retmask)
381} 360}
382 361
383#ifdef CONFIG_X86_SUMMIT_NUMA 362#ifdef CONFIG_X86_SUMMIT_NUMA
384static struct rio_table_hdr *rio_table_hdr __initdata; 363static struct rio_table_hdr *rio_table_hdr;
385static struct scal_detail *scal_devs[MAX_NUMNODES] __initdata; 364static struct scal_detail *scal_devs[MAX_NUMNODES];
386static struct rio_detail *rio_devs[MAX_NUMNODES*4] __initdata; 365static struct rio_detail *rio_devs[MAX_NUMNODES*4];
387 366
388#ifndef CONFIG_X86_NUMAQ 367#ifndef CONFIG_X86_NUMAQ
389static int mp_bus_id_to_node[MAX_MP_BUSSES] __initdata; 368static int mp_bus_id_to_node[MAX_MP_BUSSES];
390#endif 369#endif
391 370
392static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus) 371static int setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
393{ 372{
394 int twister = 0, node = 0; 373 int twister = 0, node = 0;
395 int i, bus, num_buses; 374 int i, bus, num_buses;
@@ -451,7 +430,7 @@ static int __init setup_pci_node_map_for_wpeg(int wpeg_num, int last_bus)
451 return bus; 430 return bus;
452} 431}
453 432
454static int __init build_detail_arrays(void) 433static int build_detail_arrays(void)
455{ 434{
456 unsigned long ptr; 435 unsigned long ptr;
457 int i, scal_detail_size, rio_detail_size; 436 int i, scal_detail_size, rio_detail_size;
@@ -485,7 +464,7 @@ static int __init build_detail_arrays(void)
485 return 1; 464 return 1;
486} 465}
487 466
488void __init setup_summit(void) 467void setup_summit(void)
489{ 468{
490 unsigned long ptr; 469 unsigned long ptr;
491 unsigned short offset; 470 unsigned short offset;
@@ -583,7 +562,6 @@ struct apic apic_summit = {
583 .send_IPI_all = summit_send_IPI_all, 562 .send_IPI_all = summit_send_IPI_all,
584 .send_IPI_self = default_send_IPI_self, 563 .send_IPI_self = default_send_IPI_self,
585 564
586 .wakeup_cpu = NULL,
587 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 565 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
588 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 566 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
589 567
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index 354b9c45601d..8fb87b6dd633 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -224,7 +224,6 @@ struct apic apic_x2apic_cluster = {
224 .send_IPI_all = x2apic_send_IPI_all, 224 .send_IPI_all = x2apic_send_IPI_all,
225 .send_IPI_self = x2apic_send_IPI_self, 225 .send_IPI_self = x2apic_send_IPI_self,
226 226
227 .wakeup_cpu = NULL,
228 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 227 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
229 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 228 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
230 .wait_for_init_deassert = NULL, 229 .wait_for_init_deassert = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 5bcb174409bc..23625b9f98b2 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -213,7 +213,6 @@ struct apic apic_x2apic_phys = {
213 .send_IPI_all = x2apic_send_IPI_all, 213 .send_IPI_all = x2apic_send_IPI_all,
214 .send_IPI_self = x2apic_send_IPI_self, 214 .send_IPI_self = x2apic_send_IPI_self,
215 215
216 .wakeup_cpu = NULL,
217 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 216 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
218 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 217 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
219 .wait_for_init_deassert = NULL, 218 .wait_for_init_deassert = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index 20b4ad07c3a1..1bd6da1f8fad 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -7,28 +7,28 @@
7 * 7 *
8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved. 8 * Copyright (C) 2007-2008 Silicon Graphics, Inc. All rights reserved.
9 */ 9 */
10
11#include <linux/kernel.h>
12#include <linux/threads.h>
13#include <linux/cpu.h>
14#include <linux/cpumask.h> 10#include <linux/cpumask.h>
11#include <linux/hardirq.h>
12#include <linux/proc_fs.h>
13#include <linux/threads.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
15#include <linux/string.h> 16#include <linux/string.h>
16#include <linux/ctype.h> 17#include <linux/ctype.h>
17#include <linux/init.h>
18#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/module.h>
20#include <linux/hardirq.h>
21#include <linux/timer.h> 19#include <linux/timer.h>
22#include <linux/proc_fs.h> 20#include <linux/cpu.h>
23#include <asm/current.h> 21#include <linux/init.h>
24#include <asm/smp.h> 22
25#include <asm/apic.h>
26#include <asm/ipi.h>
27#include <asm/pgtable.h>
28#include <asm/uv/uv.h>
29#include <asm/uv/uv_mmrs.h> 23#include <asm/uv/uv_mmrs.h>
30#include <asm/uv/uv_hub.h> 24#include <asm/uv/uv_hub.h>
25#include <asm/current.h>
26#include <asm/pgtable.h>
31#include <asm/uv/bios.h> 27#include <asm/uv/bios.h>
28#include <asm/uv/uv.h>
29#include <asm/apic.h>
30#include <asm/ipi.h>
31#include <asm/smp.h>
32 32
33DEFINE_PER_CPU(int, x2apic_extra_bits); 33DEFINE_PER_CPU(int, x2apic_extra_bits);
34 34
@@ -91,24 +91,28 @@ static void uv_vector_allocation_domain(int cpu, struct cpumask *retmask)
91 cpumask_set_cpu(cpu, retmask); 91 cpumask_set_cpu(cpu, retmask);
92} 92}
93 93
94int uv_wakeup_secondary(int phys_apicid, unsigned int start_rip) 94static int uv_wakeup_secondary(int phys_apicid, unsigned long start_rip)
95{ 95{
96#ifdef CONFIG_SMP
96 unsigned long val; 97 unsigned long val;
97 int pnode; 98 int pnode;
98 99
99 pnode = uv_apicid_to_pnode(phys_apicid); 100 pnode = uv_apicid_to_pnode(phys_apicid);
100 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 101 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
101 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 102 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
102 (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 103 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
103 APIC_DM_INIT; 104 APIC_DM_INIT;
104 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 105 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
105 mdelay(10); 106 mdelay(10);
106 107
107 val = (1UL << UVH_IPI_INT_SEND_SHFT) | 108 val = (1UL << UVH_IPI_INT_SEND_SHFT) |
108 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) | 109 (phys_apicid << UVH_IPI_INT_APIC_ID_SHFT) |
109 (((long)start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) | 110 ((start_rip << UVH_IPI_INT_VECTOR_SHFT) >> 12) |
110 APIC_DM_STARTUP; 111 APIC_DM_STARTUP;
111 uv_write_global_mmr64(pnode, UVH_IPI_INT, val); 112 uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
113
114 atomic_set(&init_deasserted, 1);
115#endif
112 return 0; 116 return 0;
113} 117}
114 118
@@ -285,7 +289,7 @@ struct apic apic_x2apic_uv_x = {
285 .send_IPI_all = uv_send_IPI_all, 289 .send_IPI_all = uv_send_IPI_all,
286 .send_IPI_self = uv_send_IPI_self, 290 .send_IPI_self = uv_send_IPI_self,
287 291
288 .wakeup_cpu = NULL, 292 .wakeup_secondary_cpu = uv_wakeup_secondary,
289 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW, 293 .trampoline_phys_low = DEFAULT_TRAMPOLINE_PHYS_LOW,
290 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH, 294 .trampoline_phys_high = DEFAULT_TRAMPOLINE_PHYS_HIGH,
291 .wait_for_init_deassert = NULL, 295 .wait_for_init_deassert = NULL,
@@ -365,7 +369,7 @@ static __init void map_high(char *id, unsigned long base, int shift,
365 paddr = base << shift; 369 paddr = base << shift;
366 bytes = (1UL << shift) * (max_pnode + 1); 370 bytes = (1UL << shift) * (max_pnode + 1);
367 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr, 371 printk(KERN_INFO "UV: Map %s_HI 0x%lx - 0x%lx\n", id, paddr,
368 paddr + bytes); 372 paddr + bytes);
369 if (map_type == map_uc) 373 if (map_type == map_uc)
370 init_extra_mapping_uc(paddr, bytes); 374 init_extra_mapping_uc(paddr, bytes);
371 else 375 else
@@ -528,7 +532,7 @@ late_initcall(uv_init_heartbeat);
528 532
529/* 533/*
530 * Called on each cpu to initialize the per_cpu UV data area. 534 * Called on each cpu to initialize the per_cpu UV data area.
531 * ZZZ hotplug not supported yet 535 * FIXME: hotplug not supported yet
532 */ 536 */
533void __cpuinit uv_cpu_init(void) 537void __cpuinit uv_cpu_init(void)
534{ 538{
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 01b1244ef1c0..d67e0e48bc2d 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -7,11 +7,10 @@
7/* 7/*
8 * Get CPU information for use by the procfs. 8 * Get CPU information for use by the procfs.
9 */ 9 */
10#ifdef CONFIG_X86_32
11static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c, 10static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
12 unsigned int cpu) 11 unsigned int cpu)
13{ 12{
14#ifdef CONFIG_X86_HT 13#ifdef CONFIG_SMP
15 if (c->x86_max_cores * smp_num_siblings > 1) { 14 if (c->x86_max_cores * smp_num_siblings > 1) {
16 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); 15 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
17 seq_printf(m, "siblings\t: %d\n", 16 seq_printf(m, "siblings\t: %d\n",
@@ -24,6 +23,7 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
24#endif 23#endif
25} 24}
26 25
26#ifdef CONFIG_X86_32
27static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) 27static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
28{ 28{
29 /* 29 /*
@@ -50,22 +50,6 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
50 c->wp_works_ok ? "yes" : "no"); 50 c->wp_works_ok ? "yes" : "no");
51} 51}
52#else 52#else
53static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
54 unsigned int cpu)
55{
56#ifdef CONFIG_SMP
57 if (c->x86_max_cores * smp_num_siblings > 1) {
58 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
59 seq_printf(m, "siblings\t: %d\n",
60 cpus_weight(per_cpu(cpu_core_map, cpu)));
61 seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
62 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
63 seq_printf(m, "apicid\t\t: %d\n", c->apicid);
64 seq_printf(m, "initial apicid\t: %d\n", c->initial_apicid);
65 }
66#endif
67}
68
69static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c) 53static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
70{ 54{
71 seq_printf(m, 55 seq_printf(m,
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index e85826829cf2..508bec1cee27 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -858,6 +858,9 @@ void __init reserve_early_overlap_ok(u64 start, u64 end, char *name)
858 */ 858 */
859void __init reserve_early(u64 start, u64 end, char *name) 859void __init reserve_early(u64 start, u64 end, char *name)
860{ 860{
861 if (start >= end)
862 return;
863
861 drop_overlaps_that_are_ok(start, end); 864 drop_overlaps_that_are_ok(start, end);
862 __reserve_early(start, end, name, 0); 865 __reserve_early(start, end, name, 0);
863} 866}
diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
index e41980a373ab..99c4d308f16b 100644
--- a/arch/x86/kernel/ioport.c
+++ b/arch/x86/kernel/ioport.c
@@ -85,19 +85,8 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
85 85
86 t->io_bitmap_max = bytes; 86 t->io_bitmap_max = bytes;
87 87
88#ifdef CONFIG_X86_32
89 /*
90 * Sets the lazy trigger so that the next I/O operation will
91 * reload the correct bitmap.
92 * Reset the owner so that a process switch will not set
93 * tss->io_bitmap_base to IO_BITMAP_OFFSET.
94 */
95 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
96 tss->io_bitmap_owner = NULL;
97#else
98 /* Update the TSS: */ 88 /* Update the TSS: */
99 memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated); 89 memcpy(tss->io_bitmap, t->io_bitmap_ptr, bytes_updated);
100#endif
101 90
102 put_cpu(); 91 put_cpu();
103 92
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 87b69d4fac16..6afa5232dbb7 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -1,8 +1,8 @@
1#include <linux/errno.h> 1#include <linux/errno.h>
2#include <linux/kernel.h> 2#include <linux/kernel.h>
3#include <linux/mm.h> 3#include <linux/mm.h>
4#include <asm/idle.h>
5#include <linux/smp.h> 4#include <linux/smp.h>
5#include <linux/prctl.h>
6#include <linux/slab.h> 6#include <linux/slab.h>
7#include <linux/sched.h> 7#include <linux/sched.h>
8#include <linux/module.h> 8#include <linux/module.h>
@@ -11,6 +11,9 @@
11#include <linux/ftrace.h> 11#include <linux/ftrace.h>
12#include <asm/system.h> 12#include <asm/system.h>
13#include <asm/apic.h> 13#include <asm/apic.h>
14#include <asm/idle.h>
15#include <asm/uaccess.h>
16#include <asm/i387.h>
14 17
15unsigned long idle_halt; 18unsigned long idle_halt;
16EXPORT_SYMBOL(idle_halt); 19EXPORT_SYMBOL(idle_halt);
@@ -56,6 +59,192 @@ void arch_task_cache_init(void)
56} 59}
57 60
58/* 61/*
62 * Free current thread data structures etc..
63 */
64void exit_thread(void)
65{
66 struct task_struct *me = current;
67 struct thread_struct *t = &me->thread;
68
69 if (me->thread.io_bitmap_ptr) {
70 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
71
72 kfree(t->io_bitmap_ptr);
73 t->io_bitmap_ptr = NULL;
74 clear_thread_flag(TIF_IO_BITMAP);
75 /*
76 * Careful, clear this in the TSS too:
77 */
78 memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
79 t->io_bitmap_max = 0;
80 put_cpu();
81 }
82
83 ds_exit_thread(current);
84}
85
86void flush_thread(void)
87{
88 struct task_struct *tsk = current;
89
90#ifdef CONFIG_X86_64
91 if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
92 clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
93 if (test_tsk_thread_flag(tsk, TIF_IA32)) {
94 clear_tsk_thread_flag(tsk, TIF_IA32);
95 } else {
96 set_tsk_thread_flag(tsk, TIF_IA32);
97 current_thread_info()->status |= TS_COMPAT;
98 }
99 }
100#endif
101
102 clear_tsk_thread_flag(tsk, TIF_DEBUG);
103
104 tsk->thread.debugreg0 = 0;
105 tsk->thread.debugreg1 = 0;
106 tsk->thread.debugreg2 = 0;
107 tsk->thread.debugreg3 = 0;
108 tsk->thread.debugreg6 = 0;
109 tsk->thread.debugreg7 = 0;
110 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
111 /*
112 * Forget coprocessor state..
113 */
114 tsk->fpu_counter = 0;
115 clear_fpu(tsk);
116 clear_used_math();
117}
118
119static void hard_disable_TSC(void)
120{
121 write_cr4(read_cr4() | X86_CR4_TSD);
122}
123
124void disable_TSC(void)
125{
126 preempt_disable();
127 if (!test_and_set_thread_flag(TIF_NOTSC))
128 /*
129 * Must flip the CPU state synchronously with
130 * TIF_NOTSC in the current running context.
131 */
132 hard_disable_TSC();
133 preempt_enable();
134}
135
136static void hard_enable_TSC(void)
137{
138 write_cr4(read_cr4() & ~X86_CR4_TSD);
139}
140
141static void enable_TSC(void)
142{
143 preempt_disable();
144 if (test_and_clear_thread_flag(TIF_NOTSC))
145 /*
146 * Must flip the CPU state synchronously with
147 * TIF_NOTSC in the current running context.
148 */
149 hard_enable_TSC();
150 preempt_enable();
151}
152
153int get_tsc_mode(unsigned long adr)
154{
155 unsigned int val;
156
157 if (test_thread_flag(TIF_NOTSC))
158 val = PR_TSC_SIGSEGV;
159 else
160 val = PR_TSC_ENABLE;
161
162 return put_user(val, (unsigned int __user *)adr);
163}
164
165int set_tsc_mode(unsigned int val)
166{
167 if (val == PR_TSC_SIGSEGV)
168 disable_TSC();
169 else if (val == PR_TSC_ENABLE)
170 enable_TSC();
171 else
172 return -EINVAL;
173
174 return 0;
175}
176
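
get_tsc_mode()/set_tsc_mode() above back the PR_GET_TSC/PR_SET_TSC prctl interface, which is unchanged by the move into the common process.c. A minimal user-space exercise of that path might look like the following; it assumes an x86 kernel with PR_SET_TSC support, and the SIGSEGV on the second rdtsc is the expected outcome.

#include <stdio.h>
#include <unistd.h>
#include <signal.h>
#include <sys/prctl.h>
#include <linux/prctl.h>        /* PR_SET_TSC, PR_TSC_SIGSEGV */

static void on_segv(int sig)
{
	/* rdtsc faulted because CR4.TSD is now set and we are in user mode */
	(void)sig;
	static const char msg[] = "got SIGSEGV on rdtsc, as requested\n";
	write(STDOUT_FILENO, msg, sizeof(msg) - 1);
	_exit(0);
}

static inline unsigned long long rdtsc(void)
{
	unsigned int lo, hi;
	asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
	return ((unsigned long long)hi << 32) | lo;
}

int main(void)
{
	signal(SIGSEGV, on_segv);

	printf("tsc before: %llu\n", rdtsc());

	if (prctl(PR_SET_TSC, PR_TSC_SIGSEGV, 0, 0, 0))
		perror("PR_SET_TSC");

	rdtsc();                /* should not return normally */
	printf("rdtsc unexpectedly succeeded\n");
	return 1;
}
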
177void __switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
178 struct tss_struct *tss)
179{
180 struct thread_struct *prev, *next;
181
182 prev = &prev_p->thread;
183 next = &next_p->thread;
184
185 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
186 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
187 ds_switch_to(prev_p, next_p);
188 else if (next->debugctlmsr != prev->debugctlmsr)
189 update_debugctlmsr(next->debugctlmsr);
190
191 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
192 set_debugreg(next->debugreg0, 0);
193 set_debugreg(next->debugreg1, 1);
194 set_debugreg(next->debugreg2, 2);
195 set_debugreg(next->debugreg3, 3);
196 /* no 4 and 5 */
197 set_debugreg(next->debugreg6, 6);
198 set_debugreg(next->debugreg7, 7);
199 }
200
201 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
202 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
203 /* prev and next are different */
204 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
205 hard_disable_TSC();
206 else
207 hard_enable_TSC();
208 }
209
210 if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
211 /*
212 * Copy the relevant range of the IO bitmap.
213 * Normally this is 128 bytes or less:
214 */
215 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
216 max(prev->io_bitmap_max, next->io_bitmap_max));
217 } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
218 /*
219 * Clear any possible leftover bits:
220 */
221 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
222 }
223}
224
225int sys_fork(struct pt_regs *regs)
226{
227 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
228}
229
230/*
231 * This is trivial, and on the face of it looks like it
232 * could equally well be done in user mode.
233 *
234 * Not so, for quite unobvious reasons - register pressure.
235 * In user mode vfork() cannot have a stack frame, and if
236 * done by calling the "clone()" system call directly, you
237 * do not have enough call-clobbered registers to hold all
238 * the information you need.
239 */
240int sys_vfork(struct pt_regs *regs)
241{
242 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
243 NULL, NULL);
244}
245
246
247/*
59 * Idle related variables and functions 248 * Idle related variables and functions
60 */ 249 */
61unsigned long boot_option_idle_override = 0; 250unsigned long boot_option_idle_override = 0;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 646da41a620a..14014d766cad 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -230,55 +230,6 @@ int kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
230} 230}
231EXPORT_SYMBOL(kernel_thread); 231EXPORT_SYMBOL(kernel_thread);
232 232
233/*
234 * Free current thread data structures etc..
235 */
236void exit_thread(void)
237{
238 /* The process may have allocated an io port bitmap... nuke it. */
239 if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
240 struct task_struct *tsk = current;
241 struct thread_struct *t = &tsk->thread;
242 int cpu = get_cpu();
243 struct tss_struct *tss = &per_cpu(init_tss, cpu);
244
245 kfree(t->io_bitmap_ptr);
246 t->io_bitmap_ptr = NULL;
247 clear_thread_flag(TIF_IO_BITMAP);
248 /*
249 * Careful, clear this in the TSS too:
250 */
251 memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
252 t->io_bitmap_max = 0;
253 tss->io_bitmap_owner = NULL;
254 tss->io_bitmap_max = 0;
255 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
256 put_cpu();
257 }
258
259 ds_exit_thread(current);
260}
261
262void flush_thread(void)
263{
264 struct task_struct *tsk = current;
265
266 tsk->thread.debugreg0 = 0;
267 tsk->thread.debugreg1 = 0;
268 tsk->thread.debugreg2 = 0;
269 tsk->thread.debugreg3 = 0;
270 tsk->thread.debugreg6 = 0;
271 tsk->thread.debugreg7 = 0;
272 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
273 clear_tsk_thread_flag(tsk, TIF_DEBUG);
274 /*
275 * Forget coprocessor state..
276 */
277 tsk->fpu_counter = 0;
278 clear_fpu(tsk);
279 clear_used_math();
280}
281
282void release_thread(struct task_struct *dead_task) 233void release_thread(struct task_struct *dead_task)
283{ 234{
284 BUG_ON(dead_task->mm); 235 BUG_ON(dead_task->mm);
@@ -366,127 +317,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
366} 317}
367EXPORT_SYMBOL_GPL(start_thread); 318EXPORT_SYMBOL_GPL(start_thread);
368 319
369static void hard_disable_TSC(void)
370{
371 write_cr4(read_cr4() | X86_CR4_TSD);
372}
373
374void disable_TSC(void)
375{
376 preempt_disable();
377 if (!test_and_set_thread_flag(TIF_NOTSC))
378 /*
379 * Must flip the CPU state synchronously with
380 * TIF_NOTSC in the current running context.
381 */
382 hard_disable_TSC();
383 preempt_enable();
384}
385
386static void hard_enable_TSC(void)
387{
388 write_cr4(read_cr4() & ~X86_CR4_TSD);
389}
390
391static void enable_TSC(void)
392{
393 preempt_disable();
394 if (test_and_clear_thread_flag(TIF_NOTSC))
395 /*
396 * Must flip the CPU state synchronously with
397 * TIF_NOTSC in the current running context.
398 */
399 hard_enable_TSC();
400 preempt_enable();
401}
402
403int get_tsc_mode(unsigned long adr)
404{
405 unsigned int val;
406
407 if (test_thread_flag(TIF_NOTSC))
408 val = PR_TSC_SIGSEGV;
409 else
410 val = PR_TSC_ENABLE;
411
412 return put_user(val, (unsigned int __user *)adr);
413}
414
415int set_tsc_mode(unsigned int val)
416{
417 if (val == PR_TSC_SIGSEGV)
418 disable_TSC();
419 else if (val == PR_TSC_ENABLE)
420 enable_TSC();
421 else
422 return -EINVAL;
423
424 return 0;
425}
426
427static noinline void
428__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
429 struct tss_struct *tss)
430{
431 struct thread_struct *prev, *next;
432
433 prev = &prev_p->thread;
434 next = &next_p->thread;
435
436 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
437 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
438 ds_switch_to(prev_p, next_p);
439 else if (next->debugctlmsr != prev->debugctlmsr)
440 update_debugctlmsr(next->debugctlmsr);
441
442 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
443 set_debugreg(next->debugreg0, 0);
444 set_debugreg(next->debugreg1, 1);
445 set_debugreg(next->debugreg2, 2);
446 set_debugreg(next->debugreg3, 3);
447 /* no 4 and 5 */
448 set_debugreg(next->debugreg6, 6);
449 set_debugreg(next->debugreg7, 7);
450 }
451
452 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
453 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
454 /* prev and next are different */
455 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
456 hard_disable_TSC();
457 else
458 hard_enable_TSC();
459 }
460
461 if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
462 /*
463 * Disable the bitmap via an invalid offset. We still cache
464 * the previous bitmap owner and the IO bitmap contents:
465 */
466 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
467 return;
468 }
469
470 if (likely(next == tss->io_bitmap_owner)) {
471 /*
472 * Previous owner of the bitmap (hence the bitmap content)
473 * matches the next task, we dont have to do anything but
474 * to set a valid offset in the TSS:
475 */
476 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
477 return;
478 }
479 /*
480 * Lazy TSS's I/O bitmap copy. We set an invalid offset here
481 * and we let the task to get a GPF in case an I/O instruction
482 * is performed. The handler of the GPF will verify that the
483 * faulting task has a valid I/O bitmap and, it true, does the
484 * real copy and restart the instruction. This will save us
485 * redundant copies when the currently switched task does not
486 * perform any I/O during its timeslice.
487 */
488 tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
489}
490 320
491/* 321/*
492 * switch_to(x,yn) should switch tasks from x to y. 322 * switch_to(x,yn) should switch tasks from x to y.
@@ -600,11 +430,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
600 return prev_p; 430 return prev_p;
601} 431}
602 432
603int sys_fork(struct pt_regs *regs)
604{
605 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
606}
607
608int sys_clone(struct pt_regs *regs) 433int sys_clone(struct pt_regs *regs)
609{ 434{
610 unsigned long clone_flags; 435 unsigned long clone_flags;
@@ -621,21 +446,6 @@ int sys_clone(struct pt_regs *regs)
621} 446}
622 447
623/* 448/*
624 * This is trivial, and on the face of it looks like it
625 * could equally well be done in user mode.
626 *
627 * Not so, for quite unobvious reasons - register pressure.
628 * In user mode vfork() cannot have a stack frame, and if
629 * done by calling the "clone()" system call directly, you
630 * do not have enough call-clobbered registers to hold all
631 * the information you need.
632 */
633int sys_vfork(struct pt_regs *regs)
634{
635 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0, NULL, NULL);
636}
637
638/*
639 * sys_execve() executes a new program. 449 * sys_execve() executes a new program.
640 */ 450 */
641int sys_execve(struct pt_regs *regs) 451int sys_execve(struct pt_regs *regs)
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 836ef6575f01..abb7e6a7f0c6 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -237,61 +237,6 @@ void show_regs(struct pt_regs *regs)
237 show_trace(NULL, regs, (void *)(regs + 1), regs->bp); 237 show_trace(NULL, regs, (void *)(regs + 1), regs->bp);
238} 238}
239 239
240/*
241 * Free current thread data structures etc..
242 */
243void exit_thread(void)
244{
245 struct task_struct *me = current;
246 struct thread_struct *t = &me->thread;
247
248 if (me->thread.io_bitmap_ptr) {
249 struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
250
251 kfree(t->io_bitmap_ptr);
252 t->io_bitmap_ptr = NULL;
253 clear_thread_flag(TIF_IO_BITMAP);
254 /*
255 * Careful, clear this in the TSS too:
256 */
257 memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
258 t->io_bitmap_max = 0;
259 put_cpu();
260 }
261
262 ds_exit_thread(current);
263}
264
265void flush_thread(void)
266{
267 struct task_struct *tsk = current;
268
269 if (test_tsk_thread_flag(tsk, TIF_ABI_PENDING)) {
270 clear_tsk_thread_flag(tsk, TIF_ABI_PENDING);
271 if (test_tsk_thread_flag(tsk, TIF_IA32)) {
272 clear_tsk_thread_flag(tsk, TIF_IA32);
273 } else {
274 set_tsk_thread_flag(tsk, TIF_IA32);
275 current_thread_info()->status |= TS_COMPAT;
276 }
277 }
278 clear_tsk_thread_flag(tsk, TIF_DEBUG);
279
280 tsk->thread.debugreg0 = 0;
281 tsk->thread.debugreg1 = 0;
282 tsk->thread.debugreg2 = 0;
283 tsk->thread.debugreg3 = 0;
284 tsk->thread.debugreg6 = 0;
285 tsk->thread.debugreg7 = 0;
286 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
287 /*
288 * Forget coprocessor state..
289 */
290 tsk->fpu_counter = 0;
291 clear_fpu(tsk);
292 clear_used_math();
293}
294
295void release_thread(struct task_struct *dead_task) 240void release_thread(struct task_struct *dead_task)
296{ 241{
297 if (dead_task->mm) { 242 if (dead_task->mm) {
@@ -425,118 +370,6 @@ start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp)
425} 370}
426EXPORT_SYMBOL_GPL(start_thread); 371EXPORT_SYMBOL_GPL(start_thread);
427 372
428static void hard_disable_TSC(void)
429{
430 write_cr4(read_cr4() | X86_CR4_TSD);
431}
432
433void disable_TSC(void)
434{
435 preempt_disable();
436 if (!test_and_set_thread_flag(TIF_NOTSC))
437 /*
438 * Must flip the CPU state synchronously with
439 * TIF_NOTSC in the current running context.
440 */
441 hard_disable_TSC();
442 preempt_enable();
443}
444
445static void hard_enable_TSC(void)
446{
447 write_cr4(read_cr4() & ~X86_CR4_TSD);
448}
449
450static void enable_TSC(void)
451{
452 preempt_disable();
453 if (test_and_clear_thread_flag(TIF_NOTSC))
454 /*
455 * Must flip the CPU state synchronously with
456 * TIF_NOTSC in the current running context.
457 */
458 hard_enable_TSC();
459 preempt_enable();
460}
461
462int get_tsc_mode(unsigned long adr)
463{
464 unsigned int val;
465
466 if (test_thread_flag(TIF_NOTSC))
467 val = PR_TSC_SIGSEGV;
468 else
469 val = PR_TSC_ENABLE;
470
471 return put_user(val, (unsigned int __user *)adr);
472}
473
474int set_tsc_mode(unsigned int val)
475{
476 if (val == PR_TSC_SIGSEGV)
477 disable_TSC();
478 else if (val == PR_TSC_ENABLE)
479 enable_TSC();
480 else
481 return -EINVAL;
482
483 return 0;
484}
485
486/*
487 * This special macro can be used to load a debugging register
488 */
489#define loaddebug(thread, r) set_debugreg(thread->debugreg ## r, r)
490
491static inline void __switch_to_xtra(struct task_struct *prev_p,
492 struct task_struct *next_p,
493 struct tss_struct *tss)
494{
495 struct thread_struct *prev, *next;
496
497 prev = &prev_p->thread,
498 next = &next_p->thread;
499
500 if (test_tsk_thread_flag(next_p, TIF_DS_AREA_MSR) ||
501 test_tsk_thread_flag(prev_p, TIF_DS_AREA_MSR))
502 ds_switch_to(prev_p, next_p);
503 else if (next->debugctlmsr != prev->debugctlmsr)
504 update_debugctlmsr(next->debugctlmsr);
505
506 if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
507 loaddebug(next, 0);
508 loaddebug(next, 1);
509 loaddebug(next, 2);
510 loaddebug(next, 3);
511 /* no 4 and 5 */
512 loaddebug(next, 6);
513 loaddebug(next, 7);
514 }
515
516 if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
517 test_tsk_thread_flag(next_p, TIF_NOTSC)) {
518 /* prev and next are different */
519 if (test_tsk_thread_flag(next_p, TIF_NOTSC))
520 hard_disable_TSC();
521 else
522 hard_enable_TSC();
523 }
524
525 if (test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
526 /*
527 * Copy the relevant range of the IO bitmap.
528 * Normally this is 128 bytes or less:
529 */
530 memcpy(tss->io_bitmap, next->io_bitmap_ptr,
531 max(prev->io_bitmap_max, next->io_bitmap_max));
532 } else if (test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)) {
533 /*
534 * Clear any possible leftover bits:
535 */
536 memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
537 }
538}
539
540/* 373/*
541 * switch_to(x,y) should switch tasks from x to y. 374 * switch_to(x,y) should switch tasks from x to y.
542 * 375 *
@@ -694,11 +527,6 @@ void set_personality_64bit(void)
694 current->personality &= ~READ_IMPLIES_EXEC; 527 current->personality &= ~READ_IMPLIES_EXEC;
695} 528}
696 529
697asmlinkage long sys_fork(struct pt_regs *regs)
698{
699 return do_fork(SIGCHLD, regs->sp, regs, 0, NULL, NULL);
700}
701
702asmlinkage long 530asmlinkage long
703sys_clone(unsigned long clone_flags, unsigned long newsp, 531sys_clone(unsigned long clone_flags, unsigned long newsp,
704 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs) 532 void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
@@ -708,22 +536,6 @@ sys_clone(unsigned long clone_flags, unsigned long newsp,
708 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid); 536 return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
709} 537}
710 538
711/*
712 * This is trivial, and on the face of it looks like it
713 * could equally well be done in user mode.
714 *
715 * Not so, for quite unobvious reasons - register pressure.
716 * In user mode vfork() cannot have a stack frame, and if
717 * done by calling the "clone()" system call directly, you
718 * do not have enough call-clobbered registers to hold all
719 * the information you need.
720 */
721asmlinkage long sys_vfork(struct pt_regs *regs)
722{
723 return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->sp, regs, 0,
724 NULL, NULL);
725}
726
727unsigned long get_wchan(struct task_struct *p) 539unsigned long get_wchan(struct task_struct *p)
728{ 540{
729 unsigned long stack; 541 unsigned long stack;
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index fb2159a5c817..3d9672e59c16 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1383,7 +1383,7 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
1383#ifdef CONFIG_X86_32 1383#ifdef CONFIG_X86_32
1384# define IS_IA32 1 1384# define IS_IA32 1
1385#elif defined CONFIG_IA32_EMULATION 1385#elif defined CONFIG_IA32_EMULATION
1386# define IS_IA32 test_thread_flag(TIF_IA32) 1386# define IS_IA32 is_compat_task()
1387#else 1387#else
1388# define IS_IA32 0 1388# define IS_IA32 0
1389#endif 1389#endif
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5b85759e7972..4c54bc0d8ff3 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -600,19 +600,7 @@ static int __init setup_elfcorehdr(char *arg)
600early_param("elfcorehdr", setup_elfcorehdr); 600early_param("elfcorehdr", setup_elfcorehdr);
601#endif 601#endif
602 602
603static int __init default_update_apic(void) 603static struct x86_quirks default_x86_quirks __initdata;
604{
605#ifdef CONFIG_SMP
606 if (!apic->wakeup_cpu)
607 apic->wakeup_cpu = wakeup_secondary_cpu_via_init;
608#endif
609
610 return 0;
611}
612
613static struct x86_quirks default_x86_quirks __initdata = {
614 .update_apic = default_update_apic,
615};
616 604
617struct x86_quirks *x86_quirks __initdata = &default_x86_quirks; 605struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
618 606
@@ -875,9 +863,7 @@ void __init setup_arch(char **cmdline_p)
875 863
876 reserve_initrd(); 864 reserve_initrd();
877 865
878#ifdef CONFIG_X86_64
879 vsmp_init(); 866 vsmp_init();
880#endif
881 867
882 io_delay_init(); 868 io_delay_init();
883 869
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 7cdcd16885ed..d2cc6428c587 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -187,40 +187,35 @@ setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
187/* 187/*
188 * Set up a signal frame. 188 * Set up a signal frame.
189 */ 189 */
190#ifdef CONFIG_X86_32
191static const struct {
192 u16 poplmovl;
193 u32 val;
194 u16 int80;
195} __attribute__((packed)) retcode = {
196 0xb858, /* popl %eax; movl $..., %eax */
197 __NR_sigreturn,
198 0x80cd, /* int $0x80 */
199};
200
201static const struct {
202 u8 movl;
203 u32 val;
204 u16 int80;
205 u8 pad;
206} __attribute__((packed)) rt_retcode = {
207 0xb8, /* movl $..., %eax */
208 __NR_rt_sigreturn,
209 0x80cd, /* int $0x80 */
210 0
211};
212 190
213/* 191/*
214 * Determine which stack to use.. 192 * Determine which stack to use..
215 */ 193 */
194static unsigned long align_sigframe(unsigned long sp)
195{
196#ifdef CONFIG_X86_32
197 /*
198 * Align the stack pointer according to the i386 ABI,
199 * i.e. so that on function entry ((sp + 4) & 15) == 0.
200 */
201 sp = ((sp + 4) & -16ul) - 4;
202#else /* !CONFIG_X86_32 */
203 sp = round_down(sp, 16) - 8;
204#endif
205 return sp;
206}
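
The i386 branch of align_sigframe() enforces the ABI rule quoted in its comment: after the frame is in place, (sp + 4) must be a multiple of 16, the 4 accounting for the return-address slot. A quick standalone check of the arithmetic, with an arbitrary sample value:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long sp = 0xbffff0c7;        /* arbitrary unaligned value          */

	sp = ((sp + 4) & -16ul) - 4;          /* i386 branch of align_sigframe()    */

	assert(((sp + 4) & 15) == 0);         /* the invariant from the comment     */
	printf("aligned sp = %#lx\n", sp);    /* prints 0xbffff0bc                  */
	return 0;
}
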
207
216static inline void __user * 208static inline void __user *
217get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size, 209get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
218 void **fpstate) 210 void __user **fpstate)
219{ 211{
220 unsigned long sp;
221
222 /* Default to using normal stack */ 212 /* Default to using normal stack */
223 sp = regs->sp; 213 unsigned long sp = regs->sp;
214
215#ifdef CONFIG_X86_64
216 /* redzone */
217 sp -= 128;
218#endif /* CONFIG_X86_64 */
224 219
225 /* 220 /*
226 * If we are on the alternate signal stack and would overflow it, don't. 221 * If we are on the alternate signal stack and would overflow it, don't.
@@ -234,30 +229,52 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
234 if (sas_ss_flags(sp) == 0) 229 if (sas_ss_flags(sp) == 0)
235 sp = current->sas_ss_sp + current->sas_ss_size; 230 sp = current->sas_ss_sp + current->sas_ss_size;
236 } else { 231 } else {
232#ifdef CONFIG_X86_32
237 /* This is the legacy signal stack switching. */ 233 /* This is the legacy signal stack switching. */
238 if ((regs->ss & 0xffff) != __USER_DS && 234 if ((regs->ss & 0xffff) != __USER_DS &&
239 !(ka->sa.sa_flags & SA_RESTORER) && 235 !(ka->sa.sa_flags & SA_RESTORER) &&
240 ka->sa.sa_restorer) 236 ka->sa.sa_restorer)
241 sp = (unsigned long) ka->sa.sa_restorer; 237 sp = (unsigned long) ka->sa.sa_restorer;
238#endif /* CONFIG_X86_32 */
242 } 239 }
243 240
244 if (used_math()) { 241 if (used_math()) {
245 sp = sp - sig_xstate_size; 242 sp -= sig_xstate_size;
246 *fpstate = (struct _fpstate *) sp; 243#ifdef CONFIG_X86_64
244 sp = round_down(sp, 64);
245#endif /* CONFIG_X86_64 */
246 *fpstate = (void __user *)sp;
247
247 if (save_i387_xstate(*fpstate) < 0) 248 if (save_i387_xstate(*fpstate) < 0)
248 return (void __user *)-1L; 249 return (void __user *)-1L;
249 } 250 }
250 251
251 sp -= frame_size; 252 return (void __user *)align_sigframe(sp - frame_size);
252 /*
253 * Align the stack pointer according to the i386 ABI,
254 * i.e. so that on function entry ((sp + 4) & 15) == 0.
255 */
256 sp = ((sp + 4) & -16ul) - 4;
257
258 return (void __user *) sp;
259} 253}
260 254
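
The reworked get_sigframe() above now handles the 64-bit layout that the removed get_stack() helper used to compute: skip the 128-byte red zone below regs->sp, carve out the xstate image and round it down to 64 bytes, then place the frame and hand the result to align_sigframe(), which leaves sp congruent to 8 mod 16 as the x86-64 ABI expects at function entry. The sketch below walks that arithmetic with placeholder sizes (sig_xstate_size and the rt_sigframe size are illustrative numbers, not the kernel's).

#include <stdio.h>

#define ROUND_DOWN(x, a)  ((x) & ~((unsigned long)(a) - 1))

int main(void)
{
	unsigned long sp = 0x7fffffffe123;   /* pretend user regs->sp             */
	unsigned long xstate_size = 832;     /* placeholder for sig_xstate_size   */
	unsigned long frame_size  = 440;     /* placeholder sizeof(rt_sigframe)   */
	unsigned long fpstate, frame;

	sp -= 128;                           /* skip the x86-64 red zone          */

	sp -= xstate_size;                   /* room for the FPU/xstate image     */
	sp = ROUND_DOWN(sp, 64);             /* xsave area wants 64-byte alignment */
	fpstate = sp;

	sp -= frame_size;
	frame = ROUND_DOWN(sp, 16) - 8;      /* 64-bit branch of align_sigframe() */

	printf("fpstate at %#lx, frame at %#lx\n", fpstate, frame);
	printf("frame %% 16 = %lu (ABI wants 8)\n", frame % 16);
	return 0;
}
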
255#ifdef CONFIG_X86_32
256static const struct {
257 u16 poplmovl;
258 u32 val;
259 u16 int80;
260} __attribute__((packed)) retcode = {
261 0xb858, /* popl %eax; movl $..., %eax */
262 __NR_sigreturn,
263 0x80cd, /* int $0x80 */
264};
265
266static const struct {
267 u8 movl;
268 u32 val;
269 u16 int80;
270 u8 pad;
271} __attribute__((packed)) rt_retcode = {
272 0xb8, /* movl $..., %eax */
273 __NR_rt_sigreturn,
274 0x80cd, /* int $0x80 */
275 0
276};
277
261static int 278static int
262__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set, 279__setup_frame(int sig, struct k_sigaction *ka, sigset_t *set,
263 struct pt_regs *regs) 280 struct pt_regs *regs)
@@ -388,24 +405,6 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
388 return 0; 405 return 0;
389} 406}
390#else /* !CONFIG_X86_32 */ 407#else /* !CONFIG_X86_32 */
391/*
392 * Determine which stack to use..
393 */
394static void __user *
395get_stack(struct k_sigaction *ka, unsigned long sp, unsigned long size)
396{
397 /* Default to using normal stack - redzone*/
398 sp -= 128;
399
400 /* This is the X/Open sanctioned signal stack switching. */
401 if (ka->sa.sa_flags & SA_ONSTACK) {
402 if (sas_ss_flags(sp) == 0)
403 sp = current->sas_ss_sp + current->sas_ss_size;
404 }
405
406 return (void __user *)round_down(sp - size, 64);
407}
408
409static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, 408static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
410 sigset_t *set, struct pt_regs *regs) 409 sigset_t *set, struct pt_regs *regs)
411{ 410{
@@ -414,15 +413,7 @@ static int __setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
414 int err = 0; 413 int err = 0;
415 struct task_struct *me = current; 414 struct task_struct *me = current;
416 415
417 if (used_math()) { 416 frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe), &fp);
418 fp = get_stack(ka, regs->sp, sig_xstate_size);
419 frame = (void __user *)round_down(
420 (unsigned long)fp - sizeof(struct rt_sigframe), 16) - 8;
421
422 if (save_i387_xstate(fp) < 0)
423 return -EFAULT;
424 } else
425 frame = get_stack(ka, regs->sp, sizeof(struct rt_sigframe)) - 8;
426 417
427 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) 418 if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
428 return -EFAULT; 419 return -EFAULT;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 9ce666387f37..249334f5080a 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -112,7 +112,7 @@ EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
 EXPORT_PER_CPU_SYMBOL(cpu_info);
 
-static atomic_t init_deasserted;
+atomic_t init_deasserted;
 
 
 /* Set if we find a B stepping CPU */
@@ -614,12 +614,6 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
614 unsigned long send_status, accept_status = 0; 614 unsigned long send_status, accept_status = 0;
615 int maxlvt, num_starts, j; 615 int maxlvt, num_starts, j;
616 616
617 if (get_uv_system_type() == UV_NON_UNIQUE_APIC) {
618 send_status = uv_wakeup_secondary(phys_apicid, start_eip);
619 atomic_set(&init_deasserted, 1);
620 return send_status;
621 }
622
623 maxlvt = lapic_get_maxlvt(); 617 maxlvt = lapic_get_maxlvt();
624 618
625 /* 619 /*
@@ -748,7 +742,8 @@ static void __cpuinit do_fork_idle(struct work_struct *work)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
- * Returns zero if CPU booted OK, else error code from ->wakeup_cpu.
+ * Returns zero if CPU booted OK, else error code from
+ * ->wakeup_secondary_cpu.
  */
 static int __cpuinit do_boot_cpu(int apicid, int cpu)
 {
@@ -835,9 +830,13 @@ do_rest:
 	}
 
 	/*
-	 * Starting actual IPI sequence...
+	 * Kick the secondary CPU. Use the method in the APIC driver
+	 * if it's defined - or use an INIT boot APIC message otherwise:
 	 */
-	boot_error = apic->wakeup_cpu(apicid, start_ip);
+	if (apic->wakeup_secondary_cpu)
+		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
+	else
+		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
 
 	if (!boot_error) {
 		/*
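
The do_boot_cpu() hunk above replaces the mandatory ->wakeup_cpu callback with an optional ->wakeup_secondary_cpu hook that falls back to the INIT/SIPI sequence. A toy userspace sketch of that dispatch pattern; the struct and function names are invented for the example and are not the kernel's:

#include <stdio.h>

struct apic_ops {
	/* optional override; NULL means "use the default method" */
	int (*wakeup_secondary)(int apicid, unsigned long start_eip);
};

static int wakeup_via_init(int apicid, unsigned long start_eip)
{
	printf("INIT/SIPI wakeup: apicid=%d eip=%#lx\n", apicid, start_eip);
	return 0;
}

static int kick_cpu(const struct apic_ops *apic, int apicid, unsigned long eip)
{
	/* prefer the driver hook if it is defined, otherwise the default */
	if (apic->wakeup_secondary)
		return apic->wakeup_secondary(apicid, eip);
	return wakeup_via_init(apicid, eip);
}

int main(void)
{
	struct apic_ops plain = { .wakeup_secondary = NULL };

	return kick_cpu(&plain, 1, 0x9f000);
}
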
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index c05430ac1b44..a1d288327ff0 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -118,47 +118,6 @@ die_if_kernel(const char *str, struct pt_regs *regs, long err)
118 if (!user_mode_vm(regs)) 118 if (!user_mode_vm(regs))
119 die(str, regs, err); 119 die(str, regs, err);
120} 120}
121
122/*
123 * Perform the lazy TSS's I/O bitmap copy. If the TSS has an
124 * invalid offset set (the LAZY one) and the faulting thread has
125 * a valid I/O bitmap pointer, we copy the I/O bitmap in the TSS,
126 * we set the offset field correctly and return 1.
127 */
128static int lazy_iobitmap_copy(void)
129{
130 struct thread_struct *thread;
131 struct tss_struct *tss;
132 int cpu;
133
134 cpu = get_cpu();
135 tss = &per_cpu(init_tss, cpu);
136 thread = &current->thread;
137
138 if (tss->x86_tss.io_bitmap_base == INVALID_IO_BITMAP_OFFSET_LAZY &&
139 thread->io_bitmap_ptr) {
140 memcpy(tss->io_bitmap, thread->io_bitmap_ptr,
141 thread->io_bitmap_max);
142 /*
143 * If the previously set map was extending to higher ports
144 * than the current one, pad extra space with 0xff (no access).
145 */
146 if (thread->io_bitmap_max < tss->io_bitmap_max) {
147 memset((char *) tss->io_bitmap +
148 thread->io_bitmap_max, 0xff,
149 tss->io_bitmap_max - thread->io_bitmap_max);
150 }
151 tss->io_bitmap_max = thread->io_bitmap_max;
152 tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
153 tss->io_bitmap_owner = thread;
154 put_cpu();
155
156 return 1;
157 }
158 put_cpu();
159
160 return 0;
161}
162#endif 121#endif
163 122
164static void __kprobes 123static void __kprobes
@@ -309,11 +268,6 @@ do_general_protection(struct pt_regs *regs, long error_code)
309 conditional_sti(regs); 268 conditional_sti(regs);
310 269
311#ifdef CONFIG_X86_32 270#ifdef CONFIG_X86_32
312 if (lazy_iobitmap_copy()) {
313 /* restart the faulting instruction */
314 return;
315 }
316
317 if (regs->flags & X86_VM_MASK) 271 if (regs->flags & X86_VM_MASK)
318 goto gp_in_vm86; 272 goto gp_in_vm86;
319#endif 273#endif
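
The removed lazy_iobitmap_copy() copied the faulting thread's I/O bitmap into the per-CPU TSS and padded any tail left over from a larger previous bitmap with 0xff, meaning no access. A standalone sketch of just that copy-and-pad step, with invented sizes and buffers:

#include <assert.h>
#include <string.h>

#define TSS_BITMAP_BYTES	128
#define THREAD_BITMAP_BYTES	 32

int main(void)
{
	unsigned char tss_bitmap[TSS_BITMAP_BYTES];
	unsigned char thread_bitmap[THREAD_BITMAP_BYTES];

	memset(thread_bitmap, 0x00, sizeof(thread_bitmap));	/* all ports allowed */
	memset(tss_bitmap, 0xa5, sizeof(tss_bitmap));		/* stale contents */

	/* copy the thread's bitmap, then mark the rest "no access" */
	memcpy(tss_bitmap, thread_bitmap, THREAD_BITMAP_BYTES);
	memset(tss_bitmap + THREAD_BITMAP_BYTES, 0xff,
	       TSS_BITMAP_BYTES - THREAD_BITMAP_BYTES);

	assert(tss_bitmap[0] == 0x00);
	assert(tss_bitmap[TSS_BITMAP_BYTES - 1] == 0xff);
	return 0;
}
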
diff --git a/arch/x86/kernel/vsmp_64.c b/arch/x86/kernel/vsmp_64.c
index c609205df594..74de562812cc 100644
--- a/arch/x86/kernel/vsmp_64.c
+++ b/arch/x86/kernel/vsmp_64.c
@@ -22,7 +22,7 @@
 #include <asm/paravirt.h>
 #include <asm/setup.h>
 
-#if defined CONFIG_PCI && defined CONFIG_PARAVIRT
+#ifdef CONFIG_PARAVIRT
 /*
  * Interrupt control on vSMPowered systems:
  * ~AC is a shadow of IF. If IF is 'on' AC should be 'off'
@@ -114,7 +114,6 @@ static void __init set_vsmp_pv_ops(void)
114} 114}
115#endif 115#endif
116 116
117#ifdef CONFIG_PCI
118static int is_vsmp = -1; 117static int is_vsmp = -1;
119 118
120static void __init detect_vsmp_box(void) 119static void __init detect_vsmp_box(void)
@@ -139,15 +138,6 @@ int is_vsmp_box(void)
139 return 0; 138 return 0;
140 } 139 }
141} 140}
142#else
143static void __init detect_vsmp_box(void)
144{
145}
146int is_vsmp_box(void)
147{
148 return 0;
149}
150#endif
151 141
152void __init vsmp_init(void) 142void __init vsmp_init(void)
153{ 143{
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 2b938a384910..08537747cb58 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -1,4 +1,4 @@
-obj-y	:= init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
+obj-y	:= init.o init_$(BITS).o fault.o ioremap.o extable.o pageattr.o mmap.o \
 	   pat.o pgtable.o gup.o
 
 obj-$(CONFIG_SMP)	+= tlb.o
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index bcc079c282dd..00f127c80b0e 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -1,5 +1,6 @@
1#include <linux/highmem.h> 1#include <linux/highmem.h>
2#include <linux/module.h> 2#include <linux/module.h>
3#include <linux/swap.h> /* for totalram_pages */
3 4
4void *kmap(struct page *page) 5void *kmap(struct page *page)
5{ 6{
@@ -156,3 +157,36 @@ EXPORT_SYMBOL(kmap);
156EXPORT_SYMBOL(kunmap); 157EXPORT_SYMBOL(kunmap);
157EXPORT_SYMBOL(kmap_atomic); 158EXPORT_SYMBOL(kmap_atomic);
158EXPORT_SYMBOL(kunmap_atomic); 159EXPORT_SYMBOL(kunmap_atomic);
160
161#ifdef CONFIG_NUMA
162void __init set_highmem_pages_init(void)
163{
164 struct zone *zone;
165 int nid;
166
167 for_each_zone(zone) {
168 unsigned long zone_start_pfn, zone_end_pfn;
169
170 if (!is_highmem(zone))
171 continue;
172
173 zone_start_pfn = zone->zone_start_pfn;
174 zone_end_pfn = zone_start_pfn + zone->spanned_pages;
175
176 nid = zone_to_nid(zone);
177 printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
178 zone->name, nid, zone_start_pfn, zone_end_pfn);
179
180 add_highpages_with_active_regions(nid, zone_start_pfn,
181 zone_end_pfn);
182 }
183 totalram_pages += totalhigh_pages;
184}
185#else
186void __init set_highmem_pages_init(void)
187{
188 add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
189
190 totalram_pages += totalhigh_pages;
191}
192#endif /* CONFIG_NUMA */
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
new file mode 100644
index 000000000000..ce6a722587d8
--- /dev/null
+++ b/arch/x86/mm/init.c
@@ -0,0 +1,49 @@
1#include <linux/swap.h>
2#include <asm/cacheflush.h>
3#include <asm/page.h>
4#include <asm/sections.h>
5#include <asm/system.h>
6
7void free_init_pages(char *what, unsigned long begin, unsigned long end)
8{
9 unsigned long addr = begin;
10
11 if (addr >= end)
12 return;
13
14 /*
15 * If debugging page accesses then do not free this memory but
16 * mark them not present - any buggy init-section access will
17 * create a kernel page fault:
18 */
19#ifdef CONFIG_DEBUG_PAGEALLOC
20 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
21 begin, PAGE_ALIGN(end));
22 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
23#else
24 /*
25 * We just marked the kernel text read only above, now that
26 * we are going to free part of that, we need to make that
27 * writeable first.
28 */
29 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
30
31 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
32
33 for (; addr < end; addr += PAGE_SIZE) {
34 ClearPageReserved(virt_to_page(addr));
35 init_page_count(virt_to_page(addr));
36 memset((void *)(addr & ~(PAGE_SIZE-1)),
37 POISON_FREE_INITMEM, PAGE_SIZE);
38 free_page(addr);
39 totalram_pages++;
40 }
41#endif
42}
43
44void free_initmem(void)
45{
46 free_init_pages("unused kernel memory",
47 (unsigned long)(&__init_begin),
48 (unsigned long)(&__init_end));
49}
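
free_init_pages() in the new file walks the init section one page at a time, poisons and frees each page, and reports the reclaimed size as (end - begin) >> 10 KiB. A userspace sketch of that bookkeeping, with example addresses only:

#include <stdio.h>

#define PAGE_SIZE 4096UL

int main(void)
{
	unsigned long begin = 0xc0500000UL;
	unsigned long end   = 0xc0560000UL;	/* 384 KiB of init memory */
	unsigned long addr, freed_pages = 0;

	for (addr = begin; addr < end; addr += PAGE_SIZE)
		freed_pages++;		/* stands in for free_page(addr) */

	printf("Freeing unused kernel memory: %luk freed (%lu pages)\n",
	       (end - begin) >> 10, freed_pages);
	return 0;
}
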
diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
index ef0bb941cdf5..47df0e1bbeb9 100644
--- a/arch/x86/mm/init_32.c
+++ b/arch/x86/mm/init_32.c
@@ -50,8 +50,6 @@
50#include <asm/setup.h> 50#include <asm/setup.h>
51#include <asm/cacheflush.h> 51#include <asm/cacheflush.h>
52 52
53unsigned int __VMALLOC_RESERVE = 128 << 20;
54
55unsigned long max_low_pfn_mapped; 53unsigned long max_low_pfn_mapped;
56unsigned long max_pfn_mapped; 54unsigned long max_pfn_mapped;
57 55
@@ -486,22 +484,10 @@ void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn,
486 work_with_active_regions(nid, add_highpages_work_fn, &data); 484 work_with_active_regions(nid, add_highpages_work_fn, &data);
487} 485}
488 486
489#ifndef CONFIG_NUMA
490static void __init set_highmem_pages_init(void)
491{
492 add_highpages_with_active_regions(0, highstart_pfn, highend_pfn);
493
494 totalram_pages += totalhigh_pages;
495}
496#endif /* !CONFIG_NUMA */
497
498#else 487#else
499static inline void permanent_kmaps_init(pgd_t *pgd_base) 488static inline void permanent_kmaps_init(pgd_t *pgd_base)
500{ 489{
501} 490}
502static inline void set_highmem_pages_init(void)
503{
504}
505#endif /* CONFIG_HIGHMEM */ 491#endif /* CONFIG_HIGHMEM */
506 492
507void __init native_pagetable_setup_start(pgd_t *base) 493void __init native_pagetable_setup_start(pgd_t *base)
@@ -864,10 +850,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 	unsigned long puds, pmds, ptes, tables, start;
 
 	puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
-	tables = PAGE_ALIGN(puds * sizeof(pud_t));
+	tables = roundup(puds * sizeof(pud_t), PAGE_SIZE);
 
 	pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
-	tables += PAGE_ALIGN(pmds * sizeof(pmd_t));
+	tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE);
 
 	if (use_pse) {
 		unsigned long extra;
@@ -878,10 +864,10 @@ static void __init find_early_table_space(unsigned long end, int use_pse)
 	} else
 		ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
 
-	tables += PAGE_ALIGN(ptes * sizeof(pte_t));
+	tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE);
 
 	/* for fixmap */
-	tables += PAGE_ALIGN(__end_of_fixed_addresses * sizeof(pte_t));
+	tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);
 
 	/*
 	 * RED-PEN putting page tables only on node 0 could
@@ -1231,45 +1217,6 @@ void mark_rodata_ro(void)
1231} 1217}
1232#endif 1218#endif
1233 1219
1234void free_init_pages(char *what, unsigned long begin, unsigned long end)
1235{
1236#ifdef CONFIG_DEBUG_PAGEALLOC
1237 /*
1238 * If debugging page accesses then do not free this memory but
1239 * mark them not present - any buggy init-section access will
1240 * create a kernel page fault:
1241 */
1242 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
1243 begin, PAGE_ALIGN(end));
1244 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
1245#else
1246 unsigned long addr;
1247
1248 /*
1249 * We just marked the kernel text read only above, now that
1250 * we are going to free part of that, we need to make that
1251 * writeable first.
1252 */
1253 set_memory_rw(begin, (end - begin) >> PAGE_SHIFT);
1254
1255 for (addr = begin; addr < end; addr += PAGE_SIZE) {
1256 ClearPageReserved(virt_to_page(addr));
1257 init_page_count(virt_to_page(addr));
1258 memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
1259 free_page(addr);
1260 totalram_pages++;
1261 }
1262 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
1263#endif
1264}
1265
1266void free_initmem(void)
1267{
1268 free_init_pages("unused kernel memory",
1269 (unsigned long)(&__init_begin),
1270 (unsigned long)(&__init_end));
1271}
1272
1273#ifdef CONFIG_BLK_DEV_INITRD 1220#ifdef CONFIG_BLK_DEV_INITRD
1274void free_initrd_mem(unsigned long start, unsigned long end) 1221void free_initrd_mem(unsigned long start, unsigned long end)
1275{ 1222{
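
The find_early_table_space() hunks above switch the table-size estimate from PAGE_ALIGN() to roundup(..., PAGE_SIZE); for a power-of-two page size both round a byte count up to the same whole number of pages. A quick check of that equivalence with simplified local macros (the definitions below are illustrative, not the kernel's):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define ROUNDUP(x, y)	((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	/* e.g. PMD entries needed to map 896 MiB of lowmem, 8 bytes each */
	unsigned long pmds = (896UL << 20) >> 21;	/* 2 MiB per entry */
	unsigned long bytes = pmds * 8;

	assert(PAGE_ALIGN(bytes) == ROUNDUP(bytes, PAGE_SIZE));
	printf("%lu pmd entries -> %lu bytes -> %lu bytes of table space\n",
	       pmds, bytes, ROUNDUP(bytes, PAGE_SIZE));
	return 0;
}
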
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 7d4e76da3368..11981fc8570a 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -748,6 +748,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
748 pos = start_pfn << PAGE_SHIFT; 748 pos = start_pfn << PAGE_SHIFT;
749 end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT) 749 end_pfn = ((pos + (PMD_SIZE - 1)) >> PMD_SHIFT)
750 << (PMD_SHIFT - PAGE_SHIFT); 750 << (PMD_SHIFT - PAGE_SHIFT);
751 if (end_pfn > (end >> PAGE_SHIFT))
752 end_pfn = end >> PAGE_SHIFT;
751 if (start_pfn < end_pfn) { 753 if (start_pfn < end_pfn) {
752 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0); 754 nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);
753 pos = end_pfn << PAGE_SHIFT; 755 pos = end_pfn << PAGE_SHIFT;
@@ -979,43 +981,6 @@ void __init mem_init(void)
979 initsize >> 10); 981 initsize >> 10);
980} 982}
981 983
982void free_init_pages(char *what, unsigned long begin, unsigned long end)
983{
984 unsigned long addr = begin;
985
986 if (addr >= end)
987 return;
988
989 /*
990 * If debugging page accesses then do not free this memory but
991 * mark them not present - any buggy init-section access will
992 * create a kernel page fault:
993 */
994#ifdef CONFIG_DEBUG_PAGEALLOC
995 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
996 begin, PAGE_ALIGN(end));
997 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
998#else
999 printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
1000
1001 for (; addr < end; addr += PAGE_SIZE) {
1002 ClearPageReserved(virt_to_page(addr));
1003 init_page_count(virt_to_page(addr));
1004 memset((void *)(addr & ~(PAGE_SIZE-1)),
1005 POISON_FREE_INITMEM, PAGE_SIZE);
1006 free_page(addr);
1007 totalram_pages++;
1008 }
1009#endif
1010}
1011
1012void free_initmem(void)
1013{
1014 free_init_pages("unused kernel memory",
1015 (unsigned long)(&__init_begin),
1016 (unsigned long)(&__init_end));
1017}
1018
1019#ifdef CONFIG_DEBUG_RODATA 984#ifdef CONFIG_DEBUG_RODATA
1020const int rodata_test_data = 0xC3; 985const int rodata_test_data = 0xC3;
1021EXPORT_SYMBOL_GPL(rodata_test_data); 986EXPORT_SYMBOL_GPL(rodata_test_data);
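
The init_memory_mapping() hunk adds a clamp so that rounding the head of a range up to the next 2 MiB (PMD) boundary never maps past the requested end. A standalone sketch of the pfn arithmetic with an example range (constants and addresses are illustrative only):

#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21

int main(void)
{
	unsigned long pos = 0x100000UL;		/* 1 MiB, not PMD aligned */
	unsigned long end = 0x180000UL;		/* range ends at 1.5 MiB */
	unsigned long end_pfn;

	/* round the head up to the next 2 MiB boundary, in pages */
	end_pfn = ((pos + (1UL << PMD_SHIFT) - 1) >> PMD_SHIFT)
			<< (PMD_SHIFT - PAGE_SHIFT);

	/* without the clamp this would map up to 2 MiB, past 'end' */
	if (end_pfn > (end >> PAGE_SHIFT))
		end_pfn = end >> PAGE_SHIFT;

	assert((end_pfn << PAGE_SHIFT) == end);
	printf("head mapped with 4k pages up to pfn %#lx\n", end_pfn);
	return 0;
}
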
diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
index ca53224fc56c..04102d42ff42 100644
--- a/arch/x86/mm/iomap_32.c
+++ b/arch/x86/mm/iomap_32.c
@@ -20,6 +20,17 @@
20#include <asm/pat.h> 20#include <asm/pat.h>
21#include <linux/module.h> 21#include <linux/module.h>
22 22
23int is_io_mapping_possible(resource_size_t base, unsigned long size)
24{
25#ifndef CONFIG_X86_PAE
26 /* There is no way to map greater than 1 << 32 address without PAE */
27 if (base + size > 0x100000000ULL)
28 return 0;
29#endif
30 return 1;
31}
32EXPORT_SYMBOL_GPL(is_io_mapping_possible);
33
23/* Map 'pfn' using fixed map 'type' and protections 'prot' 34/* Map 'pfn' using fixed map 'type' and protections 'prot'
24 */ 35 */
25void * 36void *
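
is_io_mapping_possible() above refuses mappings whose end crosses 4 GiB when PAE is not available, since non-PAE page tables can only hold 32-bit physical addresses. A userspace sketch of the same check; the helper below is a local re-implementation for illustration, not the kernel function:

#include <stdint.h>
#include <stdio.h>

static int fits_without_pae(uint64_t base, unsigned long size)
{
	/* there is no way to map above 1 << 32 without PAE */
	return base + size <= 0x100000000ULL;
}

int main(void)
{
	printf("256 MiB BAR at 0xe0000000: %s\n",
	       fits_without_pae(0xe0000000ULL, 256UL << 20) ? "ok" : "needs PAE");
	printf("256 MiB BAR at 0xf8000000: %s\n",
	       fits_without_pae(0xf8000000ULL, 256UL << 20) ? "ok" : "needs PAE");
	return 0;
}
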
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 9cab18b0b857..0bcd7883d036 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -9,44 +9,44 @@
 
 #include <asm/e820.h>
 
-static void __init memtest(unsigned long start_phys, unsigned long size,
-				 unsigned pattern)
+static u64 patterns[] __initdata = {
+	0,
+	0xffffffffffffffffULL,
+	0x5555555555555555ULL,
+	0xaaaaaaaaaaaaaaaaULL,
+	0x1111111111111111ULL,
+	0x2222222222222222ULL,
+	0x4444444444444444ULL,
+	0x8888888888888888ULL,
+	0x3333333333333333ULL,
+	0x6666666666666666ULL,
+	0x9999999999999999ULL,
+	0xccccccccccccccccULL,
+	0x7777777777777777ULL,
+	0xbbbbbbbbbbbbbbbbULL,
+	0xddddddddddddddddULL,
+	0xeeeeeeeeeeeeeeeeULL,
+	0x7a6c7258554e494cULL, /* yeah ;-) */
+};
+
+static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
 {
-	unsigned long i;
-	unsigned long *start;
-	unsigned long start_bad;
-	unsigned long last_bad;
-	unsigned long val;
-	unsigned long start_phys_aligned;
-	unsigned long count;
-	unsigned long incr;
-
-	switch (pattern) {
-	case 0:
-		val = 0UL;
-		break;
-	case 1:
-		val = -1UL;
-		break;
-	case 2:
-#ifdef CONFIG_X86_64
-		val = 0x5555555555555555UL;
-#else
-		val = 0x55555555UL;
-#endif
-		break;
-	case 3:
-#ifdef CONFIG_X86_64
-		val = 0xaaaaaaaaaaaaaaaaUL;
-#else
-		val = 0xaaaaaaaaUL;
-#endif
-		break;
-	default:
-		return;
-	}
+	printk(KERN_INFO "  %016llx bad mem addr %010llx - %010llx reserved\n",
+	       (unsigned long long) pattern,
+	       (unsigned long long) start_bad,
+	       (unsigned long long) end_bad);
+	reserve_early(start_bad, end_bad, "BAD RAM");
+}
 
-	incr = sizeof(unsigned long);
+static void __init memtest(u64 pattern, u64 start_phys, u64 size)
+{
+	u64 i, count;
+	u64 *start;
+	u64 start_bad, last_bad;
+	u64 start_phys_aligned;
+	size_t incr;
+
+	incr = sizeof(pattern);
 	start_phys_aligned = ALIGN(start_phys, incr);
 	count = (size - (start_phys_aligned - start_phys))/incr;
 	start = __va(start_phys_aligned);
@@ -54,25 +54,42 @@ static void __init memtest(unsigned long start_phys, unsigned long size,
 	last_bad = 0;
 
 	for (i = 0; i < count; i++)
-		start[i] = val;
+		start[i] = pattern;
 	for (i = 0; i < count; i++, start++, start_phys_aligned += incr) {
-		if (*start != val) {
-			if (start_phys_aligned == last_bad + incr) {
-				last_bad += incr;
-			} else {
-				if (start_bad) {
-					printk(KERN_CONT "\n  %016lx bad mem addr %010lx - %010lx reserved",
-						val, start_bad, last_bad + incr);
-					reserve_early(start_bad, last_bad + incr, "BAD RAM");
-				}
-				start_bad = last_bad = start_phys_aligned;
-			}
+		if (*start == pattern)
+			continue;
+		if (start_phys_aligned == last_bad + incr) {
+			last_bad += incr;
+			continue;
 		}
+		if (start_bad)
+			reserve_bad_mem(pattern, start_bad, last_bad + incr);
+		start_bad = last_bad = start_phys_aligned;
 	}
-	if (start_bad) {
-		printk(KERN_CONT "\n  %016lx bad mem addr %010lx - %010lx reserved",
-			val, start_bad, last_bad + incr);
-		reserve_early(start_bad, last_bad + incr, "BAD RAM");
+	if (start_bad)
+		reserve_bad_mem(pattern, start_bad, last_bad + incr);
+}
+
+static void __init do_one_pass(u64 pattern, u64 start, u64 end)
+{
+	u64 size = 0;
+
+	while (start < end) {
+		start = find_e820_area_size(start, &size, 1);
+
+		/* done ? */
+		if (start >= end)
+			break;
+		if (start + size > end)
+			size = end - start;
+
+		printk(KERN_INFO "  %010llx - %010llx pattern %016llx\n",
+		       (unsigned long long) start,
+		       (unsigned long long) start + size,
+		       (unsigned long long) cpu_to_be64(pattern));
+		memtest(pattern, start, size);
+
+		start += size;
 	}
 }
 
@@ -90,33 +107,22 @@ early_param("memtest", parse_memtest);
 
 void __init early_memtest(unsigned long start, unsigned long end)
 {
-	u64 t_start, t_size;
-	unsigned pattern;
+	unsigned int i;
+	unsigned int idx = 0;
 
 	if (!memtest_pattern)
 		return;
 
-	printk(KERN_INFO "early_memtest: pattern num %d", memtest_pattern);
-	for (pattern = 0; pattern < memtest_pattern; pattern++) {
-		t_start = start;
-		t_size = 0;
-		while (t_start < end) {
-			t_start = find_e820_area_size(t_start, &t_size, 1);
-
-			/* done ? */
-			if (t_start >= end)
-				break;
-			if (t_start + t_size > end)
-				t_size = end - t_start;
-
-			printk(KERN_CONT "\n  %010llx - %010llx pattern %d",
-				(unsigned long long)t_start,
-				(unsigned long long)t_start + t_size, pattern);
-
-			memtest(t_start, t_size, pattern);
+	printk(KERN_INFO "early_memtest: # of tests: %d\n", memtest_pattern);
+	for (i = 0; i < memtest_pattern; i++) {
+		idx = i % ARRAY_SIZE(patterns);
+		do_one_pass(patterns[idx], start, end);
+	}
 
-			t_start += t_size;
-		}
+	if (idx > 0) {
+		printk(KERN_INFO "early_memtest: wipe out "
+		       "test pattern from memory\n");
+		/* additional test with pattern 0 will do this */
+		do_one_pass(0, start, end);
 	}
-	printk(KERN_CONT "\n");
 }
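
The rewritten memtest() fills memory with a 64-bit pattern, reads it back, and merges consecutive mismatches into ranges before handing them to reserve_bad_mem(). A userspace sketch of that write/verify/coalesce loop over an ordinary buffer; the buffer size and injected faults are made up so the example has something to report:

#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>

#define CELLS 1024

static void report_bad(uint64_t pattern, size_t start, size_t end)
{
	printf("  %016" PRIx64 " bad cells %zu - %zu\n", pattern, start, end);
}

int main(void)
{
	uint64_t pattern = 0x5555555555555555ULL;
	uint64_t *buf = malloc(CELLS * sizeof(*buf));
	size_t i, start_bad = 0, last_bad = 0;
	int in_bad = 0;

	if (!buf)
		return 1;

	for (i = 0; i < CELLS; i++)
		buf[i] = pattern;
	buf[100] = buf[101] = buf[500] = 0;	/* injected faults */

	for (i = 0; i < CELLS; i++) {
		if (buf[i] == pattern)
			continue;
		if (in_bad && i == last_bad + 1) {
			last_bad = i;		/* extend the current bad range */
			continue;
		}
		if (in_bad)
			report_bad(pattern, start_bad, last_bad + 1);
		start_bad = last_bad = i;	/* start a new bad range */
		in_bad = 1;
	}
	if (in_bad)
		report_bad(pattern, start_bad, last_bad + 1);

	free(buf);
	return 0;
}
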
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 3957cd6d6454..451fe95a0352 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -423,32 +423,6 @@ void __init initmem_init(unsigned long start_pfn,
423 setup_bootmem_allocator(); 423 setup_bootmem_allocator();
424} 424}
425 425
426void __init set_highmem_pages_init(void)
427{
428#ifdef CONFIG_HIGHMEM
429 struct zone *zone;
430 int nid;
431
432 for_each_zone(zone) {
433 unsigned long zone_start_pfn, zone_end_pfn;
434
435 if (!is_highmem(zone))
436 continue;
437
438 zone_start_pfn = zone->zone_start_pfn;
439 zone_end_pfn = zone_start_pfn + zone->spanned_pages;
440
441 nid = zone_to_nid(zone);
442 printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
443 zone->name, nid, zone_start_pfn, zone_end_pfn);
444
445 add_highpages_with_active_regions(nid, zone_start_pfn,
446 zone_end_pfn);
447 }
448 totalram_pages += totalhigh_pages;
449#endif
450}
451
452#ifdef CONFIG_MEMORY_HOTPLUG 426#ifdef CONFIG_MEMORY_HOTPLUG
453static int paddr_to_nid(u64 addr) 427static int paddr_to_nid(u64 addr)
454{ 428{
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index 05f9aef6818a..2ed37158012d 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -11,6 +11,7 @@
11#include <linux/bootmem.h> 11#include <linux/bootmem.h>
12#include <linux/debugfs.h> 12#include <linux/debugfs.h>
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/module.h>
14#include <linux/gfp.h> 15#include <linux/gfp.h>
15#include <linux/mm.h> 16#include <linux/mm.h>
16#include <linux/fs.h> 17#include <linux/fs.h>
@@ -634,6 +635,33 @@ void unmap_devmem(unsigned long pfn, unsigned long size, pgprot_t vma_prot)
634} 635}
635 636
636/* 637/*
 638 * Change the memory type for the physical address range in kernel identity
639 * mapping space if that range is a part of identity map.
640 */
641int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
642{
643 unsigned long id_sz;
644
645 if (!pat_enabled || base >= __pa(high_memory))
646 return 0;
647
648 id_sz = (__pa(high_memory) < base + size) ?
649 __pa(high_memory) - base :
650 size;
651
652 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
653 printk(KERN_INFO
654 "%s:%d ioremap_change_attr failed %s "
655 "for %Lx-%Lx\n",
656 current->comm, current->pid,
657 cattr_name(flags),
658 base, (unsigned long long)(base + size));
659 return -EINVAL;
660 }
661 return 0;
662}
663
664/*
637 * Internal interface to reserve a range of physical memory with prot. 665 * Internal interface to reserve a range of physical memory with prot.
638 * Reserved non RAM regions only and after successful reserve_memtype, 666 * Reserved non RAM regions only and after successful reserve_memtype,
639 * this func also keeps identity mapping (if any) in sync with this new prot. 667 * this func also keeps identity mapping (if any) in sync with this new prot.
@@ -642,7 +670,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 				int strict_prot)
 {
 	int is_ram = 0;
-	int id_sz, ret;
+	int ret;
 	unsigned long flags;
 	unsigned long want_flags = (pgprot_val(*vma_prot) & _PAGE_CACHE_MASK);
 
@@ -679,23 +707,8 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
 				flags);
 	}
 
-	/* Need to keep identity mapping in sync */
-	if (paddr >= __pa(high_memory))
-		return 0;
-
-	id_sz = (__pa(high_memory) < paddr + size) ?
-				__pa(high_memory) - paddr :
-				size;
-
-	if (ioremap_change_attr((unsigned long)__va(paddr), id_sz, flags) < 0) {
+	if (kernel_map_sync_memtype(paddr, size, flags) < 0) {
 		free_memtype(paddr, paddr + size);
-		printk(KERN_ERR
-			"%s:%d reserve_pfn_range ioremap_change_attr failed %s "
-			"for %Lx-%Lx\n",
-			current->comm, current->pid,
-			cattr_name(flags),
-			(unsigned long long)paddr,
-			(unsigned long long)(paddr + size));
 		return -EINVAL;
 	}
 	return 0;
@@ -877,6 +890,7 @@ pgprot_t pgprot_writecombine(pgprot_t prot)
877 else 890 else
878 return pgprot_noncached(prot); 891 return pgprot_noncached(prot);
879} 892}
893EXPORT_SYMBOL_GPL(pgprot_writecombine);
880 894
881#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT) 895#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
882 896
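
kernel_map_sync_memtype() above only needs to touch the part of the requested range that is actually covered by the kernel identity mapping, so it clamps the size at high_memory. A sketch of that clamp with made-up addresses (the value of high_memory here is just an example):

#include <assert.h>
#include <stdio.h>

int main(void)
{
	unsigned long long high_memory = 0x38000000ULL;	/* say, 896 MiB */
	unsigned long long base = 0x37f00000ULL;	/* 1 MiB below the cut-off */
	unsigned long size = 4UL << 20;			/* caller asks for 4 MiB */
	unsigned long long id_sz;

	if (base >= high_memory) {
		puts("nothing identity-mapped, nothing to sync");
		return 0;
	}

	/* only the identity-mapped part of [base, base + size) matters */
	id_sz = (high_memory < base + size) ? high_memory - base : size;

	assert(id_sz == (1ULL << 20));
	printf("sync %llu bytes of %lu requested\n", id_sz, size);
	return 0;
}
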
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index 86f2ffc43c3d..5b7c7c8464fe 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -313,6 +313,24 @@ int ptep_clear_flush_young(struct vm_area_struct *vma,
313 return young; 313 return young;
314} 314}
315 315
316/**
317 * reserve_top_address - reserves a hole in the top of kernel address space
318 * @reserve - size of hole to reserve
319 *
320 * Can be used to relocate the fixmap area and poke a hole in the top
321 * of kernel address space to make room for a hypervisor.
322 */
323void __init reserve_top_address(unsigned long reserve)
324{
325#ifdef CONFIG_X86_32
326 BUG_ON(fixmaps_set > 0);
327 printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
328 (int)-reserve);
329 __FIXADDR_TOP = -reserve - PAGE_SIZE;
330 __VMALLOC_RESERVE += reserve;
331#endif
332}
333
316int fixmaps_set; 334int fixmaps_set;
317 335
318void __native_set_fixmap(enum fixed_addresses idx, pte_t pte) 336void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
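
reserve_top_address(), now shared in pgtable.c, relies on 32-bit wrap-around: -reserve evaluates to 4 GiB minus the reservation, so __FIXADDR_TOP drops by the reserved amount plus one guard page while __VMALLOC_RESERVE grows by the same amount. A standalone sketch of that arithmetic with example values:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SIZE 4096U

int main(void)
{
	uint32_t vmalloc_reserve = 128U << 20;	/* default __VMALLOC_RESERVE */
	uint32_t reserve = 16U << 20;		/* hole wanted by a hypervisor */
	uint32_t fixaddr_top;

	/* 32-bit wrap-around: -reserve is 4 GiB minus the reservation */
	fixaddr_top = (uint32_t)-reserve - PAGE_SIZE;
	vmalloc_reserve += reserve;

	printf("__FIXADDR_TOP     = %#" PRIx32 "\n", fixaddr_top);
	printf("__VMALLOC_RESERVE = %" PRIu32 " MiB\n", vmalloc_reserve >> 20);
	return 0;
}
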
diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
index 0951db9ee519..f2e477c91c1b 100644
--- a/arch/x86/mm/pgtable_32.c
+++ b/arch/x86/mm/pgtable_32.c
@@ -20,6 +20,8 @@
20#include <asm/tlb.h> 20#include <asm/tlb.h>
21#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
22 22
23unsigned int __VMALLOC_RESERVE = 128 << 20;
24
23/* 25/*
24 * Associate a virtual page frame with a given physical page frame 26 * Associate a virtual page frame with a given physical page frame
25 * and protection flags for that frame. 27 * and protection flags for that frame.
@@ -97,22 +99,6 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
97unsigned long __FIXADDR_TOP = 0xfffff000; 99unsigned long __FIXADDR_TOP = 0xfffff000;
98EXPORT_SYMBOL(__FIXADDR_TOP); 100EXPORT_SYMBOL(__FIXADDR_TOP);
99 101
100/**
101 * reserve_top_address - reserves a hole in the top of kernel address space
102 * @reserve - size of hole to reserve
103 *
104 * Can be used to relocate the fixmap area and poke a hole in the top
105 * of kernel address space to make room for a hypervisor.
106 */
107void __init reserve_top_address(unsigned long reserve)
108{
109 BUG_ON(fixmaps_set > 0);
110 printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
111 (int)-reserve);
112 __FIXADDR_TOP = -reserve - PAGE_SIZE;
113 __VMALLOC_RESERVE += reserve;
114}
115
116/* 102/*
117 * vmalloc=size forces the vmalloc area to be exactly 'size' 103 * vmalloc=size forces the vmalloc area to be exactly 'size'
118 * bytes. This can be used to increase (or decrease) the 104 * bytes. This can be used to increase (or decrease) the
diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
index e9f80c744cf3..10131fbdaada 100644
--- a/arch/x86/oprofile/op_model_ppro.c
+++ b/arch/x86/oprofile/op_model_ppro.c
@@ -78,8 +78,18 @@ static void ppro_setup_ctrs(struct op_msrs const * const msrs)
 	if (cpu_has_arch_perfmon) {
 		union cpuid10_eax eax;
 		eax.full = cpuid_eax(0xa);
-		if (counter_width < eax.split.bit_width)
-			counter_width = eax.split.bit_width;
+
+		/*
+		 * For Core2 (family 6, model 15), don't reset the
+		 * counter width:
+		 */
+		if (!(eax.split.version_id == 0 &&
+			current_cpu_data.x86 == 6 &&
+				current_cpu_data.x86_model == 15)) {
+
+			if (counter_width < eax.split.bit_width)
+				counter_width = eax.split.bit_width;
+		}
 	}
84 94
85 /* clear all counters */ 95 /* clear all counters */
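
The oprofile hunk above reads the counter width from CPUID leaf 0xa, where EAX[7:0] carries the architectural-perfmon version and EAX[23:16] the counter bit width. A sketch of that decoding with a made-up EAX value; the bit-field layout assumes the usual x86 ABI ordering (least-significant field first):

#include <assert.h>
#include <stdio.h>

union cpuid10_eax {
	struct {
		unsigned int version_id:8;	/* EAX[7:0]   */
		unsigned int num_counters:8;	/* EAX[15:8]  */
		unsigned int bit_width:8;	/* EAX[23:16] */
		unsigned int mask_length:8;	/* EAX[31:24] */
	} split;
	unsigned int full;
};

int main(void)
{
	union cpuid10_eax eax;

	/* example only: version 2, 4 counters, 40-bit counters, 7 events */
	eax.full = 0x07280402;

	assert(eax.split.version_id == 2);
	assert(eax.split.bit_width == 40);
	printf("arch perfmon v%u, %u counters, %u-bit\n",
	       eax.split.version_id, eax.split.num_counters,
	       eax.split.bit_width);
	return 0;
}
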
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 352ea6830659..82cd39a6cbd3 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -942,6 +942,9 @@ asmlinkage void __init xen_start_kernel(void)
942 possible map and a non-dummy shared_info. */ 942 possible map and a non-dummy shared_info. */
943 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0]; 943 per_cpu(xen_vcpu, 0) = &HYPERVISOR_shared_info->vcpu_info[0];
944 944
945 local_irq_disable();
946 early_boot_irqs_off();
947
945 xen_raw_console_write("mapping kernel into physical memory\n"); 948 xen_raw_console_write("mapping kernel into physical memory\n");
946 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); 949 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
947 950