Diffstat (limited to 'arch')
-rw-r--r--  arch/alpha/Kconfig | 2
-rw-r--r--  arch/alpha/kernel/semaphore.c | 16
-rw-r--r--  arch/alpha/kernel/traps.c | 6
-rw-r--r--  arch/alpha/lib/fls.c | 2
-rw-r--r--  arch/alpha/mm/fault.c | 4
-rw-r--r--  arch/alpha/oprofile/Kconfig | 23
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/kernel/process.c | 2
-rw-r--r--  arch/arm/kernel/ptrace.c | 8
-rw-r--r--  arch/arm/kernel/traps.c | 10
-rw-r--r--  arch/arm/mm/alignment.c | 2
-rw-r--r--  arch/arm/mm/fault.c | 2
-rw-r--r--  arch/arm/oprofile/Kconfig | 42
-rw-r--r--  arch/avr32/kernel/traps.c | 2
-rw-r--r--  arch/avr32/mm/fault.c | 6
-rw-r--r--  arch/blackfin/Kconfig | 2
-rw-r--r--  arch/blackfin/oprofile/Kconfig | 29
-rw-r--r--  arch/cris/Kconfig | 2
-rw-r--r--  arch/frv/Kconfig | 2
-rw-r--r--  arch/frv/kernel/irq-mb93091.c | 2
-rw-r--r--  arch/frv/kernel/irq-mb93093.c | 2
-rw-r--r--  arch/frv/kernel/irq-mb93493.c | 2
-rw-r--r--  arch/frv/kernel/irq.c | 2
-rw-r--r--  arch/h8300/Kconfig | 2
-rw-r--r--  arch/i386/Kconfig | 29
-rw-r--r--  arch/i386/Makefile | 6
-rw-r--r--  arch/ia64/Kconfig | 15
-rw-r--r--  arch/ia64/configs/sn2_defconfig | 1
-rw-r--r--  arch/ia64/ia32/sys_ia32.c | 6
-rw-r--r--  arch/ia64/kernel/efi.c | 4
-rw-r--r--  arch/ia64/kernel/perfmon.c | 161
-rw-r--r--  arch/ia64/kernel/perfmon_default_smpl.c | 8
-rw-r--r--  arch/ia64/kernel/process.c | 3
-rw-r--r--  arch/ia64/kernel/setup.c | 88
-rw-r--r--  arch/ia64/kernel/signal.c | 4
-rw-r--r--  arch/ia64/kernel/traps.c | 6
-rw-r--r--  arch/ia64/kernel/unaligned.c | 5
-rw-r--r--  arch/ia64/mm/fault.c | 2
-rw-r--r--  arch/ia64/mm/init.c | 2
-rw-r--r--  arch/ia64/oprofile/Kconfig | 20
-rw-r--r--  arch/m32r/Kconfig | 2
-rw-r--r--  arch/m32r/kernel/traps.c | 2
-rw-r--r--  arch/m32r/mm/fault.c | 2
-rw-r--r--  arch/m32r/oprofile/Kconfig | 23
-rw-r--r--  arch/m68k/Kconfig | 2
-rw-r--r--  arch/m68k/kernel/traps.c | 4
-rw-r--r--  arch/m68k/mm/fault.c | 2
-rw-r--r--  arch/m68knommu/Kconfig | 2
-rw-r--r--  arch/mips/Kconfig | 2
-rw-r--r--  arch/mips/au1000/pb1200/irqmap.c | 2
-rw-r--r--  arch/mips/basler/excite/excite_irq.c | 2
-rw-r--r--  arch/mips/bcm47xx/time.c | 7
-rw-r--r--  arch/mips/configs/ip27_defconfig | 1
-rw-r--r--  arch/mips/configs/mipssim_defconfig | 532
-rw-r--r--  arch/mips/configs/sb1250-swarm_defconfig | 1
-rw-r--r--  arch/mips/emma2rh/markeins/setup.c | 6
-rw-r--r--  arch/mips/kernel/cevt-r4k.c | 1
-rw-r--r--  arch/mips/kernel/irixelf.c | 4
-rw-r--r--  arch/mips/kernel/irixsig.c | 2
-rw-r--r--  arch/mips/kernel/sysirix.c | 4
-rw-r--r--  arch/mips/kernel/time.c | 92
-rw-r--r--  arch/mips/kernel/traps.c | 2
-rw-r--r--  arch/mips/lemote/lm2e/setup.c | 5
-rw-r--r--  arch/mips/mm/fault.c | 2
-rw-r--r--  arch/mips/oprofile/Kconfig | 23
-rw-r--r--  arch/mips/pmc-sierra/msp71xx/msp_time.c | 3
-rw-r--r--  arch/mips/pmc-sierra/yosemite/setup.c | 5
-rw-r--r--  arch/mips/sibyte/bcm1480/smp.c | 5
-rw-r--r--  arch/mips/sibyte/bcm1480/time.c | 74
-rw-r--r--  arch/mips/sibyte/sb1250/irq.c | 36
-rw-r--r--  arch/mips/sibyte/sb1250/smp.c | 5
-rw-r--r--  arch/mips/sibyte/sb1250/time.c | 104
-rw-r--r--  arch/mips/sibyte/swarm/setup.c | 25
-rw-r--r--  arch/mips/sni/time.c | 18
-rw-r--r--  arch/mips/tx4927/common/tx4927_setup.c | 16
-rw-r--r--  arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c | 10
-rw-r--r--  arch/mips/tx4938/common/setup.c | 7
-rw-r--r--  arch/mips/vr41xx/common/init.c | 5
-rw-r--r--  arch/parisc/Kconfig | 2
-rw-r--r--  arch/parisc/kernel/signal.c | 2
-rw-r--r--  arch/parisc/kernel/traps.c | 10
-rw-r--r--  arch/parisc/kernel/unaligned.c | 2
-rw-r--r--  arch/parisc/mm/fault.c | 2
-rw-r--r--  arch/parisc/oprofile/Kconfig | 23
-rw-r--r--  arch/powerpc/Kconfig | 15
-rw-r--r--  arch/powerpc/configs/cell_defconfig | 1
-rw-r--r--  arch/powerpc/configs/ppc64_defconfig | 1
-rw-r--r--  arch/powerpc/configs/pseries_defconfig | 1
-rw-r--r--  arch/powerpc/kernel/machine_kexec.c | 54
-rw-r--r--  arch/powerpc/kernel/process.c | 2
-rw-r--r--  arch/powerpc/kernel/traps.c | 4
-rw-r--r--  arch/powerpc/mm/fault.c | 2
-rw-r--r--  arch/powerpc/oprofile/Kconfig | 24
-rw-r--r--  arch/powerpc/platforms/maple/setup.c | 2
-rw-r--r--  arch/powerpc/platforms/pseries/ras.c | 2
-rw-r--r--  arch/ppc/Kconfig | 2
-rw-r--r--  arch/ppc/kernel/traps.c | 2
-rw-r--r--  arch/ppc/mm/fault.c | 2
-rw-r--r--  arch/ppc/platforms/chestnut.c | 1
-rw-r--r--  arch/s390/Kconfig | 16
-rw-r--r--  arch/s390/kernel/process.c | 2
-rw-r--r--  arch/s390/lib/uaccess_pt.c | 2
-rw-r--r--  arch/s390/mm/fault.c | 2
-rw-r--r--  arch/s390/oprofile/Kconfig | 22
-rw-r--r--  arch/sh/Kconfig | 2
-rw-r--r--  arch/sh/kernel/machine_kexec.c | 21
-rw-r--r--  arch/sh/kernel/process.c | 2
-rw-r--r--  arch/sh/kernel/setup.c | 38
-rw-r--r--  arch/sh/kernel/signal.c | 4
-rw-r--r--  arch/sh/kernel/traps.c | 7
-rw-r--r--  arch/sh/mm/fault.c | 2
-rw-r--r--  arch/sh/oprofile/Kconfig | 23
-rw-r--r--  arch/sh64/Kconfig | 2
-rw-r--r--  arch/sh64/kernel/traps.c | 4
-rw-r--r--  arch/sh64/mm/fault.c | 10
-rw-r--r--  arch/sh64/oprofile/Kconfig | 23
-rw-r--r--  arch/sparc/Kconfig | 6
-rw-r--r--  arch/sparc/kernel/ptrace.c | 4
-rw-r--r--  arch/sparc/kernel/sys_sparc.c | 2
-rw-r--r--  arch/sparc/kernel/sys_sunos.c | 2
-rw-r--r--  arch/sparc/kernel/traps.c | 4
-rw-r--r--  arch/sparc/oprofile/Kconfig | 17
-rw-r--r--  arch/sparc64/Kconfig | 15
-rw-r--r--  arch/sparc64/kernel/sys_sunos32.c | 2
-rw-r--r--  arch/sparc64/kernel/traps.c | 2
-rw-r--r--  arch/sparc64/oprofile/Kconfig | 17
-rw-r--r--  arch/sparc64/solaris/misc.c | 4
-rw-r--r--  arch/um/Kconfig | 2
-rw-r--r--  arch/um/kernel/trap.c | 2
-rw-r--r--  arch/um/sys-x86_64/sysrq.c | 4
-rw-r--r--  arch/v850/Kconfig | 2
-rw-r--r--  arch/x86/ia32/ia32_binfmt.c | 124
-rw-r--r--  arch/x86/kernel/Makefile_32 | 3
-rw-r--r--  arch/x86/kernel/Makefile_64 | 4
-rw-r--r--  arch/x86/kernel/acpi/Makefile_32 | 3
-rw-r--r--  arch/x86/kernel/acpi/boot.c | 2
-rw-r--r--  arch/x86/kernel/acpi/cstate.c | 4
-rw-r--r--  arch/x86/kernel/acpi/earlyquirk_32.c | 84
-rw-r--r--  arch/x86/kernel/acpi/processor.c | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig_32 (renamed from arch/x86/kernel/cpu/cpufreq/Kconfig) | 0
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/Kconfig_64 (renamed from arch/x86/kernel/cpufreq/Kconfig) | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/e_powersaver.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/elanfreq.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longhaul.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/longrun.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/p4-clockmod.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k6.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k7.c | 2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/sc520_freq.c | 4
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c | 6
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/speedstep-lib.c | 2
-rw-r--r--  arch/x86/kernel/cpu/intel_cacheinfo.c | 12
-rw-r--r--  arch/x86/kernel/cpu/perfctr-watchdog.c | 16
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 11
-rw-r--r--  arch/x86/kernel/cpuid.c | 34
-rw-r--r--  arch/x86/kernel/crash_dump_32.c | 1
-rw-r--r--  arch/x86/kernel/e820_32.c | 3
-rw-r--r--  arch/x86/kernel/e820_64.c | 3
-rw-r--r--  arch/x86/kernel/early-quirks.c (renamed from arch/x86/kernel/early-quirks_64.c) | 19
-rw-r--r--  arch/x86/kernel/genapic_64.c | 15
-rw-r--r--  arch/x86/kernel/genapic_flat_64.c | 2
-rw-r--r--  arch/x86/kernel/head64.c | 2
-rw-r--r--  arch/x86/kernel/hpet.c | 3
-rw-r--r--  arch/x86/kernel/i8259_32.c | 3
-rw-r--r--  arch/x86/kernel/init_task.c (renamed from arch/x86/kernel/init_task_32.c) | 11
-rw-r--r--  arch/x86/kernel/init_task_64.c | 54
-rw-r--r--  arch/x86/kernel/io_apic_32.c | 13
-rw-r--r--  arch/x86/kernel/machine_kexec_32.c | 22
-rw-r--r--  arch/x86/kernel/machine_kexec_64.c | 27
-rw-r--r--  arch/x86/kernel/mce_64.c | 3
-rw-r--r--  arch/x86/kernel/mce_amd_64.c | 4
-rw-r--r--  arch/x86/kernel/microcode.c | 6
-rw-r--r--  arch/x86/kernel/mpparse_64.c | 17
-rw-r--r--  arch/x86/kernel/msr.c | 2
-rw-r--r--  arch/x86/kernel/pci-dma_64.c | 2
-rw-r--r--  arch/x86/kernel/process_32.c | 56
-rw-r--r--  arch/x86/kernel/quirks.c | 112
-rw-r--r--  arch/x86/kernel/reboot_64.c | 3
-rw-r--r--  arch/x86/kernel/reboot_fixups_32.c | 8
-rw-r--r--  arch/x86/kernel/setup64.c | 10
-rw-r--r--  arch/x86/kernel/setup_32.c | 53
-rw-r--r--  arch/x86/kernel/setup_64.c | 64
-rw-r--r--  arch/x86/kernel/signal_32.c | 4
-rw-r--r--  arch/x86/kernel/smp_32.c | 4
-rw-r--r--  arch/x86/kernel/smp_64.c | 119
-rw-r--r--  arch/x86/kernel/smpboot_32.c | 81
-rw-r--r--  arch/x86/kernel/smpboot_64.c | 74
-rw-r--r--  arch/x86/kernel/suspend_64.c | 11
-rw-r--r--  arch/x86/kernel/traps_32.c | 58
-rw-r--r--  arch/x86/kernel/traps_64.c | 16
-rw-r--r--  arch/x86/kernel/tsc_32.c | 8
-rw-r--r--  arch/x86/kernel/tsc_64.c | 4
-rw-r--r--  arch/x86/kernel/vsyscall_64.c | 4
-rw-r--r--  arch/x86/lib/delay_32.c | 2
-rw-r--r--  arch/x86/lib/delay_64.c | 3
-rw-r--r--  arch/x86/lib/usercopy_32.c | 2
-rw-r--r--  arch/x86/mach-voyager/voyager_smp.c | 12
-rw-r--r--  arch/x86/mm/fault_32.c | 9
-rw-r--r--  arch/x86/mm/fault_64.c | 18
-rw-r--r--  arch/x86/mm/numa_64.c | 2
-rw-r--r--  arch/x86/mm/pageattr_64.c | 9
-rw-r--r--  arch/x86/oprofile/backtrace.c | 110
-rw-r--r--  arch/x86_64/.gitignore | 1
-rw-r--r--  arch/x86_64/Kconfig | 25
-rw-r--r--  arch/x86_64/Makefile | 6
-rw-r--r--  arch/xtensa/Kconfig | 2
-rw-r--r--  arch/xtensa/kernel/traps.c | 6
-rw-r--r--  arch/xtensa/mm/fault.c | 2
210 files changed, 1326 insertions, 2126 deletions
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 2a85dc33907c..4c002ba37e50 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -654,7 +654,7 @@ source "drivers/Kconfig"
654 654
655source "fs/Kconfig" 655source "fs/Kconfig"
656 656
657source "arch/alpha/oprofile/Kconfig" 657source "kernel/Kconfig.instrumentation"
658 658
659source "arch/alpha/Kconfig.debug" 659source "arch/alpha/Kconfig.debug"
660 660
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
index 8c8aaa205eae..8d2982aa1b8d 100644
--- a/arch/alpha/kernel/semaphore.c
+++ b/arch/alpha/kernel/semaphore.c
@@ -69,7 +69,7 @@ __down_failed(struct semaphore *sem)
69 69
70#ifdef CONFIG_DEBUG_SEMAPHORE 70#ifdef CONFIG_DEBUG_SEMAPHORE
71 printk("%s(%d): down failed(%p)\n", 71 printk("%s(%d): down failed(%p)\n",
72 tsk->comm, tsk->pid, sem); 72 tsk->comm, task_pid_nr(tsk), sem);
73#endif 73#endif
74 74
75 tsk->state = TASK_UNINTERRUPTIBLE; 75 tsk->state = TASK_UNINTERRUPTIBLE;
@@ -98,7 +98,7 @@ __down_failed(struct semaphore *sem)
98 98
99#ifdef CONFIG_DEBUG_SEMAPHORE 99#ifdef CONFIG_DEBUG_SEMAPHORE
100 printk("%s(%d): down acquired(%p)\n", 100 printk("%s(%d): down acquired(%p)\n",
101 tsk->comm, tsk->pid, sem); 101 tsk->comm, task_pid_nr(tsk), sem);
102#endif 102#endif
103} 103}
104 104
@@ -111,7 +111,7 @@ __down_failed_interruptible(struct semaphore *sem)
111 111
112#ifdef CONFIG_DEBUG_SEMAPHORE 112#ifdef CONFIG_DEBUG_SEMAPHORE
113 printk("%s(%d): down failed(%p)\n", 113 printk("%s(%d): down failed(%p)\n",
114 tsk->comm, tsk->pid, sem); 114 tsk->comm, task_pid_nr(tsk), sem);
115#endif 115#endif
116 116
117 tsk->state = TASK_INTERRUPTIBLE; 117 tsk->state = TASK_INTERRUPTIBLE;
@@ -139,7 +139,7 @@ __down_failed_interruptible(struct semaphore *sem)
139 139
140#ifdef CONFIG_DEBUG_SEMAPHORE 140#ifdef CONFIG_DEBUG_SEMAPHORE
141 printk("%s(%d): down %s(%p)\n", 141 printk("%s(%d): down %s(%p)\n",
142 current->comm, current->pid, 142 current->comm, task_pid_nr(current),
143 (ret < 0 ? "interrupted" : "acquired"), sem); 143 (ret < 0 ? "interrupted" : "acquired"), sem);
144#endif 144#endif
145 return ret; 145 return ret;
@@ -168,7 +168,7 @@ down(struct semaphore *sem)
168#endif 168#endif
169#ifdef CONFIG_DEBUG_SEMAPHORE 169#ifdef CONFIG_DEBUG_SEMAPHORE
170 printk("%s(%d): down(%p) <count=%d> from %p\n", 170 printk("%s(%d): down(%p) <count=%d> from %p\n",
171 current->comm, current->pid, sem, 171 current->comm, task_pid_nr(current), sem,
172 atomic_read(&sem->count), __builtin_return_address(0)); 172 atomic_read(&sem->count), __builtin_return_address(0));
173#endif 173#endif
174 __down(sem); 174 __down(sem);
@@ -182,7 +182,7 @@ down_interruptible(struct semaphore *sem)
182#endif 182#endif
183#ifdef CONFIG_DEBUG_SEMAPHORE 183#ifdef CONFIG_DEBUG_SEMAPHORE
184 printk("%s(%d): down(%p) <count=%d> from %p\n", 184 printk("%s(%d): down(%p) <count=%d> from %p\n",
185 current->comm, current->pid, sem, 185 current->comm, task_pid_nr(current), sem,
186 atomic_read(&sem->count), __builtin_return_address(0)); 186 atomic_read(&sem->count), __builtin_return_address(0));
187#endif 187#endif
188 return __down_interruptible(sem); 188 return __down_interruptible(sem);
@@ -201,7 +201,7 @@ down_trylock(struct semaphore *sem)
201 201
202#ifdef CONFIG_DEBUG_SEMAPHORE 202#ifdef CONFIG_DEBUG_SEMAPHORE
203 printk("%s(%d): down_trylock %s from %p\n", 203 printk("%s(%d): down_trylock %s from %p\n",
204 current->comm, current->pid, 204 current->comm, task_pid_nr(current),
205 ret ? "failed" : "acquired", 205 ret ? "failed" : "acquired",
206 __builtin_return_address(0)); 206 __builtin_return_address(0));
207#endif 207#endif
@@ -217,7 +217,7 @@ up(struct semaphore *sem)
217#endif 217#endif
218#ifdef CONFIG_DEBUG_SEMAPHORE 218#ifdef CONFIG_DEBUG_SEMAPHORE
219 printk("%s(%d): up(%p) <count=%d> from %p\n", 219 printk("%s(%d): up(%p) <count=%d> from %p\n",
220 current->comm, current->pid, sem, 220 current->comm, task_pid_nr(current), sem,
221 atomic_read(&sem->count), __builtin_return_address(0)); 221 atomic_read(&sem->count), __builtin_return_address(0));
222#endif 222#endif
223 __up(sem); 223 __up(sem);
diff --git a/arch/alpha/kernel/traps.c b/arch/alpha/kernel/traps.c
index ec0f05e0d8ff..2dc7f9fed213 100644
--- a/arch/alpha/kernel/traps.c
+++ b/arch/alpha/kernel/traps.c
@@ -182,7 +182,7 @@ die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
182#ifdef CONFIG_SMP 182#ifdef CONFIG_SMP
183 printk("CPU %d ", hard_smp_processor_id()); 183 printk("CPU %d ", hard_smp_processor_id());
184#endif 184#endif
185 printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err); 185 printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
186 dik_show_regs(regs, r9_15); 186 dik_show_regs(regs, r9_15);
187 add_taint(TAINT_DIE); 187 add_taint(TAINT_DIE);
188 dik_show_trace((unsigned long *)(regs+1)); 188 dik_show_trace((unsigned long *)(regs+1));
@@ -646,7 +646,7 @@ got_exception:
646 lock_kernel(); 646 lock_kernel();
647 647
648 printk("%s(%d): unhandled unaligned exception\n", 648 printk("%s(%d): unhandled unaligned exception\n",
649 current->comm, current->pid); 649 current->comm, task_pid_nr(current));
650 650
651 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n", 651 printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n",
652 pc, una_reg(26), regs->ps); 652 pc, una_reg(26), regs->ps);
@@ -786,7 +786,7 @@ do_entUnaUser(void __user * va, unsigned long opcode,
786 } 786 }
787 if (++cnt < 5) { 787 if (++cnt < 5) {
788 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", 788 printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n",
789 current->comm, current->pid, 789 current->comm, task_pid_nr(current),
790 regs->pc - 4, va, opcode, reg); 790 regs->pc - 4, va, opcode, reg);
791 } 791 }
792 last_time = jiffies; 792 last_time = jiffies;
diff --git a/arch/alpha/lib/fls.c b/arch/alpha/lib/fls.c
index 7ad84ea0acf8..32afaa3fa686 100644
--- a/arch/alpha/lib/fls.c
+++ b/arch/alpha/lib/fls.c
@@ -3,7 +3,7 @@
3 */ 3 */
4 4
5#include <linux/module.h> 5#include <linux/module.h>
6#include <asm/bitops.h> 6#include <linux/bitops.h>
7 7
8/* This is fls(x)-1, except zero is held to zero. This allows most 8/* This is fls(x)-1, except zero is held to zero. This allows most
9 efficent input into extbl, plus it allows easy handling of fls(0)=0. */ 9 efficent input into extbl, plus it allows easy handling of fls(0)=0. */
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 25154df3055a..4829f96585b1 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -188,13 +188,13 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
188 /* We ran out of memory, or some other thing happened to us that 188 /* We ran out of memory, or some other thing happened to us that
189 made us unable to handle the page fault gracefully. */ 189 made us unable to handle the page fault gracefully. */
190 out_of_memory: 190 out_of_memory:
191 if (is_init(current)) { 191 if (is_global_init(current)) {
192 yield(); 192 yield();
193 down_read(&mm->mmap_sem); 193 down_read(&mm->mmap_sem);
194 goto survive; 194 goto survive;
195 } 195 }
196 printk(KERN_ALERT "VM: killing process %s(%d)\n", 196 printk(KERN_ALERT "VM: killing process %s(%d)\n",
197 current->comm, current->pid); 197 current->comm, task_pid_nr(current));
198 if (!user_mode(regs)) 198 if (!user_mode(regs))
199 goto no_context; 199 goto no_context;
200 do_group_exit(SIGKILL); 200 do_group_exit(SIGKILL);
diff --git a/arch/alpha/oprofile/Kconfig b/arch/alpha/oprofile/Kconfig
deleted file mode 100644
index 5ade19801b97..000000000000
--- a/arch/alpha/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, include the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 0a0c88d0039c..4cee938df01e 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1068,7 +1068,7 @@ endmenu
1068 1068
1069source "fs/Kconfig" 1069source "fs/Kconfig"
1070 1070
1071source "arch/arm/oprofile/Kconfig" 1071source "kernel/Kconfig.instrumentation"
1072 1072
1073source "arch/arm/Kconfig.debug" 1073source "arch/arm/Kconfig.debug"
1074 1074
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 93b7f8e22dcc..4f1a03124a74 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -265,7 +265,7 @@ void __show_regs(struct pt_regs *regs)
265void show_regs(struct pt_regs * regs) 265void show_regs(struct pt_regs * regs)
266{ 266{
267 printk("\n"); 267 printk("\n");
268 printk("Pid: %d, comm: %20s\n", current->pid, current->comm); 268 printk("Pid: %d, comm: %20s\n", task_pid_nr(current), current->comm);
269 __show_regs(regs); 269 __show_regs(regs);
270 __backtrace(); 270 __backtrace();
271} 271}
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 5feee722ea98..4b05dc5c1023 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -382,16 +382,16 @@ static void clear_breakpoint(struct task_struct *task, struct debug_entry *bp)
382 382
383 if (ret != 2 || old_insn.thumb != BREAKINST_THUMB) 383 if (ret != 2 || old_insn.thumb != BREAKINST_THUMB)
384 printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at " 384 printk(KERN_ERR "%s:%d: corrupted Thumb breakpoint at "
385 "0x%08lx (0x%04x)\n", task->comm, task->pid, 385 "0x%08lx (0x%04x)\n", task->comm,
386 addr, old_insn.thumb); 386 task_pid_nr(task), addr, old_insn.thumb);
387 } else { 387 } else {
388 ret = swap_insn(task, addr & ~3, &old_insn.arm, 388 ret = swap_insn(task, addr & ~3, &old_insn.arm,
389 &bp->insn.arm, 4); 389 &bp->insn.arm, 4);
390 390
391 if (ret != 4 || old_insn.arm != BREAKINST_ARM) 391 if (ret != 4 || old_insn.arm != BREAKINST_ARM)
392 printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at " 392 printk(KERN_ERR "%s:%d: corrupted ARM breakpoint at "
393 "0x%08lx (0x%08x)\n", task->comm, task->pid, 393 "0x%08lx (0x%08x)\n", task->comm,
394 addr, old_insn.arm); 394 task_pid_nr(task), addr, old_insn.arm);
395 } 395 }
396} 396}
397 397
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 8ad47619c079..4764bd9ccee8 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -223,7 +223,7 @@ static void __die(const char *str, int err, struct thread_info *thread, struct p
223 print_modules(); 223 print_modules();
224 __show_regs(regs); 224 __show_regs(regs);
225 printk("Process %s (pid: %d, stack limit = 0x%p)\n", 225 printk("Process %s (pid: %d, stack limit = 0x%p)\n",
226 tsk->comm, tsk->pid, thread + 1); 226 tsk->comm, task_pid_nr(tsk), thread + 1);
227 227
228 if (!user_mode(regs) || in_interrupt()) { 228 if (!user_mode(regs) || in_interrupt()) {
229 dump_mem("Stack: ", regs->ARM_sp, 229 dump_mem("Stack: ", regs->ARM_sp,
@@ -337,7 +337,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
337#ifdef CONFIG_DEBUG_USER 337#ifdef CONFIG_DEBUG_USER
338 if (user_debug & UDBG_UNDEFINED) { 338 if (user_debug & UDBG_UNDEFINED) {
339 printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", 339 printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
340 current->comm, current->pid, pc); 340 current->comm, task_pid_nr(current), pc);
341 dump_instr(regs); 341 dump_instr(regs);
342 } 342 }
343#endif 343#endif
@@ -388,7 +388,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
388#ifdef CONFIG_DEBUG_USER 388#ifdef CONFIG_DEBUG_USER
389 if (user_debug & UDBG_SYSCALL) { 389 if (user_debug & UDBG_SYSCALL) {
390 printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", 390 printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
391 current->pid, current->comm, n); 391 task_pid_nr(current), current->comm, n);
392 dump_instr(regs); 392 dump_instr(regs);
393 } 393 }
394#endif 394#endif
@@ -565,7 +565,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
565 */ 565 */
566 if (user_debug & UDBG_SYSCALL) { 566 if (user_debug & UDBG_SYSCALL) {
567 printk("[%d] %s: arm syscall %d\n", 567 printk("[%d] %s: arm syscall %d\n",
568 current->pid, current->comm, no); 568 task_pid_nr(current), current->comm, no);
569 dump_instr(regs); 569 dump_instr(regs);
570 if (user_mode(regs)) { 570 if (user_mode(regs)) {
571 __show_regs(regs); 571 __show_regs(regs);
@@ -642,7 +642,7 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
642#ifdef CONFIG_DEBUG_USER 642#ifdef CONFIG_DEBUG_USER
643 if (user_debug & UDBG_BADABORT) { 643 if (user_debug & UDBG_BADABORT) {
644 printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", 644 printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
645 current->pid, current->comm, code, instr); 645 task_pid_nr(current), current->comm, code, instr);
646 dump_instr(regs); 646 dump_instr(regs);
647 show_pte(current->mm, addr); 647 show_pte(current->mm, addr);
648 } 648 }
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 074b7cb07743..e162cca5917f 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -757,7 +757,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
757 if (ai_usermode & 1) 757 if (ai_usermode & 1)
758 printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx " 758 printk("Alignment trap: %s (%d) PC=0x%08lx Instr=0x%0*lx "
759 "Address=0x%08lx FSR 0x%03x\n", current->comm, 759 "Address=0x%08lx FSR 0x%03x\n", current->comm,
760 current->pid, instrptr, 760 task_pid_nr(current), instrptr,
761 thumb_mode(regs) ? 4 : 8, 761 thumb_mode(regs) ? 4 : 8,
762 thumb_mode(regs) ? tinstr : instr, 762 thumb_mode(regs) ? tinstr : instr,
763 addr, fsr); 763 addr, fsr);
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 59ed1d05b71b..a8a7dab757eb 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -197,7 +197,7 @@ survive:
197 return fault; 197 return fault;
198 198
199out_of_memory: 199out_of_memory:
200 if (!is_init(tsk)) 200 if (!is_global_init(tsk))
201 goto out; 201 goto out;
202 202
203 /* 203 /*
diff --git a/arch/arm/oprofile/Kconfig b/arch/arm/oprofile/Kconfig
deleted file mode 100644
index afd93ad02feb..000000000000
--- a/arch/arm/oprofile/Kconfig
+++ /dev/null
@@ -1,42 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, include the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22if OPROFILE
23
24config OPROFILE_ARMV6
25 bool
26 depends on CPU_V6 && !SMP
27 default y
28 select OPROFILE_ARM11_CORE
29
30config OPROFILE_MPCORE
31 bool
32 depends on CPU_V6 && SMP
33 default y
34 select OPROFILE_ARM11_CORE
35
36config OPROFILE_ARM11_CORE
37 bool
38
39endif
40
41endmenu
42
diff --git a/arch/avr32/kernel/traps.c b/arch/avr32/kernel/traps.c
index 9a73ce7eb50f..8a7caf8e7b45 100644
--- a/arch/avr32/kernel/traps.c
+++ b/arch/avr32/kernel/traps.c
@@ -89,7 +89,7 @@ void _exception(long signr, struct pt_regs *regs, int code,
89 * generate the same exception over and over again and we get 89 * generate the same exception over and over again and we get
90 * nowhere. Better to kill it and let the kernel panic. 90 * nowhere. Better to kill it and let the kernel panic.
91 */ 91 */
92 if (is_init(current)) { 92 if (is_global_init(current)) {
93 __sighandler_t handler; 93 __sighandler_t handler;
94 94
95 spin_lock_irq(&current->sighand->siglock); 95 spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index 11472f8701bd..6560cb18b4e3 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -160,7 +160,7 @@ bad_area:
160 if (exception_trace && printk_ratelimit()) 160 if (exception_trace && printk_ratelimit())
161 printk("%s%s[%d]: segfault at %08lx pc %08lx " 161 printk("%s%s[%d]: segfault at %08lx pc %08lx "
162 "sp %08lx ecr %lu\n", 162 "sp %08lx ecr %lu\n",
163 is_init(tsk) ? KERN_EMERG : KERN_INFO, 163 is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
164 tsk->comm, tsk->pid, address, regs->pc, 164 tsk->comm, tsk->pid, address, regs->pc,
165 regs->sp, ecr); 165 regs->sp, ecr);
166 _exception(SIGSEGV, regs, code, address); 166 _exception(SIGSEGV, regs, code, address);
@@ -209,7 +209,7 @@ no_context:
209 */ 209 */
210out_of_memory: 210out_of_memory:
211 up_read(&mm->mmap_sem); 211 up_read(&mm->mmap_sem);
212 if (is_init(current)) { 212 if (is_global_init(current)) {
213 yield(); 213 yield();
214 down_read(&mm->mmap_sem); 214 down_read(&mm->mmap_sem);
215 goto survive; 215 goto survive;
@@ -231,7 +231,7 @@ do_sigbus:
231 if (exception_trace) 231 if (exception_trace)
232 printk("%s%s[%d]: bus error at %08lx pc %08lx " 232 printk("%s%s[%d]: bus error at %08lx pc %08lx "
233 "sp %08lx ecr %lu\n", 233 "sp %08lx ecr %lu\n",
234 is_init(tsk) ? KERN_EMERG : KERN_INFO, 234 is_global_init(tsk) ? KERN_EMERG : KERN_INFO,
235 tsk->comm, tsk->pid, address, regs->pc, 235 tsk->comm, tsk->pid, address, regs->pc,
236 regs->sp, ecr); 236 regs->sp, ecr);
237 237
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index aa9db3073312..4c5ca9d5e40f 100644
--- a/arch/blackfin/Kconfig
+++ b/arch/blackfin/Kconfig
@@ -1012,7 +1012,7 @@ source "drivers/Kconfig"
1012 1012
1013source "fs/Kconfig" 1013source "fs/Kconfig"
1014 1014
1015source "arch/blackfin/oprofile/Kconfig" 1015source "kernel/Kconfig.instrumentation"
1016 1016
1017menu "Kernel hacking" 1017menu "Kernel hacking"
1018 1018
diff --git a/arch/blackfin/oprofile/Kconfig b/arch/blackfin/oprofile/Kconfig
deleted file mode 100644
index 0a2fd999c941..000000000000
--- a/arch/blackfin/oprofile/Kconfig
+++ /dev/null
@@ -1,29 +0,0 @@
1menu "Profiling support"
2depends on EXPERIMENTAL
3
4config PROFILING
5 bool "Profiling support (EXPERIMENTAL)"
6 help
7 Say Y here to enable the extended profiling support mechanisms used
8 by profilers such as OProfile.
9
10config OPROFILE
11 tristate "OProfile system profiling (EXPERIMENTAL)"
12 depends on PROFILING
13 help
14 OProfile is a profiling system capable of profiling the
15 whole system, include the kernel, kernel modules, libraries,
16 and applications.
17
18 If unsure, say N.
19
20config HARDWARE_PM
21 tristate "Hardware Performance Monitor Profiling"
22 depends on PROFILING
23 help
24 take use of hardware performance monitor to profiling the kernel
25 and application.
26
27 If unsure, say N.
28
29endmenu
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 6b4d026a00a1..21900a9378bb 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -196,6 +196,8 @@ source "sound/Kconfig"
196 196
197source "drivers/usb/Kconfig" 197source "drivers/usb/Kconfig"
198 198
199source "kernel/Kconfig.instrumentation"
200
199source "arch/cris/Kconfig.debug" 201source "arch/cris/Kconfig.debug"
200 202
201source "security/Kconfig" 203source "security/Kconfig"
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig
index 74eef7111f2b..43153e767bb1 100644
--- a/arch/frv/Kconfig
+++ b/arch/frv/Kconfig
@@ -375,6 +375,8 @@ source "drivers/Kconfig"
375 375
376source "fs/Kconfig" 376source "fs/Kconfig"
377 377
378source "kernel/Kconfig.instrumentation"
379
378source "arch/frv/Kconfig.debug" 380source "arch/frv/Kconfig.debug"
379 381
380source "security/Kconfig" 382source "security/Kconfig"
diff --git a/arch/frv/kernel/irq-mb93091.c b/arch/frv/kernel/irq-mb93091.c
index ad753c1e9b8f..9e38f99bbab8 100644
--- a/arch/frv/kernel/irq-mb93091.c
+++ b/arch/frv/kernel/irq-mb93091.c
@@ -17,10 +17,10 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/bitops.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
22#include <asm/system.h> 23#include <asm/system.h>
23#include <asm/bitops.h>
24#include <asm/delay.h> 24#include <asm/delay.h>
25#include <asm/irq.h> 25#include <asm/irq.h>
26#include <asm/irc-regs.h> 26#include <asm/irc-regs.h>
diff --git a/arch/frv/kernel/irq-mb93093.c b/arch/frv/kernel/irq-mb93093.c
index e0983f6926ed..3c2752ca9775 100644
--- a/arch/frv/kernel/irq-mb93093.c
+++ b/arch/frv/kernel/irq-mb93093.c
@@ -17,10 +17,10 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/bitops.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
22#include <asm/system.h> 23#include <asm/system.h>
23#include <asm/bitops.h>
24#include <asm/delay.h> 24#include <asm/delay.h>
25#include <asm/irq.h> 25#include <asm/irq.h>
26#include <asm/irc-regs.h> 26#include <asm/irc-regs.h>
diff --git a/arch/frv/kernel/irq-mb93493.c b/arch/frv/kernel/irq-mb93493.c
index c157eeff871d..7754c7338e4b 100644
--- a/arch/frv/kernel/irq-mb93493.c
+++ b/arch/frv/kernel/irq-mb93493.c
@@ -17,10 +17,10 @@
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/init.h> 18#include <linux/init.h>
19#include <linux/irq.h> 19#include <linux/irq.h>
20#include <linux/bitops.h>
20 21
21#include <asm/io.h> 22#include <asm/io.h>
22#include <asm/system.h> 23#include <asm/system.h>
23#include <asm/bitops.h>
24#include <asm/delay.h> 24#include <asm/delay.h>
25#include <asm/irq.h> 25#include <asm/irq.h>
26#include <asm/irc-regs.h> 26#include <asm/irc-regs.h>
diff --git a/arch/frv/kernel/irq.c b/arch/frv/kernel/irq.c
index c7e59dcadee4..7ddb69089ed4 100644
--- a/arch/frv/kernel/irq.c
+++ b/arch/frv/kernel/irq.c
@@ -24,12 +24,12 @@
24#include <linux/proc_fs.h> 24#include <linux/proc_fs.h>
25#include <linux/seq_file.h> 25#include <linux/seq_file.h>
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/bitops.h>
27 28
28#include <asm/atomic.h> 29#include <asm/atomic.h>
29#include <asm/io.h> 30#include <asm/io.h>
30#include <asm/smp.h> 31#include <asm/smp.h>
31#include <asm/system.h> 32#include <asm/system.h>
32#include <asm/bitops.h>
33#include <asm/uaccess.h> 33#include <asm/uaccess.h>
34#include <asm/pgalloc.h> 34#include <asm/pgalloc.h>
35#include <asm/delay.h> 35#include <asm/delay.h>
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig
index e35f74e6e505..e2e9f57abe2e 100644
--- a/arch/h8300/Kconfig
+++ b/arch/h8300/Kconfig
@@ -223,6 +223,8 @@ endmenu
223 223
224source "fs/Kconfig" 224source "fs/Kconfig"
225 225
226source "kernel/Kconfig.instrumentation"
227
226source "arch/h8300/Kconfig.debug" 228source "arch/h8300/Kconfig.debug"
227 229
228source "security/Kconfig" 230source "security/Kconfig"
diff --git a/arch/i386/Kconfig b/arch/i386/Kconfig
index b84d5050e92e..d1bedbf9deb8 100644
--- a/arch/i386/Kconfig
+++ b/arch/i386/Kconfig
@@ -1080,7 +1080,9 @@ config APM_REAL_MODE_POWER_OFF
1080 1080
1081endif # APM 1081endif # APM
1082 1082
1083source "arch/x86/kernel/cpu/cpufreq/Kconfig" 1083source "arch/x86/kernel/cpu/cpufreq/Kconfig_32"
1084
1085source "drivers/cpuidle/Kconfig"
1084 1086
1085endmenu 1087endmenu
1086 1088
@@ -1256,31 +1258,6 @@ source "drivers/Kconfig"
1256 1258
1257source "fs/Kconfig" 1259source "fs/Kconfig"
1258 1260
1259menuconfig INSTRUMENTATION
1260 bool "Instrumentation Support"
1261 default y
1262 ---help---
1263 Say Y here to get to see options related to performance measurement,
1264 debugging, and testing. This option alone does not add any kernel code.
1265
1266 If you say N, all options in this submenu will be skipped and disabled.
1267
1268if INSTRUMENTATION
1269
1270source "arch/x86/oprofile/Kconfig"
1271
1272config KPROBES
1273 bool "Kprobes"
1274 depends on KALLSYMS && MODULES
1275 help
1276 Kprobes allows you to trap at almost any kernel address and
1277 execute a callback function. register_kprobe() establishes
1278 a probepoint and specifies the callback. Kprobes is useful
1279 for kernel debugging, non-intrusive instrumentation and testing.
1280 If in doubt, say "N".
1281
1282endif # INSTRUMENTATION
1283
1284source "arch/i386/Kconfig.debug" 1261source "arch/i386/Kconfig.debug"
1285 1262
1286source "security/Kconfig" 1263source "security/Kconfig"
diff --git a/arch/i386/Makefile b/arch/i386/Makefile
index f036d2dee3de..b88e47ca3032 100644
--- a/arch/i386/Makefile
+++ b/arch/i386/Makefile
@@ -102,7 +102,7 @@ core-$(CONFIG_XEN) += arch/x86/xen/
102# default subarch .h files 102# default subarch .h files
103mflags-y += -Iinclude/asm-x86/mach-default 103mflags-y += -Iinclude/asm-x86/mach-default
104 104
105head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task_32.o 105head-y := arch/x86/kernel/head_32.o arch/x86/kernel/init_task.o
106 106
107libs-y += arch/x86/lib/ 107libs-y += arch/x86/lib/
108core-y += arch/x86/kernel/ \ 108core-y += arch/x86/kernel/ \
@@ -131,9 +131,9 @@ all: bzImage
131zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage 131zImage zlilo zdisk: KBUILD_IMAGE := arch/x86/boot/zImage
132 132
133zImage bzImage: vmlinux 133zImage bzImage: vmlinux
134 $(Q)mkdir -p $(objtree)/arch/i386/boot
135 $(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
136 $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE) 134 $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
135 $(Q)mkdir -p $(objtree)/arch/i386/boot
136 $(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/i386/boot/bzImage
137 137
138compressed: zImage 138compressed: zImage
139 139
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index c60532d93c54..c89108e9770d 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -592,20 +592,7 @@ config IRQ_PER_CPU
592 592
593source "arch/ia64/hp/sim/Kconfig" 593source "arch/ia64/hp/sim/Kconfig"
594 594
595menu "Instrumentation Support" 595source "kernel/Kconfig.instrumentation"
596
597source "arch/ia64/oprofile/Kconfig"
598
599config KPROBES
600 bool "Kprobes"
601 depends on KALLSYMS && MODULES
602 help
603 Kprobes allows you to trap at almost any kernel address and
604 execute a callback function. register_kprobe() establishes
605 a probepoint and specifies the callback. Kprobes is useful
606 for kernel debugging, non-intrusive instrumentation and testing.
607 If in doubt, say "N".
608endmenu
609 596
610source "arch/ia64/Kconfig.debug" 597source "arch/ia64/Kconfig.debug"
611 598
diff --git a/arch/ia64/configs/sn2_defconfig b/arch/ia64/configs/sn2_defconfig
index 449d3e75bfc2..75fd90dc76a3 100644
--- a/arch/ia64/configs/sn2_defconfig
+++ b/arch/ia64/configs/sn2_defconfig
@@ -26,6 +26,7 @@ CONFIG_TASK_IO_ACCOUNTING=y
26# CONFIG_AUDIT is not set 26# CONFIG_AUDIT is not set
27# CONFIG_IKCONFIG is not set 27# CONFIG_IKCONFIG is not set
28CONFIG_LOG_BUF_SHIFT=20 28CONFIG_LOG_BUF_SHIFT=20
29CONFIG_CGROUPS=y
29CONFIG_CPUSETS=y 30CONFIG_CPUSETS=y
30CONFIG_SYSFS_DEPRECATED=y 31CONFIG_SYSFS_DEPRECATED=y
31CONFIG_RELAY=y 32CONFIG_RELAY=y
diff --git a/arch/ia64/ia32/sys_ia32.c b/arch/ia64/ia32/sys_ia32.c
index a3405b3c1eef..d025a22eb225 100644
--- a/arch/ia64/ia32/sys_ia32.c
+++ b/arch/ia64/ia32/sys_ia32.c
@@ -773,7 +773,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
773 if (flags & MAP_SHARED) 773 if (flags & MAP_SHARED)
774 printk(KERN_INFO 774 printk(KERN_INFO
775 "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n", 775 "%s(%d): emulate_mmap() can't share head (addr=0x%lx)\n",
776 current->comm, current->pid, start); 776 current->comm, task_pid_nr(current), start);
777 ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags, 777 ret = mmap_subpage(file, start, min(PAGE_ALIGN(start), end), prot, flags,
778 off); 778 off);
779 if (IS_ERR((void *) ret)) 779 if (IS_ERR((void *) ret))
@@ -786,7 +786,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
786 if (flags & MAP_SHARED) 786 if (flags & MAP_SHARED)
787 printk(KERN_INFO 787 printk(KERN_INFO
788 "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n", 788 "%s(%d): emulate_mmap() can't share tail (end=0x%lx)\n",
789 current->comm, current->pid, end); 789 current->comm, task_pid_nr(current), end);
790 ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags, 790 ret = mmap_subpage(file, max(start, PAGE_START(end)), end, prot, flags,
791 (off + len) - offset_in_page(end)); 791 (off + len) - offset_in_page(end));
792 if (IS_ERR((void *) ret)) 792 if (IS_ERR((void *) ret))
@@ -816,7 +816,7 @@ emulate_mmap (struct file *file, unsigned long start, unsigned long len, int pro
816 816
817 if ((flags & MAP_SHARED) && !is_congruent) 817 if ((flags & MAP_SHARED) && !is_congruent)
818 printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap " 818 printk(KERN_INFO "%s(%d): emulate_mmap() can't share contents of incongruent mmap "
819 "(addr=0x%lx,off=0x%llx)\n", current->comm, current->pid, start, off); 819 "(addr=0x%lx,off=0x%llx)\n", current->comm, task_pid_nr(current), start, off);
820 820
821 DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend, 821 DBG("mmap_body: mapping [0x%lx-0x%lx) %s with poff 0x%llx\n", pstart, pend,
822 is_congruent ? "congruent" : "not congruent", poff); 822 is_congruent ? "congruent" : "not congruent", poff);
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 73ca86d03810..8e4894b205e2 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -967,7 +967,7 @@ find_memmap_space (void)
967 * to use. We can allocate partial granules only if the unavailable 967 * to use. We can allocate partial granules only if the unavailable
968 * parts exist, and are WB. 968 * parts exist, and are WB.
969 */ 969 */
970void 970unsigned long
971efi_memmap_init(unsigned long *s, unsigned long *e) 971efi_memmap_init(unsigned long *s, unsigned long *e)
972{ 972{
973 struct kern_memdesc *k, *prev = NULL; 973 struct kern_memdesc *k, *prev = NULL;
@@ -1084,6 +1084,8 @@ efi_memmap_init(unsigned long *s, unsigned long *e)
1084 /* reserve the memory we are using for kern_memmap */ 1084 /* reserve the memory we are using for kern_memmap */
1085 *s = (u64)kern_memmap; 1085 *s = (u64)kern_memmap;
1086 *e = (u64)++k; 1086 *e = (u64)++k;
1087
1088 return total_mem;
1087} 1089}
1088 1090
1089void 1091void
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f55fa07849c4..59169bf7145f 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -158,14 +158,14 @@
158 */ 158 */
159#define PROTECT_CTX(c, f) \ 159#define PROTECT_CTX(c, f) \
160 do { \ 160 do { \
161 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, current->pid)); \ 161 DPRINT(("spinlock_irq_save ctx %p by [%d]\n", c, task_pid_nr(current))); \
162 spin_lock_irqsave(&(c)->ctx_lock, f); \ 162 spin_lock_irqsave(&(c)->ctx_lock, f); \
163 DPRINT(("spinlocked ctx %p by [%d]\n", c, current->pid)); \ 163 DPRINT(("spinlocked ctx %p by [%d]\n", c, task_pid_nr(current))); \
164 } while(0) 164 } while(0)
165 165
166#define UNPROTECT_CTX(c, f) \ 166#define UNPROTECT_CTX(c, f) \
167 do { \ 167 do { \
168 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, current->pid)); \ 168 DPRINT(("spinlock_irq_restore ctx %p by [%d]\n", c, task_pid_nr(current))); \
169 spin_unlock_irqrestore(&(c)->ctx_lock, f); \ 169 spin_unlock_irqrestore(&(c)->ctx_lock, f); \
170 } while(0) 170 } while(0)
171 171
@@ -227,12 +227,12 @@
227#ifdef PFM_DEBUGGING 227#ifdef PFM_DEBUGGING
228#define DPRINT(a) \ 228#define DPRINT(a) \
229 do { \ 229 do { \
230 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \ 230 if (unlikely(pfm_sysctl.debug >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
231 } while (0) 231 } while (0)
232 232
233#define DPRINT_ovfl(a) \ 233#define DPRINT_ovfl(a) \
234 do { \ 234 do { \
235 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), current->pid); printk a; } \ 235 if (unlikely(pfm_sysctl.debug > 0 && pfm_sysctl.debug_ovfl >0)) { printk("%s.%d: CPU%d [%d] ", __FUNCTION__, __LINE__, smp_processor_id(), task_pid_nr(current)); printk a; } \
236 } while (0) 236 } while (0)
237#endif 237#endif
238 238
@@ -913,7 +913,7 @@ pfm_mask_monitoring(struct task_struct *task)
913 unsigned long mask, val, ovfl_mask; 913 unsigned long mask, val, ovfl_mask;
914 int i; 914 int i;
915 915
916 DPRINT_ovfl(("masking monitoring for [%d]\n", task->pid)); 916 DPRINT_ovfl(("masking monitoring for [%d]\n", task_pid_nr(task)));
917 917
918 ovfl_mask = pmu_conf->ovfl_val; 918 ovfl_mask = pmu_conf->ovfl_val;
919 /* 919 /*
@@ -992,12 +992,12 @@ pfm_restore_monitoring(struct task_struct *task)
992 ovfl_mask = pmu_conf->ovfl_val; 992 ovfl_mask = pmu_conf->ovfl_val;
993 993
994 if (task != current) { 994 if (task != current) {
995 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task->pid, current->pid); 995 printk(KERN_ERR "perfmon.%d: invalid task[%d] current[%d]\n", __LINE__, task_pid_nr(task), task_pid_nr(current));
996 return; 996 return;
997 } 997 }
998 if (ctx->ctx_state != PFM_CTX_MASKED) { 998 if (ctx->ctx_state != PFM_CTX_MASKED) {
999 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__, 999 printk(KERN_ERR "perfmon.%d: task[%d] current[%d] invalid state=%d\n", __LINE__,
1000 task->pid, current->pid, ctx->ctx_state); 1000 task_pid_nr(task), task_pid_nr(current), ctx->ctx_state);
1001 return; 1001 return;
1002 } 1002 }
1003 psr = pfm_get_psr(); 1003 psr = pfm_get_psr();
@@ -1051,7 +1051,8 @@ pfm_restore_monitoring(struct task_struct *task)
1051 if ((mask & 0x1) == 0UL) continue; 1051 if ((mask & 0x1) == 0UL) continue;
1052 ctx->th_pmcs[i] = ctx->ctx_pmcs[i]; 1052 ctx->th_pmcs[i] = ctx->ctx_pmcs[i];
1053 ia64_set_pmc(i, ctx->th_pmcs[i]); 1053 ia64_set_pmc(i, ctx->th_pmcs[i]);
1054 DPRINT(("[%d] pmc[%d]=0x%lx\n", task->pid, i, ctx->th_pmcs[i])); 1054 DPRINT(("[%d] pmc[%d]=0x%lx\n",
1055 task_pid_nr(task), i, ctx->th_pmcs[i]));
1055 } 1056 }
1056 ia64_srlz_d(); 1057 ia64_srlz_d();
1057 1058
@@ -1370,7 +1371,7 @@ pfm_reserve_session(struct task_struct *task, int is_syswide, unsigned int cpu)
1370 1371
1371error_conflict: 1372error_conflict:
1372 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n", 1373 DPRINT(("system wide not possible, conflicting session [%d] on CPU%d\n",
1373 pfm_sessions.pfs_sys_session[cpu]->pid, 1374 task_pid_nr(pfm_sessions.pfs_sys_session[cpu]),
1374 cpu)); 1375 cpu));
1375abort: 1376abort:
1376 UNLOCK_PFS(flags); 1377 UNLOCK_PFS(flags);
@@ -1442,7 +1443,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
1442 1443
1443 /* sanity checks */ 1444 /* sanity checks */
1444 if (task->mm == NULL || size == 0UL || vaddr == NULL) { 1445 if (task->mm == NULL || size == 0UL || vaddr == NULL) {
1445 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task->pid, task->mm); 1446 printk(KERN_ERR "perfmon: pfm_remove_smpl_mapping [%d] invalid context mm=%p\n", task_pid_nr(task), task->mm);
1446 return -EINVAL; 1447 return -EINVAL;
1447 } 1448 }
1448 1449
@@ -1459,7 +1460,7 @@ pfm_remove_smpl_mapping(struct task_struct *task, void *vaddr, unsigned long siz
1459 1460
1460 up_write(&task->mm->mmap_sem); 1461 up_write(&task->mm->mmap_sem);
1461 if (r !=0) { 1462 if (r !=0) {
1462 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task->pid, vaddr, size); 1463 printk(KERN_ERR "perfmon: [%d] unable to unmap sampling buffer @%p size=%lu\n", task_pid_nr(task), vaddr, size);
1463 } 1464 }
1464 1465
1465 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r)); 1466 DPRINT(("do_unmap(%p, %lu)=%d\n", vaddr, size, r));
@@ -1501,7 +1502,7 @@ pfm_free_smpl_buffer(pfm_context_t *ctx)
1501 return 0; 1502 return 0;
1502 1503
1503invalid_free: 1504invalid_free:
1504 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", current->pid); 1505 printk(KERN_ERR "perfmon: pfm_free_smpl_buffer [%d] no buffer\n", task_pid_nr(current));
1505 return -EINVAL; 1506 return -EINVAL;
1506} 1507}
1507#endif 1508#endif
@@ -1547,13 +1548,13 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1547 unsigned long flags; 1548 unsigned long flags;
1548 DECLARE_WAITQUEUE(wait, current); 1549 DECLARE_WAITQUEUE(wait, current);
1549 if (PFM_IS_FILE(filp) == 0) { 1550 if (PFM_IS_FILE(filp) == 0) {
1550 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid); 1551 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1551 return -EINVAL; 1552 return -EINVAL;
1552 } 1553 }
1553 1554
1554 ctx = (pfm_context_t *)filp->private_data; 1555 ctx = (pfm_context_t *)filp->private_data;
1555 if (ctx == NULL) { 1556 if (ctx == NULL) {
1556 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", current->pid); 1557 printk(KERN_ERR "perfmon: pfm_read: NULL ctx [%d]\n", task_pid_nr(current));
1557 return -EINVAL; 1558 return -EINVAL;
1558 } 1559 }
1559 1560
@@ -1607,7 +1608,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1607 1608
1608 PROTECT_CTX(ctx, flags); 1609 PROTECT_CTX(ctx, flags);
1609 } 1610 }
1610 DPRINT(("[%d] back to running ret=%ld\n", current->pid, ret)); 1611 DPRINT(("[%d] back to running ret=%ld\n", task_pid_nr(current), ret));
1611 set_current_state(TASK_RUNNING); 1612 set_current_state(TASK_RUNNING);
1612 remove_wait_queue(&ctx->ctx_msgq_wait, &wait); 1613 remove_wait_queue(&ctx->ctx_msgq_wait, &wait);
1613 1614
@@ -1616,7 +1617,7 @@ pfm_read(struct file *filp, char __user *buf, size_t size, loff_t *ppos)
1616 ret = -EINVAL; 1617 ret = -EINVAL;
1617 msg = pfm_get_next_msg(ctx); 1618 msg = pfm_get_next_msg(ctx);
1618 if (msg == NULL) { 1619 if (msg == NULL) {
1619 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, current->pid); 1620 printk(KERN_ERR "perfmon: pfm_read no msg for ctx=%p [%d]\n", ctx, task_pid_nr(current));
1620 goto abort_locked; 1621 goto abort_locked;
1621 } 1622 }
1622 1623
@@ -1647,13 +1648,13 @@ pfm_poll(struct file *filp, poll_table * wait)
1647 unsigned int mask = 0; 1648 unsigned int mask = 0;
1648 1649
1649 if (PFM_IS_FILE(filp) == 0) { 1650 if (PFM_IS_FILE(filp) == 0) {
1650 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", current->pid); 1651 printk(KERN_ERR "perfmon: pfm_poll: bad magic [%d]\n", task_pid_nr(current));
1651 return 0; 1652 return 0;
1652 } 1653 }
1653 1654
1654 ctx = (pfm_context_t *)filp->private_data; 1655 ctx = (pfm_context_t *)filp->private_data;
1655 if (ctx == NULL) { 1656 if (ctx == NULL) {
1656 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", current->pid); 1657 printk(KERN_ERR "perfmon: pfm_poll: NULL ctx [%d]\n", task_pid_nr(current));
1657 return 0; 1658 return 0;
1658 } 1659 }
1659 1660
@@ -1692,7 +1693,7 @@ pfm_do_fasync(int fd, struct file *filp, pfm_context_t *ctx, int on)
1692 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue); 1693 ret = fasync_helper (fd, filp, on, &ctx->ctx_async_queue);
1693 1694
1694 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n", 1695 DPRINT(("pfm_fasync called by [%d] on ctx_fd=%d on=%d async_queue=%p ret=%d\n",
1695 current->pid, 1696 task_pid_nr(current),
1696 fd, 1697 fd,
1697 on, 1698 on,
1698 ctx->ctx_async_queue, ret)); 1699 ctx->ctx_async_queue, ret));
@@ -1707,13 +1708,13 @@ pfm_fasync(int fd, struct file *filp, int on)
1707 int ret; 1708 int ret;
1708 1709
1709 if (PFM_IS_FILE(filp) == 0) { 1710 if (PFM_IS_FILE(filp) == 0) {
1710 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", current->pid); 1711 printk(KERN_ERR "perfmon: pfm_fasync bad magic [%d]\n", task_pid_nr(current));
1711 return -EBADF; 1712 return -EBADF;
1712 } 1713 }
1713 1714
1714 ctx = (pfm_context_t *)filp->private_data; 1715 ctx = (pfm_context_t *)filp->private_data;
1715 if (ctx == NULL) { 1716 if (ctx == NULL) {
1716 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", current->pid); 1717 printk(KERN_ERR "perfmon: pfm_fasync NULL ctx [%d]\n", task_pid_nr(current));
1717 return -EBADF; 1718 return -EBADF;
1718 } 1719 }
1719 /* 1720 /*
@@ -1759,7 +1760,7 @@ pfm_syswide_force_stop(void *info)
1759 if (owner != ctx->ctx_task) { 1760 if (owner != ctx->ctx_task) {
1760 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n", 1761 printk(KERN_ERR "perfmon: pfm_syswide_force_stop CPU%d unexpected owner [%d] instead of [%d]\n",
1761 smp_processor_id(), 1762 smp_processor_id(),
1762 owner->pid, ctx->ctx_task->pid); 1763 task_pid_nr(owner), task_pid_nr(ctx->ctx_task));
1763 return; 1764 return;
1764 } 1765 }
1765 if (GET_PMU_CTX() != ctx) { 1766 if (GET_PMU_CTX() != ctx) {
@@ -1769,7 +1770,7 @@ pfm_syswide_force_stop(void *info)
1769 return; 1770 return;
1770 } 1771 }
1771 1772
1772 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), ctx->ctx_task->pid)); 1773 DPRINT(("on CPU%d forcing system wide stop for [%d]\n", smp_processor_id(), task_pid_nr(ctx->ctx_task)));
1773 /* 1774 /*
1774 * the context is already protected in pfm_close(), we simply 1775 * the context is already protected in pfm_close(), we simply
1775 * need to mask interrupts to avoid a PMU interrupt race on 1776 * need to mask interrupts to avoid a PMU interrupt race on
@@ -1821,7 +1822,7 @@ pfm_flush(struct file *filp, fl_owner_t id)
1821 1822
1822 ctx = (pfm_context_t *)filp->private_data; 1823 ctx = (pfm_context_t *)filp->private_data;
1823 if (ctx == NULL) { 1824 if (ctx == NULL) {
1824 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", current->pid); 1825 printk(KERN_ERR "perfmon: pfm_flush: NULL ctx [%d]\n", task_pid_nr(current));
1825 return -EBADF; 1826 return -EBADF;
1826 } 1827 }
1827 1828
@@ -1969,7 +1970,7 @@ pfm_close(struct inode *inode, struct file *filp)
1969 1970
1970 ctx = (pfm_context_t *)filp->private_data; 1971 ctx = (pfm_context_t *)filp->private_data;
1971 if (ctx == NULL) { 1972 if (ctx == NULL) {
1972 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", current->pid); 1973 printk(KERN_ERR "perfmon: pfm_close: NULL ctx [%d]\n", task_pid_nr(current));
1973 return -EBADF; 1974 return -EBADF;
1974 } 1975 }
1975 1976
@@ -2066,7 +2067,7 @@ pfm_close(struct inode *inode, struct file *filp)
2066 */ 2067 */
2067 ctx->ctx_state = PFM_CTX_ZOMBIE; 2068 ctx->ctx_state = PFM_CTX_ZOMBIE;
2068 2069
2069 DPRINT(("zombie ctx for [%d]\n", task->pid)); 2070 DPRINT(("zombie ctx for [%d]\n", task_pid_nr(task)));
2070 /* 2071 /*
2071 * cannot free the context on the spot. deferred until 2072 * cannot free the context on the spot. deferred until
2072 * the task notices the ZOMBIE state 2073 * the task notices the ZOMBIE state
@@ -2472,7 +2473,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
2472 /* invoke and lock buffer format, if found */ 2473 /* invoke and lock buffer format, if found */
2473 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id); 2474 fmt = pfm_find_buffer_fmt(arg->ctx_smpl_buf_id);
2474 if (fmt == NULL) { 2475 if (fmt == NULL) {
2475 DPRINT(("[%d] cannot find buffer format\n", task->pid)); 2476 DPRINT(("[%d] cannot find buffer format\n", task_pid_nr(task)));
2476 return -EINVAL; 2477 return -EINVAL;
2477 } 2478 }
2478 2479
@@ -2483,7 +2484,7 @@ pfm_setup_buffer_fmt(struct task_struct *task, struct file *filp, pfm_context_t
2483 2484
2484 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg); 2485 ret = pfm_buf_fmt_validate(fmt, task, ctx_flags, cpu, fmt_arg);
2485 2486
2486 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task->pid, ctx_flags, cpu, fmt_arg, ret)); 2487 DPRINT(("[%d] after validate(0x%x,%d,%p)=%d\n", task_pid_nr(task), ctx_flags, cpu, fmt_arg, ret));
2487 2488
2488 if (ret) goto error; 2489 if (ret) goto error;
2489 2490
@@ -2605,23 +2606,23 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2605 * no kernel task or task not owner by caller 2606 * no kernel task or task not owner by caller
2606 */ 2607 */
2607 if (task->mm == NULL) { 2608 if (task->mm == NULL) {
2608 DPRINT(("task [%d] has not memory context (kernel thread)\n", task->pid)); 2609 DPRINT(("task [%d] has not memory context (kernel thread)\n", task_pid_nr(task)));
2609 return -EPERM; 2610 return -EPERM;
2610 } 2611 }
2611 if (pfm_bad_permissions(task)) { 2612 if (pfm_bad_permissions(task)) {
2612 DPRINT(("no permission to attach to [%d]\n", task->pid)); 2613 DPRINT(("no permission to attach to [%d]\n", task_pid_nr(task)));
2613 return -EPERM; 2614 return -EPERM;
2614 } 2615 }
2615 /* 2616 /*
2616 * cannot block in self-monitoring mode 2617 * cannot block in self-monitoring mode
2617 */ 2618 */
2618 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) { 2619 if (CTX_OVFL_NOBLOCK(ctx) == 0 && task == current) {
2619 DPRINT(("cannot load a blocking context on self for [%d]\n", task->pid)); 2620 DPRINT(("cannot load a blocking context on self for [%d]\n", task_pid_nr(task)));
2620 return -EINVAL; 2621 return -EINVAL;
2621 } 2622 }
2622 2623
2623 if (task->exit_state == EXIT_ZOMBIE) { 2624 if (task->exit_state == EXIT_ZOMBIE) {
2624 DPRINT(("cannot attach to zombie task [%d]\n", task->pid)); 2625 DPRINT(("cannot attach to zombie task [%d]\n", task_pid_nr(task)));
2625 return -EBUSY; 2626 return -EBUSY;
2626 } 2627 }
2627 2628
@@ -2631,7 +2632,7 @@ pfm_task_incompatible(pfm_context_t *ctx, struct task_struct *task)
2631 if (task == current) return 0; 2632 if (task == current) return 0;
2632 2633
2633 if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) { 2634 if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
2634 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task->pid, task->state)); 2635 DPRINT(("cannot attach to non-stopped task [%d] state=%ld\n", task_pid_nr(task), task->state));
2635 return -EBUSY; 2636 return -EBUSY;
2636 } 2637 }
2637 /* 2638 /*
@@ -3512,7 +3513,7 @@ pfm_use_debug_registers(struct task_struct *task)
3512 3513
3513 if (pmu_conf->use_rr_dbregs == 0) return 0; 3514 if (pmu_conf->use_rr_dbregs == 0) return 0;
3514 3515
3515 DPRINT(("called for [%d]\n", task->pid)); 3516 DPRINT(("called for [%d]\n", task_pid_nr(task)));
3516 3517
3517 /* 3518 /*
3518 * do it only once 3519 * do it only once
@@ -3543,7 +3544,7 @@ pfm_use_debug_registers(struct task_struct *task)
3543 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n", 3544 DPRINT(("ptrace_use_dbregs=%u sys_use_dbregs=%u by [%d] ret = %d\n",
3544 pfm_sessions.pfs_ptrace_use_dbregs, 3545 pfm_sessions.pfs_ptrace_use_dbregs,
3545 pfm_sessions.pfs_sys_use_dbregs, 3546 pfm_sessions.pfs_sys_use_dbregs,
3546 task->pid, ret)); 3547 task_pid_nr(task), ret));
3547 3548
3548 UNLOCK_PFS(flags); 3549 UNLOCK_PFS(flags);
3549 3550
@@ -3568,7 +3569,7 @@ pfm_release_debug_registers(struct task_struct *task)
3568 3569
3569 LOCK_PFS(flags); 3570 LOCK_PFS(flags);
3570 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) { 3571 if (pfm_sessions.pfs_ptrace_use_dbregs == 0) {
3571 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task->pid); 3572 printk(KERN_ERR "perfmon: invalid release for [%d] ptrace_use_dbregs=0\n", task_pid_nr(task));
3572 ret = -1; 3573 ret = -1;
3573 } else { 3574 } else {
3574 pfm_sessions.pfs_ptrace_use_dbregs--; 3575 pfm_sessions.pfs_ptrace_use_dbregs--;
@@ -3620,7 +3621,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3620 3621
3621 /* sanity check */ 3622 /* sanity check */
3622 if (unlikely(task == NULL)) { 3623 if (unlikely(task == NULL)) {
3623 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", current->pid); 3624 printk(KERN_ERR "perfmon: [%d] pfm_restart no task\n", task_pid_nr(current));
3624 return -EINVAL; 3625 return -EINVAL;
3625 } 3626 }
3626 3627
@@ -3629,7 +3630,7 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3629 fmt = ctx->ctx_buf_fmt; 3630 fmt = ctx->ctx_buf_fmt;
3630 3631
3631 DPRINT(("restarting self %d ovfl=0x%lx\n", 3632 DPRINT(("restarting self %d ovfl=0x%lx\n",
3632 task->pid, 3633 task_pid_nr(task),
3633 ctx->ctx_ovfl_regs[0])); 3634 ctx->ctx_ovfl_regs[0]));
3634 3635
3635 if (CTX_HAS_SMPL(ctx)) { 3636 if (CTX_HAS_SMPL(ctx)) {
@@ -3653,11 +3654,11 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3653 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET); 3654 pfm_reset_regs(ctx, ctx->ctx_ovfl_regs, PFM_PMD_LONG_RESET);
3654 3655
3655 if (rst_ctrl.bits.mask_monitoring == 0) { 3656 if (rst_ctrl.bits.mask_monitoring == 0) {
3656 DPRINT(("resuming monitoring for [%d]\n", task->pid)); 3657 DPRINT(("resuming monitoring for [%d]\n", task_pid_nr(task)));
3657 3658
3658 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task); 3659 if (state == PFM_CTX_MASKED) pfm_restore_monitoring(task);
3659 } else { 3660 } else {
3660 DPRINT(("keeping monitoring stopped for [%d]\n", task->pid)); 3661 DPRINT(("keeping monitoring stopped for [%d]\n", task_pid_nr(task)));
3661 3662
3662 // cannot use pfm_stop_monitoring(task, regs); 3663 // cannot use pfm_stop_monitoring(task, regs);
3663 } 3664 }
@@ -3714,10 +3715,10 @@ pfm_restart(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
3714 * "self-monitoring". 3715 * "self-monitoring".
3715 */ 3716 */
3716 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) { 3717 if (CTX_OVFL_NOBLOCK(ctx) == 0 && state == PFM_CTX_MASKED) {
3717 DPRINT(("unblocking [%d] \n", task->pid)); 3718 DPRINT(("unblocking [%d] \n", task_pid_nr(task)));
3718 complete(&ctx->ctx_restart_done); 3719 complete(&ctx->ctx_restart_done);
3719 } else { 3720 } else {
3720 DPRINT(("[%d] armed exit trap\n", task->pid)); 3721 DPRINT(("[%d] armed exit trap\n", task_pid_nr(task)));
3721 3722
3722 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET; 3723 ctx->ctx_fl_trap_reason = PFM_TRAP_REASON_RESET;
3723 3724
@@ -3805,7 +3806,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
3805 * don't bother if we are loaded and task is being debugged 3806 * don't bother if we are loaded and task is being debugged
3806 */ 3807 */
3807 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) { 3808 if (is_loaded && (thread->flags & IA64_THREAD_DBG_VALID) != 0) {
3808 DPRINT(("debug registers already in use for [%d]\n", task->pid)); 3809 DPRINT(("debug registers already in use for [%d]\n", task_pid_nr(task)));
3809 return -EBUSY; 3810 return -EBUSY;
3810 } 3811 }
3811 3812
@@ -3846,7 +3847,7 @@ pfm_write_ibr_dbr(int mode, pfm_context_t *ctx, void *arg, int count, struct pt_
3846 * is shared by all processes running on it 3847 * is shared by all processes running on it
3847 */ 3848 */
3848 if (first_time && can_access_pmu) { 3849 if (first_time && can_access_pmu) {
3849 DPRINT(("[%d] clearing ibrs, dbrs\n", task->pid)); 3850 DPRINT(("[%d] clearing ibrs, dbrs\n", task_pid_nr(task)));
3850 for (i=0; i < pmu_conf->num_ibrs; i++) { 3851 for (i=0; i < pmu_conf->num_ibrs; i++) {
3851 ia64_set_ibr(i, 0UL); 3852 ia64_set_ibr(i, 0UL);
3852 ia64_dv_serialize_instruction(); 3853 ia64_dv_serialize_instruction();
@@ -4035,7 +4036,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4035 return -EBUSY; 4036 return -EBUSY;
4036 } 4037 }
4037 DPRINT(("task [%d] ctx_state=%d is_system=%d\n", 4038 DPRINT(("task [%d] ctx_state=%d is_system=%d\n",
4038 PFM_CTX_TASK(ctx)->pid, 4039 task_pid_nr(PFM_CTX_TASK(ctx)),
4039 state, 4040 state,
4040 is_system)); 4041 is_system));
4041 /* 4042 /*
@@ -4093,7 +4094,7 @@ pfm_stop(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4093 * monitoring disabled in kernel at next reschedule 4094 * monitoring disabled in kernel at next reschedule
4094 */ 4095 */
4095 ctx->ctx_saved_psr_up = 0; 4096 ctx->ctx_saved_psr_up = 0;
4096 DPRINT(("task=[%d]\n", task->pid)); 4097 DPRINT(("task=[%d]\n", task_pid_nr(task)));
4097 } 4098 }
4098 return 0; 4099 return 0;
4099} 4100}
@@ -4298,11 +4299,12 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4298 4299
4299 if (is_system) { 4300 if (is_system) {
4300 if (pfm_sessions.pfs_ptrace_use_dbregs) { 4301 if (pfm_sessions.pfs_ptrace_use_dbregs) {
4301 DPRINT(("cannot load [%d] dbregs in use\n", task->pid)); 4302 DPRINT(("cannot load [%d] dbregs in use\n",
4303 task_pid_nr(task)));
4302 ret = -EBUSY; 4304 ret = -EBUSY;
4303 } else { 4305 } else {
4304 pfm_sessions.pfs_sys_use_dbregs++; 4306 pfm_sessions.pfs_sys_use_dbregs++;
4305 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task->pid, pfm_sessions.pfs_sys_use_dbregs)); 4307 DPRINT(("load [%d] increased sys_use_dbreg=%u\n", task_pid_nr(task), pfm_sessions.pfs_sys_use_dbregs));
4306 set_dbregs = 1; 4308 set_dbregs = 1;
4307 } 4309 }
4308 } 4310 }
@@ -4394,7 +4396,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4394 4396
4395 /* allow user level control */ 4397 /* allow user level control */
4396 ia64_psr(regs)->sp = 0; 4398 ia64_psr(regs)->sp = 0;
4397 DPRINT(("clearing psr.sp for [%d]\n", task->pid)); 4399 DPRINT(("clearing psr.sp for [%d]\n", task_pid_nr(task)));
4398 4400
4399 SET_LAST_CPU(ctx, smp_processor_id()); 4401 SET_LAST_CPU(ctx, smp_processor_id());
4400 INC_ACTIVATION(); 4402 INC_ACTIVATION();
@@ -4429,7 +4431,7 @@ pfm_context_load(pfm_context_t *ctx, void *arg, int count, struct pt_regs *regs)
4429 */ 4431 */
4430 SET_PMU_OWNER(task, ctx); 4432 SET_PMU_OWNER(task, ctx);
4431 4433
4432 DPRINT(("context loaded on PMU for [%d]\n", task->pid)); 4434 DPRINT(("context loaded on PMU for [%d]\n", task_pid_nr(task)));
4433 } else { 4435 } else {
4434 /* 4436 /*
4435 * when not current, task MUST be stopped, so this is safe 4437 * when not current, task MUST be stopped, so this is safe
@@ -4493,7 +4495,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
4493 int prev_state, is_system; 4495 int prev_state, is_system;
4494 int ret; 4496 int ret;
4495 4497
4496 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task->pid : -1)); 4498 DPRINT(("ctx_state=%d task [%d]\n", ctx->ctx_state, task ? task_pid_nr(task) : -1));
4497 4499
4498 prev_state = ctx->ctx_state; 4500 prev_state = ctx->ctx_state;
4499 is_system = ctx->ctx_fl_system; 4501 is_system = ctx->ctx_fl_system;
@@ -4568,7 +4570,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
4568 */ 4570 */
4569 ia64_psr(regs)->sp = 1; 4571 ia64_psr(regs)->sp = 1;
4570 4572
4571 DPRINT(("setting psr.sp for [%d]\n", task->pid)); 4573 DPRINT(("setting psr.sp for [%d]\n", task_pid_nr(task)));
4572 } 4574 }
4573 /* 4575 /*
4574 * save PMDs to context 4576 * save PMDs to context
@@ -4608,7 +4610,7 @@ pfm_context_unload(pfm_context_t *ctx, void *arg, int count, struct pt_regs *reg
4608 ctx->ctx_fl_can_restart = 0; 4610 ctx->ctx_fl_can_restart = 0;
4609 ctx->ctx_fl_going_zombie = 0; 4611 ctx->ctx_fl_going_zombie = 0;
4610 4612
4611 DPRINT(("disconnected [%d] from context\n", task->pid)); 4613 DPRINT(("disconnected [%d] from context\n", task_pid_nr(task)));
4612 4614
4613 return 0; 4615 return 0;
4614} 4616}
@@ -4631,7 +4633,7 @@ pfm_exit_thread(struct task_struct *task)
4631 4633
4632 PROTECT_CTX(ctx, flags); 4634 PROTECT_CTX(ctx, flags);
4633 4635
4634 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task->pid)); 4636 DPRINT(("state=%d task [%d]\n", ctx->ctx_state, task_pid_nr(task)));
4635 4637
4636 state = ctx->ctx_state; 4638 state = ctx->ctx_state;
4637 switch(state) { 4639 switch(state) {
@@ -4640,13 +4642,13 @@ pfm_exit_thread(struct task_struct *task)
4640 * only comes to this function if pfm_context is not NULL, i.e., cannot 4642 * only comes to this function if pfm_context is not NULL, i.e., cannot
4641 * be in unloaded state 4643 * be in unloaded state
4642 */ 4644 */
4643 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task->pid); 4645 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] ctx unloaded\n", task_pid_nr(task));
4644 break; 4646 break;
4645 case PFM_CTX_LOADED: 4647 case PFM_CTX_LOADED:
4646 case PFM_CTX_MASKED: 4648 case PFM_CTX_MASKED:
4647 ret = pfm_context_unload(ctx, NULL, 0, regs); 4649 ret = pfm_context_unload(ctx, NULL, 0, regs);
4648 if (ret) { 4650 if (ret) {
4649 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret); 4651 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4650 } 4652 }
4651 DPRINT(("ctx unloaded for current state was %d\n", state)); 4653 DPRINT(("ctx unloaded for current state was %d\n", state));
4652 4654
@@ -4655,12 +4657,12 @@ pfm_exit_thread(struct task_struct *task)
4655 case PFM_CTX_ZOMBIE: 4657 case PFM_CTX_ZOMBIE:
4656 ret = pfm_context_unload(ctx, NULL, 0, regs); 4658 ret = pfm_context_unload(ctx, NULL, 0, regs);
4657 if (ret) { 4659 if (ret) {
4658 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task->pid, state, ret); 4660 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] state=%d unload failed %d\n", task_pid_nr(task), state, ret);
4659 } 4661 }
4660 free_ok = 1; 4662 free_ok = 1;
4661 break; 4663 break;
4662 default: 4664 default:
4663 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task->pid, state); 4665 printk(KERN_ERR "perfmon: pfm_exit_thread [%d] unexpected state=%d\n", task_pid_nr(task), state);
4664 break; 4666 break;
4665 } 4667 }
4666 UNPROTECT_CTX(ctx, flags); 4668 UNPROTECT_CTX(ctx, flags);
@@ -4744,7 +4746,7 @@ recheck:
4744 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n", 4746 DPRINT(("context %d state=%d [%d] task_state=%ld must_stop=%d\n",
4745 ctx->ctx_fd, 4747 ctx->ctx_fd,
4746 state, 4748 state,
4747 task->pid, 4749 task_pid_nr(task),
4748 task->state, PFM_CMD_STOPPED(cmd))); 4750 task->state, PFM_CMD_STOPPED(cmd)));
4749 4751
4750 /* 4752 /*
@@ -4791,7 +4793,7 @@ recheck:
4791 */ 4793 */
4792 if (PFM_CMD_STOPPED(cmd)) { 4794 if (PFM_CMD_STOPPED(cmd)) {
4793 if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) { 4795 if ((task->state != TASK_STOPPED) && (task->state != TASK_TRACED)) {
4794 DPRINT(("[%d] task not in stopped state\n", task->pid)); 4796 DPRINT(("[%d] task not in stopped state\n", task_pid_nr(task)));
4795 return -EBUSY; 4797 return -EBUSY;
4796 } 4798 }
4797 /* 4799 /*
@@ -4884,7 +4886,7 @@ restart_args:
4884 * limit abuse to min page size 4886 * limit abuse to min page size
4885 */ 4887 */
4886 if (unlikely(sz > PFM_MAX_ARGSIZE)) { 4888 if (unlikely(sz > PFM_MAX_ARGSIZE)) {
4887 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", current->pid, sz); 4889 printk(KERN_ERR "perfmon: [%d] argument too big %lu\n", task_pid_nr(current), sz);
4888 return -E2BIG; 4890 return -E2BIG;
4889 } 4891 }
4890 4892
@@ -5031,11 +5033,11 @@ pfm_context_force_terminate(pfm_context_t *ctx, struct pt_regs *regs)
5031{ 5033{
5032 int ret; 5034 int ret;
5033 5035
5034 DPRINT(("entering for [%d]\n", current->pid)); 5036 DPRINT(("entering for [%d]\n", task_pid_nr(current)));
5035 5037
5036 ret = pfm_context_unload(ctx, NULL, 0, regs); 5038 ret = pfm_context_unload(ctx, NULL, 0, regs);
5037 if (ret) { 5039 if (ret) {
5038 printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", current->pid, ret); 5040 printk(KERN_ERR "pfm_context_force_terminate: [%d] unloaded failed with %d\n", task_pid_nr(current), ret);
5039 } 5041 }
5040 5042
5041 /* 5043 /*
@@ -5072,7 +5074,7 @@ pfm_handle_work(void)
5072 5074
5073 ctx = PFM_GET_CTX(current); 5075 ctx = PFM_GET_CTX(current);
5074 if (ctx == NULL) { 5076 if (ctx == NULL) {
5075 printk(KERN_ERR "perfmon: [%d] has no PFM context\n", current->pid); 5077 printk(KERN_ERR "perfmon: [%d] has no PFM context\n", task_pid_nr(current));
5076 return; 5078 return;
5077 } 5079 }
5078 5080
@@ -5269,7 +5271,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
5269 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s " 5271 DPRINT_ovfl(("pmc0=0x%lx pid=%d iip=0x%lx, %s "
5270 "used_pmds=0x%lx\n", 5272 "used_pmds=0x%lx\n",
5271 pmc0, 5273 pmc0,
5272 task ? task->pid: -1, 5274 task ? task_pid_nr(task): -1,
5273 (regs ? regs->cr_iip : 0), 5275 (regs ? regs->cr_iip : 0),
5274 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking", 5276 CTX_OVFL_NOBLOCK(ctx) ? "nonblocking" : "blocking",
5275 ctx->ctx_used_pmds[0])); 5277 ctx->ctx_used_pmds[0]));
@@ -5458,7 +5460,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
5458 } 5460 }
5459 5461
5460 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n", 5462 DPRINT_ovfl(("owner [%d] pending=%ld reason=%u ovfl_pmds=0x%lx ovfl_notify=0x%lx masked=%d\n",
5461 GET_PMU_OWNER() ? GET_PMU_OWNER()->pid : -1, 5463 GET_PMU_OWNER() ? task_pid_nr(GET_PMU_OWNER()) : -1,
5462 PFM_GET_WORK_PENDING(task), 5464 PFM_GET_WORK_PENDING(task),
5463 ctx->ctx_fl_trap_reason, 5465 ctx->ctx_fl_trap_reason,
5464 ovfl_pmds, 5466 ovfl_pmds,
@@ -5483,7 +5485,7 @@ pfm_overflow_handler(struct task_struct *task, pfm_context_t *ctx, u64 pmc0, str
5483sanity_check: 5485sanity_check:
5484 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n", 5486 printk(KERN_ERR "perfmon: CPU%d overflow handler [%d] pmc0=0x%lx\n",
5485 smp_processor_id(), 5487 smp_processor_id(),
5486 task ? task->pid : -1, 5488 task ? task_pid_nr(task) : -1,
5487 pmc0); 5489 pmc0);
5488 return; 5490 return;
5489 5491
@@ -5516,7 +5518,7 @@ stop_monitoring:
5516 * 5518 *
5517 * Overall pretty hairy stuff.... 5519 * Overall pretty hairy stuff....
5518 */ 5520 */
5519 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task->pid: -1)); 5521 DPRINT(("ctx is zombie for [%d], converted to spurious\n", task ? task_pid_nr(task): -1));
5520 pfm_clear_psr_up(); 5522 pfm_clear_psr_up();
5521 ia64_psr(regs)->up = 0; 5523 ia64_psr(regs)->up = 0;
5522 ia64_psr(regs)->sp = 1; 5524 ia64_psr(regs)->sp = 1;
@@ -5577,13 +5579,13 @@ pfm_do_interrupt_handler(int irq, void *arg, struct pt_regs *regs)
5577 5579
5578report_spurious1: 5580report_spurious1:
5579 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n", 5581 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d has no PFM context\n",
5580 this_cpu, task->pid); 5582 this_cpu, task_pid_nr(task));
5581 pfm_unfreeze_pmu(); 5583 pfm_unfreeze_pmu();
5582 return -1; 5584 return -1;
5583report_spurious2: 5585report_spurious2:
5584 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n", 5586 printk(KERN_INFO "perfmon: spurious overflow interrupt on CPU%d: process %d, invalid flag\n",
5585 this_cpu, 5587 this_cpu,
5586 task->pid); 5588 task_pid_nr(task));
5587 pfm_unfreeze_pmu(); 5589 pfm_unfreeze_pmu();
5588 return -1; 5590 return -1;
5589} 5591}
@@ -5870,7 +5872,8 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5870 ia64_psr(regs)->sp = 1; 5872 ia64_psr(regs)->sp = 1;
5871 5873
5872 if (GET_PMU_OWNER() == task) { 5874 if (GET_PMU_OWNER() == task) {
5873 DPRINT(("cleared ownership for [%d]\n", ctx->ctx_task->pid)); 5875 DPRINT(("cleared ownership for [%d]\n",
5876 task_pid_nr(ctx->ctx_task)));
5874 SET_PMU_OWNER(NULL, NULL); 5877 SET_PMU_OWNER(NULL, NULL);
5875 } 5878 }
5876 5879
@@ -5882,7 +5885,7 @@ pfm_force_cleanup(pfm_context_t *ctx, struct pt_regs *regs)
5882 task->thread.pfm_context = NULL; 5885 task->thread.pfm_context = NULL;
5883 task->thread.flags &= ~IA64_THREAD_PM_VALID; 5886 task->thread.flags &= ~IA64_THREAD_PM_VALID;
5884 5887
5885 DPRINT(("force cleanup for [%d]\n", task->pid)); 5888 DPRINT(("force cleanup for [%d]\n", task_pid_nr(task)));
5886} 5889}
5887 5890
5888 5891
@@ -6426,7 +6429,7 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6426 6429
6427 if (PMD_IS_COUNTING(i)) { 6430 if (PMD_IS_COUNTING(i)) {
6428 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n", 6431 DPRINT(("[%d] pmd[%d] ctx_pmd=0x%lx hw_pmd=0x%lx\n",
6429 task->pid, 6432 task_pid_nr(task),
6430 i, 6433 i,
6431 ctx->ctx_pmds[i].val, 6434 ctx->ctx_pmds[i].val,
6432 val & ovfl_val)); 6435 val & ovfl_val));
@@ -6448,11 +6451,11 @@ pfm_flush_pmds(struct task_struct *task, pfm_context_t *ctx)
6448 */ 6451 */
6449 if (pmc0 & (1UL << i)) { 6452 if (pmc0 & (1UL << i)) {
6450 val += 1 + ovfl_val; 6453 val += 1 + ovfl_val;
6451 DPRINT(("[%d] pmd[%d] overflowed\n", task->pid, i)); 6454 DPRINT(("[%d] pmd[%d] overflowed\n", task_pid_nr(task), i));
6452 } 6455 }
6453 } 6456 }
6454 6457
6455 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task->pid, i, val, pmd_val)); 6458 DPRINT(("[%d] ctx_pmd[%d]=0x%lx pmd_val=0x%lx\n", task_pid_nr(task), i, val, pmd_val));
6456 6459
6457 if (is_self) ctx->th_pmds[i] = pmd_val; 6460 if (is_self) ctx->th_pmds[i] = pmd_val;
6458 6461
@@ -6793,14 +6796,14 @@ dump_pmu_state(const char *from)
6793 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n", 6796 printk("CPU%d from %s() current [%d] iip=0x%lx %s\n",
6794 this_cpu, 6797 this_cpu,
6795 from, 6798 from,
6796 current->pid, 6799 task_pid_nr(current),
6797 regs->cr_iip, 6800 regs->cr_iip,
6798 current->comm); 6801 current->comm);
6799 6802
6800 task = GET_PMU_OWNER(); 6803 task = GET_PMU_OWNER();
6801 ctx = GET_PMU_CTX(); 6804 ctx = GET_PMU_CTX();
6802 6805
6803 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task->pid : -1, ctx); 6806 printk("->CPU%d owner [%d] ctx=%p\n", this_cpu, task ? task_pid_nr(task) : -1, ctx);
6804 6807
6805 psr = pfm_get_psr(); 6808 psr = pfm_get_psr();
6806 6809
@@ -6848,7 +6851,7 @@ pfm_inherit(struct task_struct *task, struct pt_regs *regs)
6848{ 6851{
6849 struct thread_struct *thread; 6852 struct thread_struct *thread;
6850 6853
6851 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task->pid)); 6854 DPRINT(("perfmon: pfm_inherit clearing state for [%d]\n", task_pid_nr(task)));
6852 6855
6853 thread = &task->thread; 6856 thread = &task->thread;
6854 6857
diff --git a/arch/ia64/kernel/perfmon_default_smpl.c b/arch/ia64/kernel/perfmon_default_smpl.c
index ff80eab83b38..a7af1cb419f9 100644
--- a/arch/ia64/kernel/perfmon_default_smpl.c
+++ b/arch/ia64/kernel/perfmon_default_smpl.c
@@ -44,11 +44,11 @@ default_validate(struct task_struct *task, unsigned int flags, int cpu, void *da
44 int ret = 0; 44 int ret = 0;
45 45
46 if (data == NULL) { 46 if (data == NULL) {
47 DPRINT(("[%d] no argument passed\n", task->pid)); 47 DPRINT(("[%d] no argument passed\n", task_pid_nr(task)));
48 return -EINVAL; 48 return -EINVAL;
49 } 49 }
50 50
51 DPRINT(("[%d] validate flags=0x%x CPU%d\n", task->pid, flags, cpu)); 51 DPRINT(("[%d] validate flags=0x%x CPU%d\n", task_pid_nr(task), flags, cpu));
52 52
53 /* 53 /*
54 * must hold at least the buffer header + one minimally sized entry 54 * must hold at least the buffer header + one minimally sized entry
@@ -88,7 +88,7 @@ default_init(struct task_struct *task, void *buf, unsigned int flags, int cpu, v
88 hdr->hdr_count = 0UL; 88 hdr->hdr_count = 0UL;
89 89
90 DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n", 90 DPRINT(("[%d] buffer=%p buf_size=%lu hdr_size=%lu hdr_version=%u cur_offs=%lu\n",
91 task->pid, 91 task_pid_nr(task),
92 buf, 92 buf,
93 hdr->hdr_buf_size, 93 hdr->hdr_buf_size,
94 sizeof(*hdr), 94 sizeof(*hdr),
@@ -245,7 +245,7 @@ default_restart(struct task_struct *task, pfm_ovfl_ctrl_t *ctrl, void *buf, stru
245static int 245static int
246default_exit(struct task_struct *task, void *buf, struct pt_regs *regs) 246default_exit(struct task_struct *task, void *buf, struct pt_regs *regs)
247{ 247{
248 DPRINT(("[%d] exit(%p)\n", task->pid, buf)); 248 DPRINT(("[%d] exit(%p)\n", task_pid_nr(task), buf));
249 return 0; 249 return 0;
250} 250}
251 251
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index c613fc0e91cc..2418289ee5ca 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -105,7 +105,8 @@ show_regs (struct pt_regs *regs)
105 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri; 105 unsigned long ip = regs->cr_iip + ia64_psr(regs)->ri;
106 106
107 print_modules(); 107 print_modules();
108 printk("\nPid: %d, CPU %d, comm: %20s\n", current->pid, smp_processor_id(), current->comm); 108 printk("\nPid: %d, CPU %d, comm: %20s\n", task_pid_nr(current),
109 smp_processor_id(), current->comm);
109 printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n", 110 printk("psr : %016lx ifs : %016lx ip : [<%016lx>] %s\n",
110 regs->cr_ipsr, regs->cr_ifs, ip, print_tainted()); 111 regs->cr_ipsr, regs->cr_ifs, ip, print_tainted());
111 print_symbol("ip is at %s\n", ip); 112 print_symbol("ip is at %s\n", ip);
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index c5cfcfa4c87c..cbf67f1aa291 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -208,6 +208,48 @@ static int __init register_memory(void)
208 208
209__initcall(register_memory); 209__initcall(register_memory);
210 210
211
212#ifdef CONFIG_KEXEC
213static void __init setup_crashkernel(unsigned long total, int *n)
214{
215 unsigned long long base = 0, size = 0;
216 int ret;
217
218 ret = parse_crashkernel(boot_command_line, total,
219 &size, &base);
220 if (ret == 0 && size > 0) {
221 if (!base) {
222 sort_regions(rsvd_region, *n);
223 base = kdump_find_rsvd_region(size,
224 rsvd_region, *n);
225 }
226 if (base != ~0UL) {
227 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
228 "for crashkernel (System RAM: %ldMB)\n",
229 (unsigned long)(size >> 20),
230 (unsigned long)(base >> 20),
231 (unsigned long)(total >> 20));
232 rsvd_region[*n].start =
233 (unsigned long)__va(base);
234 rsvd_region[*n].end =
235 (unsigned long)__va(base + size);
236 (*n)++;
237 crashk_res.start = base;
238 crashk_res.end = base + size - 1;
239 }
240 }
241 efi_memmap_res.start = ia64_boot_param->efi_memmap;
242 efi_memmap_res.end = efi_memmap_res.start +
243 ia64_boot_param->efi_memmap_size;
244 boot_param_res.start = __pa(ia64_boot_param);
245 boot_param_res.end = boot_param_res.start +
246 sizeof(*ia64_boot_param);
247}
248#else
249static inline void __init setup_crashkernel(unsigned long total, int *n)
250{}
251#endif
252
211/** 253/**
212 * reserve_memory - setup reserved memory areas 254 * reserve_memory - setup reserved memory areas
213 * 255 *
@@ -219,6 +261,7 @@ void __init
219reserve_memory (void) 261reserve_memory (void)
220{ 262{
221 int n = 0; 263 int n = 0;
264 unsigned long total_memory;
222 265
223 /* 266 /*
224 * none of the entries in this table overlap 267 * none of the entries in this table overlap
@@ -254,50 +297,11 @@ reserve_memory (void)
254 n++; 297 n++;
255#endif 298#endif
256 299
257 efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end); 300 total_memory = efi_memmap_init(&rsvd_region[n].start, &rsvd_region[n].end);
258 n++; 301 n++;
259 302
260#ifdef CONFIG_KEXEC 303 setup_crashkernel(total_memory, &n);
261 /* crashkernel=size@offset specifies the size to reserve for a crash 304
262 * kernel. If offset is 0, then it is determined automatically.
263 * By reserving this memory we guarantee that linux never set's it
264 * up as a DMA target.Useful for holding code to do something
265 * appropriate after a kernel panic.
266 */
267 {
268 char *from = strstr(boot_command_line, "crashkernel=");
269 unsigned long base, size;
270 if (from) {
271 size = memparse(from + 12, &from);
272 if (*from == '@')
273 base = memparse(from+1, &from);
274 else
275 base = 0;
276 if (size) {
277 if (!base) {
278 sort_regions(rsvd_region, n);
279 base = kdump_find_rsvd_region(size,
280 rsvd_region, n);
281 }
282 if (base != ~0UL) {
283 rsvd_region[n].start =
284 (unsigned long)__va(base);
285 rsvd_region[n].end =
286 (unsigned long)__va(base + size);
287 n++;
288 crashk_res.start = base;
289 crashk_res.end = base + size - 1;
290 }
291 }
292 }
293 efi_memmap_res.start = ia64_boot_param->efi_memmap;
294 efi_memmap_res.end = efi_memmap_res.start +
295 ia64_boot_param->efi_memmap_size;
296 boot_param_res.start = __pa(ia64_boot_param);
297 boot_param_res.end = boot_param_res.start +
298 sizeof(*ia64_boot_param);
299 }
300#endif
301 /* end of memory marker */ 305 /* end of memory marker */
302 rsvd_region[n].start = ~0UL; 306 rsvd_region[n].start = ~0UL;
303 rsvd_region[n].end = ~0UL; 307 rsvd_region[n].end = ~0UL;
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index aeec8184e862..cdb64cc4d9c8 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -227,7 +227,7 @@ ia64_rt_sigreturn (struct sigscratch *scr)
227 si.si_signo = SIGSEGV; 227 si.si_signo = SIGSEGV;
228 si.si_errno = 0; 228 si.si_errno = 0;
229 si.si_code = SI_KERNEL; 229 si.si_code = SI_KERNEL;
230 si.si_pid = current->pid; 230 si.si_pid = task_pid_vnr(current);
231 si.si_uid = current->uid; 231 si.si_uid = current->uid;
232 si.si_addr = sc; 232 si.si_addr = sc;
233 force_sig_info(SIGSEGV, &si, current); 233 force_sig_info(SIGSEGV, &si, current);
@@ -332,7 +332,7 @@ force_sigsegv_info (int sig, void __user *addr)
332 si.si_signo = SIGSEGV; 332 si.si_signo = SIGSEGV;
333 si.si_errno = 0; 333 si.si_errno = 0;
334 si.si_code = SI_KERNEL; 334 si.si_code = SI_KERNEL;
335 si.si_pid = current->pid; 335 si.si_pid = task_pid_vnr(current);
336 si.si_uid = current->uid; 336 si.si_uid = current->uid;
337 si.si_addr = addr; 337 si.si_addr = addr;
338 force_sig_info(SIGSEGV, &si, current); 338 force_sig_info(SIGSEGV, &si, current);
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index 3aeaf15e468b..78d65cb947d2 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -61,7 +61,7 @@ die (const char *str, struct pt_regs *regs, long err)
61 61
62 if (++die.lock_owner_depth < 3) { 62 if (++die.lock_owner_depth < 3) {
63 printk("%s[%d]: %s %ld [%d]\n", 63 printk("%s[%d]: %s %ld [%d]\n",
64 current->comm, current->pid, str, err, ++die_counter); 64 current->comm, task_pid_nr(current), str, err, ++die_counter);
65 (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV); 65 (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
66 show_regs(regs); 66 show_regs(regs);
67 } else 67 } else
@@ -315,7 +315,7 @@ handle_fpu_swa (int fp_fault, struct pt_regs *regs, unsigned long isr)
315 last.time = current_jiffies + 5 * HZ; 315 last.time = current_jiffies + 5 * HZ;
316 printk(KERN_WARNING 316 printk(KERN_WARNING
317 "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n", 317 "%s(%d): floating-point assist fault at ip %016lx, isr %016lx\n",
318 current->comm, current->pid, regs->cr_iip + ia64_psr(regs)->ri, isr); 318 current->comm, task_pid_nr(current), regs->cr_iip + ia64_psr(regs)->ri, isr);
319 } 319 }
320 } 320 }
321 } 321 }
@@ -453,7 +453,7 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
453 if (code == 8) { 453 if (code == 8) {
454# ifdef CONFIG_IA64_PRINT_HAZARDS 454# ifdef CONFIG_IA64_PRINT_HAZARDS
455 printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n", 455 printk("%s[%d]: possible hazard @ ip=%016lx (pr = %016lx)\n",
456 current->comm, current->pid, 456 current->comm, task_pid_nr(current),
457 regs.cr_iip + ia64_psr(&regs)->ri, regs.pr); 457 regs.cr_iip + ia64_psr(&regs)->ri, regs.pr);
458# endif 458# endif
459 return; 459 return;
diff --git a/arch/ia64/kernel/unaligned.c b/arch/ia64/kernel/unaligned.c
index fe6aa5a9f8fa..2173de9fe917 100644
--- a/arch/ia64/kernel/unaligned.c
+++ b/arch/ia64/kernel/unaligned.c
@@ -1340,7 +1340,8 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
1340 size_t len; 1340 size_t len;
1341 1341
1342 len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, " 1342 len = sprintf(buf, "%s(%d): unaligned access to 0x%016lx, "
1343 "ip=0x%016lx\n\r", current->comm, current->pid, 1343 "ip=0x%016lx\n\r", current->comm,
1344 task_pid_nr(current),
1344 ifa, regs->cr_iip + ipsr->ri); 1345 ifa, regs->cr_iip + ipsr->ri);
1345 /* 1346 /*
1346 * Don't call tty_write_message() if we're in the kernel; we might 1347 * Don't call tty_write_message() if we're in the kernel; we might
@@ -1363,7 +1364,7 @@ ia64_handle_unaligned (unsigned long ifa, struct pt_regs *regs)
1363 "administrator\n" 1364 "administrator\n"
1364 "echo 0 > /proc/sys/kernel/ignore-" 1365 "echo 0 > /proc/sys/kernel/ignore-"
1365 "unaligned-usertrap to re-enable\n", 1366 "unaligned-usertrap to re-enable\n",
1366 current->comm, current->pid); 1367 current->comm, task_pid_nr(current));
1367 } 1368 }
1368 } 1369 }
1369 } else { 1370 } else {
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 32f26253c4e8..7571076a16a1 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -274,7 +274,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
274 274
275 out_of_memory: 275 out_of_memory:
276 up_read(&mm->mmap_sem); 276 up_read(&mm->mmap_sem);
277 if (is_init(current)) { 277 if (is_global_init(current)) {
278 yield(); 278 yield();
279 down_read(&mm->mmap_sem); 279 down_read(&mm->mmap_sem);
280 goto survive; 280 goto survive;
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 3e10152abbf0..c6c19bf11bec 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -127,8 +127,8 @@ ia64_init_addr_space (void)
127 vma->vm_mm = current->mm; 127 vma->vm_mm = current->mm;
128 vma->vm_start = current->thread.rbs_bot & PAGE_MASK; 128 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
129 vma->vm_end = vma->vm_start + PAGE_SIZE; 129 vma->vm_end = vma->vm_start + PAGE_SIZE;
130 vma->vm_page_prot = protection_map[VM_DATA_DEFAULT_FLAGS & 0x7];
131 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; 130 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
131 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
132 down_write(&current->mm->mmap_sem); 132 down_write(&current->mm->mmap_sem);
133 if (insert_vm_struct(current->mm, vma)) { 133 if (insert_vm_struct(current->mm, vma)) {
134 up_write(&current->mm->mmap_sem); 134 up_write(&current->mm->mmap_sem);
diff --git a/arch/ia64/oprofile/Kconfig b/arch/ia64/oprofile/Kconfig
deleted file mode 100644
index 97271ab484dc..000000000000
--- a/arch/ia64/oprofile/Kconfig
+++ /dev/null
@@ -1,20 +0,0 @@
1config PROFILING
2 bool "Profiling support (EXPERIMENTAL)"
3 help
4 Say Y here to enable the extended profiling support mechanisms used
5 by profilers such as OProfile.
6
7config OPROFILE
8 tristate "OProfile system profiling (EXPERIMENTAL)"
9 depends on PROFILING
10 help
11 OProfile is a profiling system capable of profiling the
12 whole system, include the kernel, kernel modules, libraries,
13 and applications.
14
15 Due to firmware bugs, you may need to use the "nohalt" boot
16 option if you're using OProfile with the hardware performance
17 counters.
18
19 If unsure, say N.
20
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig
index bd5fe76401f1..ab9a264cb194 100644
--- a/arch/m32r/Kconfig
+++ b/arch/m32r/Kconfig
@@ -426,7 +426,7 @@ source "drivers/Kconfig"
426 426
427source "fs/Kconfig" 427source "fs/Kconfig"
428 428
429source "arch/m32r/oprofile/Kconfig" 429source "kernel/Kconfig.instrumentation"
430 430
431source "arch/m32r/Kconfig.debug" 431source "arch/m32r/Kconfig.debug"
432 432
diff --git a/arch/m32r/kernel/traps.c b/arch/m32r/kernel/traps.c
index 97e0b1c0830e..89ba4a0b5d51 100644
--- a/arch/m32r/kernel/traps.c
+++ b/arch/m32r/kernel/traps.c
@@ -196,7 +196,7 @@ static void show_registers(struct pt_regs *regs)
196 printk("SPI: %08lx\n", sp); 196 printk("SPI: %08lx\n", sp);
197 } 197 }
198 printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)", 198 printk("Process %s (pid: %d, process nr: %d, stackpage=%08lx)",
199 current->comm, current->pid, 0xffff & i, 4096+(unsigned long)current); 199 current->comm, task_pid_nr(current), 0xffff & i, 4096+(unsigned long)current);
200 200
201 /* 201 /*
202 * When in-kernel, we also print out the stack and code at the 202 * When in-kernel, we also print out the stack and code at the
diff --git a/arch/m32r/mm/fault.c b/arch/m32r/mm/fault.c
index 70a766aad3e0..4a71df4c1b30 100644
--- a/arch/m32r/mm/fault.c
+++ b/arch/m32r/mm/fault.c
@@ -271,7 +271,7 @@ no_context:
271 */ 271 */
272out_of_memory: 272out_of_memory:
273 up_read(&mm->mmap_sem); 273 up_read(&mm->mmap_sem);
274 if (is_init(tsk)) { 274 if (is_global_init(tsk)) {
275 yield(); 275 yield();
276 down_read(&mm->mmap_sem); 276 down_read(&mm->mmap_sem);
277 goto survive; 277 goto survive;
diff --git a/arch/m32r/oprofile/Kconfig b/arch/m32r/oprofile/Kconfig
deleted file mode 100644
index 19d37730b664..000000000000
--- a/arch/m32r/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, include the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index 20a9c08e59c3..01dee84f840a 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -683,6 +683,8 @@ endmenu
683 683
684source "fs/Kconfig" 684source "fs/Kconfig"
685 685
686source "kernel/Kconfig.instrumentation"
687
686source "arch/m68k/Kconfig.debug" 688source "arch/m68k/Kconfig.debug"
687 689
688source "security/Kconfig" 690source "security/Kconfig"
diff --git a/arch/m68k/kernel/traps.c b/arch/m68k/kernel/traps.c
index 4e2752a0e89b..97f556fa4932 100644
--- a/arch/m68k/kernel/traps.c
+++ b/arch/m68k/kernel/traps.c
@@ -900,7 +900,7 @@ void show_registers(struct pt_regs *regs)
900 regs->d4, regs->d5, regs->a0, regs->a1); 900 regs->d4, regs->d5, regs->a0, regs->a1);
901 901
902 printk("Process %s (pid: %d, task=%p)\n", 902 printk("Process %s (pid: %d, task=%p)\n",
903 current->comm, current->pid, current); 903 current->comm, task_pid_nr(current), current);
904 addr = (unsigned long)&fp->un; 904 addr = (unsigned long)&fp->un;
905 printk("Frame format=%X ", regs->format); 905 printk("Frame format=%X ", regs->format);
906 switch (regs->format) { 906 switch (regs->format) {
@@ -1038,7 +1038,7 @@ void bad_super_trap (struct frame *fp)
1038 fp->un.fmtb.daddr, space_names[ssw & DFC], 1038 fp->un.fmtb.daddr, space_names[ssw & DFC],
1039 fp->ptregs.pc); 1039 fp->ptregs.pc);
1040 } 1040 }
1041 printk ("Current process id is %d\n", current->pid); 1041 printk ("Current process id is %d\n", task_pid_nr(current));
1042 die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0); 1042 die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
1043} 1043}
1044 1044
diff --git a/arch/m68k/mm/fault.c b/arch/m68k/mm/fault.c
index eaa618681159..f493f03231d5 100644
--- a/arch/m68k/mm/fault.c
+++ b/arch/m68k/mm/fault.c
@@ -180,7 +180,7 @@ good_area:
180 */ 180 */
181out_of_memory: 181out_of_memory:
182 up_read(&mm->mmap_sem); 182 up_read(&mm->mmap_sem);
183 if (is_init(current)) { 183 if (is_global_init(current)) {
184 yield(); 184 yield();
185 down_read(&mm->mmap_sem); 185 down_read(&mm->mmap_sem);
186 goto survive; 186 goto survive;
diff --git a/arch/m68knommu/Kconfig b/arch/m68knommu/Kconfig
index 185906b54cb0..f52c627bdadd 100644
--- a/arch/m68knommu/Kconfig
+++ b/arch/m68knommu/Kconfig
@@ -696,6 +696,8 @@ source "drivers/Kconfig"
696 696
697source "fs/Kconfig" 697source "fs/Kconfig"
698 698
699source "kernel/Kconfig.instrumentation"
700
699source "arch/m68knommu/Kconfig.debug" 701source "arch/m68knommu/Kconfig.debug"
700 702
701source "security/Kconfig" 703source "security/Kconfig"
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cb027580cd1d..4dc142d394a3 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2005,7 +2005,7 @@ source "drivers/Kconfig"
2005 2005
2006source "fs/Kconfig" 2006source "fs/Kconfig"
2007 2007
2008source "arch/mips/oprofile/Kconfig" 2008source "kernel/Kconfig.instrumentation"
2009 2009
2010source "arch/mips/Kconfig.debug" 2010source "arch/mips/Kconfig.debug"
2011 2011
diff --git a/arch/mips/au1000/pb1200/irqmap.c b/arch/mips/au1000/pb1200/irqmap.c
index 5f48b0603796..bdf00e2a35e4 100644
--- a/arch/mips/au1000/pb1200/irqmap.c
+++ b/arch/mips/au1000/pb1200/irqmap.c
@@ -36,8 +36,8 @@
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/random.h> 37#include <linux/random.h>
38#include <linux/delay.h> 38#include <linux/delay.h>
39#include <linux/bitops.h>
39 40
40#include <asm/bitops.h>
41#include <asm/bootinfo.h> 41#include <asm/bootinfo.h>
42#include <asm/io.h> 42#include <asm/io.h>
43#include <asm/mipsregs.h> 43#include <asm/mipsregs.h>
diff --git a/arch/mips/basler/excite/excite_irq.c b/arch/mips/basler/excite/excite_irq.c
index 1ecab6350421..4903e067916b 100644
--- a/arch/mips/basler/excite/excite_irq.c
+++ b/arch/mips/basler/excite/excite_irq.c
@@ -29,7 +29,7 @@
29#include <linux/timex.h> 29#include <linux/timex.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/random.h> 31#include <linux/random.h>
32#include <asm/bitops.h> 32#include <linux/bitops.h>
33#include <asm/bootinfo.h> 33#include <asm/bootinfo.h>
34#include <asm/io.h> 34#include <asm/io.h>
35#include <asm/irq.h> 35#include <asm/irq.h>
diff --git a/arch/mips/bcm47xx/time.c b/arch/mips/bcm47xx/time.c
index 0ab4676c8bd3..0c6f47b3fd94 100644
--- a/arch/mips/bcm47xx/time.c
+++ b/arch/mips/bcm47xx/time.c
@@ -46,10 +46,3 @@ void __init plat_time_init(void)
46 /* Set MIPS counter frequency for fixed_rate_gettimeoffset() */ 46 /* Set MIPS counter frequency for fixed_rate_gettimeoffset() */
47 mips_hpt_frequency = hz; 47 mips_hpt_frequency = hz;
48} 48}
49
50void __init
51plat_timer_setup(struct irqaction *irq)
52{
53 /* Enable the timer interrupt */
54 setup_irq(7, irq);
55}
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 49bcc58929ba..892d4c38fd0d 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -175,6 +175,7 @@ CONFIG_POSIX_MQUEUE=y
175CONFIG_IKCONFIG=y 175CONFIG_IKCONFIG=y
176CONFIG_IKCONFIG_PROC=y 176CONFIG_IKCONFIG_PROC=y
177CONFIG_LOG_BUF_SHIFT=15 177CONFIG_LOG_BUF_SHIFT=15
178CONFIG_CGROUPS=y
178CONFIG_CPUSETS=y 179CONFIG_CPUSETS=y
179CONFIG_SYSFS_DEPRECATED=y 180CONFIG_SYSFS_DEPRECATED=y
180CONFIG_RELAY=y 181CONFIG_RELAY=y
diff --git a/arch/mips/configs/mipssim_defconfig b/arch/mips/configs/mipssim_defconfig
index 86dcb7464353..61b72f5a953e 100644
--- a/arch/mips/configs/mipssim_defconfig
+++ b/arch/mips/configs/mipssim_defconfig
@@ -1,71 +1,68 @@
1# 1#
2# Automatically generated make config: don't edit 2# Automatically generated make config: don't edit
3# Linux kernel version: 2.6.20 3# Linux kernel version: 2.6.23
4# Tue Feb 20 21:47:35 2007 4# Thu Oct 18 22:45:52 2007
5# 5#
6CONFIG_MIPS=y 6CONFIG_MIPS=y
7 7
8# 8#
9# Machine selection 9# Machine selection
10# 10#
11CONFIG_ZONE_DMA=y 11# CONFIG_MACH_ALCHEMY is not set
12# CONFIG_MIPS_MTX1 is not set
13# CONFIG_MIPS_BOSPORUS is not set
14# CONFIG_MIPS_PB1000 is not set
15# CONFIG_MIPS_PB1100 is not set
16# CONFIG_MIPS_PB1500 is not set
17# CONFIG_MIPS_PB1550 is not set
18# CONFIG_MIPS_PB1200 is not set
19# CONFIG_MIPS_DB1000 is not set
20# CONFIG_MIPS_DB1100 is not set
21# CONFIG_MIPS_DB1500 is not set
22# CONFIG_MIPS_DB1550 is not set
23# CONFIG_MIPS_DB1200 is not set
24# CONFIG_MIPS_MIRAGE is not set
25# CONFIG_BASLER_EXCITE is not set 12# CONFIG_BASLER_EXCITE is not set
13# CONFIG_BCM47XX is not set
26# CONFIG_MIPS_COBALT is not set 14# CONFIG_MIPS_COBALT is not set
27# CONFIG_MACH_DECSTATION is not set 15# CONFIG_MACH_DECSTATION is not set
28# CONFIG_MACH_JAZZ is not set 16# CONFIG_MACH_JAZZ is not set
17# CONFIG_LASAT is not set
18# CONFIG_LEMOTE_FULONG is not set
29# CONFIG_MIPS_ATLAS is not set 19# CONFIG_MIPS_ATLAS is not set
30# CONFIG_MIPS_MALTA is not set 20# CONFIG_MIPS_MALTA is not set
31# CONFIG_MIPS_SEAD is not set 21# CONFIG_MIPS_SEAD is not set
32# CONFIG_WR_PPMC is not set
33CONFIG_MIPS_SIM=y 22CONFIG_MIPS_SIM=y
34# CONFIG_MOMENCO_JAGUAR_ATX is not set 23# CONFIG_MARKEINS is not set
35# CONFIG_MIPS_XXS1500 is not set 24# CONFIG_MACH_VR41XX is not set
36# CONFIG_PNX8550_JBS is not set 25# CONFIG_PNX8550_JBS is not set
37# CONFIG_PNX8550_STB810 is not set 26# CONFIG_PNX8550_STB810 is not set
38# CONFIG_MACH_VR41XX is not set 27# CONFIG_PMC_MSP is not set
39# CONFIG_PMC_YOSEMITE is not set 28# CONFIG_PMC_YOSEMITE is not set
40# CONFIG_QEMU is not set 29# CONFIG_QEMU is not set
41# CONFIG_MARKEINS is not set
42# CONFIG_SGI_IP22 is not set 30# CONFIG_SGI_IP22 is not set
43# CONFIG_SGI_IP27 is not set 31# CONFIG_SGI_IP27 is not set
44# CONFIG_SGI_IP32 is not set 32# CONFIG_SGI_IP32 is not set
45# CONFIG_SIBYTE_BIGSUR is not set 33# CONFIG_SIBYTE_CRHINE is not set
34# CONFIG_SIBYTE_CARMEL is not set
35# CONFIG_SIBYTE_CRHONE is not set
36# CONFIG_SIBYTE_RHONE is not set
46# CONFIG_SIBYTE_SWARM is not set 37# CONFIG_SIBYTE_SWARM is not set
38# CONFIG_SIBYTE_LITTLESUR is not set
47# CONFIG_SIBYTE_SENTOSA is not set 39# CONFIG_SIBYTE_SENTOSA is not set
48# CONFIG_SIBYTE_RHONE is not set
49# CONFIG_SIBYTE_CARMEL is not set
50# CONFIG_SIBYTE_PTSWARM is not set 40# CONFIG_SIBYTE_PTSWARM is not set
51# CONFIG_SIBYTE_LITTLESUR is not set 41# CONFIG_SIBYTE_BIGSUR is not set
52# CONFIG_SIBYTE_CRHINE is not set
53# CONFIG_SIBYTE_CRHONE is not set
54# CONFIG_SNI_RM is not set 42# CONFIG_SNI_RM is not set
55# CONFIG_TOSHIBA_JMR3927 is not set 43# CONFIG_TOSHIBA_JMR3927 is not set
56# CONFIG_TOSHIBA_RBTX4927 is not set 44# CONFIG_TOSHIBA_RBTX4927 is not set
57# CONFIG_TOSHIBA_RBTX4938 is not set 45# CONFIG_TOSHIBA_RBTX4938 is not set
46# CONFIG_WR_PPMC is not set
58CONFIG_RWSEM_GENERIC_SPINLOCK=y 47CONFIG_RWSEM_GENERIC_SPINLOCK=y
59# CONFIG_ARCH_HAS_ILOG2_U32 is not set 48# CONFIG_ARCH_HAS_ILOG2_U32 is not set
60# CONFIG_ARCH_HAS_ILOG2_U64 is not set 49# CONFIG_ARCH_HAS_ILOG2_U64 is not set
61CONFIG_GENERIC_FIND_NEXT_BIT=y 50CONFIG_GENERIC_FIND_NEXT_BIT=y
62CONFIG_GENERIC_HWEIGHT=y 51CONFIG_GENERIC_HWEIGHT=y
63CONFIG_GENERIC_CALIBRATE_DELAY=y 52CONFIG_GENERIC_CALIBRATE_DELAY=y
53CONFIG_GENERIC_CLOCKEVENTS=y
64CONFIG_GENERIC_TIME=y 54CONFIG_GENERIC_TIME=y
55CONFIG_GENERIC_CMOS_UPDATE=y
65CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y 56CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
66# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set 57# CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ is not set
58CONFIG_BOOT_RAW=y
59CONFIG_CEVT_R4K=y
67CONFIG_DMA_NONCOHERENT=y 60CONFIG_DMA_NONCOHERENT=y
68CONFIG_DMA_NEED_PCI_MAP_STATE=y 61CONFIG_DMA_NEED_PCI_MAP_STATE=y
62CONFIG_EARLY_PRINTK=y
63CONFIG_SYS_HAS_EARLY_PRINTK=y
64# CONFIG_HOTPLUG_CPU is not set
65# CONFIG_NO_IOPORT is not set
69# CONFIG_CPU_BIG_ENDIAN is not set 66# CONFIG_CPU_BIG_ENDIAN is not set
70CONFIG_CPU_LITTLE_ENDIAN=y 67CONFIG_CPU_LITTLE_ENDIAN=y
71CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y 68CONFIG_SYS_SUPPORTS_BIG_ENDIAN=y
@@ -76,6 +73,11 @@ CONFIG_MIPS_L1_CACHE_SHIFT=5
76# 73#
77# CPU selection 74# CPU selection
78# 75#
76# CONFIG_TICK_ONESHOT is not set
77# CONFIG_NO_HZ is not set
78# CONFIG_HIGH_RES_TIMERS is not set
79CONFIG_GENERIC_CLOCKEVENTS_BUILD=y
80# CONFIG_CPU_LOONGSON2 is not set
79CONFIG_CPU_MIPS32_R1=y 81CONFIG_CPU_MIPS32_R1=y
80# CONFIG_CPU_MIPS32_R2 is not set 82# CONFIG_CPU_MIPS32_R2 is not set
81# CONFIG_CPU_MIPS64_R1 is not set 83# CONFIG_CPU_MIPS64_R1 is not set
@@ -115,8 +117,8 @@ CONFIG_CPU_HAS_PREFETCH=y
115CONFIG_MIPS_MT_DISABLED=y 117CONFIG_MIPS_MT_DISABLED=y
116# CONFIG_MIPS_MT_SMP is not set 118# CONFIG_MIPS_MT_SMP is not set
117# CONFIG_MIPS_MT_SMTC is not set 119# CONFIG_MIPS_MT_SMTC is not set
120CONFIG_SYS_SUPPORTS_MULTITHREADING=y
118# CONFIG_MIPS_VPE_LOADER is not set 121# CONFIG_MIPS_VPE_LOADER is not set
119# CONFIG_64BIT_PHYS_ADDR is not set
120CONFIG_CPU_HAS_LLSC=y 122CONFIG_CPU_HAS_LLSC=y
121CONFIG_CPU_HAS_SYNC=y 123CONFIG_CPU_HAS_SYNC=y
122CONFIG_GENERIC_HARDIRQS=y 124CONFIG_GENERIC_HARDIRQS=y
@@ -130,50 +132,52 @@ CONFIG_FLATMEM_MANUAL=y
130CONFIG_FLATMEM=y 132CONFIG_FLATMEM=y
131CONFIG_FLAT_NODE_MEM_MAP=y 133CONFIG_FLAT_NODE_MEM_MAP=y
132# CONFIG_SPARSEMEM_STATIC is not set 134# CONFIG_SPARSEMEM_STATIC is not set
135# CONFIG_SPARSEMEM_VMEMMAP_ENABLE is not set
133CONFIG_SPLIT_PTLOCK_CPUS=4 136CONFIG_SPLIT_PTLOCK_CPUS=4
134# CONFIG_RESOURCES_64BIT is not set 137# CONFIG_RESOURCES_64BIT is not set
135CONFIG_ZONE_DMA_FLAG=1 138CONFIG_ZONE_DMA_FLAG=0
139CONFIG_VIRT_TO_BUS=y
136# CONFIG_HZ_48 is not set 140# CONFIG_HZ_48 is not set
137# CONFIG_HZ_100 is not set 141CONFIG_HZ_100=y
138# CONFIG_HZ_128 is not set 142# CONFIG_HZ_128 is not set
139# CONFIG_HZ_250 is not set 143# CONFIG_HZ_250 is not set
140# CONFIG_HZ_256 is not set 144# CONFIG_HZ_256 is not set
141CONFIG_HZ_1000=y 145# CONFIG_HZ_1000 is not set
142# CONFIG_HZ_1024 is not set 146# CONFIG_HZ_1024 is not set
143CONFIG_SYS_SUPPORTS_ARBIT_HZ=y 147CONFIG_SYS_SUPPORTS_ARBIT_HZ=y
144CONFIG_HZ=1000 148CONFIG_HZ=100
145CONFIG_PREEMPT_NONE=y 149CONFIG_PREEMPT_NONE=y
146# CONFIG_PREEMPT_VOLUNTARY is not set 150# CONFIG_PREEMPT_VOLUNTARY is not set
147# CONFIG_PREEMPT is not set 151# CONFIG_PREEMPT is not set
148# CONFIG_KEXEC is not set 152# CONFIG_KEXEC is not set
153# CONFIG_SECCOMP is not set
149CONFIG_LOCKDEP_SUPPORT=y 154CONFIG_LOCKDEP_SUPPORT=y
150CONFIG_STACKTRACE_SUPPORT=y 155CONFIG_STACKTRACE_SUPPORT=y
151CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" 156CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
152 157
153# 158#
154# Code maturity level options 159# General setup
155# 160#
156CONFIG_EXPERIMENTAL=y 161CONFIG_EXPERIMENTAL=y
157CONFIG_BROKEN_ON_SMP=y 162CONFIG_BROKEN_ON_SMP=y
158CONFIG_INIT_ENV_ARG_LIMIT=32 163CONFIG_INIT_ENV_ARG_LIMIT=32
159
160#
161# General setup
162#
163CONFIG_LOCALVERSION="" 164CONFIG_LOCALVERSION=""
164CONFIG_LOCALVERSION_AUTO=y 165CONFIG_LOCALVERSION_AUTO=y
165CONFIG_SWAP=y 166# CONFIG_SWAP is not set
166CONFIG_SYSVIPC=y 167CONFIG_SYSVIPC=y
167# CONFIG_IPC_NS is not set
168CONFIG_SYSVIPC_SYSCTL=y 168CONFIG_SYSVIPC_SYSCTL=y
169# CONFIG_POSIX_MQUEUE is not set 169# CONFIG_POSIX_MQUEUE is not set
170# CONFIG_BSD_PROCESS_ACCT is not set 170# CONFIG_BSD_PROCESS_ACCT is not set
171# CONFIG_TASKSTATS is not set 171# CONFIG_TASKSTATS is not set
172# CONFIG_UTS_NS is not set 172# CONFIG_USER_NS is not set
173# CONFIG_AUDIT is not set 173# CONFIG_AUDIT is not set
174# CONFIG_IKCONFIG is not set 174# CONFIG_IKCONFIG is not set
175CONFIG_LOG_BUF_SHIFT=14
176CONFIG_FAIR_GROUP_SCHED=y
177CONFIG_FAIR_USER_SCHED=y
175CONFIG_SYSFS_DEPRECATED=y 178CONFIG_SYSFS_DEPRECATED=y
176# CONFIG_RELAY is not set 179# CONFIG_RELAY is not set
180# CONFIG_BLK_DEV_INITRD is not set
177# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set 181# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
178CONFIG_SYSCTL=y 182CONFIG_SYSCTL=y
179CONFIG_EMBEDDED=y 183CONFIG_EMBEDDED=y
@@ -187,31 +191,29 @@ CONFIG_BUG=y
187CONFIG_ELF_CORE=y 191CONFIG_ELF_CORE=y
188CONFIG_BASE_FULL=y 192CONFIG_BASE_FULL=y
189CONFIG_FUTEX=y 193CONFIG_FUTEX=y
194CONFIG_ANON_INODES=y
190CONFIG_EPOLL=y 195CONFIG_EPOLL=y
196CONFIG_SIGNALFD=y
197CONFIG_EVENTFD=y
191CONFIG_SHMEM=y 198CONFIG_SHMEM=y
192CONFIG_SLAB=y
193CONFIG_VM_EVENT_COUNTERS=y 199CONFIG_VM_EVENT_COUNTERS=y
200CONFIG_SLAB=y
201# CONFIG_SLUB is not set
202# CONFIG_SLOB is not set
194CONFIG_RT_MUTEXES=y 203CONFIG_RT_MUTEXES=y
195# CONFIG_TINY_SHMEM is not set 204# CONFIG_TINY_SHMEM is not set
196CONFIG_BASE_SMALL=0 205CONFIG_BASE_SMALL=0
197# CONFIG_SLOB is not set
198
199#
200# Loadable module support
201#
202CONFIG_MODULES=y 206CONFIG_MODULES=y
203CONFIG_MODULE_UNLOAD=y 207CONFIG_MODULE_UNLOAD=y
204# CONFIG_MODULE_FORCE_UNLOAD is not set 208# CONFIG_MODULE_FORCE_UNLOAD is not set
205CONFIG_MODVERSIONS=y 209CONFIG_MODVERSIONS=y
206CONFIG_MODULE_SRCVERSION_ALL=y 210CONFIG_MODULE_SRCVERSION_ALL=y
207CONFIG_KMOD=y 211CONFIG_KMOD=y
208
209#
210# Block layer
211#
212CONFIG_BLOCK=y 212CONFIG_BLOCK=y
213# CONFIG_LBD is not set 213# CONFIG_LBD is not set
214# CONFIG_BLK_DEV_IO_TRACE is not set
214# CONFIG_LSF is not set 215# CONFIG_LSF is not set
216# CONFIG_BLK_DEV_BSG is not set
215 217
216# 218#
217# IO Schedulers 219# IO Schedulers
@@ -229,18 +231,11 @@ CONFIG_DEFAULT_IOSCHED="anticipatory"
229# 231#
230# Bus options (PCI, PCMCIA, EISA, ISA, TC) 232# Bus options (PCI, PCMCIA, EISA, ISA, TC)
231# 233#
234# CONFIG_ARCH_SUPPORTS_MSI is not set
232CONFIG_MMU=y 235CONFIG_MMU=y
233
234#
235# PCCARD (PCMCIA/CardBus) support
236#
237# CONFIG_PCCARD is not set 236# CONFIG_PCCARD is not set
238 237
239# 238#
240# PCI Hotplug Support
241#
242
243#
244# Executable file formats 239# Executable file formats
245# 240#
246CONFIG_BINFMT_ELF=y 241CONFIG_BINFMT_ELF=y
@@ -250,9 +245,8 @@ CONFIG_TRAD_SIGNALS=y
250# 245#
251# Power management options 246# Power management options
252# 247#
253CONFIG_PM=y 248# CONFIG_PM is not set
254# CONFIG_PM_LEGACY is not set 249CONFIG_SUSPEND_UP_POSSIBLE=y
255# CONFIG_PM_DEBUG is not set
256 250
257# 251#
258# Networking 252# Networking
@@ -262,75 +256,50 @@ CONFIG_NET=y
262# 256#
263# Networking options 257# Networking options
264# 258#
265# CONFIG_NETDEBUG is not set
266CONFIG_PACKET=y 259CONFIG_PACKET=y
267CONFIG_PACKET_MMAP=y 260CONFIG_PACKET_MMAP=y
268CONFIG_UNIX=y 261CONFIG_UNIX=y
269CONFIG_XFRM=y 262# CONFIG_NET_KEY is not set
270# CONFIG_XFRM_USER is not set
271# CONFIG_XFRM_SUB_POLICY is not set
272CONFIG_XFRM_MIGRATE=y
273CONFIG_NET_KEY=y
274CONFIG_NET_KEY_MIGRATE=y
275CONFIG_INET=y 263CONFIG_INET=y
276CONFIG_IP_MULTICAST=y 264CONFIG_IP_MULTICAST=y
277CONFIG_IP_ADVANCED_ROUTER=y 265CONFIG_IP_ADVANCED_ROUTER=y
278CONFIG_ASK_IP_FIB_HASH=y 266CONFIG_ASK_IP_FIB_HASH=y
279# CONFIG_IP_FIB_TRIE is not set 267# CONFIG_IP_FIB_TRIE is not set
280CONFIG_IP_FIB_HASH=y 268CONFIG_IP_FIB_HASH=y
281CONFIG_IP_MULTIPLE_TABLES=y 269# CONFIG_IP_MULTIPLE_TABLES is not set
282CONFIG_IP_ROUTE_MULTIPATH=y 270# CONFIG_IP_ROUTE_MULTIPATH is not set
283# CONFIG_IP_ROUTE_MULTIPATH_CACHED is not set 271# CONFIG_IP_ROUTE_VERBOSE is not set
284CONFIG_IP_ROUTE_VERBOSE=y
285CONFIG_IP_PNP=y 272CONFIG_IP_PNP=y
286CONFIG_IP_PNP_DHCP=y 273CONFIG_IP_PNP_DHCP=y
287CONFIG_IP_PNP_BOOTP=y 274CONFIG_IP_PNP_BOOTP=y
288# CONFIG_IP_PNP_RARP is not set 275# CONFIG_IP_PNP_RARP is not set
289# CONFIG_NET_IPIP is not set 276# CONFIG_NET_IPIP is not set
290# CONFIG_NET_IPGRE is not set 277# CONFIG_NET_IPGRE is not set
291CONFIG_IP_MROUTE=y 278# CONFIG_IP_MROUTE is not set
292CONFIG_IP_PIMSM_V1=y
293CONFIG_IP_PIMSM_V2=y
294# CONFIG_ARPD is not set 279# CONFIG_ARPD is not set
295CONFIG_SYN_COOKIES=y 280# CONFIG_SYN_COOKIES is not set
296# CONFIG_INET_AH is not set 281# CONFIG_INET_AH is not set
297# CONFIG_INET_ESP is not set 282# CONFIG_INET_ESP is not set
298# CONFIG_INET_IPCOMP is not set 283# CONFIG_INET_IPCOMP is not set
299# CONFIG_INET_XFRM_TUNNEL is not set 284# CONFIG_INET_XFRM_TUNNEL is not set
300# CONFIG_INET_TUNNEL is not set 285# CONFIG_INET_TUNNEL is not set
301CONFIG_INET_XFRM_MODE_TRANSPORT=m 286# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
302CONFIG_INET_XFRM_MODE_TUNNEL=m 287# CONFIG_INET_XFRM_MODE_TUNNEL is not set
303CONFIG_INET_XFRM_MODE_BEET=m 288# CONFIG_INET_XFRM_MODE_BEET is not set
289# CONFIG_INET_LRO is not set
304CONFIG_INET_DIAG=y 290CONFIG_INET_DIAG=y
305CONFIG_INET_TCP_DIAG=y 291CONFIG_INET_TCP_DIAG=y
306# CONFIG_TCP_CONG_ADVANCED is not set 292# CONFIG_TCP_CONG_ADVANCED is not set
307CONFIG_TCP_CONG_CUBIC=y 293CONFIG_TCP_CONG_CUBIC=y
308CONFIG_DEFAULT_TCP_CONG="cubic" 294CONFIG_DEFAULT_TCP_CONG="cubic"
309CONFIG_TCP_MD5SIG=y 295# CONFIG_TCP_MD5SIG is not set
310# CONFIG_IPV6 is not set 296# CONFIG_IPV6 is not set
311# CONFIG_INET6_XFRM_TUNNEL is not set 297# CONFIG_INET6_XFRM_TUNNEL is not set
312# CONFIG_INET6_TUNNEL is not set 298# CONFIG_INET6_TUNNEL is not set
313CONFIG_NETWORK_SECMARK=y 299# CONFIG_NETWORK_SECMARK is not set
314# CONFIG_NETFILTER is not set 300# CONFIG_NETFILTER is not set
315
316#
317# DCCP Configuration (EXPERIMENTAL)
318#
319# CONFIG_IP_DCCP is not set 301# CONFIG_IP_DCCP is not set
320 302# CONFIG_IP_SCTP is not set
321#
322# SCTP Configuration (EXPERIMENTAL)
323#
324CONFIG_IP_SCTP=m
325# CONFIG_SCTP_DBG_MSG is not set
326# CONFIG_SCTP_DBG_OBJCNT is not set
327# CONFIG_SCTP_HMAC_NONE is not set
328# CONFIG_SCTP_HMAC_SHA1 is not set
329CONFIG_SCTP_HMAC_MD5=y
330
331#
332# TIPC Configuration (EXPERIMENTAL)
333#
334# CONFIG_TIPC is not set 303# CONFIG_TIPC is not set
335# CONFIG_ATM is not set 304# CONFIG_ATM is not set
336# CONFIG_BRIDGE is not set 305# CONFIG_BRIDGE is not set
@@ -347,44 +316,7 @@ CONFIG_SCTP_HMAC_MD5=y
347# 316#
348# QoS and/or fair queueing 317# QoS and/or fair queueing
349# 318#
350CONFIG_NET_SCHED=y 319# CONFIG_NET_SCHED is not set
351CONFIG_NET_SCH_FIFO=y
352CONFIG_NET_SCH_CLK_JIFFIES=y
353# CONFIG_NET_SCH_CLK_GETTIMEOFDAY is not set
354# CONFIG_NET_SCH_CLK_CPU is not set
355
356#
357# Queueing/Scheduling
358#
359CONFIG_NET_SCH_CBQ=m
360CONFIG_NET_SCH_HTB=m
361CONFIG_NET_SCH_HFSC=m
362CONFIG_NET_SCH_PRIO=m
363CONFIG_NET_SCH_RED=m
364CONFIG_NET_SCH_SFQ=m
365CONFIG_NET_SCH_TEQL=m
366CONFIG_NET_SCH_TBF=m
367CONFIG_NET_SCH_GRED=m
368CONFIG_NET_SCH_DSMARK=m
369CONFIG_NET_SCH_NETEM=m
370CONFIG_NET_SCH_INGRESS=m
371
372#
373# Classification
374#
375CONFIG_NET_CLS=y
376CONFIG_NET_CLS_BASIC=m
377CONFIG_NET_CLS_TCINDEX=m
378CONFIG_NET_CLS_ROUTE4=m
379CONFIG_NET_CLS_ROUTE=y
380# CONFIG_NET_CLS_FW is not set
381# CONFIG_NET_CLS_U32 is not set
382# CONFIG_NET_CLS_RSVP is not set
383# CONFIG_NET_CLS_RSVP6 is not set
384# CONFIG_NET_EMATCH is not set
385# CONFIG_NET_CLS_ACT is not set
386# CONFIG_NET_CLS_POLICE is not set
387CONFIG_NET_ESTIMATOR=y
388 320
389# 321#
390# Network testing 322# Network testing
@@ -393,8 +325,17 @@ CONFIG_NET_ESTIMATOR=y
393# CONFIG_HAMRADIO is not set 325# CONFIG_HAMRADIO is not set
394# CONFIG_IRDA is not set 326# CONFIG_IRDA is not set
395# CONFIG_BT is not set 327# CONFIG_BT is not set
328# CONFIG_AF_RXRPC is not set
329
330#
331# Wireless
332#
333# CONFIG_CFG80211 is not set
334# CONFIG_WIRELESS_EXT is not set
335# CONFIG_MAC80211 is not set
396# CONFIG_IEEE80211 is not set 336# CONFIG_IEEE80211 is not set
397CONFIG_FIB_RULES=y 337# CONFIG_RFKILL is not set
338# CONFIG_NET_9P is not set
398 339
399# 340#
400# Device Drivers 341# Device Drivers
@@ -403,52 +344,25 @@ CONFIG_FIB_RULES=y
403# 344#
404# Generic Driver Options 345# Generic Driver Options
405# 346#
347CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
406# CONFIG_STANDALONE is not set 348# CONFIG_STANDALONE is not set
407# CONFIG_PREVENT_FIRMWARE_BUILD is not set 349# CONFIG_PREVENT_FIRMWARE_BUILD is not set
408# CONFIG_FW_LOADER is not set 350# CONFIG_FW_LOADER is not set
409# CONFIG_DEBUG_DRIVER is not set 351# CONFIG_DEBUG_DRIVER is not set
410# CONFIG_DEBUG_DEVRES is not set 352# CONFIG_DEBUG_DEVRES is not set
411# CONFIG_SYS_HYPERVISOR is not set 353# CONFIG_SYS_HYPERVISOR is not set
412
413#
414# Connector - unified userspace <-> kernelspace linker
415#
416# CONFIG_CONNECTOR is not set 354# CONFIG_CONNECTOR is not set
417
418#
419# Memory Technology Devices (MTD)
420#
421# CONFIG_MTD is not set 355# CONFIG_MTD is not set
422
423#
424# Parallel port support
425#
426# CONFIG_PARPORT is not set 356# CONFIG_PARPORT is not set
427 357CONFIG_BLK_DEV=y
428#
429# Plug and Play support
430#
431# CONFIG_PNPACPI is not set
432
433#
434# Block devices
435#
436# CONFIG_BLK_DEV_COW_COMMON is not set 358# CONFIG_BLK_DEV_COW_COMMON is not set
437CONFIG_BLK_DEV_LOOP=y 359CONFIG_BLK_DEV_LOOP=y
438# CONFIG_BLK_DEV_CRYPTOLOOP is not set 360# CONFIG_BLK_DEV_CRYPTOLOOP is not set
439CONFIG_BLK_DEV_NBD=y 361CONFIG_BLK_DEV_NBD=y
440# CONFIG_BLK_DEV_RAM is not set 362# CONFIG_BLK_DEV_RAM is not set
441# CONFIG_BLK_DEV_INITRD is not set
442# CONFIG_CDROM_PKTCDVD is not set 363# CONFIG_CDROM_PKTCDVD is not set
443# CONFIG_ATA_OVER_ETH is not set 364# CONFIG_ATA_OVER_ETH is not set
444 365# CONFIG_MISC_DEVICES is not set
445#
446# Misc devices
447#
448
449#
450# ATA/ATAPI/MFM/RLL support
451#
452# CONFIG_IDE is not set 366# CONFIG_IDE is not set
453 367
454# 368#
@@ -456,48 +370,29 @@ CONFIG_BLK_DEV_NBD=y
456# 370#
457# CONFIG_RAID_ATTRS is not set 371# CONFIG_RAID_ATTRS is not set
458# CONFIG_SCSI is not set 372# CONFIG_SCSI is not set
373# CONFIG_SCSI_DMA is not set
459# CONFIG_SCSI_NETLINK is not set 374# CONFIG_SCSI_NETLINK is not set
460
461#
462# Serial ATA (prod) and Parallel ATA (experimental) drivers
463#
464# CONFIG_ATA is not set 375# CONFIG_ATA is not set
465
466#
467# Multi-device support (RAID and LVM)
468#
469# CONFIG_MD is not set 376# CONFIG_MD is not set
470
471#
472# Fusion MPT device support
473#
474# CONFIG_FUSION is not set
475
476#
477# IEEE 1394 (FireWire) support
478#
479
480#
481# I2O device support
482#
483
484#
485# Network device support
486#
487CONFIG_NETDEVICES=y 377CONFIG_NETDEVICES=y
378# CONFIG_NETDEVICES_MULTIQUEUE is not set
488# CONFIG_DUMMY is not set 379# CONFIG_DUMMY is not set
489# CONFIG_BONDING is not set 380# CONFIG_BONDING is not set
381# CONFIG_MACVLAN is not set
490# CONFIG_EQUALIZER is not set 382# CONFIG_EQUALIZER is not set
491# CONFIG_TUN is not set 383# CONFIG_TUN is not set
384# CONFIG_VETH is not set
492# CONFIG_PHYLIB is not set 385# CONFIG_PHYLIB is not set
493
494#
495# Ethernet (10 or 100Mbit)
496#
497CONFIG_NET_ETHERNET=y 386CONFIG_NET_ETHERNET=y
498# CONFIG_MII is not set 387# CONFIG_MII is not set
388# CONFIG_AX88796 is not set
499CONFIG_MIPS_SIM_NET=y 389CONFIG_MIPS_SIM_NET=y
500# CONFIG_DM9000 is not set 390# CONFIG_DM9000 is not set
391# CONFIG_IBM_NEW_EMAC_ZMII is not set
392# CONFIG_IBM_NEW_EMAC_RGMII is not set
393# CONFIG_IBM_NEW_EMAC_TAH is not set
394# CONFIG_IBM_NEW_EMAC_EMAC4 is not set
395# CONFIG_B44 is not set
501# CONFIG_NETDEV_1000 is not set 396# CONFIG_NETDEV_1000 is not set
502# CONFIG_NETDEV_10000 is not set 397# CONFIG_NETDEV_10000 is not set
503 398
@@ -513,49 +408,18 @@ CONFIG_MIPS_SIM_NET=y
513# CONFIG_NETCONSOLE is not set 408# CONFIG_NETCONSOLE is not set
514# CONFIG_NETPOLL is not set 409# CONFIG_NETPOLL is not set
515# CONFIG_NET_POLL_CONTROLLER is not set 410# CONFIG_NET_POLL_CONTROLLER is not set
516
517#
518# ISDN subsystem
519#
520# CONFIG_ISDN is not set 411# CONFIG_ISDN is not set
521
522#
523# Telephony Support
524#
525# CONFIG_PHONE is not set 412# CONFIG_PHONE is not set
526 413
527# 414#
528# Input device support 415# Input device support
529# 416#
530CONFIG_INPUT=y 417# CONFIG_INPUT is not set
531# CONFIG_INPUT_FF_MEMLESS is not set
532
533#
534# Userland interfaces
535#
536# CONFIG_INPUT_MOUSEDEV is not set
537# CONFIG_INPUT_JOYDEV is not set
538# CONFIG_INPUT_TSDEV is not set
539# CONFIG_INPUT_EVDEV is not set
540# CONFIG_INPUT_EVBUG is not set
541
542#
543# Input Device Drivers
544#
545# CONFIG_INPUT_KEYBOARD is not set
546# CONFIG_INPUT_MOUSE is not set
547# CONFIG_INPUT_JOYSTICK is not set
548# CONFIG_INPUT_TOUCHSCREEN is not set
549# CONFIG_INPUT_MISC is not set
550 418
551# 419#
552# Hardware I/O ports 420# Hardware I/O ports
553# 421#
554CONFIG_SERIO=y 422# CONFIG_SERIO is not set
555# CONFIG_SERIO_I8042 is not set
556CONFIG_SERIO_SERPORT=y
557# CONFIG_SERIO_LIBPS2 is not set
558# CONFIG_SERIO_RAW is not set
559# CONFIG_GAMEPORT is not set 423# CONFIG_GAMEPORT is not set
560 424
561# 425#
@@ -581,31 +445,13 @@ CONFIG_SERIAL_CORE_CONSOLE=y
581CONFIG_UNIX98_PTYS=y 445CONFIG_UNIX98_PTYS=y
582CONFIG_LEGACY_PTYS=y 446CONFIG_LEGACY_PTYS=y
583CONFIG_LEGACY_PTY_COUNT=256 447CONFIG_LEGACY_PTY_COUNT=256
584
585#
586# IPMI
587#
588# CONFIG_IPMI_HANDLER is not set 448# CONFIG_IPMI_HANDLER is not set
589
590#
591# Watchdog Cards
592#
593# CONFIG_WATCHDOG is not set 449# CONFIG_WATCHDOG is not set
594# CONFIG_HW_RANDOM is not set 450# CONFIG_HW_RANDOM is not set
595# CONFIG_RTC is not set 451# CONFIG_RTC is not set
596# CONFIG_GEN_RTC is not set
597# CONFIG_DTLK is not set
598# CONFIG_R3964 is not set 452# CONFIG_R3964 is not set
599# CONFIG_RAW_DRIVER is not set 453# CONFIG_RAW_DRIVER is not set
600
601#
602# TPM devices
603#
604# CONFIG_TCG_TPM is not set 454# CONFIG_TCG_TPM is not set
605
606#
607# I2C support
608#
609# CONFIG_I2C is not set 455# CONFIG_I2C is not set
610 456
611# 457#
@@ -613,118 +459,60 @@ CONFIG_LEGACY_PTY_COUNT=256
613# 459#
614# CONFIG_SPI is not set 460# CONFIG_SPI is not set
615# CONFIG_SPI_MASTER is not set 461# CONFIG_SPI_MASTER is not set
462# CONFIG_W1 is not set
463# CONFIG_POWER_SUPPLY is not set
464# CONFIG_HWMON is not set
616 465
617# 466#
618# Dallas's 1-wire bus 467# Sonics Silicon Backplane
619# 468#
620# CONFIG_W1 is not set 469CONFIG_SSB_POSSIBLE=y
470# CONFIG_SSB is not set
621 471
622# 472#
623# Hardware Monitoring support 473# Multifunction device drivers
624# 474#
625# CONFIG_HWMON is not set 475# CONFIG_MFD_SM501 is not set
626# CONFIG_HWMON_VID is not set
627 476
628# 477#
629# Multimedia devices 478# Multimedia devices
630# 479#
631# CONFIG_VIDEO_DEV is not set 480# CONFIG_VIDEO_DEV is not set
632 481# CONFIG_DVB_CORE is not set
633# 482# CONFIG_DAB is not set
634# Digital Video Broadcasting Devices
635#
636# CONFIG_DVB is not set
637 483
638# 484#
639# Graphics support 485# Graphics support
640# 486#
641# CONFIG_FIRMWARE_EDID is not set 487# CONFIG_VGASTATE is not set
488# CONFIG_VIDEO_OUTPUT_CONTROL is not set
642# CONFIG_FB is not set 489# CONFIG_FB is not set
490# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
643 491
644# 492#
645# Sound 493# Display device support
646# 494#
647# CONFIG_SOUND is not set 495# CONFIG_DISPLAY_SUPPORT is not set
648 496
649# 497#
650# HID Devices 498# Sound
651#
652# CONFIG_HID is not set
653
654#
655# USB support
656#
657# CONFIG_USB_ARCH_HAS_HCD is not set
658# CONFIG_USB_ARCH_HAS_OHCI is not set
659# CONFIG_USB_ARCH_HAS_EHCI is not set
660
661#
662# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
663#
664
665#
666# USB Gadget Support
667#
668# CONFIG_USB_GADGET is not set
669
670#
671# MMC/SD Card support
672# 499#
500# CONFIG_SOUND is not set
501# CONFIG_USB_SUPPORT is not set
673# CONFIG_MMC is not set 502# CONFIG_MMC is not set
674
675#
676# LED devices
677#
678# CONFIG_NEW_LEDS is not set 503# CONFIG_NEW_LEDS is not set
679 504CONFIG_RTC_LIB=y
680#
681# LED drivers
682#
683
684#
685# LED Triggers
686#
687
688#
689# InfiniBand support
690#
691
692#
693# EDAC - error detection and reporting (RAS) (EXPERIMENTAL)
694#
695
696#
697# Real Time Clock
698#
699# CONFIG_RTC_CLASS is not set 505# CONFIG_RTC_CLASS is not set
700 506
701# 507#
702# DMA Engine support 508# Userspace I/O
703#
704# CONFIG_DMA_ENGINE is not set
705
706#
707# DMA Clients
708#
709
710#
711# DMA Devices
712#
713
714#
715# Auxiliary Display support
716#
717
718#
719# Virtualization
720# 509#
510# CONFIG_UIO is not set
721 511
722# 512#
723# File systems 513# File systems
724# 514#
725CONFIG_EXT2_FS=y 515# CONFIG_EXT2_FS is not set
726# CONFIG_EXT2_FS_XATTR is not set
727# CONFIG_EXT2_FS_XIP is not set
728# CONFIG_EXT3_FS is not set 516# CONFIG_EXT3_FS is not set
729# CONFIG_EXT4DEV_FS is not set 517# CONFIG_EXT4DEV_FS is not set
730# CONFIG_REISERFS_FS is not set 518# CONFIG_REISERFS_FS is not set
@@ -732,6 +520,7 @@ CONFIG_EXT2_FS=y
732# CONFIG_FS_POSIX_ACL is not set 520# CONFIG_FS_POSIX_ACL is not set
733# CONFIG_XFS_FS is not set 521# CONFIG_XFS_FS is not set
734# CONFIG_GFS2_FS is not set 522# CONFIG_GFS2_FS is not set
523# CONFIG_OCFS2_FS is not set
735# CONFIG_MINIX_FS is not set 524# CONFIG_MINIX_FS is not set
736CONFIG_ROMFS_FS=y 525CONFIG_ROMFS_FS=y
737# CONFIG_INOTIFY is not set 526# CONFIG_INOTIFY is not set
@@ -760,10 +549,11 @@ CONFIG_ROMFS_FS=y
760CONFIG_PROC_FS=y 549CONFIG_PROC_FS=y
761# CONFIG_PROC_KCORE is not set 550# CONFIG_PROC_KCORE is not set
762CONFIG_PROC_SYSCTL=y 551CONFIG_PROC_SYSCTL=y
763# CONFIG_SYSFS is not set 552CONFIG_SYSFS=y
764# CONFIG_TMPFS is not set 553CONFIG_TMPFS=y
554# CONFIG_TMPFS_POSIX_ACL is not set
765# CONFIG_HUGETLB_PAGE is not set 555# CONFIG_HUGETLB_PAGE is not set
766CONFIG_RAMFS=y 556# CONFIG_CONFIGFS_FS is not set
767 557
768# 558#
769# Miscellaneous filesystems 559# Miscellaneous filesystems
@@ -781,10 +571,7 @@ CONFIG_RAMFS=y
781# CONFIG_QNX4FS_FS is not set 571# CONFIG_QNX4FS_FS is not set
782# CONFIG_SYSV_FS is not set 572# CONFIG_SYSV_FS is not set
783# CONFIG_UFS_FS is not set 573# CONFIG_UFS_FS is not set
784 574CONFIG_NETWORK_FILESYSTEMS=y
785#
786# Network File Systems
787#
788CONFIG_NFS_FS=y 575CONFIG_NFS_FS=y
789CONFIG_NFS_V3=y 576CONFIG_NFS_V3=y
790# CONFIG_NFS_V3_ACL is not set 577# CONFIG_NFS_V3_ACL is not set
@@ -796,6 +583,7 @@ CONFIG_LOCKD=y
796CONFIG_LOCKD_V4=y 583CONFIG_LOCKD_V4=y
797CONFIG_NFS_COMMON=y 584CONFIG_NFS_COMMON=y
798CONFIG_SUNRPC=y 585CONFIG_SUNRPC=y
586# CONFIG_SUNRPC_BIND34 is not set
799# CONFIG_RPCSEC_GSS_KRB5 is not set 587# CONFIG_RPCSEC_GSS_KRB5 is not set
800# CONFIG_RPCSEC_GSS_SPKM3 is not set 588# CONFIG_RPCSEC_GSS_SPKM3 is not set
801# CONFIG_SMB_FS is not set 589# CONFIG_SMB_FS is not set
@@ -803,22 +591,14 @@ CONFIG_SUNRPC=y
803# CONFIG_NCP_FS is not set 591# CONFIG_NCP_FS is not set
804# CONFIG_CODA_FS is not set 592# CONFIG_CODA_FS is not set
805# CONFIG_AFS_FS is not set 593# CONFIG_AFS_FS is not set
806# CONFIG_9P_FS is not set
807 594
808# 595#
809# Partition Types 596# Partition Types
810# 597#
811# CONFIG_PARTITION_ADVANCED is not set 598# CONFIG_PARTITION_ADVANCED is not set
812CONFIG_MSDOS_PARTITION=y 599CONFIG_MSDOS_PARTITION=y
813
814#
815# Native Language Support
816#
817# CONFIG_NLS is not set 600# CONFIG_NLS is not set
818 601# CONFIG_DLM is not set
819#
820# Distributed Lock Manager
821#
822 602
823# 603#
824# Profiling support 604# Profiling support
@@ -833,20 +613,22 @@ CONFIG_TRACE_IRQFLAGS_SUPPORT=y
833CONFIG_ENABLE_MUST_CHECK=y 613CONFIG_ENABLE_MUST_CHECK=y
834# CONFIG_MAGIC_SYSRQ is not set 614# CONFIG_MAGIC_SYSRQ is not set
835# CONFIG_UNUSED_SYMBOLS is not set 615# CONFIG_UNUSED_SYMBOLS is not set
616# CONFIG_DEBUG_FS is not set
836# CONFIG_HEADERS_CHECK is not set 617# CONFIG_HEADERS_CHECK is not set
837CONFIG_DEBUG_KERNEL=y 618CONFIG_DEBUG_KERNEL=y
838# CONFIG_DEBUG_SHIRQ is not set 619# CONFIG_DEBUG_SHIRQ is not set
839CONFIG_LOG_BUF_SHIFT=14
840# CONFIG_DETECT_SOFTLOCKUP is not set 620# CONFIG_DETECT_SOFTLOCKUP is not set
621# CONFIG_SCHED_DEBUG is not set
841# CONFIG_SCHEDSTATS is not set 622# CONFIG_SCHEDSTATS is not set
842# CONFIG_TIMER_STATS is not set 623# CONFIG_TIMER_STATS is not set
843# CONFIG_DEBUG_SLAB is not set 624# CONFIG_DEBUG_SLAB is not set
844# CONFIG_DEBUG_RT_MUTEXES is not set 625# CONFIG_DEBUG_RT_MUTEXES is not set
845# CONFIG_RT_MUTEX_TESTER is not set 626# CONFIG_RT_MUTEX_TESTER is not set
846# CONFIG_DEBUG_SPINLOCK is not set 627# CONFIG_DEBUG_SPINLOCK is not set
847CONFIG_DEBUG_MUTEXES=y 628# CONFIG_DEBUG_MUTEXES is not set
848# CONFIG_DEBUG_LOCK_ALLOC is not set 629# CONFIG_DEBUG_LOCK_ALLOC is not set
849# CONFIG_PROVE_LOCKING is not set 630# CONFIG_PROVE_LOCKING is not set
631# CONFIG_LOCK_STAT is not set
850# CONFIG_DEBUG_SPINLOCK_SLEEP is not set 632# CONFIG_DEBUG_SPINLOCK_SLEEP is not set
851# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set 633# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set
852# CONFIG_DEBUG_KOBJECT is not set 634# CONFIG_DEBUG_KOBJECT is not set
@@ -854,7 +636,9 @@ CONFIG_DEBUG_INFO=y
854# CONFIG_DEBUG_VM is not set 636# CONFIG_DEBUG_VM is not set
855# CONFIG_DEBUG_LIST is not set 637# CONFIG_DEBUG_LIST is not set
856CONFIG_FORCED_INLINING=y 638CONFIG_FORCED_INLINING=y
639# CONFIG_BOOT_PRINTK_DELAY is not set
857# CONFIG_RCU_TORTURE_TEST is not set 640# CONFIG_RCU_TORTURE_TEST is not set
641# CONFIG_FAULT_INJECTION is not set
858CONFIG_CROSSCOMPILE=y 642CONFIG_CROSSCOMPILE=y
859CONFIG_CMDLINE="nfsroot=192.168.192.169:/u1/mipsel,timeo=20 ip=dhcp" 643CONFIG_CMDLINE="nfsroot=192.168.192.169:/u1/mipsel,timeo=20 ip=dhcp"
860# CONFIG_DEBUG_STACK_USAGE is not set 644# CONFIG_DEBUG_STACK_USAGE is not set
@@ -865,60 +649,20 @@ CONFIG_CMDLINE="nfsroot=192.168.192.169:/u1/mipsel,timeo=20 ip=dhcp"
865# Security options 649# Security options
866# 650#
867# CONFIG_KEYS is not set 651# CONFIG_KEYS is not set
868 652# CONFIG_SECURITY is not set
869# 653# CONFIG_SECURITY_FILE_CAPABILITIES is not set
870# Cryptographic options 654# CONFIG_CRYPTO is not set
871#
872CONFIG_CRYPTO=y
873CONFIG_CRYPTO_ALGAPI=y
874CONFIG_CRYPTO_BLKCIPHER=m
875CONFIG_CRYPTO_HASH=y
876CONFIG_CRYPTO_MANAGER=y
877CONFIG_CRYPTO_HMAC=y
878CONFIG_CRYPTO_XCBC=m
879# CONFIG_CRYPTO_NULL is not set
880# CONFIG_CRYPTO_MD4 is not set
881CONFIG_CRYPTO_MD5=y
882# CONFIG_CRYPTO_SHA1 is not set
883# CONFIG_CRYPTO_SHA256 is not set
884# CONFIG_CRYPTO_SHA512 is not set
885# CONFIG_CRYPTO_WP512 is not set
886# CONFIG_CRYPTO_TGR192 is not set
887CONFIG_CRYPTO_GF128MUL=m
888CONFIG_CRYPTO_ECB=m
889CONFIG_CRYPTO_CBC=m
890CONFIG_CRYPTO_PCBC=m
891CONFIG_CRYPTO_LRW=m
892# CONFIG_CRYPTO_DES is not set
893CONFIG_CRYPTO_FCRYPT=m
894# CONFIG_CRYPTO_BLOWFISH is not set
895# CONFIG_CRYPTO_TWOFISH is not set
896# CONFIG_CRYPTO_SERPENT is not set
897# CONFIG_CRYPTO_AES is not set
898# CONFIG_CRYPTO_CAST5 is not set
899# CONFIG_CRYPTO_CAST6 is not set
900# CONFIG_CRYPTO_TEA is not set
901# CONFIG_CRYPTO_ARC4 is not set
902# CONFIG_CRYPTO_KHAZAD is not set
903# CONFIG_CRYPTO_ANUBIS is not set
904# CONFIG_CRYPTO_DEFLATE is not set
905# CONFIG_CRYPTO_MICHAEL_MIC is not set
906# CONFIG_CRYPTO_CRC32C is not set
907CONFIG_CRYPTO_CAMELLIA=m
908# CONFIG_CRYPTO_TEST is not set
909
910#
911# Hardware crypto devices
912#
913 655
914# 656#
915# Library routines 657# Library routines
916# 658#
917CONFIG_BITREVERSE=y
918# CONFIG_CRC_CCITT is not set 659# CONFIG_CRC_CCITT is not set
919CONFIG_CRC16=y 660# CONFIG_CRC16 is not set
920CONFIG_CRC32=y 661# CONFIG_CRC_ITU_T is not set
662# CONFIG_CRC32 is not set
663# CONFIG_CRC7 is not set
921# CONFIG_LIBCRC32C is not set 664# CONFIG_LIBCRC32C is not set
922CONFIG_PLIST=y 665CONFIG_PLIST=y
923CONFIG_HAS_IOMEM=y 666CONFIG_HAS_IOMEM=y
924CONFIG_HAS_IOPORT=y 667CONFIG_HAS_IOPORT=y
668CONFIG_HAS_DMA=y
diff --git a/arch/mips/configs/sb1250-swarm_defconfig b/arch/mips/configs/sb1250-swarm_defconfig
index 3ed991ae0ebe..49dfcef2518c 100644
--- a/arch/mips/configs/sb1250-swarm_defconfig
+++ b/arch/mips/configs/sb1250-swarm_defconfig
@@ -196,6 +196,7 @@ CONFIG_SYSVIPC_SYSCTL=y
196# CONFIG_UTS_NS is not set 196# CONFIG_UTS_NS is not set
197# CONFIG_AUDIT is not set 197# CONFIG_AUDIT is not set
198# CONFIG_IKCONFIG is not set 198# CONFIG_IKCONFIG is not set
199CONFIG_CGROUPS=y
199CONFIG_CPUSETS=y 200CONFIG_CPUSETS=y
200CONFIG_SYSFS_DEPRECATED=y 201CONFIG_SYSFS_DEPRECATED=y
201CONFIG_RELAY=y 202CONFIG_RELAY=y
diff --git a/arch/mips/emma2rh/markeins/setup.c b/arch/mips/emma2rh/markeins/setup.c
index 5e1da53b04a7..82f9e9013e70 100644
--- a/arch/mips/emma2rh/markeins/setup.c
+++ b/arch/mips/emma2rh/markeins/setup.c
@@ -104,12 +104,6 @@ void __init plat_time_init(void)
104 mips_hpt_frequency = (bus_frequency * (4 + reg)) / 4 / 2; 104 mips_hpt_frequency = (bus_frequency * (4 + reg)) / 4 / 2;
105} 105}
106 106
107void __init plat_timer_setup(struct irqaction *irq)
108{
109 /* we are using the cpu counter for timer interrupts */
110 setup_irq(CPU_IRQ_BASE + 7, irq);
111}
112
113static void markeins_board_init(void); 107static void markeins_board_init(void);
114extern void markeins_irq_setup(void); 108extern void markeins_irq_setup(void);
115 109
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 08b84d476c87..a915e5693421 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -10,6 +10,7 @@
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include <linux/percpu.h> 11#include <linux/percpu.h>
12 12
13#include <asm/smtc_ipi.h>
13#include <asm/time.h> 14#include <asm/time.h>
14 15
15static int mips_next_event(unsigned long delta, 16static int mips_next_event(unsigned long delta,
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index b997af713eb3..7852c7cdf29e 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -1172,8 +1172,8 @@ static int irix_core_dump(long signr, struct pt_regs *regs, struct file *file, u
1172 prstatus.pr_sighold = current->blocked.sig[0]; 1172 prstatus.pr_sighold = current->blocked.sig[0];
1173 psinfo.pr_pid = prstatus.pr_pid = current->pid; 1173 psinfo.pr_pid = prstatus.pr_pid = current->pid;
1174 psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid; 1174 psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid;
1175 psinfo.pr_pgrp = prstatus.pr_pgrp = process_group(current); 1175 psinfo.pr_pgrp = prstatus.pr_pgrp = task_pgrp_nr(current);
1176 psinfo.pr_sid = prstatus.pr_sid = process_session(current); 1176 psinfo.pr_sid = prstatus.pr_sid = task_session_nr(current);
1177 if (current->pid == current->tgid) { 1177 if (current->pid == current->tgid) {
1178 /* 1178 /*
1179 * This is the record for the group leader. Add in the 1179 * This is the record for the group leader. Add in the
diff --git a/arch/mips/kernel/irixsig.c b/arch/mips/kernel/irixsig.c
index 85c2e389edd6..a0a91056fda7 100644
--- a/arch/mips/kernel/irixsig.c
+++ b/arch/mips/kernel/irixsig.c
@@ -609,7 +609,7 @@ repeat:
609 p = list_entry(_p, struct task_struct, sibling); 609 p = list_entry(_p, struct task_struct, sibling);
610 if ((type == IRIX_P_PID) && p->pid != pid) 610 if ((type == IRIX_P_PID) && p->pid != pid)
611 continue; 611 continue;
612 if ((type == IRIX_P_PGID) && process_group(p) != pid) 612 if ((type == IRIX_P_PGID) && task_pgrp_nr(p) != pid)
613 continue; 613 continue;
614 if ((p->exit_signal != SIGCHLD)) 614 if ((p->exit_signal != SIGCHLD))
615 continue; 615 continue;
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index ee7790d9debe..4c477c7ff74a 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -763,11 +763,11 @@ asmlinkage int irix_setpgrp(int flags)
763 printk("[%s:%d] setpgrp(%d) ", current->comm, current->pid, flags); 763 printk("[%s:%d] setpgrp(%d) ", current->comm, current->pid, flags);
764#endif 764#endif
765 if(!flags) 765 if(!flags)
766 error = process_group(current); 766 error = task_pgrp_nr(current);
767 else 767 else
768 error = sys_setsid(); 768 error = sys_setsid();
769#ifdef DEBUG_PROCGRPS 769#ifdef DEBUG_PROCGRPS
770 printk("returning %d\n", process_group(current)); 770 printk("returning %d\n", task_pgrp_nr(current));
771#endif 771#endif
772 772
773 return error; 773 return error;
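
The irixelf.c, irixsig.c and sysirix.c hunks above all follow one pattern: direct reads of current->pid and the old process_group()/process_session() helpers are replaced by task_pid_nr(), task_pgrp_nr() and task_session_nr(), which report the identifiers as seen from the initial pid namespace. A minimal kernel-style sketch of the converted pattern (illustrative only, not part of the patch):

    #include <linux/kernel.h>
    #include <linux/sched.h>

    /* Print a task's ids using the namespace-aware helpers that the
     * hunks above switch to; the old calls are noted in comments. */
    static void report_task_ids(struct task_struct *tsk)
    {
            pr_info("%s: pid=%d pgrp=%d sid=%d\n",
                    tsk->comm,
                    task_pid_nr(tsk),       /* was tsk->pid             */
                    task_pgrp_nr(tsk),      /* was process_group(tsk)   */
                    task_session_nr(tsk));  /* was process_session(tsk) */
    }
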
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index ea7cfe766a8e..c4e6866d5cbc 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -40,17 +40,6 @@
40#include <irq.h> 40#include <irq.h>
41 41
42/* 42/*
43 * The integer part of the number of usecs per jiffy is taken from tick,
44 * but the fractional part is not recorded, so we calculate it using the
45 * initial value of HZ. This aids systems where tick isn't really an
46 * integer (e.g. for HZ = 128).
47 */
48#define USECS_PER_JIFFY TICK_SIZE
49#define USECS_PER_JIFFY_FRAC ((unsigned long)(u32)((1000000ULL << 32) / HZ))
50
51#define TICK_SIZE (tick_nsec / 1000)
52
53/*
54 * forward reference 43 * forward reference
55 */ 44 */
56DEFINE_SPINLOCK(rtc_lock); 45DEFINE_SPINLOCK(rtc_lock);
@@ -182,84 +171,59 @@ struct clocksource clocksource_mips = {
182 .flags = CLOCK_SOURCE_IS_CONTINUOUS, 171 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
183}; 172};
184 173
185static void __init init_mips_clocksource(void) 174void __init clocksource_set_clock(struct clocksource *cs, unsigned int clock)
186{ 175{
187 u64 temp; 176 u64 temp;
188 u32 shift; 177 u32 shift;
189 178
190 if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
191 return;
192
193	/* Calculate a somewhat reasonable rating value */
194 clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
195 /* Find a shift value */ 179 /* Find a shift value */
196 for (shift = 32; shift > 0; shift--) { 180 for (shift = 32; shift > 0; shift--) {
197 temp = (u64) NSEC_PER_SEC << shift; 181 temp = (u64) NSEC_PER_SEC << shift;
198 do_div(temp, mips_hpt_frequency); 182 do_div(temp, clock);
199 if ((temp >> 32) == 0) 183 if ((temp >> 32) == 0)
200 break; 184 break;
201 } 185 }
202 clocksource_mips.shift = shift; 186 cs->shift = shift;
203 clocksource_mips.mult = (u32)temp; 187 cs->mult = (u32) temp;
204
205 clocksource_register(&clocksource_mips);
206} 188}
207 189
208void __init __weak plat_time_init(void) 190void __cpuinit clockevent_set_clock(struct clock_event_device *cd,
191 unsigned int clock)
209{ 192{
193 u64 temp;
194 u32 shift;
195
196 /* Find a shift value */
197 for (shift = 32; shift > 0; shift--) {
198 temp = (u64) NSEC_PER_SEC << shift;
199 do_div(temp, clock);
200 if ((temp >> 32) == 0)
201 break;
202 }
203 cd->shift = shift;
204 cd->mult = (u32) temp;
210} 205}
211 206
212void __init __weak plat_timer_setup(struct irqaction *irq) 207static void __init init_mips_clocksource(void)
213{ 208{
214} 209 if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
210 return;
215 211
216#ifdef CONFIG_MIPS_MT_SMTC 212	/* Calculate a somewhat reasonable rating value */
217DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); 213 clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
218 214
219static void smtc_set_mode(enum clock_event_mode mode, 215 clocksource_set_clock(&clocksource_mips, mips_hpt_frequency);
220 struct clock_event_device *evt) 216
221{ 217 clocksource_register(&clocksource_mips);
222} 218}
223 219
224static void mips_broadcast(cpumask_t mask) 220void __init __weak plat_time_init(void)
225{ 221{
226 unsigned int cpu;
227
228 for_each_cpu_mask(cpu, mask)
229 smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
230} 222}
231 223
232static void setup_smtc_dummy_clockevent_device(void) 224void __init __weak plat_timer_setup(struct irqaction *irq)
233{ 225{
234	//uint64_t mips_freq = mips_hpt_frequency;
235 unsigned int cpu = smp_processor_id();
236 struct clock_event_device *cd;
237
238 cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
239
240 cd->name = "SMTC";
241 cd->features = CLOCK_EVT_FEAT_DUMMY;
242
243 /* Calculate the min / max delta */
244 cd->mult = 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
245 cd->shift = 0; //32;
246 cd->max_delta_ns = 0; //clockevent_delta2ns(0x7fffffff, cd);
247 cd->min_delta_ns = 0; //clockevent_delta2ns(0x30, cd);
248
249 cd->rating = 200;
250 cd->irq = 17; //-1;
251// if (cpu)
252// cd->cpumask = CPU_MASK_ALL; // cpumask_of_cpu(cpu);
253// else
254 cd->cpumask = cpumask_of_cpu(cpu);
255
256 cd->set_mode = smtc_set_mode;
257
258 cd->broadcast = mips_broadcast;
259
260 clockevents_register_device(cd);
261} 226}
262#endif
263 227
264void __init time_init(void) 228void __init time_init(void)
265{ 229{
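
The new clocksource_set_clock() and clockevent_set_clock() helpers both perform the same search: find the largest shift (at most 32) for which (NSEC_PER_SEC << shift) / clock still fits in 32 bits, then store the quotient as mult, so that nanoseconds can later be derived as (cycles * mult) >> shift. A standalone user-space rendering of that loop, using stdint types in place of the kernel's u32/u64 (the function name below is illustrative):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    /* Find mult/shift so that ns ~= (cycles * mult) >> shift, mirroring
     * clocksource_set_clock()/clockevent_set_clock() above. */
    static void set_clock_scale(uint32_t clock_hz, uint32_t *mult, uint32_t *shift)
    {
            uint64_t temp = 0;
            uint32_t s;

            for (s = 32; s > 0; s--) {
                    temp = (NSEC_PER_SEC << s) / clock_hz;
                    if ((temp >> 32) == 0)
                            break;
            }
            *shift = s;
            *mult = (uint32_t)temp;
    }

    int main(void)
    {
            uint32_t mult, shift;

            set_clock_scale(100000000, &mult, &shift);  /* 100 MHz example clock */
            printf("mult=%u shift=%u\n", mult, shift);
            return 0;
    }

For a 100 MHz counter this yields shift=28 and mult=2684354560, i.e. one cycle converts to exactly 10 ns.
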
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 7b78d137259f..fa500787152d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -314,7 +314,7 @@ void show_registers(const struct pt_regs *regs)
314 __show_regs(regs); 314 __show_regs(regs);
315 print_modules(); 315 print_modules();
316 printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n", 316 printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
317 current->comm, current->pid, current_thread_info(), current); 317 current->comm, task_pid_nr(current), current_thread_info(), current);
318 show_stacktrace(current, regs); 318 show_stacktrace(current, regs);
319 show_code((unsigned int __user *) regs->cp0_epc); 319 show_code((unsigned int __user *) regs->cp0_epc);
320 printk("\n"); 320 printk("\n");
diff --git a/arch/mips/lemote/lm2e/setup.c b/arch/mips/lemote/lm2e/setup.c
index 09314a20f9fb..2cc6745991ab 100644
--- a/arch/mips/lemote/lm2e/setup.c
+++ b/arch/mips/lemote/lm2e/setup.c
@@ -53,11 +53,6 @@ unsigned long bus_clock;
53unsigned int memsize; 53unsigned int memsize;
54unsigned int highmemsize = 0; 54unsigned int highmemsize = 0;
55 55
56void __init plat_timer_setup(struct irqaction *irq)
57{
58 setup_irq(MIPS_CPU_IRQ_BASE + 7, irq);
59}
60
61void __init plat_time_init(void) 56void __init plat_time_init(void)
62{ 57{
63 /* setup mips r4k timer */ 58 /* setup mips r4k timer */
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 5699c7713e2f..fa636fc6b7b9 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -173,7 +173,7 @@ no_context:
173 */ 173 */
174out_of_memory: 174out_of_memory:
175 up_read(&mm->mmap_sem); 175 up_read(&mm->mmap_sem);
176 if (is_init(tsk)) { 176 if (is_global_init(tsk)) {
177 yield(); 177 yield();
178 down_read(&mm->mmap_sem); 178 down_read(&mm->mmap_sem);
179 goto survive; 179 goto survive;
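
The fault-path change above is another pid-namespace conversion: is_init() becomes is_global_init(), so only the real init task (pid 1 of the initial namespace) gets the retry-instead-of-kill treatment on out-of-memory, while an init of a child namespace is handled like any other task. A kernel-style sketch of the resulting logic (names and structure illustrative, not the patched function itself):

    #include <linux/sched.h>

    /* Illustrative OOM-path skeleton: only the global init task retries. */
    static int handle_oom(struct task_struct *tsk, struct mm_struct *mm)
    {
            up_read(&mm->mmap_sem);
            if (is_global_init(tsk)) {      /* pid 1 in the initial namespace */
                    yield();
                    down_read(&mm->mmap_sem);
                    return 1;               /* caller retries the fault */
            }
            return 0;                       /* caller kills the task */
    }
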
diff --git a/arch/mips/oprofile/Kconfig b/arch/mips/oprofile/Kconfig
deleted file mode 100644
index fb6f235348b0..000000000000
--- a/arch/mips/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING && !MIPS_MT_SMTC && EXPERIMENTAL
15 help
16 OProfile is a profiling system capable of profiling the
17	  whole system, including the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/mips/pmc-sierra/msp71xx/msp_time.c b/arch/mips/pmc-sierra/msp71xx/msp_time.c
index f221d4763625..7cfeda5a651b 100644
--- a/arch/mips/pmc-sierra/msp71xx/msp_time.c
+++ b/arch/mips/pmc-sierra/msp71xx/msp_time.c
@@ -86,8 +86,5 @@ void __init plat_timer_setup(struct irqaction *irq)
86#ifdef CONFIG_IRQ_MSP_CIC 86#ifdef CONFIG_IRQ_MSP_CIC
87 /* we are using the vpe0 counter for timer interrupts */ 87 /* we are using the vpe0 counter for timer interrupts */
88 setup_irq(MSP_INT_VPE0_TIMER, irq); 88 setup_irq(MSP_INT_VPE0_TIMER, irq);
89#else
90 /* we are using the mips counter for timer interrupts */
91 setup_irq(MSP_INT_TIMER, irq);
92#endif 89#endif
93} 90}
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 015fcc363dc0..855977ca51cd 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -137,11 +137,6 @@ int rtc_mips_set_time(unsigned long tim)
137 return 0; 137 return 0;
138} 138}
139 139
140void __init plat_timer_setup(struct irqaction *irq)
141{
142 setup_irq(7, irq);
143}
144
145void __init plat_time_init(void) 140void __init plat_time_init(void)
146{ 141{
147 mips_hpt_frequency = cpu_clock_freq / 2; 142 mips_hpt_frequency = cpu_clock_freq / 2;
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index 6eac36d1b8c8..02b266a31c46 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -69,8 +69,9 @@ void bcm1480_smp_init(void)
69 69
70void bcm1480_smp_finish(void) 70void bcm1480_smp_finish(void)
71{ 71{
72 extern void bcm1480_time_init(void); 72 extern void sb1480_clockevent_init(void);
73 bcm1480_time_init(); 73
74 sb1480_clockevent_init();
74 local_irq_enable(); 75 local_irq_enable();
75} 76}
76 77
diff --git a/arch/mips/sibyte/bcm1480/time.c b/arch/mips/sibyte/bcm1480/time.c
index 5b4bfbbb5a24..c730744aa474 100644
--- a/arch/mips/sibyte/bcm1480/time.c
+++ b/arch/mips/sibyte/bcm1480/time.c
@@ -27,9 +27,8 @@
27 */ 27 */
28#include <linux/clockchips.h> 28#include <linux/clockchips.h>
29#include <linux/interrupt.h> 29#include <linux/interrupt.h>
30#include <linux/sched.h> 30#include <linux/percpu.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <linux/kernel_stat.h>
33 32
34#include <asm/irq.h> 33#include <asm/irq.h>
35#include <asm/addrspace.h> 34#include <asm/addrspace.h>
@@ -101,25 +100,36 @@ static void sibyte_set_mode(enum clock_event_mode mode,
101 break; 100 break;
102 101
103 case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */ 102 case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
103 case CLOCK_EVT_MODE_RESUME:
104 ; 104 ;
105 } 105 }
106} 106}
107 107
108struct clock_event_device sibyte_hpt_clockevent = { 108static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
109 .name = "bcm1480-counter", 109{
110 .features = CLOCK_EVT_FEAT_PERIODIC, 110 unsigned int cpu = smp_processor_id();
111 .set_mode = sibyte_set_mode, 111 void __iomem *timer_init;
112 .shift = 32, 112 unsigned int cnt;
113 .irq = 0, 113 int res;
114}; 114
115 timer_init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
116 cnt = __raw_readq(timer_init);
117 cnt += delta;
118 __raw_writeq(cnt, timer_init);
119 res = ((long)(__raw_readq(timer_init) - cnt ) > 0) ? -ETIME : 0;
120
121 return res;
122}
123
124static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
115 125
116static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) 126static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
117{ 127{
118 struct clock_event_device *cd = &sibyte_hpt_clockevent;
119 unsigned int cpu = smp_processor_id(); 128 unsigned int cpu = smp_processor_id();
129 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
120 130
121 /* Reset the timer */ 131 /* Reset the timer */
122 __raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS, 132 __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
123 IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG))); 133 IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
124 cd->event_handler(cd); 134 cd->event_handler(cd);
125 135
@@ -140,24 +150,21 @@ static struct irqaction sibyte_counter_irqaction = {
140 * called directly from irq_handler.S when IP[4] is set during an 150 * called directly from irq_handler.S when IP[4] is set during an
141 * interrupt 151 * interrupt
142 */ 152 */
143static void __init sb1480_clockevent_init(void) 153void __cpuinit sb1480_clockevent_init(void)
144{ 154{
145 unsigned int cpu = smp_processor_id(); 155 unsigned int cpu = smp_processor_id();
146 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; 156 unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
157 struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
147 158
148 setup_irq(irq, &sibyte_counter_irqaction); 159 cd->name = "bcm1480-counter";
149} 160 cd->features = CLOCK_EVT_FEAT_PERIODIC |
161 CLOCK_EVT_MODE_ONESHOT;
162 cd->set_next_event = sibyte_next_event;
163 cd->set_mode = sibyte_set_mode;
164 cd->irq = irq;
165 clockevent_set_clock(cd, BCM1480_HPT_VALUE);
150 166
151void bcm1480_timer_interrupt(void) 167 setup_irq(irq, &sibyte_counter_irqaction);
152{
153 int cpu = smp_processor_id();
154 int irq = K_BCM1480_INT_TIMER_0 + cpu;
155
156 /* Reset the timer */
157 __raw_writeq(M_SCD_TIMER_ENABLE|M_SCD_TIMER_MODE_CONTINUOUS,
158 IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
159
160 ll_timer_interrupt(irq);
161} 168}
162 169
163static cycle_t bcm1480_hpt_read(void) 170static cycle_t bcm1480_hpt_read(void)
@@ -168,9 +175,26 @@ static cycle_t bcm1480_hpt_read(void)
168 return (jiffies + 1) * (BCM1480_HPT_VALUE / HZ) - count; 175 return (jiffies + 1) * (BCM1480_HPT_VALUE / HZ) - count;
169} 176}
170 177
178struct clocksource bcm1480_clocksource = {
179 .name = "MIPS",
180 .rating = 200,
181 .read = bcm1480_hpt_read,
182 .mask = CLOCKSOURCE_MASK(32),
183 .shift = 32,
184 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
185};
186
187void __init sb1480_clocksource_init(void)
188{
189 struct clocksource *cs = &bcm1480_clocksource;
190
191 clocksource_set_clock(cs, BCM1480_HPT_VALUE);
192 clocksource_register(cs);
193}
194
171void __init bcm1480_hpt_setup(void) 195void __init bcm1480_hpt_setup(void)
172{ 196{
173 clocksource_mips.read = bcm1480_hpt_read;
174 mips_hpt_frequency = BCM1480_HPT_VALUE; 197 mips_hpt_frequency = BCM1480_HPT_VALUE;
198 sb1480_clocksource_init();
175 sb1480_clockevent_init(); 199 sb1480_clockevent_init();
176} 200}
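
For the BCM1480 the clock_event_device becomes per-CPU and gains a one-shot set_next_event hook: it advances the SCD timer's INIT register by delta ticks and returns -ETIME if the deadline had already passed by the time the value was written back. A compressed user-space sketch of that idea, with a plain variable standing in for the memory-mapped register that the kernel accesses through __raw_readq()/__raw_writeq() (all names here are hypothetical):

    #include <stdint.h>
    #include <errno.h>

    /* Stand-in for the per-CPU SCD timer "init" register. */
    static uint64_t scd_timer_init_reg;
    static uint64_t timer_read(void)        { return scd_timer_init_reg; }
    static void     timer_write(uint64_t v) { scd_timer_init_reg = v; }

    /* Sketch of sibyte_next_event(): push the compare value 'delta' ticks
     * ahead and report -ETIME if that moment has already been missed. */
    static int sibyte_next_event_sketch(unsigned long delta)
    {
            uint64_t cnt = timer_read();

            cnt += delta;
            timer_write(cnt);

            return ((int64_t)(timer_read() - cnt) > 0) ? -ETIME : 0;
    }
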
diff --git a/arch/mips/sibyte/sb1250/irq.c b/arch/mips/sibyte/sb1250/irq.c
index 7659174819c6..500d17e84c09 100644
--- a/arch/mips/sibyte/sb1250/irq.c
+++ b/arch/mips/sibyte/sb1250/irq.c
@@ -400,43 +400,11 @@ static void sb1250_kgdb_interrupt(void)
400 400
401#endif /* CONFIG_KGDB */ 401#endif /* CONFIG_KGDB */
402 402
403static inline void sb1250_timer_interrupt(void)
404{
405 int cpu = smp_processor_id();
406 int irq = K_INT_TIMER_0 + cpu;
407
408 irq_enter();
409 kstat_this_cpu.irqs[irq]++;
410
411 write_seqlock(&xtime_lock);
412
413 /* ACK interrupt */
414 ____raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
415 IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)));
416
417 /*
418 * call the generic timer interrupt handling
419 */
420 do_timer(1);
421
422 write_sequnlock(&xtime_lock);
423
424 /*
425 * In UP mode, we call local_timer_interrupt() to do profiling
426	 * and process accounting.
427 *
428 * In SMP mode, local_timer_interrupt() is invoked by appropriate
429 * low-level local timer interrupt handler.
430 */
431 local_timer_interrupt(irq);
432
433 irq_exit();
434}
435
436extern void sb1250_mailbox_interrupt(void); 403extern void sb1250_mailbox_interrupt(void);
437 404
438asmlinkage void plat_irq_dispatch(void) 405asmlinkage void plat_irq_dispatch(void)
439{ 406{
407 unsigned int cpu = smp_processor_id();
440 unsigned int pending; 408 unsigned int pending;
441 409
442 /* 410 /*
@@ -454,7 +422,7 @@ asmlinkage void plat_irq_dispatch(void)
454 if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */ 422 if (pending & CAUSEF_IP7) /* CPU performance counter interrupt */
455 do_IRQ(MIPS_CPU_IRQ_BASE + 7); 423 do_IRQ(MIPS_CPU_IRQ_BASE + 7);
456 else if (pending & CAUSEF_IP4) 424 else if (pending & CAUSEF_IP4)
457 sb1250_timer_interrupt(); 425 do_IRQ(K_INT_TIMER_0 + cpu); /* sb1250_timer_interrupt() */
458 426
459#ifdef CONFIG_SMP 427#ifdef CONFIG_SMP
460 else if (pending & CAUSEF_IP3) 428 else if (pending & CAUSEF_IP3)
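
With the clockevent in place, the open-coded sb1250_timer_interrupt() (xtime_lock, do_timer(), manual ACK) disappears and plat_irq_dispatch() simply forwards the CP0 IP4 line to the generic IRQ layer as K_INT_TIMER_0 + cpu, where the registered handler runs the clock_event callback. A trimmed sketch of the dispatch shape (the pending computation and helper names follow the usual MIPS convention and are indicative, not copied from the patch):

    #include <linux/interrupt.h>
    #include <asm/mipsregs.h>

    /* Indicative dispatcher fragment: hand the per-CPU timer line to
     * do_IRQ() instead of calling an open-coded tick handler. */
    static void dispatch_sketch(void)
    {
            unsigned int cpu = smp_processor_id();
            unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;

            if (pending & CAUSEF_IP7)
                    do_IRQ(MIPS_CPU_IRQ_BASE + 7);  /* perf counter */
            else if (pending & CAUSEF_IP4)
                    do_IRQ(K_INT_TIMER_0 + cpu);    /* per-CPU timer */
            else
                    spurious_interrupt();
    }
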
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index c38e1f34460d..aaa4f30dda79 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -57,8 +57,9 @@ void sb1250_smp_init(void)
57 57
58void sb1250_smp_finish(void) 58void sb1250_smp_finish(void)
59{ 59{
60 extern void sb1250_time_init(void); 60 extern void sb1250_clockevent_init(void);
61 sb1250_time_init(); 61
62 sb1250_clockevent_init();
62 local_irq_enable(); 63 local_irq_enable();
63} 64}
64 65
diff --git a/arch/mips/sibyte/sb1250/time.c b/arch/mips/sibyte/sb1250/time.c
index fe11fed8e0d7..9ef54628bc9c 100644
--- a/arch/mips/sibyte/sb1250/time.c
+++ b/arch/mips/sibyte/sb1250/time.c
@@ -100,6 +100,7 @@ static void sibyte_set_mode(enum clock_event_mode mode,
100 break; 100 break;
101 101
102 case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */ 102 case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
103 case CLOCK_EVT_MODE_RESUME:
103 ; 104 ;
104 } 105 }
105} 106}
@@ -144,79 +145,7 @@ static struct irqaction sibyte_irqaction = {
144 .name = "timer", 145 .name = "timer",
145}; 146};
146 147
147/* 148void __cpuinit sb1250_clockevent_init(void)
148 * The general purpose timer ticks at 1 MHz independent of
149 * the rest of the system
150 */
151static void sibyte_set_mode(enum clock_event_mode mode,
152 struct clock_event_device *evt)
153{
154 unsigned int cpu = smp_processor_id();
155 void __iomem *timer_cfg, *timer_init;
156
157 timer_cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
158 timer_init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
159
160 switch (mode) {
161 case CLOCK_EVT_MODE_PERIODIC:
162 __raw_writeq(0, timer_cfg);
163 __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, timer_init);
164 __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
165 timer_cfg);
166 break;
167
168 case CLOCK_EVT_MODE_ONESHOT:
169 /* Stop the timer until we actually program a shot */
170 case CLOCK_EVT_MODE_SHUTDOWN:
171 __raw_writeq(0, timer_cfg);
172 break;
173
174 case CLOCK_EVT_MODE_UNUSED: /* shuddup gcc */
175 ;
176 }
177}
178
179static int
180sibyte_next_event(unsigned long delta, struct clock_event_device *evt)
181{
182 unsigned int cpu = smp_processor_id();
183 void __iomem *timer_cfg, *timer_init;
184
185 timer_cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
186 timer_init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
187
188 __raw_writeq(0, timer_cfg);
189 __raw_writeq(delta, timer_init);
190 __raw_writeq(M_SCD_TIMER_ENABLE, timer_cfg);
191
192 return 0;
193}
194
195struct clock_event_device sibyte_hpt_clockevent = {
196 .name = "sb1250-counter",
197 .features = CLOCK_EVT_FEAT_PERIODIC,
198 .set_mode = sibyte_set_mode,
199 .set_next_event = sibyte_next_event,
200 .shift = 32,
201 .irq = 0,
202};
203
204static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
205{
206 struct clock_event_device *cd = &sibyte_hpt_clockevent;
207
208 cd->event_handler(cd);
209
210 return IRQ_HANDLED;
211}
212
213static struct irqaction sibyte_irqaction = {
214 .handler = sibyte_counter_handler,
215 .flags = IRQF_DISABLED | IRQF_PERCPU,
216 .name = "timer",
217};
218
219static void __init sb1250_clockevent_init(void)
220{ 149{
221 struct clock_event_device *cd = &sibyte_hpt_clockevent; 150 struct clock_event_device *cd = &sibyte_hpt_clockevent;
222 unsigned int cpu = smp_processor_id(); 151 unsigned int cpu = smp_processor_id();
@@ -249,12 +178,6 @@ static void __init sb1250_clockevent_init(void)
249 clockevents_register_device(cd); 178 clockevents_register_device(cd);
250} 179}
251 180
252void __init plat_time_init(void)
253{
254 sb1250_clocksource_init();
255 sb1250_clockevent_init();
256}
257
258/* 181/*
259 * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over 182 * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
260 * again. 183 * again.
@@ -267,3 +190,26 @@ static cycle_t sb1250_hpt_read(void)
267 190
268 return SB1250_HPT_VALUE - count; 191 return SB1250_HPT_VALUE - count;
269} 192}
193
194struct clocksource bcm1250_clocksource = {
195 .name = "MIPS",
196 .rating = 200,
197 .read = sb1250_hpt_read,
198 .mask = CLOCKSOURCE_MASK(32),
199 .shift = 32,
200 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
201};
202
203void __init sb1250_clocksource_init(void)
204{
205 struct clocksource *cs = &bcm1250_clocksource;
206
207 clocksource_set_clock(cs, V_SCD_TIMER_FREQ);
208 clocksource_register(cs);
209}
210
211void __init plat_time_init(void)
212{
213 sb1250_clocksource_init();
214 sb1250_clockevent_init();
215}
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index 8b3ef0e4cd55..080c966263b7 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -69,31 +69,6 @@ const char *get_system_type(void)
69 return "SiByte " SIBYTE_BOARD_NAME; 69 return "SiByte " SIBYTE_BOARD_NAME;
70} 70}
71 71
72void __init plat_time_init(void)
73{
74#if defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
75 /* Setup HPT */
76 sb1250_hpt_setup();
77#endif
78}
79
80void __init plat_timer_setup(struct irqaction *irq)
81{
82 /*
83 * we don't set up irqaction, because we will deliver timer
84	 * interrupts through low-level (direct) mechanism.
85 */
86
87 /* We only need to setup the generic timer */
88#if defined(CONFIG_SIBYTE_BCM1x55) || defined(CONFIG_SIBYTE_BCM1x80)
89 bcm1480_time_init();
90#elif defined(CONFIG_SIBYTE_SB1250) || defined(CONFIG_SIBYTE_BCM112X)
91 sb1250_time_init();
92#else
93#error invalid SiByte board configuration
94#endif
95}
96
97int swarm_be_handler(struct pt_regs *regs, int is_fixup) 72int swarm_be_handler(struct pt_regs *regs, int is_fixup)
98{ 73{
99 if (!is_fixup && (regs->cp0_cause & 4)) { 74 if (!is_fixup && (regs->cp0_cause & 4)) {
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index b80877349d38..0910b35cb71f 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -121,15 +121,6 @@ void __init plat_time_init(void)
121 setup_pit_timer(); 121 setup_pit_timer();
122} 122}
123 123
124/*
125 * R4k counter based timer interrupt. Works on RM200-225 and possibly
126 * others but not on RM400
127 */
128static void __init sni_cpu_timer_setup(struct irqaction *irq)
129{
130 setup_irq(SNI_MIPS_IRQ_CPU_TIMER, irq);
131}
132
133void __init plat_timer_setup(struct irqaction *irq) 124void __init plat_timer_setup(struct irqaction *irq)
134{ 125{
135 switch (sni_brd_type) { 126 switch (sni_brd_type) {
@@ -139,15 +130,6 @@ void __init plat_timer_setup(struct irqaction *irq)
139 case SNI_BRD_MINITOWER: 130 case SNI_BRD_MINITOWER:
140 sni_a20r_timer_setup(irq); 131 sni_a20r_timer_setup(irq);
141 break; 132 break;
142
143 case SNI_BRD_PCI_TOWER:
144 case SNI_BRD_RM200:
145 case SNI_BRD_PCI_MTOWER:
146 case SNI_BRD_PCI_DESKTOP:
147 case SNI_BRD_PCI_TOWER_CPLUS:
148 case SNI_BRD_PCI_MTOWER_CPLUS:
149 sni_cpu_timer_setup(irq);
150 break;
151 } 133 }
152} 134}
153 135
diff --git a/arch/mips/tx4927/common/tx4927_setup.c b/arch/mips/tx4927/common/tx4927_setup.c
index 8ce0989671d8..36c5f200eb3d 100644
--- a/arch/mips/tx4927/common/tx4927_setup.c
+++ b/arch/mips/tx4927/common/tx4927_setup.c
@@ -72,22 +72,6 @@ void __init plat_time_init(void)
72#endif 72#endif
73} 73}
74 74
75void __init plat_timer_setup(struct irqaction *irq)
76{
77 setup_irq(TX4927_IRQ_CPU_TIMER, irq);
78
79#ifdef CONFIG_TOSHIBA_RBTX4927
80 {
81 extern void toshiba_rbtx4927_timer_setup(struct irqaction
82 *irq);
83 toshiba_rbtx4927_timer_setup(irq);
84 }
85#endif
86
87 return;
88}
89
90
91#ifdef DEBUG 75#ifdef DEBUG
92void print_cp0(char *key, int num, char *name, u32 val) 76void print_cp0(char *key, int num, char *name, u32 val)
93{ 77{
diff --git a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
index b97102a1c635..c7470fba6180 100644
--- a/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
+++ b/arch/mips/tx4927/toshiba_rbtx4927/toshiba_rbtx4927_setup.c
@@ -94,7 +94,6 @@
94#define TOSHIBA_RBTX4927_SETUP_EFWFU ( 1 << 3 ) 94#define TOSHIBA_RBTX4927_SETUP_EFWFU ( 1 << 3 )
95#define TOSHIBA_RBTX4927_SETUP_SETUP ( 1 << 4 ) 95#define TOSHIBA_RBTX4927_SETUP_SETUP ( 1 << 4 )
96#define TOSHIBA_RBTX4927_SETUP_TIME_INIT ( 1 << 5 ) 96#define TOSHIBA_RBTX4927_SETUP_TIME_INIT ( 1 << 5 )
97#define TOSHIBA_RBTX4927_SETUP_TIMER_SETUP ( 1 << 6 )
98#define TOSHIBA_RBTX4927_SETUP_PCIBIOS ( 1 << 7 ) 97#define TOSHIBA_RBTX4927_SETUP_PCIBIOS ( 1 << 7 )
99#define TOSHIBA_RBTX4927_SETUP_PCI1 ( 1 << 8 ) 98#define TOSHIBA_RBTX4927_SETUP_PCI1 ( 1 << 8 )
100#define TOSHIBA_RBTX4927_SETUP_PCI2 ( 1 << 9 ) 99#define TOSHIBA_RBTX4927_SETUP_PCI2 ( 1 << 9 )
@@ -108,7 +107,6 @@ static const u32 toshiba_rbtx4927_setup_debug_flag =
108 (TOSHIBA_RBTX4927_SETUP_NONE | TOSHIBA_RBTX4927_SETUP_INFO | 107 (TOSHIBA_RBTX4927_SETUP_NONE | TOSHIBA_RBTX4927_SETUP_INFO |
109 TOSHIBA_RBTX4927_SETUP_WARN | TOSHIBA_RBTX4927_SETUP_EROR | 108 TOSHIBA_RBTX4927_SETUP_WARN | TOSHIBA_RBTX4927_SETUP_EROR |
110 TOSHIBA_RBTX4927_SETUP_EFWFU | TOSHIBA_RBTX4927_SETUP_SETUP | 109 TOSHIBA_RBTX4927_SETUP_EFWFU | TOSHIBA_RBTX4927_SETUP_SETUP |
111 TOSHIBA_RBTX4927_SETUP_TIME_INIT | TOSHIBA_RBTX4927_SETUP_TIMER_SETUP
112 | TOSHIBA_RBTX4927_SETUP_PCIBIOS | TOSHIBA_RBTX4927_SETUP_PCI1 | 110 | TOSHIBA_RBTX4927_SETUP_PCIBIOS | TOSHIBA_RBTX4927_SETUP_PCI1 |
113 TOSHIBA_RBTX4927_SETUP_PCI2 | TOSHIBA_RBTX4927_SETUP_PCI66); 111 TOSHIBA_RBTX4927_SETUP_PCI2 | TOSHIBA_RBTX4927_SETUP_PCI66);
114#endif 112#endif
@@ -947,14 +945,6 @@ toshiba_rbtx4927_time_init(void)
947 945
948} 946}
949 947
950void __init toshiba_rbtx4927_timer_setup(struct irqaction *irq)
951{
952 TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIMER_SETUP,
953 "-\n");
954 TOSHIBA_RBTX4927_SETUP_DPRINTK(TOSHIBA_RBTX4927_SETUP_TIMER_SETUP,
955 "+\n");
956}
957
958static int __init toshiba_rbtx4927_rtc_init(void) 948static int __init toshiba_rbtx4927_rtc_init(void)
959{ 949{
960 static struct resource __initdata res = { 950 static struct resource __initdata res = {
diff --git a/arch/mips/tx4938/common/setup.c b/arch/mips/tx4938/common/setup.c
index ab4082267553..3ba4101d141e 100644
--- a/arch/mips/tx4938/common/setup.c
+++ b/arch/mips/tx4938/common/setup.c
@@ -24,7 +24,7 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/random.h> 25#include <linux/random.h>
26#include <linux/irq.h> 26#include <linux/irq.h>
27#include <asm/bitops.h> 27#include <linux/bitops.h>
28#include <asm/bootinfo.h> 28#include <asm/bootinfo.h>
29#include <asm/io.h> 29#include <asm/io.h>
30#include <asm/irq.h> 30#include <asm/irq.h>
@@ -43,8 +43,3 @@ plat_mem_setup(void)
43{ 43{
44 toshiba_rbtx4938_setup(); 44 toshiba_rbtx4938_setup();
45} 45}
46
47void __init plat_timer_setup(struct irqaction *irq)
48{
49 setup_irq(TX4938_IRQ_CPU_TIMER, irq);
50}
diff --git a/arch/mips/vr41xx/common/init.c b/arch/mips/vr41xx/common/init.c
index 407cec203b29..8d760df686c4 100644
--- a/arch/mips/vr41xx/common/init.c
+++ b/arch/mips/vr41xx/common/init.c
@@ -48,11 +48,6 @@ void __init plat_time_init(void)
48 mips_hpt_frequency = tclock / 4; 48 mips_hpt_frequency = tclock / 4;
49} 49}
50 50
51void __init plat_timer_setup(struct irqaction *irq)
52{
53 setup_irq(TIMER_IRQ, irq);
54}
55
56void __init plat_mem_setup(void) 51void __init plat_mem_setup(void)
57{ 52{
58 vr41xx_calculate_clock_frequency(); 53 vr41xx_calculate_clock_frequency();
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index 3d73545e8c48..b8ef1787a191 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -267,7 +267,7 @@ source "drivers/Kconfig"
267 267
268source "fs/Kconfig" 268source "fs/Kconfig"
269 269
270source "arch/parisc/oprofile/Kconfig" 270source "kernel/Kconfig.instrumentation"
271 271
272source "arch/parisc/Kconfig.debug" 272source "arch/parisc/Kconfig.debug"
273 273
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index fb35ebc0c4da..2ce3806f02e1 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -181,7 +181,7 @@ give_sigsegv:
181 si.si_signo = SIGSEGV; 181 si.si_signo = SIGSEGV;
182 si.si_errno = 0; 182 si.si_errno = 0;
183 si.si_code = SI_KERNEL; 183 si.si_code = SI_KERNEL;
184 si.si_pid = current->pid; 184 si.si_pid = task_pid_vnr(current);
185 si.si_uid = current->uid; 185 si.si_uid = current->uid;
186 si.si_addr = &frame->uc; 186 si.si_addr = &frame->uc;
187 force_sig_info(SIGSEGV, &si, current); 187 force_sig_info(SIGSEGV, &si, current);
diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
index bbf029a184ac..99fd56939afa 100644
--- a/arch/parisc/kernel/traps.c
+++ b/arch/parisc/kernel/traps.c
@@ -219,7 +219,7 @@ void die_if_kernel(char *str, struct pt_regs *regs, long err)
219 return; /* STFU */ 219 return; /* STFU */
220 220
221 printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n", 221 printk(KERN_CRIT "%s (pid %d): %s (code %ld) at " RFMT "\n",
222 current->comm, current->pid, str, err, regs->iaoq[0]); 222 current->comm, task_pid_nr(current), str, err, regs->iaoq[0]);
223#ifdef PRINT_USER_FAULTS 223#ifdef PRINT_USER_FAULTS
224 /* XXX for debugging only */ 224 /* XXX for debugging only */
225 show_regs(regs); 225 show_regs(regs);
@@ -252,7 +252,7 @@ KERN_CRIT " || ||\n");
252 252
253 if (err) 253 if (err)
254 printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n", 254 printk(KERN_CRIT "%s (pid %d): %s (code %ld)\n",
255 current->comm, current->pid, str, err); 255 current->comm, task_pid_nr(current), str, err);
256 256
257 /* Wot's wrong wif bein' racy? */ 257 /* Wot's wrong wif bein' racy? */
258 if (current->thread.flags & PARISC_KERNEL_DEATH) { 258 if (current->thread.flags & PARISC_KERNEL_DEATH) {
@@ -317,7 +317,7 @@ static void handle_break(struct pt_regs *regs)
317 if (unlikely(iir != GDB_BREAK_INSN)) { 317 if (unlikely(iir != GDB_BREAK_INSN)) {
318 printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n", 318 printk(KERN_DEBUG "break %d,%d: pid=%d command='%s'\n",
319 iir & 31, (iir>>13) & ((1<<13)-1), 319 iir & 31, (iir>>13) & ((1<<13)-1),
320 current->pid, current->comm); 320 task_pid_nr(current), current->comm);
321 show_regs(regs); 321 show_regs(regs);
322 } 322 }
323#endif 323#endif
@@ -747,7 +747,7 @@ void handle_interruption(int code, struct pt_regs *regs)
747 if (user_mode(regs)) { 747 if (user_mode(regs)) {
748#ifdef PRINT_USER_FAULTS 748#ifdef PRINT_USER_FAULTS
749 printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n", 749 printk(KERN_DEBUG "\nhandle_interruption() pid=%d command='%s'\n",
750 current->pid, current->comm); 750 task_pid_nr(current), current->comm);
751 show_regs(regs); 751 show_regs(regs);
752#endif 752#endif
753 /* SIGBUS, for lack of a better one. */ 753 /* SIGBUS, for lack of a better one. */
@@ -772,7 +772,7 @@ void handle_interruption(int code, struct pt_regs *regs)
772 else 772 else
773 printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ", 773 printk(KERN_DEBUG "User Fault (long pointer) (fault %d) ",
774 code); 774 code);
775 printk("pid=%d command='%s'\n", current->pid, current->comm); 775 printk("pid=%d command='%s'\n", task_pid_nr(current), current->comm);
776 show_regs(regs); 776 show_regs(regs);
777#endif 777#endif
778 si.si_signo = SIGSEGV; 778 si.si_signo = SIGSEGV;
diff --git a/arch/parisc/kernel/unaligned.c b/arch/parisc/kernel/unaligned.c
index 347bb922e6d0..aebf3c168871 100644
--- a/arch/parisc/kernel/unaligned.c
+++ b/arch/parisc/kernel/unaligned.c
@@ -469,7 +469,7 @@ void handle_unaligned(struct pt_regs *regs)
469 && ++unaligned_count < 5) { 469 && ++unaligned_count < 5) {
470 char buf[256]; 470 char buf[256];
471 sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n", 471 sprintf(buf, "%s(%d): unaligned access to 0x" RFMT " at ip=0x" RFMT "\n",
472 current->comm, current->pid, regs->ior, regs->iaoq[0]); 472 current->comm, task_pid_nr(current), regs->ior, regs->iaoq[0]);
473 printk(KERN_WARNING "%s", buf); 473 printk(KERN_WARNING "%s", buf);
474#ifdef DEBUG_UNALIGNED 474#ifdef DEBUG_UNALIGNED
475 show_regs(regs); 475 show_regs(regs);
diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
index 1c091b415cd9..b2e3e9a8cece 100644
--- a/arch/parisc/mm/fault.c
+++ b/arch/parisc/mm/fault.c
@@ -211,7 +211,7 @@ bad_area:
211#ifdef PRINT_USER_FAULTS 211#ifdef PRINT_USER_FAULTS
212 printk(KERN_DEBUG "\n"); 212 printk(KERN_DEBUG "\n");
213 printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n", 213 printk(KERN_DEBUG "do_page_fault() pid=%d command='%s' type=%lu address=0x%08lx\n",
214 tsk->pid, tsk->comm, code, address); 214 task_pid_nr(tsk), tsk->comm, code, address);
215 if (vma) { 215 if (vma) {
216 printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n", 216 printk(KERN_DEBUG "vm_start = 0x%08lx, vm_end = 0x%08lx\n",
217 vma->vm_start, vma->vm_end); 217 vma->vm_start, vma->vm_end);
diff --git a/arch/parisc/oprofile/Kconfig b/arch/parisc/oprofile/Kconfig
deleted file mode 100644
index 5ade19801b97..000000000000
--- a/arch/parisc/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17	  whole system, including the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 3763f681ce4c..18f397ca05ef 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -669,20 +669,7 @@ source "arch/powerpc/sysdev/qe_lib/Kconfig"
669 669
670source "lib/Kconfig" 670source "lib/Kconfig"
671 671
672menu "Instrumentation Support" 672source "kernel/Kconfig.instrumentation"
673
674source "arch/powerpc/oprofile/Kconfig"
675
676config KPROBES
677 bool "Kprobes"
678 depends on !BOOKE && !4xx && KALLSYMS && MODULES
679 help
680 Kprobes allows you to trap at almost any kernel address and
681 execute a callback function. register_kprobe() establishes
682 a probepoint and specifies the callback. Kprobes is useful
683 for kernel debugging, non-intrusive instrumentation and testing.
684 If in doubt, say "N".
685endmenu
686 673
687source "arch/powerpc/Kconfig.debug" 674source "arch/powerpc/Kconfig.debug"
688 675
diff --git a/arch/powerpc/configs/cell_defconfig b/arch/powerpc/configs/cell_defconfig
index 8b47c846421c..dcd7c02727c2 100644
--- a/arch/powerpc/configs/cell_defconfig
+++ b/arch/powerpc/configs/cell_defconfig
@@ -68,6 +68,7 @@ CONFIG_SYSVIPC_SYSCTL=y
68CONFIG_IKCONFIG=y 68CONFIG_IKCONFIG=y
69CONFIG_IKCONFIG_PROC=y 69CONFIG_IKCONFIG_PROC=y
70CONFIG_LOG_BUF_SHIFT=15 70CONFIG_LOG_BUF_SHIFT=15
71CONFIG_CGROUPS=y
71CONFIG_CPUSETS=y 72CONFIG_CPUSETS=y
72CONFIG_SYSFS_DEPRECATED=y 73CONFIG_SYSFS_DEPRECATED=y
73# CONFIG_RELAY is not set 74# CONFIG_RELAY is not set
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index bb8d4e46f0c5..05582af50c5b 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -71,6 +71,7 @@ CONFIG_TASK_DELAY_ACCT=y
71CONFIG_IKCONFIG=y 71CONFIG_IKCONFIG=y
72CONFIG_IKCONFIG_PROC=y 72CONFIG_IKCONFIG_PROC=y
73CONFIG_LOG_BUF_SHIFT=17 73CONFIG_LOG_BUF_SHIFT=17
74CONFIG_CGROUPS=y
74CONFIG_CPUSETS=y 75CONFIG_CPUSETS=y
75CONFIG_SYSFS_DEPRECATED=y 76CONFIG_SYSFS_DEPRECATED=y
76CONFIG_RELAY=y 77CONFIG_RELAY=y
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig
index c09eb8cfbe71..62a38406b62f 100644
--- a/arch/powerpc/configs/pseries_defconfig
+++ b/arch/powerpc/configs/pseries_defconfig
@@ -71,6 +71,7 @@ CONFIG_AUDITSYSCALL=y
71CONFIG_IKCONFIG=y 71CONFIG_IKCONFIG=y
72CONFIG_IKCONFIG_PROC=y 72CONFIG_IKCONFIG_PROC=y
73CONFIG_LOG_BUF_SHIFT=17 73CONFIG_LOG_BUF_SHIFT=17
74CONFIG_CGROUPS=y
74CONFIG_CPUSETS=y 75CONFIG_CPUSETS=y
75CONFIG_SYSFS_DEPRECATED=y 76CONFIG_SYSFS_DEPRECATED=y
76# CONFIG_RELAY is not set 77# CONFIG_RELAY is not set
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index e60a0c544d63..c0c8e8c3ced9 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -61,45 +61,39 @@ NORET_TYPE void machine_kexec(struct kimage *image)
61 for(;;); 61 for(;;);
62} 62}
63 63
64static int __init early_parse_crashk(char *p)
65{
66 unsigned long size;
67
68 if (!p)
69 return 1;
70
71 size = memparse(p, &p);
72
73 if (*p == '@')
74 crashk_res.start = memparse(p + 1, &p);
75 else
76 crashk_res.start = KDUMP_KERNELBASE;
77
78 crashk_res.end = crashk_res.start + size - 1;
79
80 return 0;
81}
82early_param("crashkernel", early_parse_crashk);
83
84void __init reserve_crashkernel(void) 64void __init reserve_crashkernel(void)
85{ 65{
86 unsigned long size; 66 unsigned long long crash_size, crash_base;
67 int ret;
68
69 /* this is necessary because of lmb_phys_mem_size() */
70 lmb_analyze();
71
72 /* use common parsing */
73 ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
74 &crash_size, &crash_base);
75 if (ret == 0 && crash_size > 0) {
76 if (crash_base == 0)
77 crash_base = KDUMP_KERNELBASE;
78 crashk_res.start = crash_base;
79 } else {
80 /* handle the device tree */
81 crash_size = crashk_res.end - crashk_res.start + 1;
82 }
87 83
88 if (crashk_res.start == 0) 84 if (crash_size == 0)
89 return; 85 return;
90 86
91 /* We might have got these values via the command line or the 87 /* We might have got these values via the command line or the
92 * device tree, either way sanitise them now. */ 88 * device tree, either way sanitise them now. */
93 89
94 size = crashk_res.end - crashk_res.start + 1;
95
96 if (crashk_res.start != KDUMP_KERNELBASE) 90 if (crashk_res.start != KDUMP_KERNELBASE)
97 printk("Crash kernel location must be 0x%x\n", 91 printk("Crash kernel location must be 0x%x\n",
98 KDUMP_KERNELBASE); 92 KDUMP_KERNELBASE);
99 93
100 crashk_res.start = KDUMP_KERNELBASE; 94 crashk_res.start = KDUMP_KERNELBASE;
101 size = PAGE_ALIGN(size); 95 crash_size = PAGE_ALIGN(crash_size);
102 crashk_res.end = crashk_res.start + size - 1; 96 crashk_res.end = crashk_res.start + crash_size - 1;
103 97
104 /* Crash kernel trumps memory limit */ 98 /* Crash kernel trumps memory limit */
105 if (memory_limit && memory_limit <= crashk_res.end) { 99 if (memory_limit && memory_limit <= crashk_res.end) {
@@ -108,7 +102,13 @@ void __init reserve_crashkernel(void)
108 memory_limit); 102 memory_limit);
109 } 103 }
110 104
111 lmb_reserve(crashk_res.start, size); 105 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
106 "for crashkernel (System RAM: %ldMB)\n",
107 (unsigned long)(crash_size >> 20),
108 (unsigned long)(crashk_res.start >> 20),
109 (unsigned long)(lmb_phys_mem_size() >> 20));
110
111 lmb_reserve(crashk_res.start, crash_size);
112} 112}
113 113
114int overlaps_crashkernel(unsigned long start, unsigned long size) 114int overlaps_crashkernel(unsigned long start, unsigned long size)
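
The machine_kexec.c change drops the private early_param("crashkernel") handler in favour of the generic parse_crashkernel() helper: the total memory reported by lmb_phys_mem_size() is passed in, a size and optional base come back, KDUMP_KERNELBASE is used when no base was requested, and only then is the region aligned and reserved. A condensed kernel-style sketch of that call sequence (the forced relocation to KDUMP_KERNELBASE, the memory_limit clamp and the device-tree fallback are trimmed; declarations such as crashk_res, boot_command_line and the lmb helpers come from the surrounding powerpc code):

    #include <linux/kexec.h>

    /* Condensed flow: common "crashkernel=size[@base]" parsing, then the
     * powerpc-specific placement and LMB reservation. */
    static void reserve_crashkernel_sketch(void)
    {
            unsigned long long crash_size, crash_base;

            lmb_analyze();          /* required before lmb_phys_mem_size() */

            if (parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
                                  &crash_size, &crash_base) || !crash_size)
                    return;

            if (crash_base == 0)
                    crash_base = KDUMP_KERNELBASE;

            crash_size = PAGE_ALIGN(crash_size);
            crashk_res.start = crash_base;
            crashk_res.end   = crash_base + crash_size - 1;

            lmb_reserve(crashk_res.start, crash_size);
    }
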
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ea6ad7a2a7e3..b9d88374f14f 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -459,7 +459,7 @@ void show_regs(struct pt_regs * regs)
459 printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); 459 printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr);
460#endif 460#endif
461 printk("TASK = %p[%d] '%s' THREAD: %p", 461 printk("TASK = %p[%d] '%s' THREAD: %p",
462 current, current->pid, current->comm, task_thread_info(current)); 462 current, task_pid_nr(current), current->comm, task_thread_info(current));
463 463
464#ifdef CONFIG_SMP 464#ifdef CONFIG_SMP
465 printk(" CPU: %d", smp_processor_id()); 465 printk(" CPU: %d", smp_processor_id());
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index bf9e39c6e296..59c464e26f38 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -201,7 +201,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
201 * generate the same exception over and over again and we get 201 * generate the same exception over and over again and we get
202 * nowhere. Better to kill it and let the kernel panic. 202 * nowhere. Better to kill it and let the kernel panic.
203 */ 203 */
204 if (is_init(current)) { 204 if (is_global_init(current)) {
205 __sighandler_t handler; 205 __sighandler_t handler;
206 206
207 spin_lock_irq(&current->sighand->siglock); 207 spin_lock_irq(&current->sighand->siglock);
@@ -881,7 +881,7 @@ void nonrecoverable_exception(struct pt_regs *regs)
881void trace_syscall(struct pt_regs *regs) 881void trace_syscall(struct pt_regs *regs)
882{ 882{
883 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n", 883 printk("Task: %p(%d), PC: %08lX/%08lX, Syscall: %3ld, Result: %s%ld %s\n",
884 current, current->pid, regs->nip, regs->link, regs->gpr[0], 884 current, task_pid_nr(current), regs->nip, regs->link, regs->gpr[0],
885 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted()); 885 regs->ccr&0x10000000?"Error=":"", regs->gpr[3], print_tainted());
886} 886}
887 887
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index ab3546c5ac3a..a18fda361cc0 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -375,7 +375,7 @@ bad_area_nosemaphore:
375 */ 375 */
376out_of_memory: 376out_of_memory:
377 up_read(&mm->mmap_sem); 377 up_read(&mm->mmap_sem);
378 if (is_init(current)) { 378 if (is_global_init(current)) {
379 yield(); 379 yield();
380 down_read(&mm->mmap_sem); 380 down_read(&mm->mmap_sem);
381 goto survive; 381 goto survive;
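
The same series renames is_init() to is_global_init() in the out-of-memory retry path above. A simplified sketch of what the predicate distinguishes (the body is an assumption; the real helper lives in the scheduler headers):

#include <linux/sched.h>

/* Assumption: simplified.  Only pid 1 of the initial pid namespace is
 * the init that must never be killed on a fault; pid 1 of a child pid
 * namespace is an ordinary, killable process. */
static inline int sketch_is_global_init(struct task_struct *tsk)
{
	return task_pid_nr(tsk) == 1;
}
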
diff --git a/arch/powerpc/oprofile/Kconfig b/arch/powerpc/oprofile/Kconfig
deleted file mode 100644
index 7089e79689b9..000000000000
--- a/arch/powerpc/oprofile/Kconfig
+++ /dev/null
@@ -1,24 +0,0 @@
1config PROFILING
2 bool "Profiling support (EXPERIMENTAL)"
3 help
4 Say Y here to enable the extended profiling support mechanisms used
5 by profilers such as OProfile.
6
7
8config OPROFILE
9 tristate "OProfile system profiling (EXPERIMENTAL)"
10 depends on PROFILING
11 help
12 OProfile is a profiling system capable of profiling the
13 whole system, include the kernel, kernel modules, libraries,
14 and applications.
15
16 If unsure, say N.
17
18config OPROFILE_CELL
19 bool "OProfile for Cell Broadband Engine"
20 depends on (SPU_FS = y && OPROFILE = m) || (SPU_FS = y && OPROFILE = y) || (SPU_FS = m && OPROFILE = m)
21 default y
22 help
23 Profiling of Cell BE SPUs requires special support enabled
24 by this option.
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 354c05861629..144177d77cf1 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -41,13 +41,13 @@
41#include <linux/root_dev.h> 41#include <linux/root_dev.h>
42#include <linux/serial.h> 42#include <linux/serial.h>
43#include <linux/smp.h> 43#include <linux/smp.h>
44#include <linux/bitops.h>
44 45
45#include <asm/processor.h> 46#include <asm/processor.h>
46#include <asm/sections.h> 47#include <asm/sections.h>
47#include <asm/prom.h> 48#include <asm/prom.h>
48#include <asm/system.h> 49#include <asm/system.h>
49#include <asm/pgtable.h> 50#include <asm/pgtable.h>
50#include <asm/bitops.h>
51#include <asm/io.h> 51#include <asm/io.h>
52#include <asm/kexec.h> 52#include <asm/kexec.h>
53#include <asm/pci-bridge.h> 53#include <asm/pci-bridge.h>
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 3a393c7f390e..a1ab25c7082f 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -332,7 +332,7 @@ static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err)
332 err->disposition == RTAS_DISP_NOT_RECOVERED && 332 err->disposition == RTAS_DISP_NOT_RECOVERED &&
333 err->target == RTAS_TARGET_MEMORY && 333 err->target == RTAS_TARGET_MEMORY &&
334 err->type == RTAS_TYPE_ECC_UNCORR && 334 err->type == RTAS_TYPE_ECC_UNCORR &&
335 !(current->pid == 0 || is_init(current))) { 335 !(current->pid == 0 || is_global_init(current))) {
336 /* Kill off a user process with an ECC error */ 336 /* Kill off a user process with an ECC error */
337 printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n", 337 printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n",
338 current->pid); 338 current->pid);
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig
index 607925c8a99e..6473fa7cb4b9 100644
--- a/arch/ppc/Kconfig
+++ b/arch/ppc/Kconfig
@@ -1317,7 +1317,7 @@ endmenu
1317 1317
1318source "lib/Kconfig" 1318source "lib/Kconfig"
1319 1319
1320source "arch/powerpc/oprofile/Kconfig" 1320source "kernel/Kconfig.instrumentation"
1321 1321
1322source "arch/ppc/Kconfig.debug" 1322source "arch/ppc/Kconfig.debug"
1323 1323
diff --git a/arch/ppc/kernel/traps.c b/arch/ppc/kernel/traps.c
index 3f3b292eb773..c78568905c3b 100644
--- a/arch/ppc/kernel/traps.c
+++ b/arch/ppc/kernel/traps.c
@@ -121,7 +121,7 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
121 * generate the same exception over and over again and we get 121 * generate the same exception over and over again and we get
122 * nowhere. Better to kill it and let the kernel panic. 122 * nowhere. Better to kill it and let the kernel panic.
123 */ 123 */
124 if (is_init(current)) { 124 if (is_global_init(current)) {
125 __sighandler_t handler; 125 __sighandler_t handler;
126 126
127 spin_lock_irq(&current->sighand->siglock); 127 spin_lock_irq(&current->sighand->siglock);
diff --git a/arch/ppc/mm/fault.c b/arch/ppc/mm/fault.c
index 94913ddcf76e..254c23b755e6 100644
--- a/arch/ppc/mm/fault.c
+++ b/arch/ppc/mm/fault.c
@@ -290,7 +290,7 @@ bad_area:
290 */ 290 */
291out_of_memory: 291out_of_memory:
292 up_read(&mm->mmap_sem); 292 up_read(&mm->mmap_sem);
293 if (is_init(current)) { 293 if (is_global_init(current)) {
294 yield(); 294 yield();
295 down_read(&mm->mmap_sem); 295 down_read(&mm->mmap_sem);
296 goto survive; 296 goto survive;
diff --git a/arch/ppc/platforms/chestnut.c b/arch/ppc/platforms/chestnut.c
index 248684f50dd9..dcd6070b85eb 100644
--- a/arch/ppc/platforms/chestnut.c
+++ b/arch/ppc/platforms/chestnut.c
@@ -49,7 +49,6 @@ extern void gen550_progress(char *, unsigned short);
49extern void gen550_init(int, struct uart_port *); 49extern void gen550_init(int, struct uart_port *);
50extern void mv64360_pcibios_fixup(mv64x60_handle_t *bh); 50extern void mv64360_pcibios_fixup(mv64x60_handle_t *bh);
51 51
52#define BIT(x) (1<<x)
53#define CHESTNUT_PRESERVE_MASK (BIT(MV64x60_CPU2DEV_0_WIN) | \ 52#define CHESTNUT_PRESERVE_MASK (BIT(MV64x60_CPU2DEV_0_WIN) | \
54 BIT(MV64x60_CPU2DEV_1_WIN) | \ 53 BIT(MV64x60_CPU2DEV_1_WIN) | \
55 BIT(MV64x60_CPU2DEV_2_WIN) | \ 54 BIT(MV64x60_CPU2DEV_2_WIN) | \
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b71132166f60..4ec716d8c1a6 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -529,21 +529,7 @@ source "drivers/Kconfig"
529 529
530source "fs/Kconfig" 530source "fs/Kconfig"
531 531
532menu "Instrumentation Support" 532source "kernel/Kconfig.instrumentation"
533
534source "arch/s390/oprofile/Kconfig"
535
536config KPROBES
537 bool "Kprobes (EXPERIMENTAL)"
538 depends on EXPERIMENTAL && MODULES
539 help
540 Kprobes allows you to trap at almost any kernel address and
541 execute a callback function. register_kprobe() establishes
542 a probepoint and specifies the callback. Kprobes is useful
543 for kernel debugging, non-intrusive instrumentation and testing.
544 If in doubt, say "N".
545
546endmenu
547 533
548source "arch/s390/Kconfig.debug" 534source "arch/s390/Kconfig.debug"
549 535
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index abb447a3e472..70c57378f426 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -166,7 +166,7 @@ void show_regs(struct pt_regs *regs)
166 166
167 printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted()); 167 printk("CPU: %d %s\n", task_thread_info(tsk)->cpu, print_tainted());
168 printk("Process %s (pid: %d, task: %p, ksp: %p)\n", 168 printk("Process %s (pid: %d, task: %p, ksp: %p)\n",
169 current->comm, current->pid, (void *) tsk, 169 current->comm, task_pid_nr(current), (void *) tsk,
170 (void *) tsk->thread.ksp); 170 (void *) tsk->thread.ksp);
171 171
172 show_registers(regs); 172 show_registers(regs);
diff --git a/arch/s390/lib/uaccess_pt.c b/arch/s390/lib/uaccess_pt.c
index 60604b2819b2..b159a9d65680 100644
--- a/arch/s390/lib/uaccess_pt.c
+++ b/arch/s390/lib/uaccess_pt.c
@@ -64,7 +64,7 @@ out:
64 64
65out_of_memory: 65out_of_memory:
66 up_read(&mm->mmap_sem); 66 up_read(&mm->mmap_sem);
67 if (is_init(current)) { 67 if (is_global_init(current)) {
68 yield(); 68 yield();
69 down_read(&mm->mmap_sem); 69 down_read(&mm->mmap_sem);
70 goto survive; 70 goto survive;
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index 14c241ccdd4d..2456b52ed068 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -211,7 +211,7 @@ static int do_out_of_memory(struct pt_regs *regs, unsigned long error_code,
211 struct mm_struct *mm = tsk->mm; 211 struct mm_struct *mm = tsk->mm;
212 212
213 up_read(&mm->mmap_sem); 213 up_read(&mm->mmap_sem);
214 if (is_init(tsk)) { 214 if (is_global_init(tsk)) {
215 yield(); 215 yield();
216 down_read(&mm->mmap_sem); 216 down_read(&mm->mmap_sem);
217 return 1; 217 return 1;
diff --git a/arch/s390/oprofile/Kconfig b/arch/s390/oprofile/Kconfig
deleted file mode 100644
index 208220a5f23f..000000000000
--- a/arch/s390/oprofile/Kconfig
+++ /dev/null
@@ -1,22 +0,0 @@
1
2menu "Profiling support"
3
4config PROFILING
5 bool "Profiling support"
6 help
7 Say Y here to enable profiling support mechanisms used by
8 profilers such as readprofile or OProfile.
9
10
11config OPROFILE
12 tristate "OProfile system profiling"
13 depends on PROFILING
14 help
15 OProfile is a profiling system capable of profiling the
16 whole system, include the kernel, kernel modules, libraries,
17 and applications.
18
19 If unsure, say N.
20
21endmenu
22
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 44982c1dfa23..247f8a65e733 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -758,7 +758,7 @@ source "drivers/Kconfig"
758 758
759source "fs/Kconfig" 759source "fs/Kconfig"
760 760
761source "arch/sh/oprofile/Kconfig" 761source "kernel/Kconfig.instrumentation"
762 762
763source "arch/sh/Kconfig.debug" 763source "arch/sh/Kconfig.debug"
764 764
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 790ed69b8666..5c17de51987e 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -104,24 +104,3 @@ NORET_TYPE void machine_kexec(struct kimage *image)
104 (*rnk)(page_list, reboot_code_buffer, image->start, vbr_reg); 104 (*rnk)(page_list, reboot_code_buffer, image->start, vbr_reg);
105} 105}
106 106
107/* crashkernel=size@addr specifies the location to reserve for
108 * a crash kernel. By reserving this memory we guarantee
109 * that linux never sets it up as a DMA target.
110 * Useful for holding code to do something appropriate
111 * after a kernel panic.
112 */
113static int __init parse_crashkernel(char *arg)
114{
115 unsigned long size, base;
116 size = memparse(arg, &arg);
117 if (*arg == '@') {
118 base = memparse(arg+1, &arg);
119 /* FIXME: Do I want a sanity check
120 * to validate the memory range?
121 */
122 crashk_res.start = base;
123 crashk_res.end = base + size - 1;
124 }
125 return 0;
126}
127early_param("crashkernel", parse_crashkernel);
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index b4469992d6b2..6d7f2b07e491 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -121,7 +121,7 @@ void machine_power_off(void)
121void show_regs(struct pt_regs * regs) 121void show_regs(struct pt_regs * regs)
122{ 122{
123 printk("\n"); 123 printk("\n");
124 printk("Pid : %d, Comm: %20s\n", current->pid, current->comm); 124 printk("Pid : %d, Comm: %20s\n", task_pid_nr(current), current->comm);
125 print_symbol("PC is at %s\n", instruction_pointer(regs)); 125 print_symbol("PC is at %s\n", instruction_pointer(regs));
126 printk("PC : %08lx SP : %08lx SR : %08lx ", 126 printk("PC : %08lx SP : %08lx SR : %08lx ",
127 regs->pc, regs->regs[15], regs->sr); 127 regs->pc, regs->regs[15], regs->sr);
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index b3027a6775b9..b749403f6b38 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -128,6 +128,37 @@ static void __init register_bootmem_low_pages(void)
128 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages)); 128 free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
129} 129}
130 130
131#ifdef CONFIG_KEXEC
132static void __init reserve_crashkernel(void)
133{
134 unsigned long long free_mem;
135 unsigned long long crash_size, crash_base;
136 int ret;
137
138 free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
139
140 ret = parse_crashkernel(boot_command_line, free_mem,
141 &crash_size, &crash_base);
142 if (ret == 0 && crash_size) {
143 if (crash_base > 0) {
144 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
145 "for crashkernel (System RAM: %ldMB)\n",
146 (unsigned long)(crash_size >> 20),
147 (unsigned long)(crash_base >> 20),
148 (unsigned long)(free_mem >> 20));
149 crashk_res.start = crash_base;
150 crashk_res.end = crash_base + crash_size - 1;
151 reserve_bootmem(crash_base, crash_size);
152 } else
153 printk(KERN_INFO "crashkernel reservation failed - "
154 "you have to specify a base address\n");
155 }
156}
157#else
158static inline void __init reserve_crashkernel(void)
159{}
160#endif
161
131void __init setup_bootmem_allocator(unsigned long free_pfn) 162void __init setup_bootmem_allocator(unsigned long free_pfn)
132{ 163{
133 unsigned long bootmap_size; 164 unsigned long bootmap_size;
@@ -189,11 +220,8 @@ void __init setup_bootmem_allocator(unsigned long free_pfn)
189 } 220 }
190 } 221 }
191#endif 222#endif
192#ifdef CONFIG_KEXEC 223
193 if (crashk_res.start != crashk_res.end) 224 reserve_crashkernel();
194 reserve_bootmem(crashk_res.start,
195 crashk_res.end - crashk_res.start + 1);
196#endif
197} 225}
198 226
199#ifndef CONFIG_NEED_MULTIPLE_NODES 227#ifndef CONFIG_NEED_MULTIPLE_NODES
diff --git a/arch/sh/kernel/signal.c b/arch/sh/kernel/signal.c
index 2f42442cf164..ca754fd42437 100644
--- a/arch/sh/kernel/signal.c
+++ b/arch/sh/kernel/signal.c
@@ -382,7 +382,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
382 set_fs(USER_DS); 382 set_fs(USER_DS);
383 383
384 pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", 384 pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
385 current->comm, current->pid, frame, regs->pc, regs->pr); 385 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
386 386
387 flush_cache_sigtramp(regs->pr); 387 flush_cache_sigtramp(regs->pr);
388 388
@@ -462,7 +462,7 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
462 set_fs(USER_DS); 462 set_fs(USER_DS);
463 463
464 pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n", 464 pr_debug("SIG deliver (%s:%d): sp=%p pc=%08lx pr=%08lx\n",
465 current->comm, current->pid, frame, regs->pc, regs->pr); 465 current->comm, task_pid_nr(current), frame, regs->pc, regs->pr);
466 466
467 flush_cache_sigtramp(regs->pr); 467 flush_cache_sigtramp(regs->pr);
468 468
diff --git a/arch/sh/kernel/traps.c b/arch/sh/kernel/traps.c
index dcb46e71da1c..cf99111cb33f 100644
--- a/arch/sh/kernel/traps.c
+++ b/arch/sh/kernel/traps.c
@@ -95,8 +95,8 @@ void die(const char * str, struct pt_regs * regs, long err)
95 print_modules(); 95 print_modules();
96 show_regs(regs); 96 show_regs(regs);
97 97
98 printk("Process: %s (pid: %d, stack limit = %p)\n", 98 printk("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
99 current->comm, current->pid, task_stack_page(current) + 1); 99 task_pid_nr(current), task_stack_page(current) + 1);
100 100
101 if (!user_mode(regs) || in_interrupt()) 101 if (!user_mode(regs) || in_interrupt())
102 dump_mem("Stack: ", regs->regs[15], THREAD_SIZE + 102 dump_mem("Stack: ", regs->regs[15], THREAD_SIZE +
@@ -386,7 +386,8 @@ static int handle_unaligned_access(u16 instruction, struct pt_regs *regs)
386 386
387 printk(KERN_NOTICE "Fixing up unaligned userspace access " 387 printk(KERN_NOTICE "Fixing up unaligned userspace access "
388 "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n", 388 "in \"%s\" pid=%d pc=0x%p ins=0x%04hx\n",
389 current->comm,current->pid,(u16*)regs->pc,instruction); 389 current->comm, task_pid_nr(current),
390 (u16 *)regs->pc, instruction);
390 } 391 }
391 392
392 ret = -EFAULT; 393 ret = -EFAULT;
diff --git a/arch/sh/mm/fault.c b/arch/sh/mm/fault.c
index 4729668ce5bf..f33cedb353fc 100644
--- a/arch/sh/mm/fault.c
+++ b/arch/sh/mm/fault.c
@@ -207,7 +207,7 @@ no_context:
207 */ 207 */
208out_of_memory: 208out_of_memory:
209 up_read(&mm->mmap_sem); 209 up_read(&mm->mmap_sem);
210 if (is_init(current)) { 210 if (is_global_init(current)) {
211 yield(); 211 yield();
212 down_read(&mm->mmap_sem); 212 down_read(&mm->mmap_sem);
213 goto survive; 213 goto survive;
diff --git a/arch/sh/oprofile/Kconfig b/arch/sh/oprofile/Kconfig
deleted file mode 100644
index 5ade19801b97..000000000000
--- a/arch/sh/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, include the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/sh64/Kconfig b/arch/sh64/Kconfig
index b3327ce8e82f..ba204bac49df 100644
--- a/arch/sh64/Kconfig
+++ b/arch/sh64/Kconfig
@@ -284,7 +284,7 @@ source "drivers/Kconfig"
284 284
285source "fs/Kconfig" 285source "fs/Kconfig"
286 286
287source "arch/sh64/oprofile/Kconfig" 287source "kernel/Kconfig.instrumentation"
288 288
289source "arch/sh64/Kconfig.debug" 289source "arch/sh64/Kconfig.debug"
290 290
diff --git a/arch/sh64/kernel/traps.c b/arch/sh64/kernel/traps.c
index 9d0d58fb29fa..c03101fab467 100644
--- a/arch/sh64/kernel/traps.c
+++ b/arch/sh64/kernel/traps.c
@@ -764,7 +764,7 @@ static int misaligned_fixup(struct pt_regs *regs)
764 --user_mode_unaligned_fixup_count; 764 --user_mode_unaligned_fixup_count;
765 /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */ 765 /* Only do 'count' worth of these reports, to remove a potential DoS against syslog */
766 printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", 766 printk("Fixing up unaligned userspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
767 current->comm, current->pid, (__u32)regs->pc, opcode); 767 current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
768 } else 768 } else
769#endif 769#endif
770 if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) { 770 if (!user_mode(regs) && (kernel_mode_unaligned_fixup_count > 0)) {
@@ -774,7 +774,7 @@ static int misaligned_fixup(struct pt_regs *regs)
774 (__u32)regs->pc, opcode); 774 (__u32)regs->pc, opcode);
775 } else { 775 } else {
776 printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n", 776 printk("Fixing up unaligned kernelspace access in \"%s\" pid=%d pc=0x%08x ins=0x%08lx\n",
777 current->comm, current->pid, (__u32)regs->pc, opcode); 777 current->comm, task_pid_nr(current), (__u32)regs->pc, opcode);
778 } 778 }
779 } 779 }
780 780
diff --git a/arch/sh64/mm/fault.c b/arch/sh64/mm/fault.c
index dd81c669c79b..7c79a1ba8059 100644
--- a/arch/sh64/mm/fault.c
+++ b/arch/sh64/mm/fault.c
@@ -81,7 +81,7 @@ static inline void print_vma(struct vm_area_struct *vma)
81 81
82static inline void print_task(struct task_struct *tsk) 82static inline void print_task(struct task_struct *tsk)
83{ 83{
84 printk("Task pid %d\n", tsk->pid); 84 printk("Task pid %d\n", task_pid_nr(tsk));
85} 85}
86 86
87static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address) 87static pte_t *lookup_pte(struct mm_struct *mm, unsigned long address)
@@ -272,13 +272,13 @@ bad_area:
272 * usermode, so only need a few */ 272 * usermode, so only need a few */
273 count++; 273 count++;
274 printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n", 274 printk("user mode bad_area address=%08lx pid=%d (%s) pc=%08lx\n",
275 address, current->pid, current->comm, 275 address, task_pid_nr(current), current->comm,
276 (unsigned long) regs->pc); 276 (unsigned long) regs->pc);
277#if 0 277#if 0
278 show_regs(regs); 278 show_regs(regs);
279#endif 279#endif
280 } 280 }
281 if (is_init(tsk)) { 281 if (is_global_init(tsk)) {
282 panic("INIT had user mode bad_area\n"); 282 panic("INIT had user mode bad_area\n");
283 } 283 }
284 tsk->thread.address = address; 284 tsk->thread.address = address;
@@ -320,14 +320,14 @@ no_context:
320 * us unable to handle the page fault gracefully. 320 * us unable to handle the page fault gracefully.
321 */ 321 */
322out_of_memory: 322out_of_memory:
323 if (is_init(current)) { 323 if (is_global_init(current)) {
324 panic("INIT out of memory\n"); 324 panic("INIT out of memory\n");
325 yield(); 325 yield();
326 goto survive; 326 goto survive;
327 } 327 }
328 printk("fault:Out of memory\n"); 328 printk("fault:Out of memory\n");
329 up_read(&mm->mmap_sem); 329 up_read(&mm->mmap_sem);
330 if (is_init(current)) { 330 if (is_global_init(current)) {
331 yield(); 331 yield();
332 down_read(&mm->mmap_sem); 332 down_read(&mm->mmap_sem);
333 goto survive; 333 goto survive;
diff --git a/arch/sh64/oprofile/Kconfig b/arch/sh64/oprofile/Kconfig
deleted file mode 100644
index 19d37730b664..000000000000
--- a/arch/sh64/oprofile/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1
2menu "Profiling support"
3 depends on EXPERIMENTAL
4
5config PROFILING
6 bool "Profiling support (EXPERIMENTAL)"
7 help
8 Say Y here to enable the extended profiling support mechanisms used
9 by profilers such as OProfile.
10
11
12config OPROFILE
13 tristate "OProfile system profiling (EXPERIMENTAL)"
14 depends on PROFILING
15 help
16 OProfile is a profiling system capable of profiling the
17 whole system, include the kernel, kernel modules, libraries,
18 and applications.
19
20 If unsure, say N.
21
22endmenu
23
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index c0f4ba109daa..527adc808ad6 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -320,11 +320,7 @@ endmenu
320 320
321source "fs/Kconfig" 321source "fs/Kconfig"
322 322
323menu "Instrumentation Support" 323source "kernel/Kconfig.instrumentation"
324
325source "arch/sparc/oprofile/Kconfig"
326
327endmenu
328 324
329source "arch/sparc/Kconfig.debug" 325source "arch/sparc/Kconfig.debug"
330 326
diff --git a/arch/sparc/kernel/ptrace.c b/arch/sparc/kernel/ptrace.c
index 003f8eed32f4..fe562db475e9 100644
--- a/arch/sparc/kernel/ptrace.c
+++ b/arch/sparc/kernel/ptrace.c
@@ -155,7 +155,7 @@ static inline void read_sunos_user(struct pt_regs *regs, unsigned long offset,
155 /* Rest of them are completely unsupported. */ 155 /* Rest of them are completely unsupported. */
156 default: 156 default:
157 printk("%s [%d]: Wants to read user offset %ld\n", 157 printk("%s [%d]: Wants to read user offset %ld\n",
158 current->comm, current->pid, offset); 158 current->comm, task_pid_nr(current), offset);
159 pt_error_return(regs, EIO); 159 pt_error_return(regs, EIO);
160 return; 160 return;
161 } 161 }
@@ -222,7 +222,7 @@ static inline void write_sunos_user(struct pt_regs *regs, unsigned long offset,
222 /* Rest of them are completely unsupported or "no-touch". */ 222 /* Rest of them are completely unsupported or "no-touch". */
223 default: 223 default:
224 printk("%s [%d]: Wants to write user offset %ld\n", 224 printk("%s [%d]: Wants to write user offset %ld\n",
225 current->comm, current->pid, offset); 225 current->comm, task_pid_nr(current), offset);
226 goto failure; 226 goto failure;
227 } 227 }
228success: 228success:
diff --git a/arch/sparc/kernel/sys_sparc.c b/arch/sparc/kernel/sys_sparc.c
index 6c0221e9a9f5..42bf09db9a81 100644
--- a/arch/sparc/kernel/sys_sparc.c
+++ b/arch/sparc/kernel/sys_sparc.c
@@ -357,7 +357,7 @@ c_sys_nis_syscall (struct pt_regs *regs)
357 if (count++ > 5) 357 if (count++ > 5)
358 return -ENOSYS; 358 return -ENOSYS;
359 printk ("%s[%d]: Unimplemented SPARC system call %d\n", 359 printk ("%s[%d]: Unimplemented SPARC system call %d\n",
360 current->comm, current->pid, (int)regs->u_regs[1]); 360 current->comm, task_pid_nr(current), (int)regs->u_regs[1]);
361#ifdef DEBUG_UNIMP_SYSCALL 361#ifdef DEBUG_UNIMP_SYSCALL
362 show_regs (regs); 362 show_regs (regs);
363#endif 363#endif
diff --git a/arch/sparc/kernel/sys_sunos.c b/arch/sparc/kernel/sys_sunos.c
index f807172cab0e..28c187c5d9fd 100644
--- a/arch/sparc/kernel/sys_sunos.c
+++ b/arch/sparc/kernel/sys_sunos.c
@@ -866,7 +866,7 @@ asmlinkage int sunos_killpg(int pgrp, int sig)
866 rcu_read_lock(); 866 rcu_read_lock();
867 ret = -EINVAL; 867 ret = -EINVAL;
868 if (pgrp > 0) 868 if (pgrp > 0)
869 ret = kill_pgrp(find_pid(pgrp), sig, 0); 869 ret = kill_pgrp(find_vpid(pgrp), sig, 0);
870 rcu_read_unlock(); 870 rcu_read_unlock();
871 871
872 return ret; 872 return ret;
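
sunos_killpg() above now resolves the numeric process group through find_vpid(), i.e. in the calling task's pid namespace, before handing a struct pid to kill_pgrp(). A self-contained sketch of the same pattern (sketch_kill_pgrp is an illustrative wrapper, not kernel API):

#include <linux/pid.h>		/* struct pid, find_vpid() */
#include <linux/sched.h>	/* kill_pgrp() */
#include <linux/errno.h>
#include <linux/rcupdate.h>

static int sketch_kill_pgrp(int pgrp, int sig)
{
	int ret = -EINVAL;

	rcu_read_lock();		/* pid lookups are RCU-protected */
	if (pgrp > 0)
		ret = kill_pgrp(find_vpid(pgrp), sig, 0);
	rcu_read_unlock();

	return ret;
}
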
diff --git a/arch/sparc/kernel/traps.c b/arch/sparc/kernel/traps.c
index 3bc3bff51e08..d404e7994527 100644
--- a/arch/sparc/kernel/traps.c
+++ b/arch/sparc/kernel/traps.c
@@ -38,7 +38,7 @@ struct trap_trace_entry trapbuf[1024];
38 38
39void syscall_trace_entry(struct pt_regs *regs) 39void syscall_trace_entry(struct pt_regs *regs)
40{ 40{
41 printk("%s[%d]: ", current->comm, current->pid); 41 printk("%s[%d]: ", current->comm, task_pid_nr(current));
42 printk("scall<%d> (could be %d)\n", (int) regs->u_regs[UREG_G1], 42 printk("scall<%d> (could be %d)\n", (int) regs->u_regs[UREG_G1],
43 (int) regs->u_regs[UREG_I0]); 43 (int) regs->u_regs[UREG_I0]);
44} 44}
@@ -99,7 +99,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
99" /_| \\__/ |_\\\n" 99" /_| \\__/ |_\\\n"
100" \\__U_/\n"); 100" \\__U_/\n");
101 101
102 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter); 102 printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
103 show_regs(regs); 103 show_regs(regs);
104 add_taint(TAINT_DIE); 104 add_taint(TAINT_DIE);
105 105
diff --git a/arch/sparc/oprofile/Kconfig b/arch/sparc/oprofile/Kconfig
deleted file mode 100644
index d8a84088471a..000000000000
--- a/arch/sparc/oprofile/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
1config PROFILING
2 bool "Profiling support (EXPERIMENTAL)"
3 help
4 Say Y here to enable the extended profiling support mechanisms used
5 by profilers such as OProfile.
6
7
8config OPROFILE
9 tristate "OProfile system profiling (EXPERIMENTAL)"
10 depends on PROFILING
11 help
12 OProfile is a profiling system capable of profiling the
13 whole system, include the kernel, kernel modules, libraries,
14 and applications.
15
16 If unsure, say N.
17
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index 59c4d752d286..c7a74e376985 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -460,20 +460,7 @@ source "drivers/fc4/Kconfig"
460 460
461source "fs/Kconfig" 461source "fs/Kconfig"
462 462
463menu "Instrumentation Support" 463source "kernel/Kconfig.instrumentation"
464
465source "arch/sparc64/oprofile/Kconfig"
466
467config KPROBES
468 bool "Kprobes (EXPERIMENTAL)"
469 depends on KALLSYMS && EXPERIMENTAL && MODULES
470 help
471 Kprobes allows you to trap at almost any kernel address and
472 execute a callback function. register_kprobe() establishes
473 a probepoint and specifies the callback. Kprobes is useful
474 for kernel debugging, non-intrusive instrumentation and testing.
475 If in doubt, say "N".
476endmenu
477 464
478source "arch/sparc64/Kconfig.debug" 465source "arch/sparc64/Kconfig.debug"
479 466
diff --git a/arch/sparc64/kernel/sys_sunos32.c b/arch/sparc64/kernel/sys_sunos32.c
index 8f7a06e2c7e7..170d6ca8de6f 100644
--- a/arch/sparc64/kernel/sys_sunos32.c
+++ b/arch/sparc64/kernel/sys_sunos32.c
@@ -831,7 +831,7 @@ asmlinkage int sunos_killpg(int pgrp, int sig)
831 rcu_read_lock(); 831 rcu_read_lock();
832 ret = -EINVAL; 832 ret = -EINVAL;
833 if (pgrp > 0) 833 if (pgrp > 0)
834 ret = kill_pgrp(find_pid(pgrp), sig, 0); 834 ret = kill_pgrp(find_vpid(pgrp), sig, 0);
835 rcu_read_unlock(); 835 rcu_read_unlock();
836 836
837 return ret; 837 return ret;
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 34573a55b6e5..e9c7e4f07abf 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -2225,7 +2225,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
2225" /_| \\__/ |_\\\n" 2225" /_| \\__/ |_\\\n"
2226" \\__U_/\n"); 2226" \\__U_/\n");
2227 2227
2228 printk("%s(%d): %s [#%d]\n", current->comm, current->pid, str, ++die_counter); 2228 printk("%s(%d): %s [#%d]\n", current->comm, task_pid_nr(current), str, ++die_counter);
2229 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV); 2229 notify_die(DIE_OOPS, str, regs, 0, 255, SIGSEGV);
2230 __asm__ __volatile__("flushw"); 2230 __asm__ __volatile__("flushw");
2231 __show_regs(regs); 2231 __show_regs(regs);
diff --git a/arch/sparc64/oprofile/Kconfig b/arch/sparc64/oprofile/Kconfig
deleted file mode 100644
index d8a84088471a..000000000000
--- a/arch/sparc64/oprofile/Kconfig
+++ /dev/null
@@ -1,17 +0,0 @@
1config PROFILING
2 bool "Profiling support (EXPERIMENTAL)"
3 help
4 Say Y here to enable the extended profiling support mechanisms used
5 by profilers such as OProfile.
6
7
8config OPROFILE
9 tristate "OProfile system profiling (EXPERIMENTAL)"
10 depends on PROFILING
11 help
12 OProfile is a profiling system capable of profiling the
13 whole system, include the kernel, kernel modules, libraries,
14 and applications.
15
16 If unsure, say N.
17
diff --git a/arch/sparc64/solaris/misc.c b/arch/sparc64/solaris/misc.c
index 3b67de7455f1..c86cb3091a8e 100644
--- a/arch/sparc64/solaris/misc.c
+++ b/arch/sparc64/solaris/misc.c
@@ -415,7 +415,7 @@ asmlinkage int solaris_procids(int cmd, s32 pid, s32 pgid)
415 415
416 switch (cmd) { 416 switch (cmd) {
417 case 0: /* getpgrp */ 417 case 0: /* getpgrp */
418 return process_group(current); 418 return task_pgrp_nr(current);
419 case 1: /* setpgrp */ 419 case 1: /* setpgrp */
420 { 420 {
421 int (*sys_setpgid)(pid_t,pid_t) = 421 int (*sys_setpgid)(pid_t,pid_t) =
@@ -426,7 +426,7 @@ asmlinkage int solaris_procids(int cmd, s32 pid, s32 pgid)
426 ret = sys_setpgid(0, 0); 426 ret = sys_setpgid(0, 0);
427 if (ret) return ret; 427 if (ret) return ret;
428 proc_clear_tty(current); 428 proc_clear_tty(current);
429 return process_group(current); 429 return task_pgrp_nr(current);
430 } 430 }
431 case 2: /* getsid */ 431 case 2: /* getsid */
432 { 432 {
diff --git a/arch/um/Kconfig b/arch/um/Kconfig
index 740d8a922e48..d8925d285573 100644
--- a/arch/um/Kconfig
+++ b/arch/um/Kconfig
@@ -289,4 +289,6 @@ config INPUT
289 bool 289 bool
290 default n 290 default n
291 291
292source "kernel/Kconfig.instrumentation"
293
292source "arch/um/Kconfig.debug" 294source "arch/um/Kconfig.debug"
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index bd060551e619..cb3321f8e0a9 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -108,7 +108,7 @@ out_nosemaphore:
108 * us unable to handle the page fault gracefully. 108 * us unable to handle the page fault gracefully.
109 */ 109 */
110out_of_memory: 110out_of_memory:
111 if (is_init(current)) { 111 if (is_global_init(current)) {
112 up_read(&mm->mmap_sem); 112 up_read(&mm->mmap_sem);
113 yield(); 113 yield();
114 down_read(&mm->mmap_sem); 114 down_read(&mm->mmap_sem);
diff --git a/arch/um/sys-x86_64/sysrq.c b/arch/um/sys-x86_64/sysrq.c
index ce3e07fcf283..765444031819 100644
--- a/arch/um/sys-x86_64/sysrq.c
+++ b/arch/um/sys-x86_64/sysrq.c
@@ -15,8 +15,8 @@ void __show_regs(struct pt_regs * regs)
15{ 15{
16 printk("\n"); 16 printk("\n");
17 print_modules(); 17 print_modules();
18 printk("Pid: %d, comm: %.20s %s %s\n", 18 printk("Pid: %d, comm: %.20s %s %s\n", task_pid_nr(current),
19 current->pid, current->comm, print_tainted(), init_utsname()->release); 19 current->comm, print_tainted(), init_utsname()->release);
20 printk("RIP: %04lx:[<%016lx>] ", PT_REGS_CS(regs) & 0xffff, 20 printk("RIP: %04lx:[<%016lx>] ", PT_REGS_CS(regs) & 0xffff,
21 PT_REGS_RIP(regs)); 21 PT_REGS_RIP(regs));
22 printk("\nRSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs), 22 printk("\nRSP: %016lx EFLAGS: %08lx\n", PT_REGS_RSP(regs),
diff --git a/arch/v850/Kconfig b/arch/v850/Kconfig
index ace479ab273f..b6a50b8b38de 100644
--- a/arch/v850/Kconfig
+++ b/arch/v850/Kconfig
@@ -331,6 +331,8 @@ source "sound/Kconfig"
331 331
332source "drivers/usb/Kconfig" 332source "drivers/usb/Kconfig"
333 333
334source "kernel/Kconfig.instrumentation"
335
334source "arch/v850/Kconfig.debug" 336source "arch/v850/Kconfig.debug"
335 337
336source "security/Kconfig" 338source "security/Kconfig"
diff --git a/arch/x86/ia32/ia32_binfmt.c b/arch/x86/ia32/ia32_binfmt.c
index 5027650eb273..55822d2cf053 100644
--- a/arch/x86/ia32/ia32_binfmt.c
+++ b/arch/x86/ia32/ia32_binfmt.c
@@ -5,10 +5,6 @@
5 * This tricks binfmt_elf.c into loading 32bit binaries using lots 5 * This tricks binfmt_elf.c into loading 32bit binaries using lots
6 * of ugly preprocessor tricks. Talk about very very poor man's inheritance. 6 * of ugly preprocessor tricks. Talk about very very poor man's inheritance.
7 */ 7 */
8#define __ASM_X86_64_ELF_H 1
9
10#undef ELF_CLASS
11#define ELF_CLASS ELFCLASS32
12 8
13#include <linux/types.h> 9#include <linux/types.h>
14#include <linux/stddef.h> 10#include <linux/stddef.h>
@@ -19,6 +15,7 @@
19#include <linux/binfmts.h> 15#include <linux/binfmts.h>
20#include <linux/mm.h> 16#include <linux/mm.h>
21#include <linux/security.h> 17#include <linux/security.h>
18#include <linux/elfcore-compat.h>
22 19
23#include <asm/segment.h> 20#include <asm/segment.h>
24#include <asm/ptrace.h> 21#include <asm/ptrace.h>
@@ -31,6 +28,20 @@
31#include <asm/ia32.h> 28#include <asm/ia32.h>
32#include <asm/vsyscall32.h> 29#include <asm/vsyscall32.h>
33 30
31#undef ELF_ARCH
32#undef ELF_CLASS
33#define ELF_CLASS ELFCLASS32
34#define ELF_ARCH EM_386
35
36#undef elfhdr
37#undef elf_phdr
38#undef elf_note
39#undef elf_addr_t
40#define elfhdr elf32_hdr
41#define elf_phdr elf32_phdr
42#define elf_note elf32_note
43#define elf_addr_t Elf32_Off
44
34#define ELF_NAME "elf/i386" 45#define ELF_NAME "elf/i386"
35 46
36#define AT_SYSINFO 32 47#define AT_SYSINFO 32
@@ -48,74 +59,20 @@ int sysctl_vsyscall32 = 1;
48} while(0) 59} while(0)
49 60
50struct file; 61struct file;
51struct elf_phdr;
52 62
53#define IA32_EMULATOR 1 63#define IA32_EMULATOR 1
54 64
55#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000) 65#undef ELF_ET_DYN_BASE
56
57#undef ELF_ARCH
58#define ELF_ARCH EM_386
59
60#define ELF_DATA ELFDATA2LSB
61 66
62#define USE_ELF_CORE_DUMP 1 67#define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
63
64/* Override elfcore.h */
65#define _LINUX_ELFCORE_H 1
66typedef unsigned int elf_greg_t;
67
68#define ELF_NGREG (sizeof (struct user_regs_struct32) / sizeof(elf_greg_t))
69typedef elf_greg_t elf_gregset_t[ELF_NGREG];
70
71struct elf_siginfo
72{
73 int si_signo; /* signal number */
74 int si_code; /* extra code */
75 int si_errno; /* errno */
76};
77 68
78#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0) 69#define jiffies_to_timeval(a,b) do { (b)->tv_usec = 0; (b)->tv_sec = (a)/HZ; }while(0)
79 70
80struct elf_prstatus
81{
82 struct elf_siginfo pr_info; /* Info associated with signal */
83 short pr_cursig; /* Current signal */
84 unsigned int pr_sigpend; /* Set of pending signals */
85 unsigned int pr_sighold; /* Set of held signals */
86 pid_t pr_pid;
87 pid_t pr_ppid;
88 pid_t pr_pgrp;
89 pid_t pr_sid;
90 struct compat_timeval pr_utime; /* User time */
91 struct compat_timeval pr_stime; /* System time */
92 struct compat_timeval pr_cutime; /* Cumulative user time */
93 struct compat_timeval pr_cstime; /* Cumulative system time */
94 elf_gregset_t pr_reg; /* GP registers */
95 int pr_fpvalid; /* True if math co-processor being used. */
96};
97
98#define ELF_PRARGSZ (80) /* Number of chars for args */
99
100struct elf_prpsinfo
101{
102 char pr_state; /* numeric process state */
103 char pr_sname; /* char for pr_state */
104 char pr_zomb; /* zombie */
105 char pr_nice; /* nice val */
106 unsigned int pr_flag; /* flags */
107 __u16 pr_uid;
108 __u16 pr_gid;
109 pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid;
110 /* Lots missing */
111 char pr_fname[16]; /* filename of executable */
112 char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
113};
114
115#define _GET_SEG(x) \ 71#define _GET_SEG(x) \
116 ({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; }) 72 ({ __u32 seg; asm("movl %%" __stringify(x) ",%0" : "=r"(seg)); seg; })
117 73
118/* Assumes current==process to be dumped */ 74/* Assumes current==process to be dumped */
75#undef ELF_CORE_COPY_REGS
119#define ELF_CORE_COPY_REGS(pr_reg, regs) \ 76#define ELF_CORE_COPY_REGS(pr_reg, regs) \
120 pr_reg[0] = regs->rbx; \ 77 pr_reg[0] = regs->rbx; \
121 pr_reg[1] = regs->rcx; \ 78 pr_reg[1] = regs->rcx; \
@@ -135,36 +92,41 @@ struct elf_prpsinfo
135 pr_reg[15] = regs->rsp; \ 92 pr_reg[15] = regs->rsp; \
136 pr_reg[16] = regs->ss; 93 pr_reg[16] = regs->ss;
137 94
138#define user user32 95
96#define elf_prstatus compat_elf_prstatus
97#define elf_prpsinfo compat_elf_prpsinfo
98#define elf_fpregset_t struct user_i387_ia32_struct
99#define elf_fpxregset_t struct user32_fxsr_struct
100#define user user32
139 101
140#undef elf_read_implies_exec 102#undef elf_read_implies_exec
141#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X) 103#define elf_read_implies_exec(ex, executable_stack) (executable_stack != EXSTACK_DISABLE_X)
142//#include <asm/ia32.h>
143#include <linux/elf.h>
144
145typedef struct user_i387_ia32_struct elf_fpregset_t;
146typedef struct user32_fxsr_struct elf_fpxregset_t;
147
148 104
149static inline void elf_core_copy_regs(elf_gregset_t *elfregs, struct pt_regs *regs) 105#define elf_core_copy_regs elf32_core_copy_regs
106static inline void elf32_core_copy_regs(compat_elf_gregset_t *elfregs,
107 struct pt_regs *regs)
150{ 108{
151 ELF_CORE_COPY_REGS((*elfregs), regs) 109 ELF_CORE_COPY_REGS((&elfregs->ebx), regs)
152} 110}
153 111
154static inline int elf_core_copy_task_regs(struct task_struct *t, elf_gregset_t* elfregs) 112#define elf_core_copy_task_regs elf32_core_copy_task_regs
113static inline int elf32_core_copy_task_regs(struct task_struct *t,
114 compat_elf_gregset_t* elfregs)
155{ 115{
156 struct pt_regs *pp = task_pt_regs(t); 116 struct pt_regs *pp = task_pt_regs(t);
157 ELF_CORE_COPY_REGS((*elfregs), pp); 117 ELF_CORE_COPY_REGS((&elfregs->ebx), pp);
158 /* fix wrong segments */ 118 /* fix wrong segments */
159 (*elfregs)[7] = t->thread.ds; 119 elfregs->ds = t->thread.ds;
160 (*elfregs)[9] = t->thread.fsindex; 120 elfregs->fs = t->thread.fsindex;
161 (*elfregs)[10] = t->thread.gsindex; 121 elfregs->gs = t->thread.gsindex;
162 (*elfregs)[8] = t->thread.es; 122 elfregs->es = t->thread.es;
163 return 1; 123 return 1;
164} 124}
165 125
126#define elf_core_copy_task_fpregs elf32_core_copy_task_fpregs
166static inline int 127static inline int
167elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpregset_t *fpu) 128elf32_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs,
129 elf_fpregset_t *fpu)
168{ 130{
169 struct _fpstate_ia32 *fpstate = (void*)fpu; 131 struct _fpstate_ia32 *fpstate = (void*)fpu;
170 mm_segment_t oldfs = get_fs(); 132 mm_segment_t oldfs = get_fs();
@@ -186,8 +148,9 @@ elf_core_copy_task_fpregs(struct task_struct *tsk, struct pt_regs *regs, elf_fpr
186 148
187#define ELF_CORE_COPY_XFPREGS 1 149#define ELF_CORE_COPY_XFPREGS 1
188#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG 150#define ELF_CORE_XFPREG_TYPE NT_PRXFPREG
151#define elf_core_copy_task_xfpregs elf32_core_copy_task_xfpregs
189static inline int 152static inline int
190elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu) 153elf32_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
191{ 154{
192 struct pt_regs *regs = task_pt_regs(t); 155 struct pt_regs *regs = task_pt_regs(t);
193 if (!tsk_used_math(t)) 156 if (!tsk_used_math(t))
@@ -206,6 +169,10 @@ elf_core_copy_task_xfpregs(struct task_struct *t, elf_fpxregset_t *xfpu)
206 169
207extern int force_personality32; 170extern int force_personality32;
208 171
172#undef ELF_EXEC_PAGESIZE
173#undef ELF_HWCAP
174#undef ELF_PLATFORM
175#undef SET_PERSONALITY
209#define ELF_EXEC_PAGESIZE PAGE_SIZE 176#define ELF_EXEC_PAGESIZE PAGE_SIZE
210#define ELF_HWCAP (boot_cpu_data.x86_capability[0]) 177#define ELF_HWCAP (boot_cpu_data.x86_capability[0])
211#define ELF_PLATFORM ("i686") 178#define ELF_PLATFORM ("i686")
@@ -231,6 +198,7 @@ do { \
231 198
232#define load_elf_binary load_elf32_binary 199#define load_elf_binary load_elf32_binary
233 200
201#undef ELF_PLAT_INIT
234#define ELF_PLAT_INIT(r, load_addr) elf32_init(r) 202#define ELF_PLAT_INIT(r, load_addr) elf32_init(r)
235 203
236#undef start_thread 204#undef start_thread
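
The ia32_binfmt.c hunk above drops the locally duplicated 32-bit elf_prstatus/elf_prpsinfo definitions in favour of <linux/elfcore-compat.h> and renames the copy helpers so they no longer collide with the generic ones. A rough sketch of the override pattern the file relies on (the trailing re-include of fs/binfmt_elf.c is an assumption based on how this loader is normally built, not something visible in the hunk):

#include <linux/elfcore-compat.h>	/* compat_elf_prstatus, compat_elf_prpsinfo */

#define elf_prstatus	compat_elf_prstatus	/* 32-bit core-dump status record */
#define elf_prpsinfo	compat_elf_prpsinfo	/* 32-bit process-info record */
#define user		user32			/* 32-bit user register layout */

/* ...remaining overrides exactly as in the diff... */

/* Assumption: the generic loader is then compiled a second time with
 * the 32-bit names substituted, producing the "elf/i386" handler. */
#include "../../../fs/binfmt_elf.c"
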
diff --git a/arch/x86/kernel/Makefile_32 b/arch/x86/kernel/Makefile_32
index a3fa11f8f460..ccea590bbb92 100644
--- a/arch/x86/kernel/Makefile_32
+++ b/arch/x86/kernel/Makefile_32
@@ -2,7 +2,7 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5extra-y := head_32.o init_task_32.o vmlinux.lds 5extra-y := head_32.o init_task.o vmlinux.lds
6 6
7obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \ 7obj-y := process_32.o signal_32.o entry_32.o traps_32.o irq_32.o \
8 ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \ 8 ptrace_32.o time_32.o ioport_32.o ldt_32.o setup_32.o i8259_32.o sys_i386_32.o \
@@ -17,6 +17,7 @@ obj-$(CONFIG_MCA) += mca_32.o
17obj-$(CONFIG_X86_MSR) += msr.o 17obj-$(CONFIG_X86_MSR) += msr.o
18obj-$(CONFIG_X86_CPUID) += cpuid.o 18obj-$(CONFIG_X86_CPUID) += cpuid.o
19obj-$(CONFIG_MICROCODE) += microcode.o 19obj-$(CONFIG_MICROCODE) += microcode.o
20obj-$(CONFIG_PCI) += early-quirks.o
20obj-$(CONFIG_APM) += apm_32.o 21obj-$(CONFIG_APM) += apm_32.o
21obj-$(CONFIG_X86_SMP) += smp_32.o smpboot_32.o tsc_sync.o 22obj-$(CONFIG_X86_SMP) += smp_32.o smpboot_32.o tsc_sync.o
22obj-$(CONFIG_SMP) += smpcommon_32.o 23obj-$(CONFIG_SMP) += smpcommon_32.o
diff --git a/arch/x86/kernel/Makefile_64 b/arch/x86/kernel/Makefile_64
index 43da66213a47..dec06e769281 100644
--- a/arch/x86/kernel/Makefile_64
+++ b/arch/x86/kernel/Makefile_64
@@ -2,7 +2,7 @@
2# Makefile for the linux kernel. 2# Makefile for the linux kernel.
3# 3#
4 4
5extra-y := head_64.o head64.o init_task_64.o vmlinux.lds 5extra-y := head_64.o head64.o init_task.o vmlinux.lds
6EXTRA_AFLAGS := -traditional 6EXTRA_AFLAGS := -traditional
7obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \ 7obj-y := process_64.o signal_64.o entry_64.o traps_64.o irq_64.o \
8 ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \ 8 ptrace_64.o time_64.o ioport_64.o ldt_64.o setup_64.o i8259_64.o sys_x86_64.o \
@@ -39,7 +39,7 @@ obj-$(CONFIG_K8_NB) += k8.o
39obj-$(CONFIG_AUDIT) += audit_64.o 39obj-$(CONFIG_AUDIT) += audit_64.o
40 40
41obj-$(CONFIG_MODULES) += module_64.o 41obj-$(CONFIG_MODULES) += module_64.o
42obj-$(CONFIG_PCI) += early-quirks_64.o 42obj-$(CONFIG_PCI) += early-quirks.o
43 43
44obj-y += topology.o 44obj-y += topology.o
45obj-y += intel_cacheinfo.o 45obj-y += intel_cacheinfo.o
diff --git a/arch/x86/kernel/acpi/Makefile_32 b/arch/x86/kernel/acpi/Makefile_32
index a4852a2e9190..045dd54b33e0 100644
--- a/arch/x86/kernel/acpi/Makefile_32
+++ b/arch/x86/kernel/acpi/Makefile_32
@@ -1,7 +1,4 @@
1obj-$(CONFIG_ACPI) += boot.o 1obj-$(CONFIG_ACPI) += boot.o
2ifneq ($(CONFIG_PCI),)
3obj-$(CONFIG_X86_IO_APIC) += earlyquirk_32.o
4endif
5obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o 2obj-$(CONFIG_ACPI_SLEEP) += sleep_32.o wakeup_32.o
6 3
7ifneq ($(CONFIG_ACPI_PROCESSOR),) 4ifneq ($(CONFIG_ACPI_PROCESSOR),)
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index afd2afe9102d..f28b2e251b1d 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -555,7 +555,7 @@ EXPORT_SYMBOL(acpi_map_lsapic);
555 555
556int acpi_unmap_lsapic(int cpu) 556int acpi_unmap_lsapic(int cpu)
557{ 557{
558 x86_cpu_to_apicid[cpu] = -1; 558 per_cpu(x86_cpu_to_apicid, cpu) = -1;
559 cpu_clear(cpu, cpu_present_map); 559 cpu_clear(cpu, cpu_present_map);
560 num_processors--; 560 num_processors--;
561 561
diff --git a/arch/x86/kernel/acpi/cstate.c b/arch/x86/kernel/acpi/cstate.c
index 2d39f55d29a8..10b67170b133 100644
--- a/arch/x86/kernel/acpi/cstate.c
+++ b/arch/x86/kernel/acpi/cstate.c
@@ -29,7 +29,7 @@
29void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags, 29void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
30 unsigned int cpu) 30 unsigned int cpu)
31{ 31{
32 struct cpuinfo_x86 *c = cpu_data + cpu; 32 struct cpuinfo_x86 *c = &cpu_data(cpu);
33 33
34 flags->bm_check = 0; 34 flags->bm_check = 0;
35 if (num_online_cpus() == 1) 35 if (num_online_cpus() == 1)
@@ -72,7 +72,7 @@ int acpi_processor_ffh_cstate_probe(unsigned int cpu,
72 struct acpi_processor_cx *cx, struct acpi_power_register *reg) 72 struct acpi_processor_cx *cx, struct acpi_power_register *reg)
73{ 73{
74 struct cstate_entry *percpu_entry; 74 struct cstate_entry *percpu_entry;
75 struct cpuinfo_x86 *c = cpu_data + cpu; 75 struct cpuinfo_x86 *c = &cpu_data(cpu);
76 76
77 cpumask_t saved_mask; 77 cpumask_t saved_mask;
78 int retval; 78 int retval;
diff --git a/arch/x86/kernel/acpi/earlyquirk_32.c b/arch/x86/kernel/acpi/earlyquirk_32.c
deleted file mode 100644
index 23f78efc577d..000000000000
--- a/arch/x86/kernel/acpi/earlyquirk_32.c
+++ /dev/null
@@ -1,84 +0,0 @@
1/*
2 * Do early PCI probing for bug detection when the main PCI subsystem is
3 * not up yet.
4 */
5#include <linux/init.h>
6#include <linux/kernel.h>
7#include <linux/pci.h>
8#include <linux/acpi.h>
9
10#include <asm/pci-direct.h>
11#include <asm/acpi.h>
12#include <asm/apic.h>
13
14#ifdef CONFIG_ACPI
15
16static int __init nvidia_hpet_check(struct acpi_table_header *header)
17{
18 return 0;
19}
20#endif
21
22static int __init check_bridge(int vendor, int device)
23{
24#ifdef CONFIG_ACPI
25 static int warned;
26 /* According to Nvidia all timer overrides are bogus unless HPET
27 is enabled. */
28 if (!acpi_use_timer_override && vendor == PCI_VENDOR_ID_NVIDIA) {
29 if (!warned && acpi_table_parse(ACPI_SIG_HPET,
30 nvidia_hpet_check)) {
31 warned = 1;
32 acpi_skip_timer_override = 1;
33 printk(KERN_INFO "Nvidia board "
34 "detected. Ignoring ACPI "
35 "timer override.\n");
36 printk(KERN_INFO "If you got timer trouble "
37 "try acpi_use_timer_override\n");
38
39 }
40 }
41#endif
42 if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
43 timer_over_8254 = 0;
44 printk(KERN_INFO "ATI board detected. Disabling timer routing "
45 "over 8254.\n");
46 }
47 return 0;
48}
49
50void __init check_acpi_pci(void)
51{
52 int num, slot, func;
53
54 /* Assume the machine supports type 1. If not it will
55 always read ffffffff and should not have any side effect.
56 Actually a few buggy systems can machine check. Allow the user
57 to disable it by command line option at least -AK */
58 if (!early_pci_allowed())
59 return;
60
61 /* Poor man's PCI discovery */
62 for (num = 0; num < 32; num++) {
63 for (slot = 0; slot < 32; slot++) {
64 for (func = 0; func < 8; func++) {
65 u32 class;
66 u32 vendor;
67 class = read_pci_config(num, slot, func,
68 PCI_CLASS_REVISION);
69 if (class == 0xffffffff)
70 break;
71
72 if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
73 continue;
74
75 vendor = read_pci_config(num, slot, func,
76 PCI_VENDOR_ID);
77
78 if (check_bridge(vendor & 0xffff, vendor >> 16))
79 return;
80 }
81
82 }
83 }
84}
diff --git a/arch/x86/kernel/acpi/processor.c b/arch/x86/kernel/acpi/processor.c
index b54fded49834..2ed0a4ce62f0 100644
--- a/arch/x86/kernel/acpi/processor.c
+++ b/arch/x86/kernel/acpi/processor.c
@@ -63,7 +63,7 @@ static void init_intel_pdc(struct acpi_processor *pr, struct cpuinfo_x86 *c)
63void arch_acpi_processor_init_pdc(struct acpi_processor *pr) 63void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
64{ 64{
65 unsigned int cpu = pr->id; 65 unsigned int cpu = pr->id;
66 struct cpuinfo_x86 *c = cpu_data + cpu; 66 struct cpuinfo_x86 *c = &cpu_data(cpu);
67 67
68 pr->pdc = NULL; 68 pr->pdc = NULL;
69 if (c->x86_vendor == X86_VENDOR_INTEL) 69 if (c->x86_vendor == X86_VENDOR_INTEL)
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index 3bd2688bd443..d6405e0842b5 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -357,14 +357,14 @@ void alternatives_smp_switch(int smp)
357 if (smp) { 357 if (smp) {
358 printk(KERN_INFO "SMP alternatives: switching to SMP code\n"); 358 printk(KERN_INFO "SMP alternatives: switching to SMP code\n");
359 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); 359 clear_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
360 clear_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); 360 clear_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
361 list_for_each_entry(mod, &smp_alt_modules, next) 361 list_for_each_entry(mod, &smp_alt_modules, next)
362 alternatives_smp_lock(mod->locks, mod->locks_end, 362 alternatives_smp_lock(mod->locks, mod->locks_end,
363 mod->text, mod->text_end); 363 mod->text, mod->text_end);
364 } else { 364 } else {
365 printk(KERN_INFO "SMP alternatives: switching to UP code\n"); 365 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
366 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); 366 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
367 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); 367 set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
368 list_for_each_entry(mod, &smp_alt_modules, next) 368 list_for_each_entry(mod, &smp_alt_modules, next)
369 alternatives_smp_unlock(mod->locks, mod->locks_end, 369 alternatives_smp_unlock(mod->locks, mod->locks_end,
370 mod->text, mod->text_end); 370 mod->text, mod->text_end);
@@ -432,7 +432,7 @@ void __init alternative_instructions(void)
432 if (1 == num_possible_cpus()) { 432 if (1 == num_possible_cpus()) {
433 printk(KERN_INFO "SMP alternatives: switching to UP code\n"); 433 printk(KERN_INFO "SMP alternatives: switching to UP code\n");
434 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability); 434 set_bit(X86_FEATURE_UP, boot_cpu_data.x86_capability);
435 set_bit(X86_FEATURE_UP, cpu_data[0].x86_capability); 435 set_bit(X86_FEATURE_UP, cpu_data(0).x86_capability);
436 alternatives_smp_unlock(__smp_locks, __smp_locks_end, 436 alternatives_smp_unlock(__smp_locks, __smp_locks_end,
437 _text, _etext); 437 _text, _etext);
438 } 438 }
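
The alternatives_smp_switch() hunk above belongs to the x86 conversion from the flat cpu_data[] array to a cpu_data(cpu) accessor (presumably a per-CPU variable lookup; that detail is an assumption, not shown in the diff). Call sites keep reading the same struct cpuinfo_x86 fields; only the addressing changes, as in this illustrative sketch:

#include <linux/kernel.h>
#include <asm/processor.h>	/* struct cpuinfo_x86, cpu_data() */

static void sketch_print_cpu(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);	/* was: &cpu_data[cpu] */

	printk(KERN_INFO "CPU%d: vendor %d, family %d\n",
	       cpu, c->x86_vendor, c->x86);
}
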
diff --git a/arch/x86/kernel/cpu/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig_32
index d8c6f132dc7a..d8c6f132dc7a 100644
--- a/arch/x86/kernel/cpu/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig_32
diff --git a/arch/x86/kernel/cpufreq/Kconfig b/arch/x86/kernel/cpu/cpufreq/Kconfig_64
index a3fd51926cbd..9c9699fdcf52 100644
--- a/arch/x86/kernel/cpufreq/Kconfig
+++ b/arch/x86/kernel/cpu/cpufreq/Kconfig_64
@@ -19,7 +19,7 @@ config X86_POWERNOW_K8
19 To compile this driver as a module, choose M here: the 19 To compile this driver as a module, choose M here: the
20 module will be called powernow-k8. 20 module will be called powernow-k8.
21 21
22 For details, take a look at <file:Documentation/cpu-freq/>. 22 For details, take a look at <file:Documentation/cpu-freq/>.
23 23
24 If in doubt, say N. 24 If in doubt, say N.
25 25
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index 2ca43ba32bc0..fea0af0476b9 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -77,7 +77,7 @@ static unsigned int acpi_pstate_strict;
77 77
78static int check_est_cpu(unsigned int cpuid) 78static int check_est_cpu(unsigned int cpuid)
79{ 79{
80 struct cpuinfo_x86 *cpu = &cpu_data[cpuid]; 80 struct cpuinfo_x86 *cpu = &cpu_data(cpuid);
81 81
82 if (cpu->x86_vendor != X86_VENDOR_INTEL || 82 if (cpu->x86_vendor != X86_VENDOR_INTEL ||
83 !cpu_has(cpu, X86_FEATURE_EST)) 83 !cpu_has(cpu, X86_FEATURE_EST))
@@ -560,7 +560,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
560 unsigned int cpu = policy->cpu; 560 unsigned int cpu = policy->cpu;
561 struct acpi_cpufreq_data *data; 561 struct acpi_cpufreq_data *data;
562 unsigned int result = 0; 562 unsigned int result = 0;
563 struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; 563 struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
564 struct acpi_processor_performance *perf; 564 struct acpi_processor_performance *perf;
565 565
566 dprintk("acpi_cpufreq_cpu_init\n"); 566 dprintk("acpi_cpufreq_cpu_init\n");
diff --git a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
index c11baaf9f2b4..326a4c81f684 100644
--- a/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
+++ b/arch/x86/kernel/cpu/cpufreq/e_powersaver.c
@@ -305,7 +305,7 @@ static struct cpufreq_driver eps_driver = {
305 305
306static int __init eps_init(void) 306static int __init eps_init(void)
307{ 307{
308 struct cpuinfo_x86 *c = cpu_data; 308 struct cpuinfo_x86 *c = &cpu_data(0);
309 309
310 /* This driver will work only on Centaur C7 processors with 310 /* This driver will work only on Centaur C7 processors with
311 * Enhanced SpeedStep/PowerSaver registers */ 311 * Enhanced SpeedStep/PowerSaver registers */
diff --git a/arch/x86/kernel/cpu/cpufreq/elanfreq.c b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
index 1e7ae7dafcf6..94619c22f563 100644
--- a/arch/x86/kernel/cpu/cpufreq/elanfreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/elanfreq.c
@@ -199,7 +199,7 @@ static int elanfreq_target (struct cpufreq_policy *policy,
199 199
200static int elanfreq_cpu_init(struct cpufreq_policy *policy) 200static int elanfreq_cpu_init(struct cpufreq_policy *policy)
201{ 201{
202 struct cpuinfo_x86 *c = cpu_data; 202 struct cpuinfo_x86 *c = &cpu_data(0);
203 unsigned int i; 203 unsigned int i;
204 int result; 204 int result;
205 205
@@ -280,7 +280,7 @@ static struct cpufreq_driver elanfreq_driver = {
280 280
281static int __init elanfreq_init(void) 281static int __init elanfreq_init(void)
282{ 282{
283 struct cpuinfo_x86 *c = cpu_data; 283 struct cpuinfo_x86 *c = &cpu_data(0);
284 284
285 /* Test if we have the right hardware */ 285 /* Test if we have the right hardware */
286 if ((c->x86_vendor != X86_VENDOR_AMD) || 286 if ((c->x86_vendor != X86_VENDOR_AMD) ||
diff --git a/arch/x86/kernel/cpu/cpufreq/longhaul.c b/arch/x86/kernel/cpu/cpufreq/longhaul.c
index 5045f5d583c8..749d00cb2ebd 100644
--- a/arch/x86/kernel/cpu/cpufreq/longhaul.c
+++ b/arch/x86/kernel/cpu/cpufreq/longhaul.c
@@ -780,7 +780,7 @@ static int longhaul_setup_southbridge(void)
780 780
781static int __init longhaul_cpu_init(struct cpufreq_policy *policy) 781static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
782{ 782{
783 struct cpuinfo_x86 *c = cpu_data; 783 struct cpuinfo_x86 *c = &cpu_data(0);
784 char *cpuname=NULL; 784 char *cpuname=NULL;
785 int ret; 785 int ret;
786 u32 lo, hi; 786 u32 lo, hi;
@@ -959,7 +959,7 @@ static struct cpufreq_driver longhaul_driver = {
959 959
960static int __init longhaul_init(void) 960static int __init longhaul_init(void)
961{ 961{
962 struct cpuinfo_x86 *c = cpu_data; 962 struct cpuinfo_x86 *c = &cpu_data(0);
963 963
964 if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6) 964 if (c->x86_vendor != X86_VENDOR_CENTAUR || c->x86 != 6)
965 return -ENODEV; 965 return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/longrun.c b/arch/x86/kernel/cpu/cpufreq/longrun.c
index b2689514295a..af4a867a097c 100644
--- a/arch/x86/kernel/cpu/cpufreq/longrun.c
+++ b/arch/x86/kernel/cpu/cpufreq/longrun.c
@@ -172,7 +172,7 @@ static unsigned int __init longrun_determine_freqs(unsigned int *low_freq,
172 u32 save_lo, save_hi; 172 u32 save_lo, save_hi;
173 u32 eax, ebx, ecx, edx; 173 u32 eax, ebx, ecx, edx;
174 u32 try_hi; 174 u32 try_hi;
175 struct cpuinfo_x86 *c = cpu_data; 175 struct cpuinfo_x86 *c = &cpu_data(0);
176 176
177 if (!low_freq || !high_freq) 177 if (!low_freq || !high_freq)
178 return -EINVAL; 178 return -EINVAL;
@@ -298,7 +298,7 @@ static struct cpufreq_driver longrun_driver = {
298 */ 298 */
299static int __init longrun_init(void) 299static int __init longrun_init(void)
300{ 300{
301 struct cpuinfo_x86 *c = cpu_data; 301 struct cpuinfo_x86 *c = &cpu_data(0);
302 302
303 if (c->x86_vendor != X86_VENDOR_TRANSMETA || 303 if (c->x86_vendor != X86_VENDOR_TRANSMETA ||
304 !cpu_has(c, X86_FEATURE_LONGRUN)) 304 !cpu_has(c, X86_FEATURE_LONGRUN))
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 793eae854f4f..14791ec55cfd 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -195,7 +195,7 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
195 195
196static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) 196static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
197{ 197{
198 struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; 198 struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
199 int cpuid = 0; 199 int cpuid = 0;
200 unsigned int i; 200 unsigned int i;
201 201
@@ -279,7 +279,7 @@ static struct cpufreq_driver p4clockmod_driver = {
279 279
280static int __init cpufreq_p4_init(void) 280static int __init cpufreq_p4_init(void)
281{ 281{
282 struct cpuinfo_x86 *c = cpu_data; 282 struct cpuinfo_x86 *c = &cpu_data(0);
283 int ret; 283 int ret;
284 284
285 /* 285 /*
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
index 6d0285339317..42405b4e34ed 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k6.c
@@ -215,7 +215,7 @@ static struct cpufreq_driver powernow_k6_driver = {
215 */ 215 */
216static int __init powernow_k6_init(void) 216static int __init powernow_k6_init(void)
217{ 217{
218 struct cpuinfo_x86 *c = cpu_data; 218 struct cpuinfo_x86 *c = &cpu_data(0);
219 219
220 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) || 220 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 != 5) ||
221 ((c->x86_model != 12) && (c->x86_model != 13))) 221 ((c->x86_model != 12) && (c->x86_model != 13)))
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
index f3686a5f2308..b5a9863d6cdc 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k7.c
@@ -114,7 +114,7 @@ static int check_fsb(unsigned int fsbspeed)
114 114
115static int check_powernow(void) 115static int check_powernow(void)
116{ 116{
117 struct cpuinfo_x86 *c = cpu_data; 117 struct cpuinfo_x86 *c = &cpu_data(0);
118 unsigned int maxei, eax, ebx, ecx, edx; 118 unsigned int maxei, eax, ebx, ecx, edx;
119 119
120 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) { 120 if ((c->x86_vendor != X86_VENDOR_AMD) || (c->x86 !=6)) {
diff --git a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
index d9f3e90a7ae0..42da9bd677d6 100644
--- a/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
+++ b/arch/x86/kernel/cpu/cpufreq/sc520_freq.c
@@ -102,7 +102,7 @@ static int sc520_freq_target (struct cpufreq_policy *policy,
102 102
103static int sc520_freq_cpu_init(struct cpufreq_policy *policy) 103static int sc520_freq_cpu_init(struct cpufreq_policy *policy)
104{ 104{
105 struct cpuinfo_x86 *c = cpu_data; 105 struct cpuinfo_x86 *c = &cpu_data(0);
106 int result; 106 int result;
107 107
108 /* capability check */ 108 /* capability check */
@@ -151,7 +151,7 @@ static struct cpufreq_driver sc520_freq_driver = {
151 151
152static int __init sc520_freq_init(void) 152static int __init sc520_freq_init(void)
153{ 153{
154 struct cpuinfo_x86 *c = cpu_data; 154 struct cpuinfo_x86 *c = &cpu_data(0);
155 int err; 155 int err;
156 156
157 /* Test if we have the right hardware */ 157 /* Test if we have the right hardware */
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
index 811d47438546..3031f1196192 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c
@@ -230,7 +230,7 @@ static struct cpu_model models[] =
230 230
231static int centrino_cpu_init_table(struct cpufreq_policy *policy) 231static int centrino_cpu_init_table(struct cpufreq_policy *policy)
232{ 232{
233 struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu]; 233 struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
234 struct cpu_model *model; 234 struct cpu_model *model;
235 235
236 for(model = models; model->cpu_id != NULL; model++) 236 for(model = models; model->cpu_id != NULL; model++)
@@ -340,7 +340,7 @@ static unsigned int get_cur_freq(unsigned int cpu)
340 340
341static int centrino_cpu_init(struct cpufreq_policy *policy) 341static int centrino_cpu_init(struct cpufreq_policy *policy)
342{ 342{
343 struct cpuinfo_x86 *cpu = &cpu_data[policy->cpu]; 343 struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu);
344 unsigned freq; 344 unsigned freq;
345 unsigned l, h; 345 unsigned l, h;
346 int ret; 346 int ret;
@@ -612,7 +612,7 @@ static struct cpufreq_driver centrino_driver = {
612 */ 612 */
613static int __init centrino_init(void) 613static int __init centrino_init(void)
614{ 614{
615 struct cpuinfo_x86 *cpu = cpu_data; 615 struct cpuinfo_x86 *cpu = &cpu_data(0);
616 616
617 if (!cpu_has(cpu, X86_FEATURE_EST)) 617 if (!cpu_has(cpu, X86_FEATURE_EST))
618 return -ENODEV; 618 return -ENODEV;
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
index b1acc8ce3167..76c3ab0da468 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-lib.c
@@ -228,7 +228,7 @@ EXPORT_SYMBOL_GPL(speedstep_get_processor_frequency);
228 228
229unsigned int speedstep_detect_processor (void) 229unsigned int speedstep_detect_processor (void)
230{ 230{
231 struct cpuinfo_x86 *c = cpu_data; 231 struct cpuinfo_x86 *c = &cpu_data(0);
232 u32 ebx, msr_lo, msr_hi; 232 u32 ebx, msr_lo, msr_hi;
233 233
234 dprintk("x86: %x, model: %x\n", c->x86, c->x86_model); 234 dprintk("x86: %x, model: %x\n", c->x86, c->x86_model);
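
The cpufreq conversions above all follow one mechanical pattern: the old NR_CPUS-sized cpu_data[] array is gone, and boot-CPU or per-policy CPU information is reached through the cpu_data(n) accessor instead. A minimal sketch of the before/after shape (the driver name and the feature test are illustrative; only cpu_data() and cpu_has() are taken from the patch):

#include <linux/init.h>
#include <linux/errno.h>
#include <asm/processor.h>	/* struct cpuinfo_x86, cpu_data(), cpu_has() */

static int __init example_freq_init(void)
{
	/* was:  struct cpuinfo_x86 *c = cpu_data;               (boot CPU)   */
	/* was:  struct cpuinfo_x86 *c = &cpu_data[policy->cpu]; (per policy) */
	struct cpuinfo_x86 *c = &cpu_data(0);

	if (c->x86_vendor != X86_VENDOR_INTEL || !cpu_has(c, X86_FEATURE_EST))
		return -ENODEV;
	return 0;
}
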
diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
index 297a24116949..9921b01fe199 100644
--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
@@ -295,7 +295,7 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
295 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */ 295 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
296 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb; 296 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
297#ifdef CONFIG_X86_HT 297#ifdef CONFIG_X86_HT
298 unsigned int cpu = (c == &boot_cpu_data) ? 0 : (c - cpu_data); 298 unsigned int cpu = c->cpu_index;
299#endif 299#endif
300 300
301 if (c->cpuid_level > 3) { 301 if (c->cpuid_level > 3) {
@@ -417,14 +417,14 @@ unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
417 if (new_l2) { 417 if (new_l2) {
418 l2 = new_l2; 418 l2 = new_l2;
419#ifdef CONFIG_X86_HT 419#ifdef CONFIG_X86_HT
420 cpu_llc_id[cpu] = l2_id; 420 per_cpu(cpu_llc_id, cpu) = l2_id;
421#endif 421#endif
422 } 422 }
423 423
424 if (new_l3) { 424 if (new_l3) {
425 l3 = new_l3; 425 l3 = new_l3;
426#ifdef CONFIG_X86_HT 426#ifdef CONFIG_X86_HT
427 cpu_llc_id[cpu] = l3_id; 427 per_cpu(cpu_llc_id, cpu) = l3_id;
428#endif 428#endif
429 } 429 }
430 430
@@ -459,7 +459,7 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
459 struct _cpuid4_info *this_leaf, *sibling_leaf; 459 struct _cpuid4_info *this_leaf, *sibling_leaf;
460 unsigned long num_threads_sharing; 460 unsigned long num_threads_sharing;
461 int index_msb, i; 461 int index_msb, i;
462 struct cpuinfo_x86 *c = cpu_data; 462 struct cpuinfo_x86 *c = &cpu_data(cpu);
463 463
464 this_leaf = CPUID4_INFO_IDX(cpu, index); 464 this_leaf = CPUID4_INFO_IDX(cpu, index);
465 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; 465 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
@@ -470,8 +470,8 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
470 index_msb = get_count_order(num_threads_sharing); 470 index_msb = get_count_order(num_threads_sharing);
471 471
472 for_each_online_cpu(i) { 472 for_each_online_cpu(i) {
473 if (c[i].apicid >> index_msb == 473 if (cpu_data(i).apicid >> index_msb ==
474 c[cpu].apicid >> index_msb) { 474 c->apicid >> index_msb) {
475 cpu_set(i, this_leaf->shared_cpu_map); 475 cpu_set(i, this_leaf->shared_cpu_map);
476 if (i != cpu && cpuid4_info[i]) { 476 if (i != cpu && cpuid4_info[i]) {
477 sibling_leaf = CPUID4_INFO_IDX(i, index); 477 sibling_leaf = CPUID4_INFO_IDX(i, index);
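
In the cache-topology hunks above, the last-level-cache id moves into per_cpu(cpu_llc_id, cpu) and the sharing loop compares APIC IDs through cpu_data(i) instead of indexing a flat array. The grouping rule itself is unchanged: two CPUs share a cache level when their APIC IDs agree once the low bits covering the sharing threads are shifted away. A small illustrative helper, not part of the patch:

#include <linux/types.h>
#include <linux/bitops.h>	/* get_count_order() */

static inline int example_shares_cache(u8 apicid_a, u8 apicid_b,
				       unsigned long num_threads_sharing)
{
	int index_msb = get_count_order(num_threads_sharing);

	/* same cache iff the APIC IDs match above the per-thread bits */
	return (apicid_a >> index_msb) == (apicid_b >> index_msb);
}
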
diff --git a/arch/x86/kernel/cpu/perfctr-watchdog.c b/arch/x86/kernel/cpu/perfctr-watchdog.c
index 54cdbf1a40f1..c02541e6e653 100644
--- a/arch/x86/kernel/cpu/perfctr-watchdog.c
+++ b/arch/x86/kernel/cpu/perfctr-watchdog.c
@@ -120,7 +120,9 @@ int reserve_perfctr_nmi(unsigned int msr)
120 unsigned int counter; 120 unsigned int counter;
121 121
122 counter = nmi_perfctr_msr_to_bit(msr); 122 counter = nmi_perfctr_msr_to_bit(msr);
123 BUG_ON(counter > NMI_MAX_COUNTER_BITS); 123 /* register not managed by the allocator? */
124 if (counter > NMI_MAX_COUNTER_BITS)
125 return 1;
124 126
125 if (!test_and_set_bit(counter, perfctr_nmi_owner)) 127 if (!test_and_set_bit(counter, perfctr_nmi_owner))
126 return 1; 128 return 1;
@@ -132,7 +134,9 @@ void release_perfctr_nmi(unsigned int msr)
132 unsigned int counter; 134 unsigned int counter;
133 135
134 counter = nmi_perfctr_msr_to_bit(msr); 136 counter = nmi_perfctr_msr_to_bit(msr);
135 BUG_ON(counter > NMI_MAX_COUNTER_BITS); 137 /* register not managed by the allocator? */
138 if (counter > NMI_MAX_COUNTER_BITS)
139 return;
136 140
137 clear_bit(counter, perfctr_nmi_owner); 141 clear_bit(counter, perfctr_nmi_owner);
138} 142}
@@ -142,7 +146,9 @@ int reserve_evntsel_nmi(unsigned int msr)
142 unsigned int counter; 146 unsigned int counter;
143 147
144 counter = nmi_evntsel_msr_to_bit(msr); 148 counter = nmi_evntsel_msr_to_bit(msr);
145 BUG_ON(counter > NMI_MAX_COUNTER_BITS); 149 /* register not managed by the allocator? */
150 if (counter > NMI_MAX_COUNTER_BITS)
151 return 1;
146 152
147 if (!test_and_set_bit(counter, evntsel_nmi_owner)) 153 if (!test_and_set_bit(counter, evntsel_nmi_owner))
148 return 1; 154 return 1;
@@ -154,7 +160,9 @@ void release_evntsel_nmi(unsigned int msr)
154 unsigned int counter; 160 unsigned int counter;
155 161
156 counter = nmi_evntsel_msr_to_bit(msr); 162 counter = nmi_evntsel_msr_to_bit(msr);
157 BUG_ON(counter > NMI_MAX_COUNTER_BITS); 163 /* register not managed by the allocator? */
164 if (counter > NMI_MAX_COUNTER_BITS)
165 return;
158 166
159 clear_bit(counter, evntsel_nmi_owner); 167 clear_bit(counter, evntsel_nmi_owner);
160} 168}
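
The perfctr-watchdog changes above replace BUG_ON() with graceful handling when an MSR is not one the allocator tracks: reserve-style helpers report the register as available (return 1) and release-style helpers simply return. A sketch of the pair, with an illustrative bitmap standing in for perfctr_nmi_owner/evntsel_nmi_owner and NMI_MAX_COUNTER_BITS taken from that file:

#include <linux/bitops.h>

static unsigned long example_owner[BITS_TO_LONGS(NMI_MAX_COUNTER_BITS + 1)];

static int example_reserve(unsigned int counter)
{
	if (counter > NMI_MAX_COUNTER_BITS)	/* not managed here: nothing to reserve */
		return 1;
	return !test_and_set_bit(counter, example_owner);
}

static void example_release(unsigned int counter)
{
	if (counter > NMI_MAX_COUNTER_BITS)	/* not managed here: nothing to free */
		return;
	clear_bit(counter, example_owner);
}
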
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 879a0f789b1e..2d42b414b777 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -85,12 +85,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
85 /* nothing */ 85 /* nothing */
86 }; 86 };
87 struct cpuinfo_x86 *c = v; 87 struct cpuinfo_x86 *c = v;
88 int i, n = c - cpu_data; 88 int i, n = 0;
89 int fpu_exception; 89 int fpu_exception;
90 90
91#ifdef CONFIG_SMP 91#ifdef CONFIG_SMP
92 if (!cpu_online(n)) 92 if (!cpu_online(n))
93 return 0; 93 return 0;
94 n = c->cpu_index;
94#endif 95#endif
95 seq_printf(m, "processor\t: %d\n" 96 seq_printf(m, "processor\t: %d\n"
96 "vendor_id\t: %s\n" 97 "vendor_id\t: %s\n"
@@ -175,11 +176,15 @@ static int show_cpuinfo(struct seq_file *m, void *v)
175 176
176static void *c_start(struct seq_file *m, loff_t *pos) 177static void *c_start(struct seq_file *m, loff_t *pos)
177{ 178{
178 return *pos < NR_CPUS ? cpu_data + *pos : NULL; 179 if (*pos == 0) /* just in case, cpu 0 is not the first */
180 *pos = first_cpu(cpu_possible_map);
181 if ((*pos) < NR_CPUS && cpu_possible(*pos))
182 return &cpu_data(*pos);
183 return NULL;
179} 184}
180static void *c_next(struct seq_file *m, void *v, loff_t *pos) 185static void *c_next(struct seq_file *m, void *v, loff_t *pos)
181{ 186{
182 ++*pos; 187 *pos = next_cpu(*pos, cpu_possible_map);
183 return c_start(m, pos); 188 return c_start(m, pos);
184} 189}
185static void c_stop(struct seq_file *m, void *v) 190static void c_stop(struct seq_file *m, void *v)
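
With CPU hotplug the possible CPU numbers need not be contiguous, so c_start()/c_next() above now walk cpu_possible_map instead of assuming entries 0..NR_CPUS-1, and show_cpuinfo() takes the processor number from c->cpu_index. For context, these callbacks are wired up through the file's seq_operations table, roughly:

#include <linux/seq_file.h>

/* the (unchanged) wiring in proc.c, shown only to connect the pieces above */
struct seq_operations cpuinfo_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= show_cpuinfo,
};
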
diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
index 70dcf912d9fb..05c9936a16cc 100644
--- a/arch/x86/kernel/cpuid.c
+++ b/arch/x86/kernel/cpuid.c
@@ -114,7 +114,7 @@ static ssize_t cpuid_read(struct file *file, char __user *buf,
114static int cpuid_open(struct inode *inode, struct file *file) 114static int cpuid_open(struct inode *inode, struct file *file)
115{ 115{
116 unsigned int cpu = iminor(file->f_path.dentry->d_inode); 116 unsigned int cpu = iminor(file->f_path.dentry->d_inode);
117 struct cpuinfo_x86 *c = &(cpu_data)[cpu]; 117 struct cpuinfo_x86 *c = &cpu_data(cpu);
118 118
119 if (cpu >= NR_CPUS || !cpu_online(cpu)) 119 if (cpu >= NR_CPUS || !cpu_online(cpu))
120 return -ENXIO; /* No such CPU */ 120 return -ENXIO; /* No such CPU */
@@ -134,15 +134,18 @@ static const struct file_operations cpuid_fops = {
134 .open = cpuid_open, 134 .open = cpuid_open,
135}; 135};
136 136
137static int __cpuinit cpuid_device_create(int i) 137static __cpuinit int cpuid_device_create(int cpu)
138{ 138{
139 int err = 0;
140 struct device *dev; 139 struct device *dev;
141 140
142 dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, i), "cpu%d",i); 141 dev = device_create(cpuid_class, NULL, MKDEV(CPUID_MAJOR, cpu),
143 if (IS_ERR(dev)) 142 "cpu%d", cpu);
144 err = PTR_ERR(dev); 143 return IS_ERR(dev) ? PTR_ERR(dev) : 0;
145 return err; 144}
145
146static void cpuid_device_destroy(int cpu)
147{
148 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu));
146} 149}
147 150
148static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb, 151static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
@@ -150,18 +153,21 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
150 void *hcpu) 153 void *hcpu)
151{ 154{
152 unsigned int cpu = (unsigned long)hcpu; 155 unsigned int cpu = (unsigned long)hcpu;
156 int err = 0;
153 157
154 switch (action) { 158 switch (action) {
155 case CPU_ONLINE: 159 case CPU_UP_PREPARE:
156 case CPU_ONLINE_FROZEN: 160 case CPU_UP_PREPARE_FROZEN:
157 cpuid_device_create(cpu); 161 err = cpuid_device_create(cpu);
158 break; 162 break;
163 case CPU_UP_CANCELED:
164 case CPU_UP_CANCELED_FROZEN:
159 case CPU_DEAD: 165 case CPU_DEAD:
160 case CPU_DEAD_FROZEN: 166 case CPU_DEAD_FROZEN:
161 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); 167 cpuid_device_destroy(cpu);
162 break; 168 break;
163 } 169 }
164 return NOTIFY_OK; 170 return err ? NOTIFY_BAD : NOTIFY_OK;
165} 171}
166 172
167static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier = 173static struct notifier_block __cpuinitdata cpuid_class_cpu_notifier =
@@ -198,7 +204,7 @@ static int __init cpuid_init(void)
198out_class: 204out_class:
199 i = 0; 205 i = 0;
200 for_each_online_cpu(i) { 206 for_each_online_cpu(i) {
201 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, i)); 207 cpuid_device_destroy(i);
202 } 208 }
203 class_destroy(cpuid_class); 209 class_destroy(cpuid_class);
204out_chrdev: 210out_chrdev:
@@ -212,7 +218,7 @@ static void __exit cpuid_exit(void)
212 int cpu = 0; 218 int cpu = 0;
213 219
214 for_each_online_cpu(cpu) 220 for_each_online_cpu(cpu)
215 device_destroy(cpuid_class, MKDEV(CPUID_MAJOR, cpu)); 221 cpuid_device_destroy(cpu);
216 class_destroy(cpuid_class); 222 class_destroy(cpuid_class);
217 unregister_chrdev(CPUID_MAJOR, "cpu/cpuid"); 223 unregister_chrdev(CPUID_MAJOR, "cpu/cpuid");
218 unregister_hotcpu_notifier(&cpuid_class_cpu_notifier); 224 unregister_hotcpu_notifier(&cpuid_class_cpu_notifier);
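
The cpuid notifier above moves device creation from CPU_ONLINE to CPU_UP_PREPARE, so a failure can veto the CPU coming up (NOTIFY_BAD), and adds CPU_UP_CANCELED so a failed bring-up is cleaned up the same way as a dead CPU. The general skeleton of that pattern, with example_device_create()/example_device_destroy() standing in for the cpuid helpers:

#include <linux/cpu.h>
#include <linux/notifier.h>

static int example_device_create(unsigned int cpu) { return 0; }	/* stand-in */
static void example_device_destroy(unsigned int cpu) { }		/* stand-in */

static int __cpuinit example_cpu_callback(struct notifier_block *nfb,
					  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int err = 0;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		err = example_device_create(cpu);	/* failure vetoes the CPU */
		break;
	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		example_device_destroy(cpu);
		break;
	}
	return err ? NOTIFY_BAD : NOTIFY_OK;
}
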
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 32e75d0731a9..72d0c56c1b48 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -47,6 +47,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
47 if (!kdump_buf_page) { 47 if (!kdump_buf_page) {
48 printk(KERN_WARNING "Kdump: Kdump buffer page not" 48 printk(KERN_WARNING "Kdump: Kdump buffer page not"
49 " allocated\n"); 49 " allocated\n");
50 kunmap_atomic(vaddr, KM_PTE0);
50 return -EFAULT; 51 return -EFAULT;
51 } 52 }
52 copy_page(kdump_buf_page, vaddr); 53 copy_page(kdump_buf_page, vaddr);
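
The one-line crash_dump fix above plugs a leak: the page had already been mapped with an atomic kmap a few lines earlier, and the early -EFAULT return skipped the matching kunmap_atomic(). The general map/copy/unmap shape with the error path handled (illustrative, assuming kmap_atomic_pfn() with the KM_PTE0 slot as in this file; argument checking trimmed):

#include <linux/highmem.h>

static ssize_t example_copy_page(unsigned long pfn, char *dst)
{
	void *vaddr = kmap_atomic_pfn(pfn, KM_PTE0);

	if (!dst) {
		kunmap_atomic(vaddr, KM_PTE0);	/* the error path must unmap too */
		return -EFAULT;
	}
	copy_page(dst, vaddr);
	kunmap_atomic(vaddr, KM_PTE0);
	return PAGE_SIZE;
}
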
diff --git a/arch/x86/kernel/e820_32.c b/arch/x86/kernel/e820_32.c
index 3c86b979a40a..d58039e8de74 100644
--- a/arch/x86/kernel/e820_32.c
+++ b/arch/x86/kernel/e820_32.c
@@ -288,7 +288,8 @@ legacy_init_iomem_resources(struct resource *code_resource, struct resource *dat
288 request_resource(res, code_resource); 288 request_resource(res, code_resource);
289 request_resource(res, data_resource); 289 request_resource(res, data_resource);
290#ifdef CONFIG_KEXEC 290#ifdef CONFIG_KEXEC
291 request_resource(res, &crashk_res); 291 if (crashk_res.start != crashk_res.end)
292 request_resource(res, &crashk_res);
292#endif 293#endif
293 } 294 }
294 } 295 }
diff --git a/arch/x86/kernel/e820_64.c b/arch/x86/kernel/e820_64.c
index e422b8159f69..57616865d8a0 100644
--- a/arch/x86/kernel/e820_64.c
+++ b/arch/x86/kernel/e820_64.c
@@ -226,7 +226,8 @@ void __init e820_reserve_resources(void)
226 request_resource(res, &code_resource); 226 request_resource(res, &code_resource);
227 request_resource(res, &data_resource); 227 request_resource(res, &data_resource);
228#ifdef CONFIG_KEXEC 228#ifdef CONFIG_KEXEC
229 request_resource(res, &crashk_res); 229 if (crashk_res.start != crashk_res.end)
230 request_resource(res, &crashk_res);
230#endif 231#endif
231 } 232 }
232 } 233 }
diff --git a/arch/x86/kernel/early-quirks_64.c b/arch/x86/kernel/early-quirks.c
index 13aa4fd728f3..dc34acbd54aa 100644
--- a/arch/x86/kernel/early-quirks_64.c
+++ b/arch/x86/kernel/early-quirks.c
@@ -13,9 +13,13 @@
13#include <linux/acpi.h> 13#include <linux/acpi.h>
14#include <linux/pci_ids.h> 14#include <linux/pci_ids.h>
15#include <asm/pci-direct.h> 15#include <asm/pci-direct.h>
16#include <asm/proto.h>
17#include <asm/iommu.h>
18#include <asm/dma.h> 16#include <asm/dma.h>
17#include <asm/io_apic.h>
18#include <asm/apic.h>
19
20#ifdef CONFIG_IOMMU
21#include <asm/iommu.h>
22#endif
19 23
20static void __init via_bugs(void) 24static void __init via_bugs(void)
21{ 25{
@@ -23,7 +27,8 @@ static void __init via_bugs(void)
23 if ((end_pfn > MAX_DMA32_PFN || force_iommu) && 27 if ((end_pfn > MAX_DMA32_PFN || force_iommu) &&
24 !iommu_aperture_allowed) { 28 !iommu_aperture_allowed) {
25 printk(KERN_INFO 29 printk(KERN_INFO
26 "Looks like a VIA chipset. Disabling IOMMU. Override with iommu=allowed\n"); 30 "Looks like a VIA chipset. Disabling IOMMU."
31 " Override with iommu=allowed\n");
27 iommu_aperture_disabled = 1; 32 iommu_aperture_disabled = 1;
28 } 33 }
29#endif 34#endif
@@ -40,6 +45,7 @@ static int __init nvidia_hpet_check(struct acpi_table_header *header)
40static void __init nvidia_bugs(void) 45static void __init nvidia_bugs(void)
41{ 46{
42#ifdef CONFIG_ACPI 47#ifdef CONFIG_ACPI
48#ifdef CONFIG_X86_IO_APIC
43 /* 49 /*
44 * All timer overrides on Nvidia are 50 * All timer overrides on Nvidia are
45 * wrong unless HPET is enabled. 51 * wrong unless HPET is enabled.
@@ -59,17 +65,20 @@ static void __init nvidia_bugs(void)
59 "try acpi_use_timer_override\n"); 65 "try acpi_use_timer_override\n");
60 } 66 }
61#endif 67#endif
68#endif
62 /* RED-PEN skip them on mptables too? */ 69 /* RED-PEN skip them on mptables too? */
63 70
64} 71}
65 72
66static void __init ati_bugs(void) 73static void __init ati_bugs(void)
67{ 74{
75#ifdef CONFIG_X86_IO_APIC
68 if (timer_over_8254 == 1) { 76 if (timer_over_8254 == 1) {
69 timer_over_8254 = 0; 77 timer_over_8254 = 0;
70 printk(KERN_INFO 78 printk(KERN_INFO
71 "ATI board detected. Disabling timer routing over 8254.\n"); 79 "ATI board detected. Disabling timer routing over 8254.\n");
72 } 80 }
81#endif
73} 82}
74 83
75struct chipset { 84struct chipset {
@@ -104,7 +113,7 @@ void __init early_quirks(void)
104 if (class == 0xffffffff) 113 if (class == 0xffffffff)
105 break; 114 break;
106 115
107 if ((class >> 16) != PCI_CLASS_BRIDGE_PCI) 116 if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
108 continue; 117 continue;
109 118
110 vendor = read_pci_config(num, slot, func, 119 vendor = read_pci_config(num, slot, func,
diff --git a/arch/x86/kernel/genapic_64.c b/arch/x86/kernel/genapic_64.c
index 4ae03e3e8294..ce703e21c912 100644
--- a/arch/x86/kernel/genapic_64.c
+++ b/arch/x86/kernel/genapic_64.c
@@ -24,10 +24,19 @@
24#include <acpi/acpi_bus.h> 24#include <acpi/acpi_bus.h>
25#endif 25#endif
26 26
27/* which logical CPU number maps to which CPU (physical APIC ID) */ 27/*
28u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly 28 * which logical CPU number maps to which CPU (physical APIC ID)
29 *
30 * The following static array is used during kernel startup
31 * and the x86_cpu_to_apicid_ptr contains the address of the
 32 * array during this time. It is zeroed once the per_cpu
 33 * data area has been set up.
34 */
35u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata
29 = { [0 ... NR_CPUS-1] = BAD_APICID }; 36 = { [0 ... NR_CPUS-1] = BAD_APICID };
30EXPORT_SYMBOL(x86_cpu_to_apicid); 37void *x86_cpu_to_apicid_ptr;
38DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
39EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
31 40
32struct genapic __read_mostly *genapic = &apic_flat; 41struct genapic __read_mostly *genapic = &apic_flat;
33 42
diff --git a/arch/x86/kernel/genapic_flat_64.c b/arch/x86/kernel/genapic_flat_64.c
index 91c7526768ee..07352b74bda6 100644
--- a/arch/x86/kernel/genapic_flat_64.c
+++ b/arch/x86/kernel/genapic_flat_64.c
@@ -172,7 +172,7 @@ static unsigned int physflat_cpu_mask_to_apicid(cpumask_t cpumask)
172 */ 172 */
173 cpu = first_cpu(cpumask); 173 cpu = first_cpu(cpumask);
174 if ((unsigned)cpu < NR_CPUS) 174 if ((unsigned)cpu < NR_CPUS)
175 return x86_cpu_to_apicid[cpu]; 175 return per_cpu(x86_cpu_to_apicid, cpu);
176 else 176 else
177 return BAD_APICID; 177 return BAD_APICID;
178} 178}
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index a7eee0a4751d..6b3469311e42 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -58,7 +58,7 @@ void __init x86_64_start_kernel(char * real_mode_data)
58 58
59 for (i = 0; i < IDT_ENTRIES; i++) 59 for (i = 0; i < IDT_ENTRIES; i++)
60 set_intr_gate(i, early_idt_handler); 60 set_intr_gate(i, early_idt_handler);
61 asm volatile("lidt %0" :: "m" (idt_descr)); 61 load_idt((const struct desc_ptr *)&idt_descr);
62 62
63 early_printk("Kernel alive\n"); 63 early_printk("Kernel alive\n");
64 64
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index f8367074da0d..22d8f00c80dc 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -69,12 +69,15 @@ static inline void hpet_clear_mapping(void)
69 * HPET command line enable / disable 69 * HPET command line enable / disable
70 */ 70 */
71static int boot_hpet_disable; 71static int boot_hpet_disable;
72int hpet_force_user;
72 73
73static int __init hpet_setup(char* str) 74static int __init hpet_setup(char* str)
74{ 75{
75 if (str) { 76 if (str) {
76 if (!strncmp("disable", str, 7)) 77 if (!strncmp("disable", str, 7))
77 boot_hpet_disable = 1; 78 boot_hpet_disable = 1;
79 if (!strncmp("force", str, 5))
80 hpet_force_user = 1;
78 } 81 }
79 return 1; 82 return 1;
80} 83}
diff --git a/arch/x86/kernel/i8259_32.c b/arch/x86/kernel/i8259_32.c
index d34a10cc13a7..f634fc715c99 100644
--- a/arch/x86/kernel/i8259_32.c
+++ b/arch/x86/kernel/i8259_32.c
@@ -403,7 +403,8 @@ void __init native_init_IRQ(void)
403 int vector = FIRST_EXTERNAL_VECTOR + i; 403 int vector = FIRST_EXTERNAL_VECTOR + i;
404 if (i >= NR_IRQS) 404 if (i >= NR_IRQS)
405 break; 405 break;
406 if (vector != SYSCALL_VECTOR) 406 /* SYSCALL_VECTOR was reserved in trap_init. */
407 if (!test_bit(vector, used_vectors))
407 set_intr_gate(vector, interrupt[i]); 408 set_intr_gate(vector, interrupt[i]);
408 } 409 }
409 410
diff --git a/arch/x86/kernel/init_task_32.c b/arch/x86/kernel/init_task.c
index d26fc063a760..468c9c437842 100644
--- a/arch/x86/kernel/init_task_32.c
+++ b/arch/x86/kernel/init_task.c
@@ -15,7 +15,6 @@ static struct files_struct init_files = INIT_FILES;
15static struct signal_struct init_signals = INIT_SIGNALS(init_signals); 15static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
16static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); 16static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17struct mm_struct init_mm = INIT_MM(init_mm); 17struct mm_struct init_mm = INIT_MM(init_mm);
18
19EXPORT_SYMBOL(init_mm); 18EXPORT_SYMBOL(init_mm);
20 19
21/* 20/*
@@ -25,7 +24,7 @@ EXPORT_SYMBOL(init_mm);
25 * way process stacks are handled. This is done by having a special 24 * way process stacks are handled. This is done by having a special
26 * "init_task" linker map entry.. 25 * "init_task" linker map entry..
27 */ 26 */
28union thread_union init_thread_union 27union thread_union init_thread_union
29 __attribute__((__section__(".data.init_task"))) = 28 __attribute__((__section__(".data.init_task"))) =
30 { INIT_THREAD_INFO(init_task) }; 29 { INIT_THREAD_INFO(init_task) };
31 30
@@ -35,12 +34,14 @@ union thread_union init_thread_union
35 * All other task structs will be allocated on slabs in fork.c 34 * All other task structs will be allocated on slabs in fork.c
36 */ 35 */
37struct task_struct init_task = INIT_TASK(init_task); 36struct task_struct init_task = INIT_TASK(init_task);
38
39EXPORT_SYMBOL(init_task); 37EXPORT_SYMBOL(init_task);
40 38
41/* 39/*
42 * per-CPU TSS segments. Threads are completely 'soft' on Linux, 40 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
43 * no more per-task TSS's. 41 * no more per-task TSS's. The TSS size is kept cacheline-aligned
44 */ 42 * so they are allowed to end up in the .data.cacheline_aligned
43 * section. Since TSS's are completely CPU-local, we want them
44 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
45 */
45DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; 46DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
46 47
diff --git a/arch/x86/kernel/init_task_64.c b/arch/x86/kernel/init_task_64.c
deleted file mode 100644
index 4ff33d4f8551..000000000000
--- a/arch/x86/kernel/init_task_64.c
+++ /dev/null
@@ -1,54 +0,0 @@
1#include <linux/mm.h>
2#include <linux/module.h>
3#include <linux/sched.h>
4#include <linux/init.h>
5#include <linux/init_task.h>
6#include <linux/fs.h>
7#include <linux/mqueue.h>
8
9#include <asm/uaccess.h>
10#include <asm/pgtable.h>
11#include <asm/desc.h>
12
13static struct fs_struct init_fs = INIT_FS;
14static struct files_struct init_files = INIT_FILES;
15static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
16static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
17struct mm_struct init_mm = INIT_MM(init_mm);
18
19EXPORT_SYMBOL(init_mm);
20
21/*
22 * Initial task structure.
23 *
24 * We need to make sure that this is 8192-byte aligned due to the
25 * way process stacks are handled. This is done by having a special
26 * "init_task" linker map entry..
27 */
28union thread_union init_thread_union
29 __attribute__((__section__(".data.init_task"))) =
30 { INIT_THREAD_INFO(init_task) };
31
32/*
33 * Initial task structure.
34 *
35 * All other task structs will be allocated on slabs in fork.c
36 */
37struct task_struct init_task = INIT_TASK(init_task);
38
39EXPORT_SYMBOL(init_task);
40/*
41 * per-CPU TSS segments. Threads are completely 'soft' on Linux,
42 * no more per-task TSS's. The TSS size is kept cacheline-aligned
43 * so they are allowed to end up in the .data.cacheline_aligned
44 * section. Since TSS's are completely CPU-local, we want them
45 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
46 */
47DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
48
49/* Copies of the original ist values from the tss are only accessed during
50 * debugging, no special alignment required.
51 */
52DEFINE_PER_CPU(struct orig_ist, orig_ist);
53
54#define ALIGN_TO_4K __attribute__((section(".data.init_task")))
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index 5f10c7189534..0c55e9d86f69 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -1198,7 +1198,7 @@ static u8 irq_vector[NR_IRQ_VECTORS] __read_mostly = { FIRST_DEVICE_VECTOR , 0 }
1198static int __assign_irq_vector(int irq) 1198static int __assign_irq_vector(int irq)
1199{ 1199{
1200 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0; 1200 static int current_vector = FIRST_DEVICE_VECTOR, current_offset = 0;
1201 int vector, offset, i; 1201 int vector, offset;
1202 1202
1203 BUG_ON((unsigned)irq >= NR_IRQ_VECTORS); 1203 BUG_ON((unsigned)irq >= NR_IRQ_VECTORS);
1204 1204
@@ -1215,11 +1215,8 @@ next:
1215 } 1215 }
1216 if (vector == current_vector) 1216 if (vector == current_vector)
1217 return -ENOSPC; 1217 return -ENOSPC;
1218 if (vector == SYSCALL_VECTOR) 1218 if (test_and_set_bit(vector, used_vectors))
1219 goto next; 1219 goto next;
1220 for (i = 0; i < NR_IRQ_VECTORS; i++)
1221 if (irq_vector[i] == vector)
1222 goto next;
1223 1220
1224 current_vector = vector; 1221 current_vector = vector;
1225 current_offset = offset; 1222 current_offset = offset;
@@ -2295,6 +2292,12 @@ static inline void __init check_timer(void)
2295 2292
2296void __init setup_IO_APIC(void) 2293void __init setup_IO_APIC(void)
2297{ 2294{
2295 int i;
2296
2297 /* Reserve all the system vectors. */
2298 for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
2299 set_bit(i, used_vectors);
2300
2298 enable_IO_APIC(); 2301 enable_IO_APIC();
2299 2302
2300 if (acpi_ioapic) 2303 if (acpi_ioapic)
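
Vector allocation on 32-bit now goes through a used_vectors bitmap: setup_IO_APIC() marks every system vector as taken, __assign_irq_vector() claims a free vector atomically with test_and_set_bit() instead of special-casing SYSCALL_VECTOR and scanning irq_vector[], and native_init_IRQ() (earlier in this diff) skips any bit that is already set. A condensed sketch of the two steps, with an illustrative bitmap name:

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/errno.h>

static DECLARE_BITMAP(example_used_vectors, NR_VECTORS);

static void example_reserve_system_vectors(void)
{
	int i;

	for (i = FIRST_SYSTEM_VECTOR; i < NR_VECTORS; i++)
		set_bit(i, example_used_vectors);	/* never handed to devices */
}

static int example_assign_vector(void)
{
	int vector;

	for (vector = FIRST_DEVICE_VECTOR; vector < NR_VECTORS; vector++)
		if (!test_and_set_bit(vector, example_used_vectors))
			return vector;			/* claimed atomically */
	return -ENOSPC;
}
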
diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
index 8459ca64bc2f..11b935f4f886 100644
--- a/arch/x86/kernel/machine_kexec_32.c
+++ b/arch/x86/kernel/machine_kexec_32.c
@@ -149,28 +149,6 @@ NORET_TYPE void machine_kexec(struct kimage *image)
149 image->start, cpu_has_pae); 149 image->start, cpu_has_pae);
150} 150}
151 151
152/* crashkernel=size@addr specifies the location to reserve for
153 * a crash kernel. By reserving this memory we guarantee
154 * that linux never sets it up as a DMA target.
155 * Useful for holding code to do something appropriate
156 * after a kernel panic.
157 */
158static int __init parse_crashkernel(char *arg)
159{
160 unsigned long size, base;
161 size = memparse(arg, &arg);
162 if (*arg == '@') {
163 base = memparse(arg+1, &arg);
164 /* FIXME: Do I want a sanity check
165 * to validate the memory range?
166 */
167 crashk_res.start = base;
168 crashk_res.end = base + size - 1;
169 }
170 return 0;
171}
172early_param("crashkernel", parse_crashkernel);
173
174void arch_crash_save_vmcoreinfo(void) 152void arch_crash_save_vmcoreinfo(void)
175{ 153{
176#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE 154#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
diff --git a/arch/x86/kernel/machine_kexec_64.c b/arch/x86/kernel/machine_kexec_64.c
index 7450b69710b5..0d8577f05422 100644
--- a/arch/x86/kernel/machine_kexec_64.c
+++ b/arch/x86/kernel/machine_kexec_64.c
@@ -231,33 +231,6 @@ NORET_TYPE void machine_kexec(struct kimage *image)
231 image->start); 231 image->start);
232} 232}
233 233
234/* crashkernel=size@addr specifies the location to reserve for
235 * a crash kernel. By reserving this memory we guarantee
236 * that linux never set's it up as a DMA target.
237 * Useful for holding code to do something appropriate
238 * after a kernel panic.
239 */
240static int __init setup_crashkernel(char *arg)
241{
242 unsigned long size, base;
243 char *p;
244 if (!arg)
245 return -EINVAL;
246 size = memparse(arg, &p);
247 if (arg == p)
248 return -EINVAL;
249 if (*p == '@') {
250 base = memparse(p+1, &p);
251 /* FIXME: Do I want a sanity check to validate the
252 * memory range? Yes you do, but it's too early for
253 * e820 -AK */
254 crashk_res.start = base;
255 crashk_res.end = base + size - 1;
256 }
257 return 0;
258}
259early_param("crashkernel", setup_crashkernel);
260
261void arch_crash_save_vmcoreinfo(void) 234void arch_crash_save_vmcoreinfo(void)
262{ 235{
263#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE 236#ifdef CONFIG_ARCH_DISCONTIGMEM_ENABLE
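
Both copies of the ad-hoc "crashkernel=size@addr" parser are deleted here; the option is now handled by the shared parse_crashkernel() helper, and the reserve_crashkernel() functions added to setup_32.c and setup_64.c later in this diff do the actual bootmem reservation. A condensed sketch of the caller side (error reporting and the base==0 case omitted):

#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/bootmem.h>

static void __init example_reserve_crashkernel(unsigned long long total_mem)
{
	unsigned long long crash_size, crash_base;

	if (parse_crashkernel(boot_command_line, total_mem,
			      &crash_size, &crash_base) == 0 &&
	    crash_size && crash_base) {
		crashk_res.start = crash_base;
		crashk_res.end   = crash_base + crash_size - 1;
		reserve_bootmem(crash_base, crash_size);
	}
}
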
diff --git a/arch/x86/kernel/mce_64.c b/arch/x86/kernel/mce_64.c
index 66e6b797b2cb..2cf20de5beca 100644
--- a/arch/x86/kernel/mce_64.c
+++ b/arch/x86/kernel/mce_64.c
@@ -799,7 +799,8 @@ static __cpuinit int mce_create_device(unsigned int cpu)
799{ 799{
800 int err; 800 int err;
801 int i; 801 int i;
802 if (!mce_available(&cpu_data[cpu])) 802
803 if (!mce_available(&cpu_data(cpu)))
803 return -EIO; 804 return -EIO;
804 805
805 memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject)); 806 memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject));
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 0d2afd96aca4..752fb16a817d 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/mce_amd_64.c
@@ -472,11 +472,11 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
472 sprintf(name, "threshold_bank%i", bank); 472 sprintf(name, "threshold_bank%i", bank);
473 473
474#ifdef CONFIG_SMP 474#ifdef CONFIG_SMP
475 if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) { /* symlink */ 475 if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */
476 i = first_cpu(per_cpu(cpu_core_map, cpu)); 476 i = first_cpu(per_cpu(cpu_core_map, cpu));
477 477
478 /* first core not up yet */ 478 /* first core not up yet */
479 if (cpu_data[i].cpu_core_id) 479 if (cpu_data(i).cpu_core_id)
480 goto out; 480 goto out;
481 481
482 /* already linked */ 482 /* already linked */
diff --git a/arch/x86/kernel/microcode.c b/arch/x86/kernel/microcode.c
index 09cf78110358..09c315214a5e 100644
--- a/arch/x86/kernel/microcode.c
+++ b/arch/x86/kernel/microcode.c
@@ -132,7 +132,7 @@ static struct ucode_cpu_info {
132 132
133static void collect_cpu_info(int cpu_num) 133static void collect_cpu_info(int cpu_num)
134{ 134{
135 struct cpuinfo_x86 *c = cpu_data + cpu_num; 135 struct cpuinfo_x86 *c = &cpu_data(cpu_num);
136 struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num; 136 struct ucode_cpu_info *uci = ucode_cpu_info + cpu_num;
137 unsigned int val[2]; 137 unsigned int val[2];
138 138
@@ -522,7 +522,7 @@ static struct platform_device *microcode_pdev;
522static int cpu_request_microcode(int cpu) 522static int cpu_request_microcode(int cpu)
523{ 523{
524 char name[30]; 524 char name[30];
525 struct cpuinfo_x86 *c = cpu_data + cpu; 525 struct cpuinfo_x86 *c = &cpu_data(cpu);
526 const struct firmware *firmware; 526 const struct firmware *firmware;
527 void *buf; 527 void *buf;
528 unsigned long size; 528 unsigned long size;
@@ -570,7 +570,7 @@ static int cpu_request_microcode(int cpu)
570 570
571static int apply_microcode_check_cpu(int cpu) 571static int apply_microcode_check_cpu(int cpu)
572{ 572{
573 struct cpuinfo_x86 *c = cpu_data + cpu; 573 struct cpuinfo_x86 *c = &cpu_data(cpu);
574 struct ucode_cpu_info *uci = ucode_cpu_info + cpu; 574 struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
575 cpumask_t old; 575 cpumask_t old;
576 unsigned int val[2]; 576 unsigned int val[2];
diff --git a/arch/x86/kernel/mpparse_64.c b/arch/x86/kernel/mpparse_64.c
index 8bf0ca03ac8e..ef4aab123581 100644
--- a/arch/x86/kernel/mpparse_64.c
+++ b/arch/x86/kernel/mpparse_64.c
@@ -57,6 +57,8 @@ unsigned long mp_lapic_addr = 0;
57 57
58/* Processor that is doing the boot up */ 58/* Processor that is doing the boot up */
59unsigned int boot_cpu_id = -1U; 59unsigned int boot_cpu_id = -1U;
60EXPORT_SYMBOL(boot_cpu_id);
61
60/* Internal processor count */ 62/* Internal processor count */
61unsigned int num_processors __cpuinitdata = 0; 63unsigned int num_processors __cpuinitdata = 0;
62 64
@@ -86,7 +88,7 @@ static int __init mpf_checksum(unsigned char *mp, int len)
86 return sum & 0xFF; 88 return sum & 0xFF;
87} 89}
88 90
89static void __cpuinit MP_processor_info (struct mpc_config_processor *m) 91static void __cpuinit MP_processor_info(struct mpc_config_processor *m)
90{ 92{
91 int cpu; 93 int cpu;
92 cpumask_t tmp_map; 94 cpumask_t tmp_map;
@@ -123,7 +125,18 @@ static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
123 cpu = 0; 125 cpu = 0;
124 } 126 }
125 bios_cpu_apicid[cpu] = m->mpc_apicid; 127 bios_cpu_apicid[cpu] = m->mpc_apicid;
126 x86_cpu_to_apicid[cpu] = m->mpc_apicid; 128 /*
 129 * We get called early in the start_kernel initialization
130 * process when the per_cpu data area is not yet setup, so we
131 * use a static array that is removed after the per_cpu data
132 * area is created.
133 */
134 if (x86_cpu_to_apicid_ptr) {
135 u8 *x86_cpu_to_apicid = (u8 *)x86_cpu_to_apicid_ptr;
136 x86_cpu_to_apicid[cpu] = m->mpc_apicid;
137 } else {
138 per_cpu(x86_cpu_to_apicid, cpu) = m->mpc_apicid;
139 }
127 140
128 cpu_set(cpu, cpu_possible_map); 141 cpu_set(cpu, cpu_possible_map);
129 cpu_set(cpu, cpu_present_map); 142 cpu_set(cpu, cpu_present_map);
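
MP_processor_info() can run before the per-CPU areas exist, so the APIC id is recorded in the __initdata array for as long as x86_cpu_to_apicid_ptr points at it, and in the per-CPU variable once that pointer has been cleared after the per-CPU areas are set up (the clearing itself is not part of this hunk). The dispatch, reduced to an illustrative helper (the <asm/smp.h> include is assumed to carry the declarations):

#include <linux/percpu.h>
#include <asm/smp.h>

static void example_record_apicid(int cpu, u8 apicid)
{
	if (x86_cpu_to_apicid_ptr) {
		/* too early: per_cpu area not set up, use the static table */
		u8 *early = (u8 *)x86_cpu_to_apicid_ptr;
		early[cpu] = apicid;
	} else {
		per_cpu(x86_cpu_to_apicid, cpu) = apicid;
	}
}
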
diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
index e18e516cf549..ee6eba4ecfea 100644
--- a/arch/x86/kernel/msr.c
+++ b/arch/x86/kernel/msr.c
@@ -112,7 +112,7 @@ static ssize_t msr_write(struct file *file, const char __user *buf,
112static int msr_open(struct inode *inode, struct file *file) 112static int msr_open(struct inode *inode, struct file *file)
113{ 113{
114 unsigned int cpu = iminor(file->f_path.dentry->d_inode); 114 unsigned int cpu = iminor(file->f_path.dentry->d_inode);
115 struct cpuinfo_x86 *c = &(cpu_data)[cpu]; 115 struct cpuinfo_x86 *c = &cpu_data(cpu);
116 116
117 if (cpu >= NR_CPUS || !cpu_online(cpu)) 117 if (cpu >= NR_CPUS || !cpu_online(cpu))
118 return -ENXIO; /* No such CPU */ 118 return -ENXIO; /* No such CPU */
diff --git a/arch/x86/kernel/pci-dma_64.c b/arch/x86/kernel/pci-dma_64.c
index b2b42bdb0a15..afaf9f12c032 100644
--- a/arch/x86/kernel/pci-dma_64.c
+++ b/arch/x86/kernel/pci-dma_64.c
@@ -11,7 +11,7 @@
11#include <asm/iommu.h> 11#include <asm/iommu.h>
12#include <asm/calgary.h> 12#include <asm/calgary.h>
13 13
14int iommu_merge __read_mostly = 0; 14int iommu_merge __read_mostly = 1;
15EXPORT_SYMBOL(iommu_merge); 15EXPORT_SYMBOL(iommu_merge);
16 16
17dma_addr_t bad_dma_address __read_mostly; 17dma_addr_t bad_dma_address __read_mostly;
diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
index 097aeafce5ff..7b899584d290 100644
--- a/arch/x86/kernel/process_32.c
+++ b/arch/x86/kernel/process_32.c
@@ -295,34 +295,52 @@ static int __init idle_setup(char *str)
295} 295}
296early_param("idle", idle_setup); 296early_param("idle", idle_setup);
297 297
298void show_regs(struct pt_regs * regs) 298void __show_registers(struct pt_regs *regs, int all)
299{ 299{
300 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; 300 unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
301 unsigned long d0, d1, d2, d3, d6, d7; 301 unsigned long d0, d1, d2, d3, d6, d7;
302 unsigned long esp;
303 unsigned short ss, gs;
304
305 if (user_mode_vm(regs)) {
306 esp = regs->esp;
307 ss = regs->xss & 0xffff;
308 savesegment(gs, gs);
309 } else {
310 esp = (unsigned long) (&regs->esp);
311 savesegment(ss, ss);
312 savesegment(gs, gs);
313 }
302 314
303 printk("\n"); 315 printk("\n");
304 printk("Pid: %d, comm: %20s\n", current->pid, current->comm); 316 printk("Pid: %d, comm: %s %s (%s %.*s)\n",
305 printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id()); 317 task_pid_nr(current), current->comm,
318 print_tainted(), init_utsname()->release,
319 (int)strcspn(init_utsname()->version, " "),
320 init_utsname()->version);
321
322 printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
323 0xffff & regs->xcs, regs->eip, regs->eflags,
324 smp_processor_id());
306 print_symbol("EIP is at %s\n", regs->eip); 325 print_symbol("EIP is at %s\n", regs->eip);
307 326
308 if (user_mode_vm(regs))
309 printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
310 printk(" EFLAGS: %08lx %s (%s %.*s)\n",
311 regs->eflags, print_tainted(), init_utsname()->release,
312 (int)strcspn(init_utsname()->version, " "),
313 init_utsname()->version);
314 printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", 327 printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
315 regs->eax,regs->ebx,regs->ecx,regs->edx); 328 regs->eax, regs->ebx, regs->ecx, regs->edx);
316 printk("ESI: %08lx EDI: %08lx EBP: %08lx", 329 printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
317 regs->esi, regs->edi, regs->ebp); 330 regs->esi, regs->edi, regs->ebp, esp);
318 printk(" DS: %04x ES: %04x FS: %04x\n", 331 printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
319 0xffff & regs->xds,0xffff & regs->xes, 0xffff & regs->xfs); 332 regs->xds & 0xffff, regs->xes & 0xffff,
333 regs->xfs & 0xffff, gs, ss);
334
335 if (!all)
336 return;
320 337
321 cr0 = read_cr0(); 338 cr0 = read_cr0();
322 cr2 = read_cr2(); 339 cr2 = read_cr2();
323 cr3 = read_cr3(); 340 cr3 = read_cr3();
324 cr4 = read_cr4_safe(); 341 cr4 = read_cr4_safe();
325 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); 342 printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
343 cr0, cr2, cr3, cr4);
326 344
327 get_debugreg(d0, 0); 345 get_debugreg(d0, 0);
328 get_debugreg(d1, 1); 346 get_debugreg(d1, 1);
@@ -330,10 +348,16 @@ void show_regs(struct pt_regs * regs)
330 get_debugreg(d3, 3); 348 get_debugreg(d3, 3);
331 printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", 349 printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
332 d0, d1, d2, d3); 350 d0, d1, d2, d3);
351
333 get_debugreg(d6, 6); 352 get_debugreg(d6, 6);
334 get_debugreg(d7, 7); 353 get_debugreg(d7, 7);
335 printk("DR6: %08lx DR7: %08lx\n", d6, d7); 354 printk("DR6: %08lx DR7: %08lx\n",
355 d6, d7);
356}
336 357
358void show_regs(struct pt_regs *regs)
359{
360 __show_registers(regs, 1);
337 show_trace(NULL, regs, &regs->esp); 361 show_trace(NULL, regs, &regs->esp);
338} 362}
339 363
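
show_regs() keeps its old behaviour (full register dump followed by the stack trace), but the printing is now factored into __show_registers(regs, all); passing all == 0 stops before the control and debug registers, so other dump paths can print the short form. An illustrative caller of the short form (whether any particular fault path uses it is outside this hunk):

#include <asm/ptrace.h>

static void example_dump_short(struct pt_regs *regs)
{
	__show_registers(regs, 0);	/* registers only, no CR0..CR4 or DR dump */
}
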
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index d769e204f942..a4ce1911efdf 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -45,9 +45,12 @@ static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
45 if (!(config & 0x2)) 45 if (!(config & 0x2))
46 pci_write_config_byte(dev, 0xf4, config); 46 pci_write_config_byte(dev, 0xf4, config);
47} 47}
48DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance); 48DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH,
49DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance); 49 quirk_intel_irqbalance);
50DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance); 50DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH,
51 quirk_intel_irqbalance);
52DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH,
53 quirk_intel_irqbalance);
51#endif 54#endif
52 55
53#if defined(CONFIG_HPET_TIMER) 56#if defined(CONFIG_HPET_TIMER)
@@ -56,7 +59,8 @@ unsigned long force_hpet_address;
56static enum { 59static enum {
57 NONE_FORCE_HPET_RESUME, 60 NONE_FORCE_HPET_RESUME,
58 OLD_ICH_FORCE_HPET_RESUME, 61 OLD_ICH_FORCE_HPET_RESUME,
59 ICH_FORCE_HPET_RESUME 62 ICH_FORCE_HPET_RESUME,
63 VT8237_FORCE_HPET_RESUME
60} force_hpet_resume_type; 64} force_hpet_resume_type;
61 65
62static void __iomem *rcba_base; 66static void __iomem *rcba_base;
@@ -146,17 +150,17 @@ static void ich_force_enable_hpet(struct pci_dev *dev)
146} 150}
147 151
148DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0, 152DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ESB2_0,
149 ich_force_enable_hpet); 153 ich_force_enable_hpet);
150DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1, 154DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH6_1,
151 ich_force_enable_hpet); 155 ich_force_enable_hpet);
152DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0, 156DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0,
153 ich_force_enable_hpet); 157 ich_force_enable_hpet);
154DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1, 158DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1,
155 ich_force_enable_hpet); 159 ich_force_enable_hpet);
156DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31, 160DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_31,
157 ich_force_enable_hpet); 161 ich_force_enable_hpet);
158DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1, 162DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH8_1,
159 ich_force_enable_hpet); 163 ich_force_enable_hpet);
160 164
161 165
162static struct pci_dev *cached_dev; 166static struct pci_dev *cached_dev;
@@ -232,10 +236,91 @@ static void old_ich_force_enable_hpet(struct pci_dev *dev)
232 printk(KERN_DEBUG "Failed to force enable HPET\n"); 236 printk(KERN_DEBUG "Failed to force enable HPET\n");
233} 237}
234 238
239/*
240 * Undocumented chipset features. Make sure that the user enforced
241 * this.
242 */
243static void old_ich_force_enable_hpet_user(struct pci_dev *dev)
244{
245 if (hpet_force_user)
246 old_ich_force_enable_hpet(dev);
247}
248
249DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_0,
250 old_ich_force_enable_hpet_user);
251DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801CA_12,
252 old_ich_force_enable_hpet_user);
253DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_0,
254 old_ich_force_enable_hpet_user);
255DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801DB_12,
256 old_ich_force_enable_hpet_user);
235DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0, 257DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_0,
236 old_ich_force_enable_hpet); 258 old_ich_force_enable_hpet);
237DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12, 259DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801EB_12,
238 old_ich_force_enable_hpet); 260 old_ich_force_enable_hpet);
261
262
263static void vt8237_force_hpet_resume(void)
264{
265 u32 val;
266
267 if (!force_hpet_address || !cached_dev)
268 return;
269
270 val = 0xfed00000 | 0x80;
271 pci_write_config_dword(cached_dev, 0x68, val);
272
273 pci_read_config_dword(cached_dev, 0x68, &val);
274 if (val & 0x80)
275 printk(KERN_DEBUG "Force enabled HPET at resume\n");
276 else
277 BUG();
278}
279
280static void vt8237_force_enable_hpet(struct pci_dev *dev)
281{
282 u32 uninitialized_var(val);
283
284 if (!hpet_force_user || hpet_address || force_hpet_address)
285 return;
286
287 pci_read_config_dword(dev, 0x68, &val);
288 /*
289 * Bit 7 is HPET enable bit.
290 * Bit 31:10 is HPET base address (contrary to what datasheet claims)
291 */
292 if (val & 0x80) {
293 force_hpet_address = (val & ~0x3ff);
294 printk(KERN_DEBUG "HPET at base address 0x%lx\n",
295 force_hpet_address);
296 return;
297 }
298
299 /*
300 * HPET is disabled. Trying enabling at FED00000 and check
301 * whether it sticks
302 */
303 val = 0xfed00000 | 0x80;
304 pci_write_config_dword(dev, 0x68, val);
305
306 pci_read_config_dword(dev, 0x68, &val);
307 if (val & 0x80) {
308 force_hpet_address = (val & ~0x3ff);
309 printk(KERN_DEBUG "Force enabled HPET at base address 0x%lx\n",
310 force_hpet_address);
311 cached_dev = dev;
312 force_hpet_resume_type = VT8237_FORCE_HPET_RESUME;
313 return;
314 }
315
316 printk(KERN_DEBUG "Failed to force enable HPET\n");
317}
318
319DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235,
320 vt8237_force_enable_hpet);
321DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
322 vt8237_force_enable_hpet);
323
239 324
240void force_hpet_resume(void) 325void force_hpet_resume(void)
241{ 326{
@@ -246,6 +331,9 @@ void force_hpet_resume(void)
246 case OLD_ICH_FORCE_HPET_RESUME: 331 case OLD_ICH_FORCE_HPET_RESUME:
247 return old_ich_force_hpet_resume(); 332 return old_ich_force_hpet_resume();
248 333
334 case VT8237_FORCE_HPET_RESUME:
335 return vt8237_force_hpet_resume();
336
249 default: 337 default:
250 break; 338 break;
251 } 339 }
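
Forcing HPET on via undocumented chipset registers is only done when the user explicitly asked for it: the new old_ich_force_enable_hpet_user() and vt8237_force_enable_hpet() fixups above check hpet_force_user, which the "hpet=force" boot option sets in hpet.c earlier in this diff. The gating pattern, reduced to a sketch with example_* stand-ins:

#include <linux/pci.h>

extern int hpet_force_user;	/* defined in hpet.c by this series */

static void example_force_enable_hpet(struct pci_dev *dev)
{
	/* poke the chipset's HPET enable bit here, as in the hunks above */
}

static void example_force_enable_hpet_user(struct pci_dev *dev)
{
	if (hpet_force_user)		/* only when "hpet=force" was given */
		example_force_enable_hpet(dev);
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8237,
			 example_force_enable_hpet_user);
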
diff --git a/arch/x86/kernel/reboot_64.c b/arch/x86/kernel/reboot_64.c
index 368db2b9c5ac..776eb06b6512 100644
--- a/arch/x86/kernel/reboot_64.c
+++ b/arch/x86/kernel/reboot_64.c
@@ -11,6 +11,7 @@
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <asm/io.h> 12#include <asm/io.h>
13#include <asm/delay.h> 13#include <asm/delay.h>
14#include <asm/desc.h>
14#include <asm/hw_irq.h> 15#include <asm/hw_irq.h>
15#include <asm/system.h> 16#include <asm/system.h>
16#include <asm/pgtable.h> 17#include <asm/pgtable.h>
@@ -136,7 +137,7 @@ void machine_emergency_restart(void)
136 } 137 }
137 138
138 case BOOT_TRIPLE: 139 case BOOT_TRIPLE:
139 __asm__ __volatile__("lidt (%0)": :"r" (&no_idt)); 140 load_idt((const struct desc_ptr *)&no_idt);
140 __asm__ __volatile__("int3"); 141 __asm__ __volatile__("int3");
141 142
142 reboot_type = BOOT_KBD; 143 reboot_type = BOOT_KBD;
diff --git a/arch/x86/kernel/reboot_fixups_32.c b/arch/x86/kernel/reboot_fixups_32.c
index 8b30b26ad069..1a07bbea7be3 100644
--- a/arch/x86/kernel/reboot_fixups_32.c
+++ b/arch/x86/kernel/reboot_fixups_32.c
@@ -12,6 +12,7 @@
12#include <linux/interrupt.h> 12#include <linux/interrupt.h>
13#include <asm/reboot_fixups.h> 13#include <asm/reboot_fixups.h>
14#include <asm/msr.h> 14#include <asm/msr.h>
15#include <asm/geode.h>
15 16
16static void cs5530a_warm_reset(struct pci_dev *dev) 17static void cs5530a_warm_reset(struct pci_dev *dev)
17{ 18{
@@ -24,11 +25,8 @@ static void cs5530a_warm_reset(struct pci_dev *dev)
24 25
25static void cs5536_warm_reset(struct pci_dev *dev) 26static void cs5536_warm_reset(struct pci_dev *dev)
26{ 27{
27 /* 28 /* writing 1 to the LSB of this MSR causes a hard reset */
28 * 6.6.2.12 Soft Reset (DIVIL_SOFT_RESET) 29 wrmsrl(MSR_DIVIL_SOFT_RESET, 1ULL);
29 * writing 1 to the LSB of this MSR causes a hard reset.
30 */
31 wrmsrl(0x51400017, 1ULL);
32 udelay(50); /* shouldn't get here but be safe and spin a while */ 30 udelay(50); /* shouldn't get here but be safe and spin a while */
33} 31}
34 32
diff --git a/arch/x86/kernel/setup64.c b/arch/x86/kernel/setup64.c
index ba9188235057..3558ac78c926 100644
--- a/arch/x86/kernel/setup64.c
+++ b/arch/x86/kernel/setup64.c
@@ -185,6 +185,12 @@ void __cpuinit check_efer(void)
185unsigned long kernel_eflags; 185unsigned long kernel_eflags;
186 186
187/* 187/*
188 * Copies of the original ist values from the tss are only accessed during
189 * debugging, no special alignment required.
190 */
191DEFINE_PER_CPU(struct orig_ist, orig_ist);
192
193/*
188 * cpu_init() initializes state that is per-CPU. Some data is already 194 * cpu_init() initializes state that is per-CPU. Some data is already
189 * initialized (naturally) in the bootstrap process, such as the GDT 195 * initialized (naturally) in the bootstrap process, such as the GDT
190 * and IDT. We reload them nevertheless, this function acts as a 196 * and IDT. We reload them nevertheless, this function acts as a
@@ -224,8 +230,8 @@ void __cpuinit cpu_init (void)
224 memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE); 230 memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
225 231
226 cpu_gdt_descr[cpu].size = GDT_SIZE; 232 cpu_gdt_descr[cpu].size = GDT_SIZE;
227 asm volatile("lgdt %0" :: "m" (cpu_gdt_descr[cpu])); 233 load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
228 asm volatile("lidt %0" :: "m" (idt_descr)); 234 load_idt((const struct desc_ptr *)&idt_descr);
229 235
230 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); 236 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
231 syscall_init(); 237 syscall_init();
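
The raw "lgdt"/"lidt" asm statements in cpu_init(), head64.c and reboot_64.c are replaced with the load_gdt()/load_idt() helpers from <asm/desc.h>, so descriptor-table loads go through one interface. The resulting idiom, mirroring cpu_init() above:

#include <linux/init.h>
#include <asm/desc.h>

static void __cpuinit example_load_tables(int cpu)
{
	load_gdt((const struct desc_ptr *)&cpu_gdt_descr[cpu]);
	load_idt((const struct desc_ptr *)&idt_descr);
}
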
diff --git a/arch/x86/kernel/setup_32.c b/arch/x86/kernel/setup_32.c
index b87a6fd5ba48..e4f199124761 100644
--- a/arch/x86/kernel/setup_32.c
+++ b/arch/x86/kernel/setup_32.c
@@ -378,6 +378,49 @@ extern unsigned long __init setup_memory(void);
378extern void zone_sizes_init(void); 378extern void zone_sizes_init(void);
379#endif /* !CONFIG_NEED_MULTIPLE_NODES */ 379#endif /* !CONFIG_NEED_MULTIPLE_NODES */
380 380
381static inline unsigned long long get_total_mem(void)
382{
383 unsigned long long total;
384
385 total = max_low_pfn - min_low_pfn;
386#ifdef CONFIG_HIGHMEM
387 total += highend_pfn - highstart_pfn;
388#endif
389
390 return total << PAGE_SHIFT;
391}
392
393#ifdef CONFIG_KEXEC
394static void __init reserve_crashkernel(void)
395{
396 unsigned long long total_mem;
397 unsigned long long crash_size, crash_base;
398 int ret;
399
400 total_mem = get_total_mem();
401
402 ret = parse_crashkernel(boot_command_line, total_mem,
403 &crash_size, &crash_base);
404 if (ret == 0 && crash_size > 0) {
405 if (crash_base > 0) {
406 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
407 "for crashkernel (System RAM: %ldMB)\n",
408 (unsigned long)(crash_size >> 20),
409 (unsigned long)(crash_base >> 20),
410 (unsigned long)(total_mem >> 20));
411 crashk_res.start = crash_base;
412 crashk_res.end = crash_base + crash_size - 1;
413 reserve_bootmem(crash_base, crash_size);
414 } else
415 printk(KERN_INFO "crashkernel reservation failed - "
416 "you have to specify a base address\n");
417 }
418}
419#else
420static inline void __init reserve_crashkernel(void)
421{}
422#endif
423
381void __init setup_bootmem_allocator(void) 424void __init setup_bootmem_allocator(void)
382{ 425{
383 unsigned long bootmap_size; 426 unsigned long bootmap_size;
@@ -453,11 +496,7 @@ void __init setup_bootmem_allocator(void)
453 } 496 }
454 } 497 }
455#endif 498#endif
456#ifdef CONFIG_KEXEC 499 reserve_crashkernel();
457 if (crashk_res.start != crashk_res.end)
458 reserve_bootmem(crashk_res.start,
459 crashk_res.end - crashk_res.start + 1);
460#endif
461} 500}
462 501
463/* 502/*
@@ -622,9 +661,7 @@ void __init setup_arch(char **cmdline_p)
622#endif 661#endif
623 662
624#ifdef CONFIG_PCI 663#ifdef CONFIG_PCI
625#ifdef CONFIG_X86_IO_APIC 664 early_quirks();
626 check_acpi_pci(); /* Checks more than just ACPI actually */
627#endif
628#endif 665#endif
629 666
630#ifdef CONFIG_ACPI 667#ifdef CONFIG_ACPI
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index 5a19f0cc5b67..31322d42eaae 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -191,6 +191,37 @@ static inline void copy_edd(void)
191} 191}
192#endif 192#endif
193 193
194#ifdef CONFIG_KEXEC
195static void __init reserve_crashkernel(void)
196{
197 unsigned long long free_mem;
198 unsigned long long crash_size, crash_base;
199 int ret;
200
201 free_mem = ((unsigned long long)max_low_pfn - min_low_pfn) << PAGE_SHIFT;
202
203 ret = parse_crashkernel(boot_command_line, free_mem,
204 &crash_size, &crash_base);
205 if (ret == 0 && crash_size) {
206 if (crash_base > 0) {
207 printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
208 "for crashkernel (System RAM: %ldMB)\n",
209 (unsigned long)(crash_size >> 20),
210 (unsigned long)(crash_base >> 20),
211 (unsigned long)(free_mem >> 20));
212 crashk_res.start = crash_base;
213 crashk_res.end = crash_base + crash_size - 1;
214 reserve_bootmem(crash_base, crash_size);
215 } else
216 printk(KERN_INFO "crashkernel reservation failed - "
217 "you have to specify a base address\n");
218 }
219}
220#else
221static inline void __init reserve_crashkernel(void)
222{}
223#endif
224
194#define EBDA_ADDR_POINTER 0x40E 225#define EBDA_ADDR_POINTER 0x40E
195 226
196unsigned __initdata ebda_addr; 227unsigned __initdata ebda_addr;
@@ -271,6 +302,11 @@ void __init setup_arch(char **cmdline_p)
271 302
272 dmi_scan_machine(); 303 dmi_scan_machine();
273 304
305#ifdef CONFIG_SMP
306 /* setup to use the static apicid table during kernel startup */
307 x86_cpu_to_apicid_ptr = (void *)&x86_cpu_to_apicid_init;
308#endif
309
274#ifdef CONFIG_ACPI 310#ifdef CONFIG_ACPI
275 /* 311 /*
276 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT). 312 * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
@@ -357,13 +393,7 @@ void __init setup_arch(char **cmdline_p)
357 } 393 }
358 } 394 }
359#endif 395#endif
360#ifdef CONFIG_KEXEC 396 reserve_crashkernel();
361 if (crashk_res.start != crashk_res.end) {
362 reserve_bootmem_generic(crashk_res.start,
363 crashk_res.end - crashk_res.start + 1);
364 }
365#endif
366
367 paging_init(); 397 paging_init();
368 398
369#ifdef CONFIG_PCI 399#ifdef CONFIG_PCI
@@ -529,7 +559,7 @@ static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
529 but in the same order as the HT nodeids. 559 but in the same order as the HT nodeids.
530 If that doesn't result in a usable node fall back to the 560 If that doesn't result in a usable node fall back to the
531 path for the previous case. */ 561 path for the previous case. */
532 int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits); 562 int ht_nodeid = apicid - (cpu_data(0).phys_proc_id << bits);
533 if (ht_nodeid >= 0 && 563 if (ht_nodeid >= 0 &&
534 apicid_to_node[ht_nodeid] != NUMA_NO_NODE) 564 apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
535 node = apicid_to_node[ht_nodeid]; 565 node = apicid_to_node[ht_nodeid];
@@ -853,6 +883,7 @@ void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
853 883
854#ifdef CONFIG_SMP 884#ifdef CONFIG_SMP
855 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff; 885 c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
886 c->cpu_index = 0;
856#endif 887#endif
857} 888}
858 889
@@ -959,6 +990,7 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
959static int show_cpuinfo(struct seq_file *m, void *v) 990static int show_cpuinfo(struct seq_file *m, void *v)
960{ 991{
961 struct cpuinfo_x86 *c = v; 992 struct cpuinfo_x86 *c = v;
993 int cpu = 0;
962 994
963 /* 995 /*
964 * These flag bits must match the definitions in <asm/cpufeature.h>. 996 * These flag bits must match the definitions in <asm/cpufeature.h>.
@@ -1037,8 +1069,9 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1037 1069
1038 1070
1039#ifdef CONFIG_SMP 1071#ifdef CONFIG_SMP
1040 if (!cpu_online(c-cpu_data)) 1072 if (!cpu_online(c->cpu_index))
1041 return 0; 1073 return 0;
1074 cpu = c->cpu_index;
1042#endif 1075#endif
1043 1076
1044 seq_printf(m,"processor\t: %u\n" 1077 seq_printf(m,"processor\t: %u\n"
@@ -1046,7 +1079,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1046 "cpu family\t: %d\n" 1079 "cpu family\t: %d\n"
1047 "model\t\t: %d\n" 1080 "model\t\t: %d\n"
1048 "model name\t: %s\n", 1081 "model name\t: %s\n",
1049 (unsigned)(c-cpu_data), 1082 (unsigned)cpu,
1050 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown", 1083 c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
1051 c->x86, 1084 c->x86,
1052 (int)c->x86_model, 1085 (int)c->x86_model,
@@ -1058,7 +1091,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1058 seq_printf(m, "stepping\t: unknown\n"); 1091 seq_printf(m, "stepping\t: unknown\n");
1059 1092
1060 if (cpu_has(c,X86_FEATURE_TSC)) { 1093 if (cpu_has(c,X86_FEATURE_TSC)) {
1061 unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data)); 1094 unsigned int freq = cpufreq_quick_get((unsigned)cpu);
1062 if (!freq) 1095 if (!freq)
1063 freq = cpu_khz; 1096 freq = cpu_khz;
1064 seq_printf(m, "cpu MHz\t\t: %u.%03u\n", 1097 seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
@@ -1071,7 +1104,6 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1071 1104
1072#ifdef CONFIG_SMP 1105#ifdef CONFIG_SMP
1073 if (smp_num_siblings * c->x86_max_cores > 1) { 1106 if (smp_num_siblings * c->x86_max_cores > 1) {
1074 int cpu = c - cpu_data;
1075 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id); 1107 seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
1076 seq_printf(m, "siblings\t: %d\n", 1108 seq_printf(m, "siblings\t: %d\n",
1077 cpus_weight(per_cpu(cpu_core_map, cpu))); 1109 cpus_weight(per_cpu(cpu_core_map, cpu)));
@@ -1129,12 +1161,16 @@ static int show_cpuinfo(struct seq_file *m, void *v)
1129 1161
1130static void *c_start(struct seq_file *m, loff_t *pos) 1162static void *c_start(struct seq_file *m, loff_t *pos)
1131{ 1163{
1132 return *pos < NR_CPUS ? cpu_data + *pos : NULL; 1164 if (*pos == 0) /* just in case, cpu 0 is not the first */
1165 *pos = first_cpu(cpu_possible_map);
1166 if ((*pos) < NR_CPUS && cpu_possible(*pos))
1167 return &cpu_data(*pos);
1168 return NULL;
1133} 1169}
1134 1170
1135static void *c_next(struct seq_file *m, void *v, loff_t *pos) 1171static void *c_next(struct seq_file *m, void *v, loff_t *pos)
1136{ 1172{
1137 ++*pos; 1173 *pos = next_cpu(*pos, cpu_possible_map);
1138 return c_start(m, pos); 1174 return c_start(m, pos);
1139} 1175}
1140 1176
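
Throughout this file the cpu_data[n] array indexing gives way to a cpu_data(n) accessor backed by the new per-CPU cpu_info variable (defined in smpboot_64.c later in this series). The header side of that change is not shown in these hunks; as an assumption for readability, the accessor presumably looks roughly like:

    /* sketch of the accessor used above; the real definition lives in the
     * processor header, which this patch excerpt does not show */
    DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
    #define cpu_data(cpu)    per_cpu(cpu_info, cpu)
    #define current_cpu_data __get_cpu_var(cpu_info)

This is also why c->cpu_index is now stored explicitly: with per-CPU storage, the old pointer arithmetic c - cpu_data can no longer recover the CPU number.
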
diff --git a/arch/x86/kernel/signal_32.c b/arch/x86/kernel/signal_32.c
index 0d79df3c5631..6dc394b87255 100644
--- a/arch/x86/kernel/signal_32.c
+++ b/arch/x86/kernel/signal_32.c
@@ -200,8 +200,8 @@ badframe:
200 if (show_unhandled_signals && printk_ratelimit()) 200 if (show_unhandled_signals && printk_ratelimit())
201 printk("%s%s[%d] bad frame in sigreturn frame:%p eip:%lx" 201 printk("%s%s[%d] bad frame in sigreturn frame:%p eip:%lx"
202 " esp:%lx oeax:%lx\n", 202 " esp:%lx oeax:%lx\n",
203 current->pid > 1 ? KERN_INFO : KERN_EMERG, 203 task_pid_nr(current) > 1 ? KERN_INFO : KERN_EMERG,
204 current->comm, current->pid, frame, regs->eip, 204 current->comm, task_pid_nr(current), frame, regs->eip,
205 regs->esp, regs->orig_eax); 205 regs->esp, regs->orig_eax);
206 206
207 force_sig(SIGSEGV, current); 207 force_sig(SIGSEGV, current);
diff --git a/arch/x86/kernel/smp_32.c b/arch/x86/kernel/smp_32.c
index 791d9f8036ae..2621ca3b2e4d 100644
--- a/arch/x86/kernel/smp_32.c
+++ b/arch/x86/kernel/smp_32.c
@@ -610,7 +610,7 @@ static void stop_this_cpu (void * dummy)
610 */ 610 */
611 cpu_clear(smp_processor_id(), cpu_online_map); 611 cpu_clear(smp_processor_id(), cpu_online_map);
612 disable_local_APIC(); 612 disable_local_APIC();
613 if (cpu_data[smp_processor_id()].hlt_works_ok) 613 if (cpu_data(smp_processor_id()).hlt_works_ok)
614 for(;;) halt(); 614 for(;;) halt();
615 for (;;); 615 for (;;);
616} 616}
@@ -676,7 +676,7 @@ static int convert_apicid_to_cpu(int apic_id)
676 int i; 676 int i;
677 677
678 for (i = 0; i < NR_CPUS; i++) { 678 for (i = 0; i < NR_CPUS; i++) {
679 if (x86_cpu_to_apicid[i] == apic_id) 679 if (per_cpu(x86_cpu_to_apicid, i) == apic_id)
680 return i; 680 return i;
681 } 681 }
682 return -1; 682 return -1;
diff --git a/arch/x86/kernel/smp_64.c b/arch/x86/kernel/smp_64.c
index 5c2964727d19..03fa6ed559c6 100644
--- a/arch/x86/kernel/smp_64.c
+++ b/arch/x86/kernel/smp_64.c
@@ -322,17 +322,27 @@ void unlock_ipi_call_lock(void)
322} 322}
323 323
324/* 324/*
 325 * this function sends a 'generic call function' IPI to one other CPU 325 * this function sends a 'generic call function' IPI to all other CPU
326 * in the system. 326 * of the system defined in the mask.
327 *
328 * cpu is a standard Linux logical CPU number.
329 */ 327 */
330static void 328
331__smp_call_function_single(int cpu, void (*func) (void *info), void *info, 329static int
332 int nonatomic, int wait) 330__smp_call_function_mask(cpumask_t mask,
331 void (*func)(void *), void *info,
332 int wait)
333{ 333{
334 struct call_data_struct data; 334 struct call_data_struct data;
335 int cpus = 1; 335 cpumask_t allbutself;
336 int cpus;
337
338 allbutself = cpu_online_map;
339 cpu_clear(smp_processor_id(), allbutself);
340
341 cpus_and(mask, mask, allbutself);
342 cpus = cpus_weight(mask);
343
344 if (!cpus)
345 return 0;
336 346
337 data.func = func; 347 data.func = func;
338 data.info = info; 348 data.info = info;
@@ -343,19 +353,55 @@ __smp_call_function_single(int cpu, void (*func) (void *info), void *info,
343 353
344 call_data = &data; 354 call_data = &data;
345 wmb(); 355 wmb();
346 /* Send a message to all other CPUs and wait for them to respond */ 356
347 send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR); 357 /* Send a message to other CPUs */
358 if (cpus_equal(mask, allbutself))
359 send_IPI_allbutself(CALL_FUNCTION_VECTOR);
360 else
361 send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
348 362
349 /* Wait for response */ 363 /* Wait for response */
350 while (atomic_read(&data.started) != cpus) 364 while (atomic_read(&data.started) != cpus)
351 cpu_relax(); 365 cpu_relax();
352 366
353 if (!wait) 367 if (!wait)
354 return; 368 return 0;
355 369
356 while (atomic_read(&data.finished) != cpus) 370 while (atomic_read(&data.finished) != cpus)
357 cpu_relax(); 371 cpu_relax();
372
373 return 0;
374}
375/**
376 * smp_call_function_mask(): Run a function on a set of other CPUs.
377 * @mask: The set of cpus to run on. Must not include the current cpu.
378 * @func: The function to run. This must be fast and non-blocking.
379 * @info: An arbitrary pointer to pass to the function.
380 * @wait: If true, wait (atomically) until function has completed on other CPUs.
381 *
382 * Returns 0 on success, else a negative status code.
383 *
384 * If @wait is true, then returns once @func has returned; otherwise
385 * it returns just before the target cpu calls @func.
386 *
387 * You must not call this function with disabled interrupts or from a
388 * hardware interrupt handler or from a bottom half handler.
389 */
390int smp_call_function_mask(cpumask_t mask,
391 void (*func)(void *), void *info,
392 int wait)
393{
394 int ret;
395
396 /* Can deadlock when called with interrupts disabled */
397 WARN_ON(irqs_disabled());
398
399 spin_lock(&call_lock);
400 ret = __smp_call_function_mask(mask, func, info, wait);
401 spin_unlock(&call_lock);
402 return ret;
358} 403}
404EXPORT_SYMBOL(smp_call_function_mask);
359 405
360/* 406/*
361 * smp_call_function_single - Run a function on a specific CPU 407 * smp_call_function_single - Run a function on a specific CPU
@@ -374,6 +420,7 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
374 int nonatomic, int wait) 420 int nonatomic, int wait)
375{ 421{
376 /* prevent preemption and reschedule on another processor */ 422 /* prevent preemption and reschedule on another processor */
423 int ret;
377 int me = get_cpu(); 424 int me = get_cpu();
378 425
379 /* Can deadlock when called with interrupts disabled */ 426 /* Can deadlock when called with interrupts disabled */
@@ -387,51 +434,14 @@ int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
387 return 0; 434 return 0;
388 } 435 }
389 436
390 spin_lock(&call_lock); 437 ret = smp_call_function_mask(cpumask_of_cpu(cpu), func, info, wait);
391 __smp_call_function_single(cpu, func, info, nonatomic, wait); 438
392 spin_unlock(&call_lock);
393 put_cpu(); 439 put_cpu();
394 return 0; 440 return ret;
395} 441}
396EXPORT_SYMBOL(smp_call_function_single); 442EXPORT_SYMBOL(smp_call_function_single);
397 443
398/* 444/*
399 * this function sends a 'generic call function' IPI to all other CPUs
400 * in the system.
401 */
402static void __smp_call_function (void (*func) (void *info), void *info,
403 int nonatomic, int wait)
404{
405 struct call_data_struct data;
406 int cpus = num_online_cpus()-1;
407
408 if (!cpus)
409 return;
410
411 data.func = func;
412 data.info = info;
413 atomic_set(&data.started, 0);
414 data.wait = wait;
415 if (wait)
416 atomic_set(&data.finished, 0);
417
418 call_data = &data;
419 wmb();
420 /* Send a message to all other CPUs and wait for them to respond */
421 send_IPI_allbutself(CALL_FUNCTION_VECTOR);
422
423 /* Wait for response */
424 while (atomic_read(&data.started) != cpus)
425 cpu_relax();
426
427 if (!wait)
428 return;
429
430 while (atomic_read(&data.finished) != cpus)
431 cpu_relax();
432}
433
434/*
435 * smp_call_function - run a function on all other CPUs. 445 * smp_call_function - run a function on all other CPUs.
436 * @func: The function to run. This must be fast and non-blocking. 446 * @func: The function to run. This must be fast and non-blocking.
437 * @info: An arbitrary pointer to pass to the function. 447 * @info: An arbitrary pointer to pass to the function.
@@ -449,10 +459,7 @@ static void __smp_call_function (void (*func) (void *info), void *info,
449int smp_call_function (void (*func) (void *info), void *info, int nonatomic, 459int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
450 int wait) 460 int wait)
451{ 461{
452 spin_lock(&call_lock); 462 return smp_call_function_mask(cpu_online_map, func, info, wait);
453 __smp_call_function(func,info,nonatomic,wait);
454 spin_unlock(&call_lock);
455 return 0;
456} 463}
457EXPORT_SYMBOL(smp_call_function); 464EXPORT_SYMBOL(smp_call_function);
458 465
@@ -479,7 +486,7 @@ void smp_send_stop(void)
479 /* Don't deadlock on the call lock in panic */ 486 /* Don't deadlock on the call lock in panic */
480 nolock = !spin_trylock(&call_lock); 487 nolock = !spin_trylock(&call_lock);
481 local_irq_save(flags); 488 local_irq_save(flags);
482 __smp_call_function(stop_this_cpu, NULL, 0, 0); 489 __smp_call_function_mask(cpu_online_map, stop_this_cpu, NULL, 0);
483 if (!nolock) 490 if (!nolock)
484 spin_unlock(&call_lock); 491 spin_unlock(&call_lock);
485 disable_local_APIC(); 492 disable_local_APIC();
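
With this change smp_call_function() and smp_call_function_single() become thin wrappers around the new smp_call_function_mask(). A minimal, hypothetical caller (flush_local_counters() and flush_counters_on() are invented names, not functions added by this patch) would look like:

    /* run a fast, non-sleeping callback on a set of other CPUs and wait */
    static void flush_local_counters(void *unused)
    {
            /* per-CPU work; runs in IPI context, must not block */
    }

    static void flush_counters_on(cpumask_t mask)
    {
            int this_cpu = get_cpu();       /* pin ourselves */

            cpu_clear(this_cpu, mask);      /* mask must not include us */
            smp_call_function_mask(mask, flush_local_counters, NULL, 1);
            put_cpu();
    }

As the kernel-doc above notes, this must not be called with interrupts disabled or from interrupt context, since the caller spins on call_lock waiting for the target CPUs to check in.
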
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index be3faac04719..7b8fdfa169dd 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -67,7 +67,7 @@ int smp_num_siblings = 1;
67EXPORT_SYMBOL(smp_num_siblings); 67EXPORT_SYMBOL(smp_num_siblings);
68 68
69/* Last level cache ID of each logical CPU */ 69/* Last level cache ID of each logical CPU */
70int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; 70DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
71 71
72/* representing HT siblings of each logical CPU */ 72/* representing HT siblings of each logical CPU */
73DEFINE_PER_CPU(cpumask_t, cpu_sibling_map); 73DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
@@ -89,12 +89,20 @@ EXPORT_SYMBOL(cpu_possible_map);
89static cpumask_t smp_commenced_mask; 89static cpumask_t smp_commenced_mask;
90 90
91/* Per CPU bogomips and other parameters */ 91/* Per CPU bogomips and other parameters */
92struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; 92DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
93EXPORT_SYMBOL(cpu_data); 93EXPORT_PER_CPU_SYMBOL(cpu_info);
94 94
95u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = 95/*
96 { [0 ... NR_CPUS-1] = 0xff }; 96 * The following static array is used during kernel startup
97EXPORT_SYMBOL(x86_cpu_to_apicid); 97 * and the x86_cpu_to_apicid_ptr contains the address of the
98 * array during this time. Is it zeroed when the per_cpu
99 * data area is removed.
100 */
101u8 x86_cpu_to_apicid_init[NR_CPUS] __initdata =
102 { [0 ... NR_CPUS-1] = BAD_APICID };
103void *x86_cpu_to_apicid_ptr;
104DEFINE_PER_CPU(u8, x86_cpu_to_apicid) = BAD_APICID;
105EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
98 106
99u8 apicid_2_node[MAX_APICID]; 107u8 apicid_2_node[MAX_APICID];
100 108
@@ -150,9 +158,10 @@ void __init smp_alloc_memory(void)
150 158
151void __cpuinit smp_store_cpu_info(int id) 159void __cpuinit smp_store_cpu_info(int id)
152{ 160{
153 struct cpuinfo_x86 *c = cpu_data + id; 161 struct cpuinfo_x86 *c = &cpu_data(id);
154 162
155 *c = boot_cpu_data; 163 *c = boot_cpu_data;
164 c->cpu_index = id;
156 if (id!=0) 165 if (id!=0)
157 identify_secondary_cpu(c); 166 identify_secondary_cpu(c);
158 /* 167 /*
@@ -294,7 +303,7 @@ static int cpucount;
294/* maps the cpu to the sched domain representing multi-core */ 303/* maps the cpu to the sched domain representing multi-core */
295cpumask_t cpu_coregroup_map(int cpu) 304cpumask_t cpu_coregroup_map(int cpu)
296{ 305{
297 struct cpuinfo_x86 *c = cpu_data + cpu; 306 struct cpuinfo_x86 *c = &cpu_data(cpu);
298 /* 307 /*
299 * For perf, we return last level cache shared map. 308 * For perf, we return last level cache shared map.
300 * And for power savings, we return cpu_core_map 309 * And for power savings, we return cpu_core_map
@@ -311,41 +320,41 @@ static cpumask_t cpu_sibling_setup_map;
311void __cpuinit set_cpu_sibling_map(int cpu) 320void __cpuinit set_cpu_sibling_map(int cpu)
312{ 321{
313 int i; 322 int i;
314 struct cpuinfo_x86 *c = cpu_data; 323 struct cpuinfo_x86 *c = &cpu_data(cpu);
315 324
316 cpu_set(cpu, cpu_sibling_setup_map); 325 cpu_set(cpu, cpu_sibling_setup_map);
317 326
318 if (smp_num_siblings > 1) { 327 if (smp_num_siblings > 1) {
319 for_each_cpu_mask(i, cpu_sibling_setup_map) { 328 for_each_cpu_mask(i, cpu_sibling_setup_map) {
320 if (c[cpu].phys_proc_id == c[i].phys_proc_id && 329 if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
321 c[cpu].cpu_core_id == c[i].cpu_core_id) { 330 c->cpu_core_id == cpu_data(i).cpu_core_id) {
322 cpu_set(i, per_cpu(cpu_sibling_map, cpu)); 331 cpu_set(i, per_cpu(cpu_sibling_map, cpu));
323 cpu_set(cpu, per_cpu(cpu_sibling_map, i)); 332 cpu_set(cpu, per_cpu(cpu_sibling_map, i));
324 cpu_set(i, per_cpu(cpu_core_map, cpu)); 333 cpu_set(i, per_cpu(cpu_core_map, cpu));
325 cpu_set(cpu, per_cpu(cpu_core_map, i)); 334 cpu_set(cpu, per_cpu(cpu_core_map, i));
326 cpu_set(i, c[cpu].llc_shared_map); 335 cpu_set(i, c->llc_shared_map);
327 cpu_set(cpu, c[i].llc_shared_map); 336 cpu_set(cpu, cpu_data(i).llc_shared_map);
328 } 337 }
329 } 338 }
330 } else { 339 } else {
331 cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 340 cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
332 } 341 }
333 342
334 cpu_set(cpu, c[cpu].llc_shared_map); 343 cpu_set(cpu, c->llc_shared_map);
335 344
336 if (current_cpu_data.x86_max_cores == 1) { 345 if (current_cpu_data.x86_max_cores == 1) {
337 per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); 346 per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
338 c[cpu].booted_cores = 1; 347 c->booted_cores = 1;
339 return; 348 return;
340 } 349 }
341 350
342 for_each_cpu_mask(i, cpu_sibling_setup_map) { 351 for_each_cpu_mask(i, cpu_sibling_setup_map) {
343 if (cpu_llc_id[cpu] != BAD_APICID && 352 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
344 cpu_llc_id[cpu] == cpu_llc_id[i]) { 353 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
345 cpu_set(i, c[cpu].llc_shared_map); 354 cpu_set(i, c->llc_shared_map);
346 cpu_set(cpu, c[i].llc_shared_map); 355 cpu_set(cpu, cpu_data(i).llc_shared_map);
347 } 356 }
348 if (c[cpu].phys_proc_id == c[i].phys_proc_id) { 357 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
349 cpu_set(i, per_cpu(cpu_core_map, cpu)); 358 cpu_set(i, per_cpu(cpu_core_map, cpu));
350 cpu_set(cpu, per_cpu(cpu_core_map, i)); 359 cpu_set(cpu, per_cpu(cpu_core_map, i));
351 /* 360 /*
@@ -357,15 +366,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
357 * the booted_cores for this new cpu 366 * the booted_cores for this new cpu
358 */ 367 */
359 if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) 368 if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
360 c[cpu].booted_cores++; 369 c->booted_cores++;
361 /* 370 /*
362 * increment the core count for all 371 * increment the core count for all
363 * the other cpus in this package 372 * the other cpus in this package
364 */ 373 */
365 if (i != cpu) 374 if (i != cpu)
366 c[i].booted_cores++; 375 cpu_data(i).booted_cores++;
367 } else if (i != cpu && !c[cpu].booted_cores) 376 } else if (i != cpu && !c->booted_cores)
368 c[cpu].booted_cores = c[i].booted_cores; 377 c->booted_cores = cpu_data(i).booted_cores;
369 } 378 }
370 } 379 }
371} 380}
@@ -804,7 +813,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
804 813
805 irq_ctx_init(cpu); 814 irq_ctx_init(cpu);
806 815
807 x86_cpu_to_apicid[cpu] = apicid; 816 per_cpu(x86_cpu_to_apicid, cpu) = apicid;
808 /* 817 /*
809 * This grunge runs the startup process for 818 * This grunge runs the startup process for
810 * the targeted processor. 819 * the targeted processor.
@@ -844,7 +853,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
844 /* number CPUs logically, starting from 1 (BSP is 0) */ 853 /* number CPUs logically, starting from 1 (BSP is 0) */
845 Dprintk("OK.\n"); 854 Dprintk("OK.\n");
846 printk("CPU%d: ", cpu); 855 printk("CPU%d: ", cpu);
847 print_cpu_info(&cpu_data[cpu]); 856 print_cpu_info(&cpu_data(cpu));
848 Dprintk("CPU has booted.\n"); 857 Dprintk("CPU has booted.\n");
849 } else { 858 } else {
850 boot_error= 1; 859 boot_error= 1;
@@ -866,7 +875,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
866 cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */ 875 cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
867 cpucount--; 876 cpucount--;
868 } else { 877 } else {
869 x86_cpu_to_apicid[cpu] = apicid; 878 per_cpu(x86_cpu_to_apicid, cpu) = apicid;
870 cpu_set(cpu, cpu_present_map); 879 cpu_set(cpu, cpu_present_map);
871 } 880 }
872 881
@@ -915,7 +924,7 @@ static int __cpuinit __smp_prepare_cpu(int cpu)
915 struct warm_boot_cpu_info info; 924 struct warm_boot_cpu_info info;
916 int apicid, ret; 925 int apicid, ret;
917 926
918 apicid = x86_cpu_to_apicid[cpu]; 927 apicid = per_cpu(x86_cpu_to_apicid, cpu);
919 if (apicid == BAD_APICID) { 928 if (apicid == BAD_APICID) {
920 ret = -ENODEV; 929 ret = -ENODEV;
921 goto exit; 930 goto exit;
@@ -961,11 +970,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
961 */ 970 */
962 smp_store_cpu_info(0); /* Final full version of the data */ 971 smp_store_cpu_info(0); /* Final full version of the data */
963 printk("CPU%d: ", 0); 972 printk("CPU%d: ", 0);
964 print_cpu_info(&cpu_data[0]); 973 print_cpu_info(&cpu_data(0));
965 974
966 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID)); 975 boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
967 boot_cpu_logical_apicid = logical_smp_processor_id(); 976 boot_cpu_logical_apicid = logical_smp_processor_id();
968 x86_cpu_to_apicid[0] = boot_cpu_physical_apicid; 977 per_cpu(x86_cpu_to_apicid, 0) = boot_cpu_physical_apicid;
969 978
970 current_thread_info()->cpu = 0; 979 current_thread_info()->cpu = 0;
971 980
@@ -1008,6 +1017,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1008 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n"); 1017 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
1009 smpboot_clear_io_apic_irqs(); 1018 smpboot_clear_io_apic_irqs();
1010 phys_cpu_present_map = physid_mask_of_physid(0); 1019 phys_cpu_present_map = physid_mask_of_physid(0);
1020 map_cpu_to_logical_apicid();
1011 cpu_set(0, per_cpu(cpu_sibling_map, 0)); 1021 cpu_set(0, per_cpu(cpu_sibling_map, 0));
1012 cpu_set(0, per_cpu(cpu_core_map, 0)); 1022 cpu_set(0, per_cpu(cpu_core_map, 0));
1013 return; 1023 return;
@@ -1029,6 +1039,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1029 } 1039 }
1030 smpboot_clear_io_apic_irqs(); 1040 smpboot_clear_io_apic_irqs();
1031 phys_cpu_present_map = physid_mask_of_physid(0); 1041 phys_cpu_present_map = physid_mask_of_physid(0);
1042 map_cpu_to_logical_apicid();
1032 cpu_set(0, per_cpu(cpu_sibling_map, 0)); 1043 cpu_set(0, per_cpu(cpu_sibling_map, 0));
1033 cpu_set(0, per_cpu(cpu_core_map, 0)); 1044 cpu_set(0, per_cpu(cpu_core_map, 0));
1034 return; 1045 return;
@@ -1082,7 +1093,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
1082 Dprintk("Before bogomips.\n"); 1093 Dprintk("Before bogomips.\n");
1083 for (cpu = 0; cpu < NR_CPUS; cpu++) 1094 for (cpu = 0; cpu < NR_CPUS; cpu++)
1084 if (cpu_isset(cpu, cpu_callout_map)) 1095 if (cpu_isset(cpu, cpu_callout_map))
1085 bogosum += cpu_data[cpu].loops_per_jiffy; 1096 bogosum += cpu_data(cpu).loops_per_jiffy;
1086 printk(KERN_INFO 1097 printk(KERN_INFO
1087 "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", 1098 "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
1088 cpucount+1, 1099 cpucount+1,
@@ -1152,7 +1163,7 @@ void __init native_smp_prepare_boot_cpu(void)
1152void remove_siblinginfo(int cpu) 1163void remove_siblinginfo(int cpu)
1153{ 1164{
1154 int sibling; 1165 int sibling;
1155 struct cpuinfo_x86 *c = cpu_data; 1166 struct cpuinfo_x86 *c = &cpu_data(cpu);
1156 1167
1157 for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { 1168 for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
1158 cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); 1169 cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -1160,15 +1171,15 @@ void remove_siblinginfo(int cpu)
1160 * last thread sibling in this cpu core going down 1171 * last thread sibling in this cpu core going down
1161 */ 1172 */
1162 if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) 1173 if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
1163 c[sibling].booted_cores--; 1174 cpu_data(sibling).booted_cores--;
1164 } 1175 }
1165 1176
1166 for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) 1177 for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
1167 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); 1178 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
1168 cpus_clear(per_cpu(cpu_sibling_map, cpu)); 1179 cpus_clear(per_cpu(cpu_sibling_map, cpu));
1169 cpus_clear(per_cpu(cpu_core_map, cpu)); 1180 cpus_clear(per_cpu(cpu_core_map, cpu));
1170 c[cpu].phys_proc_id = 0; 1181 c->phys_proc_id = 0;
1171 c[cpu].cpu_core_id = 0; 1182 c->cpu_core_id = 0;
1172 cpu_clear(cpu, cpu_sibling_setup_map); 1183 cpu_clear(cpu, cpu_sibling_setup_map);
1173} 1184}
1174 1185
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index e351ac4ab5b1..fd1fff6a35a2 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -65,7 +65,7 @@ int smp_num_siblings = 1;
65EXPORT_SYMBOL(smp_num_siblings); 65EXPORT_SYMBOL(smp_num_siblings);
66 66
67/* Last level cache ID of each logical CPU */ 67/* Last level cache ID of each logical CPU */
68u8 cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID}; 68DEFINE_PER_CPU(u8, cpu_llc_id) = BAD_APICID;
69 69
70/* Bitmask of currently online CPUs */ 70/* Bitmask of currently online CPUs */
71cpumask_t cpu_online_map __read_mostly; 71cpumask_t cpu_online_map __read_mostly;
@@ -84,8 +84,8 @@ cpumask_t cpu_possible_map;
84EXPORT_SYMBOL(cpu_possible_map); 84EXPORT_SYMBOL(cpu_possible_map);
85 85
86/* Per CPU bogomips and other parameters */ 86/* Per CPU bogomips and other parameters */
87struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; 87DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
88EXPORT_SYMBOL(cpu_data); 88EXPORT_PER_CPU_SYMBOL(cpu_info);
89 89
90/* Set when the idlers are all forked */ 90/* Set when the idlers are all forked */
91int smp_threads_ready; 91int smp_threads_ready;
@@ -138,9 +138,10 @@ static unsigned long __cpuinit setup_trampoline(void)
138 138
139static void __cpuinit smp_store_cpu_info(int id) 139static void __cpuinit smp_store_cpu_info(int id)
140{ 140{
141 struct cpuinfo_x86 *c = cpu_data + id; 141 struct cpuinfo_x86 *c = &cpu_data(id);
142 142
143 *c = boot_cpu_data; 143 *c = boot_cpu_data;
144 c->cpu_index = id;
144 identify_cpu(c); 145 identify_cpu(c);
145 print_cpu_info(c); 146 print_cpu_info(c);
146} 147}
@@ -237,7 +238,7 @@ void __cpuinit smp_callin(void)
237/* maps the cpu to the sched domain representing multi-core */ 238/* maps the cpu to the sched domain representing multi-core */
238cpumask_t cpu_coregroup_map(int cpu) 239cpumask_t cpu_coregroup_map(int cpu)
239{ 240{
240 struct cpuinfo_x86 *c = cpu_data + cpu; 241 struct cpuinfo_x86 *c = &cpu_data(cpu);
241 /* 242 /*
242 * For perf, we return last level cache shared map. 243 * For perf, we return last level cache shared map.
243 * And for power savings, we return cpu_core_map 244 * And for power savings, we return cpu_core_map
@@ -254,41 +255,41 @@ static cpumask_t cpu_sibling_setup_map;
254static inline void set_cpu_sibling_map(int cpu) 255static inline void set_cpu_sibling_map(int cpu)
255{ 256{
256 int i; 257 int i;
257 struct cpuinfo_x86 *c = cpu_data; 258 struct cpuinfo_x86 *c = &cpu_data(cpu);
258 259
259 cpu_set(cpu, cpu_sibling_setup_map); 260 cpu_set(cpu, cpu_sibling_setup_map);
260 261
261 if (smp_num_siblings > 1) { 262 if (smp_num_siblings > 1) {
262 for_each_cpu_mask(i, cpu_sibling_setup_map) { 263 for_each_cpu_mask(i, cpu_sibling_setup_map) {
263 if (c[cpu].phys_proc_id == c[i].phys_proc_id && 264 if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
264 c[cpu].cpu_core_id == c[i].cpu_core_id) { 265 c->cpu_core_id == cpu_data(i).cpu_core_id) {
265 cpu_set(i, per_cpu(cpu_sibling_map, cpu)); 266 cpu_set(i, per_cpu(cpu_sibling_map, cpu));
266 cpu_set(cpu, per_cpu(cpu_sibling_map, i)); 267 cpu_set(cpu, per_cpu(cpu_sibling_map, i));
267 cpu_set(i, per_cpu(cpu_core_map, cpu)); 268 cpu_set(i, per_cpu(cpu_core_map, cpu));
268 cpu_set(cpu, per_cpu(cpu_core_map, i)); 269 cpu_set(cpu, per_cpu(cpu_core_map, i));
269 cpu_set(i, c[cpu].llc_shared_map); 270 cpu_set(i, c->llc_shared_map);
270 cpu_set(cpu, c[i].llc_shared_map); 271 cpu_set(cpu, cpu_data(i).llc_shared_map);
271 } 272 }
272 } 273 }
273 } else { 274 } else {
274 cpu_set(cpu, per_cpu(cpu_sibling_map, cpu)); 275 cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
275 } 276 }
276 277
277 cpu_set(cpu, c[cpu].llc_shared_map); 278 cpu_set(cpu, c->llc_shared_map);
278 279
279 if (current_cpu_data.x86_max_cores == 1) { 280 if (current_cpu_data.x86_max_cores == 1) {
280 per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu); 281 per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
281 c[cpu].booted_cores = 1; 282 c->booted_cores = 1;
282 return; 283 return;
283 } 284 }
284 285
285 for_each_cpu_mask(i, cpu_sibling_setup_map) { 286 for_each_cpu_mask(i, cpu_sibling_setup_map) {
286 if (cpu_llc_id[cpu] != BAD_APICID && 287 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
287 cpu_llc_id[cpu] == cpu_llc_id[i]) { 288 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
288 cpu_set(i, c[cpu].llc_shared_map); 289 cpu_set(i, c->llc_shared_map);
289 cpu_set(cpu, c[i].llc_shared_map); 290 cpu_set(cpu, cpu_data(i).llc_shared_map);
290 } 291 }
291 if (c[cpu].phys_proc_id == c[i].phys_proc_id) { 292 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
292 cpu_set(i, per_cpu(cpu_core_map, cpu)); 293 cpu_set(i, per_cpu(cpu_core_map, cpu));
293 cpu_set(cpu, per_cpu(cpu_core_map, i)); 294 cpu_set(cpu, per_cpu(cpu_core_map, i));
294 /* 295 /*
@@ -300,15 +301,15 @@ static inline void set_cpu_sibling_map(int cpu)
300 * the booted_cores for this new cpu 301 * the booted_cores for this new cpu
301 */ 302 */
302 if (first_cpu(per_cpu(cpu_sibling_map, i)) == i) 303 if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
303 c[cpu].booted_cores++; 304 c->booted_cores++;
304 /* 305 /*
305 * increment the core count for all 306 * increment the core count for all
306 * the other cpus in this package 307 * the other cpus in this package
307 */ 308 */
308 if (i != cpu) 309 if (i != cpu)
309 c[i].booted_cores++; 310 cpu_data(i).booted_cores++;
310 } else if (i != cpu && !c[cpu].booted_cores) 311 } else if (i != cpu && !c->booted_cores)
311 c[cpu].booted_cores = c[i].booted_cores; 312 c->booted_cores = cpu_data(i).booted_cores;
312 } 313 }
313 } 314 }
314} 315}
@@ -694,7 +695,7 @@ do_rest:
694 clear_node_cpumask(cpu); /* was set by numa_add_cpu */ 695 clear_node_cpumask(cpu); /* was set by numa_add_cpu */
695 cpu_clear(cpu, cpu_present_map); 696 cpu_clear(cpu, cpu_present_map);
696 cpu_clear(cpu, cpu_possible_map); 697 cpu_clear(cpu, cpu_possible_map);
697 x86_cpu_to_apicid[cpu] = BAD_APICID; 698 per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
698 return -EIO; 699 return -EIO;
699 } 700 }
700 701
@@ -841,6 +842,26 @@ static int __init smp_sanity_check(unsigned max_cpus)
841} 842}
842 843
843/* 844/*
845 * Copy apicid's found by MP_processor_info from initial array to the per cpu
846 * data area. The x86_cpu_to_apicid_init array is then expendable and the
847 * x86_cpu_to_apicid_ptr is zeroed indicating that the static array is no
848 * longer available.
849 */
850void __init smp_set_apicids(void)
851{
852 int cpu;
853
854 for_each_cpu_mask(cpu, cpu_possible_map) {
855 if (per_cpu_offset(cpu))
856 per_cpu(x86_cpu_to_apicid, cpu) =
857 x86_cpu_to_apicid_init[cpu];
858 }
859
860 /* indicate the static array will be going away soon */
861 x86_cpu_to_apicid_ptr = NULL;
862}
863
864/*
844 * Prepare for SMP bootup. The MP table or ACPI has been read 865 * Prepare for SMP bootup. The MP table or ACPI has been read
845 * earlier. Just do some sanity checking here and enable APIC mode. 866 * earlier. Just do some sanity checking here and enable APIC mode.
846 */ 867 */
@@ -849,6 +870,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
849 nmi_watchdog_default(); 870 nmi_watchdog_default();
850 current_cpu_data = boot_cpu_data; 871 current_cpu_data = boot_cpu_data;
851 current_thread_info()->cpu = 0; /* needed? */ 872 current_thread_info()->cpu = 0; /* needed? */
873 smp_set_apicids();
852 set_cpu_sibling_map(0); 874 set_cpu_sibling_map(0);
853 875
854 if (smp_sanity_check(max_cpus) < 0) { 876 if (smp_sanity_check(max_cpus) < 0) {
@@ -968,7 +990,7 @@ void __init smp_cpus_done(unsigned int max_cpus)
968static void remove_siblinginfo(int cpu) 990static void remove_siblinginfo(int cpu)
969{ 991{
970 int sibling; 992 int sibling;
971 struct cpuinfo_x86 *c = cpu_data; 993 struct cpuinfo_x86 *c = &cpu_data(cpu);
972 994
973 for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) { 995 for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
974 cpu_clear(cpu, per_cpu(cpu_core_map, sibling)); 996 cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -976,15 +998,15 @@ static void remove_siblinginfo(int cpu)
976 * last thread sibling in this cpu core going down 998 * last thread sibling in this cpu core going down
977 */ 999 */
978 if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) 1000 if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
979 c[sibling].booted_cores--; 1001 cpu_data(sibling).booted_cores--;
980 } 1002 }
981 1003
982 for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu)) 1004 for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
983 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling)); 1005 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
984 cpus_clear(per_cpu(cpu_sibling_map, cpu)); 1006 cpus_clear(per_cpu(cpu_sibling_map, cpu));
985 cpus_clear(per_cpu(cpu_core_map, cpu)); 1007 cpus_clear(per_cpu(cpu_core_map, cpu));
986 c[cpu].phys_proc_id = 0; 1008 c->phys_proc_id = 0;
987 c[cpu].cpu_core_id = 0; 1009 c->cpu_core_id = 0;
988 cpu_clear(cpu, cpu_sibling_setup_map); 1010 cpu_clear(cpu, cpu_sibling_setup_map);
989} 1011}
990 1012
diff --git a/arch/x86/kernel/suspend_64.c b/arch/x86/kernel/suspend_64.c
index f8fafe527ff1..622bb0268284 100644
--- a/arch/x86/kernel/suspend_64.c
+++ b/arch/x86/kernel/suspend_64.c
@@ -32,9 +32,9 @@ void __save_processor_state(struct saved_context *ctxt)
32 /* 32 /*
33 * descriptor tables 33 * descriptor tables
34 */ 34 */
35 asm volatile ("sgdt %0" : "=m" (ctxt->gdt_limit)); 35 store_gdt((struct desc_ptr *)&ctxt->gdt_limit);
36 asm volatile ("sidt %0" : "=m" (ctxt->idt_limit)); 36 store_idt((struct desc_ptr *)&ctxt->idt_limit);
37 asm volatile ("str %0" : "=m" (ctxt->tr)); 37 store_tr(ctxt->tr);
38 38
39 /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */ 39 /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */
40 /* 40 /*
@@ -91,8 +91,9 @@ void __restore_processor_state(struct saved_context *ctxt)
91 * now restore the descriptor tables to their proper values 91 * now restore the descriptor tables to their proper values
92 * ltr is done i fix_processor_context(). 92 * ltr is done i fix_processor_context().
93 */ 93 */
94 asm volatile ("lgdt %0" :: "m" (ctxt->gdt_limit)); 94 load_gdt((const struct desc_ptr *)&ctxt->gdt_limit);
95 asm volatile ("lidt %0" :: "m" (ctxt->idt_limit)); 95 load_idt((const struct desc_ptr *)&ctxt->idt_limit);
96
96 97
97 /* 98 /*
98 * segment registers 99 * segment registers
diff --git a/arch/x86/kernel/traps_32.c b/arch/x86/kernel/traps_32.c
index b132d3957dfc..cc9acace7e23 100644
--- a/arch/x86/kernel/traps_32.c
+++ b/arch/x86/kernel/traps_32.c
@@ -63,6 +63,9 @@
63 63
64int panic_on_unrecovered_nmi; 64int panic_on_unrecovered_nmi;
65 65
66DECLARE_BITMAP(used_vectors, NR_VECTORS);
67EXPORT_SYMBOL_GPL(used_vectors);
68
66asmlinkage int system_call(void); 69asmlinkage int system_call(void);
67 70
68/* Do we ignore FPU interrupts ? */ 71/* Do we ignore FPU interrupts ? */
@@ -288,48 +291,24 @@ EXPORT_SYMBOL(dump_stack);
288void show_registers(struct pt_regs *regs) 291void show_registers(struct pt_regs *regs)
289{ 292{
290 int i; 293 int i;
291 int in_kernel = 1; 294
292 unsigned long esp;
293 unsigned short ss, gs;
294
295 esp = (unsigned long) (&regs->esp);
296 savesegment(ss, ss);
297 savesegment(gs, gs);
298 if (user_mode_vm(regs)) {
299 in_kernel = 0;
300 esp = regs->esp;
301 ss = regs->xss & 0xffff;
302 }
303 print_modules(); 295 print_modules();
304 printk(KERN_EMERG "CPU: %d\n" 296 __show_registers(regs, 0);
305 KERN_EMERG "EIP: %04x:[<%08lx>] %s VLI\n"
306 KERN_EMERG "EFLAGS: %08lx (%s %.*s)\n",
307 smp_processor_id(), 0xffff & regs->xcs, regs->eip,
308 print_tainted(), regs->eflags, init_utsname()->release,
309 (int)strcspn(init_utsname()->version, " "),
310 init_utsname()->version);
311 print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
312 printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
313 regs->eax, regs->ebx, regs->ecx, regs->edx);
314 printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
315 regs->esi, regs->edi, regs->ebp, esp);
316 printk(KERN_EMERG "ds: %04x es: %04x fs: %04x gs: %04x ss: %04x\n",
317 regs->xds & 0xffff, regs->xes & 0xffff, regs->xfs & 0xffff, gs, ss);
318 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)", 297 printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
319 TASK_COMM_LEN, current->comm, current->pid, 298 TASK_COMM_LEN, current->comm, task_pid_nr(current),
320 current_thread_info(), current, task_thread_info(current)); 299 current_thread_info(), current, task_thread_info(current));
321 /* 300 /*
322 * When in-kernel, we also print out the stack and code at the 301 * When in-kernel, we also print out the stack and code at the
323 * time of the fault.. 302 * time of the fault..
324 */ 303 */
325 if (in_kernel) { 304 if (!user_mode_vm(regs)) {
326 u8 *eip; 305 u8 *eip;
327 unsigned int code_prologue = code_bytes * 43 / 64; 306 unsigned int code_prologue = code_bytes * 43 / 64;
328 unsigned int code_len = code_bytes; 307 unsigned int code_len = code_bytes;
329 unsigned char c; 308 unsigned char c;
330 309
331 printk("\n" KERN_EMERG "Stack: "); 310 printk("\n" KERN_EMERG "Stack: ");
332 show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG); 311 show_stack_log_lvl(NULL, regs, &regs->esp, KERN_EMERG);
333 312
334 printk(KERN_EMERG "Code: "); 313 printk(KERN_EMERG "Code: ");
335 314
@@ -374,11 +353,11 @@ int is_valid_bugaddr(unsigned long eip)
374void die(const char * str, struct pt_regs * regs, long err) 353void die(const char * str, struct pt_regs * regs, long err)
375{ 354{
376 static struct { 355 static struct {
377 spinlock_t lock; 356 raw_spinlock_t lock;
378 u32 lock_owner; 357 u32 lock_owner;
379 int lock_owner_depth; 358 int lock_owner_depth;
380 } die = { 359 } die = {
381 .lock = __SPIN_LOCK_UNLOCKED(die.lock), 360 .lock = __RAW_SPIN_LOCK_UNLOCKED,
382 .lock_owner = -1, 361 .lock_owner = -1,
383 .lock_owner_depth = 0 362 .lock_owner_depth = 0
384 }; 363 };
@@ -389,13 +368,14 @@ void die(const char * str, struct pt_regs * regs, long err)
389 368
390 if (die.lock_owner != raw_smp_processor_id()) { 369 if (die.lock_owner != raw_smp_processor_id()) {
391 console_verbose(); 370 console_verbose();
392 spin_lock_irqsave(&die.lock, flags); 371 __raw_spin_lock(&die.lock);
372 raw_local_save_flags(flags);
393 die.lock_owner = smp_processor_id(); 373 die.lock_owner = smp_processor_id();
394 die.lock_owner_depth = 0; 374 die.lock_owner_depth = 0;
395 bust_spinlocks(1); 375 bust_spinlocks(1);
396 } 376 }
397 else 377 else
398 local_save_flags(flags); 378 raw_local_save_flags(flags);
399 379
400 if (++die.lock_owner_depth < 3) { 380 if (++die.lock_owner_depth < 3) {
401 unsigned long esp; 381 unsigned long esp;
@@ -439,7 +419,8 @@ void die(const char * str, struct pt_regs * regs, long err)
439 bust_spinlocks(0); 419 bust_spinlocks(0);
440 die.lock_owner = -1; 420 die.lock_owner = -1;
441 add_taint(TAINT_DIE); 421 add_taint(TAINT_DIE);
442 spin_unlock_irqrestore(&die.lock, flags); 422 __raw_spin_unlock(&die.lock);
423 raw_local_irq_restore(flags);
443 424
444 if (!regs) 425 if (!regs)
445 return; 426 return;
@@ -622,7 +603,7 @@ fastcall void __kprobes do_general_protection(struct pt_regs * regs,
622 printk_ratelimit()) 603 printk_ratelimit())
623 printk(KERN_INFO 604 printk(KERN_INFO
624 "%s[%d] general protection eip:%lx esp:%lx error:%lx\n", 605 "%s[%d] general protection eip:%lx esp:%lx error:%lx\n",
625 current->comm, current->pid, 606 current->comm, task_pid_nr(current),
626 regs->eip, regs->esp, error_code); 607 regs->eip, regs->esp, error_code);
627 608
628 force_sig(SIGSEGV, current); 609 force_sig(SIGSEGV, current);
@@ -1142,6 +1123,8 @@ static void __init set_task_gate(unsigned int n, unsigned int gdt_entry)
1142 1123
1143void __init trap_init(void) 1124void __init trap_init(void)
1144{ 1125{
1126 int i;
1127
1145#ifdef CONFIG_EISA 1128#ifdef CONFIG_EISA
1146 void __iomem *p = ioremap(0x0FFFD9, 4); 1129 void __iomem *p = ioremap(0x0FFFD9, 4);
1147 if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) { 1130 if (readl(p) == 'E'+('I'<<8)+('S'<<16)+('A'<<24)) {
@@ -1201,6 +1184,11 @@ void __init trap_init(void)
1201 1184
1202 set_system_gate(SYSCALL_VECTOR,&system_call); 1185 set_system_gate(SYSCALL_VECTOR,&system_call);
1203 1186
1187 /* Reserve all the builtin and the syscall vector. */
1188 for (i = 0; i < FIRST_EXTERNAL_VECTOR; i++)
1189 set_bit(i, used_vectors);
1190 set_bit(SYSCALL_VECTOR, used_vectors);
1191
1204 /* 1192 /*
1205 * Should be a barrier for any external CPU state. 1193 * Should be a barrier for any external CPU state.
1206 */ 1194 */
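
trap_init() now records every CPU-internal vector plus the syscall vector in the used_vectors bitmap so that later vector allocation can avoid them. A small sketch of how a consumer might honour the bitmap (claim_vector() is an invented helper, not part of this patch):

    static int claim_vector(int vector)
    {
            if (vector < 0 || vector >= NR_VECTORS)
                    return -EINVAL;
            if (test_and_set_bit(vector, used_vectors))
                    return -EBUSY;  /* already reserved, e.g. by trap_init() */
            return 0;
    }
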
diff --git a/arch/x86/kernel/traps_64.c b/arch/x86/kernel/traps_64.c
index b4a9b3db1994..df690c3fa458 100644
--- a/arch/x86/kernel/traps_64.c
+++ b/arch/x86/kernel/traps_64.c
@@ -462,7 +462,7 @@ void out_of_line_bug(void)
462EXPORT_SYMBOL(out_of_line_bug); 462EXPORT_SYMBOL(out_of_line_bug);
463#endif 463#endif
464 464
465static DEFINE_SPINLOCK(die_lock); 465static raw_spinlock_t die_lock = __RAW_SPIN_LOCK_UNLOCKED;
466static int die_owner = -1; 466static int die_owner = -1;
467static unsigned int die_nest_count; 467static unsigned int die_nest_count;
468 468
@@ -474,13 +474,13 @@ unsigned __kprobes long oops_begin(void)
474 oops_enter(); 474 oops_enter();
475 475
476 /* racy, but better than risking deadlock. */ 476 /* racy, but better than risking deadlock. */
477 local_irq_save(flags); 477 raw_local_irq_save(flags);
478 cpu = smp_processor_id(); 478 cpu = smp_processor_id();
479 if (!spin_trylock(&die_lock)) { 479 if (!__raw_spin_trylock(&die_lock)) {
480 if (cpu == die_owner) 480 if (cpu == die_owner)
481 /* nested oops. should stop eventually */; 481 /* nested oops. should stop eventually */;
482 else 482 else
483 spin_lock(&die_lock); 483 __raw_spin_lock(&die_lock);
484 } 484 }
485 die_nest_count++; 485 die_nest_count++;
486 die_owner = cpu; 486 die_owner = cpu;
@@ -494,12 +494,10 @@ void __kprobes oops_end(unsigned long flags)
494 die_owner = -1; 494 die_owner = -1;
495 bust_spinlocks(0); 495 bust_spinlocks(0);
496 die_nest_count--; 496 die_nest_count--;
497 if (die_nest_count) 497 if (!die_nest_count)
498 /* We still own the lock */
499 local_irq_restore(flags);
500 else
501 /* Nest count reaches zero, release the lock. */ 498 /* Nest count reaches zero, release the lock. */
502 spin_unlock_irqrestore(&die_lock, flags); 499 __raw_spin_unlock(&die_lock);
500 raw_local_irq_restore(flags);
503 if (panic_on_oops) 501 if (panic_on_oops)
504 panic("Fatal exception"); 502 panic("Fatal exception");
505 oops_exit(); 503 oops_exit();
diff --git a/arch/x86/kernel/tsc_32.c b/arch/x86/kernel/tsc_32.c
index e87a3939ed40..b8a7cf671432 100644
--- a/arch/x86/kernel/tsc_32.c
+++ b/arch/x86/kernel/tsc_32.c
@@ -181,8 +181,8 @@ int recalibrate_cpu_khz(void)
181 if (cpu_has_tsc) { 181 if (cpu_has_tsc) {
182 cpu_khz = calculate_cpu_khz(); 182 cpu_khz = calculate_cpu_khz();
183 tsc_khz = cpu_khz; 183 tsc_khz = cpu_khz;
184 cpu_data[0].loops_per_jiffy = 184 cpu_data(0).loops_per_jiffy =
185 cpufreq_scale(cpu_data[0].loops_per_jiffy, 185 cpufreq_scale(cpu_data(0).loops_per_jiffy,
186 cpu_khz_old, cpu_khz); 186 cpu_khz_old, cpu_khz);
187 return 0; 187 return 0;
188 } else 188 } else
@@ -215,7 +215,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
215 return 0; 215 return 0;
216 } 216 }
217 ref_freq = freq->old; 217 ref_freq = freq->old;
218 loops_per_jiffy_ref = cpu_data[freq->cpu].loops_per_jiffy; 218 loops_per_jiffy_ref = cpu_data(freq->cpu).loops_per_jiffy;
219 cpu_khz_ref = cpu_khz; 219 cpu_khz_ref = cpu_khz;
220 } 220 }
221 221
@@ -223,7 +223,7 @@ time_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
223 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) || 223 (val == CPUFREQ_POSTCHANGE && freq->old > freq->new) ||
224 (val == CPUFREQ_RESUMECHANGE)) { 224 (val == CPUFREQ_RESUMECHANGE)) {
225 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 225 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
226 cpu_data[freq->cpu].loops_per_jiffy = 226 cpu_data(freq->cpu).loops_per_jiffy =
227 cpufreq_scale(loops_per_jiffy_ref, 227 cpufreq_scale(loops_per_jiffy_ref,
228 ref_freq, freq->new); 228 ref_freq, freq->new);
229 229
diff --git a/arch/x86/kernel/tsc_64.c b/arch/x86/kernel/tsc_64.c
index 9f22e542c374..9c70af45b42b 100644
--- a/arch/x86/kernel/tsc_64.c
+++ b/arch/x86/kernel/tsc_64.c
@@ -73,13 +73,13 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
73 struct cpufreq_freqs *freq = data; 73 struct cpufreq_freqs *freq = data;
74 unsigned long *lpj, dummy; 74 unsigned long *lpj, dummy;
75 75
76 if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC)) 76 if (cpu_has(&cpu_data(freq->cpu), X86_FEATURE_CONSTANT_TSC))
77 return 0; 77 return 0;
78 78
79 lpj = &dummy; 79 lpj = &dummy;
80 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 80 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
81#ifdef CONFIG_SMP 81#ifdef CONFIG_SMP
82 lpj = &cpu_data[freq->cpu].loops_per_jiffy; 82 lpj = &cpu_data(freq->cpu).loops_per_jiffy;
83#else 83#else
84 lpj = &boot_cpu_data.loops_per_jiffy; 84 lpj = &boot_cpu_data.loops_per_jiffy;
85#endif 85#endif
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
index 585541ca1a7e..78f2250963ae 100644
--- a/arch/x86/kernel/vsyscall_64.c
+++ b/arch/x86/kernel/vsyscall_64.c
@@ -48,7 +48,7 @@
48 ({unsigned long v; \ 48 ({unsigned long v; \
49 extern char __vsyscall_0; \ 49 extern char __vsyscall_0; \
50 asm("" : "=r" (v) : "0" (x)); \ 50 asm("" : "=r" (v) : "0" (x)); \
51 ((v - VSYSCALL_FIRST_PAGE) + __pa_symbol(&__vsyscall_0)); }) 51 ((v - VSYSCALL_START) + __pa_symbol(&__vsyscall_0)); })
52 52
53/* 53/*
54 * vsyscall_gtod_data contains data that is : 54 * vsyscall_gtod_data contains data that is :
@@ -291,7 +291,7 @@ static void __cpuinit vsyscall_set_cpu(int cpu)
291#ifdef CONFIG_NUMA 291#ifdef CONFIG_NUMA
292 node = cpu_to_node(cpu); 292 node = cpu_to_node(cpu);
293#endif 293#endif
294 if (cpu_has(&cpu_data[cpu], X86_FEATURE_RDTSCP)) 294 if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
295 write_rdtscp_aux((node << 12) | cpu); 295 write_rdtscp_aux((node << 12) | cpu);
296 296
297 /* Store cpu number in limit so that it can be loaded quickly 297 /* Store cpu number in limit so that it can be loaded quickly
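
vsyscall_set_cpu() packs the node number into bits 12 and up of the TSC_AUX value and the CPU number into the low 12 bits. The consumer side simply reverses that shift; as an illustration of the layout (not a copy of the vgetcpu source):

    /* aux as returned by rdtscp */
    static inline void decode_cpu_node(unsigned int aux,
                                       unsigned int *cpu, unsigned int *node)
    {
            *cpu  = aux & 0xfff;    /* low 12 bits */
            *node = aux >> 12;      /* remaining bits */
    }
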
diff --git a/arch/x86/lib/delay_32.c b/arch/x86/lib/delay_32.c
index f6edb11364df..952e7a89c2ac 100644
--- a/arch/x86/lib/delay_32.c
+++ b/arch/x86/lib/delay_32.c
@@ -82,7 +82,7 @@ inline void __const_udelay(unsigned long xloops)
82 __asm__("mull %0" 82 __asm__("mull %0"
83 :"=d" (xloops), "=&a" (d0) 83 :"=d" (xloops), "=&a" (d0)
84 :"1" (xloops), "0" 84 :"1" (xloops), "0"
85 (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4))); 85 (cpu_data(raw_smp_processor_id()).loops_per_jiffy * (HZ/4)));
86 86
87 __delay(++xloops); 87 __delay(++xloops);
88} 88}
diff --git a/arch/x86/lib/delay_64.c b/arch/x86/lib/delay_64.c
index 2dbebd308347..0ebbfb9e7c7f 100644
--- a/arch/x86/lib/delay_64.c
+++ b/arch/x86/lib/delay_64.c
@@ -40,7 +40,8 @@ EXPORT_SYMBOL(__delay);
40 40
41inline void __const_udelay(unsigned long xloops) 41inline void __const_udelay(unsigned long xloops)
42{ 42{
43 __delay(((xloops * HZ * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) + 1); 43 __delay(((xloops * HZ *
44 cpu_data(raw_smp_processor_id()).loops_per_jiffy) >> 32) + 1);
44} 45}
45EXPORT_SYMBOL(__const_udelay); 46EXPORT_SYMBOL(__const_udelay);
46 47
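
The 64-bit __const_udelay() treats xloops as a 32.32 fixed-point count of microseconds. Assuming the usual x86 convention that udelay(n) expands to __const_udelay(n * 0x10c7), where 0x10c7 is roughly 2^32 / 10^6, the shift by 32 turns the product back into plain loop iterations. A stand-alone sanity check of that arithmetic (the HZ and loops_per_jiffy values are made up):

    #include <stdio.h>

    int main(void)
    {
            unsigned long long xloops = 1 * 0x10c7;  /* udelay(1): 1 us in 32.32 */
            unsigned long long hz  = 1000;           /* example HZ */
            unsigned long long lpj = 2000000;        /* example loops_per_jiffy */

            /* same expression as the kernel code above */
            unsigned long long loops = ((xloops * hz * lpj) >> 32) + 1;

            /* expect about lpj * HZ / 1e6 = 2000 loops per microsecond */
            printf("udelay(1) -> %llu delay loops\n", loops);
            return 0;
    }
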
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 9f38b12b4af1..8bab2b2efaff 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -748,7 +748,7 @@ survive:
748 retval = get_user_pages(current, current->mm, 748 retval = get_user_pages(current, current->mm,
749 (unsigned long )to, 1, 1, 0, &pg, NULL); 749 (unsigned long )to, 1, 1, 0, &pg, NULL);
750 750
751 if (retval == -ENOMEM && is_init(current)) { 751 if (retval == -ENOMEM && is_global_init(current)) {
752 up_read(&current->mm->mmap_sem); 752 up_read(&current->mm->mmap_sem);
753 congestion_wait(WRITE, HZ/50); 753 congestion_wait(WRITE, HZ/50);
754 goto survive; 754 goto survive;
diff --git a/arch/x86/mach-voyager/voyager_smp.c b/arch/x86/mach-voyager/voyager_smp.c
index e4928aa6bdfb..f93a730b44d0 100644
--- a/arch/x86/mach-voyager/voyager_smp.c
+++ b/arch/x86/mach-voyager/voyager_smp.c
@@ -36,8 +36,8 @@ static unsigned long cpu_irq_affinity[NR_CPUS] __cacheline_aligned = { [0 ... NR
36 36
37/* per CPU data structure (for /proc/cpuinfo et al), visible externally 37/* per CPU data structure (for /proc/cpuinfo et al), visible externally
38 * indexed physically */ 38 * indexed physically */
 39struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned; 39DEFINE_PER_CPU(struct cpuinfo_x86, cpu_info) __cacheline_aligned;
40EXPORT_SYMBOL(cpu_data); 40EXPORT_PER_CPU_SYMBOL(cpu_info);
41 41
42/* physical ID of the CPU used to boot the system */ 42/* physical ID of the CPU used to boot the system */
43unsigned char boot_cpu_id; 43unsigned char boot_cpu_id;
@@ -430,7 +430,7 @@ find_smp_config(void)
430void __init 430void __init
431smp_store_cpu_info(int id) 431smp_store_cpu_info(int id)
432{ 432{
433 struct cpuinfo_x86 *c=&cpu_data[id]; 433 struct cpuinfo_x86 *c = &cpu_data(id);
434 434
435 *c = boot_cpu_data; 435 *c = boot_cpu_data;
436 436
@@ -634,7 +634,7 @@ do_boot_cpu(__u8 cpu)
634 cpu, smp_processor_id())); 634 cpu, smp_processor_id()));
635 635
636 printk("CPU%d: ", cpu); 636 printk("CPU%d: ", cpu);
637 print_cpu_info(&cpu_data[cpu]); 637 print_cpu_info(&cpu_data(cpu));
638 wmb(); 638 wmb();
639 cpu_set(cpu, cpu_callout_map); 639 cpu_set(cpu, cpu_callout_map);
640 cpu_set(cpu, cpu_present_map); 640 cpu_set(cpu, cpu_present_map);
@@ -683,7 +683,7 @@ smp_boot_cpus(void)
683 */ 683 */
684 smp_store_cpu_info(boot_cpu_id); 684 smp_store_cpu_info(boot_cpu_id);
685 printk("CPU%d: ", boot_cpu_id); 685 printk("CPU%d: ", boot_cpu_id);
686 print_cpu_info(&cpu_data[boot_cpu_id]); 686 print_cpu_info(&cpu_data(boot_cpu_id));
687 687
688 if(is_cpu_quad()) { 688 if(is_cpu_quad()) {
689 /* booting on a Quad CPU */ 689 /* booting on a Quad CPU */
@@ -714,7 +714,7 @@ smp_boot_cpus(void)
714 unsigned long bogosum = 0; 714 unsigned long bogosum = 0;
715 for (i = 0; i < NR_CPUS; i++) 715 for (i = 0; i < NR_CPUS; i++)
716 if (cpu_isset(i, cpu_online_map)) 716 if (cpu_isset(i, cpu_online_map))
717 bogosum += cpu_data[i].loops_per_jiffy; 717 bogosum += cpu_data(i).loops_per_jiffy;
718 printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n", 718 printk(KERN_INFO "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
719 cpucount+1, 719 cpucount+1,
720 bogosum/(500000/HZ), 720 bogosum/(500000/HZ),
diff --git a/arch/x86/mm/fault_32.c b/arch/x86/mm/fault_32.c
index 6555c3d14371..b695d70e998c 100644
--- a/arch/x86/mm/fault_32.c
+++ b/arch/x86/mm/fault_32.c
@@ -471,8 +471,8 @@ bad_area_nosemaphore:
471 printk_ratelimit()) { 471 printk_ratelimit()) {
472 printk("%s%s[%d]: segfault at %08lx eip %08lx " 472 printk("%s%s[%d]: segfault at %08lx eip %08lx "
473 "esp %08lx error %lx\n", 473 "esp %08lx error %lx\n",
474 tsk->pid > 1 ? KERN_INFO : KERN_EMERG, 474 task_pid_nr(tsk) > 1 ? KERN_INFO : KERN_EMERG,
475 tsk->comm, tsk->pid, address, regs->eip, 475 tsk->comm, task_pid_nr(tsk), address, regs->eip,
476 regs->esp, error_code); 476 regs->esp, error_code);
477 } 477 }
478 tsk->thread.cr2 = address; 478 tsk->thread.cr2 = address;
@@ -564,7 +564,8 @@ no_context:
564 * it's allocated already. 564 * it's allocated already.
565 */ 565 */
566 if ((page >> PAGE_SHIFT) < max_low_pfn 566 if ((page >> PAGE_SHIFT) < max_low_pfn
567 && (page & _PAGE_PRESENT)) { 567 && (page & _PAGE_PRESENT)
568 && !(page & _PAGE_PSE)) {
568 page &= PAGE_MASK; 569 page &= PAGE_MASK;
569 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT) 570 page = ((__typeof__(page) *) __va(page))[(address >> PAGE_SHIFT)
570 & (PTRS_PER_PTE - 1)]; 571 & (PTRS_PER_PTE - 1)];
@@ -587,7 +588,7 @@ no_context:
587 */ 588 */
588out_of_memory: 589out_of_memory:
589 up_read(&mm->mmap_sem); 590 up_read(&mm->mmap_sem);
590 if (is_init(tsk)) { 591 if (is_global_init(tsk)) {
591 yield(); 592 yield();
592 down_read(&mm->mmap_sem); 593 down_read(&mm->mmap_sem);
593 goto survive; 594 goto survive;
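
The extra !(page & _PAGE_PSE) test keeps the oops decoder from following a 4 MB (PSE) page directory entry as if it pointed at a page table: with _PAGE_PSE set, the entry's address bits name a data frame, not a PTE array, so the subsequent __va() lookup would read garbage. In sketch form (pde stands for the value the fault handler has already read from the page directory):

    if ((pde & _PAGE_PRESENT) && !(pde & _PAGE_PSE)) {
            /* safe: the PDE really is a pointer to a table of PTEs */
            unsigned long *pte_table = __va(pde & PAGE_MASK);
            pte = pte_table[(address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)];
    }
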
diff --git a/arch/x86/mm/fault_64.c b/arch/x86/mm/fault_64.c
index 5e0e54906c48..00be7f0a71b2 100644
--- a/arch/x86/mm/fault_64.c
+++ b/arch/x86/mm/fault_64.c
@@ -169,7 +169,7 @@ void dump_pagetable(unsigned long address)
169 pmd = pmd_offset(pud, address); 169 pmd = pmd_offset(pud, address);
170 if (bad_address(pmd)) goto bad; 170 if (bad_address(pmd)) goto bad;
171 printk("PMD %lx ", pmd_val(*pmd)); 171 printk("PMD %lx ", pmd_val(*pmd));
172 if (!pmd_present(*pmd)) goto ret; 172 if (!pmd_present(*pmd) || pmd_large(*pmd)) goto ret;
173 173
174 pte = pte_offset_kernel(pmd, address); 174 pte = pte_offset_kernel(pmd, address);
175 if (bad_address(pte)) goto bad; 175 if (bad_address(pte)) goto bad;
@@ -285,7 +285,6 @@ static int vmalloc_fault(unsigned long address)
285 return 0; 285 return 0;
286} 286}
287 287
288static int page_fault_trace;
289int show_unhandled_signals = 1; 288int show_unhandled_signals = 1;
290 289
291/* 290/*
@@ -354,10 +353,6 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
354 if (likely(regs->eflags & X86_EFLAGS_IF)) 353 if (likely(regs->eflags & X86_EFLAGS_IF))
355 local_irq_enable(); 354 local_irq_enable();
356 355
357 if (unlikely(page_fault_trace))
358 printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
359 regs->rip,regs->rsp,regs->cs,regs->ss,address,error_code);
360
361 if (unlikely(error_code & PF_RSVD)) 356 if (unlikely(error_code & PF_RSVD))
362 pgtable_bad(address, regs, error_code); 357 pgtable_bad(address, regs, error_code);
363 358
@@ -488,7 +483,7 @@ bad_area_nosemaphore:
488 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) && 483 if (show_unhandled_signals && unhandled_signal(tsk, SIGSEGV) &&
489 printk_ratelimit()) { 484 printk_ratelimit()) {
490 printk( 485 printk(
491 "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n", 486 "%s%s[%d]: segfault at %lx rip %lx rsp %lx error %lx\n",
492 tsk->pid > 1 ? KERN_INFO : KERN_EMERG, 487 tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
493 tsk->comm, tsk->pid, address, regs->rip, 488 tsk->comm, tsk->pid, address, regs->rip,
494 regs->rsp, error_code); 489 regs->rsp, error_code);
@@ -554,7 +549,7 @@ no_context:
554 */ 549 */
555out_of_memory: 550out_of_memory:
556 up_read(&mm->mmap_sem); 551 up_read(&mm->mmap_sem);
557 if (is_init(current)) { 552 if (is_global_init(current)) {
558 yield(); 553 yield();
559 goto again; 554 goto again;
560 } 555 }
@@ -621,10 +616,3 @@ void vmalloc_sync_all(void)
621 BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) == 616 BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
622 (__START_KERNEL & PGDIR_MASK))); 617 (__START_KERNEL & PGDIR_MASK)));
623} 618}
624
625static int __init enable_pagefaulttrace(char *str)
626{
627 page_fault_trace = 1;
628 return 1;
629}
630__setup("pagefaulttrace", enable_pagefaulttrace);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 5eec5e56d07f..3d6926ba8995 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -612,7 +612,7 @@ void __init init_cpu_to_node(void)
612{ 612{
613 int i; 613 int i;
614 for (i = 0; i < NR_CPUS; i++) { 614 for (i = 0; i < NR_CPUS; i++) {
615 u8 apicid = x86_cpu_to_apicid[i]; 615 u8 apicid = x86_cpu_to_apicid_init[i];
616 if (apicid == BAD_APICID) 616 if (apicid == BAD_APICID)
617 continue; 617 continue;
618 if (apicid_to_node[apicid] == NUMA_NO_NODE) 618 if (apicid_to_node[apicid] == NUMA_NO_NODE)
diff --git a/arch/x86/mm/pageattr_64.c b/arch/x86/mm/pageattr_64.c
index 8a4f65bf956e..c7b7dfe1d405 100644
--- a/arch/x86/mm/pageattr_64.c
+++ b/arch/x86/mm/pageattr_64.c
@@ -230,9 +230,14 @@ void global_flush_tlb(void)
 	struct page *pg, *next;
 	struct list_head l;
 
-	down_read(&init_mm.mmap_sem);
+	/*
+	 * Write-protect the semaphore, to exclude two contexts
+	 * doing a list_replace_init() call in parallel and to
+	 * exclude new additions to the deferred_pages list:
+	 */
+	down_write(&init_mm.mmap_sem);
 	list_replace_init(&deferred_pages, &l);
-	up_read(&init_mm.mmap_sem);
+	up_write(&init_mm.mmap_sem);
 
 	flush_map(&l);
 
diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
index c049ce414f01..0ed046a187f7 100644
--- a/arch/x86/oprofile/backtrace.c
+++ b/arch/x86/oprofile/backtrace.c
@@ -13,25 +13,45 @@
 #include <linux/mm.h>
 #include <asm/ptrace.h>
 #include <asm/uaccess.h>
+#include <asm/stacktrace.h>
 
-struct frame_head {
-	struct frame_head * ebp;
-	unsigned long ret;
-} __attribute__((packed));
+static void backtrace_warning_symbol(void *data, char *msg,
+				     unsigned long symbol)
+{
+	/* Ignore warnings */
+}
 
-static struct frame_head *
-dump_kernel_backtrace(struct frame_head * head)
+static void backtrace_warning(void *data, char *msg)
 {
-	oprofile_add_trace(head->ret);
+	/* Ignore warnings */
+}
 
-	/* frame pointers should strictly progress back up the stack
-	 * (towards higher addresses) */
-	if (head >= head->ebp)
-		return NULL;
+static int backtrace_stack(void *data, char *name)
+{
+	/* Yes, we want all stacks */
+	return 0;
+}
+
+static void backtrace_address(void *data, unsigned long addr)
+{
+	unsigned int *depth = data;
 
-	return head->ebp;
+	if ((*depth)--)
+		oprofile_add_trace(addr);
 }
 
+static struct stacktrace_ops backtrace_ops = {
+	.warning = backtrace_warning,
+	.warning_symbol = backtrace_warning_symbol,
+	.stack = backtrace_stack,
+	.address = backtrace_address,
+};
+
+struct frame_head {
+	struct frame_head *ebp;
+	unsigned long ret;
+} __attribute__((packed));
+
 static struct frame_head *
 dump_user_backtrace(struct frame_head * head)
 {
@@ -53,72 +73,16 @@ dump_user_backtrace(struct frame_head * head)
 	return bufhead[0].ebp;
 }
 
-/*
- *  |             | /\ Higher addresses
- *  |             |
- *  --------------- stack base (address of current_thread_info)
- *  | thread info |
- *  .             .
- *  |    stack    |
- *  --------------- saved regs->ebp value if valid (frame_head address)
- *  .             .
- *  --------------- saved regs->rsp value if x86_64
- *  |             |
- *  --------------- struct pt_regs * stored on stack if 32-bit
- *  |             |
- *  .             .
- *  |             |
- *  --------------- %esp
- *  |             |
- *  |             | \/ Lower addresses
- *
- * Thus, regs (or regs->rsp for x86_64) <-> stack base restricts the
- * valid(ish) ebp values. Note: (1) for x86_64, NMI and several other
- * exceptions use special stacks, maintained by the interrupt stack table
- * (IST). These stacks are set up in trap_init() in
- * arch/x86_64/kernel/traps.c. Thus, for x86_64, regs now does not point
- * to the kernel stack; instead, it points to some location on the NMI
- * stack. On the other hand, regs->rsp is the stack pointer saved when the
- * NMI occurred. (2) For 32-bit, regs->esp is not valid because the
- * processor does not save %esp on the kernel stack when interrupts occur
- * in the kernel mode.
- */
-#ifdef CONFIG_FRAME_POINTER
-static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
-{
-	unsigned long headaddr = (unsigned long)head;
-#ifdef CONFIG_X86_64
-	unsigned long stack = (unsigned long)regs->rsp;
-#else
-	unsigned long stack = (unsigned long)regs;
-#endif
-	unsigned long stack_base = (stack & ~(THREAD_SIZE - 1)) + THREAD_SIZE;
-
-	return headaddr > stack && headaddr < stack_base;
-}
-#else
-/* without fp, it's just junk */
-static int valid_kernel_stack(struct frame_head * head, struct pt_regs * regs)
-{
-	return 0;
-}
-#endif
-
-
 void
 x86_backtrace(struct pt_regs * const regs, unsigned int depth)
 {
-	struct frame_head *head;
-
-#ifdef CONFIG_X86_64
-	head = (struct frame_head *)regs->rbp;
-#else
-	head = (struct frame_head *)regs->ebp;
-#endif
+	struct frame_head *head = (struct frame_head *)frame_pointer(regs);
+	unsigned long stack = stack_pointer(regs);
 
 	if (!user_mode_vm(regs)) {
-		while (depth-- && valid_kernel_stack(head, regs))
-			head = dump_kernel_backtrace(head);
+		if (depth)
+			dump_trace(NULL, regs, (unsigned long *)stack,
+				   &backtrace_ops, &depth);
 		return;
 	}
 
diff --git a/arch/x86_64/.gitignore b/arch/x86_64/.gitignore
new file mode 100644
index 000000000000..36ef4c374d25
--- /dev/null
+++ b/arch/x86_64/.gitignore
@@ -0,0 +1 @@
+boot
diff --git a/arch/x86_64/Kconfig b/arch/x86_64/Kconfig
index 78cb68f2ebbd..aab25f3ba3ce 100644
--- a/arch/x86_64/Kconfig
+++ b/arch/x86_64/Kconfig
@@ -723,7 +723,9 @@ config ARCH_HIBERNATION_HEADER
 
 source "drivers/acpi/Kconfig"
 
-source "arch/x86/kernel/cpufreq/Kconfig"
+source "arch/x86/kernel/cpu/cpufreq/Kconfig_64"
+
+source "drivers/cpuidle/Kconfig"
 
 endmenu
 
@@ -766,9 +768,9 @@ source "fs/Kconfig.binfmt"
 config IA32_EMULATION
 	bool "IA32 Emulation"
 	help
-	  Include code to run 32-bit programs under a 64-bit kernel. You should likely
-	  turn this on, unless you're 100% sure that you don't have any 32-bit programs
-	  left.
+	  Include code to run 32-bit programs under a 64-bit kernel. You should
+	  likely turn this on, unless you're 100% sure that you don't have any
+	  32-bit programs left.
 
 config IA32_AOUT
 	tristate "IA32 a.out support"
@@ -799,21 +801,6 @@ source "drivers/firmware/Kconfig"
 
 source fs/Kconfig
 
-menu "Instrumentation Support"
-
-source "arch/x86/oprofile/Kconfig"
-
-config KPROBES
-	bool "Kprobes"
-	depends on KALLSYMS && MODULES
-	help
-	  Kprobes allows you to trap at almost any kernel address and
-	  execute a callback function. register_kprobe() establishes
-	  a probepoint and specifies the callback. Kprobes is useful
-	  for kernel debugging, non-intrusive instrumentation and testing.
-	  If in doubt, say "N".
-endmenu
-
 source "arch/x86_64/Kconfig.debug"
 
 source "security/Kconfig"
diff --git a/arch/x86_64/Makefile b/arch/x86_64/Makefile
index 03e1ede27b85..6d89ab762ffc 100644
--- a/arch/x86_64/Makefile
+++ b/arch/x86_64/Makefile
@@ -74,7 +74,7 @@ KBUILD_CFLAGS += $(cflags-y)
 CFLAGS_KERNEL += $(cflags-kernel-y)
 KBUILD_AFLAGS += -m64
 
-head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task_64.o
+head-y := arch/x86/kernel/head_64.o arch/x86/kernel/head64.o arch/x86/kernel/init_task.o
 
 libs-y += arch/x86/lib/
 core-y += arch/x86/kernel/ \
@@ -97,9 +97,9 @@ BOOTIMAGE := arch/x86/boot/bzImage
 KBUILD_IMAGE := $(BOOTIMAGE)
 
 bzImage: vmlinux
-	$(Q)mkdir -p $(objtree)/arch/x86_64/boot
-	$(Q)ln -fsn $(objtree)/arch/x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
 	$(Q)$(MAKE) $(build)=$(boot) $(BOOTIMAGE)
+	$(Q)mkdir -p $(objtree)/arch/x86_64/boot
+	$(Q)ln -fsn ../../x86/boot/bzImage $(objtree)/arch/x86_64/boot/bzImage
 
 bzlilo: vmlinux
 	$(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(BOOTIMAGE) zlilo
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 7fbb44bea37f..85ffbb491490 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -251,6 +251,8 @@ config EMBEDDED_RAMDISK_IMAGE
 	  provide one yourself.
 endmenu
 
+source "kernel/Kconfig.instrumentation"
+
 source "arch/xtensa/Kconfig.debug"
 
 source "security/Kconfig"
diff --git a/arch/xtensa/kernel/traps.c b/arch/xtensa/kernel/traps.c
index 8be99c777d9d..397bcd6ad08d 100644
--- a/arch/xtensa/kernel/traps.c
+++ b/arch/xtensa/kernel/traps.c
@@ -176,7 +176,7 @@ void do_unhandled(struct pt_regs *regs, unsigned long exccause)
 	printk("Caught unhandled exception in '%s' "
 	       "(pid = %d, pc = %#010lx) - should not happen\n"
 	       "\tEXCCAUSE is %ld\n",
-	       current->comm, current->pid, regs->pc, exccause);
+	       current->comm, task_pid_nr(current), regs->pc, exccause);
 	force_sig(SIGILL, current);
 }
 
@@ -228,7 +228,7 @@ do_illegal_instruction(struct pt_regs *regs)
 	/* If in user mode, send SIGILL signal to current process. */
 
 	printk("Illegal Instruction in '%s' (pid = %d, pc = %#010lx)\n",
-	       current->comm, current->pid, regs->pc);
+	       current->comm, task_pid_nr(current), regs->pc);
 	force_sig(SIGILL, current);
 }
 
@@ -254,7 +254,7 @@ do_unaligned_user (struct pt_regs *regs)
 	current->thread.error_code = -3;
 	printk("Unaligned memory access to %08lx in '%s' "
 	       "(pid = %d, pc = %#010lx)\n",
-	       regs->excvaddr, current->comm, current->pid, regs->pc);
+	       regs->excvaddr, current->comm, task_pid_nr(current), regs->pc);
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
 	info.si_code = BUS_ADRALN;
diff --git a/arch/xtensa/mm/fault.c b/arch/xtensa/mm/fault.c
index 2f842859948f..33f366be323f 100644
--- a/arch/xtensa/mm/fault.c
+++ b/arch/xtensa/mm/fault.c
@@ -145,7 +145,7 @@ bad_area:
 	 */
 out_of_memory:
 	up_read(&mm->mmap_sem);
-	if (is_init(current)) {
+	if (is_global_init(current)) {
 		yield();
 		down_read(&mm->mmap_sem);
 		goto survive;