-rw-r--r--  arch/i386/kernel/traps.c              2
-rw-r--r--  arch/i386/lib/delay.c                 2
-rw-r--r--  arch/ppc/lib/locks.c                  4
-rw-r--r--  arch/ppc64/kernel/idle.c              2
-rw-r--r--  arch/sh/lib/delay.c                   2
-rw-r--r--  arch/sparc64/lib/delay.c              2
-rw-r--r--  arch/x86_64/lib/delay.c               2
-rw-r--r--  drivers/acpi/processor_idle.c         2
-rw-r--r--  drivers/input/gameport/gameport.c     2
-rw-r--r--  drivers/oprofile/buffer_sync.c        4
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h          6
-rw-r--r--  include/asm-alpha/smp.h               2
-rw-r--r--  include/asm-arm/smp.h                 2
-rw-r--r--  include/asm-i386/smp.h                2
-rw-r--r--  include/asm-ia64/smp.h                2
-rw-r--r--  include/asm-m32r/smp.h                2
-rw-r--r--  include/asm-mips/smp.h                2
-rw-r--r--  include/asm-parisc/smp.h              2
-rw-r--r--  include/asm-ppc/smp.h                 2
-rw-r--r--  include/asm-ppc64/smp.h               2
-rw-r--r--  include/asm-s390/smp.h                2
-rw-r--r--  include/asm-sh/smp.h                  2
-rw-r--r--  include/asm-sparc/smp.h               2
-rw-r--r--  include/asm-sparc64/smp.h             2
-rw-r--r--  include/asm-um/smp.h                  3
-rw-r--r--  include/asm-x86_64/smp.h              2
-rw-r--r--  include/linux/mmzone.h                2
-rw-r--r--  include/linux/smp.h                  40
-rw-r--r--  include/net/route.h                   2
-rw-r--r--  include/net/snmp.h                   14
-rw-r--r--  kernel/module.c                       2
-rw-r--r--  kernel/power/smp.c                    4
-rw-r--r--  kernel/sched.c                        4
-rw-r--r--  kernel/stop_machine.c                 4
-rw-r--r--  lib/Makefile                          1
-rw-r--r--  lib/kernel_lock.c                    55
-rw-r--r--  lib/smp_processor_id.c               55
37 files changed, 119 insertions, 125 deletions
diff --git a/arch/i386/kernel/traps.c b/arch/i386/kernel/traps.c
index 00c63419c06f..83c579e82a81 100644
--- a/arch/i386/kernel/traps.c
+++ b/arch/i386/kernel/traps.c
@@ -306,7 +306,7 @@ void die(const char * str, struct pt_regs * regs, long err)
 	};
 	static int die_counter;
 
-	if (die.lock_owner != _smp_processor_id()) {
+	if (die.lock_owner != raw_smp_processor_id()) {
 		console_verbose();
 		spin_lock_irq(&die.lock);
 		die.lock_owner = smp_processor_id();
diff --git a/arch/i386/lib/delay.c b/arch/i386/lib/delay.c
index 080639f262b1..eb0cdfe9280f 100644
--- a/arch/i386/lib/delay.c
+++ b/arch/i386/lib/delay.c
@@ -34,7 +34,7 @@ inline void __const_udelay(unsigned long xloops)
 	xloops *= 4;
 	__asm__("mull %0"
 		:"=d" (xloops), "=&a" (d0)
-		:"1" (xloops),"0" (cpu_data[_smp_processor_id()].loops_per_jiffy * (HZ/4)));
+		:"1" (xloops),"0" (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (HZ/4)));
 	__delay(++xloops);
 }
 
diff --git a/arch/ppc/lib/locks.c b/arch/ppc/lib/locks.c
index 694163d696d8..c450dc4b766e 100644
--- a/arch/ppc/lib/locks.c
+++ b/arch/ppc/lib/locks.c
@@ -130,7 +130,7 @@ void _raw_read_lock(rwlock_t *rw)
 	while (!read_can_lock(rw)) {
 		if (--stuck == 0) {
 			printk("_read_lock(%p) CPU#%d lock %d\n",
-			       rw, _smp_processor_id(), rw->lock);
+			       rw, raw_smp_processor_id(), rw->lock);
 			stuck = INIT_STUCK;
 		}
 	}
@@ -158,7 +158,7 @@ void _raw_write_lock(rwlock_t *rw)
 	while (!write_can_lock(rw)) {
 		if (--stuck == 0) {
 			printk("write_lock(%p) CPU#%d lock %d)\n",
-			       rw, _smp_processor_id(), rw->lock);
+			       rw, raw_smp_processor_id(), rw->lock);
 			stuck = INIT_STUCK;
 		}
 	}
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index f24ce2b87200..ff8a7db142d3 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -292,7 +292,7 @@ static int native_idle(void)
 		if (need_resched())
 			schedule();
 
-		if (cpu_is_offline(_smp_processor_id()) &&
+		if (cpu_is_offline(raw_smp_processor_id()) &&
 		    system_state == SYSTEM_RUNNING)
 			cpu_die();
 	}
diff --git a/arch/sh/lib/delay.c b/arch/sh/lib/delay.c
index 50b36037d86b..351714694d6d 100644
--- a/arch/sh/lib/delay.c
+++ b/arch/sh/lib/delay.c
@@ -24,7 +24,7 @@ inline void __const_udelay(unsigned long xloops)
 	__asm__("dmulu.l %0, %2\n\t"
 		"sts mach, %0"
 		: "=r" (xloops)
-		: "0" (xloops), "r" (cpu_data[_smp_processor_id()].loops_per_jiffy)
+		: "0" (xloops), "r" (cpu_data[raw_smp_processor_id()].loops_per_jiffy)
 		: "macl", "mach");
 	__delay(xloops * HZ);
 }
diff --git a/arch/sparc64/lib/delay.c b/arch/sparc64/lib/delay.c
index f6b4c784d53e..e8808727617a 100644
--- a/arch/sparc64/lib/delay.c
+++ b/arch/sparc64/lib/delay.c
@@ -31,7 +31,7 @@ void __const_udelay(unsigned long n)
 {
 	n *= 4;
 
-	n *= (cpu_data(_smp_processor_id()).udelay_val * (HZ/4));
+	n *= (cpu_data(raw_smp_processor_id()).udelay_val * (HZ/4));
 	n >>= 32;
 
 	__delay(n + 1);
diff --git a/arch/x86_64/lib/delay.c b/arch/x86_64/lib/delay.c
index 6e2d66472eb1..aed61a668a1b 100644
--- a/arch/x86_64/lib/delay.c
+++ b/arch/x86_64/lib/delay.c
@@ -34,7 +34,7 @@ void __delay(unsigned long loops)
 
 inline void __const_udelay(unsigned long xloops)
 {
-	__delay(((xloops * cpu_data[_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
+	__delay(((xloops * cpu_data[raw_smp_processor_id()].loops_per_jiffy) >> 32) * HZ);
 }
 
 void __udelay(unsigned long usecs)
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index ff64d333e95f..c9d671cf7857 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -171,7 +171,7 @@ static void acpi_processor_idle (void)
 	int sleep_ticks = 0;
 	u32 t1, t2 = 0;
 
-	pr = processors[_smp_processor_id()];
+	pr = processors[raw_smp_processor_id()];
 	if (!pr)
 		return;
 
diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
index 9b8ff396e6f8..e152d0fa0cdd 100644
--- a/drivers/input/gameport/gameport.c
+++ b/drivers/input/gameport/gameport.c
@@ -134,7 +134,7 @@ static int gameport_measure_speed(struct gameport *gameport)
 	}
 
 	gameport_close(gameport);
-	return (cpu_data[_smp_processor_id()].loops_per_jiffy * (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
+	return (cpu_data[raw_smp_processor_id()].loops_per_jiffy * (unsigned long)HZ / (1000 / 50)) / (tx < 1 ? 1 : tx);
 
 #else
 
diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
index 55720dc6ec43..745a14183634 100644
--- a/drivers/oprofile/buffer_sync.c
+++ b/drivers/oprofile/buffer_sync.c
@@ -62,7 +62,7 @@ static int task_exit_notify(struct notifier_block * self, unsigned long val, voi
 	/* To avoid latency problems, we only process the current CPU,
 	 * hoping that most samples for the task are on this CPU
 	 */
-	sync_buffer(_smp_processor_id());
+	sync_buffer(raw_smp_processor_id());
 	return 0;
 }
 
@@ -86,7 +86,7 @@ static int munmap_notify(struct notifier_block * self, unsigned long val, void *
 		/* To avoid latency problems, we only process the current CPU,
 		 * hoping that most samples for the task are on this CPU
 		 */
-		sync_buffer(_smp_processor_id());
+		sync_buffer(raw_smp_processor_id());
 		return 0;
 	}
 
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 71bb41019a12..7d7c8788ea75 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -145,10 +145,10 @@ static inline void set_buffer_unwritten_io(struct buffer_head *bh)
 #define xfs_inherit_nosymlinks	xfs_params.inherit_nosym.val
 #define xfs_rotorstep		xfs_params.rotorstep.val
 
-#ifndef __smp_processor_id
-#define __smp_processor_id()	smp_processor_id()
+#ifndef raw_smp_processor_id
+#define raw_smp_processor_id()	smp_processor_id()
 #endif
-#define current_cpu()		__smp_processor_id()
+#define current_cpu()		raw_smp_processor_id()
 #define current_pid()		(current->pid)
 #define current_fsuid(cred)	(current->fsuid)
 #define current_fsgid(cred)	(current->fsgid)
diff --git a/include/asm-alpha/smp.h b/include/asm-alpha/smp.h
index cbc173ae45aa..9950706abdf8 100644
--- a/include/asm-alpha/smp.h
+++ b/include/asm-alpha/smp.h
@@ -43,7 +43,7 @@ extern struct cpuinfo_alpha cpu_data[NR_CPUS];
 #define PROC_CHANGE_PENALTY	20
 
 #define hard_smp_processor_id()	__hard_smp_processor_id()
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern cpumask_t cpu_present_mask;
 extern cpumask_t cpu_online_map;
diff --git a/include/asm-arm/smp.h b/include/asm-arm/smp.h
index bd44f894690f..6c6c60adbbaa 100644
--- a/include/asm-arm/smp.h
+++ b/include/asm-arm/smp.h
@@ -21,7 +21,7 @@
 # error "<asm-arm/smp.h> included in non-SMP build"
 #endif
 
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern cpumask_t cpu_present_mask;
 #define cpu_possible_map	cpu_present_mask
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index e03a206dfa36..55ef31f66bbe 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -51,7 +51,7 @@ extern u8 x86_cpu_to_apicid[];
  * from the initial startup. We map APIC_BASE very early in page_setup(),
  * so this is correct in the x86 case.
  */
-#define __smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern cpumask_t cpu_callout_map;
 extern cpumask_t cpu_callin_map;
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 3ba1a061e4ae..a3914352c995 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -46,7 +46,7 @@ ia64_get_lid (void)
 #define SMP_IRQ_REDIRECTION	(1 << 0)
 #define SMP_IPI_REDIRECTION	(1 << 1)
 
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern struct smp_boot_data {
 	int cpu_count;
diff --git a/include/asm-m32r/smp.h b/include/asm-m32r/smp.h
index 8cd4d0da4be1..b9a20cdad65f 100644
--- a/include/asm-m32r/smp.h
+++ b/include/asm-m32r/smp.h
@@ -66,7 +66,7 @@ extern volatile int cpu_2_physid[NR_CPUS];
 #define physid_to_cpu(physid)	physid_2_cpu[physid]
 #define cpu_to_physid(cpu_id)	cpu_2_physid[cpu_id]
 
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern cpumask_t cpu_callout_map;
 #define cpu_possible_map	cpu_callout_map
diff --git a/include/asm-mips/smp.h b/include/asm-mips/smp.h
index 8ba370ecfd4c..5618f1e12f40 100644
--- a/include/asm-mips/smp.h
+++ b/include/asm-mips/smp.h
@@ -21,7 +21,7 @@
 #include <linux/cpumask.h>
 #include <asm/atomic.h>
 
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 /* Map from cpu id to sequential logical cpu number.  This will only
    not be idempotent when cpus failed to come on-line. */
diff --git a/include/asm-parisc/smp.h b/include/asm-parisc/smp.h
index fde77ac35463..9413f67a540b 100644
--- a/include/asm-parisc/smp.h
+++ b/include/asm-parisc/smp.h
@@ -51,7 +51,7 @@ extern void smp_send_reschedule(int cpu);
 
 extern unsigned long cpu_present_mask;
 
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 #endif /* CONFIG_SMP */
 
diff --git a/include/asm-ppc/smp.h b/include/asm-ppc/smp.h
index ebfb614f55f6..17530c232c76 100644
--- a/include/asm-ppc/smp.h
+++ b/include/asm-ppc/smp.h
@@ -44,7 +44,7 @@ extern void smp_message_recv(int, struct pt_regs *);
 #define NO_PROC_ID	0xFF		/* No processor magic marker */
 #define PROC_CHANGE_PENALTY	20
 
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 extern int __cpu_up(unsigned int cpu);
 
diff --git a/include/asm-ppc64/smp.h b/include/asm-ppc64/smp.h
index c8646fa999c2..8115ecb8feee 100644
--- a/include/asm-ppc64/smp.h
+++ b/include/asm-ppc64/smp.h
@@ -45,7 +45,7 @@ void generic_cpu_die(unsigned int cpu);
 void generic_mach_cpu_die(void);
 #endif
 
-#define __smp_processor_id() (get_paca()->paca_index)
+#define raw_smp_processor_id() (get_paca()->paca_index)
 #define hard_smp_processor_id() (get_paca()->hw_cpu_id)
 
 extern cpumask_t cpu_sibling_map[NR_CPUS];
diff --git a/include/asm-s390/smp.h b/include/asm-s390/smp.h
index 9473786387a3..dd50e57a928f 100644
--- a/include/asm-s390/smp.h
+++ b/include/asm-s390/smp.h
@@ -47,7 +47,7 @@ extern int smp_call_function_on(void (*func) (void *info), void *info,
 
 #define PROC_CHANGE_PENALTY	20	/* Schedule penalty */
 
-#define smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
+#define raw_smp_processor_id() (S390_lowcore.cpu_data.cpu_nr)
 
 extern int smp_get_cpu(cpumask_t cpu_map);
 extern void smp_put_cpu(int cpu);
diff --git a/include/asm-sh/smp.h b/include/asm-sh/smp.h
index 38b54469d7d1..f19a8b3b69a6 100644
--- a/include/asm-sh/smp.h
+++ b/include/asm-sh/smp.h
@@ -25,7 +25,7 @@ extern cpumask_t cpu_possible_map;
 
 #define cpu_online(cpu)	cpu_isset(cpu, cpu_online_map)
 
-#define smp_processor_id()	(current_thread_info()->cpu)
+#define raw_smp_processor_id()	(current_thread_info()->cpu)
 
 /* I've no idea what the real meaning of this is */
 #define PROC_CHANGE_PENALTY	20
diff --git a/include/asm-sparc/smp.h b/include/asm-sparc/smp.h
index f986c0d0922a..4f96d8333a12 100644
--- a/include/asm-sparc/smp.h
+++ b/include/asm-sparc/smp.h
@@ -148,7 +148,7 @@ extern __inline__ int hard_smp_processor_id(void)
 }
 #endif
 
-#define smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 #define prof_multiplier(__cpu)	cpu_data(__cpu).multiplier
 #define prof_counter(__cpu)	cpu_data(__cpu).counter
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 5e3e06d908fe..110a2de89123 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -64,7 +64,7 @@ static __inline__ int hard_smp_processor_id(void)
 	}
 }
 
-#define smp_processor_id() (current_thread_info()->cpu)
+#define raw_smp_processor_id() (current_thread_info()->cpu)
 
 #endif /* !(__ASSEMBLY__) */
 
diff --git a/include/asm-um/smp.h b/include/asm-um/smp.h
index 4412d5d9c26b..d879eba2b52c 100644
--- a/include/asm-um/smp.h
+++ b/include/asm-um/smp.h
@@ -8,7 +8,8 @@
8#include "asm/current.h" 8#include "asm/current.h"
9#include "linux/cpumask.h" 9#include "linux/cpumask.h"
10 10
11#define smp_processor_id() (current_thread->cpu) 11#define raw_smp_processor_id() (current_thread->cpu)
12
12#define cpu_logical_map(n) (n) 13#define cpu_logical_map(n) (n)
13#define cpu_number_map(n) (n) 14#define cpu_number_map(n) (n)
14#define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */ 15#define PROC_CHANGE_PENALTY 15 /* Pick a number, any number */
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index 96844fecbde8..a7425aa5a3b7 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -68,7 +68,7 @@ static inline int num_booting_cpus(void)
 	return cpus_weight(cpu_callout_map);
 }
 
-#define __smp_processor_id() read_pda(cpunumber)
+#define raw_smp_processor_id() read_pda(cpunumber)
 
 extern __inline int hard_smp_processor_id(void)
 {
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index e530c6c092f1..beacd931b606 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -381,7 +381,7 @@ int lowmem_reserve_ratio_sysctl_handler(struct ctl_table *, int, struct file *,
 
 #include <linux/topology.h>
 /* Returns the number of the current Node. */
-#define numa_node_id()		(cpu_to_node(_smp_processor_id()))
+#define numa_node_id()		(cpu_to_node(raw_smp_processor_id()))
 
 #ifndef CONFIG_DISCONTIGMEM
 
diff --git a/include/linux/smp.h b/include/linux/smp.h
index dcf1db3b35d3..9dfa3ee769ae 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -92,10 +92,7 @@ void smp_prepare_boot_cpu(void);
 /*
  * These macros fold the SMP functionality into a single CPU system
  */
-
-#if !defined(__smp_processor_id) || !defined(CONFIG_PREEMPT)
-# define smp_processor_id() 0
-#endif
+#define raw_smp_processor_id()			0
 #define hard_smp_processor_id()			0
 #define smp_call_function(func,info,retry,wait)	({ 0; })
 #define on_each_cpu(func,info,retry,wait)	({ func(info); 0; })
@@ -106,30 +103,25 @@ static inline void smp_send_reschedule(int cpu) { }
 #endif /* !SMP */
 
 /*
- * DEBUG_PREEMPT support: check whether smp_processor_id() is being
- * used in a preemption-safe way.
+ * smp_processor_id(): get the current CPU ID.
  *
- * An architecture has to enable this debugging code explicitly.
- * It can do so by renaming the smp_processor_id() macro to
- * __smp_processor_id(). This should only be done after some minimal
- * testing, because usually there are a number of false positives
- * that an architecture will trigger.
+ * if DEBUG_PREEMPT is enabled the we check whether it is
+ * used in a preemption-safe way. (smp_processor_id() is safe
+ * if it's used in a preemption-off critical section, or in
+ * a thread that is bound to the current CPU.)
  *
- * To fix a false positive (i.e. smp_processor_id() use that the
- * debugging code reports but which use for some reason is legal),
- * change the smp_processor_id() reference to _smp_processor_id(),
- * which is the nondebug variant. NOTE: don't use this to hack around
- * real bugs.
+ * NOTE: raw_smp_processor_id() is for internal use only
+ * (smp_processor_id() is the preferred variant), but in rare
+ * instances it might also be used to turn off false positives
+ * (i.e. smp_processor_id() use that the debugging code reports but
+ * which use for some reason is legal). Don't use this to hack around
+ * the warning message, as your code might not work under PREEMPT.
  */
-#ifdef __smp_processor_id
-# if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
-   extern unsigned int smp_processor_id(void);
-# else
-#  define smp_processor_id() __smp_processor_id()
-# endif
-# define _smp_processor_id() __smp_processor_id()
+#ifdef CONFIG_DEBUG_PREEMPT
+  extern unsigned int debug_smp_processor_id(void);
+# define smp_processor_id() debug_smp_processor_id()
 #else
-# define _smp_processor_id() smp_processor_id()
+# define smp_processor_id() raw_smp_processor_id()
 #endif
 
 #define get_cpu() ({ preempt_disable(); smp_processor_id(); })
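
Usage sketch (illustrative only, not part of the patch): smp_processor_id() should be called with preemption disabled, e.g. via the get_cpu()/put_cpu() pair defined above, while raw_smp_processor_id() is meant for the rare paths where a momentarily stale CPU number is acceptable. The per-CPU counter hit_count and both helpers below are hypothetical names chosen for the example.

/* Illustrative sketch -- hit_count and the helpers are hypothetical. */
#include <linux/percpu.h>
#include <linux/smp.h>

static DEFINE_PER_CPU(int, hit_count);

static void note_hit(void)
{
	int cpu = get_cpu();		/* disables preemption */

	per_cpu(hit_count, cpu)++;	/* cannot migrate until put_cpu() */
	put_cpu();
}

static void note_hit_statistical(void)
{
	/*
	 * Statistics-only path: a stale CPU number is tolerable here, so
	 * raw_smp_processor_id() avoids the DEBUG_PREEMPT warning without
	 * disabling preemption.
	 */
	per_cpu(hit_count, raw_smp_processor_id())++;
}
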
diff --git a/include/net/route.h b/include/net/route.h
index d34ca8fc6756..c3cd069a9aca 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -107,7 +107,7 @@ struct rt_cache_stat
 
 extern struct rt_cache_stat *rt_cache_stat;
 #define RT_CACHE_STAT_INC(field) \
-	(per_cpu_ptr(rt_cache_stat, _smp_processor_id())->field++)
+	(per_cpu_ptr(rt_cache_stat, raw_smp_processor_id())->field++)
 
 extern struct ip_rt_acct *ip_rt_acct;
 
diff --git a/include/net/snmp.h b/include/net/snmp.h
index a15ab256276e..a36bed8ea210 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -128,18 +128,18 @@ struct linux_mib {
 #define SNMP_STAT_USRPTR(name)	(name[1])
 
 #define SNMP_INC_STATS_BH(mib, field) \
-	(per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field]++)
+	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field]++)
 #define SNMP_INC_STATS_OFFSET_BH(mib, field, offset) \
-	(per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field + (offset)]++)
+	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field + (offset)]++)
 #define SNMP_INC_STATS_USER(mib, field) \
-	(per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field]++)
+	(per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field]++)
 #define SNMP_INC_STATS(mib, field) \
-	(per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]++)
+	(per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]++)
 #define SNMP_DEC_STATS(mib, field) \
-	(per_cpu_ptr(mib[!in_softirq()], _smp_processor_id())->mibs[field]--)
+	(per_cpu_ptr(mib[!in_softirq()], raw_smp_processor_id())->mibs[field]--)
 #define SNMP_ADD_STATS_BH(mib, field, addend) \
-	(per_cpu_ptr(mib[0], _smp_processor_id())->mibs[field] += addend)
+	(per_cpu_ptr(mib[0], raw_smp_processor_id())->mibs[field] += addend)
 #define SNMP_ADD_STATS_USER(mib, field, addend) \
-	(per_cpu_ptr(mib[1], _smp_processor_id())->mibs[field] += addend)
+	(per_cpu_ptr(mib[1], raw_smp_processor_id())->mibs[field] += addend)
 
 #endif
diff --git a/kernel/module.c b/kernel/module.c
index 83b3d376708c..a566745dde62 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -379,7 +379,7 @@ static void module_unload_init(struct module *mod)
 	for (i = 0; i < NR_CPUS; i++)
 		local_set(&mod->ref[i].count, 0);
 	/* Hold reference count during initialization. */
-	local_set(&mod->ref[_smp_processor_id()].count, 1);
+	local_set(&mod->ref[raw_smp_processor_id()].count, 1);
 	/* Backwards compatibility macros put refcount during init. */
 	mod->waiter = current;
 }
diff --git a/kernel/power/smp.c b/kernel/power/smp.c
index cba3584b80fe..457c2302ed42 100644
--- a/kernel/power/smp.c
+++ b/kernel/power/smp.c
@@ -48,11 +48,11 @@ void disable_nonboot_cpus(void)
 {
 	oldmask = current->cpus_allowed;
 	set_cpus_allowed(current, cpumask_of_cpu(0));
-	printk("Freezing CPUs (at %d)", _smp_processor_id());
+	printk("Freezing CPUs (at %d)", raw_smp_processor_id());
 	current->state = TASK_INTERRUPTIBLE;
 	schedule_timeout(HZ);
 	printk("...");
-	BUG_ON(_smp_processor_id() != 0);
+	BUG_ON(raw_smp_processor_id() != 0);
 
 	/* FIXME: for this to work, all the CPUs must be running
 	 * "idle" thread (or we deadlock). Is that guaranteed? */
diff --git a/kernel/sched.c b/kernel/sched.c
index f12a0c8a7d98..deca041fc364 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3814,7 +3814,7 @@ EXPORT_SYMBOL(yield);
  */
 void __sched io_schedule(void)
 {
-	struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 
 	atomic_inc(&rq->nr_iowait);
 	schedule();
@@ -3825,7 +3825,7 @@ EXPORT_SYMBOL(io_schedule);
 
 long __sched io_schedule_timeout(long timeout)
 {
-	struct runqueue *rq = &per_cpu(runqueues, _smp_processor_id());
+	struct runqueue *rq = &per_cpu(runqueues, raw_smp_processor_id());
 	long ret;
 
 	atomic_inc(&rq->nr_iowait);
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 6116b25aa7cf..84a9d18aa8da 100644
--- a/kernel/stop_machine.c
+++ b/kernel/stop_machine.c
@@ -100,7 +100,7 @@ static int stop_machine(void)
 	stopmachine_state = STOPMACHINE_WAIT;
 
 	for_each_online_cpu(i) {
-		if (i == _smp_processor_id())
+		if (i == raw_smp_processor_id())
 			continue;
 		ret = kernel_thread(stopmachine, (void *)(long)i,CLONE_KERNEL);
 		if (ret < 0)
@@ -182,7 +182,7 @@ struct task_struct *__stop_machine_run(int (*fn)(void *), void *data,
 
 	/* If they don't care which CPU fn runs on, bind to any online one. */
 	if (cpu == NR_CPUS)
-		cpu = _smp_processor_id();
+		cpu = raw_smp_processor_id();
 
 	p = kthread_create(do_stop, &smdata, "kstopmachine");
 	if (!IS_ERR(p)) {
diff --git a/lib/Makefile b/lib/Makefile
index 9eccea9429a7..5f10cb898407 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -20,6 +20,7 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
 lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
 obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
+obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
 
 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
   lib-y += dec_and_lock.o
diff --git a/lib/kernel_lock.c b/lib/kernel_lock.c
index 99b0ae3d51dd..bd2bc5d887b8 100644
--- a/lib/kernel_lock.c
+++ b/lib/kernel_lock.c
@@ -9,61 +9,6 @@
 #include <linux/module.h>
 #include <linux/kallsyms.h>
 
-#if defined(CONFIG_PREEMPT) && defined(__smp_processor_id) && \
-		defined(CONFIG_DEBUG_PREEMPT)
-
-/*
- * Debugging check.
- */
-unsigned int smp_processor_id(void)
-{
-	unsigned long preempt_count = preempt_count();
-	int this_cpu = __smp_processor_id();
-	cpumask_t this_mask;
-
-	if (likely(preempt_count))
-		goto out;
-
-	if (irqs_disabled())
-		goto out;
-
-	/*
-	 * Kernel threads bound to a single CPU can safely use
-	 * smp_processor_id():
-	 */
-	this_mask = cpumask_of_cpu(this_cpu);
-
-	if (cpus_equal(current->cpus_allowed, this_mask))
-		goto out;
-
-	/*
-	 * It is valid to assume CPU-locality during early bootup:
-	 */
-	if (system_state != SYSTEM_RUNNING)
-		goto out;
-
-	/*
-	 * Avoid recursion:
-	 */
-	preempt_disable();
-
-	if (!printk_ratelimit())
-		goto out_enable;
-
-	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
-	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
-	dump_stack();
-
-out_enable:
-	preempt_enable_no_resched();
-out:
-	return this_cpu;
-}
-
-EXPORT_SYMBOL(smp_processor_id);
-
-#endif /* PREEMPT && __smp_processor_id && DEBUG_PREEMPT */
-
 #ifdef CONFIG_PREEMPT_BKL
 /*
  * The 'big kernel semaphore'
diff --git a/lib/smp_processor_id.c b/lib/smp_processor_id.c
new file mode 100644
index 000000000000..42c08ef828c5
--- /dev/null
+++ b/lib/smp_processor_id.c
@@ -0,0 +1,55 @@
+/*
+ * lib/smp_processor_id.c
+ *
+ * DEBUG_PREEMPT variant of smp_processor_id().
+ */
+#include <linux/module.h>
+#include <linux/kallsyms.h>
+
+unsigned int debug_smp_processor_id(void)
+{
+	unsigned long preempt_count = preempt_count();
+	int this_cpu = raw_smp_processor_id();
+	cpumask_t this_mask;
+
+	if (likely(preempt_count))
+		goto out;
+
+	if (irqs_disabled())
+		goto out;
+
+	/*
+	 * Kernel threads bound to a single CPU can safely use
+	 * smp_processor_id():
+	 */
+	this_mask = cpumask_of_cpu(this_cpu);
+
+	if (cpus_equal(current->cpus_allowed, this_mask))
+		goto out;
+
+	/*
+	 * It is valid to assume CPU-locality during early bootup:
+	 */
+	if (system_state != SYSTEM_RUNNING)
+		goto out;
+
+	/*
+	 * Avoid recursion:
+	 */
+	preempt_disable();
+
+	if (!printk_ratelimit())
+		goto out_enable;
+
+	printk(KERN_ERR "BUG: using smp_processor_id() in preemptible [%08x] code: %s/%d\n", preempt_count(), current->comm, current->pid);
+	print_symbol("caller is %s\n", (long)__builtin_return_address(0));
+	dump_stack();
+
+out_enable:
+	preempt_enable_no_resched();
+out:
+	return this_cpu;
+}
+
+EXPORT_SYMBOL(debug_smp_processor_id);
+