-rw-r--r--  arch/sparc64/kernel/entry.S       |  84
-rw-r--r--  arch/sparc64/kernel/irq.c         |   4
-rw-r--r--  arch/sparc64/kernel/setup.c       |  56
-rw-r--r--  arch/sparc64/kernel/smp.c         |   9
-rw-r--r--  arch/sparc64/kernel/traps.c       |   4
-rw-r--r--  arch/sparc64/kernel/vmlinux.lds.S |   3
-rw-r--r--  include/asm-sparc64/cpudata.h     |  89
-rw-r--r--  include/asm-sparc64/head.h        |   1
-rw-r--r--  include/asm-sparc64/smp.h         |  28
9 files changed, 144 insertions(+), 134 deletions(-)
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index 563fa4ec33f8..b3511ff5d04a 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -1628,84 +1628,10 @@ __flushw_user:
 2:	retl
 	 nop
 
-	/* Read cpu ID from hardware, return in %g6.
-	 * (callers_pc - 4) is in %g1.  Patched at boot time.
-	 *
-	 * Default is spitfire implementation.
-	 *
-	 * The instruction sequence needs to be 5 instructions
-	 * in order to fit the longest implementation, which is
-	 * currently starfire.
-	 */
-	.align		32
-	.globl		__get_cpu_id
-__get_cpu_id:
-	ldxa		[%g0] ASI_UPA_CONFIG, %g6
-	srlx		%g6, 17, %g6
-	jmpl		%g1 + 0x4, %g0
-	 and		%g6, 0x1f, %g6
-	nop
-
-__get_cpu_id_cheetah_safari:
-	ldxa		[%g0] ASI_SAFARI_CONFIG, %g6
-	srlx		%g6, 17, %g6
-	jmpl		%g1 + 0x4, %g0
-	 and		%g6, 0x3ff, %g6
-	nop
-
-__get_cpu_id_cheetah_jbus:
-	ldxa		[%g0] ASI_JBUS_CONFIG, %g6
-	srlx		%g6, 17, %g6
-	jmpl		%g1 + 0x4, %g0
-	 and		%g6, 0x1f, %g6
-	nop
-
-__get_cpu_id_starfire:
-	sethi		%hi(0x1fff40000d0 >> 9), %g6
-	sllx		%g6, 9, %g6
-	or		%g6, 0xd0, %g6
-	jmpl		%g1 + 0x4, %g0
-	 lduwa		[%g6] ASI_PHYS_BYPASS_EC_E, %g6
-
-	.globl		per_cpu_patch
-per_cpu_patch:
-	sethi		%hi(this_is_starfire), %o0
-	lduw		[%o0 + %lo(this_is_starfire)], %o1
-	sethi		%hi(__get_cpu_id_starfire), %o0
-	brnz,pn		%o1, 10f
-	 or		%o0, %lo(__get_cpu_id_starfire), %o0
-	sethi		%hi(tlb_type), %o0
-	lduw		[%o0 + %lo(tlb_type)], %o1
-	brz,pt		%o1, 11f
-	 nop
-	rdpr		%ver, %o0
-	srlx		%o0, 32, %o0
-	sethi		%hi(0x003e0016), %o1
-	or		%o1, %lo(0x003e0016), %o1
-	cmp		%o0, %o1
-	sethi		%hi(__get_cpu_id_cheetah_jbus), %o0
-	be,pn		%icc, 10f
-	 or		%o0, %lo(__get_cpu_id_cheetah_jbus), %o0
-	sethi		%hi(__get_cpu_id_cheetah_safari), %o0
-	or		%o0, %lo(__get_cpu_id_cheetah_safari), %o0
-10:
-	sethi		%hi(__get_cpu_id), %o1
-	or		%o1, %lo(__get_cpu_id), %o1
-	lduw		[%o0 + 0x00], %o2
-	stw		%o2, [%o1 + 0x00]
-	flush		%o1 + 0x00
-	lduw		[%o0 + 0x04], %o2
-	stw		%o2, [%o1 + 0x04]
-	flush		%o1 + 0x04
-	lduw		[%o0 + 0x08], %o2
-	stw		%o2, [%o1 + 0x08]
-	flush		%o1 + 0x08
-	lduw		[%o0 + 0x0c], %o2
-	stw		%o2, [%o1 + 0x0c]
-	flush		%o1 + 0x0c
-	lduw		[%o0 + 0x10], %o2
-	stw		%o2, [%o1 + 0x10]
-	flush		%o1 + 0x10
-11:
+#ifdef CONFIG_SMP
+	.globl		hard_smp_processor_id
+hard_smp_processor_id:
+	__GET_CPUID(%o0)
 	retl
 	 nop
+#endif
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 3e48af2769d4..d069a6feb535 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -39,6 +39,7 @@
 #include <asm/cache.h>
 #include <asm/cpudata.h>
 #include <asm/auxio.h>
+#include <asm/head.h>
 
 #ifdef CONFIG_SMP
 static void distribute_irqs(void);
@@ -153,7 +154,8 @@ void enable_irq(unsigned int irq)
 		unsigned long ver;
 
 		__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-		if ((ver >> 32) == 0x003e0016) {
+		if ((ver >> 32) == __JALAPENO_ID ||
+		    (ver >> 32) == __SERRANO_ID) {
 			/* We set it to our JBUS ID. */
 			__asm__ __volatile__("ldxa [%%g0] %1, %0"
 					     : "=r" (tid)
diff --git a/arch/sparc64/kernel/setup.c b/arch/sparc64/kernel/setup.c
index 59a70301a6cf..f751d11926bc 100644
--- a/arch/sparc64/kernel/setup.c
+++ b/arch/sparc64/kernel/setup.c
@@ -490,6 +490,58 @@ void register_prom_callbacks(void)
490 "' linux-.soft2 to .soft2"); 490 "' linux-.soft2 to .soft2");
491} 491}
492 492
493static void __init per_cpu_patch(void)
494{
495#ifdef CONFIG_SMP
496 struct cpuid_patch_entry *p;
497 unsigned long ver;
498 int is_jbus;
499
500 if (tlb_type == spitfire && !this_is_starfire)
501 return;
502
503 __asm__ ("rdpr %%ver, %0" : "=r" (ver));
504 is_jbus = ((ver >> 32) == __JALAPENO_ID ||
505 (ver >> 32) == __SERRANO_ID);
506
507 p = &__cpuid_patch;
508 while (p < &__cpuid_patch_end) {
509 unsigned long addr = p->addr;
510 unsigned int *insns;
511
512 switch (tlb_type) {
513 case spitfire:
514 insns = &p->starfire[0];
515 break;
516 case cheetah:
517 case cheetah_plus:
518 if (is_jbus)
519 insns = &p->cheetah_jbus[0];
520 else
521 insns = &p->cheetah_safari[0];
522 break;
523 default:
524 prom_printf("Unknown cpu type, halting.\n");
525 prom_halt();
526 };
527
528 *(unsigned int *) (addr + 0) = insns[0];
529 __asm__ __volatile__("flush %0" : : "r" (addr + 0));
530
531 *(unsigned int *) (addr + 4) = insns[1];
532 __asm__ __volatile__("flush %0" : : "r" (addr + 4));
533
534 *(unsigned int *) (addr + 8) = insns[2];
535 __asm__ __volatile__("flush %0" : : "r" (addr + 8));
536
537 *(unsigned int *) (addr + 12) = insns[3];
538 __asm__ __volatile__("flush %0" : : "r" (addr + 12));
539
540 p++;
541 }
542#endif
543}
544
493void __init setup_arch(char **cmdline_p) 545void __init setup_arch(char **cmdline_p)
494{ 546{
495 /* Initialize PROM console and command line. */ 547 /* Initialize PROM console and command line. */
@@ -507,8 +559,8 @@ void __init setup_arch(char **cmdline_p)
 	/* Work out if we are starfire early on */
 	check_if_starfire();
 
-	/* Now we know enough to patch the __get_cpu_id()
-	 * trampoline used by trap code.
+	/* Now we know enough to patch the get_cpuid sequences
+	 * used by trap code.
 	 */
 	per_cpu_patch();
 
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 0e7552546d36..16b8eca9754e 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -424,7 +424,7 @@ static __inline__ void spitfire_xcall_deliver(u64 data0, u64 data1, u64 data2, c
 static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mask)
 {
 	u64 pstate, ver;
-	int nack_busy_id, is_jalapeno;
+	int nack_busy_id, is_jbus;
 
 	if (cpus_empty(mask))
 		return;
@@ -434,7 +434,8 @@ static void cheetah_xcall_deliver(u64 data0, u64 data1, u64 data2, cpumask_t mas
 	 * derivative processor.
 	 */
 	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-	is_jalapeno = ((ver >> 32) == 0x003e0016);
+	is_jbus = ((ver >> 32) == __JALAPENO_ID ||
+		   (ver >> 32) == __SERRANO_ID);
 
 	__asm__ __volatile__("rdpr %%pstate, %0" : "=r" (pstate));
 
@@ -459,7 +460,7 @@ retry:
 		for_each_cpu_mask(i, mask) {
 			u64 target = (i << 14) | 0x70;
 
-			if (!is_jalapeno)
+			if (!is_jbus)
 				target |= (nack_busy_id << 24);
 			__asm__ __volatile__(
 				"stxa %%g0, [%0] %1\n\t"
@@ -512,7 +513,7 @@ retry:
 		for_each_cpu_mask(i, mask) {
 			u64 check_mask;
 
-			if (is_jalapeno)
+			if (is_jbus)
 				check_mask = (0x2UL << (2*i));
 			else
 				check_mask = (0x2UL <<
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 7e52e8972668..1c4744c047ab 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -38,6 +38,7 @@
 #include <asm/processor.h>
 #include <asm/timer.h>
 #include <asm/kdebug.h>
+#include <asm/head.h>
 #ifdef CONFIG_KMOD
 #include <linux/kmod.h>
 #endif
@@ -788,7 +789,8 @@ void __init cheetah_ecache_flush_init(void)
 		cheetah_error_log[i].afsr = CHAFSR_INVALID;
 
 	__asm__ ("rdpr %%ver, %0" : "=r" (ver));
-	if ((ver >> 32) == 0x003e0016) {
+	if ((ver >> 32) == __JALAPENO_ID ||
+	    (ver >> 32) == __SERRANO_ID) {
 		cheetah_error_table = &__jalapeno_error_table[0];
 		cheetah_afsr_errors = JPAFSR_ERRORS;
 	} else if ((ver >> 32) == 0x003e0015) {
diff --git a/arch/sparc64/kernel/vmlinux.lds.S b/arch/sparc64/kernel/vmlinux.lds.S
index 71b943f1c9b1..1639d9c935c3 100644
--- a/arch/sparc64/kernel/vmlinux.lds.S
+++ b/arch/sparc64/kernel/vmlinux.lds.S
@@ -74,6 +74,9 @@ SECTIONS
   __tsb_phys_patch = .;
   .tsb_phys_patch : { *(.tsb_phys_patch) }
   __tsb_phys_patch_end = .;
+  __cpuid_patch = .;
+  .cpuid_patch : { *(.cpuid_patch) }
+  __cpuid_patch_end = .;
   . = ALIGN(8192);
   __initramfs_start = .;
   .init.ramfs : { *(.init.ramfs) }
diff --git a/include/asm-sparc64/cpudata.h b/include/asm-sparc64/cpudata.h
index f83768883e98..da54b4f35403 100644
--- a/include/asm-sparc64/cpudata.h
+++ b/include/asm-sparc64/cpudata.h
@@ -60,9 +60,18 @@ struct trap_per_cpu {
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
 extern void init_cur_cpu_trap(void);
-extern void per_cpu_patch(void);
 extern void setup_tba(void);
 
+#ifdef CONFIG_SMP
+struct cpuid_patch_entry {
+	unsigned int	addr;
+	unsigned int	cheetah_safari[4];
+	unsigned int	cheetah_jbus[4];
+	unsigned int	starfire[4];
+};
+extern struct cpuid_patch_entry __cpuid_patch, __cpuid_patch_end;
+#endif
+
 #endif /* !(__ASSEMBLY__) */
 
 #define TRAP_PER_CPU_THREAD	0x00
@@ -70,35 +79,58 @@ extern void setup_tba(void);
 
 #define TRAP_BLOCK_SZ_SHIFT	6
 
-/* Clobbers %g1, loads %g6 with local processor's cpuid */
-#define __GET_CPUID \
-	ba,pt	%xcc, __get_cpu_id; \
-	 rd	%pc, %g1;
+#ifdef CONFIG_SMP
+
+#define __GET_CPUID(REG)				\
+	/* Spitfire implementation (default). */	\
+661:	ldxa		[%g0] ASI_UPA_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	.section	.cpuid_patch, "ax";		\
+	/* Instruction location. */			\
+	.word		661b;				\
+	/* Cheetah Safari implementation. */		\
+	ldxa		[%g0] ASI_SAFARI_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x3ff, REG;		\
+	nop;						\
+	/* Cheetah JBUS implementation. */		\
+	ldxa		[%g0] ASI_JBUS_CONFIG, REG;	\
+	srlx		REG, 17, REG;			\
+	and		REG, 0x1f, REG;			\
+	nop;						\
+	/* Starfire implementation. */			\
+	sethi		%hi(0x1fff40000d0 >> 9), REG;	\
+	sllx		REG, 9, REG;			\
+	or		REG, 0xd0, REG;			\
+	lduwa		[REG] ASI_PHYS_BYPASS_EC_E, REG;\
+	.previous;
 
 /* Clobbers %g1, current address space PGD phys address into %g7. */
 #define TRAP_LOAD_PGD_PHYS			\
-	__GET_CPUID				\
-	sllx	%g6, TRAP_BLOCK_SZ_SHIFT, %g6;	\
+	__GET_CPUID(%g1)			\
 	sethi	%hi(trap_block), %g7;		\
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1;	\
 	or	%g7, %lo(trap_block), %g7;	\
-	add	%g7, %g6, %g7;			\
+	add	%g7, %g1, %g7;			\
 	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7;
 
 /* Clobbers %g1, loads local processor's IRQ work area into %g6. */
 #define TRAP_LOAD_IRQ_WORK			\
-	__GET_CPUID				\
-	sethi	%hi(__irq_work), %g1;		\
-	sllx	%g6, 6, %g6;			\
-	or	%g1, %lo(__irq_work), %g1;	\
-	add	%g1, %g6, %g6;
+	__GET_CPUID(%g1)			\
+	sethi	%hi(__irq_work), %g6;		\
+	sllx	%g1, 6, %g1;			\
+	or	%g6, %lo(__irq_work), %g6;	\
+	add	%g6, %g1, %g6;
 
 /* Clobbers %g1, loads %g6 with current thread info pointer. */
 #define TRAP_LOAD_THREAD_REG			\
-	__GET_CPUID				\
-	sllx	%g6, TRAP_BLOCK_SZ_SHIFT, %g6;	\
-	sethi	%hi(trap_block), %g1;		\
-	or	%g1, %lo(trap_block), %g1;	\
-	ldx	[%g1 + %g6], %g6;
+	__GET_CPUID(%g1)			\
+	sethi	%hi(trap_block), %g6;		\
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1;	\
+	or	%g6, %lo(trap_block), %g6;	\
+	ldx	[%g6 + %g1], %g6;
 
 /* Given the current thread info pointer in %g6, load the per-cpu
  * area base of the current processor into %g5.  REG1, REG2, and REG3 are
@@ -109,7 +141,6 @@ extern void setup_tba(void);
  * trap will load the fully resolved %g5 per-cpu base.  This can corrupt
  * the calculations done by the macro mid-stream.
  */
-#ifdef CONFIG_SMP
 #define LOAD_PER_CPU_BASE(REG1, REG2, REG3)	\
 	ldub	[%g6 + TI_CPU], REG1;		\
 	sethi	%hi(__per_cpu_shift), REG3;	\
@@ -118,8 +149,26 @@ extern void setup_tba(void);
 	ldx	[REG2 + %lo(__per_cpu_base)], REG2;	\
 	sllx	REG1, REG3, REG3;		\
 	add	REG3, REG2, %g5;
+
 #else
+
+/* Uniprocessor versions, we know the cpuid is zero. */
+#define TRAP_LOAD_PGD_PHYS			\
+	sethi	%hi(trap_block), %g7;		\
+	or	%g7, %lo(trap_block), %g7;	\
+	ldx	[%g7 + TRAP_PER_CPU_PGD_PADDR], %g7;
+
+#define TRAP_LOAD_IRQ_WORK			\
+	sethi	%hi(__irq_work), %g6;		\
+	or	%g6, %lo(__irq_work), %g6;
+
+#define TRAP_LOAD_THREAD_REG			\
+	sethi	%hi(trap_block), %g6;		\
+	ldx	[%g6 + %lo(trap_block)], %g6;
+
+/* No per-cpu areas on uniprocessor, so no need to load %g5. */
 #define LOAD_PER_CPU_BASE(REG1, REG2, REG3)
-#endif
+
+#endif /* !(CONFIG_SMP) */
 
 #endif /* _SPARC64_CPUDATA_H */
diff --git a/include/asm-sparc64/head.h b/include/asm-sparc64/head.h
index 0abd3a674e8f..731c842f3d11 100644
--- a/include/asm-sparc64/head.h
+++ b/include/asm-sparc64/head.h
@@ -10,6 +10,7 @@
 
 #define __CHEETAH_ID	0x003e0014
 #define __JALAPENO_ID	0x003e0016
+#define __SERRANO_ID	0x003e0022
 
 #define CHEETAH_MANUF		0x003e
 #define CHEETAH_IMPL		0x0014 /* Ultra-III */
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index 473edb2603ec..ad1d35a7d13f 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -37,33 +37,7 @@ extern cpumask_t phys_cpu_present_map;
  * General functions that each host system must provide.
  */
 
-static __inline__ int hard_smp_processor_id(void)
-{
-	if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		unsigned long cfg, ver;
-		__asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver));
-		if ((ver >> 32) == 0x003e0016) {
-			__asm__ __volatile__("ldxa [%%g0] %1, %0"
-					     : "=r" (cfg)
-					     : "i" (ASI_JBUS_CONFIG));
-			return ((cfg >> 17) & 0x1f);
-		} else {
-			__asm__ __volatile__("ldxa [%%g0] %1, %0"
-					     : "=r" (cfg)
-					     : "i" (ASI_SAFARI_CONFIG));
-			return ((cfg >> 17) & 0x3ff);
-		}
-	} else if (this_is_starfire != 0) {
-		return starfire_hard_smp_processor_id();
-	} else {
-		unsigned long upaconfig;
-		__asm__ __volatile__("ldxa [%%g0] %1, %0"
-				     : "=r" (upaconfig)
-				     : "i" (ASI_UPA_CONFIG));
-		return ((upaconfig >> 17) & 0x1f);
-	}
-}
-
+extern int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
 extern void smp_setup_cpu_possible_map(void);