author    Glauber de Oliveira Costa <gcosta@redhat.com>    2008-03-19 13:25:05 -0400
committer Ingo Molnar <mingo@elte.hu>    2008-04-17 11:41:00 -0400
commit    1d89a7f072d4f76f0538edfb474d527066ee7838 (patch)
tree      8d9ae2788f90923a3c31c5fc6b8400b2387c416a
parent    f7401f7fe653f90f8f80a241840b9b499779e87d (diff)
x86: merge smp_store_cpu_info
Now that it is the same between arches, put it into smpboot.c.

Signed-off-by: Glauber Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
-rw-r--r--  arch/x86/kernel/smpboot.c      77
-rw-r--r--  arch/x86/kernel/smpboot_32.c   71
-rw-r--r--  arch/x86/kernel/smpboot_64.c   15
-rw-r--r--  include/asm-x86/smp.h           2
-rw-r--r--  include/asm-x86/smp_32.h        2
5 files changed, 80 insertions(+), 87 deletions(-)
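Context for readers skimming the diff below: the merged helper copies boot_cpu_data into the target CPU's cpu_data() slot, records the CPU index, re-identifies secondary CPUs, and then runs the (now 32-bit-only) stepping/MP-capability quirks. The standalone C sketch that follows mirrors that copy/index/identify/quirk sequence outside the kernel; struct cpu_info, identify_secondary() and apply_quirks() are simplified stand-ins invented for illustration, not the kernel's real types or helpers.

    #include <stdio.h>

    #define NR_CPUS 4

    /* Simplified stand-in for the kernel's struct cpuinfo_x86. */
    struct cpu_info {
            int family;
            int model;
            int stepping;
            int cpu_index;
    };

    static struct cpu_info boot_cpu_data = { .family = 6, .model = 7, .stepping = 1 };
    static struct cpu_info cpu_data[NR_CPUS];

    /* Stand-in for identify_secondary_cpu(): re-probe details on the booting CPU. */
    static void identify_secondary(struct cpu_info *c)
    {
            (void)c;        /* the real code re-runs CPU identification here */
    }

    /* Stand-in for smp_apply_quirks(): flag known-problematic steppings. */
    static void apply_quirks(const struct cpu_info *c)
    {
            if (c->family == 5 && c->model <= 3 &&
                c->stepping >= 1 && c->stepping <= 4)
                    printf("cpu %d: B-step Pentium quirk applies\n", c->cpu_index);
    }

    /* Mirrors the flow of the merged smp_store_cpu_info(): copy, index, identify, quirk. */
    static void store_cpu_info(int id)
    {
            struct cpu_info *c = &cpu_data[id];

            *c = boot_cpu_data;
            c->cpu_index = id;
            if (id != 0)
                    identify_secondary(c);
            apply_quirks(c);
    }

    int main(void)
    {
            for (int id = 0; id < NR_CPUS; id++)
                    store_cpu_info(id);
            printf("stored cpu info for %d cpus\n", NR_CPUS);
            return 0;
    }

That shape also explains the merge itself: the copy/index/identify steps were already identical on 32-bit and 64-bit, so only the quirk pass needs a CONFIG_X86_32 guard once both copies are folded into smpboot.c.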
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index b13b9d55f9ce..a157a5245923 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -45,6 +45,83 @@ unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);
 /* representing cpus for which sibling maps can be computed */
 static cpumask_t cpu_sibling_setup_map;
 
+#ifdef CONFIG_X86_32
+/* Set if we find a B stepping CPU */
+int __cpuinitdata smp_b_stepping;
+#endif
+
+static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
+{
+#ifdef CONFIG_X86_32
+        /*
+         * Mask B, Pentium, but not Pentium MMX
+         */
+        if (c->x86_vendor == X86_VENDOR_INTEL &&
+            c->x86 == 5 &&
+            c->x86_mask >= 1 && c->x86_mask <= 4 &&
+            c->x86_model <= 3)
+                /*
+                 * Remember we have B step Pentia with bugs
+                 */
+                smp_b_stepping = 1;
+
+        /*
+         * Certain Athlons might work (for various values of 'work') in SMP
+         * but they are not certified as MP capable.
+         */
+        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
+
+                if (num_possible_cpus() == 1)
+                        goto valid_k7;
+
+                /* Athlon 660/661 is valid. */
+                if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
+                    (c->x86_mask == 1)))
+                        goto valid_k7;
+
+                /* Duron 670 is valid */
+                if ((c->x86_model == 7) && (c->x86_mask == 0))
+                        goto valid_k7;
+
+                /*
+                 * Athlon 662, Duron 671, and Athlon >model 7 have capability
+                 * bit. It's worth noting that the A5 stepping (662) of some
+                 * Athlon XP's have the MP bit set.
+                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
+                 * more.
+                 */
+                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
+                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
+                    (c->x86_model > 7))
+                        if (cpu_has_mp)
+                                goto valid_k7;
+
+                /* If we get here, not a certified SMP capable AMD system. */
+                add_taint(TAINT_UNSAFE_SMP);
+        }
+
+valid_k7:
+        ;
+#endif
+}
+
+/*
+ * The bootstrap kernel entry code has set these up. Save them for
+ * a given CPU
+ */
+
+void __cpuinit smp_store_cpu_info(int id)
+{
+        struct cpuinfo_x86 *c = &cpu_data(id);
+
+        *c = boot_cpu_data;
+        c->cpu_index = id;
+        if (id != 0)
+                identify_secondary_cpu(c);
+        smp_apply_quirks(c);
+}
+
+
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
         int i;
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index e05006416d8c..0bfb31e13540 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -59,8 +59,7 @@
 #include <asm/vmi.h>
 #include <asm/mtrr.h>
 
-/* Set if we find a B stepping CPU */
-static int __cpuinitdata smp_b_stepping;
+extern int smp_b_stepping;
 
 static cpumask_t smp_commenced_mask;
 
@@ -78,74 +77,6 @@ static void map_cpu_to_logical_apicid(void);
 /* State of each CPU. */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
-static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
-{
-        /*
-         * Mask B, Pentium, but not Pentium MMX
-         */
-        if (c->x86_vendor == X86_VENDOR_INTEL &&
-            c->x86 == 5 &&
-            c->x86_mask >= 1 && c->x86_mask <= 4 &&
-            c->x86_model <= 3)
-                /*
-                 * Remember we have B step Pentia with bugs
-                 */
-                smp_b_stepping = 1;
-
-        /*
-         * Certain Athlons might work (for various values of 'work') in SMP
-         * but they are not certified as MP capable.
-         */
-        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {
-
-                if (num_possible_cpus() == 1)
-                        goto valid_k7;
-
-                /* Athlon 660/661 is valid. */
-                if ((c->x86_model==6) && ((c->x86_mask==0) || (c->x86_mask==1)))
-                        goto valid_k7;
-
-                /* Duron 670 is valid */
-                if ((c->x86_model==7) && (c->x86_mask==0))
-                        goto valid_k7;
-
-                /*
-                 * Athlon 662, Duron 671, and Athlon >model 7 have capability bit.
-                 * It's worth noting that the A5 stepping (662) of some Athlon XP's
-                 * have the MP bit set.
-                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for more.
-                 */
-                if (((c->x86_model==6) && (c->x86_mask>=2)) ||
-                    ((c->x86_model==7) && (c->x86_mask>=1)) ||
-                    (c->x86_model> 7))
-                        if (cpu_has_mp)
-                                goto valid_k7;
-
-                /* If we get here, it's not a certified SMP capable AMD system. */
-                add_taint(TAINT_UNSAFE_SMP);
-        }
-
-valid_k7:
-        ;
-
-}
-
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-
-void __cpuinit smp_store_cpu_info(int id)
-{
-        struct cpuinfo_x86 *c = &cpu_data(id);
-
-        *c = boot_cpu_data;
-        c->cpu_index = id;
-        if (id != 0)
-                identify_secondary_cpu(c);
-        smp_apply_quirks(c);
-}
-
 static atomic_t init_deasserted;
 
 static void __cpuinit smp_callin(void)
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index f84e30da521a..c213345ca2f5 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -85,21 +85,6 @@ struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ;
 #define set_idle_for_cpu(x,p) (idle_thread_array[(x)] = (p))
 #endif
 
-/*
- * The bootstrap kernel entry code has set these up. Save them for
- * a given CPU
- */
-
-static void __cpuinit smp_store_cpu_info(int id)
-{
-        struct cpuinfo_x86 *c = &cpu_data(id);
-
-        *c = boot_cpu_data;
-        c->cpu_index = id;
-        if (id != 0)
-                identify_secondary_cpu(c);
-}
-
 static inline void wait_for_init_deassert(atomic_t *deassert)
 {
         while (!atomic_read(deassert))
diff --git a/include/asm-x86/smp.h b/include/asm-x86/smp.h
index 4dc271b43767..b4c5143d7f8d 100644
--- a/include/asm-x86/smp.h
+++ b/include/asm-x86/smp.h
@@ -88,6 +88,8 @@ extern void prefill_possible_map(void);
 
 #define SMP_TRAMPOLINE_BASE 0x6000
 extern unsigned long setup_trampoline(void);
+
+void smp_store_cpu_info(int id);
 #endif
 
 #ifdef CONFIG_X86_32
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 76740def6092..51624abda43a 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -42,8 +42,6 @@ DECLARE_PER_CPU(int, cpu_number);
 
 extern int safe_smp_processor_id(void);
 
-void __cpuinit smp_store_cpu_info(int id);
-
 /* We don't mark CPUs online until __cpu_up(), so we need another measure */
 static inline int num_booting_cpus(void)
 {