Diffstat (limited to 'arch')
-rw-r--r--  arch/i386/kernel/apic.c                 | 14
-rw-r--r--  arch/i386/kernel/cpu/common.c           | 30
-rw-r--r--  arch/i386/kernel/cpu/intel.c            | 12
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c  |  4
-rw-r--r--  arch/i386/kernel/cpu/mcheck/mce.c       |  2
-rw-r--r--  arch/i386/kernel/cpu/mcheck/p5.c        |  2
-rw-r--r--  arch/i386/kernel/process.c              |  2
-rw-r--r--  arch/i386/kernel/setup.c                |  2
-rw-r--r--  arch/i386/kernel/smpboot.c              | 18
-rw-r--r--  arch/i386/kernel/timers/timer_tsc.c     |  2
10 files changed, 44 insertions(+), 44 deletions(-)
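Every hunk below makes the same kind of change: CPU bring-up functions and data lose their __init/__initdata annotations in favour of __devinit/__devinitdata, so they are not discarded after boot and can still be called when a processor is brought up later. As a rough sketch of the section machinery this relies on (an illustrative approximation of the 2.6-era <linux/init.h> definitions, not part of this commit):

/* Illustrative approximation, not taken from this diff:
 * __init code lands in .init.text, which the kernel frees once boot
 * completes.  __devinit only collapses to __init when hotplug support
 * is compiled out, so with CONFIG_HOTPLUG enabled these functions stay
 * resident and remain callable for late CPU/device bring-up.
 */
#define __init		__attribute__((__section__(".init.text")))
#define __initdata	__attribute__((__section__(".init.data")))

#ifdef CONFIG_HOTPLUG
#define __devinit
#define __devinitdata
#else
#define __devinit	__init
#define __devinitdata	__initdata
#endif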
diff --git a/arch/i386/kernel/apic.c b/arch/i386/kernel/apic.c
index a28a088f3e75..b905d7bb9a0d 100644
--- a/arch/i386/kernel/apic.c
+++ b/arch/i386/kernel/apic.c
@@ -364,7 +364,7 @@ void __init init_bsp_APIC(void)
 	apic_write_around(APIC_LVT1, value);
 }
 
-void __init setup_local_APIC (void)
+void __devinit setup_local_APIC(void)
 {
 	unsigned long oldvalue, value, ver, maxlvt;
 
@@ -635,7 +635,7 @@ static struct sys_device device_lapic = {
 	.cls		= &lapic_sysclass,
 };
 
-static void __init apic_pm_activate(void)
+static void __devinit apic_pm_activate(void)
 {
 	apic_pm_state.active = 1;
 }
@@ -856,7 +856,7 @@ fake_ioapic_page:
  * but we do not accept timer interrupts yet. We only allow the BP
  * to calibrate.
  */
-static unsigned int __init get_8254_timer_count(void)
+static unsigned int __devinit get_8254_timer_count(void)
 {
 	extern spinlock_t i8253_lock;
 	unsigned long flags;
@@ -875,7 +875,7 @@ static unsigned int __init get_8254_timer_count(void)
 }
 
 /* next tick in 8254 can be caught by catching timer wraparound */
-static void __init wait_8254_wraparound(void)
+static void __devinit wait_8254_wraparound(void)
 {
 	unsigned int curr_count, prev_count;
 
@@ -895,7 +895,7 @@ static void __init wait_8254_wraparound(void)
  * Default initialization for 8254 timers. If we use other timers like HPET,
  * we override this later
  */
-void (*wait_timer_tick)(void) __initdata = wait_8254_wraparound;
+void (*wait_timer_tick)(void) __devinitdata = wait_8254_wraparound;
 
 /*
  * This function sets up the local APIC timer, with a timeout of
@@ -931,7 +931,7 @@ static void __setup_APIC_LVTT(unsigned int clocks)
 	apic_write_around(APIC_TMICT, clocks/APIC_DIVISOR);
 }
 
-static void __init setup_APIC_timer(unsigned int clocks)
+static void __devinit setup_APIC_timer(unsigned int clocks)
 {
 	unsigned long flags;
 
@@ -1044,7 +1044,7 @@ void __init setup_boot_APIC_clock(void)
 	local_irq_enable();
 }
 
-void __init setup_secondary_APIC_clock(void)
+void __devinit setup_secondary_APIC_clock(void)
 {
 	setup_APIC_timer(calibration_result);
 }
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index d58e169fbdbb..aac74758caf4 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -24,9 +24,9 @@ EXPORT_PER_CPU_SYMBOL(cpu_gdt_table);
 DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
 EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
 
-static int cachesize_override __initdata = -1;
-static int disable_x86_fxsr __initdata = 0;
-static int disable_x86_serial_nr __initdata = 1;
+static int cachesize_override __devinitdata = -1;
+static int disable_x86_fxsr __devinitdata = 0;
+static int disable_x86_serial_nr __devinitdata = 1;
 
 struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
 
@@ -59,7 +59,7 @@ static int __init cachesize_setup(char *str)
 }
 __setup("cachesize=", cachesize_setup);
 
-int __init get_model_name(struct cpuinfo_x86 *c)
+int __devinit get_model_name(struct cpuinfo_x86 *c)
 {
 	unsigned int *v;
 	char *p, *q;
@@ -89,7 +89,7 @@ int __init get_model_name(struct cpuinfo_x86 *c)
 }
 
 
-void __init display_cacheinfo(struct cpuinfo_x86 *c)
+void __devinit display_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int n, dummy, ecx, edx, l2size;
 
@@ -130,7 +130,7 @@ void __init display_cacheinfo(struct cpuinfo_x86 *c)
 /* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
 
 /* Look up CPU names by table lookup. */
-static char __init *table_lookup_model(struct cpuinfo_x86 *c)
+static char __devinit *table_lookup_model(struct cpuinfo_x86 *c)
 {
 	struct cpu_model_info *info;
 
@@ -151,7 +151,7 @@ static char __init *table_lookup_model(struct cpuinfo_x86 *c)
 }
 
 
-void __init get_cpu_vendor(struct cpuinfo_x86 *c, int early)
+void __devinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
 {
 	char *v = c->x86_vendor_id;
 	int i;
@@ -202,7 +202,7 @@ static inline int flag_is_changeable_p(u32 flag)
 
 
 /* Probe for the CPUID instruction */
-static int __init have_cpuid_p(void)
+static int __devinit have_cpuid_p(void)
 {
 	return flag_is_changeable_p(X86_EFLAGS_ID);
 }
@@ -249,7 +249,7 @@ static void __init early_cpu_detect(void)
 #endif
 }
 
-void __init generic_identify(struct cpuinfo_x86 * c)
+void __devinit generic_identify(struct cpuinfo_x86 * c)
 {
 	u32 tfms, xlvl;
 	int junk;
@@ -296,7 +296,7 @@ void __init generic_identify(struct cpuinfo_x86 * c)
 	}
 }
 
-static void __init squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
+static void __devinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
 		/* Disable processor serial number */
@@ -324,7 +324,7 @@ __setup("serialnumber", x86_serial_nr_setup);
 /*
  * This does the hard work of actually picking apart the CPU stuff...
  */
-void __init identify_cpu(struct cpuinfo_x86 *c)
+void __devinit identify_cpu(struct cpuinfo_x86 *c)
 {
 	int i;
 
@@ -438,7 +438,7 @@ void __init identify_cpu(struct cpuinfo_x86 *c)
 }
 
 #ifdef CONFIG_X86_HT
-void __init detect_ht(struct cpuinfo_x86 *c)
+void __devinit detect_ht(struct cpuinfo_x86 *c)
 {
 	u32 eax, ebx, ecx, edx;
 	int index_msb, tmp;
@@ -493,7 +493,7 @@ void __init detect_ht(struct cpuinfo_x86 *c)
 }
 #endif
 
-void __init print_cpu_info(struct cpuinfo_x86 *c)
+void __devinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	char *vendor = NULL;
 
@@ -516,7 +516,7 @@ void __init print_cpu_info(struct cpuinfo_x86 *c)
 	printk("\n");
 }
 
-cpumask_t cpu_initialized __initdata = CPU_MASK_NONE;
+cpumask_t cpu_initialized __devinitdata = CPU_MASK_NONE;
 
 /* This is hacky. :)
  * We're emulating future behavior.
@@ -563,7 +563,7 @@ void __init early_cpu_init(void)
  * and IDT. We reload them nevertheless, this function acts as a
  * 'CPU state barrier', nothing should get across.
  */
-void __init cpu_init (void)
+void __devinit cpu_init(void)
 {
 	int cpu = smp_processor_id();
 	struct tss_struct * t = &per_cpu(init_tss, cpu);
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index 121aa2176e69..96a75d045835 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -28,7 +28,7 @@ extern int trap_init_f00f_bug(void);
 struct movsl_mask movsl_mask;
 #endif
 
-void __init early_intel_workaround(struct cpuinfo_x86 *c)
+void __devinit early_intel_workaround(struct cpuinfo_x86 *c)
 {
 	if (c->x86_vendor != X86_VENDOR_INTEL)
 		return;
@@ -43,7 +43,7 @@ void __init early_intel_workaround(struct cpuinfo_x86 *c)
  * This is called before we do cpu ident work
  */
 
-int __init ppro_with_ram_bug(void)
+int __devinit ppro_with_ram_bug(void)
 {
 	/* Uses data from early_cpu_detect now */
 	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
@@ -61,7 +61,7 @@ int __init ppro_with_ram_bug(void)
  * P4 Xeon errata 037 workaround.
  * Hardware prefetcher may cause stale data to be loaded into the cache.
  */
-static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
+static void __devinit Intel_errata_workarounds(struct cpuinfo_x86 *c)
 {
 	unsigned long lo, hi;
 
@@ -80,7 +80,7 @@ static void __init Intel_errata_workarounds(struct cpuinfo_x86 *c)
 /*
  * find out the number of processor cores on the die
  */
-static int __init num_cpu_cores(struct cpuinfo_x86 *c)
+static int __devinit num_cpu_cores(struct cpuinfo_x86 *c)
 {
 	unsigned int eax;
 
@@ -98,7 +98,7 @@ static int __init num_cpu_cores(struct cpuinfo_x86 *c)
 	return 1;
 }
 
-static void __init init_intel(struct cpuinfo_x86 *c)
+static void __devinit init_intel(struct cpuinfo_x86 *c)
 {
 	unsigned int l2 = 0;
 	char *p = NULL;
@@ -204,7 +204,7 @@ static unsigned int intel_size_cache(struct cpuinfo_x86 * c, unsigned int size)
 	return size;
 }
 
-static struct cpu_dev intel_cpu_dev __initdata = {
+static struct cpu_dev intel_cpu_dev __devinitdata = {
 	.c_vendor	= "Intel",
 	.c_ident	= { "GenuineIntel" },
 	.c_models = {
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index a710dc4eb20e..1d768b263269 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -28,7 +28,7 @@ struct _cache_table
 };
 
 /* all the cache descriptor types we care about (no TLB or trace cache entries) */
-static struct _cache_table cache_table[] __initdata =
+static struct _cache_table cache_table[] __devinitdata =
 {
 	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
 	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
@@ -160,7 +160,7 @@ static int __init find_num_cache_leaves(void)
 	return retval;
 }
 
-unsigned int __init init_intel_cacheinfo(struct cpuinfo_x86 *c)
+unsigned int __devinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
 {
 	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0; /* Cache sizes */
 	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
diff --git a/arch/i386/kernel/cpu/mcheck/mce.c b/arch/i386/kernel/cpu/mcheck/mce.c
index bf6d1aefafc0..7218a7341fbc 100644
--- a/arch/i386/kernel/cpu/mcheck/mce.c
+++ b/arch/i386/kernel/cpu/mcheck/mce.c
@@ -31,7 +31,7 @@ static fastcall void unexpected_machine_check(struct pt_regs * regs, long error_
 void fastcall (*machine_check_vector)(struct pt_regs *, long error_code) = unexpected_machine_check;
 
 /* This has to be run for each processor */
-void __init mcheck_init(struct cpuinfo_x86 *c)
+void __devinit mcheck_init(struct cpuinfo_x86 *c)
 {
 	if (mce_disabled==1)
 		return;
diff --git a/arch/i386/kernel/cpu/mcheck/p5.c b/arch/i386/kernel/cpu/mcheck/p5.c
index c45a1b485c80..ec0614cd2925 100644
--- a/arch/i386/kernel/cpu/mcheck/p5.c
+++ b/arch/i386/kernel/cpu/mcheck/p5.c
@@ -29,7 +29,7 @@ static fastcall void pentium_machine_check(struct pt_regs * regs, long error_cod
 }
 
 /* Set up machine check reporting for processors with Intel style MCE */
-void __init intel_p5_mcheck_init(struct cpuinfo_x86 *c)
+void __devinit intel_p5_mcheck_init(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 
diff --git a/arch/i386/kernel/process.c b/arch/i386/kernel/process.c
index c1b11e8df60b..e06f2dc7123d 100644
--- a/arch/i386/kernel/process.c
+++ b/arch/i386/kernel/process.c
@@ -260,7 +260,7 @@ static void mwait_idle(void)
 	}
 }
 
-void __init select_idle_routine(const struct cpuinfo_x86 *c)
+void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
 {
 	if (cpu_has(c, X86_FEATURE_MWAIT)) {
 		printk("monitor/mwait feature present.\n");
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 30406fd0b64c..cba67e4ba0af 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -60,7 +60,7 @@
    address, and must not be in the .bss segment! */
 unsigned long init_pg_tables_end __initdata = ~0UL;
 
-int disable_pse __initdata = 0;
+int disable_pse __devinitdata = 0;
 
 /*
  * Machine setup..
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index 09b4ceb832b2..fb0b200d1d85 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -59,7 +59,7 @@
 #include <smpboot_hooks.h>
 
 /* Set if we find a B stepping CPU */
-static int __initdata smp_b_stepping;
+static int __devinitdata smp_b_stepping;
 
 /* Number of siblings per CPU package */
 int smp_num_siblings = 1;
@@ -118,7 +118,7 @@ DEFINE_PER_CPU(int, cpu_state) = { 0 };
  * has made sure it's suitably aligned.
  */
 
-static unsigned long __init setup_trampoline(void)
+static unsigned long __devinit setup_trampoline(void)
 {
 	memcpy(trampoline_base, trampoline_data, trampoline_end - trampoline_data);
 	return virt_to_phys(trampoline_base);
@@ -148,7 +148,7 @@ void __init smp_alloc_memory(void)
  * a given CPU
  */
 
-static void __init smp_store_cpu_info(int id)
+static void __devinit smp_store_cpu_info(int id)
 {
 	struct cpuinfo_x86 *c = cpu_data + id;
 
@@ -342,7 +342,7 @@ extern void calibrate_delay(void);
 
 static atomic_t init_deasserted;
 
-static void __init smp_callin(void)
+static void __devinit smp_callin(void)
 {
 	int cpuid, phys_id;
 	unsigned long timeout;
@@ -468,7 +468,7 @@ set_cpu_sibling_map(int cpu)
 /*
  * Activate a secondary processor.
  */
-static void __init start_secondary(void *unused)
+static void __devinit start_secondary(void *unused)
 {
 	/*
 	 * Dont put anything before smp_callin(), SMP
@@ -521,7 +521,7 @@ static void __init start_secondary(void *unused)
  * from the task structure
  * This function must not return.
  */
-void __init initialize_secondary(void)
+void __devinit initialize_secondary(void)
 {
 	/*
 	 * We don't actually need to load the full TSS,
@@ -635,7 +635,7 @@ static inline void __inquire_remote_apic(int apicid)
  * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
  * won't ... remember to clear down the APIC, etc later.
  */
-static int __init
+static int __devinit
 wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 {
 	unsigned long send_status = 0, accept_status = 0;
@@ -681,7 +681,7 @@ wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
 #endif	/* WAKE_SECONDARY_VIA_NMI */
 
 #ifdef WAKE_SECONDARY_VIA_INIT
-static int __init
+static int __devinit
 wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 {
 	unsigned long send_status = 0, accept_status = 0;
@@ -817,7 +817,7 @@ wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
 
 extern cpumask_t cpu_initialized;
 
-static int __init do_boot_cpu(int apicid)
+static int __devinit do_boot_cpu(int apicid)
 /*
  * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
  * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
diff --git a/arch/i386/kernel/timers/timer_tsc.c b/arch/i386/kernel/timers/timer_tsc.c
index 54c36b182021..f46e625bab67 100644
--- a/arch/i386/kernel/timers/timer_tsc.c
+++ b/arch/i386/kernel/timers/timer_tsc.c
@@ -33,7 +33,7 @@ static struct timer_opts timer_tsc;
 
 static inline void cpufreq_delayed_get(void);
 
-int tsc_disable __initdata = 0;
+int tsc_disable __devinitdata = 0;
 
 extern spinlock_t i8253_lock;
 