Diffstat (limited to 'arch')
 -rw-r--r--  arch/i386/kernel/init_task.c   | 2
 -rw-r--r--  arch/i386/kernel/irq.c         | 2
 -rw-r--r--  arch/x86_64/kernel/init_task.c | 2
 3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/i386/kernel/init_task.c b/arch/i386/kernel/init_task.c
index 9caa8e8db80c..cff95d10a4d8 100644
--- a/arch/i386/kernel/init_task.c
+++ b/arch/i386/kernel/init_task.c
@@ -42,5 +42,5 @@ EXPORT_SYMBOL(init_task);
  * per-CPU TSS segments. Threads are completely 'soft' on Linux,
  * no more per-task TSS's.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS;
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
 
diff --git a/arch/i386/kernel/irq.c b/arch/i386/kernel/irq.c
index 1a201a932865..f3a9c78c4a24 100644
--- a/arch/i386/kernel/irq.c
+++ b/arch/i386/kernel/irq.c
@@ -19,7 +19,7 @@
 #include <linux/cpu.h>
 #include <linux/delay.h>
 
-DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_maxaligned_in_smp;
+DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
 EXPORT_PER_CPU_SYMBOL(irq_stat);
 
 #ifndef CONFIG_X86_LOCAL_APIC
diff --git a/arch/x86_64/kernel/init_task.c b/arch/x86_64/kernel/init_task.c
index e0ba5c1043fd..ce31d904d601 100644
--- a/arch/x86_64/kernel/init_task.c
+++ b/arch/x86_64/kernel/init_task.c
@@ -44,6 +44,6 @@ EXPORT_SYMBOL(init_task);
  * section. Since TSS's are completely CPU-local, we want them
  * on exact cacheline boundaries, to eliminate cacheline ping-pong.
  */
-DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_maxaligned_in_smp = INIT_TSS;
+DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
 
 #define ALIGN_TO_4K __attribute__((section(".data.init_task")))
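
Note: the only change in each hunk is the alignment annotation on per-CPU data, swapping ____cacheline_maxaligned_in_smp for ____cacheline_internodealigned_in_smp. Both are annotations that, on SMP builds, expand to a GCC aligned attribute so each per-CPU object starts on its own cache-line-sized boundary and updates by one CPU do not bounce a line shared with another CPU's data. The sketch below is illustrative only; the macro name demo_internodealigned, the 64-byte DEMO_CACHE_SHIFT, and struct demo_stat are assumptions for the demo, not the kernel's definitions in include/linux/cache.h.

/*
 * Illustrative sketch only -- not the kernel's actual macro.
 * Shows the general pattern: force each object onto a
 * cache-line-sized boundary so writes to one object never
 * invalidate a line holding another CPU's data.
 * DEMO_CACHE_SHIFT (64-byte lines) is an assumed value.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_CACHE_SHIFT 6
#define demo_internodealigned \
	__attribute__((__aligned__(1 << DEMO_CACHE_SHIFT)))

struct demo_stat {
	unsigned long pending;
};

/* Loosely analogous to the per-CPU definitions changed in the diff above. */
static struct demo_stat stat_cpu0 demo_internodealigned;
static struct demo_stat stat_cpu1 demo_internodealigned;

int main(void)
{
	printf("stat_cpu0 at %p (64-byte aligned: %s)\n", (void *)&stat_cpu0,
	       ((uintptr_t)&stat_cpu0 % (1 << DEMO_CACHE_SHIFT)) ? "no" : "yes");
	printf("stat_cpu1 at %p (64-byte aligned: %s)\n", (void *)&stat_cpu1,
	       ((uintptr_t)&stat_cpu1 % (1 << DEMO_CACHE_SHIFT)) ? "no" : "yes");
	return 0;
}

Built with gcc, both objects land at separate, boundary-aligned addresses; in the kernel the analogous attribute is attached to the DEFINE_PER_CPU definitions shown in the hunks above.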