author		Ingo Molnar <mingo@elte.hu>	2009-01-21 04:39:51 -0500
committer	Ingo Molnar <mingo@elte.hu>	2009-01-21 04:39:51 -0500
commit		198030782cedf25391e67e7c88b04f87a5eb6563
tree		5b7368c6bf052bcb4bb273497a57900720d36f51 /arch/x86/mm/tlb.c
parent		4ec71fa2d2c3f1040348f2604f4b8ccc833d1c2e
parent		92181f190b649f7ef2b79cbf5c00f26ccc66da2a
Merge branch 'x86/mm' into core/percpu
Conflicts:
	arch/x86/mm/fault.c
Diffstat (limited to 'arch/x86/mm/tlb.c')
 arch/x86/mm/tlb.c | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index b3ca1b940654..72a6d4ebe34d 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -29,7 +29,7 @@ DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate)
  * To avoid global state use 8 different call vectors.
  * Each CPU uses a specific vector to trigger flushes on other
  * CPUs. Depending on the received vector the target CPUs look into
- * the right per cpu variable for the flush data.
+ * the right array slot for the flush data.
  *
  * With more than 8 CPUs they are hashed to the 8 available
  * vectors. The limited global vector space forces us to this right now.
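A note on the hashing described above: the sender-side mapping appears later in this diff as smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS. A minimal userspace sketch of that mapping (the vector count of 8 comes from this comment; everything else is illustrative):

	#include <stdio.h>

	/* Sketch of the CPU-to-vector hash described in the comment above.
	 * NUM_INVALIDATE_TLB_VECTORS is 8 per this file; cpu_id stands in
	 * for smp_processor_id(). */
	#define NUM_INVALIDATE_TLB_VECTORS 8

	static unsigned int vector_slot(unsigned int cpu_id)
	{
		return cpu_id % NUM_INVALIDATE_TLB_VECTORS;
	}

	int main(void)
	{
		/* CPUs 8 and 9 hash onto the same slots as CPUs 0 and 1. */
		for (unsigned int cpu = 0; cpu < 10; cpu++)
			printf("cpu %u -> slot %u\n", cpu, vector_slot(cpu));
		return 0;
	}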
@@ -44,13 +44,13 @@ union smp_flush_state
 		spinlock_t tlbstate_lock;
 		DECLARE_BITMAP(flush_cpumask, NR_CPUS);
 	};
-	char pad[SMP_CACHE_BYTES];
-} ____cacheline_aligned;
+	char pad[CONFIG_X86_INTERNODE_CACHE_BYTES];
+} ____cacheline_internodealigned_in_smp;
 
 /* State is put into the per CPU data section, but padded
    to a full cache line because other CPUs can access it and we don't
    want false sharing in the per cpu data segment. */
-static DEFINE_PER_CPU(union smp_flush_state, flush_state);
+static union smp_flush_state flush_state[NUM_INVALIDATE_TLB_VECTORS];
 
 /*
  * We cannot call mmdrop() because we are in interrupt context,
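The pad/union pattern above sizes every flush_state slot to a full cache line, so CPUs working on different slots never write to the same line. A minimal userspace sketch of the idiom, assuming a 64-byte line where the kernel uses CONFIG_X86_INTERNODE_CACHE_BYTES (which can be larger, e.g. on vSMP):

	#include <stdint.h>

	#define CACHE_LINE 64	/* assumption; stands in for CONFIG_X86_INTERNODE_CACHE_BYTES */

	/* Each slot unions its real fields with a full-line pad, so slot i
	 * and slot i+1 can never share a cache line (no false sharing
	 * between the CPUs that own them). The field is illustrative, not
	 * the kernel's. */
	union padded_slot {
		struct {
			uint64_t busy;	/* stands in for the lock/cpumask/mm fields */
		};
		char pad[CACHE_LINE];
	};

	_Static_assert(sizeof(union padded_slot) == CACHE_LINE,
		       "each slot spans exactly one line");

	/* One slot per invalidate vector, aligned like flush_state[] above. */
	static union padded_slot slots[8] __attribute__((aligned(CACHE_LINE)));

	int main(void)
	{
		slots[0].busy = 1;	/* writes here never dirty slots[1]'s line */
		return 0;
	}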
@@ -135,7 +135,7 @@ void smp_invalidate_interrupt(struct pt_regs *regs)
 	 * Use that to determine where the sender put the data.
 	 */
 	sender = ~regs->orig_ax - INVALIDATE_TLB_VECTOR_START;
-	f = &per_cpu(flush_state, sender);
+	f = &flush_state[sender];
 
 	if (!cpumask_test_cpu(cpu, to_cpumask(f->flush_cpumask)))
 		goto out;
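As the computation of sender above shows, the interrupt entry code stores the bitwise complement of the raised vector in orig_ax, so ~regs->orig_ax recovers the vector and subtracting the base vector yields the slot index. A worked userspace example; the 0xf0 base is an assumed value used only to make the arithmetic concrete:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed base of the eight invalidate vectors (illustrative). */
		unsigned long start = 0xf0;

		/* A sender hashed to slot 3 raises vector start + 3; the
		 * entry stub stores the complement in orig_ax. */
		unsigned long vector = start + 3;
		unsigned long orig_ax = ~vector;

		/* The handler inverts it back and subtracts the base. */
		unsigned long sender = ~orig_ax - start;
		printf("sender slot = %lu\n", sender);	/* prints 3 */
		return 0;
	}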
@@ -173,7 +173,7 @@ static void flush_tlb_others_ipi(const struct cpumask *cpumask,
 
 	/* Caller has disabled preemption */
 	sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
-	f = &per_cpu(flush_state, sender);
+	f = &flush_state[sender];
 
 	/*
 	 * Could avoid this lock when
@@ -227,8 +227,8 @@ static int __cpuinit init_smp_flush(void)
 {
 	int i;
 
-	for_each_possible_cpu(i)
-		spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
+	for (i = 0; i < ARRAY_SIZE(flush_state); i++)
+		spin_lock_init(&flush_state[i].tlbstate_lock);
 
 	return 0;
 }
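With flush_state now a plain array, the init loop above iterates over the array length instead of every possible CPU. ARRAY_SIZE is the usual sizeof ratio; a minimal userspace sketch (all names except ARRAY_SIZE are illustrative):

	#include <stdio.h>

	/* The classic length-of-array macro: total size over element size. */
	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	static int slot_lock[8];	/* stands in for flush_state[] */

	int main(void)
	{
		/* Mirrors the loop above: one init per array slot. */
		for (size_t i = 0; i < ARRAY_SIZE(slot_lock); i++)
			slot_lock[i] = 0;	/* stands in for spin_lock_init() */
		printf("initialized %zu slots\n", ARRAY_SIZE(slot_lock));
		return 0;
	}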