author     Gary King <gking@nvidia.com>                 2010-07-29 12:37:20 -0400
committer  Russell King <rmk+kernel@arm.linux.org.uk>   2010-07-30 18:16:07 -0400
commit     831e8047eb2af310184a9d4d9e749f3de119ae39
tree       f9fd2e3982796a50e2d2b9374c80ba0c1c4204bb /arch/arm
parent     b92b3612134faff171981fad4f0adb33f485e02e
ARM: 6279/1: highmem: fix SMP preemption bug in kmap_high_l1_vipt
smp_processor_id() must not be called from a preemptible context (this
is checked by CONFIG_DEBUG_PREEMPT), but kmap_high_l1_vipt() was doing so.
This led to a situation where the wrong CPU's kmap_high_l1_vipt_depth
counter could be incremented, triggering the BUG_ON(*depth <= 0) in
kunmap_high_l1_vipt().
The solution is to move the call to smp_processor_id() after the call
to preempt_disable().
Originally by: Andrew Howe <ahowe@nvidia.com>
Signed-off-by: Gary King <gking@nvidia.com>
Acked-by: Nicolas Pitre <nico.as.pitre@linaro.org>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Diffstat (limited to 'arch/arm')
 arch/arm/mm/highmem.c | 13 ++++++++-----
 1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 086816b205b8..6ab244062b4a 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -163,19 +163,22 @@ static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);
 
 void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
 {
-	unsigned int idx, cpu = smp_processor_id();
-	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+	unsigned int idx, cpu;
+	int *depth;
 	unsigned long vaddr, flags;
 	pte_t pte, *ptep;
 
+	if (!in_interrupt())
+		preempt_disable();
+
+	cpu = smp_processor_id();
+	depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
+
 	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 	ptep = TOP_PTE(vaddr);
 	pte = mk_pte(page, kmap_prot);
 
-	if (!in_interrupt())
-		preempt_disable();
-
 	raw_local_irq_save(flags);
 	(*depth)++;
 	if (pte_val(*ptep) == pte_val(pte)) {
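
The same ordering requirement applies to any per-CPU access made from a
preemptible context: the CPU number, and any pointer derived from it, is only
stable once preemption has been disabled. Below is a minimal sketch of that
general pattern using the kernel's get_cpu()/put_cpu() helpers, which combine
preempt_disable() with smp_processor_id(); the example_depth variable and
example_enter() function are illustrative only and are not part of this patch.

#include <linux/percpu.h>
#include <linux/smp.h>

/* Illustrative per-CPU counter, not the one touched by this patch. */
static DEFINE_PER_CPU(int, example_depth);

static void example_enter(void)
{
	unsigned int cpu;
	int *depth;

	cpu = get_cpu();                        /* preempt_disable() + smp_processor_id() */
	depth = &per_cpu(example_depth, cpu);   /* safe: the task can no longer migrate */
	(*depth)++;

	/* ... work on this CPU's private state ... */

	put_cpu();                              /* re-enable preemption */
}

kmap_high_l1_vipt() does not use get_cpu() unconditionally because it may also
be entered from interrupt context, where preemption is already effectively
disabled; the patch therefore keeps the existing if (!in_interrupt())
preempt_disable() guard and only moves it ahead of the smp_processor_id() and
per_cpu() lookups.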