author		Kevin Hao <kexin.hao@windriver.com>	2010-03-02 16:51:57 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2010-04-27 11:24:39 -0400
commit		1268870be30e4571b0e007c986231c434c3b4912 (patch)
tree		84f3128f89dbf0b58c054c4fd3e25c6ed0f9ea15 /arch
parent		b6589c1199dea2d26358808b65666c49495f735e (diff)
powerpc: Replace kmap_atomic with kmap in pte_offset_map
The pte_offset_map/pte_offset_map_nested macros use kmap_atomic to get the
virtual address of the pte table, but kmap_atomic disables preemption.
Hence a call trace is triggered if we acquire a spinlock after invoking
pte_offset_map/pte_offset_map_nested on preempt-rt. To fix it, replace
kmap_atomic with kmap in these macros.

Signed-off-by: Kevin Hao <kexin.hao@windriver.com>
Signed-off-by: Paul Gortmaker <paul.gortmaker@windriver.com>
LKML-Reference: <ffaf532c138188b526a8c623ed3c7f5067da6d68.1267566249.git.paul.gortmaker@windriver.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
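As a minimal sketch of the failure mode (the caller, lock, and function
names here are assumptions for illustration, not code from the patch),
consider a page-table walker that takes a spinlock while a pte is mapped:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * Hypothetical caller, for illustration only. With the stock macros,
 * pte_offset_map() expands to kmap_atomic(), which disables preemption.
 * On preempt-rt a spinlock_t is a sleeping rtmutex, so the spin_lock()
 * below would run in an atomic context and trigger the call trace
 * ("BUG: sleeping function called from invalid context").
 */
static void example_pte_walk(pmd_t *pmd, unsigned long addr, spinlock_t *ptl)
{
	pte_t *pte = pte_offset_map(pmd, addr);	/* kmap_atomic(): preemption off */

	spin_lock(ptl);			/* may sleep on -rt => splat */
	/* ... inspect or modify *pte ... */
	spin_unlock(ptl);

	pte_unmap(pte);			/* kunmap_atomic(): preemption back on */
}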
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/include/asm/pgtable-ppc32.h | 12
1 file changed, 12 insertions(+), 0 deletions(-)
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 55646adfa843..a83809916f28 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -307,6 +307,17 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#ifdef CONFIG_PREEMPT_RT
+#define pte_offset_map(dir, addr)		\
+	((pte_t *) kmap(pmd_page(*(dir))) + pte_index(addr))
+#define pte_offset_map_nested(dir, addr)	\
+	((pte_t *) kmap(pmd_page(*(dir))) + pte_index(addr))
+
+#define pte_unmap(pte)		\
+	kunmap((struct page *)_ALIGN_DOWN((unsigned int)pte, PAGE_SIZE))
+#define pte_unmap_nested(pte)	\
+	kunmap((struct page *)_ALIGN_DOWN((unsigned int)pte, PAGE_SIZE))
+#else
 #define pte_offset_map(dir, addr)		\
 	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
 #define pte_offset_map_nested(dir, addr)	\
@@ -314,6 +325,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 
 #define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+#endif
 
 /*
  * Encode and decode a swap entry.
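Under CONFIG_PREEMPT_RT the same walk becomes legal: kmap() may sleep but
leaves preemption enabled, and pte_unmap() rounds the pte pointer down to
the page-aligned mapping address before handing it to kunmap(). A sketch
under the same assumed names as above:

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * Hypothetical caller, for illustration only. With the -rt variants of
 * the macros, pte_offset_map() expands to kmap(), so no atomic section
 * is entered and the spinlock_t (an rtmutex on -rt) can be taken while
 * the pte table is mapped.
 */
static void example_pte_walk_rt(pmd_t *pmd, unsigned long addr, spinlock_t *ptl)
{
	pte_t *pte = pte_offset_map(pmd, addr);	/* kmap(): preemptible */

	spin_lock(ptl);			/* fine: not in atomic context */
	/* ... inspect or modify *pte ... */
	spin_unlock(ptl);

	pte_unmap(pte);	/* kunmap() on _ALIGN_DOWN(pte, PAGE_SIZE) */
}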