From 1268870be30e4571b0e007c986231c434c3b4912 Mon Sep 17 00:00:00 2001
From: Kevin Hao
Date: Tue, 2 Mar 2010 16:51:57 -0500
Subject: powerpc: Replace kmap_atomic with kmap in pte_offset_map

pte_offset_map/pte_offset_map_nested use kmap_atomic to get the
virtual address of the pte table, but kmap_atomic disables preemption.
Hence there will be a call trace if we acquire a spinlock after
invoking pte_offset_map/pte_offset_map_nested on preempt-rt. To fix
it, I've replaced kmap_atomic with kmap in these macros.

Signed-off-by: Kevin Hao
Signed-off-by: Paul Gortmaker
LKML-Reference:
Signed-off-by: Thomas Gleixner
---
 arch/powerpc/include/asm/pgtable-ppc32.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 55646adfa843..a83809916f28 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -307,6 +307,17 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 #define pte_offset_kernel(dir, addr)	\
 	((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr))
+#ifdef CONFIG_PREEMPT_RT
+#define pte_offset_map(dir, addr)		\
+	((pte_t *) kmap(pmd_page(*(dir))) + pte_index(addr))
+#define pte_offset_map_nested(dir, addr)	\
+	((pte_t *) kmap(pmd_page(*(dir))) + pte_index(addr))
+
+#define pte_unmap(pte)		\
+	kunmap((struct page *)_ALIGN_DOWN((unsigned int)pte, PAGE_SIZE))
+#define pte_unmap_nested(pte)	\
+	kunmap((struct page *)_ALIGN_DOWN((unsigned int)pte, PAGE_SIZE))
+#else
 #define pte_offset_map(dir, addr)		\
 	((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr))
 #define pte_offset_map_nested(dir, addr)	\
@@ -314,6 +325,7 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry)
 
 #define pte_unmap(pte)		kunmap_atomic(pte, KM_PTE0)
 #define pte_unmap_nested(pte)	kunmap_atomic(pte, KM_PTE1)
+#endif
 
 /*
  * Encode and decode a swap entry.
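
For context, here is a minimal sketch (not part of the patch) of the calling
pattern that motivates the change. It mirrors the generic pte_offset_map_lock()
pattern in the mm code: map the PTE page, then take the page-table spinlock.
The function name example_map_and_lock is hypothetical and only illustrative;
the helpers it calls (pte_offset_map, pte_lockptr, spin_lock) are the real
kernel interfaces of this era.

#include <linux/mm.h>
#include <linux/spinlock.h>

/*
 * Illustrative sketch only -- not from the patch.  With the kmap_atomic()
 * based pte_offset_map(), the mapping side disables preemption, so taking
 * a preempt-rt spinlock (which may sleep) below would trigger a
 * "scheduling while atomic" call trace.  With plain kmap() under
 * CONFIG_PREEMPT_RT the caller stays preemptible and the lock is safe.
 */
static pte_t *example_map_and_lock(struct mm_struct *mm, pmd_t *pmd,
				   unsigned long addr, spinlock_t **ptlp)
{
	spinlock_t *ptl = pte_lockptr(mm, pmd);
	pte_t *pte = pte_offset_map(pmd, addr);	/* kmap() on preempt-rt */

	spin_lock(ptl);		/* sleeping lock on preempt-rt -- now allowed */
	*ptlp = ptl;
	return pte;		/* caller does spin_unlock() + pte_unmap() */
}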