path: root/arch/sh/include/asm/pgtable_64.h
author    Paul Mundt <lethal@linux-sh.org>    2009-09-03 04:21:10 -0400
committer Paul Mundt <lethal@linux-sh.org>    2009-09-03 04:21:10 -0400
commit    0906a3ad33a254094fb74828e3ddb9af8771a6da (patch)
tree      33acc1be2e213ae2f13439d3d5f8e9dd8a4f2d46 /arch/sh/include/asm/pgtable_64.h
parent    d1af119a69fc9a625bd57a66d9c9fa88795b082c (diff)
sh: Fix up and optimize the kmap_coherent() interface.
This fixes up the kmap_coherent/kunmap_coherent() interface for recent
changes both in the page fault path and the shared cache flushers, as
well as adding in some optimizations. One of the key things to note
here is that the TLB flush itself is deferred until the unmap, and the
call in to update_mmu_cache() itself goes away, relying on the regular
page fault path to handle the lazy dcache writeback if necessary.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
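(Illustration, not part of the commit.) The key point in the message above is the ordering: the map side only installs the mapping, and the single TLB entry is flushed later, when the mapping is torn down. A minimal standalone sketch of that ordering follows; every type and helper in it (the struct page stand-in, install_kmap_pte(), clear_kmap_pte(), flush_kmap_tlb_entry(), FIXMAP_SLOT) is a made-up placeholder and not the arch/sh code touched by this commit.

/*
 * Standalone sketch of the deferred TLB flush described in the commit
 * message.  Every type and helper here is a hypothetical stand-in; this
 * is not the arch/sh implementation.
 */
#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define FIXMAP_SLOT	0xfe000000UL		/* made-up fixed mapping address */

struct page { unsigned long pfn; };		/* stand-in for the kernel's struct page */

/* Stubs standing in for the real PTE and TLB primitives. */
static void install_kmap_pte(unsigned long vaddr, struct page *page)
{
	printf("kmap  : map pfn %lu at %#lx (no TLB flush here)\n", page->pfn, vaddr);
}

static void clear_kmap_pte(unsigned long vaddr)
{
	printf("kunmap: clear PTE at %#lx\n", vaddr);
}

static void flush_kmap_tlb_entry(unsigned long vaddr)
{
	printf("kunmap: flush TLB entry for %#lx\n", vaddr);
}

/* Map the page at a fixed kernel slot; the TLB flush is deferred to unmap. */
static void *kmap_coherent_sketch(struct page *page)
{
	unsigned long vaddr = FIXMAP_SLOT;

	install_kmap_pte(vaddr, page);
	return (void *)vaddr;
}

/* Tear the mapping down and flush the single TLB entry only now. */
static void kunmap_coherent_sketch(void *kvaddr)
{
	unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;

	clear_kmap_pte(vaddr);
	flush_kmap_tlb_entry(vaddr);
}

int main(void)
{
	struct page pg = { .pfn = 1234 };
	void *p = kmap_coherent_sketch(&pg);

	kunmap_coherent_sketch(p);
	return 0;
}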
Diffstat (limited to 'arch/sh/include/asm/pgtable_64.h')
-rw-r--r--  arch/sh/include/asm/pgtable_64.h | 5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index c78990cda557..17cdbecc3adc 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -60,6 +60,9 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
 /* To find an entry in a kernel PGD. */
 #define pgd_offset_k(address)	pgd_offset(&init_mm, address)
 
+#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
 /*
  * PMD level access routines. Same notes as above.
  */
@@ -80,6 +83,8 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
 #define pte_index(address) \
 		((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
+#define __pte_offset(address)	pte_index(address)
+
 #define pte_offset_kernel(dir, addr) \
 		((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
 
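(Illustration, not part of the commit.) The macros added by this patch only extract the page-table index bits of an address at each level; __pte_offset() is simply an alias for the existing pte_index(). The small standalone program below shows that computation using made-up PAGE_SHIFT/PMD_SHIFT/PUD_SHIFT and PTRS_PER_* values; the real sh64 constants come from the kernel headers.

/*
 * Standalone illustration of the index extraction the new
 * __pud_offset()/__pmd_offset()/__pte_offset() macros perform.  The
 * constants below are made up for the example; the real sh64 values
 * live in the kernel headers.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PMD_SHIFT	21
#define PUD_SHIFT	30
#define PTRS_PER_PUD	512
#define PTRS_PER_PMD	512
#define PTRS_PER_PTE	512

#define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address)	(((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
/* In the patch, __pte_offset() is just an alias for pte_index(). */
#define __pte_offset(address)	(((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))

int main(void)
{
	unsigned long addr = 0xc0801234UL;

	/* Each macro isolates the index bits for one level of the table walk. */
	printf("pud index %lu, pmd index %lu, pte index %lu\n",
	       __pud_offset(addr), __pmd_offset(addr), __pte_offset(addr));
	return 0;
}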