| author | Russell King <rmk+kernel@arm.linux.org.uk> | 2009-12-18 11:21:35 -0500 |
|---|---|---|
| committer | Russell King <rmk+kernel@arm.linux.org.uk> | 2010-01-20 08:48:29 -0500 |
| commit | c26c20b823d48addbde9cb5709d80655c6fadf18 (patch) | |
| tree | cf3cbfa958860942107070ff6f33eddc2238381c | |
| parent | 24bc7347da73a9ed3383056c3d0f28c0e361621e (diff) | |
ARM: make_coherent: split adjust_pte() in two
adjust_pte() walks the page tables, and do_adjust_pte() does the
page table manipulation.
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
-rw-r--r-- | arch/arm/mm/fault-armv.c | 52 |
1 file changed, 32 insertions, 20 deletions
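The diff below is self-describing, but the shape of the refactor is worth stating once in plain terms: adjust_pte() becomes a pure walker that resolves pgd -> pmd -> pte and maps/unmaps the PTE, while do_adjust_pte() becomes the worker that rewrites the single entry it is handed. The following toy user-space C program shows the same walker/worker split on a two-level table; every name in it is invented for illustration and none of it is kernel API, only the pattern carries over:

/*
 * Toy analogue of the split this patch makes: adjust_entry() walks
 * the table levels (and can fail), act_on_entry() mutates the one
 * leaf entry it is handed. Hypothetical names, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define LVL1_ENTRIES 4
#define LVL2_ENTRIES 4

typedef unsigned int entry_t;

struct table {
	entry_t *lvl1[LVL1_ENTRIES];	/* each slot points at a level-2 array */
};

/* Worker: operates on one already-resolved entry. */
static int act_on_entry(entry_t *ep)
{
	int was_set = (*ep & 0x1);

	*ep |= 0x1;	/* e.g. flip a "shared" attribute bit */
	return was_set;
}

/* Walker: resolves the levels, then delegates to the worker. */
static int adjust_entry(struct table *t, unsigned int i1, unsigned int i2)
{
	entry_t *lvl2;

	if (i1 >= LVL1_ENTRIES || i2 >= LVL2_ENTRIES)
		return -1;	/* bad index: loosely analogous to pgd_bad() */
	lvl2 = t->lvl1[i1];
	if (!lvl2)
		return -1;	/* unpopulated: loosely analogous to pgd_none() */
	return act_on_entry(&lvl2[i2]);
}

int main(void)
{
	struct table t = { { 0 } };

	t.lvl1[1] = calloc(LVL2_ENTRIES, sizeof(entry_t));
	if (!t.lvl1[1])
		return 1;
	printf("first adjust: %d\n", adjust_entry(&t, 1, 2));  /* 0: bit was clear */
	printf("second adjust: %d\n", adjust_entry(&t, 1, 2)); /* 1: bit already set */
	printf("missing level: %d\n", adjust_entry(&t, 0, 0)); /* -1: no level 2 */
	free(t.lvl1[1]);
	return 0;
}

The payoff of the split, visible in the second hunk, is that the worker takes a pte_t * directly, so a later caller that already holds a mapped PTE can reuse do_adjust_pte() without repeating the walk.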
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index 56ee15321b00..074e6bb54eb3 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -36,28 +36,12 @@ static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
  * Therefore those configurations which might call adjust_pte (those
  * without CONFIG_CPU_CACHE_VIPT) cannot support split page_table_lock.
  */
-static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+static int do_adjust_pte(struct vm_area_struct *vma, unsigned long address,
+	pte_t *ptep)
 {
-	pgd_t *pgd;
-	pmd_t *pmd;
-	pte_t *pte, entry;
+	pte_t entry = *ptep;
 	int ret;
 
-	pgd = pgd_offset(vma->vm_mm, address);
-	if (pgd_none(*pgd))
-		goto no_pgd;
-	if (pgd_bad(*pgd))
-		goto bad_pgd;
-
-	pmd = pmd_offset(pgd, address);
-	if (pmd_none(*pmd))
-		goto no_pmd;
-	if (pmd_bad(*pmd))
-		goto bad_pmd;
-
-	pte = pte_offset_map(pmd, address);
-	entry = *pte;
-
 	/*
 	 * If this page is present, it's actually being shared.
 	 */
@@ -74,10 +58,38 @@ static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
 			  (pfn << PAGE_SHIFT) + PAGE_SIZE);
 		pte_val(entry) &= ~L_PTE_MT_MASK;
 		pte_val(entry) |= shared_pte_mask;
-		set_pte_at(vma->vm_mm, address, pte, entry);
+		set_pte_at(vma->vm_mm, address, ptep, entry);
 		flush_tlb_page(vma, address);
 	}
+
+	return ret;
+}
+
+static int adjust_pte(struct vm_area_struct *vma, unsigned long address)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+	int ret;
+
+	pgd = pgd_offset(vma->vm_mm, address);
+	if (pgd_none(*pgd))
+		goto no_pgd;
+	if (pgd_bad(*pgd))
+		goto bad_pgd;
+
+	pmd = pmd_offset(pgd, address);
+	if (pmd_none(*pmd))
+		goto no_pmd;
+	if (pmd_bad(*pmd))
+		goto bad_pmd;
+
+	pte = pte_offset_map(pmd, address);
+
+	ret = do_adjust_pte(vma, address, pte);
+
 	pte_unmap(pte);
+
 	return ret;
 
 bad_pgd: