author    Paul Mundt <lethal@linux-sh.org>  2009-07-28 11:12:17 -0400
committer Paul Mundt <lethal@linux-sh.org>  2009-07-28 11:12:17 -0400
commit    9cef7492696a416663b4edb953a4eade8517ebeb (patch)
tree      e52b19208a2197a624a7942e6c549d274a944fb0 /arch/sh
parent    0dfae7d5a21901b28ec0452d71be64adf5ea323e (diff)
sh: update_mmu_cache() consolidation.
This splits out separate __update_cache()/__update_tlb() functions for
update_mmu_cache() to wrap into. This lets us share the common
__update_cache() bits while keeping the special __update_tlb() handling
broken out.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
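For quick reference, the resulting entry point (added in the pgtable.h hunk
below) is a static inline that simply chains the two new helpers, so every
existing caller picks up both halves unchanged:

/* From arch/sh/include/asm/pgtable.h as added by this patch */
extern void __update_cache(struct vm_area_struct *vma,
			   unsigned long address, pte_t pte);
extern void __update_tlb(struct vm_area_struct *vma,
			 unsigned long address, pte_t pte);

static inline void
update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
	__update_cache(vma, address, pte);	/* shared D-cache alias writeback */
	__update_tlb(vma, address, pte);	/* CPU-specific TLB preload, if any */
}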
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/include/asm/pgtable.h  |  15
-rw-r--r--  arch/sh/mm/pg-mmu.c            |  21
-rw-r--r--  arch/sh/mm/tlb-nommu.c         |   9
-rw-r--r--  arch/sh/mm/tlb-pteaex.c        |  13
-rw-r--r--  arch/sh/mm/tlb-sh3.c           |  29
-rw-r--r--  arch/sh/mm/tlb-sh4.c           |  29
-rw-r--r--  arch/sh/mm/tlbflush_64.c       |  25
7 files changed, 67 insertions(+), 74 deletions(-)
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index ba2333216c5b..43ef3e99fdd1 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -134,8 +134,19 @@ typedef pte_t *pte_addr_t;
 #define pgtable_cache_init()	do { } while (0)
 
 struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct * vma,
-			     unsigned long address, pte_t pte);
+
+extern void __update_cache(struct vm_area_struct *vma,
+			   unsigned long address, pte_t pte);
+extern void __update_tlb(struct vm_area_struct *vma,
+			 unsigned long address, pte_t pte);
+
+static inline void
+update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+	__update_cache(vma, address, pte);
+	__update_tlb(vma, address, pte);
+}
+
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern void paging_init(void);
 extern void page_table_range_init(unsigned long start, unsigned long end,
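Callers are untouched by this consolidation: generic mm code continues to
call update_mmu_cache() after a PTE is established. A minimal sketch of that
usage, assuming the conventional fault-path variable names (ptep, entry),
which are not part of this patch:

	/* Hedged sketch of the generic fault-path pattern (not from this
	 * patch): once the new PTE is written, the architecture hook runs
	 * so it can write back aliased cache lines and preload the TLB.
	 */
	set_pte_at(vma->vm_mm, address, ptep, entry);
	update_mmu_cache(vma, address, entry);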
diff --git a/arch/sh/mm/pg-mmu.c b/arch/sh/mm/pg-mmu.c
index 356d2cdcb209..8602f68af4c8 100644
--- a/arch/sh/mm/pg-mmu.c
+++ b/arch/sh/mm/pg-mmu.c
@@ -134,3 +134,24 @@ void clear_user_highpage(struct page *page, unsigned long vaddr)
 	kunmap_atomic(kaddr, KM_USER0);
 }
 EXPORT_SYMBOL(clear_user_highpage);
+
+void __update_cache(struct vm_area_struct *vma,
+		    unsigned long address, pte_t pte)
+{
+	struct page *page;
+	unsigned long pfn = pte_pfn(pte);
+
+	if (!boot_cpu_data.dcache.n_aliases)
+		return;
+
+	page = pfn_to_page(pfn);
+	if (pfn_valid(pfn) && page_mapping(page)) {
+		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+		if (dirty) {
+			unsigned long addr = (unsigned long)page_address(page);
+
+			if (pages_do_alias(addr, address & PAGE_MASK))
+				__flush_wback_region((void *)addr, PAGE_SIZE);
+		}
+	}
+}
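For context on the alias test used above: pages_do_alias() checks whether the
kernel mapping of the page and the faulting user address fall into different
D-cache colour bins, in which case the kernel-side lines must be written back
before the user mapping is touched. A hedged illustration of the idea, with
hypothetical names, since the real macro lives in the sh cache headers and is
not shown in this diff:

	/* Illustrative only: two virtual addresses alias when they differ
	 * in the cache-colour bits (assumed mask name, not the actual macro).
	 */
	#define DCACHE_COLOUR_MASK	(boot_cpu_data.dcache.alias_mask)
	#define pages_do_alias(a, b)	(((a) ^ (b)) & DCACHE_COLOUR_MASK)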
diff --git a/arch/sh/mm/tlb-nommu.c b/arch/sh/mm/tlb-nommu.c
index 71c742b5aee3..0ef5429943df 100644
--- a/arch/sh/mm/tlb-nommu.c
+++ b/arch/sh/mm/tlb-nommu.c
@@ -46,10 +46,13 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	BUG();
 }
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __update_cache(struct vm_area_struct *vma,
+		    unsigned long address, pte_t pte)
 {
-	BUG();
 }
 
 void __init page_table_range_init(unsigned long start, unsigned long end,
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index c39b77363352..9aabd313cede 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -16,15 +16,14 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
+	unsigned long flags, pteval, vpn;
 
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
+	/*
+	 * Handle debugger faulting in for debugee.
+	 */
+	if (current->active_mm != vma->vm_mm)
 		return;
 
 	local_irq_save(flags);
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 9b8459c74abd..425f1f23cf93 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -27,32 +27,16 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	unsigned long flags, pteval, vpn;
 
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
+	/*
+	 * Handle debugger faulting in for debugee.
+	 */
+	if (current->active_mm != vma->vm_mm)
 		return;
 
-	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
-#if defined(CONFIG_SH7705_CACHE_32KB)
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-			unsigned long addr = (unsigned long)page_address(page);
-
-			if (pages_do_alias(addr, address & PAGE_MASK))
-				__flush_wback_region((void *)addr, PAGE_SIZE);
-		}
-#endif
-	}
-
 	local_irq_save(flags);
 
 	/* Set PTEH register */
@@ -93,4 +77,3 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
 	for (i = 0; i < ways; i++)
 		ctrl_outl(data, addr + (i << 8));
 }
-
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index cf50082d2435..81199f1e5945 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -15,33 +15,16 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
-	unsigned long flags;
-	unsigned long pteval;
-	unsigned long vpn;
-	unsigned long pfn = pte_pfn(pte);
-	struct page *page;
+	unsigned long flags, pteval, vpn;
 
-	/* Ptrace may call this routine. */
-	if (vma && current->active_mm != vma->vm_mm)
+	/*
+	 * Handle debugger faulting in for debugee.
+	 */
+	if (current->active_mm != vma->vm_mm)
 		return;
 
-	page = pfn_to_page(pfn);
-	if (pfn_valid(pfn) && page_mapping(page)) {
-#ifndef CONFIG_SMP
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-		if (dirty) {
-
-			unsigned long addr = (unsigned long)page_address(page);
-
-			if (pages_do_alias(addr, address & PAGE_MASK))
-				__flush_wback_region((void *)addr, PAGE_SIZE);
-		}
-#endif
-	}
-
 	local_irq_save(flags);
 
 	/* Set PTEH register */
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index 3ce40ea34824..f2e44e9ffb75 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -329,22 +329,6 @@ do_sigbus:
 		goto no_context;
 }
 
-void update_mmu_cache(struct vm_area_struct * vma,
-		      unsigned long address, pte_t pte)
-{
-	/*
-	 * This appears to get called once for every pte entry that gets
-	 * established => I don't think it's efficient to try refilling the
-	 * TLBs with the pages - some may not get accessed even. Also, for
-	 * executable pages, it is impossible to determine reliably here which
-	 * TLB they should be mapped into (or both even).
-	 *
-	 * So, just do nothing here and handle faults on demand. In the
-	 * TLBMISS handling case, the refill is now done anyway after the pte
-	 * has been fixed up, so that deals with most useful cases.
-	 */
-}
-
 void local_flush_tlb_one(unsigned long asid, unsigned long page)
 {
 	unsigned long long match, pteh=0, lpage;
@@ -482,3 +466,12 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
 	/* FIXME: Optimize this later.. */
 	flush_tlb_all();
 }
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
+{
+}
+
+void __update_cache(struct vm_area_struct *vma,
+		    unsigned long address, pte_t pte)
+{
+}