-rw-r--r--  arch/sh/include/asm/page.h                |  6
-rw-r--r--  arch/sh/include/asm/pgtable.h             |  7
-rw-r--r--  arch/sh/include/cpu-sh3/cpu/cacheflush.h  |  5
-rw-r--r--  arch/sh/include/cpu-sh4/cpu/cacheflush.h  |  2
-rw-r--r--  arch/sh/mm/cache-sh4.c                    | 10
-rw-r--r--  arch/sh/mm/cache-sh7705.c                 |  7
-rw-r--r--  arch/sh/mm/pg-sh4.c                       | 74
-rw-r--r--  arch/sh/mm/pg-sh7705.c                    | 52
-rw-r--r--  arch/sh/mm/tlb-pteaex.c                   | 17
-rw-r--r--  arch/sh/mm/tlb-sh3.c                      | 20
-rw-r--r--  arch/sh/mm/tlb-sh4.c                      | 23
11 files changed, 81 insertions(+), 142 deletions(-)
diff --git a/arch/sh/include/asm/page.h b/arch/sh/include/asm/page.h
index 49592c780a6e..a31ab40040f0 100644
--- a/arch/sh/include/asm/page.h
+++ b/arch/sh/include/asm/page.h
@@ -50,6 +50,12 @@ extern unsigned long shm_align_mask;
 extern unsigned long max_low_pfn, min_low_pfn;
 extern unsigned long memory_start, memory_end;
 
+static inline unsigned long
+pages_do_alias(unsigned long addr1, unsigned long addr2)
+{
+        return (addr1 ^ addr2) & shm_align_mask;
+}
+
 extern void clear_page(void *to);
 extern void copy_page(void *to, void *from);
 
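The pages_do_alias() helper added above gives the mm code a single definition of the VIPT alias test that the later hunks use in place of open-coded CACHE_ALIAS checks: two virtual mappings of one physical page can occupy different cache sets only if they differ within shm_align_mask. A standalone sketch of the arithmetic (the 0x3000 mask is illustrative, matching a direct-mapped 16 KB dcache with 4 KB pages, where bits 12-13 select the cache colour):

    #include <stdio.h>

    /* Illustrative stand-in for the kernel's shm_align_mask: a direct-mapped
     * 16 KB dcache with 4 KB pages indexes on address bits 0-13, so bits
     * 12-13 lie above PAGE_SHIFT and select the alias "colour". */
    static unsigned long shm_align_mask = 0x3000;

    static unsigned long pages_do_alias(unsigned long addr1, unsigned long addr2)
    {
            /* Nonzero iff the two mappings index different cache sets. */
            return (addr1 ^ addr2) & shm_align_mask;
    }

    int main(void)
    {
            printf("%#lx\n", pages_do_alias(0x10001000, 0x10003000)); /* 0x2000: aliases */
            printf("%#lx\n", pages_do_alias(0x10001000, 0x20005000)); /* 0: same colour */
            return 0;
    }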
diff --git a/arch/sh/include/asm/pgtable.h b/arch/sh/include/asm/pgtable.h
index 2a011b18090b..d9f68f9c3cb3 100644
--- a/arch/sh/include/asm/pgtable.h
+++ b/arch/sh/include/asm/pgtable.h
@@ -133,13 +133,6 @@ typedef pte_t *pte_addr_t;
  */
 #define pgtable_cache_init()        do { } while (0)
 
-#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \
-        defined(CONFIG_SH7705_CACHE_32KB))
-struct mm_struct;
-#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep);
-#endif
-
 struct vm_area_struct;
 extern void update_mmu_cache(struct vm_area_struct * vma,
                              unsigned long address, pte_t pte);
diff --git a/arch/sh/include/cpu-sh3/cpu/cacheflush.h b/arch/sh/include/cpu-sh3/cpu/cacheflush.h
index 1ac27aae6700..6485ad5649ad 100644
--- a/arch/sh/include/cpu-sh3/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-sh3/cpu/cacheflush.h
@@ -15,10 +15,7 @@
  * SH4. Unlike the SH4 this is a unified cache so we need to do some work
  * in mmap when 'exec'ing a new binary
  */
- /* 32KB cache, 4kb PAGE sizes need to check bit 12 */
-#define CACHE_ALIAS 0x00001000
-
-#define PG_mapped        PG_arch_1
+#define PG_dcache_dirty        PG_arch_1
 
 void flush_cache_all(void);
 void flush_cache_mm(struct mm_struct *mm);
diff --git a/arch/sh/include/cpu-sh4/cpu/cacheflush.h b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
index 065306d376eb..3564f1722195 100644
--- a/arch/sh/include/cpu-sh4/cpu/cacheflush.h
+++ b/arch/sh/include/cpu-sh4/cpu/cacheflush.h
@@ -38,6 +38,6 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 /* Initialization of P3 area for copy_user_page */
 void p3_cache_init(void);
 
-#define PG_mapped        PG_arch_1
+#define PG_dcache_dirty        PG_arch_1
 
 #endif /* __ASM_CPU_SH4_CACHEFLUSH_H */
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 5cfe08dbb59e..c3a09b27f8d5 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/io.h>
 #include <linux/mutex.h>
+#include <linux/fs.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
@@ -246,7 +247,14 @@ static inline void flush_cache_4096(unsigned long start,
  */
 void flush_dcache_page(struct page *page)
 {
-        if (test_bit(PG_mapped, &page->flags)) {
+        struct address_space *mapping = page_mapping(page);
+
+#ifndef CONFIG_SMP
+        if (mapping && !mapping_mapped(mapping))
+                set_bit(PG_dcache_dirty, &page->flags);
+        else
+#endif
+        {
                 unsigned long phys = PHYSADDR(page_address(page));
                 unsigned long addr = CACHE_OC_ADDRESS_ARRAY;
                 int i, n;
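With this hunk, flush_dcache_page() on UP defers work for page-cache pages that no process currently maps: nothing can alias such a page from userspace, so the flush is recorded in PG_dcache_dirty and paid later by update_mmu_cache() when the first user mapping is installed (see the tlb-sh4.c hunk below). A userspace model of that protocol, with stand-in names for the kernel's flag and helpers:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-ins for struct page state; not the kernel's types. */
    struct model_page {
            bool dcache_dirty;   /* plays PG_dcache_dirty */
            bool user_mapped;    /* plays mapping_mapped(page_mapping(page)) */
    };

    static void writeback(struct model_page *p)
    {
            puts("write back kernel-side cache lines");
            p->dcache_dirty = false;
    }

    /* flush_dcache_page(): defer while no user mapping can alias. */
    static void model_flush_dcache_page(struct model_page *p)
    {
            if (!p->user_mapped)
                    p->dcache_dirty = true;
            else
                    writeback(p);
    }

    /* update_mmu_cache(): complete any deferred flush at map-in time. */
    static void model_update_mmu_cache(struct model_page *p)
    {
            p->user_mapped = true;
            if (p->dcache_dirty)
                    writeback(p);
    }

    int main(void)
    {
            struct model_page p = { false, false };
            model_flush_dcache_page(&p);  /* only sets the flag */
            model_update_mmu_cache(&p);   /* pays the flush exactly once */
            return 0;
    }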
diff --git a/arch/sh/mm/cache-sh7705.c b/arch/sh/mm/cache-sh7705.c
index 22dacc778823..fa37bff306b9 100644
--- a/arch/sh/mm/cache-sh7705.c
+++ b/arch/sh/mm/cache-sh7705.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/mman.h>
 #include <linux/mm.h>
+#include <linux/fs.h>
 #include <linux/threads.h>
 #include <asm/addrspace.h>
 #include <asm/page.h>
@@ -128,7 +129,11 @@ static void __uses_jump_to_uncached __flush_dcache_page(unsigned long phys)
  */
 void flush_dcache_page(struct page *page)
 {
-        if (test_bit(PG_mapped, &page->flags))
+        struct address_space *mapping = page_mapping(page);
+
+        if (mapping && !mapping_mapped(mapping))
+                set_bit(PG_dcache_dirty, &page->flags);
+        else
                 __flush_dcache_page(PHYSADDR(page_address(page)));
 }
 
diff --git a/arch/sh/mm/pg-sh4.c b/arch/sh/mm/pg-sh4.c
index 2fe14da1f839..f3c4b2a54fc7 100644
--- a/arch/sh/mm/pg-sh4.c
+++ b/arch/sh/mm/pg-sh4.c
@@ -15,8 +15,6 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-#define CACHE_ALIAS (current_cpu_data.dcache.alias_mask)
-
 #define kmap_get_fixmap_pte(vaddr)                                     \
         pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)), (vaddr)), (vaddr))
 
@@ -68,10 +66,9 @@ static inline void kunmap_coherent(struct page *page)
  */
 void clear_user_page(void *to, unsigned long address, struct page *page)
 {
-        __set_bit(PG_mapped, &page->flags);
-
         clear_page(to);
-        if ((((address & PAGE_MASK) ^ (unsigned long)to) & CACHE_ALIAS))
+
+        if (pages_do_alias((unsigned long)to, address & PAGE_MASK))
                 __flush_wback_region(to, PAGE_SIZE);
 }
 
@@ -79,13 +76,14 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long vaddr, void *dst, const void *src,
                        unsigned long len)
 {
-        void *vto;
-
-        __set_bit(PG_mapped, &page->flags);
-
-        vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-        memcpy(vto, src, len);
-        kunmap_coherent(vto);
+        if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+                void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+                memcpy(vto, src, len);
+                kunmap_coherent(vto);
+        } else {
+                memcpy(dst, src, len);
+                set_bit(PG_dcache_dirty, &page->flags);
+        }
 
         if (vma->vm_flags & VM_EXEC)
                 flush_cache_page(vma, vaddr, page_to_pfn(page));
@@ -95,13 +93,14 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
                        unsigned long vaddr, void *dst, const void *src,
                        unsigned long len)
 {
-        void *vfrom;
-
-        __set_bit(PG_mapped, &page->flags);
-
-        vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
-        memcpy(dst, vfrom, len);
-        kunmap_coherent(vfrom);
+        if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) {
+                void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
+                memcpy(dst, vfrom, len);
+                kunmap_coherent(vfrom);
+        } else {
+                memcpy(dst, src, len);
+                set_bit(PG_dcache_dirty, &page->flags);
+        }
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
@@ -109,14 +108,19 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
         void *vfrom, *vto;
 
-        __set_bit(PG_mapped, &to->flags);
-
         vto = kmap_atomic(to, KM_USER1);
-        vfrom = kmap_coherent(from, vaddr);
-        copy_page(vto, vfrom);
-        kunmap_coherent(vfrom);
 
-        if (((vaddr ^ (unsigned long)vto) & CACHE_ALIAS))
+        if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) {
+                vfrom = kmap_coherent(from, vaddr);
+                copy_page(vto, vfrom);
+                kunmap_coherent(vfrom);
+        } else {
+                vfrom = kmap_atomic(from, KM_USER0);
+                copy_page(vto, vfrom);
+                kunmap_atomic(vfrom, KM_USER0);
+        }
+
+        if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
                 __flush_wback_region(vto, PAGE_SIZE);
 
         kunmap_atomic(vto, KM_USER1);
@@ -124,23 +128,3 @@ void copy_user_highpage(struct page *to, struct page *from,
         smp_wmb();
 }
 EXPORT_SYMBOL(copy_user_highpage);
-
-/*
- * For SH-4, we have our own implementation for ptep_get_and_clear
- */
-pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
-{
-        pte_t pte = *ptep;
-
-        pte_clear(mm, addr, ptep);
-        if (!pte_not_present(pte)) {
-                unsigned long pfn = pte_pfn(pte);
-                if (pfn_valid(pfn)) {
-                        struct page *page = pfn_to_page(pfn);
-                        struct address_space *mapping = page_mapping(page);
-                        if (!mapping || !mapping_writably_mapped(mapping))
-                                __clear_bit(PG_mapped, &page->flags);
-                }
-        }
-        return pte;
-}
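The rewritten pg-sh4.c copy routines all branch on one invariant: if the page is mapped somewhere and its dcache state is clean, copying through a colour-matched kernel alias (kmap_coherent()) keeps the user's cache view coherent without a flush; otherwise a plain copy through the kernel mapping suffices, and set_bit(PG_dcache_dirty, ...) records the deferred flush. The colour-matching idea behind kmap_coherent(), as a sketch with an assumed flat fixmap layout (not the kernel's implementation):

    /* Pick a kernel virtual slot whose alias bits equal the user address's,
     * so kernel stores land in the same cache sets the user mapping reads.
     * fixmap_base and the one-slot-per-colour layout are assumptions made
     * here for illustration. */
    static unsigned long coherent_slot(unsigned long fixmap_base,
                                       unsigned long user_vaddr,
                                       unsigned long shm_align_mask)
    {
            return fixmap_base + (user_vaddr & shm_align_mask);
    }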
diff --git a/arch/sh/mm/pg-sh7705.c b/arch/sh/mm/pg-sh7705.c
index eaf25147194c..684891b5c8c0 100644
--- a/arch/sh/mm/pg-sh7705.c
+++ b/arch/sh/mm/pg-sh7705.c
@@ -26,7 +26,7 @@
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 
-static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
+static void __flush_purge_virtual_region(void *p1, void *virt, int size)
 {
         unsigned long v;
         unsigned long begin, end;
@@ -75,19 +75,13 @@ static inline void __flush_purge_virtual_region(void *p1, void *virt, int size)
  */
 void clear_user_page(void *to, unsigned long address, struct page *pg)
 {
-        struct page *page = virt_to_page(to);
-
-        __set_bit(PG_mapped, &page->flags);
-        if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
-                clear_page(to);
-                __flush_wback_region(to, PAGE_SIZE);
-        } else {
+        if (pages_do_alias(address, (unsigned long)to))
                 __flush_purge_virtual_region(to,
                                              (void *)(address & 0xfffff000),
                                              PAGE_SIZE);
-                clear_page(to);
-                __flush_wback_region(to, PAGE_SIZE);
-        }
+
+        clear_page(to);
+        __flush_wback_region(to, PAGE_SIZE);
 }
 
 /*
@@ -98,41 +92,11 @@ void clear_user_page(void *to, unsigned long address, struct page *pg)
98 */ 92 */
99void copy_user_page(void *to, void *from, unsigned long address, struct page *pg) 93void copy_user_page(void *to, void *from, unsigned long address, struct page *pg)
100{ 94{
101 struct page *page = virt_to_page(to); 95 if (pages_do_alias(address, (unsigned long)to))
102
103
104 __set_bit(PG_mapped, &page->flags);
105 if (((address ^ (unsigned long)to) & CACHE_ALIAS) == 0) {
106 copy_page(to, from);
107 __flush_wback_region(to, PAGE_SIZE);
108 } else {
109 __flush_purge_virtual_region(to, 96 __flush_purge_virtual_region(to,
110 (void *)(address & 0xfffff000), 97 (void *)(address & 0xfffff000),
111 PAGE_SIZE); 98 PAGE_SIZE);
112 copy_page(to, from);
113 __flush_wback_region(to, PAGE_SIZE);
114 }
115}
116 99
117/* 100 copy_page(to, from);
118 * For SH7705, we have our own implementation for ptep_get_and_clear 101 __flush_wback_region(to, PAGE_SIZE);
119 * Copied from pg-sh4.c
120 */
121pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
122{
123 pte_t pte = *ptep;
124
125 pte_clear(mm, addr, ptep);
126 if (!pte_not_present(pte)) {
127 unsigned long pfn = pte_pfn(pte);
128 if (pfn_valid(pfn)) {
129 struct page *page = pfn_to_page(pfn);
130 struct address_space *mapping = page_mapping(page);
131 if (!mapping || !mapping_writably_mapped(mapping))
132 __clear_bit(PG_mapped, &page->flags);
133 }
134 }
135
136 return pte;
137} 102}
138
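The SH7705 paths keep a strict purge-before-write order because the cache is unified: stale lines at the user colour must be purged before the kernel writes the page through its own alias, and the final writeback publishes the result to memory. (The open-coded 0xfffff000 is simply PAGE_MASK for 4 KB pages.) A self-contained sketch of that ordering, with hypothetical helper names standing in for the __flush_*_region() calls above:

    #include <string.h>

    /* Hypothetical stand-ins for __flush_purge_virtual_region() and
     * __flush_wback_region(); stubbed so the sketch compiles. */
    static void purge_user_colour(unsigned long uaddr) { (void)uaddr; }
    static void writeback_kernel_alias(void *kaddr)    { (void)kaddr; }

    static unsigned long pages_do_alias(unsigned long a1, unsigned long a2)
    {
            return (a1 ^ a2) & 0x1000UL;   /* illustrative alias mask */
    }

    /* Purge stale user-colour lines *before* writing through the kernel
     * alias, then write back so the user colour observes the new data. */
    static void clear_user_page_sketch(void *to, unsigned long uaddr)
    {
            if (pages_do_alias(uaddr, (unsigned long)to))
                    purge_user_colour(uaddr & ~0xfffUL);
            memset(to, 0, 4096);
            writeback_kernel_alias(to);
    }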
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 2aab3ea934d7..c39b77363352 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -27,23 +27,6 @@ void update_mmu_cache(struct vm_area_struct * vma,
         if (vma && current->active_mm != vma->vm_mm)
                 return;
 
-#ifndef CONFIG_CACHE_OFF
-        {
-                unsigned long pfn = pte_pfn(pte);
-
-                if (pfn_valid(pfn)) {
-                        struct page *page = pfn_to_page(pfn);
-
-                        if (!test_bit(PG_mapped, &page->flags)) {
-                                unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-                                __flush_wback_region((void *)P1SEGADDR(phys),
-                                                     PAGE_SIZE);
-                                __set_bit(PG_mapped, &page->flags);
-                        }
-                }
-        }
-#endif
-
         local_irq_save(flags);
 
         /* Set PTEH register */
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 17cb7c3adf22..9b8459c74abd 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -33,25 +33,25 @@ void update_mmu_cache(struct vm_area_struct * vma,
         unsigned long flags;
         unsigned long pteval;
         unsigned long vpn;
+        unsigned long pfn = pte_pfn(pte);
+        struct page *page;
 
         /* Ptrace may call this routine. */
         if (vma && current->active_mm != vma->vm_mm)
                 return;
 
+        page = pfn_to_page(pfn);
+        if (pfn_valid(pfn) && page_mapping(page)) {
 #if defined(CONFIG_SH7705_CACHE_32KB)
-        {
-                struct page *page = pte_page(pte);
-                unsigned long pfn = pte_pfn(pte);
-
-                if (pfn_valid(pfn) && !test_bit(PG_mapped, &page->flags)) {
-                        unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-
-                        __flush_wback_region((void *)P1SEGADDR(phys),
-                                             PAGE_SIZE);
-                        __set_bit(PG_mapped, &page->flags);
+                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+                if (dirty) {
+                        unsigned long addr = (unsigned long)page_address(page);
+
+                        if (pages_do_alias(addr, address & PAGE_MASK))
+                                __flush_wback_region((void *)addr, PAGE_SIZE);
                 }
-        }
 #endif
+        }
 
         local_irq_save(flags);
 
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index f0c7b7397fa6..cf50082d2435 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -21,27 +21,26 @@ void update_mmu_cache(struct vm_area_struct * vma,
         unsigned long flags;
         unsigned long pteval;
         unsigned long vpn;
+        unsigned long pfn = pte_pfn(pte);
+        struct page *page;
 
         /* Ptrace may call this routine. */
         if (vma && current->active_mm != vma->vm_mm)
                 return;
 
-#ifndef CONFIG_CACHE_OFF
-        {
-                unsigned long pfn = pte_pfn(pte);
-
-                if (pfn_valid(pfn)) {
-                        struct page *page = pfn_to_page(pfn);
-
-                        if (!test_bit(PG_mapped, &page->flags)) {
-                                unsigned long phys = pte_val(pte) & PTE_PHYS_MASK;
-                                __flush_wback_region((void *)P1SEGADDR(phys),
-                                                     PAGE_SIZE);
-                                __set_bit(PG_mapped, &page->flags);
-                        }
-                }
-        }
+        page = pfn_to_page(pfn);
+        if (pfn_valid(pfn) && page_mapping(page)) {
+#ifndef CONFIG_SMP
+                int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
+                if (dirty) {
+                        unsigned long addr = (unsigned long)page_address(page);
+
+                        if (pages_do_alias(addr, address & PAGE_MASK))
+                                __flush_wback_region((void *)addr, PAGE_SIZE);
+                }
 #endif
+        }
 
         local_irq_save(flags);
 
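Both TLB-refill paths now settle the deferred flush at map-in time, and test_and_clear_bit() consumes PG_dcache_dirty atomically so repeated refills of the same page flush at most once; the writeback is also skipped when the kernel and user addresses share a colour. A minimal userspace analogue of the consume-once pattern, assuming GCC/Clang __atomic builtins:

    #include <stdio.h>

    /* Userspace analogue of the kernel's test_and_clear_bit(): atomically
     * clear the bit and report whether it had been set. */
    static int test_and_clear_bit_sketch(int nr, unsigned long *word)
    {
            unsigned long mask = 1UL << nr;
            unsigned long old = __atomic_fetch_and(word, ~mask, __ATOMIC_SEQ_CST);
            return (old & mask) != 0;
    }

    int main(void)
    {
            unsigned long flags = 1UL << 3;   /* bit 3 plays PG_dcache_dirty */

            if (test_and_clear_bit_sketch(3, &flags))
                    puts("first map-in pays the flush");
            if (!test_and_clear_bit_sketch(3, &flags))
                    puts("later map-ins skip it");
            return 0;
    }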