author	Paul Mundt <lethal@linux-sh.org>	2009-09-03 04:21:10 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2009-09-03 04:21:10 -0400
commit	0906a3ad33a254094fb74828e3ddb9af8771a6da (patch)
tree	33acc1be2e213ae2f13439d3d5f8e9dd8a4f2d46 /arch/sh/mm
parent	d1af119a69fc9a625bd57a66d9c9fa88795b082c (diff)
sh: Fix up and optimize the kmap_coherent() interface.
This fixes up the kmap_coherent/kunmap_coherent() interface for recent
changes both in the page fault path and the shared cache flushers, as
well as adding in some optimizations.

One of the key things to note here is that the TLB flush itself is
deferred until the unmap, and the call in to update_mmu_cache() itself
goes away, relying on the regular page fault path to handle the lazy
dcache writeback if necessary.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
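For illustration, a minimal sketch of how a caller uses the reworked
interface (the wrapper name below is hypothetical; the body mirrors the
copy_to_user_page() hunk in this patch). kunmap_coherent() now takes the
kernel virtual address returned by kmap_coherent(), and the fixmap PTE
clear plus TLB flush are deferred to the unmap:

static void copy_to_aliased_page_example(struct page *page, unsigned long vaddr,
					 const void *src, size_t len)
{
	/* Map the page at a cache-colour-matched fixmap slot. */
	void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);

	memcpy(vto, src, len);

	/* The PTE clear and deferred TLB flush now happen here, not at map time. */
	kunmap_coherent(vto);
}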
Diffstat (limited to 'arch/sh/mm')
-rw-r--r--	arch/sh/mm/cache.c	8
-rw-r--r--	arch/sh/mm/fault_32.c	4
-rw-r--r--	arch/sh/mm/init.c	45
-rw-r--r--	arch/sh/mm/kmap.c	36
-rw-r--r--	arch/sh/mm/nommu.c	2
5 files changed, 49 insertions, 46 deletions
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index db2b1c5beffd..8e4a8d1ac4a9 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -51,7 +51,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		    !test_bit(PG_dcache_dirty, &page->flags)) {
 		void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(vto, src, len);
-		kunmap_coherent();
+		kunmap_coherent(vto);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
@@ -70,7 +70,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
 		    !test_bit(PG_dcache_dirty, &page->flags)) {
 		void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
 		memcpy(dst, vfrom, len);
-		kunmap_coherent();
+		kunmap_coherent(vfrom);
 	} else {
 		memcpy(dst, src, len);
 		if (boot_cpu_data.dcache.n_aliases)
@@ -89,7 +89,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 	    !test_bit(PG_dcache_dirty, &from->flags)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
-		kunmap_coherent();
+		kunmap_coherent(vfrom);
 	} else {
 		vfrom = kmap_atomic(from, KM_USER0);
 		copy_page(vto, vfrom);
@@ -150,7 +150,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
 		kaddr = kmap_coherent(page, vmaddr);
 		__flush_wback_region((void *)kaddr, PAGE_SIZE);
-		kunmap_coherent();
+		kunmap_coherent(kaddr);
 	} else
 		__flush_wback_region((void *)addr, PAGE_SIZE);
 }
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index f1c93c880ed4..781b413ff82d 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -82,8 +82,8 @@ static noinline int vmalloc_fault(unsigned long address)
 	pmd_t *pmd_k;
 	pte_t *pte_k;
 
-	/* Make sure we are in vmalloc area: */
-	if (!(address >= VMALLOC_START && address < VMALLOC_END))
+	/* Make sure we are in vmalloc/module/P3 area: */
+	if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
 		return -1;
 
 	/*
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 0a9b4d855bc9..edc842ff61ed 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -106,27 +106,31 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
-	int pgd_idx;
+	pte_t *pte;
+	int i, j, k;
 	unsigned long vaddr;
 
-	vaddr = start & PMD_MASK;
-	end = (end + PMD_SIZE - 1) & PMD_MASK;
-	pgd_idx = pgd_index(vaddr);
-	pgd = pgd_base + pgd_idx;
-
-	for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-		BUG_ON(pgd_none(*pgd));
-		pud = pud_offset(pgd, 0);
-		BUG_ON(pud_none(*pud));
-		pmd = pmd_offset(pud, 0);
-
-		if (!pmd_present(*pmd)) {
-			pte_t *pte_table;
-			pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-			pmd_populate_kernel(&init_mm, pmd, pte_table);
+	vaddr = start;
+	i = __pgd_offset(vaddr);
+	j = __pud_offset(vaddr);
+	k = __pmd_offset(vaddr);
+	pgd = pgd_base + i;
+
+	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+		pud = (pud_t *)pgd;
+		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+			pmd = (pmd_t *)pud;
+			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+				if (pmd_none(*pmd)) {
+					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+					pmd_populate_kernel(&init_mm, pmd, pte);
+					BUG_ON(pte != pte_offset_kernel(pmd, 0));
+				}
+				vaddr += PMD_SIZE;
+			}
+			k = 0;
 		}
-
-		vaddr += PMD_SIZE;
+		j = 0;
 	}
 }
 #endif	/* CONFIG_MMU */
@@ -137,7 +141,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 void __init paging_init(void)
 {
 	unsigned long max_zone_pfns[MAX_NR_ZONES];
-	unsigned long vaddr;
+	unsigned long vaddr, end;
 	int nid;
 
 	/* We don't need to map the kernel through the TLB, as
@@ -155,7 +159,8 @@ void __init paging_init(void)
 	 * pte's will be filled in by __set_fixmap().
 	 */
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	page_table_range_init(vaddr, 0, swapper_pg_dir);
+	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
+	page_table_range_init(vaddr, end, swapper_pg_dir);
 
 	kmap_coherent_init();
 
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 3eecf0d42f1a..c52cd8c40a64 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -24,9 +24,6 @@ void __init kmap_coherent_init(void)
 {
 	unsigned long vaddr;
 
-	if (!boot_cpu_data.dcache.n_aliases)
-		return;
-
 	/* cache the first coherent kmap pte */
 	vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
 	kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
@@ -35,30 +32,31 @@ void __init kmap_coherent_init(void)
 void *kmap_coherent(struct page *page, unsigned long addr)
 {
 	enum fixed_addresses idx;
-	unsigned long vaddr, flags;
-	pte_t pte;
+	unsigned long vaddr;
 
 	BUG_ON(test_bit(PG_dcache_dirty, &page->flags));
 
-	inc_preempt_count();
-
-	idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
-	vaddr = __fix_to_virt(FIX_CMAP_END - idx);
-	pte = mk_pte(page, PAGE_KERNEL);
+	pagefault_disable();
 
-	local_irq_save(flags);
-	flush_tlb_one(get_asid(), vaddr);
-	local_irq_restore(flags);
+	idx = FIX_CMAP_END -
+		((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT);
+	vaddr = __fix_to_virt(idx);
 
-	update_mmu_cache(NULL, vaddr, pte);
-
-	set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+	BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
+	set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));
 
 	return (void *)vaddr;
 }
 
-void kunmap_coherent(void)
+void kunmap_coherent(void *kvaddr)
 {
-	dec_preempt_count();
-	preempt_check_resched();
+	if (kvaddr >= (void *)FIXADDR_START) {
+		unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
+		enum fixed_addresses idx = __virt_to_fix(vaddr);
+
+		pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
+		local_flush_tlb_one(get_asid(), vaddr);
+	}
+
+	pagefault_enable();
 }
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index 51b54037216f..ac16c05917ef 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -81,7 +81,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
 	return NULL;
 }
 
-void kunmap_coherent(void)
+void kunmap_coherent(void *kvaddr)
 {
 	BUG();
 }