Diffstat (limited to 'arch/sh')

 -rw-r--r--  arch/sh/include/asm/cacheflush.h |  2
 -rw-r--r--  arch/sh/include/asm/pgtable_32.h |  8
 -rw-r--r--  arch/sh/include/asm/pgtable_64.h |  5
 -rw-r--r--  arch/sh/mm/cache.c               |  8
 -rw-r--r--  arch/sh/mm/fault_32.c            |  4
 -rw-r--r--  arch/sh/mm/init.c                | 45
 -rw-r--r--  arch/sh/mm/kmap.c                | 36
 -rw-r--r--  arch/sh/mm/nommu.c               |  2

 8 files changed, 62 insertions, 48 deletions
diff --git a/arch/sh/include/asm/cacheflush.h b/arch/sh/include/asm/cacheflush.h
index 11e416630585..c29918f3c819 100644
--- a/arch/sh/include/asm/cacheflush.h
+++ b/arch/sh/include/asm/cacheflush.h
@@ -85,7 +85,7 @@ extern void copy_from_user_page(struct vm_area_struct *vma,
 
 void kmap_coherent_init(void);
 void *kmap_coherent(struct page *page, unsigned long addr);
-void kunmap_coherent(void);
+void kunmap_coherent(void *kvaddr);
 
 #define PG_dcache_dirty PG_arch_1
 
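The prototype change above means kunmap_coherent() now takes the kernel virtual address that kmap_coherent() returned, so the unmap side can locate the fixmap slot to tear down. A minimal caller-side sketch of the new pairing (a fragment only, reusing the variable names from the copy_to_user_page() hunk further down):

        void *vto = kmap_coherent(page, vaddr);       /* map the page at an alias-safe fixmap slot */
        memcpy(vto + (vaddr & ~PAGE_MASK), src, len); /* write through the coherent mapping */
        kunmap_coherent(vto);                         /* hand the address back so the slot can be torn down */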
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 4c4429cda56d..c0d359ce337b 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -408,13 +408,19 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 
 /* to find an entry in a page-table-directory. */
 #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
-#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
+#define __pgd_offset(address) pgd_index(address)
 
 /* to find an entry in a kernel page-table-directory */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
+#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
 /* Find an entry in the third-level page table.. */
 #define pte_index(address) ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
+#define __pte_offset(address) pte_index(address)
+
 #define pte_offset_kernel(dir, address) \
         ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(address))
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
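The new __pgd_offset()/__pud_offset()/__pmd_offset()/__pte_offset() helpers return the index of an address at each table level rather than a pointer; the reworked page_table_range_init() in arch/sh/mm/init.c later in this diff uses them to seed its loop counters. A rough sketch of that use, simplified from the init.c hunk (the pud/pmd levels are folded on sh, hence the casts there):

        /* Sketch only: per-level indices for a starting virtual address. */
        unsigned long vaddr = start;
        int i = __pgd_offset(vaddr);    /* index into the PGD */
        int j = __pud_offset(vaddr);    /* index into the PUD */
        int k = __pmd_offset(vaddr);    /* index into the PMD */
        pgd_t *pgd = pgd_base + i;      /* first PGD entry covering 'start' */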
diff --git a/arch/sh/include/asm/pgtable_64.h b/arch/sh/include/asm/pgtable_64.h
index c78990cda557..17cdbecc3adc 100644
--- a/arch/sh/include/asm/pgtable_64.h
+++ b/arch/sh/include/asm/pgtable_64.h
@@ -60,6 +60,9 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
 /* To find an entry in a kernel PGD. */
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
+#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
+#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
+
 /*
  * PMD level access routines. Same notes as above.
  */
@@ -80,6 +83,8 @@ static __inline__ void pmd_set(pmd_t *pmdp,pte_t *ptep)
 #define pte_index(address) \
         ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
 
+#define __pte_offset(address) pte_index(address)
+
 #define pte_offset_kernel(dir, addr) \
         ((pte_t *) ((pmd_val(*(dir))) & PAGE_MASK) + pte_index((addr)))
 
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index db2b1c5beffd..8e4a8d1ac4a9 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -51,7 +51,7 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
             !test_bit(PG_dcache_dirty, &page->flags)) {
                 void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                 memcpy(vto, src, len);
-                kunmap_coherent();
+                kunmap_coherent(vto);
         } else {
                 memcpy(dst, src, len);
                 if (boot_cpu_data.dcache.n_aliases)
@@ -70,7 +70,7 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
             !test_bit(PG_dcache_dirty, &page->flags)) {
                 void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
                 memcpy(dst, vfrom, len);
-                kunmap_coherent();
+                kunmap_coherent(vfrom);
         } else {
                 memcpy(dst, src, len);
                 if (boot_cpu_data.dcache.n_aliases)
@@ -89,7 +89,7 @@ void copy_user_highpage(struct page *to, struct page *from,
             !test_bit(PG_dcache_dirty, &from->flags)) {
                 vfrom = kmap_coherent(from, vaddr);
                 copy_page(vto, vfrom);
-                kunmap_coherent();
+                kunmap_coherent(vfrom);
         } else {
                 vfrom = kmap_atomic(from, KM_USER0);
                 copy_page(vto, vfrom);
@@ -150,7 +150,7 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
 
                 kaddr = kmap_coherent(page, vmaddr);
                 __flush_wback_region((void *)kaddr, PAGE_SIZE);
-                kunmap_coherent();
+                kunmap_coherent(kaddr);
         } else
                 __flush_wback_region((void *)addr, PAGE_SIZE);
 }
diff --git a/arch/sh/mm/fault_32.c b/arch/sh/mm/fault_32.c
index f1c93c880ed4..781b413ff82d 100644
--- a/arch/sh/mm/fault_32.c
+++ b/arch/sh/mm/fault_32.c
@@ -82,8 +82,8 @@ static noinline int vmalloc_fault(unsigned long address)
         pmd_t *pmd_k;
         pte_t *pte_k;
 
-        /* Make sure we are in vmalloc area: */
-        if (!(address >= VMALLOC_START && address < VMALLOC_END))
+        /* Make sure we are in vmalloc/module/P3 area: */
+        if (!(address >= VMALLOC_START && address < P3_ADDR_MAX))
                 return -1;
 
         /*
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 0a9b4d855bc9..edc842ff61ed 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -106,27 +106,31 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
         pgd_t *pgd;
         pud_t *pud;
         pmd_t *pmd;
-        int pgd_idx;
+        pte_t *pte;
+        int i, j, k;
         unsigned long vaddr;
 
-        vaddr = start & PMD_MASK;
-        end = (end + PMD_SIZE - 1) & PMD_MASK;
-        pgd_idx = pgd_index(vaddr);
-        pgd = pgd_base + pgd_idx;
-
-        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
-                BUG_ON(pgd_none(*pgd));
-                pud = pud_offset(pgd, 0);
-                BUG_ON(pud_none(*pud));
-                pmd = pmd_offset(pud, 0);
-
-                if (!pmd_present(*pmd)) {
-                        pte_t *pte_table;
-                        pte_table = (pte_t *)alloc_bootmem_low_pages(PAGE_SIZE);
-                        pmd_populate_kernel(&init_mm, pmd, pte_table);
+        vaddr = start;
+        i = __pgd_offset(vaddr);
+        j = __pud_offset(vaddr);
+        k = __pmd_offset(vaddr);
+        pgd = pgd_base + i;
+
+        for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+                pud = (pud_t *)pgd;
+                for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+                        pmd = (pmd_t *)pud;
+                        for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+                                if (pmd_none(*pmd)) {
+                                        pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
+                                        pmd_populate_kernel(&init_mm, pmd, pte);
+                                        BUG_ON(pte != pte_offset_kernel(pmd, 0));
+                                }
+                                vaddr += PMD_SIZE;
+                        }
+                        k = 0;
                 }
-
-                vaddr += PMD_SIZE;
+                j = 0;
         }
 }
 #endif  /* CONFIG_MMU */
@@ -137,7 +141,7 @@ void __init page_table_range_init(unsigned long start, unsigned long end,
 void __init paging_init(void)
 {
         unsigned long max_zone_pfns[MAX_NR_ZONES];
-        unsigned long vaddr;
+        unsigned long vaddr, end;
         int nid;
 
         /* We don't need to map the kernel through the TLB, as
@@ -155,7 +159,8 @@ void __init paging_init(void)
          * pte's will be filled in by __set_fixmap().
          */
         vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-        page_table_range_init(vaddr, 0, swapper_pg_dir);
+        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
+        page_table_range_init(vaddr, end, swapper_pg_dir);
 
         kmap_coherent_init();
 
diff --git a/arch/sh/mm/kmap.c b/arch/sh/mm/kmap.c
index 3eecf0d42f1a..c52cd8c40a64 100644
--- a/arch/sh/mm/kmap.c
+++ b/arch/sh/mm/kmap.c
@@ -24,9 +24,6 @@ void __init kmap_coherent_init(void)
 {
         unsigned long vaddr;
 
-        if (!boot_cpu_data.dcache.n_aliases)
-                return;
-
         /* cache the first coherent kmap pte */
         vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
         kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
@@ -35,30 +32,31 @@ void __init kmap_coherent_init(void)
 void *kmap_coherent(struct page *page, unsigned long addr)
 {
         enum fixed_addresses idx;
-        unsigned long vaddr, flags;
-        pte_t pte;
+        unsigned long vaddr;
 
         BUG_ON(test_bit(PG_dcache_dirty, &page->flags));
 
-        inc_preempt_count();
-
-        idx = (addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT;
-        vaddr = __fix_to_virt(FIX_CMAP_END - idx);
-        pte = mk_pte(page, PAGE_KERNEL);
+        pagefault_disable();
 
-        local_irq_save(flags);
-        flush_tlb_one(get_asid(), vaddr);
-        local_irq_restore(flags);
+        idx = FIX_CMAP_END -
+                ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT);
+        vaddr = __fix_to_virt(idx);
 
-        update_mmu_cache(NULL, vaddr, pte);
-
-        set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
+        BUG_ON(!pte_none(*(kmap_coherent_pte - idx)));
+        set_pte(kmap_coherent_pte - idx, mk_pte(page, PAGE_KERNEL));
 
         return (void *)vaddr;
 }
 
-void kunmap_coherent(void)
+void kunmap_coherent(void *kvaddr)
 {
-        dec_preempt_count();
-        preempt_check_resched();
+        if (kvaddr >= (void *)FIXADDR_START) {
+                unsigned long vaddr = (unsigned long)kvaddr & PAGE_MASK;
+                enum fixed_addresses idx = __virt_to_fix(vaddr);
+
+                pte_clear(&init_mm, vaddr, kmap_coherent_pte - idx);
+                local_flush_tlb_one(get_asid(), vaddr);
+        }
+
+        pagefault_enable();
 }
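With this rework, kmap_coherent() derives its fixmap slot purely from the cache alias of the user address, and kunmap_coherent() recovers that slot from the kernel virtual address it is handed before clearing the PTE and flushing the TLB entry; the FIXADDR_START check simply skips the teardown for addresses that are not fixmap mappings. A small illustration of the slot arithmetic used above:

        /* Illustration only: the index round-trip behind kmap_coherent()/kunmap_coherent(). */
        enum fixed_addresses idx;
        unsigned long vaddr;

        idx = FIX_CMAP_END -
                ((addr & current_cpu_data.dcache.alias_mask) >> PAGE_SHIFT);
        vaddr = __fix_to_virt(idx);                      /* address returned by kmap_coherent() */

        BUG_ON(__virt_to_fix(vaddr & PAGE_MASK) != idx); /* kunmap_coherent() inverts the mapping */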
diff --git a/arch/sh/mm/nommu.c b/arch/sh/mm/nommu.c
index 51b54037216f..ac16c05917ef 100644
--- a/arch/sh/mm/nommu.c
+++ b/arch/sh/mm/nommu.c
@@ -81,7 +81,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
         return NULL;
 }
 
-void kunmap_coherent(void)
+void kunmap_coherent(void *kvaddr)
 {
         BUG();
 }