Diffstat (limited to 'arch/mips/mm')
 arch/mips/mm/c-r4k.c       |   4
 arch/mips/mm/dma-default.c | 114
 arch/mips/mm/fault.c       |   8
 arch/mips/mm/init.c        |   8
 arch/mips/mm/mmap.c        | 197
 arch/mips/mm/pgtable-32.c  |   2
 arch/mips/mm/pgtable-64.c  |   2
 arch/mips/mm/tlbex.c       | 294
 8 files changed, 449 insertions(+), 180 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index eeb642e4066..b9aabb998a3 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -604,6 +604,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 			r4k_blast_scache();
 		else
 			blast_scache_range(addr, addr + size);
+		__sync();
 		return;
 	}
 
@@ -620,6 +621,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 	}
 
 	bc_wback_inv(addr, size);
+	__sync();
 }
 
 static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
@@ -647,6 +649,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 				 (addr + size - 1) & almask);
 			blast_inv_scache_range(addr, addr + size);
 		}
+		__sync();
 		return;
 	}
 
@@ -663,6 +666,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	}
 
 	bc_inv(addr, size);
+	__sync();
 }
 
 #endif /* CONFIG_DMA_NONCOHERENT */
 
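The hunks above add a completion barrier after each DMA cache maintenance operation, so the writeback/invalidate is globally visible before a device starts its DMA. A minimal user-space sketch of the pattern (blast_scache_range() is a hypothetical stub here; GCC's __sync_synchronize() stands in for the MIPS SYNC instruction behind the kernel's __sync()):

#include <stddef.h>

/* Hypothetical stand-in for the kernel's scache helper. */
static void blast_scache_range(unsigned long start, unsigned long end)
{
	(void)start;
	(void)end;
}

static void dma_cache_wback_inv(unsigned long addr, size_t size)
{
	blast_scache_range(addr, addr + size);
	/* The point of the patch: a completion barrier after the cache op,
	 * so the writeback has finished before DMA begins.  A portable
	 * compiler builtin stands in for the MIPS SYNC. */
	__sync_synchronize();
}

int main(void)
{
	dma_cache_wback_inv(0x1000, 256);
	return 0;
}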
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 21ea14efb83..46084912e58 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -15,18 +15,18 @@
 #include <linux/scatterlist.h>
 #include <linux/string.h>
 #include <linux/gfp.h>
+#include <linux/highmem.h>
 
 #include <asm/cache.h>
 #include <asm/io.h>
 
 #include <dma-coherence.h>
 
-static inline unsigned long dma_addr_to_virt(struct device *dev,
+static inline struct page *dma_addr_to_page(struct device *dev,
 	dma_addr_t dma_addr)
 {
-	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);
-
-	return (unsigned long)phys_to_virt(addr);
+	return pfn_to_page(
+		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
 }
 
 /*
@@ -148,20 +148,20 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 	free_pages(addr, get_order(size));
 }
 
-static inline void __dma_sync(unsigned long addr, size_t size,
+static inline void __dma_sync_virtual(void *addr, size_t size,
 	enum dma_data_direction direction)
 {
 	switch (direction) {
 	case DMA_TO_DEVICE:
-		dma_cache_wback(addr, size);
+		dma_cache_wback((unsigned long)addr, size);
 		break;
 
 	case DMA_FROM_DEVICE:
-		dma_cache_inv(addr, size);
+		dma_cache_inv((unsigned long)addr, size);
 		break;
 
 	case DMA_BIDIRECTIONAL:
-		dma_cache_wback_inv(addr, size);
+		dma_cache_wback_inv((unsigned long)addr, size);
 		break;
 
 	default:
@@ -169,12 +169,49 @@ static inline void __dma_sync(unsigned long addr, size_t size,
 	}
 }
 
+/*
+ * A single sg entry may refer to multiple physically contiguous
+ * pages.  But we still need to process highmem pages individually.
+ * If highmem is not configured then the bulk of this loop gets
+ * optimized out.
+ */
+static inline void __dma_sync(struct page *page,
+	unsigned long offset, size_t size, enum dma_data_direction direction)
+{
+	size_t left = size;
+
+	do {
+		size_t len = left;
+
+		if (PageHighMem(page)) {
+			void *addr;
+
+			if (offset + len > PAGE_SIZE) {
+				if (offset >= PAGE_SIZE) {
+					page += offset >> PAGE_SHIFT;
+					offset &= ~PAGE_MASK;
+				}
+				len = PAGE_SIZE - offset;
+			}
+
+			addr = kmap_atomic(page);
+			__dma_sync_virtual(addr + offset, len, direction);
+			kunmap_atomic(addr);
+		} else
+			__dma_sync_virtual(page_address(page) + offset,
+					   size, direction);
+		offset = 0;
+		page++;
+		left -= len;
+	} while (left);
+}
+
 static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
 	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
 {
 	if (cpu_is_noncoherent_r10000(dev))
-		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
-			direction);
+		__dma_sync(dma_addr_to_page(dev, dma_addr),
+			   dma_addr & ~PAGE_MASK, size, direction);
 
 	plat_unmap_dma_mem(dev, dma_addr, size, direction);
 }
@@ -185,13 +222,11 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
 	int i;
 
 	for (i = 0; i < nents; i++, sg++) {
-		unsigned long addr;
-
-		addr = (unsigned long) sg_virt(sg);
-		if (!plat_device_is_coherent(dev) && addr)
-			__dma_sync(addr, sg->length, direction);
-		sg->dma_address = plat_map_dma_mem(dev,
-				(void *)addr, sg->length);
+		if (!plat_device_is_coherent(dev))
+			__dma_sync(sg_page(sg), sg->offset, sg->length,
+				   direction);
+		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
+				  sg->offset;
 	}
 
 	return nents;
@@ -201,30 +236,23 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
 	unsigned long offset, size_t size, enum dma_data_direction direction,
 	struct dma_attrs *attrs)
 {
-	unsigned long addr;
-
-	addr = (unsigned long) page_address(page) + offset;
-
 	if (!plat_device_is_coherent(dev))
-		__dma_sync(addr, size, direction);
+		__dma_sync(page, offset, size, direction);
 
-	return plat_map_dma_mem(dev, (void *)addr, size);
+	return plat_map_dma_mem_page(dev, page) + offset;
 }
 
 static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 	int nhwentries, enum dma_data_direction direction,
 	struct dma_attrs *attrs)
 {
-	unsigned long addr;
 	int i;
 
 	for (i = 0; i < nhwentries; i++, sg++) {
 		if (!plat_device_is_coherent(dev) &&
-		    direction != DMA_TO_DEVICE) {
-			addr = (unsigned long) sg_virt(sg);
-			if (addr)
-				__dma_sync(addr, sg->length, direction);
-		}
+		    direction != DMA_TO_DEVICE)
+			__dma_sync(sg_page(sg), sg->offset, sg->length,
+				   direction);
 		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
 	}
 }
@@ -232,24 +260,18 @@ static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
 static void mips_dma_sync_single_for_cpu(struct device *dev,
 	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
-	if (cpu_is_noncoherent_r10000(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr, size, direction);
-	}
+	if (cpu_is_noncoherent_r10000(dev))
+		__dma_sync(dma_addr_to_page(dev, dma_handle),
+			   dma_handle & ~PAGE_MASK, size, direction);
 }
 
 static void mips_dma_sync_single_for_device(struct device *dev,
 	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
 {
 	plat_extra_sync_for_device(dev);
-	if (!plat_device_is_coherent(dev)) {
-		unsigned long addr;
-
-		addr = dma_addr_to_virt(dev, dma_handle);
-		__dma_sync(addr, size, direction);
-	}
+	if (!plat_device_is_coherent(dev))
+		__dma_sync(dma_addr_to_page(dev, dma_handle),
+			   dma_handle & ~PAGE_MASK, size, direction);
 }
 
 static void mips_dma_sync_sg_for_cpu(struct device *dev,
@@ -260,8 +282,8 @@ static void mips_dma_sync_sg_for_cpu(struct device *dev,
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (cpu_is_noncoherent_r10000(dev))
-			__dma_sync((unsigned long)page_address(sg_page(sg)),
-				   sg->length, direction);
+			__dma_sync(sg_page(sg), sg->offset, sg->length,
+				   direction);
 	}
 }
 
@@ -273,8 +295,8 @@ static void mips_dma_sync_sg_for_device(struct device *dev,
 	/* Make sure that gcc doesn't leave the empty loop body.  */
 	for (i = 0; i < nelems; i++, sg++) {
 		if (!plat_device_is_coherent(dev))
-			__dma_sync((unsigned long)page_address(sg_page(sg)),
-				   sg->length, direction);
+			__dma_sync(sg_page(sg), sg->offset, sg->length,
+				   direction);
 	}
 }
 
@@ -295,7 +317,7 @@ void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
 
 	plat_extra_sync_for_device(dev);
 	if (!plat_device_is_coherent(dev))
-		__dma_sync((unsigned long)vaddr, size, direction);
+		__dma_sync_virtual(vaddr, size, direction);
 }
 
 EXPORT_SYMBOL(dma_cache_sync);
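The new __dma_sync() walks a (page, offset, size) region one page at a time so highmem pages can be kmapped individually. A standalone sketch of just the split arithmetic (4 KiB pages assumed; plain page numbers stand in for struct page pointers):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_MASK  (~(PAGE_SIZE - 1))

/* Same per-page split arithmetic as the kernel's new __dma_sync(). */
static void sync_region(unsigned long page, unsigned long offset, size_t size)
{
	size_t left = size;

	do {
		size_t len = left;

		if (offset + len > PAGE_SIZE) {
			if (offset >= PAGE_SIZE) {
				page += offset >> PAGE_SHIFT;
				offset &= ~PAGE_MASK;
			}
			len = PAGE_SIZE - offset;
		}
		printf("sync page %lu, offset %lu, len %zu\n",
		       page, offset, len);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

int main(void)
{
	sync_region(0, 3000, 10000);	/* region spanning three page boundaries */
	return 0;
}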
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 137ee76a004..937cf336816 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -145,7 +145,7 @@ good_area:
 	 * the fault.
 	 */
 	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
-	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address);
+	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -154,12 +154,10 @@ good_area:
 		BUG();
 	}
 	if (fault & VM_FAULT_MAJOR) {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ,
-				1, 0, regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address);
 		tsk->maj_flt++;
 	} else {
-		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN,
-				1, 0, regs, address);
+		perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address);
 		tsk->min_flt++;
 	}
 
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 1aadeb42c5a..b7ebc4fa89b 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -277,11 +277,11 @@ void __init fixrange_init(unsigned long start, unsigned long end,
 	k = __pmd_offset(vaddr);
 	pgd = pgd_base + i;
 
-	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+	for ( ; (i < PTRS_PER_PGD) && (vaddr < end); pgd++, i++) {
 		pud = (pud_t *)pgd;
-		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+		for ( ; (j < PTRS_PER_PUD) && (vaddr < end); pud++, j++) {
 			pmd = (pmd_t *)pud;
-			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+			for (; (k < PTRS_PER_PMD) && (vaddr < end); pmd++, k++) {
 				if (pmd_none(*pmd)) {
 					pte = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
 					set_pmd(pmd, __pmd((unsigned long)pte));
@@ -368,7 +368,7 @@ void __init mem_init(void)
 #ifdef CONFIG_DISCONTIGMEM
 #error "CONFIG_HIGHMEM and CONFIG_DISCONTIGMEM dont work together yet"
 #endif
-	max_mapnr = highend_pfn;
+	max_mapnr = highend_pfn ? highend_pfn : max_low_pfn;
 #else
 	max_mapnr = max_low_pfn;
 #endif
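The fixrange_init() change replaces the exact-match loop test (vaddr != end) with an ordered one (vaddr < end): if vaddr advances in steps that jump past end, an exact-match test never fires and the loop runs away. A trivial illustration of the bound:

#include <stdio.h>

int main(void)
{
	unsigned long vaddr, end = 10, stride = 4;
	int iters = 0;

	/* "vaddr != end" would never terminate here, since 0, 4, 8, 12, ...
	 * never equals 10; "vaddr < end" stops after three steps. */
	for (vaddr = 0; vaddr < end; vaddr += stride)
		iters++;

	printf("%d iterations\n", iters);	/* prints 3 */
	return 0;
}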
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index ae3c20a9556..302d779d5b0 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -6,32 +6,77 @@
  * Copyright (C) 2011 Wind River Systems,
  *   written by Ralf Baechle <ralf@linux-mips.org>
  */
+#include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/mm.h>
 #include <linux/mman.h>
 #include <linux/module.h>
+#include <linux/personality.h>
 #include <linux/random.h>
 #include <linux/sched.h>
 
 unsigned long shm_align_mask = PAGE_SIZE - 1;	/* Sane caches */
-
 EXPORT_SYMBOL(shm_align_mask);
 
-#define COLOUR_ALIGN(addr,pgoff)				\
+/* gap between mmap and stack */
+#define MIN_GAP (128*1024*1024UL)
+#define MAX_GAP ((TASK_SIZE)/6*5)
+
+static int mmap_is_legacy(void)
+{
+	if (current->personality & ADDR_COMPAT_LAYOUT)
+		return 1;
+
+	if (rlimit(RLIMIT_STACK) == RLIM_INFINITY)
+		return 1;
+
+	return sysctl_legacy_va_layout;
+}
+
+static unsigned long mmap_base(unsigned long rnd)
+{
+	unsigned long gap = rlimit(RLIMIT_STACK);
+
+	if (gap < MIN_GAP)
+		gap = MIN_GAP;
+	else if (gap > MAX_GAP)
+		gap = MAX_GAP;
+
+	return PAGE_ALIGN(TASK_SIZE - gap - rnd);
+}
+
+static inline unsigned long COLOUR_ALIGN_DOWN(unsigned long addr,
+	unsigned long pgoff)
+{
+	unsigned long base = addr & ~shm_align_mask;
+	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;
+
+	if (base + off <= addr)
+		return base + off;
+
+	return base - off;
+}
+
+#define COLOUR_ALIGN(addr, pgoff)				\
 	((((addr) + shm_align_mask) & ~shm_align_mask) +	\
 	 (((pgoff) << PAGE_SHIFT) & shm_align_mask))
 
-unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
-	unsigned long len, unsigned long pgoff, unsigned long flags)
+enum mmap_allocation_direction {UP, DOWN};
+
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
+	unsigned long addr0, unsigned long len, unsigned long pgoff,
+	unsigned long flags, enum mmap_allocation_direction dir)
 {
-	struct vm_area_struct * vmm;
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma;
+	unsigned long addr = addr0;
 	int do_color_align;
 
-	if (len > TASK_SIZE)
+	if (unlikely(len > TASK_SIZE))
 		return -ENOMEM;
 
 	if (flags & MAP_FIXED) {
-		/* Even MAP_FIXED mappings must reside within TASK_SIZE.  */
+		/* Even MAP_FIXED mappings must reside within TASK_SIZE */
 		if (TASK_SIZE - len < addr)
 			return -EINVAL;
 
@@ -48,34 +93,132 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	do_color_align = 0;
 	if (filp || (flags & MAP_SHARED))
 		do_color_align = 1;
+
+	/* requesting a specific address */
 	if (addr) {
 		if (do_color_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
 		else
 			addr = PAGE_ALIGN(addr);
-		vmm = find_vma(current->mm, addr);
+
+		vma = find_vma(mm, addr);
 		if (TASK_SIZE - len >= addr &&
-		    (!vmm || addr + len <= vmm->vm_start))
+		    (!vma || addr + len <= vma->vm_start))
 			return addr;
 	}
-	addr = current->mm->mmap_base;
-	if (do_color_align)
-		addr = COLOUR_ALIGN(addr, pgoff);
-	else
-		addr = PAGE_ALIGN(addr);
 
-	for (vmm = find_vma(current->mm, addr); ; vmm = vmm->vm_next) {
-		/* At this point:  (!vmm || addr < vmm->vm_end). */
-		if (TASK_SIZE - len < addr)
-			return -ENOMEM;
-		if (!vmm || addr + len <= vmm->vm_start)
-			return addr;
-		addr = vmm->vm_end;
+	if (dir == UP) {
+		addr = mm->mmap_base;
 		if (do_color_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
+		else
+			addr = PAGE_ALIGN(addr);
+
+		for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+			/* At this point:  (!vma || addr < vma->vm_end). */
+			if (TASK_SIZE - len < addr)
+				return -ENOMEM;
+			if (!vma || addr + len <= vma->vm_start)
+				return addr;
+			addr = vma->vm_end;
+			if (do_color_align)
+				addr = COLOUR_ALIGN(addr, pgoff);
+		}
+	} else {
+		/* check if free_area_cache is useful for us */
+		if (len <= mm->cached_hole_size) {
+			mm->cached_hole_size = 0;
+			mm->free_area_cache = mm->mmap_base;
+		}
+
+		/*
+		 * either no address requested, or the mapping can't fit into
+		 * the requested address hole
+		 */
+		addr = mm->free_area_cache;
+		if (do_color_align) {
+			unsigned long base =
+				COLOUR_ALIGN_DOWN(addr - len, pgoff);
+			addr = base + len;
+		}
+
+		/* make sure it can fit in the remaining address space */
+		if (likely(addr > len)) {
+			vma = find_vma(mm, addr - len);
+			if (!vma || addr <= vma->vm_start) {
+				/* cache the address as a hint for next time */
+				return mm->free_area_cache = addr - len;
+			}
+		}
+
+		if (unlikely(mm->mmap_base < len))
+			goto bottomup;
+
+		addr = mm->mmap_base - len;
+		if (do_color_align)
+			addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+
+		do {
+			/*
+			 * Lookup failure means no vma is above this address,
+			 * else if new region fits below vma->vm_start,
+			 * return with success:
+			 */
+			vma = find_vma(mm, addr);
+			if (likely(!vma || addr + len <= vma->vm_start)) {
+				/* cache the address as a hint for next time */
+				return mm->free_area_cache = addr;
+			}
+
+			/* remember the largest hole we saw so far */
+			if (addr + mm->cached_hole_size < vma->vm_start)
+				mm->cached_hole_size = vma->vm_start - addr;
+
+			/* try just below the current vma->vm_start */
+			addr = vma->vm_start - len;
+			if (do_color_align)
+				addr = COLOUR_ALIGN_DOWN(addr, pgoff);
+		} while (likely(len < vma->vm_start));
+
+bottomup:
+		/*
+		 * A failed mmap() very likely causes application failure,
+		 * so fall back to the bottom-up function here. This scenario
+		 * can happen with large stack limits and large mmap()
+		 * allocations.
+		 */
+		mm->cached_hole_size = ~0UL;
+		mm->free_area_cache = TASK_UNMAPPED_BASE;
+		addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
+		/*
+		 * Restore the topdown base:
+		 */
+		mm->free_area_cache = mm->mmap_base;
+		mm->cached_hole_size = ~0UL;
+
+		return addr;
 	}
 }
 
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
+	unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+	return arch_get_unmapped_area_common(filp,
+			addr0, len, pgoff, flags, UP);
+}
+
+/*
+ * There is no need to export this but sched.h declares the function as
+ * extern so making it static here results in an error.
+ */
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+	unsigned long addr0, unsigned long len, unsigned long pgoff,
+	unsigned long flags)
+{
+	return arch_get_unmapped_area_common(filp,
+			addr0, len, pgoff, flags, DOWN);
+}
+
 void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	unsigned long random_factor = 0UL;
@@ -89,9 +232,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 		random_factor &= 0xffffffful;
 	}
 
-	mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
-	mm->get_unmapped_area = arch_get_unmapped_area;
-	mm->unmap_area = arch_unmap_area;
+	if (mmap_is_legacy()) {
+		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+		mm->get_unmapped_area = arch_get_unmapped_area;
+		mm->unmap_area = arch_unmap_area;
+	} else {
+		mm->mmap_base = mmap_base(random_factor);
+		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
+		mm->unmap_area = arch_unmap_area_topdown;
+	}
 }
 
 static inline unsigned long brk_rnd(void)
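Both allocation directions preserve the cache colour of shared mappings: COLOUR_ALIGN rounds up to the next address carrying the colour implied by pgoff, while the new COLOUR_ALIGN_DOWN rounds down for the top-down allocator. A standalone sketch of the arithmetic (a 32 KiB aliasing window is assumed for illustration):

#include <stdio.h>

#define PAGE_SHIFT 12
static unsigned long shm_align_mask = 0x7fff;	/* assumed 32 KiB window */

/* Round addr up to the next address with the colour implied by pgoff. */
static unsigned long colour_align(unsigned long addr, unsigned long pgoff)
{
	return (((addr + shm_align_mask) & ~shm_align_mask) +
		((pgoff << PAGE_SHIFT) & shm_align_mask));
}

/* Round down instead, as the top-down path needs. */
static unsigned long colour_align_down(unsigned long addr, unsigned long pgoff)
{
	unsigned long base = addr & ~shm_align_mask;
	unsigned long off = (pgoff << PAGE_SHIFT) & shm_align_mask;

	return (base + off <= addr) ? base + off : base - off;
}

int main(void)
{
	unsigned long a = 0x2000fff0, pgoff = 3;

	/* Both results have the same low bits (the colour) modulo the mask. */
	printf("up:   %#lx\n", colour_align(a, pgoff));	/* 0x20013000 */
	printf("down: %#lx\n", colour_align_down(a, pgoff));	/* 0x2000b000 */
	return 0;
}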
diff --git a/arch/mips/mm/pgtable-32.c b/arch/mips/mm/pgtable-32.c
index 575e4019227..adc6911ba74 100644
--- a/arch/mips/mm/pgtable-32.c
+++ b/arch/mips/mm/pgtable-32.c
@@ -52,7 +52,7 @@ void __init pagetable_init(void)
 	 * Fixed mappings:
 	 */
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	fixrange_init(vaddr, 0, pgd_base);
+	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
 
 #ifdef CONFIG_HIGHMEM
 	/*
diff --git a/arch/mips/mm/pgtable-64.c b/arch/mips/mm/pgtable-64.c
index 78eaa4f0b0e..cda4e300eb0 100644
--- a/arch/mips/mm/pgtable-64.c
+++ b/arch/mips/mm/pgtable-64.c
@@ -76,5 +76,5 @@ void __init pagetable_init(void)
 	 * Fixed mappings:
 	 */
 	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
-	fixrange_init(vaddr, 0, pgd_base);
+	fixrange_init(vaddr, vaddr + FIXADDR_SIZE, pgd_base);
 }
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 424ed4b92e6..e06370f58ef 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -42,6 +42,18 @@
 extern void tlb_do_page_fault_0(void);
 extern void tlb_do_page_fault_1(void);
 
+struct work_registers {
+	int r1;
+	int r2;
+	int r3;
+};
+
+struct tlb_reg_save {
+	unsigned long a;
+	unsigned long b;
+} ____cacheline_aligned_in_smp;
+
+static struct tlb_reg_save handler_reg_save[NR_CPUS];
 
 static inline int r45k_bvahwbug(void)
 {
@@ -248,6 +260,73 @@ static int scratch_reg __cpuinitdata;
 static int pgd_reg __cpuinitdata;
 enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};
 
+static struct work_registers __cpuinit build_get_work_registers(u32 **p)
+{
+	struct work_registers r;
+
+	int smp_processor_id_reg;
+	int smp_processor_id_sel;
+	int smp_processor_id_shift;
+
+	if (scratch_reg > 0) {
+		/* Save in CPU local C0_KScratch? */
+		UASM_i_MTC0(p, 1, 31, scratch_reg);
+		r.r1 = K0;
+		r.r2 = K1;
+		r.r3 = 1;
+		return r;
+	}
+
+	if (num_possible_cpus() > 1) {
+#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
+		smp_processor_id_shift = 51;
+		smp_processor_id_reg = 20; /* XContext */
+		smp_processor_id_sel = 0;
+#else
+# ifdef CONFIG_32BIT
+		smp_processor_id_shift = 25;
+		smp_processor_id_reg = 4; /* Context */
+		smp_processor_id_sel = 0;
+# endif
+# ifdef CONFIG_64BIT
+		smp_processor_id_shift = 26;
+		smp_processor_id_reg = 4; /* Context */
+		smp_processor_id_sel = 0;
+# endif
+#endif
+		/* Get smp_processor_id */
+		UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
+		UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);
+
+		/* handler_reg_save index in K0 */
+		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));
+
+		UASM_i_LA(p, K1, (long)&handler_reg_save);
+		UASM_i_ADDU(p, K0, K0, K1);
+	} else {
+		UASM_i_LA(p, K0, (long)&handler_reg_save);
+	}
+	/* K0 now points to save area, save $1 and $2  */
+	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+
+	r.r1 = K1;
+	r.r2 = 1;
+	r.r3 = 2;
+	return r;
+}
+
+static void __cpuinit build_restore_work_registers(u32 **p)
+{
+	if (scratch_reg > 0) {
+		UASM_i_MFC0(p, 1, 31, scratch_reg);
+		return;
+	}
+	/* K0 already points to save area, restore $1 and $2  */
+	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
+	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
+}
+
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
 
 /*
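build_get_work_registers() picks three working registers for the generated handler: with a spare KScratch register it parks $1 there and keeps k0/k1 free, otherwise it spills $1/$2 into the per-CPU handler_reg_save slot and works in k1/$1/$2. A compact model of just that decision (MIPS register numbers k0=$26, k1=$27, at=$1, v0=$2 assumed):

#include <stdio.h>

enum { AT = 1, V0 = 2, K0 = 26, K1 = 27 };	/* MIPS register numbers */

struct work_registers { int r1, r2, r3; };

static struct work_registers pick_work_registers(int have_kscratch)
{
	if (have_kscratch)
		/* $1 saved to KScratch, so k0/k1/$1 are free to use. */
		return (struct work_registers){ K0, K1, AT };

	/* $1/$2 saved to the per-CPU area addressed through k0. */
	return (struct work_registers){ K1, AT, V0 };
}

int main(void)
{
	struct work_registers wr = pick_work_registers(0);

	printf("r1=$%d r2=$%d r3=$%d\n", wr.r1, wr.r2, wr.r3);
	return 0;
}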
@@ -1160,9 +1239,6 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 	memset(final_handler, 0, sizeof(final_handler));
 
-	if (scratch_reg == 0)
-		scratch_reg = allocate_kscratch();
-
 	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
 		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
 							  scratch_reg);
@@ -1462,22 +1538,28 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
  */
 static void __cpuinit
 build_pte_present(u32 **p, struct uasm_reloc **r,
-		  unsigned int pte, unsigned int ptr, enum label_id lid)
+		  int pte, int ptr, int scratch, enum label_id lid)
 {
+	int t = scratch >= 0 ? scratch : pte;
+
 	if (kernel_uses_smartmips_rixi) {
 		if (use_bbit_insns()) {
 			uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
 			uasm_i_nop(p);
 		} else {
-			uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
-			uasm_il_beqz(p, r, pte, lid);
-			iPTE_LW(p, pte, ptr);
+			uasm_i_andi(p, t, pte, _PAGE_PRESENT);
+			uasm_il_beqz(p, r, t, lid);
+			if (pte == t)
+				/* You lose the SMP race :-(*/
+				iPTE_LW(p, pte, ptr);
 		}
 	} else {
-		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
-		uasm_il_bnez(p, r, pte, lid);
-		iPTE_LW(p, pte, ptr);
+		uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
+		uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
+		uasm_il_bnez(p, r, t, lid);
+		if (pte == t)
+			/* You lose the SMP race :-(*/
+			iPTE_LW(p, pte, ptr);
 	}
 }
 
@@ -1497,19 +1579,19 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  */
 static void __cpuinit
 build_pte_writable(u32 **p, struct uasm_reloc **r,
-		   unsigned int pte, unsigned int ptr, enum label_id lid)
+		   unsigned int pte, unsigned int ptr, int scratch,
+		   enum label_id lid)
 {
-	if (use_bbit_insns()) {
-		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
-		uasm_i_nop(p);
-		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
-		uasm_i_nop(p);
-	} else {
-		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
-		uasm_il_bnez(p, r, pte, lid);
+	int t = scratch >= 0 ? scratch : pte;
+
+	uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
+	uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
+	uasm_il_bnez(p, r, t, lid);
+	if (pte == t)
+		/* You lose the SMP race :-(*/
 		iPTE_LW(p, pte, ptr);
-	}
+	else
+		uasm_i_nop(p);
 }
 
 /* Make PTE writable, update software status bits as well, then store
@@ -1531,15 +1613,19 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  */
 static void __cpuinit
 build_pte_modifiable(u32 **p, struct uasm_reloc **r,
-		     unsigned int pte, unsigned int ptr, enum label_id lid)
+		     unsigned int pte, unsigned int ptr, int scratch,
+		     enum label_id lid)
 {
 	if (use_bbit_insns()) {
 		uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
 		uasm_i_nop(p);
 	} else {
-		uasm_i_andi(p, pte, pte, _PAGE_WRITE);
-		uasm_il_beqz(p, r, pte, lid);
-		iPTE_LW(p, pte, ptr);
+		int t = scratch >= 0 ? scratch : pte;
+		uasm_i_andi(p, t, pte, _PAGE_WRITE);
+		uasm_il_beqz(p, r, t, lid);
+		if (pte == t)
+			/* You lose the SMP race :-(*/
+			iPTE_LW(p, pte, ptr);
 	}
 }
 
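The three build_pte_* helpers now do their flag tests in a scratch register when one is available, so the original PTE value survives the test and the "you lose the SMP race" reload (iPTE_LW) is only emitted when the test had to clobber the PTE itself. A rough C model of the idea (bit values are illustrative, not the kernel's):

#include <stdio.h>

#define _PAGE_PRESENT 0x1
#define _PAGE_WRITE   0x4	/* illustrative bit positions */

/* Testing into a scratch variable leaves *pte intact; the in-place
 * variant destroys it, which is why the old code had to reload the PTE. */
static int pte_writable(unsigned int *pte, unsigned int *scratch)
{
	unsigned int *t = scratch ? scratch : pte;

	*t = *pte & (_PAGE_PRESENT | _PAGE_WRITE);	/* clobbers *t only */
	return *t == (_PAGE_PRESENT | _PAGE_WRITE);
}

int main(void)
{
	unsigned int pte = _PAGE_PRESENT | _PAGE_WRITE | 0xff00;
	unsigned int scratch;

	printf("writable=%d, pte still %#x\n",
	       pte_writable(&pte, &scratch), pte);
	return 0;
}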
@@ -1619,7 +1705,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+	build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
 	uasm_i_nop(&p); /* load delay */
 	build_make_valid(&p, &r, K0, K1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1649,7 +1735,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+	build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
 	uasm_i_nop(&p); /* load delay */
 	build_make_write(&p, &r, K0, K1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1679,7 +1765,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+	build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
 	uasm_i_nop(&p); /* load delay */
 	build_make_write(&p, &r, K0, K1);
 	build_r3000_pte_reload_tlbwi(&p, K0, K1);
@@ -1702,15 +1788,16 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 /*
  * R4000 style TLB load/store/modify handlers.
  */
-static void __cpuinit
+static struct work_registers __cpuinit
 build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
-				   struct uasm_reloc **r, unsigned int pte,
-				   unsigned int ptr)
+				   struct uasm_reloc **r)
 {
+	struct work_registers wr = build_get_work_registers(p);
+
 #ifdef CONFIG_64BIT
-	build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
+	build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
 #else
-	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
+	build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
 #endif
 
 #ifdef CONFIG_HUGETLB_PAGE
@@ -1719,21 +1806,22 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
 	 * see if we need to jump to huge tlb processing.
 	 */
-	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
+	build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
 #endif
 
-	UASM_i_MFC0(p, pte, C0_BADVADDR);
-	UASM_i_LW(p, ptr, 0, ptr);
-	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
-	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
-	UASM_i_ADDU(p, ptr, ptr, pte);
+	UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
+	UASM_i_LW(p, wr.r2, 0, wr.r2);
+	UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
+	uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
+	UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
 
 #ifdef CONFIG_SMP
 	uasm_l_smp_pgtable_change(l, *p);
 #endif
-	iPTE_LW(p, pte, ptr); /* get even pte */
+	iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
 	if (!m4kc_tlbp_war())
 		build_tlb_probe_entry(p);
+	return wr;
 }
 
 static void __cpuinit
@@ -1746,6 +1834,7 @@ build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
 	build_update_entries(p, tmp, ptr);
 	build_tlb_write_entry(p, l, r, tlb_indexed);
 	uasm_l_leave(l, *p);
+	build_restore_work_registers(p);
 	uasm_i_eret(p); /* return from trap */
 
 #ifdef CONFIG_64BIT
@@ -1758,6 +1847,7 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	u32 *p = handle_tlbl;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+	struct work_registers wr;
 
 	memset(handle_tlbl, 0, sizeof(handle_tlbl));
 	memset(labels, 0, sizeof(labels));
@@ -1777,8 +1867,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		/* No need for uasm_i_nop */
 	}
 
-	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 
@@ -1788,44 +1878,43 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	 * have triggered it.  Skip the expensive test..
 	 */
 	if (use_bbit_insns()) {
-		uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
+		uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
 			      label_tlbl_goaround1);
 	} else {
-		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
-		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
+		uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
+		uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
 	}
 	uasm_i_nop(&p);
 
 	uasm_i_tlbr(&p);
 	/* Examine  entrylo 0 or 1 based on ptr. */
 	if (use_bbit_insns()) {
-		uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
+		uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
 	} else {
-		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
-		uasm_i_beqz(&p, K0, 8);
+		uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
+		uasm_i_beqz(&p, wr.r3, 8);
 	}
-
-	UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
-	UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+	/* load it in the delay slot*/
+	UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
+	/* load it if ptr is odd */
+	UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
 	/*
-	 * If the entryLo (now in K0) is valid (bit 1), RI or
+	 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
 	 * XI must have triggered it.
 	 */
 	if (use_bbit_insns()) {
-		uasm_il_bbit1(&p, &r, K0, 1, label_nopage_tlbl);
-		/* Reload the PTE value */
-		iPTE_LW(&p, K0, K1);
-		uasm_l_tlbl_goaround1(&l, p);
+		uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
+		uasm_i_nop(&p);
 	} else {
-		uasm_i_andi(&p, K0, K0, 2);
-		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);
-		uasm_l_tlbl_goaround1(&l, p);
-		/* Reload the PTE value */
-		iPTE_LW(&p, K0, K1);
+		uasm_i_andi(&p, wr.r3, wr.r3, 2);
+		uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
+		uasm_i_nop(&p);
 	}
+	uasm_l_tlbl_goaround1(&l, p);
 	}
-	build_make_valid(&p, &r, K0, K1);
-	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+	build_make_valid(&p, &r, wr.r1, wr.r2);
+	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/*
@@ -1833,8 +1922,8 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	 * spots a huge page.
 	 */
 	uasm_l_tlb_huge_update(&l, p);
-	iPTE_LW(&p, K0, K1);
-	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+	iPTE_LW(&p, wr.r1, wr.r2);
+	build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
 	build_tlb_probe_entry(&p);
 
 	if (kernel_uses_smartmips_rixi) {
@@ -1843,50 +1932,52 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 		 * have triggered it.  Skip the expensive test..
 		 */
 		if (use_bbit_insns()) {
-			uasm_il_bbit0(&p, &r, K0, ilog2(_PAGE_VALID),
+			uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
 				      label_tlbl_goaround2);
 		} else {
-			uasm_i_andi(&p, K0, K0, _PAGE_VALID);
-			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+			uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
+			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
 		}
 		uasm_i_nop(&p);
 
 		uasm_i_tlbr(&p);
 		/* Examine  entrylo 0 or 1 based on ptr. */
 		if (use_bbit_insns()) {
-			uasm_i_bbit0(&p, K1, ilog2(sizeof(pte_t)), 8);
+			uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
 		} else {
-			uasm_i_andi(&p, K0, K1, sizeof(pte_t));
-			uasm_i_beqz(&p, K0, 8);
+			uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
+			uasm_i_beqz(&p, wr.r3, 8);
 		}
-		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot*/
-		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
+		/* load it in the delay slot*/
+		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
+		/* load it if ptr is odd */
+		UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
 		/*
-		 * If the entryLo (now in K0) is valid (bit 1), RI or
+		 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
 		 * XI must have triggered it.
 		 */
 		if (use_bbit_insns()) {
-			uasm_il_bbit0(&p, &r, K0, 1, label_tlbl_goaround2);
+			uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
 		} else {
-			uasm_i_andi(&p, K0, K0, 2);
-			uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
+			uasm_i_andi(&p, wr.r3, wr.r3, 2);
+			uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
 		}
-		/* Reload the PTE value */
-		iPTE_LW(&p, K0, K1);
-
+		if (PM_DEFAULT_MASK == 0)
+			uasm_i_nop(&p);
 		/*
 		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
 		 * it is restored in build_huge_tlb_write_entry.
 		 */
-		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl, 0);
+		build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
 
 		uasm_l_tlbl_goaround2(&l, p);
 	}
-	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
-	build_huge_handler_tail(&p, &r, &l, K0, K1);
+	uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
 #endif
 
 	uasm_l_nopage_tlbl(&l, p);
+	build_restore_work_registers(&p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
 	uasm_i_nop(&p);
 
@@ -1905,17 +1996,18 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	u32 *p = handle_tlbs;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+	struct work_registers wr;
 
 	memset(handle_tlbs, 0, sizeof(handle_tlbs));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
-	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
-	build_make_write(&p, &r, K0, K1);
-	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+	build_make_write(&p, &r, wr.r1, wr.r2);
+	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/*
@@ -1923,15 +2015,16 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	 * build_r4000_tlbchange_handler_head spots a huge page.
 	 */
 	uasm_l_tlb_huge_update(&l, p);
-	iPTE_LW(&p, K0, K1);
-	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+	iPTE_LW(&p, wr.r1, wr.r2);
+	build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
 	build_tlb_probe_entry(&p);
-	uasm_i_ori(&p, K0, K0,
+	uasm_i_ori(&p, wr.r1, wr.r1,
 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, K0, K1);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
 #endif
 
 	uasm_l_nopage_tlbs(&l, p);
+	build_restore_work_registers(&p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
@@ -1950,18 +2043,19 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	u32 *p = handle_tlbm;
 	struct uasm_label *l = labels;
 	struct uasm_reloc *r = relocs;
+	struct work_registers wr;
 
 	memset(handle_tlbm, 0, sizeof(handle_tlbm));
 	memset(labels, 0, sizeof(labels));
 	memset(relocs, 0, sizeof(relocs));
 
-	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+	wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
+	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	/* Present and writable bits set, set accessed and dirty bits. */
-	build_make_write(&p, &r, K0, K1);
-	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
+	build_make_write(&p, &r, wr.r1, wr.r2);
+	build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
 
 #ifdef CONFIG_HUGETLB_PAGE
 	/*
@@ -1969,15 +2063,16 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	 * build_r4000_tlbchange_handler_head spots a huge page.
 	 */
 	uasm_l_tlb_huge_update(&l, p);
-	iPTE_LW(&p, K0, K1);
-	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+	iPTE_LW(&p, wr.r1, wr.r2);
+	build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
 	build_tlb_probe_entry(&p);
-	uasm_i_ori(&p, K0, K0,
+	uasm_i_ori(&p, wr.r1, wr.r1,
 		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
-	build_huge_handler_tail(&p, &r, &l, K0, K1);
+	build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
 #endif
 
 	uasm_l_nopage_tlbm(&l, p);
+	build_restore_work_registers(&p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
 
@@ -2036,6 +2131,7 @@ void __cpuinit build_tlb_refill_handler(void)
 
 	default:
 		if (!run_once) {
+			scratch_reg = allocate_kscratch();
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
 			build_r4000_setup_pgd();
 #endif