Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/Makefile      |   1
-rw-r--r--  arch/mips/mm/c-octeon.c    |   5
-rw-r--r--  arch/mips/mm/c-r3k.c       |   1
-rw-r--r--  arch/mips/mm/c-r4k.c       |  13
-rw-r--r--  arch/mips/mm/c-tx39.c      |   1
-rw-r--r--  arch/mips/mm/dma-default.c |  23
-rw-r--r--  arch/mips/mm/extable.c     |   6
-rw-r--r--  arch/mips/mm/fault.c       |   3
-rw-r--r--  arch/mips/mm/highmem.c     |   1
-rw-r--r--  arch/mips/mm/hugetlbpage.c | 100
-rw-r--r--  arch/mips/mm/init.c        |   1
-rw-r--r--  arch/mips/mm/page.c        |   1
-rw-r--r--  arch/mips/mm/tlb-r3k.c     |   1
-rw-r--r--  arch/mips/mm/tlb-r4k.c     |  44
-rw-r--r--  arch/mips/mm/tlb-r8k.c     |   1
-rw-r--r--  arch/mips/mm/tlbex.c       | 283
16 files changed, 421 insertions(+), 64 deletions(-)
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index d7ec95522292..f0e435599707 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -8,6 +8,7 @@ obj-y += cache.o dma-default.o extable.o fault.o \
 obj-$(CONFIG_32BIT)		+= ioremap.o pgtable-32.o
 obj-$(CONFIG_64BIT)		+= pgtable-64.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
+obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 
 obj-$(CONFIG_CPU_LOONGSON2)	+= c-r4k.o cex-gen.o tlb-r4k.o
 obj-$(CONFIG_CPU_MIPS32)	+= c-r4k.o cex-gen.o tlb-r4k.o
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 44d01a0a8490..10ab69f7183f 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -8,6 +8,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/bitops.h>
 #include <linux/cpu.h>
@@ -288,7 +289,7 @@ static void cache_parity_error_octeon(int non_recoverable)
 }
 
 /**
- * Called when the the exception is not recoverable
+ * Called when the the exception is recoverable
  */
 
 asmlinkage void cache_parity_error_octeon_recoverable(void)
@@ -297,7 +298,7 @@ asmlinkage void cache_parity_error_octeon_recoverable(void)
 }
 
 /**
- * Called when the the exception is recoverable
+ * Called when the the exception is not recoverable
  */
 
 asmlinkage void cache_parity_error_octeon_non_recoverable(void)
diff --git a/arch/mips/mm/c-r3k.c b/arch/mips/mm/c-r3k.c
index 5500c20c79ae..54e5f7b9f440 100644
--- a/arch/mips/mm/c-r3k.c
+++ b/arch/mips/mm/c-r3k.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/mm.h>
 
 #include <asm/page.h>
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 171951d2305b..6721ee2b1e8b 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -13,6 +13,7 @@
 #include <linux/kernel.h>
 #include <linux/linkage.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
@@ -100,6 +101,12 @@ static inline void r4k_blast_dcache_page_dc32(unsigned long addr)
 	blast_dcache32_page(addr);
 }
 
+static inline void r4k_blast_dcache_page_dc64(unsigned long addr)
+{
+	R4600_HIT_CACHEOP_WAR_IMPL;
+	blast_dcache64_page(addr);
+}
+
 static void __cpuinit r4k_blast_dcache_page_setup(void)
 {
 	unsigned long dc_lsize = cpu_dcache_line_size();
@@ -110,6 +117,8 @@ static void __cpuinit r4k_blast_dcache_page_setup(void)
 		r4k_blast_dcache_page = blast_dcache16_page;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page = r4k_blast_dcache_page_dc32;
+	else if (dc_lsize == 64)
+		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
 }
 
 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
@@ -124,6 +133,8 @@ static void __cpuinit r4k_blast_dcache_page_indexed_setup(void)
 		r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed;
+	else if (dc_lsize == 64)
+		r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed;
 }
 
 static void (* r4k_blast_dcache)(void);
@@ -138,6 +149,8 @@ static void __cpuinit r4k_blast_dcache_setup(void)
 		r4k_blast_dcache = blast_dcache16;
 	else if (dc_lsize == 32)
 		r4k_blast_dcache = blast_dcache32;
+	else if (dc_lsize == 64)
+		r4k_blast_dcache = blast_dcache64;
 }
 
 /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */
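
The three setup routines above share one pattern: the d-cache line size is probed once at boot and the matching blast routine is cached in a function pointer, so the hot path pays no per-call size check. A minimal sketch of that dispatch idiom (flush16/32/64 and flush_setup are illustrative names, not kernel APIs):

    /* Illustrative stand-ins for the per-line-size blast routines. */
    static void flush16(unsigned long addr) { /* 16-byte-line loop */ }
    static void flush32(unsigned long addr) { /* 32-byte-line loop */ }
    static void flush64(unsigned long addr) { /* 64-byte-line loop */ }

    /* Resolved once at boot; callers only ever go through the pointer. */
    static void (*flush_page)(unsigned long addr);

    static void flush_setup(unsigned long line_size)
    {
        if (line_size == 16)
            flush_page = flush16;
        else if (line_size == 32)
            flush_page = flush32;
        else if (line_size == 64)   /* the case this patch adds */
            flush_page = flush64;
    }
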
diff --git a/arch/mips/mm/c-tx39.c b/arch/mips/mm/c-tx39.c
index f7c8f9ce39c1..6515b4418714 100644
--- a/arch/mips/mm/c-tx39.c
+++ b/arch/mips/mm/c-tx39.c
@@ -11,6 +11,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/mm.h>
 
 #include <asm/cacheops.h>
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 4fdb7f5216b9..7e48e76148aa 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -20,9 +20,10 @@
 
 #include <dma-coherence.h>
 
-static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
+static inline unsigned long dma_addr_to_virt(struct device *dev,
+	dma_addr_t dma_addr)
 {
-	unsigned long addr = plat_dma_addr_to_phys(dma_addr);
+	unsigned long addr = plat_dma_addr_to_phys(dev, dma_addr);
 
 	return (unsigned long)phys_to_virt(addr);
 }
@@ -111,7 +112,7 @@ EXPORT_SYMBOL(dma_alloc_coherent);
 void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
 	dma_addr_t dma_handle)
 {
-	plat_unmap_dma_mem(dev, dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 	free_pages((unsigned long) vaddr, get_order(size));
 }
 
@@ -122,7 +123,7 @@ void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
 {
 	unsigned long addr = (unsigned long) vaddr;
 
-	plat_unmap_dma_mem(dev, dma_handle);
+	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
 
 	if (!plat_device_is_coherent(dev))
 		addr = CAC_ADDR(addr);
@@ -170,10 +171,10 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
 	enum dma_data_direction direction)
 {
 	if (cpu_is_noncoherent_r10000(dev))
-		__dma_sync(dma_addr_to_virt(dma_addr), size,
+		__dma_sync(dma_addr_to_virt(dev, dma_addr), size,
 			   direction);
 
-	plat_unmap_dma_mem(dev, dma_addr);
+	plat_unmap_dma_mem(dev, dma_addr, size, direction);
 }
 
 EXPORT_SYMBOL(dma_unmap_single);
@@ -232,7 +233,7 @@ void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
 			if (addr)
 				__dma_sync(addr, sg->length, direction);
 		}
-		plat_unmap_dma_mem(dev, sg->dma_address);
+		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
 	}
 }
 
@@ -246,7 +247,7 @@ void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
 	if (cpu_is_noncoherent_r10000(dev)) {
 		unsigned long addr;
 
-		addr = dma_addr_to_virt(dma_handle);
+		addr = dma_addr_to_virt(dev, dma_handle);
 		__dma_sync(addr, size, direction);
 	}
 }
@@ -262,7 +263,7 @@ void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
 
-		addr = dma_addr_to_virt(dma_handle);
+		addr = dma_addr_to_virt(dev, dma_handle);
 		__dma_sync(addr, size, direction);
 	}
 }
@@ -277,7 +278,7 @@ void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
 	if (cpu_is_noncoherent_r10000(dev)) {
 		unsigned long addr;
 
-		addr = dma_addr_to_virt(dma_handle);
+		addr = dma_addr_to_virt(dev, dma_handle);
 		__dma_sync(addr + offset, size, direction);
 	}
 }
@@ -293,7 +294,7 @@ void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
 	if (!plat_device_is_coherent(dev)) {
 		unsigned long addr;
 
-		addr = dma_addr_to_virt(dma_handle);
+		addr = dma_addr_to_virt(dev, dma_handle);
 		__dma_sync(addr + offset, size, direction);
 	}
 }
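
Every call site above now passes the device, size and direction through to the platform hooks, so a platform can do per-device, direction-aware teardown (an IOMMU unmap, for instance). A sketch of the widened hook for a trivial platform with a flat, static DMA mapping (hypothetical implementation, not the Octeon one):

    /* In a platform's dma-coherence.h: all four arguments are now
     * available; a flat-mapped platform simply ignores the new ones. */
    static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t addr,
                                          size_t size,
                                          enum dma_data_direction direction)
    {
        /* nothing to tear down without an IOMMU */
    }
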
diff --git a/arch/mips/mm/extable.c b/arch/mips/mm/extable.c
index 297fb9f390dc..9d25d2ba4b9e 100644
--- a/arch/mips/mm/extable.c
+++ b/arch/mips/mm/extable.c
@@ -1,5 +1,9 @@
 /*
- * linux/arch/mips/mm/extable.c
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1997, 99, 2001 - 2004 Ralf Baechle <ralf@linux-mips.org>
  */
 #include <linux/module.h>
 #include <linux/spinlock.h>
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 55767ad9f00e..f956ecbb8136 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -102,7 +102,7 @@ good_area:
 	 * make sure we exit gracefully rather than endlessly redo
 	 * the fault.
 	 */
-	fault = handle_mm_fault(mm, vma, address, write);
+	fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		if (fault & VM_FAULT_OOM)
 			goto out_of_memory;
@@ -171,6 +171,7 @@ out_of_memory:
 	 * We ran out of memory, call the OOM killer, and return the userspace
 	 * (which will retry the fault, or kill us if we got oom-killed).
 	 */
+	up_read(&mm->mmap_sem);
 	pagefault_out_of_memory();
 	return;
 
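
Two independent fixes in these hunks: handle_mm_fault() now takes a flags word, so the old write integer is converted to FAULT_FLAG_WRITE, and the OOM path must drop mmap_sem itself because pagefault_out_of_memory() may sleep or kill the task while holding no mm locks. A condensed sketch of the resulting shape (not the full MIPS handler):

    fault = handle_mm_fault(mm, vma, address, write ? FAULT_FLAG_WRITE : 0);
    ...
    out_of_memory:
        /* release the semaphore taken at fault entry before calling
         * into the OOM machinery, which can block indefinitely */
        up_read(&mm->mmap_sem);
        pagefault_out_of_memory();
        return;
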
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 2b1309b2580a..e274fda329f4 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -1,5 +1,6 @@
 #include <linux/module.h>
 #include <linux/highmem.h>
+#include <linux/smp.h>
 #include <asm/fixmap.h>
 #include <asm/tlbflush.h>
 
diff --git a/arch/mips/mm/hugetlbpage.c b/arch/mips/mm/hugetlbpage.c
new file mode 100644
index 000000000000..8c2834f5919d
--- /dev/null
+++ b/arch/mips/mm/hugetlbpage.c
@@ -0,0 +1,100 @@
+/*
+ * MIPS Huge TLB Page Support for Kernel.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
+ * Copyright 2005, Embedded Alley Solutions, Inc.
+ * Matt Porter <mporter@embeddedalley.com>
+ * Copyright (C) 2008, 2009 Cavium Networks, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr,
+		      unsigned long sz)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pte_t *pte = NULL;
+
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (pud)
+		pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
+	return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd = NULL;
+
+	pgd = pgd_offset(mm, addr);
+	if (pgd_present(*pgd)) {
+		pud = pud_offset(pgd, addr);
+		if (pud_present(*pud))
+			pmd = pmd_offset(pud, addr);
+	}
+	return (pte_t *) pmd;
+}
+
+int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
+{
+	return 0;
+}
+
+/*
+ * This function checks for proper alignment of input addr and len parameters.
+ */
+int is_aligned_hugepage_range(unsigned long addr, unsigned long len)
+{
+	if (len & ~HPAGE_MASK)
+		return -EINVAL;
+	if (addr & ~HPAGE_MASK)
+		return -EINVAL;
+	return 0;
+}
+
+struct page *
+follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
+{
+	return ERR_PTR(-EINVAL);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+	return (pmd_val(pmd) & _PAGE_HUGE) != 0;
+}
+
+int pud_huge(pud_t pud)
+{
+	return (pud_val(pud) & _PAGE_HUGE) != 0;
+}
+
+struct page *
+follow_huge_pmd(struct mm_struct *mm, unsigned long address,
+		pmd_t *pmd, int write)
+{
+	struct page *page;
+
+	page = pte_page(*(pte_t *)pmd);
+	if (page)
+		page += ((address & ~HPAGE_MASK) >> PAGE_SHIFT);
+	return page;
+}
+
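
Note the layout choice this new file encodes: a MIPS huge page has no second-level PTE array at all. The PMD slot itself holds the huge PTE, marked by the software _PAGE_HUGE bit, which is why huge_pte_offset() returns the PMD cast to pte_t *. A sketch of a lookup through these helpers (dump_huge_pte is a hypothetical debugging helper, not part of the patch):

    static void dump_huge_pte(struct mm_struct *mm, unsigned long addr)
    {
        pte_t *ptep = huge_pte_offset(mm, addr);

        /* One load covers the whole HPAGE_SIZE region: the PMD entry
         * is the translation itself, not a pointer to a PTE page. */
        if (ptep && (pte_val(*ptep) & _PAGE_HUGE))
            printk("addr %lx -> huge pte %lx\n", addr, pte_val(*ptep));
    }
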
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index c5511294a9ee..0e820508ff23 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -13,6 +13,7 @@
 #include <linux/module.h>
 #include <linux/signal.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/string.h>
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 48060c635acd..f5c73754d664 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -10,6 +10,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/proc_fs.h>
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index 1c0048a6f5cf..0f5ab236ab69 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -13,6 +13,7 @@
 #include <linux/init.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/mm.h>
 
 #include <asm/page.h>
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 892be426787c..cee502caf398 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -10,7 +10,9 @@
  */
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/mm.h>
+#include <linux/hugetlb.h>
 
 #include <asm/cpu.h>
 #include <asm/bootinfo.h>
@@ -295,21 +297,41 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
 	pudp = pud_offset(pgdp, address);
 	pmdp = pmd_offset(pudp, address);
 	idx = read_c0_index();
-	ptep = pte_offset_map(pmdp, address);
+#ifdef CONFIG_HUGETLB_PAGE
+	/* this could be a huge page  */
+	if (pmd_huge(*pmdp)) {
+		unsigned long lo;
+		write_c0_pagemask(PM_HUGE_MASK);
+		ptep = (pte_t *)pmdp;
+		lo = pte_val(*ptep) >> 6;
+		write_c0_entrylo0(lo);
+		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));
+
+		mtc0_tlbw_hazard();
+		if (idx < 0)
+			tlb_write_random();
+		else
+			tlb_write_indexed();
+		write_c0_pagemask(PM_DEFAULT_MASK);
+	} else
+#endif
+	{
+		ptep = pte_offset_map(pmdp, address);
 
 #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-	write_c0_entrylo0(ptep->pte_high);
-	ptep++;
-	write_c0_entrylo1(ptep->pte_high);
+		write_c0_entrylo0(ptep->pte_high);
+		ptep++;
+		write_c0_entrylo1(ptep->pte_high);
 #else
-	write_c0_entrylo0(pte_val(*ptep++) >> 6);
-	write_c0_entrylo1(pte_val(*ptep) >> 6);
+		write_c0_entrylo0(pte_val(*ptep++) >> 6);
+		write_c0_entrylo1(pte_val(*ptep) >> 6);
 #endif
-	mtc0_tlbw_hazard();
-	if (idx < 0)
-		tlb_write_random();
-	else
-		tlb_write_indexed();
+		mtc0_tlbw_hazard();
+		if (idx < 0)
+			tlb_write_random();
+		else
+			tlb_write_indexed();
+	}
 	tlbw_use_hazard();
 	FLUSH_ITLB_VM(vma);
 	EXIT_CRITICAL(flags);
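
The arithmetic in the huge-page branch is worth spelling out. EntryLo carries the PFN starting at bit 6, so for a physical address P the register value is (P >> 12) << 6 | flags, i.e. P >> 6 plus flag bits; pte_val() is already laid out that way, hence the single shift. One TLB entry maps a pair of halves, so EntryLo1 is EntryLo0 advanced by half a huge page: (HPAGE_SIZE / 2) >> 6 == HPAGE_SIZE >> 7. A worked example under an assumed 2 MB huge page (illustrative size only):

    unsigned long lo0 = pte_val(*ptep) >> 6;      /* maps the first half  */
    /* 0x200000 >> 7 = 0x4000: EntryLo distance of HPAGE_SIZE/2 bytes */
    unsigned long lo1 = lo0 + (0x200000UL >> 7);  /* maps the second half */
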
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 4ec95cc2df2f..2b82f23df1a1 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -10,6 +10,7 @@
  */
 #include <linux/init.h>
 #include <linux/sched.h>
+#include <linux/smp.h>
 #include <linux/mm.h>
 
 #include <asm/cpu.h>
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 0615b62efd6d..9a17bf8395df 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -6,8 +6,9 @@
  * Synthesize TLB refill handlers at runtime.
  *
  * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
- * Copyright (C) 2005, 2007  Maciej W. Rozycki
+ * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
  * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2008, 2009 Cavium Networks, Inc.
  *
  * ... and the days got worse and worse and now you see
  * I've gone completly out of my mind.
@@ -19,8 +20,10 @@
  * (Condolences to Napoleon XIV)
  */
 
+#include <linux/bug.h>
 #include <linux/kernel.h>
 #include <linux/types.h>
+#include <linux/smp.h>
 #include <linux/string.h>
 #include <linux/init.h>
 
@@ -82,6 +85,9 @@ enum label_id {
 	label_nopage_tlbm,
 	label_smp_pgtable_change,
 	label_r3000_write_probe_fail,
+#ifdef CONFIG_HUGETLB_PAGE
+	label_tlb_huge_update,
+#endif
 };
 
 UASM_L_LA(_second_part)
@@ -98,6 +104,9 @@ UASM_L_LA(_nopage_tlbs)
 UASM_L_LA(_nopage_tlbm)
 UASM_L_LA(_smp_pgtable_change)
 UASM_L_LA(_r3000_write_probe_fail)
+#ifdef CONFIG_HUGETLB_PAGE
+UASM_L_LA(_tlb_huge_update)
+#endif
 
 /*
  * For debug purposes.
@@ -125,6 +134,7 @@ static inline void dump_handler(const u32 *handler, int count)
 #define C0_TCBIND	2, 2
 #define C0_ENTRYLO1	3, 0
 #define C0_CONTEXT	4, 0
+#define C0_PAGEMASK	5, 0
 #define C0_BADVADDR	8, 0
 #define C0_ENTRYHI	10, 0
 #define C0_EPC		14, 0
@@ -258,7 +268,8 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 
 	if (cpu_has_mips_r2) {
-		uasm_i_ehb(p);
+		if (cpu_has_mips_r2_exec_hazard)
+			uasm_i_ehb(p);
 		tlbw(p);
 		return;
 	}
@@ -310,7 +321,6 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case CPU_BCM3302:
 	case CPU_BCM4710:
 	case CPU_LOONGSON2:
-	case CPU_CAVIUM_OCTEON:
 	case CPU_R5500:
 		if (m4kc_tlbp_war())
 			uasm_i_nop(p);
@@ -382,6 +392,98 @@ static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	}
 }
 
+#ifdef CONFIG_HUGETLB_PAGE
+static __cpuinit void build_huge_tlb_write_entry(u32 **p,
+						 struct uasm_label **l,
+						 struct uasm_reloc **r,
+						 unsigned int tmp,
+						 enum tlb_write_entry wmode)
+{
+	/* Set huge page tlb entry size */
+	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
+	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
+	uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+
+	build_tlb_write_entry(p, l, r, wmode);
+
+	/* Reset default page size */
+	if (PM_DEFAULT_MASK >> 16) {
+		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
+		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
+		uasm_il_b(p, r, label_leave);
+		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	} else if (PM_DEFAULT_MASK) {
+		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
+		uasm_il_b(p, r, label_leave);
+		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
+	} else {
+		uasm_il_b(p, r, label_leave);
+		uasm_i_mtc0(p, 0, C0_PAGEMASK);
+	}
+}
+
+/*
+ * Check if Huge PTE is present, if so then jump to LABEL.
+ */
+static void __cpuinit
+build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
+		unsigned int pmd, int lid)
+{
+	UASM_i_LW(p, tmp, 0, pmd);
+	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
+	uasm_il_bnez(p, r, tmp, lid);
+}
+
+static __cpuinit void build_huge_update_entries(u32 **p,
+						unsigned int pte,
+						unsigned int tmp)
+{
+	int small_sequence;
+
+	/*
+	 * A huge PTE describes an area the size of the
+	 * configured huge page size. This is twice the
+	 * of the large TLB entry size we intend to use.
+	 * A TLB entry half the size of the configured
+	 * huge page size is configured into entrylo0
+	 * and entrylo1 to cover the contiguous huge PTE
+	 * address space.
+	 */
+	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
+
+	/* We can clobber tmp.  It isn't used after this.*/
+	if (!small_sequence)
+		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));
+
+	UASM_i_SRL(p, pte, pte, 6); /* convert to entrylo */
+	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* load it */
+	/* convert to entrylo1 */
+	if (small_sequence)
+		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
+	else
+		UASM_i_ADDU(p, pte, pte, tmp);
+
+	uasm_i_mtc0(p, pte, C0_ENTRYLO1); /* load it */
+}
+
+static __cpuinit void build_huge_handler_tail(u32 **p,
+					      struct uasm_reloc **r,
+					      struct uasm_label **l,
+					      unsigned int pte,
+					      unsigned int ptr)
+{
+#ifdef CONFIG_SMP
+	UASM_i_SC(p, pte, 0, ptr);
+	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
+	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
+#else
+	UASM_i_SW(p, pte, 0, ptr);
+#endif
+	build_huge_update_entries(p, pte, ptr);
+	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
+}
+#endif /* CONFIG_HUGETLB_PAGE */
+
 #ifdef CONFIG_64BIT
 /*
  * TMP and PTR are scratch.
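
build_huge_update_entries() mirrors, as emitted instructions, the C added to tlb-r4k.c, and small_sequence picks between two encodings of the EntryLo1 increment: (HPAGE_SIZE >> 7) fits a 16-bit uasm_i_addiu immediate only while HPAGE_SIZE stays below 0x10000 << 7 = 8 MB; larger huge pages need lui plus addu through the scratch register. A standalone sketch of that boundary (the listed sizes are illustrative; the kernel derives HPAGE_SIZE from the configured base page size):

    #include <stdio.h>

    int main(void)
    {
        unsigned long sizes[] = { 1UL << 21, 1UL << 22, 1UL << 24 };

        for (int i = 0; i < 3; i++) {
            unsigned long delta = sizes[i] >> 7;
            /* fits a 16-bit immediate? -> single addiu, else lui+addu */
            printf("%8lu KB: delta %#7lx -> %s\n", sizes[i] >> 10, delta,
                   delta < 0x10000 ? "addiu" : "lui+addu");
        }
        return 0;
    }
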
@@ -649,6 +751,14 @@ static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
 #endif
 }
 
+/*
+ * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
+ * because EXL == 0.  If we wrap, we can also use the 32 instruction
+ * slots before the XTLB refill exception handler which belong to the
+ * unused TLB refill exception.
+ */
+#define MIPS64_REFILL_INSNS 32
+
 static void __cpuinit build_r4000_tlb_refill_handler(void)
 {
 	u32 *p = tlb_handler;
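
The constant encodes the hardware budget: each refill vector slot is 0x80 bytes, i.e. 32 four-byte instructions, and on a 64-bit kernel the dead 32-bit-refill slot directly below can be annexed, doubling the space. That is why the overflow checks that follow compare against (MIPS64_REFILL_INSNS * 2) - 1 rather than the old literal 63. A back-of-envelope sketch of the same arithmetic (values assumed from the MIPS exception vector layout):

    enum {
        SLOT_BYTES   = 0x80,                    /* one exception vector slot */
        INSN_BYTES   = 4,                       /* fixed MIPS encoding       */
        SLOT_INSNS   = SLOT_BYTES / INSN_BYTES, /* 32 = MIPS64_REFILL_INSNS  */
        BUDGET       = SLOT_INSNS * 2,          /* annexed second slot: 64   */
        MAX_STRAIGHT = BUDGET - 1,              /* reserve the branch out    */
        MAX_BDELAY   = BUDGET - 3,              /* ...and its delay slot     */
    };
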
@@ -680,12 +790,23 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE
+	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
+#endif
+
 	build_get_ptep(&p, K0, K1);
 	build_update_entries(&p, K0, K1);
 	build_tlb_write_entry(&p, &l, &r, tlb_random);
 	uasm_l_leave(&l, p);
 	uasm_i_eret(&p); /* return from trap */
 
+#ifdef CONFIG_HUGETLB_PAGE
+	uasm_l_tlb_huge_update(&l, p);
+	UASM_i_LW(&p, K0, 0, K1);
+	build_huge_update_entries(&p, K0, K1);
+	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
+#endif
+
 #ifdef CONFIG_64BIT
 	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
 #endif
@@ -702,9 +823,10 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	if ((p - tlb_handler) > 64)
 		panic("TLB refill handler space exceeded");
 #else
-	if (((p - tlb_handler) > 63)
-	    || (((p - tlb_handler) > 61)
-		&& uasm_insn_has_bdelay(relocs, tlb_handler + 29)))
+	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
+	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
+		&& uasm_insn_has_bdelay(relocs,
+					tlb_handler + MIPS64_REFILL_INSNS - 3)))
 		panic("TLB refill handler space exceeded");
 #endif
 
@@ -717,39 +839,74 @@ static void __cpuinit build_r4000_tlb_refill_handler(void)
 	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
 	final_len = p - tlb_handler;
 #else /* CONFIG_64BIT */
-	f = final_handler + 32;
-	if ((p - tlb_handler) <= 32) {
+	f = final_handler + MIPS64_REFILL_INSNS;
+	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
 		/* Just copy the handler. */
 		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
 		final_len = p - tlb_handler;
 	} else {
-		u32 *split = tlb_handler + 30;
+#if defined(CONFIG_HUGETLB_PAGE)
+		const enum label_id ls = label_tlb_huge_update;
+#elif defined(MODULE_START)
+		const enum label_id ls = label_module_alloc;
+#else
+		const enum label_id ls = label_vmalloc;
+#endif
+		u32 *split;
+		int ov = 0;
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
+			;
+		BUG_ON(i == ARRAY_SIZE(labels));
+		split = labels[i].addr;
 
 		/*
-		 * Find the split point.
+		 * See if we have overflown one way or the other.
 		 */
-		if (uasm_insn_has_bdelay(relocs, split - 1))
-			split--;
-
+		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
+		    split < p - MIPS64_REFILL_INSNS)
+			ov = 1;
+
+		if (ov) {
+			/*
+			 * Split two instructions before the end.  One
+			 * for the branch and one for the instruction
+			 * in the delay slot.
+			 */
+			split = tlb_handler + MIPS64_REFILL_INSNS - 2;
+
+			/*
+			 * If the branch would fall in a delay slot,
+			 * we must back up an additional instruction
+			 * so that it is no longer in a delay slot.
+			 */
+			if (uasm_insn_has_bdelay(relocs, split - 1))
+				split--;
+		}
 		/* Copy first part of the handler. */
 		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
 		f += split - tlb_handler;
 
-		/* Insert branch. */
-		uasm_l_split(&l, final_handler);
-		uasm_il_b(&f, &r, label_split);
-		if (uasm_insn_has_bdelay(relocs, split))
-			uasm_i_nop(&f);
-		else {
-			uasm_copy_handler(relocs, labels, split, split + 1, f);
-			uasm_move_labels(labels, f, f + 1, -1);
-			f++;
-			split++;
+		if (ov) {
+			/* Insert branch. */
+			uasm_l_split(&l, final_handler);
+			uasm_il_b(&f, &r, label_split);
+			if (uasm_insn_has_bdelay(relocs, split))
+				uasm_i_nop(&f);
+			else {
+				uasm_copy_handler(relocs, labels,
+						  split, split + 1, f);
+				uasm_move_labels(labels, f, f + 1, -1);
+				f++;
+				split++;
+			}
 		}
 
 		/* Copy the rest of the handler. */
 		uasm_copy_handler(relocs, labels, split, p, final_handler);
-		final_len = (f - (final_handler + 32)) + (p - split);
+		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
+			    (p - split);
 	}
 #endif /* CONFIG_64BIT */
 
@@ -782,7 +939,7 @@ u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
 u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
 
 static void __cpuinit
-iPTE_LW(u32 **p, struct uasm_label **l, unsigned int pte, unsigned int ptr)
+iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
 {
 #ifdef CONFIG_SMP
 # ifdef CONFIG_64BIT_PHYS_ADDR
@@ -862,13 +1019,13 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
  * with it's original value.
  */
 static void __cpuinit
-build_pte_present(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+build_pte_present(u32 **p, struct uasm_reloc **r,
 		  unsigned int pte, unsigned int ptr, enum label_id lid)
 {
 	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
 	uasm_il_bnez(p, r, pte, lid);
-	iPTE_LW(p, l, pte, ptr);
+	iPTE_LW(p, pte, ptr);
 }
 
 /* Make PTE valid, store result in PTR. */
@@ -886,13 +1043,13 @@ build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * restore PTE with value from PTR when done.
  */
 static void __cpuinit
-build_pte_writable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+build_pte_writable(u32 **p, struct uasm_reloc **r,
 		   unsigned int pte, unsigned int ptr, enum label_id lid)
 {
 	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
 	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
 	uasm_il_bnez(p, r, pte, lid);
-	iPTE_LW(p, l, pte, ptr);
+	iPTE_LW(p, pte, ptr);
 }
 
 /* Make PTE writable, update software status bits as well, then store
@@ -913,12 +1070,12 @@ build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
  * restore PTE with value from PTR when done.
  */
 static void __cpuinit
-build_pte_modifiable(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
+build_pte_modifiable(u32 **p, struct uasm_reloc **r,
 		     unsigned int pte, unsigned int ptr, enum label_id lid)
 {
 	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
 	uasm_il_beqz(p, r, pte, lid);
-	iPTE_LW(p, l, pte, ptr);
+	iPTE_LW(p, pte, ptr);
 }
 
 /*
@@ -994,7 +1151,7 @@ static void __cpuinit build_r3000_tlb_load_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	uasm_i_nop(&p); /* load delay */
 	build_make_valid(&p, &r, K0, K1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1024,7 +1181,7 @@ static void __cpuinit build_r3000_tlb_store_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
 	uasm_i_nop(&p); /* load delay */
 	build_make_write(&p, &r, K0, K1);
 	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
@@ -1054,7 +1211,7 @@ static void __cpuinit build_r3000_tlb_modify_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r3000_tlbchange_handler_head(&p, K0, K1);
-	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
 	uasm_i_nop(&p); /* load delay */
 	build_make_write(&p, &r, K0, K1);
 	build_r3000_pte_reload_tlbwi(&p, K0, K1);
@@ -1087,6 +1244,15 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 	build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
 #endif
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/*
+	 * For huge tlb entries, pmd doesn't contain an address but
+	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
+	 * see if we need to jump to huge tlb processing.
+	 */
+	build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
+#endif
+
 	UASM_i_MFC0(p, pte, C0_BADVADDR);
 	UASM_i_LW(p, ptr, 0, ptr);
 	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
@@ -1096,7 +1262,7 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
 #ifdef CONFIG_SMP
 	uasm_l_smp_pgtable_change(l, *p);
 #endif
-	iPTE_LW(p, l, pte, ptr); /* get even pte */
+	iPTE_LW(p, pte, ptr); /* get even pte */
 	if (!m4kc_tlbp_war())
 		build_tlb_probe_entry(p);
 }
@@ -1138,12 +1304,25 @@ static void __cpuinit build_r4000_tlb_load_handler(void)
 	}
 
 	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
+	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	build_make_valid(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/*
+	 * This is the entry point when build_r4000_tlbchange_handler_head
+	 * spots a huge page.
+	 */
+	uasm_l_tlb_huge_update(&l, p);
+	iPTE_LW(&p, K0, K1);
+	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
+	build_tlb_probe_entry(&p);
+	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
+	build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
 	uasm_l_nopage_tlbl(&l, p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
 	uasm_i_nop(&p);
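
The huge-page tail added here, and repeated in the store and modify handlers below, follows one template: land at label_tlb_huge_update, reload the PTE (iPTE_LW emits an LL on SMP), re-run the permission test, set the software bits with ori, then build_huge_handler_tail() stores the PTE back and writes the TLB entry. On SMP the store is an SC that branches back to the label on failure, so the emitted code behaves like this C sketch (pte_ok and store_conditional stand in for the generated tests and the SC; they are not kernel functions):

    do {
        pte = *ptep;                     /* iPTE_LW: LL on SMP   */
        if (!pte_ok(pte))                /* build_pte_*() check  */
            goto nopage;
        pte |= _PAGE_ACCESSED | _PAGE_VALID;
    } while (!store_conditional(ptep, pte)); /* UASM_i_SC, retry on race */
    /* then: load EntryLo0/1, set huge PageMask, indexed TLB write */
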
@@ -1169,12 +1348,26 @@ static void __cpuinit build_r4000_tlb_store_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_writable(&p, &l, &r, K0, K1, label_nopage_tlbs);
+	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	build_make_write(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/*
+	 * This is the entry point when
+	 * build_r4000_tlbchange_handler_head spots a huge page.
+	 */
+	uasm_l_tlb_huge_update(&l, p);
+	iPTE_LW(&p, K0, K1);
+	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
+	build_tlb_probe_entry(&p);
+	uasm_i_ori(&p, K0, K0,
+		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+	build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
 	uasm_l_nopage_tlbs(&l, p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);
@@ -1200,13 +1393,27 @@ static void __cpuinit build_r4000_tlb_modify_handler(void)
 	memset(relocs, 0, sizeof(relocs));
 
 	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
-	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
+	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
 	if (m4kc_tlbp_war())
 		build_tlb_probe_entry(&p);
 	/* Present and writable bits set, set accessed and dirty bits. */
 	build_make_write(&p, &r, K0, K1);
 	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);
 
+#ifdef CONFIG_HUGETLB_PAGE
+	/*
+	 * This is the entry point when
+	 * build_r4000_tlbchange_handler_head spots a huge page.
+	 */
+	uasm_l_tlb_huge_update(&l, p);
+	iPTE_LW(&p, K0, K1);
+	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
+	build_tlb_probe_entry(&p);
+	uasm_i_ori(&p, K0, K0,
+		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
+	build_huge_handler_tail(&p, &r, &l, K0, K1);
+#endif
+
 	uasm_l_nopage_tlbm(&l, p);
 	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
 	uasm_i_nop(&p);