about | summary | refs | log | tree | commit | diff | stats
path: root/arch/mips/mm
diff options
context:
space:
mode:
authorLeonid Yegoshin <Leonid.Yegoshin@imgtec.com>2013-11-14 11:12:31 -0500
committerRalf Baechle <ralf@linux-mips.org>2014-01-22 14:19:00 -0500
commit75b5b5e0a262790fa11043fe45700499c7e3d818 (patch)
tree3c5af9caa9c5478668159ff34db0ab34b51d7511 /arch/mips/mm
parent601cfa7b6fb657cff9e8f77bbcce79f75dd7ab74 (diff)
MIPS: Add support for FTLBs
The Fixed Page Size TLB (FTLB) is a set-associative dual entry TLB. Its purpose is to reduce the number of TLB misses by increasing the effective TLB size and keeping the implementation complexity to minimum levels. A supported core can have both a VTLB and an FTLB.

Reviewed-by: James Hogan <james.hogan@imgtec.com>
Reviewed-by: Paul Burton <paul.burton@imgtec.com>
Signed-off-by: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
Signed-off-by: Markos Chandras <markos.chandras@imgtec.com>
Signed-off-by: John Crispin <blogic@openwrt.org>
Patchwork: http://patchwork.linux-mips.org/patch/6139/
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--arch/mips/mm/tlb-r4k.c29
1 file changed, 22 insertions, 7 deletions
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 427dcacca586..ae4ca2450707 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -72,7 +72,7 @@ void local_flush_tlb_all(void)
72{ 72{
73 unsigned long flags; 73 unsigned long flags;
74 unsigned long old_ctx; 74 unsigned long old_ctx;
75 int entry; 75 int entry, ftlbhighset;
76 76
77 ENTER_CRITICAL(flags); 77 ENTER_CRITICAL(flags);
78 /* Save old context and create impossible VPN2 value */ 78 /* Save old context and create impossible VPN2 value */
@@ -83,10 +83,21 @@ void local_flush_tlb_all(void)
83 entry = read_c0_wired(); 83 entry = read_c0_wired();
84 84
85 /* Blast 'em all away. */ 85 /* Blast 'em all away. */
86 if (cpu_has_tlbinv && current_cpu_data.tlbsize) { 86 if (cpu_has_tlbinv) {
87 write_c0_index(0); 87 if (current_cpu_data.tlbsizevtlb) {
88 mtc0_tlbw_hazard(); 88 write_c0_index(0);
89 tlbinvf(); /* invalidate VTLB */ 89 mtc0_tlbw_hazard();
90 tlbinvf(); /* invalidate VTLB */
91 }
92 ftlbhighset = current_cpu_data.tlbsizevtlb +
93 current_cpu_data.tlbsizeftlbsets;
94 for (entry = current_cpu_data.tlbsizevtlb;
95 entry < ftlbhighset;
96 entry++) {
97 write_c0_index(entry);
98 mtc0_tlbw_hazard();
99 tlbinvf(); /* invalidate one FTLB set */
100 }
90 } else { 101 } else {
91 while (entry < current_cpu_data.tlbsize) { 102 while (entry < current_cpu_data.tlbsize) {
92 /* Make sure all entries differ. */ 103 /* Make sure all entries differ. */
@@ -134,7 +145,9 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
134 start = round_down(start, PAGE_SIZE << 1); 145 start = round_down(start, PAGE_SIZE << 1);
135 end = round_up(end, PAGE_SIZE << 1); 146 end = round_up(end, PAGE_SIZE << 1);
136 size = (end - start) >> (PAGE_SHIFT + 1); 147 size = (end - start) >> (PAGE_SHIFT + 1);
137 if (size <= current_cpu_data.tlbsize/2) { 148 if (size <= (current_cpu_data.tlbsizeftlbsets ?
149 current_cpu_data.tlbsize / 8 :
150 current_cpu_data.tlbsize / 2)) {
138 int oldpid = read_c0_entryhi(); 151 int oldpid = read_c0_entryhi();
139 int newpid = cpu_asid(cpu, mm); 152 int newpid = cpu_asid(cpu, mm);
140 153
@@ -173,7 +186,9 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
173 ENTER_CRITICAL(flags); 186 ENTER_CRITICAL(flags);
174 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; 187 size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
175 size = (size + 1) >> 1; 188 size = (size + 1) >> 1;
176 if (size <= current_cpu_data.tlbsize / 2) { 189 if (size <= (current_cpu_data.tlbsizeftlbsets ?
190 current_cpu_data.tlbsize / 8 :
191 current_cpu_data.tlbsize / 2)) {
177 int pid = read_c0_entryhi(); 192 int pid = read_c0_entryhi();
178 193
179 start &= (PAGE_MASK << 1); 194 start &= (PAGE_MASK << 1);