about summary refs log tree commit diff stats
path: root/arch/mips/mm
diff options
context:
space:
mode:
author Ralf Baechle <ralf@linux-mips.org> 2013-09-25 12:21:26 -0400
committer Ralf Baechle <ralf@linux-mips.org> 2013-10-29 16:25:24 -0400
commit 14bd8c082016cd1f67fdfd702e4cf6367869a712 (patch)
tree b4d517e79c58fd3c665286b39ddb1801890f2cb0 /arch/mips/mm
parent 7b784c634b4147345b46251884be6be4bd45fd43 (diff)
MIPS: Loongson: Get rid of Loongson 2 #ifdefery all over arch/mips.
It was ugly.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c   52
-rw-r--r--  arch/mips/mm/tlb-r4k.c 37
-rw-r--r--  arch/mips/mm/tlbex.c   169
3 files changed, 139 insertions(+), 119 deletions(-)
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index bc6f96fcb529..62ffd20ea869 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -346,14 +346,8 @@ static void r4k_blast_scache_setup(void)
346 346
347static inline void local_r4k___flush_cache_all(void * args) 347static inline void local_r4k___flush_cache_all(void * args)
348{ 348{
349#if defined(CONFIG_CPU_LOONGSON2)
350 r4k_blast_scache();
351 return;
352#endif
353 r4k_blast_dcache();
354 r4k_blast_icache();
355
356 switch (current_cpu_type()) { 349 switch (current_cpu_type()) {
350 case CPU_LOONGSON2:
357 case CPU_R4000SC: 351 case CPU_R4000SC:
358 case CPU_R4000MC: 352 case CPU_R4000MC:
359 case CPU_R4400SC: 353 case CPU_R4400SC:
@@ -361,7 +355,18 @@ static inline void local_r4k___flush_cache_all(void * args)
361 case CPU_R10000: 355 case CPU_R10000:
362 case CPU_R12000: 356 case CPU_R12000:
363 case CPU_R14000: 357 case CPU_R14000:
358 /*
359 * These caches are inclusive caches, that is, if something
360 * is not cached in the S-cache, we know it also won't be
361 * in one of the primary caches.
362 */
364 r4k_blast_scache(); 363 r4k_blast_scache();
364 break;
365
366 default:
367 r4k_blast_dcache();
368 r4k_blast_icache();
369 break;
365 } 370 }
366} 371}
367 372
@@ -572,8 +577,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
572 577
573 if (end - start > icache_size) 578 if (end - start > icache_size)
574 r4k_blast_icache(); 579 r4k_blast_icache();
575 else 580 else {
576 protected_blast_icache_range(start, end); 581 switch (boot_cpu_type()) {
582 case CPU_LOONGSON2:
583 protected_blast_icache_range(start, end);
584 break;
585
586 default:
587 protected_loongson23_blast_icache_range(start, end);
588 break;
589 }
590 }
577} 591}
578 592
579static inline void local_r4k_flush_icache_range_ipi(void *args) 593static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -1109,15 +1123,14 @@ static void probe_pcache(void)
1109 case CPU_ALCHEMY: 1123 case CPU_ALCHEMY:
1110 c->icache.flags |= MIPS_CACHE_IC_F_DC; 1124 c->icache.flags |= MIPS_CACHE_IC_F_DC;
1111 break; 1125 break;
1112 }
1113 1126
1114#ifdef CONFIG_CPU_LOONGSON2 1127 case CPU_LOONGSON2:
1115 /* 1128 /*
1116 * LOONGSON2 has 4 way icache, but when using indexed cache op, 1129 * LOONGSON2 has 4 way icache, but when using indexed cache op,
1117 * one op will act on all 4 ways 1130 * one op will act on all 4 ways
1118 */ 1131 */
1119 c->icache.ways = 1; 1132 c->icache.ways = 1;
1120#endif 1133 }
1121 1134
1122 printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n", 1135 printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n",
1123 icache_size >> 10, 1136 icache_size >> 10,
@@ -1193,7 +1206,6 @@ static int probe_scache(void)
1193 return 1; 1206 return 1;
1194} 1207}
1195 1208
1196#if defined(CONFIG_CPU_LOONGSON2)
1197static void __init loongson2_sc_init(void) 1209static void __init loongson2_sc_init(void)
1198{ 1210{
1199 struct cpuinfo_mips *c = &current_cpu_data; 1211 struct cpuinfo_mips *c = &current_cpu_data;
@@ -1209,7 +1221,6 @@ static void __init loongson2_sc_init(void)
1209 1221
1210 c->options |= MIPS_CPU_INCLUSIVE_CACHES; 1222 c->options |= MIPS_CPU_INCLUSIVE_CACHES;
1211} 1223}
1212#endif
1213 1224
1214extern int r5k_sc_init(void); 1225extern int r5k_sc_init(void);
1215extern int rm7k_sc_init(void); 1226extern int rm7k_sc_init(void);
@@ -1259,11 +1270,10 @@ static void setup_scache(void)
1259#endif 1270#endif
1260 return; 1271 return;
1261 1272
1262#if defined(CONFIG_CPU_LOONGSON2)
1263 case CPU_LOONGSON2: 1273 case CPU_LOONGSON2:
1264 loongson2_sc_init(); 1274 loongson2_sc_init();
1265 return; 1275 return;
1266#endif 1276
1267 case CPU_XLP: 1277 case CPU_XLP:
1268 /* don't need to worry about L2, fully coherent */ 1278 /* don't need to worry about L2, fully coherent */
1269 return; 1279 return;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index bb3a5f643e97..da3b0b9c9eae 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -52,21 +52,26 @@ extern void build_tlb_refill_handler(void);
52 52
53#endif /* CONFIG_MIPS_MT_SMTC */ 53#endif /* CONFIG_MIPS_MT_SMTC */
54 54
55#if defined(CONFIG_CPU_LOONGSON2)
56/* 55/*
57 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb, 56 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
 58 * unfortunately, itlb is not totally transparent to software. 57 * unfortunately, itlb is not totally transparent to software.
59 */ 58 */
60#define FLUSH_ITLB write_c0_diag(4); 59static inline void flush_itlb(void)
61 60{
62#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC) write_c0_diag(4); } 61 switch (current_cpu_type()) {
63 62 case CPU_LOONGSON2:
64#else 63 write_c0_diag(4);
65 64 break;
66#define FLUSH_ITLB 65 default:
67#define FLUSH_ITLB_VM(vma) 66 break;
67 }
68}
68 69
69#endif 70static inline void flush_itlb_vm(struct vm_area_struct *vma)
71{
72 if (vma->vm_flags & VM_EXEC)
73 flush_itlb();
74}
70 75
71void local_flush_tlb_all(void) 76void local_flush_tlb_all(void)
72{ 77{
@@ -93,7 +98,7 @@ void local_flush_tlb_all(void)
93 } 98 }
94 tlbw_use_hazard(); 99 tlbw_use_hazard();
95 write_c0_entryhi(old_ctx); 100 write_c0_entryhi(old_ctx);
96 FLUSH_ITLB; 101 flush_itlb();
97 EXIT_CRITICAL(flags); 102 EXIT_CRITICAL(flags);
98} 103}
99EXPORT_SYMBOL(local_flush_tlb_all); 104EXPORT_SYMBOL(local_flush_tlb_all);
@@ -155,7 +160,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
155 } else { 160 } else {
156 drop_mmu_context(mm, cpu); 161 drop_mmu_context(mm, cpu);
157 } 162 }
158 FLUSH_ITLB; 163 flush_itlb();
159 EXIT_CRITICAL(flags); 164 EXIT_CRITICAL(flags);
160 } 165 }
161} 166}
@@ -197,7 +202,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
197 } else { 202 } else {
198 local_flush_tlb_all(); 203 local_flush_tlb_all();
199 } 204 }
200 FLUSH_ITLB; 205 flush_itlb();
201 EXIT_CRITICAL(flags); 206 EXIT_CRITICAL(flags);
202} 207}
203 208
@@ -230,7 +235,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
230 235
231 finish: 236 finish:
232 write_c0_entryhi(oldpid); 237 write_c0_entryhi(oldpid);
233 FLUSH_ITLB_VM(vma); 238 flush_itlb_vm(vma);
234 EXIT_CRITICAL(flags); 239 EXIT_CRITICAL(flags);
235 } 240 }
236} 241}
@@ -262,7 +267,7 @@ void local_flush_tlb_one(unsigned long page)
262 tlbw_use_hazard(); 267 tlbw_use_hazard();
263 } 268 }
264 write_c0_entryhi(oldpid); 269 write_c0_entryhi(oldpid);
265 FLUSH_ITLB; 270 flush_itlb();
266 EXIT_CRITICAL(flags); 271 EXIT_CRITICAL(flags);
267} 272}
268 273
@@ -335,7 +340,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
335 tlb_write_indexed(); 340 tlb_write_indexed();
336 } 341 }
337 tlbw_use_hazard(); 342 tlbw_use_hazard();
338 FLUSH_ITLB_VM(vma); 343 flush_itlb_vm(vma);
339 EXIT_CRITICAL(flags); 344 EXIT_CRITICAL(flags);
340} 345}
341 346
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index fffa7fe319a0..183f2b583e4d 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1311,95 +1311,100 @@ static void build_r4000_tlb_refill_handler(void)
1311 * need three, with the second nop'ed and the third being 1311 * need three, with the second nop'ed and the third being
1312 * unused. 1312 * unused.
1313 */ 1313 */
1314 /* Loongson2 ebase is different than r4k, we have more space */ 1314 switch (boot_cpu_type()) {
1315#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2) 1315 default:
1316 if ((p - tlb_handler) > 64) 1316 if (sizeof(long) == 4) {
1317 panic("TLB refill handler space exceeded"); 1317 case CPU_LOONGSON2:
1318#else 1318 /* Loongson2 ebase is different than r4k, we have more space */
1319 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1) 1319 if ((p - tlb_handler) > 64)
1320 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3) 1320 panic("TLB refill handler space exceeded");
1321 && uasm_insn_has_bdelay(relocs,
1322 tlb_handler + MIPS64_REFILL_INSNS - 3)))
1323 panic("TLB refill handler space exceeded");
1324#endif
1325
1326 /*
1327 * Now fold the handler in the TLB refill handler space.
1328 */
1329#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
1330 f = final_handler;
1331 /* Simplest case, just copy the handler. */
1332 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1333 final_len = p - tlb_handler;
1334#else /* CONFIG_64BIT */
1335 f = final_handler + MIPS64_REFILL_INSNS;
1336 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1337 /* Just copy the handler. */
1338 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1339 final_len = p - tlb_handler;
1340 } else {
1341#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1342 const enum label_id ls = label_tlb_huge_update;
1343#else
1344 const enum label_id ls = label_vmalloc;
1345#endif
1346 u32 *split;
1347 int ov = 0;
1348 int i;
1349
1350 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1351 ;
1352 BUG_ON(i == ARRAY_SIZE(labels));
1353 split = labels[i].addr;
1354
1355 /*
1356 * See if we have overflown one way or the other.
1357 */
1358 if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1359 split < p - MIPS64_REFILL_INSNS)
1360 ov = 1;
1361
1362 if (ov) {
1363 /* 1321 /*
1364 * Split two instructions before the end. One 1322 * Now fold the handler in the TLB refill handler space.
1365 * for the branch and one for the instruction
1366 * in the delay slot.
1367 */ 1323 */
1368 split = tlb_handler + MIPS64_REFILL_INSNS - 2; 1324 f = final_handler;
1369 1325 /* Simplest case, just copy the handler. */
1326 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1327 final_len = p - tlb_handler;
1328 break;
1329 } else {
1330 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1331 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1332 && uasm_insn_has_bdelay(relocs,
1333 tlb_handler + MIPS64_REFILL_INSNS - 3)))
1334 panic("TLB refill handler space exceeded");
1370 /* 1335 /*
1371 * If the branch would fall in a delay slot, 1336 * Now fold the handler in the TLB refill handler space.
1372 * we must back up an additional instruction
1373 * so that it is no longer in a delay slot.
1374 */ 1337 */
1375 if (uasm_insn_has_bdelay(relocs, split - 1)) 1338 f = final_handler + MIPS64_REFILL_INSNS;
1376 split--; 1339 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1377 } 1340 /* Just copy the handler. */
1378 /* Copy first part of the handler. */ 1341 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1379 uasm_copy_handler(relocs, labels, tlb_handler, split, f); 1342 final_len = p - tlb_handler;
1380 f += split - tlb_handler; 1343 } else {
1381 1344#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1382 if (ov) { 1345 const enum label_id ls = label_tlb_huge_update;
1383 /* Insert branch. */ 1346#else
1384 uasm_l_split(&l, final_handler); 1347 const enum label_id ls = label_vmalloc;
1385 uasm_il_b(&f, &r, label_split); 1348#endif
1386 if (uasm_insn_has_bdelay(relocs, split)) 1349 u32 *split;
1387 uasm_i_nop(&f); 1350 int ov = 0;
1388 else { 1351 int i;
1389 uasm_copy_handler(relocs, labels, 1352
1390 split, split + 1, f); 1353 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1391 uasm_move_labels(labels, f, f + 1, -1); 1354 ;
1392 f++; 1355 BUG_ON(i == ARRAY_SIZE(labels));
1393 split++; 1356 split = labels[i].addr;
1357
1358 /*
1359 * See if we have overflown one way or the other.
1360 */
1361 if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1362 split < p - MIPS64_REFILL_INSNS)
1363 ov = 1;
1364
1365 if (ov) {
1366 /*
1367 * Split two instructions before the end. One
1368 * for the branch and one for the instruction
1369 * in the delay slot.
1370 */
1371 split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1372
1373 /*
1374 * If the branch would fall in a delay slot,
1375 * we must back up an additional instruction
1376 * so that it is no longer in a delay slot.
1377 */
1378 if (uasm_insn_has_bdelay(relocs, split - 1))
1379 split--;
1380 }
1381 /* Copy first part of the handler. */
1382 uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1383 f += split - tlb_handler;
1384
1385 if (ov) {
1386 /* Insert branch. */
1387 uasm_l_split(&l, final_handler);
1388 uasm_il_b(&f, &r, label_split);
1389 if (uasm_insn_has_bdelay(relocs, split))
1390 uasm_i_nop(&f);
1391 else {
1392 uasm_copy_handler(relocs, labels,
1393 split, split + 1, f);
1394 uasm_move_labels(labels, f, f + 1, -1);
1395 f++;
1396 split++;
1397 }
1398 }
1399
1400 /* Copy the rest of the handler. */
1401 uasm_copy_handler(relocs, labels, split, p, final_handler);
1402 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1403 (p - split);
1394 } 1404 }
1395 } 1405 }
1396 1406 break;
1397 /* Copy the rest of the handler. */
1398 uasm_copy_handler(relocs, labels, split, p, final_handler);
1399 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1400 (p - split);
1401 } 1407 }
1402#endif /* CONFIG_64BIT */
1403 1408
1404 uasm_resolve_relocs(relocs, labels); 1409 uasm_resolve_relocs(relocs, labels);
1405 pr_debug("Wrote TLB refill handler (%u instructions).\n", 1410 pr_debug("Wrote TLB refill handler (%u instructions).\n",