author     Linus Torvalds <torvalds@linux-foundation.org>  2014-04-02 16:40:50 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-04-02 16:40:50 -0400
commit     bdfc7cbdeef8cadba0e5793079ac0130b8e2220c (patch)
tree       82af0cae4898e259edcc6cbdad639087dc1189a8 /arch/mips/mm
parent     62d1a3ba5adc5653d43f6cd3a90758bb6ad5d5bd (diff)
parent     ade63aada79c61bcd5f51cbd310f237399892268 (diff)
Merge branch 'mips-for-linux-next' of git://git.linux-mips.org/pub/scm/ralf/upstream-sfr
Pull MIPS updates from Ralf Baechle:
- Support for Imgtec's Aptiv family of MIPS cores.
- Improved detection of BCM47xx configurations.
- Fix hibernation for certain configurations.
- Add support for the Chinese Loongson 3 CPU, a MIPS64 R2 core and systems.
- Detection and support for the MIPS P5600 core.
- A few more random fixes that didn't make 3.14.
- Support for EVA (Extended Virtual Addressing)
- Switch Alchemy to the platform PATA driver
- Complete unification of Alchemy support
- Allow availability of I/O cache coherency to be runtime detected
- Improvements to multiprocessing support for Imgtec platforms
- A few microoptimizations
- Cleanups of FPU support
- Paul Gortmaker's fixes for the init stuff
- Support for seccomp
* 'mips-for-linux-next' of git://git.linux-mips.org/pub/scm/ralf/upstream-sfr: (165 commits)
MIPS: CPC: Use __raw_ memory access functions
MIPS: CM: use __raw_ memory access functions
MIPS: Fix warning when including smp-ops.h with CONFIG_SMP=n
MIPS: Malta: GIC IPIs may be used without MT
MIPS: smp-mt: Use common GIC IPI implementation
MIPS: smp-cmp: Remove incorrect core number probe
MIPS: Fix gigaton of warning building with microMIPS.
MIPS: Fix core number detection for MT cores
MIPS: MT: core_nvpes function to retrieve VPE count
MIPS: Provide empty mips_mt_set_cpuoptions when CONFIG_MIPS_MT=n
MIPS: Lasat: Replace del_timer by del_timer_sync
MIPS: Malta: Setup PM I/O region on boot
MIPS: Loongson: Add a Loongson-3 default config file
MIPS: Loongson 3: Add CPU hotplug support
MIPS: Loongson 3: Add Loongson-3 SMP support
MIPS: Loongson: Add Loongson-3 Kconfig options
MIPS: Loongson: Add swiotlb to support All-Memory DMA
MIPS: Loongson 3: Add serial port support
MIPS: Loongson 3: Add IRQ init and dispatch support
MIPS: Loongson 3: Add HT-linked PCI support
...
Diffstat (limited to 'arch/mips/mm')
-rw-r--r--  arch/mips/mm/c-r4k.c    147
-rw-r--r--  arch/mips/mm/cache.c      4
-rw-r--r--  arch/mips/mm/init.c      12
-rw-r--r--  arch/mips/mm/sc-mips.c    2
-rw-r--r--  arch/mips/mm/tlb-r4k.c    5
-rw-r--r--  arch/mips/mm/tlbex.c      6
6 files changed, 160 insertions, 16 deletions
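The Loongson-3 probe code added to c-r4k.c below derives each cache's geometry from the CP0 Config1 register: total size = sets x ways x line size, with every field pulled out of Config1 by shift-and-mask. A minimal standalone sketch of that decoding (the decode_cache() helper, the l1_geom struct and the sample Config1 value are illustrative, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative only: mirrors how the Loongson-3 probe in c-r4k.c decodes
 * the MIPS Config1 register.  Field positions match the shifts used in
 * the hunk below:
 *   IL (I-cache line) bits 21:19, IS (sets/way) bits 24:22, IA (assoc) bits 18:16
 *   DL (D-cache line) bits 12:10, DS (sets/way) bits 15:13, DA (assoc) bits  9:7
 */
struct l1_geom {
	unsigned int linesz;	/* bytes per line, 0 = no cache present */
	unsigned int sets;	/* sets per way */
	unsigned int ways;	/* associativity */
	unsigned int size;	/* total bytes */
};

static struct l1_geom decode_cache(uint32_t config1, int l_shift, int s_shift, int a_shift)
{
	struct l1_geom g;
	unsigned int lsize = (config1 >> l_shift) & 7;

	g.linesz = lsize ? (2u << lsize) : 0;	/* e.g. field value 4 -> 32-byte lines */
	g.sets   = 64u << ((config1 >> s_shift) & 7);
	g.ways   = 1 + ((config1 >> a_shift) & 7);
	g.size   = g.sets * g.ways * g.linesz;
	return g;
}

int main(void)
{
	/* Hypothetical Config1 value: 64-set, 4-way, 32-byte-line I- and D-caches */
	uint32_t config1 = (4u << 19) | (0u << 22) | (3u << 16) |
			   (4u << 10) | (0u << 13) | (3u << 7);
	struct l1_geom ic = decode_cache(config1, 19, 22, 16);
	struct l1_geom dc = decode_cache(config1, 10, 13, 7);

	printf("I-cache: %u bytes, D-cache: %u bytes\n", ic.size, dc.size);
	return 0;
}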
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index c14259edd53f..1c74a6ad072a 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -57,7 +57,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
 	preempt_enable();
 }
 
-#if defined(CONFIG_MIPS_CMP)
+#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
 #define cpu_has_safe_index_cacheops 0
 #else
 #define cpu_has_safe_index_cacheops 1
@@ -123,6 +123,28 @@ static void r4k_blast_dcache_page_setup(void)
 		r4k_blast_dcache_page = r4k_blast_dcache_page_dc64;
 }
 
+#ifndef CONFIG_EVA
+#define r4k_blast_dcache_user_page  r4k_blast_dcache_page
+#else
+
+static void (*r4k_blast_dcache_user_page)(unsigned long addr);
+
+static void r4k_blast_dcache_user_page_setup(void)
+{
+	unsigned long dc_lsize = cpu_dcache_line_size();
+
+	if (dc_lsize == 0)
+		r4k_blast_dcache_user_page = (void *)cache_noop;
+	else if (dc_lsize == 16)
+		r4k_blast_dcache_user_page = blast_dcache16_user_page;
+	else if (dc_lsize == 32)
+		r4k_blast_dcache_user_page = blast_dcache32_user_page;
+	else if (dc_lsize == 64)
+		r4k_blast_dcache_user_page = blast_dcache64_user_page;
+}
+
+#endif
+
 static void (* r4k_blast_dcache_page_indexed)(unsigned long addr);
 
 static void r4k_blast_dcache_page_indexed_setup(void)
@@ -245,6 +267,27 @@ static void r4k_blast_icache_page_setup(void)
 		r4k_blast_icache_page = blast_icache64_page;
 }
 
+#ifndef CONFIG_EVA
+#define r4k_blast_icache_user_page  r4k_blast_icache_page
+#else
+
+static void (*r4k_blast_icache_user_page)(unsigned long addr);
+
+static void __cpuinit r4k_blast_icache_user_page_setup(void)
+{
+	unsigned long ic_lsize = cpu_icache_line_size();
+
+	if (ic_lsize == 0)
+		r4k_blast_icache_user_page = (void *)cache_noop;
+	else if (ic_lsize == 16)
+		r4k_blast_icache_user_page = blast_icache16_user_page;
+	else if (ic_lsize == 32)
+		r4k_blast_icache_user_page = blast_icache32_user_page;
+	else if (ic_lsize == 64)
+		r4k_blast_icache_user_page = blast_icache64_user_page;
+}
+
+#endif
 
 static void (* r4k_blast_icache_page_indexed)(unsigned long addr);
 
@@ -355,6 +398,7 @@ static inline void local_r4k___flush_cache_all(void * args)
 {
 	switch (current_cpu_type()) {
 	case CPU_LOONGSON2:
+	case CPU_LOONGSON3:
 	case CPU_R4000SC:
 	case CPU_R4000MC:
 	case CPU_R4400SC:
@@ -519,7 +563,8 @@ static inline void local_r4k_flush_cache_page(void *args)
 	}
 
 	if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) {
-		r4k_blast_dcache_page(addr);
+		vaddr ? r4k_blast_dcache_page(addr) :
+			r4k_blast_dcache_user_page(addr);
 		if (exec && !cpu_icache_snoops_remote_store)
 			r4k_blast_scache_page(addr);
 	}
@@ -530,7 +575,8 @@ static inline void local_r4k_flush_cache_page(void *args)
 			if (cpu_context(cpu, mm) != 0)
 				drop_mmu_context(mm, cpu);
 		} else
-			r4k_blast_icache_page(addr);
+			vaddr ? r4k_blast_icache_page(addr) :
+				r4k_blast_icache_user_page(addr);
 	}
 
 	if (vaddr) {
@@ -595,6 +641,17 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
 			break;
 		}
 	}
+#ifdef CONFIG_EVA
+	/*
+	 * Due to all possible segment mappings, there might cache aliases
+	 * caused by the bootloader being in non-EVA mode, and the CPU switching
+	 * to EVA during early kernel init. It's best to flush the scache
+	 * to avoid having secondary cores fetching stale data and lead to
+	 * kernel crashes.
+	 */
+	bc_wback_inv(start, (end - start));
+	__sync();
+#endif
 }
 
 static inline void local_r4k_flush_icache_range_ipi(void *args)
@@ -617,7 +674,7 @@ static void r4k_flush_icache_range(unsigned long start, unsigned long end)
 	instruction_hazard();
 }
 
-#ifdef CONFIG_DMA_NONCOHERENT
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
 
 static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
 {
@@ -688,7 +745,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
 	bc_inv(addr, size);
 	__sync();
 }
-#endif /* CONFIG_DMA_NONCOHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
 
 /*
  * While we're protected against bad userland addresses we don't care
@@ -1010,6 +1067,33 @@ static void probe_pcache(void)
 		c->dcache.waybit = 0;
 		break;
 
+	case CPU_LOONGSON3:
+		config1 = read_c0_config1();
+		lsize = (config1 >> 19) & 7;
+		if (lsize)
+			c->icache.linesz = 2 << lsize;
+		else
+			c->icache.linesz = 0;
+		c->icache.sets = 64 << ((config1 >> 22) & 7);
+		c->icache.ways = 1 + ((config1 >> 16) & 7);
+		icache_size = c->icache.sets *
+			      c->icache.ways *
+			      c->icache.linesz;
+		c->icache.waybit = 0;
+
+		lsize = (config1 >> 10) & 7;
+		if (lsize)
+			c->dcache.linesz = 2 << lsize;
+		else
+			c->dcache.linesz = 0;
+		c->dcache.sets = 64 << ((config1 >> 13) & 7);
+		c->dcache.ways = 1 + ((config1 >> 7) & 7);
+		dcache_size = c->dcache.sets *
+			      c->dcache.ways *
+			      c->dcache.linesz;
+		c->dcache.waybit = 0;
+		break;
+
 	default:
 		if (!(config & MIPS_CONF_M))
 			panic("Don't know how to probe P-caches on this cpu.");
@@ -1113,13 +1197,21 @@ static void probe_pcache(void)
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+	case CPU_1074K:
 	case CPU_INTERAPTIV:
+	case CPU_P5600:
 	case CPU_PROAPTIV:
-		if (current_cpu_type() == CPU_74K)
+	case CPU_M5150:
+		if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K))
 			alias_74k_erratum(c);
-		if ((read_c0_config7() & (1 << 16))) {
-			/* effectively physically indexed dcache,
-			   thus no virtual aliases. */
+		if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
+		    (c->icache.waysize > PAGE_SIZE))
+			c->icache.flags |= MIPS_CACHE_ALIASES;
+		if (read_c0_config7() & MIPS_CONF7_AR) {
+			/*
+			 * Effectively physically indexed dcache,
+			 * thus no virtual aliases.
+			 */
 			c->dcache.flags |= MIPS_CACHE_PINDEX;
 			break;
 		}
@@ -1239,6 +1331,33 @@ static void __init loongson2_sc_init(void)
 	c->options |= MIPS_CPU_INCLUSIVE_CACHES;
 }
 
+static void __init loongson3_sc_init(void)
+{
+	struct cpuinfo_mips *c = &current_cpu_data;
+	unsigned int config2, lsize;
+
+	config2 = read_c0_config2();
+	lsize = (config2 >> 4) & 15;
+	if (lsize)
+		c->scache.linesz = 2 << lsize;
+	else
+		c->scache.linesz = 0;
+	c->scache.sets = 64 << ((config2 >> 8) & 15);
+	c->scache.ways = 1 + (config2 & 15);
+
+	scache_size = c->scache.sets *
+		      c->scache.ways *
+		      c->scache.linesz;
+	/* Loongson-3 has 4 cores, 1MB scache for each. scaches are shared */
+	scache_size *= 4;
+	c->scache.waybit = 0;
+	pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n",
+		scache_size >> 10, way_string[c->scache.ways], c->scache.linesz);
+	if (scache_size)
+		c->options |= MIPS_CPU_INCLUSIVE_CACHES;
+	return;
+}
+
 extern int r5k_sc_init(void);
 extern int rm7k_sc_init(void);
 extern int mips_sc_init(void);
@@ -1291,6 +1410,10 @@ static void setup_scache(void)
 		loongson2_sc_init();
 		return;
 
+	case CPU_LOONGSON3:
+		loongson3_sc_init();
+		return;
+
 	case CPU_XLP:
 		/* don't need to worry about L2, fully coherent */
 		return;
@@ -1461,6 +1584,10 @@ void r4k_cache_init(void)
 	r4k_blast_scache_page_setup();
 	r4k_blast_scache_page_indexed_setup();
 	r4k_blast_scache_setup();
+#ifdef CONFIG_EVA
+	r4k_blast_dcache_user_page_setup();
+	r4k_blast_icache_user_page_setup();
+#endif
 
 	/*
 	 * Some MIPS32 and MIPS64 processors have physically indexed caches.
@@ -1492,7 +1619,7 @@ void r4k_cache_init(void)
 	flush_icache_range = r4k_flush_icache_range;
 	local_flush_icache_range = local_r4k_flush_icache_range;
 
-#if defined(CONFIG_DMA_NONCOHERENT)
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
 	if (coherentio) {
 		_dma_cache_wback_inv	= (void *)cache_noop;
 		_dma_cache_wback	= (void *)cache_noop;
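A recurring shape in the c-r4k.c changes above is "vaddr ? r4k_blast_dcache_page(addr) : r4k_blast_dcache_user_page(addr)": with EVA, user and kernel mappings can need different flush helpers, so r4k_cache_init() installs a second set of "user page" routines. A stripped-down sketch of that dispatch, assuming a runtime eva_enabled flag in place of the kernel's compile-time CONFIG_EVA, and with all names illustrative rather than the kernel's:

#include <stdio.h>

/*
 * Illustrative only: mimics how c-r4k.c installs separate "user page"
 * cache-flush helpers when EVA is in use, then picks between the kernel
 * and user variant depending on whether a kernel vaddr is available.
 */
static void blast_dcache_page(unsigned long addr)      { printf("kernel flush @%#lx\n", addr); }
static void blast_dcache_user_page(unsigned long addr) { printf("user flush   @%#lx\n", addr); }

static void (*r4k_blast_dcache_page)(unsigned long) = blast_dcache_page;
static void (*r4k_blast_dcache_user_page)(unsigned long);

static void cache_init(int eva_enabled)
{
	/* Without EVA the user helper is simply an alias of the kernel one. */
	r4k_blast_dcache_user_page = eva_enabled ? blast_dcache_user_page
						 : blast_dcache_page;
}

static void flush_cache_page(unsigned long addr, unsigned long vaddr)
{
	/* Same shape as local_r4k_flush_cache_page() in the hunk above. */
	vaddr ? r4k_blast_dcache_page(addr) : r4k_blast_dcache_user_page(addr);
}

int main(void)
{
	cache_init(1);
	flush_cache_page(0x8000, 0x8000);	/* kernel mapping in hand */
	flush_cache_page(0x8000, 0);		/* only a user mapping */
	return 0;
}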
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index fde7e56d13fe..e422b38d3113 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -49,7 +49,7 @@ EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
 EXPORT_SYMBOL(flush_data_cache_page);
 EXPORT_SYMBOL(flush_icache_all);
 
-#ifdef CONFIG_DMA_NONCOHERENT
+#if defined(CONFIG_DMA_NONCOHERENT) || defined(CONFIG_DMA_MAYBE_COHERENT)
 
 /* DMA cache operations. */
 void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
@@ -58,7 +58,7 @@ void (*_dma_cache_inv)(unsigned long start, unsigned long size);
 
 EXPORT_SYMBOL(_dma_cache_wback_inv);
 
-#endif /* CONFIG_DMA_NONCOHERENT */
+#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
 
 /*
  * We could optimize the case where the cache argument is not BCACHE but
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 6b59617760c1..4fc74c78265a 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -422,10 +422,20 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
+void (*free_init_pages_eva)(void *begin, void *end) = NULL;
+
 void __init_refok free_initmem(void)
 {
 	prom_free_prom_memory();
-	free_initmem_default(POISON_FREE_INITMEM);
+	/*
+	 * Let the platform define a specific function to free the
+	 * init section since EVA may have used any possible mapping
+	 * between virtual and physical addresses.
+	 */
+	if (free_init_pages_eva)
+		free_init_pages_eva((void *)&__init_begin, (void *)&__init_end);
+	else
+		free_initmem_default(POISON_FREE_INITMEM);
 }
 
 #ifndef CONFIG_MIPS_PGD_C0_CONTEXT
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 7a56aee5fce7..99eb8fabab60 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -76,8 +76,10 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
 	case CPU_34K:
 	case CPU_74K:
 	case CPU_1004K:
+	case CPU_1074K:
 	case CPU_INTERAPTIV:
 	case CPU_PROAPTIV:
+	case CPU_P5600:
 	case CPU_BMIPS5000:
 		if (config2 & (1 << 12))
 			return 0;
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index ae4ca2450707..eeaf50f5df2b 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -48,13 +48,14 @@ extern void build_tlb_refill_handler(void);
 #endif /* CONFIG_MIPS_MT_SMTC */
 
 /*
- * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
- * unfortrunately, itlb is not totally transparent to software.
+ * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
+ * unfortunately, itlb is not totally transparent to software.
  */
 static inline void flush_itlb(void)
 {
 	switch (current_cpu_type()) {
 	case CPU_LOONGSON2:
+	case CPU_LOONGSON3:
 		write_c0_diag(4);
 		break;
 	default:
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index b234b1b5ccad..ee88367ab3ad 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -509,7 +509,10 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	switch (current_cpu_type()) {
 	case CPU_M14KC:
 	case CPU_74K:
+	case CPU_1074K:
 	case CPU_PROAPTIV:
+	case CPU_P5600:
+	case CPU_M5150:
 		break;
 
 	default:
@@ -579,6 +582,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 	case CPU_BMIPS4380:
 	case CPU_BMIPS5000:
 	case CPU_LOONGSON2:
+	case CPU_LOONGSON3:
 	case CPU_R5500:
 		if (m4kc_tlbp_war())
 			uasm_i_nop(p);
@@ -621,7 +625,7 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
 
 	default:
 		panic("No TLB refill handler yet (CPU type: %d)",
-		      current_cpu_data.cputype);
+		      current_cpu_type());
 		break;
 	}
 }
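Finally, the CONFIG_DMA_MAYBE_COHERENT changes tie into the "I/O cache coherency detected at runtime" item in the merge description: the DMA cache-maintenance hooks are plain function pointers, and r4k_cache_init() points them at cache_noop when coherentio says the platform is coherent. A simplified, self-contained sketch of that pattern (not the kernel's actual code; cache_init() and wback_inv_real() are made-up stand-ins):

#include <stdio.h>

/*
 * Illustrative only: DMA cache-maintenance hooks are function pointers,
 * and when I/O is found to be cache-coherent at boot they are pointed at
 * a no-op, so hot paths pay only an indirect call.
 */
static void cache_noop(unsigned long start, unsigned long size) { }

static void wback_inv_real(unsigned long start, unsigned long size)
{
	printf("writeback+invalidate 0x%lx..0x%lx\n", start, start + size);
}

/* Hook used by DMA mapping code; selected once at init time. */
static void (*dma_cache_wback_inv)(unsigned long, unsigned long);

static void cache_init(int coherentio)
{
	dma_cache_wback_inv = coherentio ? cache_noop : wback_inv_real;
}

int main(void)
{
	cache_init(0);			/* non-coherent platform: real flushes */
	dma_cache_wback_inv(0x1000, 0x100);

	cache_init(1);			/* coherent I/O detected: no-ops */
	dma_cache_wback_inv(0x1000, 0x100);
	return 0;
}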