path: root/arch/sparc/mm
author    David S. Miller <davem@davemloft.net>    2012-05-13 23:49:31 -0400
committer David S. Miller <davem@davemloft.net>    2012-05-13 23:49:31 -0400
commit    5d83d66635bb1642f3c6a3690c28ff4afdf1ae5f (patch)
tree      fb3f20377d8567af11be07c383ff21bf5fc6850a /arch/sparc/mm
parent    b25e74b1be321613bf33492cd9d2e5dd0924562d (diff)
sparc32: Move cache and TLB flushes over to method ops.
This eliminated most of the remaining users of btfixup. There are some
complications because of the special cases we have for sun4d, leon, and
some flavors of viking.

It was found that there are no cases where a flush_page_for_dma method
was not hooked up to something, so the "noflush" iommu methods were
removed.

Add some documentation to the viking_sun4d_smp_ops to describe exactly
the hardware bug which causes us to need special TLB flushing on sun4d.

Signed-off-by: David S. Miller <davem@davemloft.net>
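For reference, the "method ops" pattern the patch converts to can be shown
in miniature. The sketch below is hypothetical userspace C, not kernel code;
only the shape mirrors the patch: one const table of function pointers,
installed once at init time, replaces btfixup's boot-time instruction
patching of each call site.

    #include <stdio.h>

    struct cachetlb_ops {
            void (*cache_all)(void);
            void (*tlb_all)(void);
    };

    /* One implementation per CPU flavor, selected once at startup. */
    static void viking_cache_all(void) { puts("viking: flush caches"); }
    static void viking_tlb_all(void)   { puts("viking: flush TLB"); }

    static const struct cachetlb_ops viking_ops = {
            .cache_all = viking_cache_all,
            .tlb_all   = viking_tlb_all,
    };

    /* Plays the role of sparc32_cachetlb_ops below. */
    static const struct cachetlb_ops *cachetlb_ops;

    int main(void)
    {
            cachetlb_ops = &viking_ops;     /* what init_viking() now does */
            cachetlb_ops->cache_all();      /* former btfixup-patched call site */
            cachetlb_ops->tlb_all();
            return 0;
    }

The indirect call costs a pointer load per flush, which btfixup avoided by
patching the call instruction itself; the patch trades that micro-optimization
for ordinary, type-checked function-pointer dispatch.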
Diffstat (limited to 'arch/sparc/mm')
-rw-r--r--   arch/sparc/mm/btfixup.c    11
-rw-r--r--   arch/sparc/mm/iommu.c      38
-rw-r--r--   arch/sparc/mm/srmmu.c     534
3 files changed, 366 insertions, 217 deletions
diff --git a/arch/sparc/mm/btfixup.c b/arch/sparc/mm/btfixup.c
index 1b7aa565497e..dcbb7ffcc82e 100644
--- a/arch/sparc/mm/btfixup.c
+++ b/arch/sparc/mm/btfixup.c
@@ -38,7 +38,6 @@ static char insn_s[] __initdata = "Fixup s %p doesn't refer to an OR at %p[%08x]
 static char insn_h[] __initdata = "Fixup h %p doesn't refer to a SETHI at %p[%08x]\n";
 static char insn_a[] __initdata = "Fixup a %p doesn't refer to a SETHI nor OR at %p[%08x]\n";
 static char insn_i[] __initdata = "Fixup i %p doesn't refer to a valid instruction at %p[%08x]\n";
-static char fca_und[] __initdata = "flush_cache_all undefined in btfixup()\n";
 static char wrong_setaddr[] __initdata = "Garbled CALL/INT patch at %p[%08x,%08x,%08x]=%08x\n";
 
 #ifdef BTFIXUP_OPTIMIZE_OTHER
@@ -75,7 +74,6 @@ void __init btfixup(void)
         unsigned insn;
         unsigned *addr;
         int fmangled = 0;
-        void (*flush_cacheall)(void);
 
         if (!visited) {
                 visited++;
@@ -311,13 +309,8 @@ void __init btfixup(void)
                 p = q + count;
         }
 #ifdef CONFIG_SMP
-        flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(local_flush_cache_all);
+        local_ops->cache_all();
 #else
-        flush_cacheall = (void (*)(void))BTFIXUPVAL_CALL(flush_cache_all);
+        sparc32_cachetlb_ops->cache_all();
 #endif
-        if (!flush_cacheall) {
-                prom_printf(fca_und);
-                prom_halt();
-        }
-        (*flush_cacheall)();
 }
diff --git a/arch/sparc/mm/iommu.c b/arch/sparc/mm/iommu.c
index c64f81e370aa..720bea2c7fdd 100644
--- a/arch/sparc/mm/iommu.c
+++ b/arch/sparc/mm/iommu.c
@@ -39,8 +39,6 @@
 
 /* srmmu.c */
 extern int viking_mxcc_present;
-BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
-#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
 extern int flush_page_for_dma_global;
 static int viking_flush;
 /* viking.S */
@@ -216,11 +214,6 @@ static u32 iommu_get_scsi_one(struct device *dev, char *vaddr, unsigned int len)
         return busa + off;
 }
 
-static __u32 iommu_get_scsi_one_noflush(struct device *dev, char *vaddr, unsigned long len)
-{
-        return iommu_get_scsi_one(dev, vaddr, len);
-}
-
 static __u32 iommu_get_scsi_one_gflush(struct device *dev, char *vaddr, unsigned long len)
 {
         flush_page_for_dma(0);
@@ -238,19 +231,6 @@ static __u32 iommu_get_scsi_one_pflush(struct device *dev, char *vaddr, unsigned
         return iommu_get_scsi_one(dev, vaddr, len);
 }
 
-static void iommu_get_scsi_sgl_noflush(struct device *dev, struct scatterlist *sg, int sz)
-{
-        int n;
-
-        while (sz != 0) {
-                --sz;
-                n = (sg->length + sg->offset + PAGE_SIZE-1) >> PAGE_SHIFT;
-                sg->dma_address = iommu_get_one(dev, sg_page(sg), n) + sg->offset;
-                sg->dma_length = sg->length;
-                sg = sg_next(sg);
-        }
-}
-
 static void iommu_get_scsi_sgl_gflush(struct device *dev, struct scatterlist *sg, int sz)
 {
         int n;
@@ -426,17 +406,6 @@ static void iommu_unmap_dma_area(struct device *dev, unsigned long busa, int len
 }
 #endif
 
-static const struct sparc32_dma_ops iommu_dma_noflush_ops = {
-        .get_scsi_one = iommu_get_scsi_one_noflush,
-        .get_scsi_sgl = iommu_get_scsi_sgl_noflush,
-        .release_scsi_one = iommu_release_scsi_one,
-        .release_scsi_sgl = iommu_release_scsi_sgl,
-#ifdef CONFIG_SBUS
-        .map_dma_area = iommu_map_dma_area,
-        .unmap_dma_area = iommu_unmap_dma_area,
-#endif
-};
-
 static const struct sparc32_dma_ops iommu_dma_gflush_ops = {
         .get_scsi_one = iommu_get_scsi_one_gflush,
         .get_scsi_sgl = iommu_get_scsi_sgl_gflush,
@@ -461,12 +430,7 @@ static const struct sparc32_dma_ops iommu_dma_pflush_ops = {
 
 void __init ld_mmu_iommu(void)
 {
-        viking_flush = (BTFIXUPVAL_CALL(flush_page_for_dma) == (unsigned long)viking_flush_page);
-
-        if (!BTFIXUPVAL_CALL(flush_page_for_dma)) {
-                /* IO coherent chip */
-                sparc32_dma_ops = &iommu_dma_noflush_ops;
-        } else if (flush_page_for_dma_global) {
+        if (flush_page_for_dma_global) {
                 /* flush_page_for_dma flushes everything, no matter of what page is it */
                 sparc32_dma_ops = &iommu_dma_gflush_ops;
         } else {
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index dc398e5c71a4..cba05fa3fbc7 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -65,24 +65,20 @@ extern unsigned long last_valid_pfn;
 
 static pgd_t *srmmu_swapper_pg_dir;
 
+const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;
+
 #ifdef CONFIG_SMP
+const struct sparc32_cachetlb_ops *local_ops;
+
 #define FLUSH_BEGIN(mm)
 #define FLUSH_END
 #else
-#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
+#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
 #define FLUSH_END }
 #endif
 
-BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
-#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
-
 int flush_page_for_dma_global = 1;
 
-#ifdef CONFIG_SMP
-BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
-#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
-#endif
-
 char *srmmu_name;
 
 ctxd_t *srmmu_ctx_table_phys;
@@ -1126,7 +1122,7 @@ void __init srmmu_paging_init(void)
         srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
 #ifdef CONFIG_SMP
         /* Stop from hanging here... */
-        local_flush_tlb_all();
+        local_ops->tlb_all();
 #else
         flush_tlb_all();
 #endif
@@ -1284,6 +1280,20 @@ static void __cpuinit poke_hypersparc(void)
         clear = srmmu_get_fstatus();
 }
 
+static const struct sparc32_cachetlb_ops hypersparc_ops = {
+        .cache_all = hypersparc_flush_cache_all,
+        .cache_mm = hypersparc_flush_cache_mm,
+        .cache_page = hypersparc_flush_cache_page,
+        .cache_range = hypersparc_flush_cache_range,
+        .tlb_all = hypersparc_flush_tlb_all,
+        .tlb_mm = hypersparc_flush_tlb_mm,
+        .tlb_page = hypersparc_flush_tlb_page,
+        .tlb_range = hypersparc_flush_tlb_range,
+        .page_to_ram = hypersparc_flush_page_to_ram,
+        .sig_insns = hypersparc_flush_sig_insns,
+        .page_for_dma = hypersparc_flush_page_for_dma,
+};
+
 static void __init init_hypersparc(void)
 {
         srmmu_name = "ROSS HyperSparc";
@@ -1292,21 +1302,7 @@ static void __init init_hypersparc(void)
         init_vac_layout();
 
         is_hypersparc = 1;
-
-        BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
-
+        sparc32_cachetlb_ops = &hypersparc_ops;
 
         poke_srmmu = poke_hypersparc;
 
@@ -1352,25 +1348,24 @@ static void __cpuinit poke_cypress(void)
         srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops cypress_ops = {
+        .cache_all = cypress_flush_cache_all,
+        .cache_mm = cypress_flush_cache_mm,
+        .cache_page = cypress_flush_cache_page,
+        .cache_range = cypress_flush_cache_range,
+        .tlb_all = cypress_flush_tlb_all,
+        .tlb_mm = cypress_flush_tlb_mm,
+        .tlb_page = cypress_flush_tlb_page,
+        .tlb_range = cypress_flush_tlb_range,
+        .page_to_ram = cypress_flush_page_to_ram,
+        .sig_insns = cypress_flush_sig_insns,
+        .page_for_dma = cypress_flush_page_for_dma,
+};
+
 static void __init init_cypress_common(void)
 {
         init_vac_layout();
-
-        BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
-
-
-        BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
-        BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
-
+        sparc32_cachetlb_ops = &cypress_ops;
         poke_srmmu = poke_cypress;
 }
 
@@ -1421,6 +1416,20 @@ static void __cpuinit poke_swift(void)
         srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops swift_ops = {
+        .cache_all = swift_flush_cache_all,
+        .cache_mm = swift_flush_cache_mm,
+        .cache_page = swift_flush_cache_page,
+        .cache_range = swift_flush_cache_range,
+        .tlb_all = swift_flush_tlb_all,
+        .tlb_mm = swift_flush_tlb_mm,
+        .tlb_page = swift_flush_tlb_page,
+        .tlb_range = swift_flush_tlb_range,
+        .page_to_ram = swift_flush_page_to_ram,
+        .sig_insns = swift_flush_sig_insns,
+        .page_for_dma = swift_flush_page_for_dma,
+};
+
 #define SWIFT_MASKID_ADDR 0x10003018
 static void __init init_swift(void)
 {
@@ -1471,21 +1480,7 @@ static void __init init_swift(void)
                 break;
         }
 
-        BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
-
-
-        BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
-
+        sparc32_cachetlb_ops = &swift_ops;
         flush_page_for_dma_global = 0;
 
         /*
@@ -1618,26 +1613,25 @@ static void __cpuinit poke_turbosparc(void)
         srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops turbosparc_ops = {
+        .cache_all = turbosparc_flush_cache_all,
+        .cache_mm = turbosparc_flush_cache_mm,
+        .cache_page = turbosparc_flush_cache_page,
+        .cache_range = turbosparc_flush_cache_range,
+        .tlb_all = turbosparc_flush_tlb_all,
+        .tlb_mm = turbosparc_flush_tlb_mm,
+        .tlb_page = turbosparc_flush_tlb_page,
+        .tlb_range = turbosparc_flush_tlb_range,
+        .page_to_ram = turbosparc_flush_page_to_ram,
+        .sig_insns = turbosparc_flush_sig_insns,
+        .page_for_dma = turbosparc_flush_page_for_dma,
+};
+
 static void __init init_turbosparc(void)
 {
         srmmu_name = "Fujitsu TurboSparc";
         srmmu_modtype = TurboSparc;
-
-        BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
-        BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
-
+        sparc32_cachetlb_ops = &turbosparc_ops;
         poke_srmmu = poke_turbosparc;
 }
 
@@ -1652,6 +1646,20 @@ static void __cpuinit poke_tsunami(void)
         srmmu_set_mmureg(mreg);
 }
 
+static const struct sparc32_cachetlb_ops tsunami_ops = {
+        .cache_all = tsunami_flush_cache_all,
+        .cache_mm = tsunami_flush_cache_mm,
+        .cache_page = tsunami_flush_cache_page,
+        .cache_range = tsunami_flush_cache_range,
+        .tlb_all = tsunami_flush_tlb_all,
+        .tlb_mm = tsunami_flush_tlb_mm,
+        .tlb_page = tsunami_flush_tlb_page,
+        .tlb_range = tsunami_flush_tlb_range,
+        .page_to_ram = tsunami_flush_page_to_ram,
+        .sig_insns = tsunami_flush_sig_insns,
+        .page_for_dma = tsunami_flush_page_for_dma,
+};
+
 static void __init init_tsunami(void)
 {
         /*
@@ -1662,22 +1670,7 @@ static void __init init_tsunami(void)
 
         srmmu_name = "TI Tsunami";
         srmmu_modtype = Tsunami;
-
-        BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
-
-
-        BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
-        BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
-
+        sparc32_cachetlb_ops = &tsunami_ops;
         poke_srmmu = poke_tsunami;
 
         tsunami_setup_blockops();
@@ -1688,7 +1681,7 @@ static void __cpuinit poke_viking(void)
         unsigned long mreg = srmmu_get_mmureg();
         static int smp_catch;
 
-        if(viking_mxcc_present) {
+        if (viking_mxcc_present) {
                 unsigned long mxcc_control = mxcc_get_creg();
 
                 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
@@ -1725,6 +1718,52 @@ static void __cpuinit poke_viking(void)
         srmmu_set_mmureg(mreg);
 }
 
+static struct sparc32_cachetlb_ops viking_ops = {
+        .cache_all = viking_flush_cache_all,
+        .cache_mm = viking_flush_cache_mm,
+        .cache_page = viking_flush_cache_page,
+        .cache_range = viking_flush_cache_range,
+        .tlb_all = viking_flush_tlb_all,
+        .tlb_mm = viking_flush_tlb_mm,
+        .tlb_page = viking_flush_tlb_page,
+        .tlb_range = viking_flush_tlb_range,
+        .page_to_ram = viking_flush_page_to_ram,
+        .sig_insns = viking_flush_sig_insns,
+        .page_for_dma = viking_flush_page_for_dma,
+};
+
+#ifdef CONFIG_SMP
+/* On sun4d the cpu broadcasts local TLB flushes, so we can just
+ * perform the local TLB flush and all the other cpus will see it.
+ * But, unfortunately, there is a bug in the sun4d XBUS backplane
+ * that requires that we add some synchronization to these flushes.
+ *
+ * The bug is that the fifo which keeps track of all the pending TLB
+ * broadcasts in the system is an entry or two too small, so if we
+ * have too many going at once we'll overflow that fifo and lose a TLB
+ * flush resulting in corruption.
+ *
+ * Our workaround is to take a global spinlock around the TLB flushes,
+ * which guarentees we won't ever have too many pending.  It's a big
+ * hammer, but a semaphore like system to make sure we only have N TLB
+ * flushes going at once will require SMP locking anyways so there's
+ * no real value in trying any harder than this.
+ */
+static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
+        .cache_all = viking_flush_cache_all,
+        .cache_mm = viking_flush_cache_mm,
+        .cache_page = viking_flush_cache_page,
+        .cache_range = viking_flush_cache_range,
+        .tlb_all = sun4dsmp_flush_tlb_all,
+        .tlb_mm = sun4dsmp_flush_tlb_mm,
+        .tlb_page = sun4dsmp_flush_tlb_page,
+        .tlb_range = sun4dsmp_flush_tlb_range,
+        .page_to_ram = viking_flush_page_to_ram,
+        .sig_insns = viking_flush_sig_insns,
+        .page_for_dma = viking_flush_page_for_dma,
+};
+#endif
+
 static void __init init_viking(void)
 {
         unsigned long mreg = srmmu_get_mmureg();
@@ -1742,76 +1781,101 @@ static void __init init_viking(void)
                  * This is only necessary because of the new way in
                  * which we use the IOMMU.
                  */
-                BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
-
+                viking_ops.page_for_dma = viking_flush_page;
+#ifdef CONFIG_SMP
+                viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
+#endif
                 flush_page_for_dma_global = 0;
         } else {
                 srmmu_name = "TI Viking/MXCC";
                 viking_mxcc_present = 1;
-
                 srmmu_cache_pagetables = 1;
-
-                /* MXCC vikings lack the DMA snooping bug. */
-                BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
         }
 
-        BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
-
+        sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+                &viking_ops;
 #ifdef CONFIG_SMP
-        if (sparc_cpu_model == sun4d) {
-                BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
-                BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
-                BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
-                BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
-        } else
+        if (sparc_cpu_model == sun4d)
+                sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+                        &viking_sun4d_smp_ops;
 #endif
-        {
-                BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
-                BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
-                BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
-                BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
-        }
-
-        BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
-        BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
 
         poke_srmmu = poke_viking;
 }
 
 #ifdef CONFIG_SPARC_LEON
+static void leon_flush_cache_mm(struct mm_struct *mm)
+{
+        leon_flush_cache_all();
+}
 
-void __init poke_leonsparc(void)
+static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
 {
+        leon_flush_pcache_all(vma, page);
 }
 
-void __init init_leon(void)
+static void leon_flush_cache_range(struct vm_area_struct *vma,
+                                   unsigned long start,
+                                   unsigned long end)
 {
+        leon_flush_cache_all();
+}
 
-        srmmu_name = "LEON";
+static void leon_flush_tlb_mm(struct mm_struct *mm)
+{
+        leon_flush_tlb_all();
+}
+
+static void leon_flush_tlb_page(struct vm_area_struct *vma,
+                                unsigned long page)
+{
+        leon_flush_tlb_all();
+}
 
-        BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
-                        BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
-                        BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
-                        BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
-                        BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
-                        BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
-
-        BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
-                        BTFIXUPCALL_NOP);
-        BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
+static void leon_flush_tlb_range(struct vm_area_struct *vma,
+                                 unsigned long start,
+                                 unsigned long end)
+{
+        leon_flush_tlb_all();
+}
+
+static void leon_flush_page_to_ram(unsigned long page)
+{
+        leon_flush_cache_all();
+}
+
+static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
+{
+        leon_flush_cache_all();
+}
+
+static void leon_flush_page_for_dma(unsigned long page)
+{
+        leon_flush_dcache_all();
+}
+
+void __init poke_leonsparc(void)
+{
+}
+
+static const struct sparc32_cachetlb_ops leon_ops = {
+        .cache_all = leon_flush_cache_all,
+        .cache_mm = leon_flush_cache_mm,
+        .cache_page = leon_flush_cache_page,
+        .cache_range = leon_flush_cache_range,
+        .tlb_all = leon_flush_tlb_all,
+        .tlb_mm = leon_flush_tlb_mm,
+        .tlb_page = leon_flush_tlb_page,
+        .tlb_range = leon_flush_tlb_range,
+        .page_to_ram = leon_flush_page_to_ram,
+        .sig_insns = leon_flush_sig_insns,
+        .page_for_dma = leon_flush_page_for_dma,
+};
 
+void __init init_leon(void)
+{
+        srmmu_name = "LEON";
+        sparc32_cachetlb_ops = &leon_ops;
         poke_srmmu = poke_leonsparc;
 
         srmmu_cache_pagetables = 0;
@@ -1925,10 +1989,152 @@ static void __init get_srmmu_type(void)
 /* Local cross-calls. */
 static void smp_flush_page_for_dma(unsigned long page)
 {
-        xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
-        local_flush_page_for_dma(page);
+        xc1((smpfunc_t) local_ops->page_for_dma, page);
+        local_ops->page_for_dma(page);
+}
+
+static void smp_flush_cache_all(void)
+{
+        xc0((smpfunc_t) local_ops->cache_all);
+        local_ops->cache_all();
+}
+
+static void smp_flush_tlb_all(void)
+{
+        xc0((smpfunc_t) local_ops->tlb_all);
+        local_ops->tlb_all();
+}
+
+static void smp_flush_cache_mm(struct mm_struct *mm)
+{
+        if (mm->context != NO_CONTEXT) {
+                cpumask_t cpu_mask;
+                cpumask_copy(&cpu_mask, mm_cpumask(mm));
+                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+                if (!cpumask_empty(&cpu_mask))
+                        xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
+                local_ops->cache_mm(mm);
+        }
+}
+
+static void smp_flush_tlb_mm(struct mm_struct *mm)
+{
+        if (mm->context != NO_CONTEXT) {
+                cpumask_t cpu_mask;
+                cpumask_copy(&cpu_mask, mm_cpumask(mm));
+                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+                if (!cpumask_empty(&cpu_mask)) {
+                        xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
+                        if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
+                                cpumask_copy(mm_cpumask(mm),
+                                             cpumask_of(smp_processor_id()));
+                }
+                local_ops->tlb_mm(mm);
+        }
+}
+
+static void smp_flush_cache_range(struct vm_area_struct *vma,
+                                  unsigned long start,
+                                  unsigned long end)
+{
+        struct mm_struct *mm = vma->vm_mm;
+
+        if (mm->context != NO_CONTEXT) {
+                cpumask_t cpu_mask;
+                cpumask_copy(&cpu_mask, mm_cpumask(mm));
+                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+                if (!cpumask_empty(&cpu_mask))
+                        xc3((smpfunc_t) local_ops->cache_range,
+                            (unsigned long) vma, start, end);
+                local_ops->cache_range(vma, start, end);
+        }
+}
+
+static void smp_flush_tlb_range(struct vm_area_struct *vma,
+                                unsigned long start,
+                                unsigned long end)
+{
+        struct mm_struct *mm = vma->vm_mm;
+
+        if (mm->context != NO_CONTEXT) {
+                cpumask_t cpu_mask;
+                cpumask_copy(&cpu_mask, mm_cpumask(mm));
+                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+                if (!cpumask_empty(&cpu_mask))
+                        xc3((smpfunc_t) local_ops->tlb_range,
+                            (unsigned long) vma, start, end);
+                local_ops->tlb_range(vma, start, end);
+        }
 }
 
+static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
+{
+        struct mm_struct *mm = vma->vm_mm;
+
+        if (mm->context != NO_CONTEXT) {
+                cpumask_t cpu_mask;
+                cpumask_copy(&cpu_mask, mm_cpumask(mm));
+                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+                if (!cpumask_empty(&cpu_mask))
+                        xc2((smpfunc_t) local_ops->cache_page,
+                            (unsigned long) vma, page);
+                local_ops->cache_page(vma, page);
+        }
+}
+
+static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+        struct mm_struct *mm = vma->vm_mm;
+
+        if (mm->context != NO_CONTEXT) {
+                cpumask_t cpu_mask;
+                cpumask_copy(&cpu_mask, mm_cpumask(mm));
+                cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+                if (!cpumask_empty(&cpu_mask))
+                        xc2((smpfunc_t) local_ops->tlb_page,
+                            (unsigned long) vma, page);
+                local_ops->tlb_page(vma, page);
+        }
+}
+
+static void smp_flush_page_to_ram(unsigned long page)
+{
+        /* Current theory is that those who call this are the one's
+         * who have just dirtied their cache with the pages contents
+         * in kernel space, therefore we only run this on local cpu.
+         *
+         * XXX This experiment failed, research further... -DaveM
+         */
+#if 1
+        xc1((smpfunc_t) local_ops->page_to_ram, page);
+#endif
+        local_ops->page_to_ram(page);
+}
+
+static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
+{
+        cpumask_t cpu_mask;
+        cpumask_copy(&cpu_mask, mm_cpumask(mm));
+        cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
+        if (!cpumask_empty(&cpu_mask))
+                xc2((smpfunc_t) local_ops->sig_insns,
+                    (unsigned long) mm, insn_addr);
+        local_ops->sig_insns(mm, insn_addr);
+}
+
+static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
+        .cache_all = smp_flush_cache_all,
+        .cache_mm = smp_flush_cache_mm,
+        .cache_page = smp_flush_cache_page,
+        .cache_range = smp_flush_cache_range,
+        .tlb_all = smp_flush_tlb_all,
+        .tlb_mm = smp_flush_tlb_mm,
+        .tlb_page = smp_flush_tlb_page,
+        .tlb_range = smp_flush_tlb_range,
+        .page_to_ram = smp_flush_page_to_ram,
+        .sig_insns = smp_flush_sig_insns,
+        .page_for_dma = smp_flush_page_for_dma,
+};
 #endif
 
 /* Load up routines and constants for sun4m and sun4d mmu */
@@ -1942,44 +2148,30 @@ void __init load_mmu(void)
 
 #ifdef CONFIG_SMP
         /* El switcheroo... */
+        local_ops = sparc32_cachetlb_ops;
 
-        BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
-        BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
-        BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
-        BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
-        BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
-        BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
-        BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
-        BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
-        BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
-        BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
-        BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
-
-        BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
-        if (sparc_cpu_model != sun4d &&
-            sparc_cpu_model != sparc_leon) {
-                BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
-                BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
-                BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
-                BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
+        if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
+                smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
+                smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
+                smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
+                smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
         }
-        BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
-        BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
 
         if (poke_srmmu == poke_viking) {
                 /* Avoid unnecessary cross calls. */
-                BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
-                BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
-                BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
-                BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
-                BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
-                BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
-                BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
+                smp_cachetlb_ops.cache_all = local_ops->cache_all;
+                smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
+                smp_cachetlb_ops.cache_range = local_ops->cache_range;
+                smp_cachetlb_ops.cache_page = local_ops->cache_page;
+
+                smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
+                smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
+                smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
         }
+
+        /* It really is const after this point. */
+        sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
+                &smp_cachetlb_ops;
 #endif
 
         if (sparc_cpu_model == sun4d)
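The locking workaround described in the viking_sun4d_smp_ops comment above
can be sketched in a few lines. The code below is a hypothetical illustration
(pthreads standing in for kernel spinlocks, the hardware broadcast stubbed
out), not the kernel's sun4dsmp_flush_tlb_all:

    #include <pthread.h>

    static pthread_mutex_t tlb_flush_lock = PTHREAD_MUTEX_INITIALIZER;

    static void broadcast_tlb_flush(void)
    {
            /* On real sun4d hardware this would post a TLB-flush
             * broadcast into the XBUS fifo that the comment describes
             * as an entry or two too small. */
    }

    static void sun4d_flush_tlb_all_sketch(void)
    {
            /* Global lock: in this simplified model at most one
             * broadcast is ever in flight, so the undersized fifo can
             * never overflow and silently drop a flush. */
            pthread_mutex_lock(&tlb_flush_lock);
            broadcast_tlb_flush();
            pthread_mutex_unlock(&tlb_flush_lock);
    }

A counting semaphore admitting N-1 concurrent flushes would also prevent the
overflow, but as the comment notes it would need the same SMP locking anyway,
so the single big lock is the simpler choice.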