path: root/arch/sparc64/mm/init.c
author		David S. Miller <davem@sunset.davemloft.net>	2006-02-12 00:57:54 -0500
committer	David S. Miller <davem@sunset.davemloft.net>	2006-03-20 04:12:25 -0500
commit		c4bce90ea2069e5a87beac806de3090ab32128d5 (patch)
tree		3983a206c8060ef65ba17945d1c9f69e68d88b3d /arch/sparc64/mm/init.c
parent		490384e752a43aa281ed533e9de2da36df25c337 (diff)
[SPARC64]: Deal with PTE layout differences in SUN4V.
Yes, you heard it right, they changed the PTE layout for SUN4V.
Ho hum...

This is the simple and inefficient way to support this.
It'll get optimized, don't worry.

Signed-off-by: David S. Miller <davem@davemloft.net>
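The shape of the change, as a minimal standalone sketch: the PTE bit masks stop being compile-time constants and become paired _4U (sun4u) and _4V (sun4v) definitions, and each pte accessor picks a mask at runtime from tlb_type; that per-call branch is the "simple and inefficient" part. In the sketch below, the enum values and the four mask constants are taken from the diff, while pte_wrprotect_val() is a simplified stand-in (not the kernel's actual pte_wrprotect() signature):

/* Sketch of the runtime PTE-bit dispatch this patch introduces.
 * Mask values are the real ones from the diff below; everything
 * else is illustrative. */
#include <stdio.h>

enum sparc64_tlb_type { spitfire, cheetah, cheetah_plus, hypervisor };
static enum sparc64_tlb_type tlb_type = hypervisor;	/* set once during boot */

#define _PAGE_WRITE_4U	0x0000000000000100UL	/* sun4u: Writable SW bit */
#define _PAGE_W_4U	0x0000000000000002UL	/* sun4u: Writable (HW)   */
#define _PAGE_WRITE_4V	0x0400000000000000UL	/* sun4v: Writable SW bit */
#define _PAGE_W_4V	0x0000000000000040UL	/* sun4v: Writable (HW)   */

/* Default to the sun4u mask, override for sun4v, then apply: the same
 * three-step shape as pte_wrprotect(), pte_mkclean(), pte_mkdirty()
 * and friends in the diff below. */
static unsigned long pte_wrprotect_val(unsigned long pteval)
{
	unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U;

	if (tlb_type == hypervisor)
		mask = _PAGE_WRITE_4V | _PAGE_W_4V;

	return pteval & ~mask;
}

int main(void)
{
	/* A sun4v pte with both write bits set loses exactly those bits. */
	printf("%016lx\n", pte_wrprotect_val(0x0400000000000040UL));	/* -> 0 */
	return 0;
}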
Diffstat (limited to 'arch/sparc64/mm/init.c')
-rw-r--r--	arch/sparc64/mm/init.c	703
1 files changed, 609 insertions(+), 94 deletions(-)
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index 92756da273b..9c2fc239f3e 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -6,6 +6,7 @@
  */
 
 #include <linux/config.h>
+#include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/string.h>
@@ -118,6 +119,7 @@ unsigned long phys_base __read_mostly;
 unsigned long kern_base __read_mostly;
 unsigned long kern_size __read_mostly;
 unsigned long pfn_base __read_mostly;
+unsigned long kern_linear_pte_xor __read_mostly;
 
 /* get_new_mmu_context() uses "cache + 1". */
 DEFINE_SPINLOCK(ctx_alloc_lock);
@@ -256,6 +258,9 @@ static inline void tsb_insert(struct tsb *ent, unsigned long tag, unsigned long
 	__tsb_insert(tsb_addr, tag, pte);
 }
 
+unsigned long _PAGE_ALL_SZ_BITS __read_mostly;
+unsigned long _PAGE_SZBITS __read_mostly;
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t pte)
 {
 	struct mm_struct *mm;
@@ -398,39 +403,9 @@ struct linux_prom_translation {
 struct linux_prom_translation prom_trans[512] __read_mostly;
 unsigned int prom_trans_ents __read_mostly;
 
-extern unsigned long prom_boot_page;
-extern void prom_remap(unsigned long physpage, unsigned long virtpage, int mmu_ihandle);
-extern int prom_get_mmu_ihandle(void);
-extern void register_prom_callbacks(void);
-
 /* Exported for SMP bootup purposes. */
 unsigned long kern_locked_tte_data;
 
-/*
- * Translate PROM's mapping we capture at boot time into physical address.
- * The second parameter is only set from prom_callback() invocations.
- */
-unsigned long prom_virt_to_phys(unsigned long promva, int *error)
-{
-	int i;
-
-	for (i = 0; i < prom_trans_ents; i++) {
-		struct linux_prom_translation *p = &prom_trans[i];
-
-		if (promva >= p->virt &&
-		    promva < (p->virt + p->size)) {
-			unsigned long base = p->data & _PAGE_PADDR;
-
-			if (error)
-				*error = 0;
-			return base + (promva & (8192 - 1));
-		}
-	}
-	if (error)
-		*error = 1;
-	return 0UL;
-}
-
 /* The obp translations are saved based on 8k pagesize, since obp can
  * use a mixture of pagesizes. Misses to the LOW_OBP_ADDRESS ->
  * HI_OBP_ADDRESS range are handled in ktlb.S.
@@ -537,6 +512,8 @@ static void __init hypervisor_tlb_lock(unsigned long vaddr,
 			     "3" (arg2), "4" (arg3));
 }
 
+static unsigned long kern_large_tte(unsigned long paddr);
+
 static void __init remap_kernel(void)
 {
 	unsigned long phys_page, tte_vaddr, tte_data;
@@ -544,9 +521,7 @@ static void __init remap_kernel(void)
 
 	tte_vaddr = (unsigned long) KERNBASE;
 	phys_page = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
-	tte_data = (phys_page | (_PAGE_VALID | _PAGE_SZ4MB |
-				 _PAGE_CP | _PAGE_CV | _PAGE_P |
-				 _PAGE_L | _PAGE_W));
+	tte_data = kern_large_tte(phys_page);
 
 	kern_locked_tte_data = tte_data;
 
@@ -591,10 +566,6 @@ static void __init inherit_prom_mappings(void)
 	prom_printf("Remapping the kernel... ");
 	remap_kernel();
 	prom_printf("done.\n");
-
-	prom_printf("Registering callbacks... ");
-	register_prom_callbacks();
-	prom_printf("done.\n");
 }
 
 void prom_world(int enter)
@@ -631,63 +602,6 @@ void __flush_dcache_range(unsigned long start, unsigned long end)
 }
 #endif /* DCACHE_ALIASING_POSSIBLE */
 
-/* If not locked, zap it. */
-void __flush_tlb_all(void)
-{
-	unsigned long pstate;
-	int i;
-
-	__asm__ __volatile__("flushw\n\t"
-			     "rdpr %%pstate, %0\n\t"
-			     "wrpr %0, %1, %%pstate"
-			     : "=r" (pstate)
-			     : "i" (PSTATE_IE));
-	if (tlb_type == spitfire) {
-		for (i = 0; i < 64; i++) {
-			/* Spitfire Errata #32 workaround */
-			/* NOTE: Always runs on spitfire, so no
-			 *       cheetah+ page size encodings.
-			 */
-			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
-					     "flush %%g6"
-					     : /* No outputs */
-					     : "r" (0),
-					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-			if (!(spitfire_get_dtlb_data(i) & _PAGE_L)) {
-				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-						     "membar #Sync"
-						     : /* no outputs */
-						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
-				spitfire_put_dtlb_data(i, 0x0UL);
-			}
-
-			/* Spitfire Errata #32 workaround */
-			/* NOTE: Always runs on spitfire, so no
-			 *       cheetah+ page size encodings.
-			 */
-			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
-					     "flush %%g6"
-					     : /* No outputs */
-					     : "r" (0),
-					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
-
-			if (!(spitfire_get_itlb_data(i) & _PAGE_L)) {
-				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
-						     "membar #Sync"
-						     : /* no outputs */
-						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
-				spitfire_put_itlb_data(i, 0x0UL);
-			}
-		}
-	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
-		cheetah_flush_dtlb_all();
-		cheetah_flush_itlb_all();
-	}
-	__asm__ __volatile__("wrpr %0, 0, %%pstate"
-			     : : "r" (pstate));
-}
-
 /* Caller does TLB context flushing on local CPU if necessary.
  * The caller also ensures that CTX_VALID(mm->context) is false.
  *
@@ -1180,6 +1094,9 @@ extern void sun4v_patch_tlb_handlers(void);
 static unsigned long last_valid_pfn;
 pgd_t swapper_pg_dir[2048];
 
+static void sun4u_pgprot_init(void);
+static void sun4v_pgprot_init(void);
+
 void __init paging_init(void)
 {
 	unsigned long end_pfn, pages_avail, shift;
@@ -1188,6 +1105,11 @@ void __init paging_init(void)
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
+	if (tlb_type == hypervisor)
+		sun4v_pgprot_init();
+	else
+		sun4u_pgprot_init();
+
 	if (tlb_type == cheetah_plus ||
 	    tlb_type == hypervisor)
 		tsb_phys_patch();
@@ -1411,3 +1333,596 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 	}
 }
 #endif
+
+/* SUN4U pte bits... */
+#define _PAGE_SZ4MB_4U	0x6000000000000000 /* 4MB Page */
+#define _PAGE_SZ512K_4U	0x4000000000000000 /* 512K Page */
+#define _PAGE_SZ64K_4U	0x2000000000000000 /* 64K Page */
+#define _PAGE_SZ8K_4U	0x0000000000000000 /* 8K Page */
+#define _PAGE_NFO_4U	0x1000000000000000 /* No Fault Only */
+#define _PAGE_IE_4U	0x0800000000000000 /* Invert Endianness */
+#define _PAGE_SOFT2_4U	0x07FC000000000000 /* Software bits, set 2 */
+#define _PAGE_RES1_4U	0x0002000000000000 /* Reserved */
+#define _PAGE_SZ32MB_4U	0x0001000000000000 /* (Panther) 32MB page */
+#define _PAGE_SZ256MB_4U	0x2001000000000000 /* (Panther) 256MB page */
+#define _PAGE_SN_4U	0x0000800000000000 /* (Cheetah) Snoop */
+#define _PAGE_RES2_4U	0x0000780000000000 /* Reserved */
+#define _PAGE_PADDR_4U	0x000007FFFFFFE000 /* (Cheetah) paddr[42:13] */
+#define _PAGE_SOFT_4U	0x0000000000001F80 /* Software bits: */
+#define _PAGE_EXEC_4U	0x0000000000001000 /* Executable SW bit */
+#define _PAGE_MODIFIED_4U	0x0000000000000800 /* Modified (dirty) */
+#define _PAGE_FILE_4U	0x0000000000000800 /* Pagecache page */
+#define _PAGE_ACCESSED_4U	0x0000000000000400 /* Accessed (ref'd) */
+#define _PAGE_READ_4U	0x0000000000000200 /* Readable SW Bit */
+#define _PAGE_WRITE_4U	0x0000000000000100 /* Writable SW Bit */
+#define _PAGE_PRESENT_4U	0x0000000000000080 /* Present */
+#define _PAGE_L_4U	0x0000000000000040 /* Locked TTE */
+#define _PAGE_CP_4U	0x0000000000000020 /* Cacheable in P-Cache */
+#define _PAGE_CV_4U	0x0000000000000010 /* Cacheable in V-Cache */
+#define _PAGE_E_4U	0x0000000000000008 /* side-Effect */
+#define _PAGE_P_4U	0x0000000000000004 /* Privileged Page */
+#define _PAGE_W_4U	0x0000000000000002 /* Writable */
+
+/* SUN4V pte bits... */
+#define _PAGE_NFO_4V	0x4000000000000000 /* No Fault Only */
+#define _PAGE_SOFT2_4V	0x3F00000000000000 /* Software bits, set 2 */
+#define _PAGE_MODIFIED_4V	0x2000000000000000 /* Modified (dirty) */
+#define _PAGE_ACCESSED_4V	0x1000000000000000 /* Accessed (ref'd) */
+#define _PAGE_READ_4V	0x0800000000000000 /* Readable SW Bit */
+#define _PAGE_WRITE_4V	0x0400000000000000 /* Writable SW Bit */
+#define _PAGE_PADDR_4V	0x00FFFFFFFFFFE000 /* paddr[55:13] */
+#define _PAGE_IE_4V	0x0000000000001000 /* Invert Endianness */
+#define _PAGE_E_4V	0x0000000000000800 /* side-Effect */
+#define _PAGE_CP_4V	0x0000000000000400 /* Cacheable in P-Cache */
+#define _PAGE_CV_4V	0x0000000000000200 /* Cacheable in V-Cache */
+#define _PAGE_P_4V	0x0000000000000100 /* Privileged Page */
+#define _PAGE_EXEC_4V	0x0000000000000080 /* Executable Page */
+#define _PAGE_W_4V	0x0000000000000040 /* Writable */
+#define _PAGE_SOFT_4V	0x0000000000000030 /* Software bits */
+#define _PAGE_FILE_4V	0x0000000000000020 /* Pagecache page */
+#define _PAGE_PRESENT_4V	0x0000000000000010 /* Present */
+#define _PAGE_RESV_4V	0x0000000000000008 /* Reserved */
+#define _PAGE_SZ16GB_4V	0x0000000000000007 /* 16GB Page */
+#define _PAGE_SZ2GB_4V	0x0000000000000006 /* 2GB Page */
+#define _PAGE_SZ256MB_4V	0x0000000000000005 /* 256MB Page */
+#define _PAGE_SZ32MB_4V	0x0000000000000004 /* 32MB Page */
+#define _PAGE_SZ4MB_4V	0x0000000000000003 /* 4MB Page */
+#define _PAGE_SZ512K_4V	0x0000000000000002 /* 512K Page */
+#define _PAGE_SZ64K_4V	0x0000000000000001 /* 64K Page */
+#define _PAGE_SZ8K_4V	0x0000000000000000 /* 8K Page */
+
+#if PAGE_SHIFT == 13
+#define _PAGE_SZBITS_4U	_PAGE_SZ8K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ8K_4V
+#elif PAGE_SHIFT == 16
+#define _PAGE_SZBITS_4U	_PAGE_SZ64K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ64K_4V
+#elif PAGE_SHIFT == 19
+#define _PAGE_SZBITS_4U	_PAGE_SZ512K_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ512K_4V
+#elif PAGE_SHIFT == 22
+#define _PAGE_SZBITS_4U	_PAGE_SZ4MB_4U
+#define _PAGE_SZBITS_4V	_PAGE_SZ4MB_4V
+#else
+#error Wrong PAGE_SHIFT specified
+#endif
+
+#if defined(CONFIG_HUGETLB_PAGE_SIZE_4MB)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ4MB_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ4MB_4V
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_512K)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ512K_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ512K_4V
+#elif defined(CONFIG_HUGETLB_PAGE_SIZE_64K)
+#define _PAGE_SZHUGE_4U	_PAGE_SZ64K_4U
+#define _PAGE_SZHUGE_4V	_PAGE_SZ64K_4V
+#endif
+
+#define _PAGE_CACHE_4U	(_PAGE_CP_4U | _PAGE_CV_4U)
+#define _PAGE_CACHE_4V	(_PAGE_CP_4V | _PAGE_CV_4V)
+#define __DIRTY_BITS_4U	(_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
+#define __DIRTY_BITS_4V	(_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
+#define __ACCESS_BITS_4U	(_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
+#define __ACCESS_BITS_4V	(_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
+
+pgprot_t PAGE_KERNEL __read_mostly;
+EXPORT_SYMBOL(PAGE_KERNEL);
+
+pgprot_t PAGE_KERNEL_LOCKED __read_mostly;
+pgprot_t PAGE_COPY __read_mostly;
+pgprot_t PAGE_EXEC __read_mostly;
+unsigned long pg_iobits __read_mostly;
+
+unsigned long _PAGE_IE __read_mostly;
+unsigned long _PAGE_E __read_mostly;
+unsigned long _PAGE_CACHE __read_mostly;
+
+static void prot_init_common(unsigned long page_none,
+			     unsigned long page_shared,
+			     unsigned long page_copy,
+			     unsigned long page_readonly,
+			     unsigned long page_exec_bit)
+{
+	PAGE_COPY = __pgprot(page_copy);
+
+	protection_map[0x0] = __pgprot(page_none);
+	protection_map[0x1] = __pgprot(page_readonly & ~page_exec_bit);
+	protection_map[0x2] = __pgprot(page_copy & ~page_exec_bit);
+	protection_map[0x3] = __pgprot(page_copy & ~page_exec_bit);
+	protection_map[0x4] = __pgprot(page_readonly);
+	protection_map[0x5] = __pgprot(page_readonly);
+	protection_map[0x6] = __pgprot(page_copy);
+	protection_map[0x7] = __pgprot(page_copy);
+	protection_map[0x8] = __pgprot(page_none);
+	protection_map[0x9] = __pgprot(page_readonly & ~page_exec_bit);
+	protection_map[0xa] = __pgprot(page_shared & ~page_exec_bit);
+	protection_map[0xb] = __pgprot(page_shared & ~page_exec_bit);
+	protection_map[0xc] = __pgprot(page_readonly);
+	protection_map[0xd] = __pgprot(page_readonly);
+	protection_map[0xe] = __pgprot(page_shared);
+	protection_map[0xf] = __pgprot(page_shared);
+}
+
+static void __init sun4u_pgprot_init(void)
+{
+	unsigned long page_none, page_shared, page_copy, page_readonly;
+	unsigned long page_exec_bit;
+
+	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
+				_PAGE_CACHE_4U | _PAGE_P_4U |
+				__ACCESS_BITS_4U | __DIRTY_BITS_4U |
+				_PAGE_EXEC_4U);
+	PAGE_KERNEL_LOCKED = __pgprot (_PAGE_PRESENT_4U | _PAGE_VALID |
+				       _PAGE_CACHE_4U | _PAGE_P_4U |
+				       __ACCESS_BITS_4U | __DIRTY_BITS_4U |
+				       _PAGE_EXEC_4U | _PAGE_L_4U);
+	PAGE_EXEC = __pgprot(_PAGE_EXEC_4U);
+
+	_PAGE_IE = _PAGE_IE_4U;
+	_PAGE_E = _PAGE_E_4U;
+	_PAGE_CACHE = _PAGE_CACHE_4U;
+
+	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4U | __DIRTY_BITS_4U |
+		     __ACCESS_BITS_4U | _PAGE_E_4U);
+
+	kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^
+		0xfffff80000000000;
+	kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U |
+				_PAGE_P_4U | _PAGE_W_4U);
+
+	_PAGE_SZBITS = _PAGE_SZBITS_4U;
+	_PAGE_ALL_SZ_BITS = (_PAGE_SZ4MB_4U | _PAGE_SZ512K_4U |
+			     _PAGE_SZ64K_4U | _PAGE_SZ8K_4U |
+			     _PAGE_SZ32MB_4U | _PAGE_SZ256MB_4U);
+
+
+	page_none = _PAGE_PRESENT_4U | _PAGE_ACCESSED_4U | _PAGE_CACHE_4U;
+	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+		       __ACCESS_BITS_4U | _PAGE_WRITE_4U | _PAGE_EXEC_4U);
+	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+		     __ACCESS_BITS_4U | _PAGE_EXEC_4U);
+	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4U | _PAGE_CACHE_4U |
+			 __ACCESS_BITS_4U | _PAGE_EXEC_4U);
+
+	page_exec_bit = _PAGE_EXEC_4U;
+
+	prot_init_common(page_none, page_shared, page_copy, page_readonly,
+			 page_exec_bit);
+}
+
+static void __init sun4v_pgprot_init(void)
+{
+	unsigned long page_none, page_shared, page_copy, page_readonly;
+	unsigned long page_exec_bit;
+
+	PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
+				_PAGE_CACHE_4V | _PAGE_P_4V |
+				__ACCESS_BITS_4V | __DIRTY_BITS_4V |
+				_PAGE_EXEC_4V);
+	PAGE_KERNEL_LOCKED = PAGE_KERNEL;
+	PAGE_EXEC = __pgprot(_PAGE_EXEC_4V);
+
+	_PAGE_IE = _PAGE_IE_4V;
+	_PAGE_E = _PAGE_E_4V;
+	_PAGE_CACHE = _PAGE_CACHE_4V;
+
+	kern_linear_pte_xor = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
+		0xfffff80000000000;
+	kern_linear_pte_xor |= (_PAGE_CP_4V | _PAGE_CV_4V |
+				_PAGE_P_4V | _PAGE_W_4V);
+
+	pg_iobits = (_PAGE_VALID | _PAGE_PRESENT_4V | __DIRTY_BITS_4V |
+		     __ACCESS_BITS_4V | _PAGE_E_4V);
+
+	_PAGE_SZBITS = _PAGE_SZBITS_4V;
+	_PAGE_ALL_SZ_BITS = (_PAGE_SZ16GB_4V | _PAGE_SZ2GB_4V |
+			     _PAGE_SZ256MB_4V | _PAGE_SZ32MB_4V |
+			     _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
+			     _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
+
+	page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
+	page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+		       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
+	page_copy = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+		     __ACCESS_BITS_4V | _PAGE_EXEC_4V);
+	page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+			 __ACCESS_BITS_4V | _PAGE_EXEC_4V);
+
+	page_exec_bit = _PAGE_EXEC_4V;
+
+	prot_init_common(page_none, page_shared, page_copy, page_readonly,
+			 page_exec_bit);
+}
+
+unsigned long pte_sz_bits(unsigned long sz)
+{
+	if (tlb_type == hypervisor) {
+		switch (sz) {
+		case 8 * 1024:
+		default:
+			return _PAGE_SZ8K_4V;
+		case 64 * 1024:
+			return _PAGE_SZ64K_4V;
+		case 512 * 1024:
+			return _PAGE_SZ512K_4V;
+		case 4 * 1024 * 1024:
+			return _PAGE_SZ4MB_4V;
+		};
+	} else {
+		switch (sz) {
+		case 8 * 1024:
+		default:
+			return _PAGE_SZ8K_4U;
+		case 64 * 1024:
+			return _PAGE_SZ64K_4U;
+		case 512 * 1024:
+			return _PAGE_SZ512K_4U;
+		case 4 * 1024 * 1024:
+			return _PAGE_SZ4MB_4U;
+		};
+	}
+}
+
+pte_t mk_pte_io(unsigned long page, pgprot_t prot, int space, unsigned long page_size)
+{
+	pte_t pte;
+	if (tlb_type == hypervisor) {
+		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4V) &
+				~(unsigned long)_PAGE_CACHE_4V);
+	} else {
+		pte_val(pte) = (((page) | pgprot_val(prot) | _PAGE_E_4U) &
+				~(unsigned long)_PAGE_CACHE_4U);
+	}
+	pte_val(pte) |= (((unsigned long)space) << 32);
+	pte_val(pte) |= pte_sz_bits(page_size);
+	return pte;
+}
+
+unsigned long pte_present(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_PRESENT_4V : _PAGE_PRESENT_4U));
+}
+
+unsigned long pte_file(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_FILE_4V : _PAGE_FILE_4U));
+}
+
+unsigned long pte_read(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_READ_4V : _PAGE_READ_4U));
+}
+
+unsigned long pte_exec(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_EXEC_4V : _PAGE_EXEC_4U));
+}
+
+unsigned long pte_write(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_WRITE_4V : _PAGE_WRITE_4U));
+}
+
+unsigned long pte_dirty(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_MODIFIED_4V : _PAGE_MODIFIED_4U));
+}
+
+unsigned long pte_young(pte_t pte)
+{
+	return (pte_val(pte) &
+		((tlb_type == hypervisor) ?
+		 _PAGE_ACCESSED_4V : _PAGE_ACCESSED_4U));
+}
+
+pte_t pte_wrprotect(pte_t pte)
+{
+	unsigned long mask = _PAGE_WRITE_4U | _PAGE_W_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_WRITE_4V | _PAGE_W_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_rdprotect(pte_t pte)
+{
+	unsigned long mask = _PAGE_R | _PAGE_READ_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_R | _PAGE_READ_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_mkclean(pte_t pte)
+{
+	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_mkold(pte_t pte)
+{
+	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_R | _PAGE_ACCESSED_4V;
+
+	return __pte(pte_val(pte) & ~mask);
+}
+
+pte_t pte_mkyoung(pte_t pte)
+{
+	unsigned long mask = _PAGE_R | _PAGE_ACCESSED_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_R | _PAGE_ACCESSED_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pte_mkwrite(pte_t pte)
+{
+	unsigned long mask = _PAGE_WRITE_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_WRITE_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pte_mkdirty(pte_t pte)
+{
+	unsigned long mask = _PAGE_MODIFIED_4U | _PAGE_W_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_MODIFIED_4V | _PAGE_W_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pte_mkhuge(pte_t pte)
+{
+	unsigned long mask = _PAGE_SZHUGE_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_SZHUGE_4V;
+
+	return __pte(pte_val(pte) | mask);
+}
+
+pte_t pgoff_to_pte(unsigned long off)
+{
+	unsigned long bit = _PAGE_FILE_4U;
+
+	if (tlb_type == hypervisor)
+		bit = _PAGE_FILE_4V;
+
+	return __pte((off << PAGE_SHIFT) | bit);
+}
+
+pgprot_t pgprot_noncached(pgprot_t prot)
+{
+	unsigned long val = pgprot_val(prot);
+	unsigned long off = _PAGE_CP_4U | _PAGE_CV_4U;
+	unsigned long on = _PAGE_E_4U;
+
+	if (tlb_type == hypervisor) {
+		off = _PAGE_CP_4V | _PAGE_CV_4V;
+		on = _PAGE_E_4V;
+	}
+
+	return __pgprot((val & ~off) | on);
+}
+
+pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+	unsigned long sz_bits = _PAGE_SZBITS_4U;
+
+	if (tlb_type == hypervisor)
+		sz_bits = _PAGE_SZBITS_4V;
+
+	return __pte((pfn << PAGE_SHIFT) | pgprot_val(prot) | sz_bits);
+}
+
+unsigned long pte_pfn(pte_t pte)
+{
+	unsigned long mask = _PAGE_PADDR_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PADDR_4V;
+
+	return (pte_val(pte) & mask) >> PAGE_SHIFT;
+}
+
+pte_t pte_modify(pte_t orig_pte, pgprot_t new_prot)
+{
+	unsigned long preserve_mask;
+	unsigned long val;
+
+	preserve_mask = (_PAGE_PADDR_4U |
+			 _PAGE_MODIFIED_4U |
+			 _PAGE_ACCESSED_4U |
+			 _PAGE_CP_4U |
+			 _PAGE_CV_4U |
+			 _PAGE_E_4U |
+			 _PAGE_PRESENT_4U |
+			 _PAGE_SZBITS_4U);
+	if (tlb_type == hypervisor)
+		preserve_mask = (_PAGE_PADDR_4V |
+				 _PAGE_MODIFIED_4V |
+				 _PAGE_ACCESSED_4V |
+				 _PAGE_CP_4V |
+				 _PAGE_CV_4V |
+				 _PAGE_E_4V |
+				 _PAGE_PRESENT_4V |
+				 _PAGE_SZBITS_4V);
+
+	val = (pte_val(orig_pte) & preserve_mask);
+
+	return __pte(val | (pgprot_val(new_prot) & ~preserve_mask));
+}
+
+static unsigned long kern_large_tte(unsigned long paddr)
+{
+	unsigned long val;
+
+	val = (_PAGE_VALID | _PAGE_SZ4MB_4U |
+	       _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_P_4U |
+	       _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
+	if (tlb_type == hypervisor)
+		val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
+		       _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
+		       _PAGE_EXEC_4V | _PAGE_W_4V);
+
+	return val | paddr;
+}
+
+/*
+ * Translate PROM's mapping we capture at boot time into physical address.
+ * The second parameter is only set from prom_callback() invocations.
+ */
+unsigned long prom_virt_to_phys(unsigned long promva, int *error)
+{
+	unsigned long mask;
+	int i;
+
+	mask = _PAGE_PADDR_4U;
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PADDR_4V;
+
+	for (i = 0; i < prom_trans_ents; i++) {
+		struct linux_prom_translation *p = &prom_trans[i];
+
+		if (promva >= p->virt &&
+		    promva < (p->virt + p->size)) {
+			unsigned long base = p->data & mask;
+
+			if (error)
+				*error = 0;
+			return base + (promva & (8192 - 1));
+		}
+	}
+	if (error)
+		*error = 1;
+	return 0UL;
+}
+
+/* XXX We should kill off this ugly thing at some point. XXX */
+unsigned long sun4u_get_pte(unsigned long addr)
+{
+	pgd_t *pgdp;
+	pud_t *pudp;
+	pmd_t *pmdp;
+	pte_t *ptep;
+	unsigned long mask = _PAGE_PADDR_4U;
+
+	if (tlb_type == hypervisor)
+		mask = _PAGE_PADDR_4V;
+
+	if (addr >= PAGE_OFFSET)
+		return addr & mask;
+
+	if ((addr >= LOW_OBP_ADDRESS) && (addr < HI_OBP_ADDRESS))
+		return prom_virt_to_phys(addr, NULL);
+
+	pgdp = pgd_offset_k(addr);
+	pudp = pud_offset(pgdp, addr);
+	pmdp = pmd_offset(pudp, addr);
+	ptep = pte_offset_kernel(pmdp, addr);
+
+	return pte_val(*ptep) & mask;
+}
+
+/* If not locked, zap it. */
+void __flush_tlb_all(void)
+{
+	unsigned long pstate;
+	int i;
+
+	__asm__ __volatile__("flushw\n\t"
+			     "rdpr %%pstate, %0\n\t"
+			     "wrpr %0, %1, %%pstate"
+			     : "=r" (pstate)
+			     : "i" (PSTATE_IE));
+	if (tlb_type == spitfire) {
+		for (i = 0; i < 64; i++) {
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no
+			 *       cheetah+ page size encodings.
+			 */
+			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
+					     "flush %%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			if (!(spitfire_get_dtlb_data(i) & _PAGE_L_4U)) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_DMMU));
+				spitfire_put_dtlb_data(i, 0x0UL);
+			}
+
+			/* Spitfire Errata #32 workaround */
+			/* NOTE: Always runs on spitfire, so no
+			 *       cheetah+ page size encodings.
+			 */
+			__asm__ __volatile__("stxa %0, [%1] %2\n\t"
+					     "flush %%g6"
+					     : /* No outputs */
+					     : "r" (0),
+					       "r" (PRIMARY_CONTEXT), "i" (ASI_DMMU));
+
+			if (!(spitfire_get_itlb_data(i) & _PAGE_L_4U)) {
+				__asm__ __volatile__("stxa %%g0, [%0] %1\n\t"
+						     "membar #Sync"
+						     : /* no outputs */
+						     : "r" (TLB_TAG_ACCESS), "i" (ASI_IMMU));
+				spitfire_put_itlb_data(i, 0x0UL);
+			}
+		}
+	} else if (tlb_type == cheetah || tlb_type == cheetah_plus) {
+		cheetah_flush_dtlb_all();
+		cheetah_flush_itlb_all();
+	}
+	__asm__ __volatile__("wrpr %0, 0, %%pstate"
+			     : : "r" (pstate));
+}
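
A note on kern_linear_pte_xor, the other new global: assuming (as in the sparc64 ktlb.S miss handler of this era) that the kernel linear mapping lives at PAGE_OFFSET = 0xfffff80000000000 and is mapped with 4MB TTEs, a single xor of the 4MB-aligned faulting virtual address against this value yields a complete TTE, because the PAGE_OFFSET bits cancel and leave the valid/size/attribute bits behind. A hedged C sketch using the sun4u values from sun4u_pgprot_init() above:

/* Illustrative only: the real consumer is assembly in ktlb.S.
 * The _4U constants are copied from the diff; PAGE_OFFSET and the
 * shared _PAGE_VALID bit are assumptions stated here, not shown
 * in this patch. */
#include <stdio.h>

#define PAGE_OFFSET	0xfffff80000000000UL
#define _PAGE_VALID	0x8000000000000000UL	/* assumed shared valid bit */
#define _PAGE_SZ4MB_4U	0x6000000000000000UL
#define _PAGE_CP_4U	0x0000000000000020UL
#define _PAGE_CV_4U	0x0000000000000010UL
#define _PAGE_P_4U	0x0000000000000004UL
#define _PAGE_W_4U	0x0000000000000002UL

int main(void)
{
	unsigned long kern_linear_pte_xor, vaddr, tte;

	/* Exactly as computed in sun4u_pgprot_init() */
	kern_linear_pte_xor  = (_PAGE_VALID | _PAGE_SZ4MB_4U) ^ PAGE_OFFSET;
	kern_linear_pte_xor |= (_PAGE_CP_4U | _PAGE_CV_4U |
				_PAGE_P_4U | _PAGE_W_4U);

	/* Linear-mapped kernel address for physical 0x12400000 */
	vaddr = PAGE_OFFSET + 0x12400000UL;

	/* One xor of the 4MB-aligned address yields valid + 4MB size
	 * + physical address + cache/priv/write attribute bits. */
	tte = (vaddr & ~(0x400000UL - 1)) ^ kern_linear_pte_xor;
	printf("tte = %016lx\n", tte);	/* e000000012400036 */
	return 0;
}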