 arch/powerpc/include/asm/mmu-hash64.h        | 42 +++++++++++++++++++++++++++++++++++---------
 arch/powerpc/include/asm/pgtable-ppc64-4k.h  |  2 +-
 arch/powerpc/include/asm/pgtable-ppc64-64k.h |  2 +-
 arch/powerpc/include/asm/processor.h         |  4 ++--
 arch/powerpc/include/asm/sparsemem.h         |  4 ++--
 arch/powerpc/kernel/exceptions-64s.S         |  4 +++-
 arch/powerpc/mm/slb_low.S                    | 12 ++++++++++++
 7 files changed, 54 insertions(+), 16 deletions(-)
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 7cbd541f915f..23730ee65aa0 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -370,17 +370,21 @@ extern void slb_set_size(u16 size);
  * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
  */
 
-#define VSID_MULTIPLIER_256M	ASM_CONST(200730139)	/* 28-bit prime */
-#define VSID_BITS_256M		36
+/*
+ * This should be computed such that proto-VSID * vsid_multiplier
+ * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus.
+ */
+#define VSID_MULTIPLIER_256M	ASM_CONST(12538073)	/* 24-bit prime */
+#define VSID_BITS_256M		38
 #define VSID_MODULUS_256M	((1UL<<VSID_BITS_256M)-1)
 
 #define VSID_MULTIPLIER_1T	ASM_CONST(12538073)	/* 24-bit prime */
-#define VSID_BITS_1T		24
+#define VSID_BITS_1T		26
 #define VSID_MODULUS_1T		((1UL<<VSID_BITS_1T)-1)
 
 #define CONTEXT_BITS		19
-#define USER_ESID_BITS		16
-#define USER_ESID_BITS_1T	4
+#define USER_ESID_BITS		18
+#define USER_ESID_BITS_1T	6
 
 #define USER_VSID_RANGE	(1UL << (USER_ESID_BITS + SID_SHIFT))
 
@@ -500,12 +504,32 @@ typedef struct {
 })
 #endif /* 1 */
 
-/* This is only valid for addresses >= PAGE_OFFSET */
+/*
+ * This is only valid for addresses >= PAGE_OFFSET
+ * The proto-VSID space is divided into two classes:
+ *   User:   0 to 2^(CONTEXT_BITS + USER_ESID_BITS) - 1
+ *   Kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
+ *
+ * With KERNEL_START at 0xc000000000000000, the proto-VSID for
+ * the kernel ends up as 0xc00000000 (36 bits). With 64TB
+ * support we need kernel proto-VSIDs in the
+ * [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
+ */
 static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
 {
-	if (ssize == MMU_SEGSIZE_256M)
-		return vsid_scramble(ea >> SID_SHIFT, 256M);
-	return vsid_scramble(ea >> SID_SHIFT_1T, 1T);
+	unsigned long proto_vsid;
+	/*
+	 * We need to make sure proto_vsid for the kernel is
+	 * >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
+	 */
+	if (ssize == MMU_SEGSIZE_256M) {
+		proto_vsid = ea >> SID_SHIFT;
+		proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
+		return vsid_scramble(proto_vsid, 256M);
+	}
+	proto_vsid = ea >> SID_SHIFT_1T;
+	proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
+	return vsid_scramble(proto_vsid, 1T);
 }
 
 /* Returns the segment size indicator for a user address */
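A quick way to sanity-check the new constants: the largest proto-VSID is now below 2^38 and the multiplier 12538073 is below 2^24, so the product in the scramble stays under 2^62 and cannot overflow 64 bits. Below is a minimal user-space sketch of the scramble and the kernel-range bias, modeled on the kernel's vsid_scramble() fold-based reduction modulo 2^n - 1; the standalone main() and sample address are illustrative only:

```c
#include <assert.h>
#include <stdio.h>

#define VSID_MULTIPLIER_256M	12538073UL	/* 24-bit prime */
#define VSID_BITS_256M		38
#define CONTEXT_BITS		19
#define USER_ESID_BITS		18
#define SID_SHIFT		28		/* 256M segments */

/* Multiply, then fold to reduce modulo 2^bits - 1 without a divide. */
static unsigned long vsid_scramble(unsigned long protovsid,
				   unsigned long multiplier, int bits)
{
	unsigned long modulus = (1UL << bits) - 1;
	unsigned long x = protovsid * multiplier; /* < 2^38 * 2^24 = 2^62 */

	x = (x >> bits) + (x & modulus);
	return (x + ((x + 1) >> bits)) & modulus;
}

int main(void)
{
	/* Kernel proto-VSIDs get bit 37 set, placing them above all user ones. */
	unsigned long kernel_proto = (0xc000000000000000UL >> SID_SHIFT)
		| (1UL << (CONTEXT_BITS + USER_ESID_BITS));

	assert(VSID_MULTIPLIER_256M < (1UL << 24));	/* no 64-bit overflow */
	printf("kernel VSID: 0x%lx\n",
	       vsid_scramble(kernel_proto, VSID_MULTIPLIER_256M, VSID_BITS_256M));
	return 0;
}
```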
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index d6489a2c64c8..12798c9d4b4b 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -7,7 +7,7 @@
  */
 #define PTE_INDEX_SIZE  9
 #define PMD_INDEX_SIZE  7
-#define PUD_INDEX_SIZE  7
+#define PUD_INDEX_SIZE  9
 #define PGD_INDEX_SIZE  9
 
 #ifndef __ASSEMBLY__
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index 90533ddcd703..be4e2878fbc0 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -7,7 +7,7 @@
 #define PTE_INDEX_SIZE  12
 #define PMD_INDEX_SIZE  12
 #define PUD_INDEX_SIZE	0
-#define PGD_INDEX_SIZE  4
+#define PGD_INDEX_SIZE  6
 
 #ifndef __ASSEMBLY__
 #define PTE_TABLE_SIZE	(sizeof(real_pte_t) << PTE_INDEX_SIZE)
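The index-size bumps in the two pgtable headers above are what widen the virtual range: the addressable bits are the page-offset bits plus the sum of the four index sizes. A compile-time sketch of that arithmetic, assuming the usual PAGE_SHIFT of 12 for 4K and 16 for 64K pages:

```c
/* 4K pages:  12-bit page offset + PTE 9 + PMD 7 + PUD 9 + PGD 9 = 46 bits */
_Static_assert(12 + 9 + 7 + 9 + 9 == 46, "4K layout reaches 64TB (2^46)");

/* 64K pages: 16-bit page offset + PTE 12 + PMD 12 + PUD 0 + PGD 6 = 46 bits */
_Static_assert(16 + 12 + 12 + 0 + 6 == 46, "64K layout reaches 64TB (2^46)");
```

Before the patch both configurations summed to 44 bits (16TB); each gains exactly two bits.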
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 83efc6e81543..9dc5cd1fde1a 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -97,8 +97,8 @@ extern struct task_struct *last_task_used_spe;
 #endif
 
 #ifdef CONFIG_PPC64
-/* 64-bit user address space is 44-bits (16TB user VM) */
-#define TASK_SIZE_USER64 (0x0000100000000000UL)
+/* 64-bit user address space is 46-bits (64TB user VM) */
+#define TASK_SIZE_USER64 (0x0000400000000000UL)
 
 /*
  * 32-bit user address space is 4GB - 1 page
diff --git a/arch/powerpc/include/asm/sparsemem.h b/arch/powerpc/include/asm/sparsemem.h
index 0c5fa3145615..f6fc0ee813d7 100644
--- a/arch/powerpc/include/asm/sparsemem.h
+++ b/arch/powerpc/include/asm/sparsemem.h
@@ -10,8 +10,8 @@
  */
 #define SECTION_SIZE_BITS       24
 
-#define MAX_PHYSADDR_BITS       44
-#define MAX_PHYSMEM_BITS        44
+#define MAX_PHYSADDR_BITS       46
+#define MAX_PHYSMEM_BITS        46
 
 #endif /* CONFIG_SPARSEMEM */
 
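The processor.h and sparsemem.h changes keep the user virtual limit and the maximum physical address in step with the 46-bit layout: 0x0000400000000000 is exactly 1UL << 46, i.e. 64TB. A throwaway check (sketch only):

```c
#include <stdio.h>

int main(void)
{
	unsigned long task_size_user64 = 0x0000400000000000UL;

	/* matches the new MAX_PHYSADDR_BITS/MAX_PHYSMEM_BITS of 46 */
	printf("%d\n", task_size_user64 == (1UL << 46));	/* prints 1 */
	printf("%lu TB\n", task_size_user64 >> 40);		/* prints 64 TB */
	return 0;
}
```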
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 5eb00569199f..10b658ad65e1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1083,7 +1083,9 @@ _GLOBAL(do_stab_bolted)
 	rldimi	r10,r11,7,52	/* r10 = first ste of the group */
 
 	/* Calculate VSID */
-	/* This is a kernel address, so protovsid = ESID */
+	/* This is a kernel address, so protovsid = ESID | 1 << 37 */
+	li	r9,0x1
+	rldimi	r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
 	ASM_VSID_SCRAMBLE(r11, r9, 256M)
 	rldic	r9,r11,12,16	/* r9 = vsid << 12 */
 
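The two instructions added to do_stab_bolted implement the kernel-range bias from get_kernel_vsid() in assembly: with r9 = 1, `rldimi r11,r9,37,0` rotates the 1 up to bit 37 and replaces bits 63:37 of r11 (the ESID) with it, leaving the low 37 bits intact. Roughly equivalent C, as a sketch of the instruction's effect rather than kernel code:

```c
#define CONTEXT_BITS	19
#define USER_ESID_BITS	18	/* CONTEXT_BITS + USER_ESID_BITS = 37 */

/* Effect of: li r9,0x1; rldimi r11,r9,37,0  (r11 holds the kernel ESID) */
static unsigned long kernel_proto_vsid(unsigned long esid)
{
	unsigned long bit = CONTEXT_BITS + USER_ESID_BITS;

	/* keep the low 37 bits, clear everything above, set the marker bit */
	return (esid & ((1UL << bit) - 1)) | (1UL << bit);
}
```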
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index f6a262555ef2..1a16ca227757 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -56,6 +56,12 @@ _GLOBAL(slb_allocate_realmode)
 	 */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
+	li	r9,0x1
+	/*
+	 * for 1T we shift 12 bits more. slb_finish_load_1T will do
+	 * the necessary adjustment
+	 */
+	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
@@ -85,6 +91,12 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 _GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 6:
+	li	r9,0x1
+	/*
+	 * for 1T we shift 12 bits more. slb_finish_load_1T will do
+	 * the necessary adjustment
+	 */
+	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
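The repeated comment about the 1T case checks out numerically: slb_finish_load_1T shifts the proto-VSID right by a further 12 bits (SID_SHIFT_1T - SID_SHIFT = 40 - 28), which moves the marker inserted at bit 37 down to bit 25 = CONTEXT_BITS + USER_ESID_BITS_1T, exactly where the 1T kernel range begins. As a one-line compile-time check (sketch):

```c
#define CONTEXT_BITS		19
#define USER_ESID_BITS		18
#define USER_ESID_BITS_1T	6

_Static_assert(((1UL << (CONTEXT_BITS + USER_ESID_BITS)) >> 12) ==
	       (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T)),
	       "the extra 12-bit shift lands the kernel marker for 1T segments");
```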