Diffstat (limited to 'arch/powerpc/mm/slb_low.S')
 arch/powerpc/mm/slb_low.S | 50 +++++++++++++++++++++++++-------------------------
 1 file changed, 25 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index 1a16ca227757..17aa6dfceb34 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -31,10 +31,15 @@
  * No other registers are examined or changed.
  */
 _GLOBAL(slb_allocate_realmode)
-	/* r3 = faulting address */
+	/*
+	 * check for bad kernel/user address
+	 * (ea & ~REGION_MASK) >= PGTABLE_RANGE
+	 */
+	rldicr.	r9,r3,4,(63 - 46 - 4)
+	bne-	8f
 
 	srdi	r9,r3,60		/* get region */
-	srdi	r10,r3,28		/* get esid */
+	srdi	r10,r3,SID_SHIFT	/* get esid */
 	cmpldi	cr7,r9,0xc		/* cmp PAGE_OFFSET for later use */
 
 	/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
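The new `rldicr.` at function entry rotates the region nibble out of the way and tests EA bits 59..46, so any address whose offset within its region reaches PGTABLE_RANGE branches straight to the invalid-EA path at `8:`. A minimal C sketch of the predicate it computes, with REGION_MASK and PGTABLE_RANGE values assumed from this kernel series (2^46 bytes per region), not taken from the diff itself:

#include <stdint.h>
#include <stdio.h>

/* Assumed constants for this series. */
#define REGION_MASK   (0xfUL << 60)   /* top nibble selects the region */
#define PGTABLE_RANGE (1UL << 46)     /* bytes mapped per region */

/* Equivalent of rldicr. r9,r3,4,(63 - 46 - 4): any nonzero bit in
 * EA[59:46] marks the address as out of range. */
static int bad_address(uint64_t ea)
{
	return (ea & ~REGION_MASK) >= PGTABLE_RANGE;
}

int main(void)
{
	printf("%d\n", bad_address(0xc000000000000000UL)); /* 0: valid linear-map EA */
	printf("%d\n", bad_address(0xc000400000000000UL)); /* 1: bit 46 set, rejected */
	return 0;
}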
@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
  */
 _GLOBAL(slb_miss_kernel_load_linear)
 	li	r11,0
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more. slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
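Instead of stuffing a fixed kernel context into the proto-VSID with `rldimi`, the kernel paths now compute a per-region context id; since r9 already holds the region id `ea >> 60`, the `addis`/`addi` pair just adds the constant `(MAX_USER_CONTEXT - 0xc + 1)`. A worked sketch in C, with CONTEXT_BITS and MAX_USER_CONTEXT values assumed from this series:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values for this kernel series. */
#define CONTEXT_BITS     19
#define MAX_USER_CONTEXT ((1UL << CONTEXT_BITS) - 5)

/* context = MAX_USER_CONTEXT + ((ea >> 60) - 0xc) + 1 */
static uint64_t kernel_context(uint64_t ea)
{
	return (ea >> 60) + (MAX_USER_CONTEXT - 0xc + 1);
}

int main(void)
{
	/* Kernel regions 0xc..0xf land on the top four context ids,
	 * just above every user context. */
	for (uint64_t r = 0xc; r <= 0xf; r++)
		printf("region 0x%" PRIx64 " -> context 0x%" PRIx64 "\n",
		       r, kernel_context(r << 60));
	return 0;
}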
@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
 _GLOBAL(slb_miss_kernel_load_io)
 	li	r11,0
 6:
-	li	r9,0x1
 	/*
-	 * for 1T we shift 12 bits more. slb_finish_load_1T will do
-	 * the necessary adjustment
+	 * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
+	 * r9 = region id.
 	 */
-	rldimi	r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0
+	addis	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
+	addi	r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
+
 BEGIN_FTR_SECTION
 	b	slb_finish_load
 END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load_1T
 
-0:	/* user address: proto-VSID = context << 15 | ESID. First check
-	 * if the address is within the boundaries of the user region
-	 */
-	srdi.	r9,r10,USER_ESID_BITS
-	bne-	8f			/* invalid ea bits set */
-
-
+0:
 	/* when using slices, we extract the psize off the slice bitmaps
 	 * and then we need to get the sllp encoding off the mmu_psize_defs
 	 * array.
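The user path at `0:` can drop its own `srdi./bne-` range test because, with ESID_BITS covering a 2^46-byte address space, the common check at function entry rejects exactly the same EAs. A sketch of that equivalence for region-0 addresses, with SID_SHIFT and ESID_BITS assumed from this series:

#include <assert.h>
#include <stdint.h>

/* Assumed layout: ESID_BITS replaces the old USER_ESID_BITS. */
#define SID_SHIFT     28
#define ESID_BITS     18
#define REGION_MASK   (0xfUL << 60)
#define PGTABLE_RANGE (1UL << 46)  /* SID_SHIFT + ESID_BITS = 46 */

int main(void)
{
	/* For user (region 0) EAs, the new entry test and the removed
	 * per-path ESID-overflow test agree everywhere. */
	for (uint64_t ea = 0; ea < (1UL << 48); ea += 0x3b9aca07f1UL) {
		int entry_test = (ea & ~REGION_MASK) >= PGTABLE_RANGE;
		int esid_test  = ((ea >> SID_SHIFT) >> ESID_BITS) != 0;
		assert(entry_test == esid_test);
	}
	return 0;
}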
@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
 	ld	r9,PACACONTEXTID(r13)
 BEGIN_FTR_SECTION
 	cmpldi	r10,0x1000
-END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
-	rldimi	r10,r9,USER_ESID_BITS,0
-BEGIN_FTR_SECTION
 	bge	slb_finish_load_1T
 END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 	b	slb_finish_load
 
 8:	/* invalid EA */
 	li	r10,0			/* BAD_VSID */
+	li	r9,0			/* BAD_VSID */
 	li	r11,SLB_VSID_USER	/* flags don't much matter */
 	b	slb_finish_load
 
@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
 
 	/* get context to calculate proto-VSID */
 	ld	r9,PACACONTEXTID(r13)
-	rldimi	r10,r9,USER_ESID_BITS,0
-
 	/* fall through slb_finish_load */
 
 #endif /* __DISABLED__ */
@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
 /*
  * Finish loading of an SLB entry and return
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
+ * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
  */
 slb_finish_load:
+	rldimi	r10,r9,ESID_BITS,0
 	ASM_VSID_SCRAMBLE(r10,r9,256M)
 	/*
 	 * bits above VSID_BITS_256M need to be ignored from r10
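With the `rldimi` moved into `slb_finish_load` itself, callers now pass context (r9) and ESID (r10) separately, and the bad-EA path at `8:` only has to zero both registers, since proto-VSID 0 scrambles to VSID 0. A C sketch of what `rldimi r10,r9,ESID_BITS,0` followed by `ASM_VSID_SCRAMBLE(r10,r9,256M)` computes mathematically; ESID_BITS and the scramble constants are assumptions from this series, and the asm uses a multiply-and-fold rather than a literal modulo:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed constants for the 256MB-segment scramble. */
#define ESID_BITS            18
#define VSID_BITS_256M       (19 + ESID_BITS)  /* CONTEXT_BITS + ESID_BITS */
#define VSID_MULTIPLIER_256M 200730139UL       /* 28-bit prime */
#define VSID_MODULUS_256M    ((1UL << VSID_BITS_256M) - 1)

static uint64_t vsid_256m(uint64_t context, uint64_t esid)
{
	/* rldimi r10,r9,ESID_BITS,0 keeps only the low ESID_BITS of the
	 * ESID (the region nibble is overwritten by the inserted
	 * context) and places the context above them. */
	uint64_t proto = (context << ESID_BITS) |
			 (esid & ((1UL << ESID_BITS) - 1));
	return (uint64_t)(((__uint128_t)proto * VSID_MULTIPLIER_256M) %
			  VSID_MODULUS_256M);
}

int main(void)
{
	printf("0x%" PRIx64 "\n", vsid_256m(1, 0)); /* first segment of context 1 */
	printf("0x%" PRIx64 "\n", vsid_256m(0, 0)); /* 0: why 8: zeroes both r9 and r10 */
	return 0;
}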
@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
 /*
  * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
  *
- * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
+ * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
  */
 slb_finish_load_1T:
-	srdi	r10,r10,40-28	/* get 1T ESID */
+	srdi	r10,r10,(SID_SHIFT_1T - SID_SHIFT)	/* get 1T ESID */
+	rldimi	r10,r9,ESID_BITS_1T,0
 	ASM_VSID_SCRAMBLE(r10,r9,1T)
 	/*
 	 * bits above VSID_BITS_1T need to be ignored from r10
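The 1T variant receives the same 256MB ESID in r10; the `srdi` by `(SID_SHIFT_1T - SID_SHIFT)` = 12 converts it to a 1T ESID before the context is merged in. A sketch of the proto-VSID construction, with segment constants and the example context id assumed from this series:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed 1T-segment constants. */
#define SID_SHIFT    28
#define SID_SHIFT_1T 40
#define ESID_BITS_1T 6

static uint64_t proto_vsid_1t(uint64_t context, uint64_t esid_256m)
{
	/* srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT): 256MB ESID -> 1T ESID */
	uint64_t esid_1t = esid_256m >> (SID_SHIFT_1T - SID_SHIFT);
	/* rldimi r10,r9,ESID_BITS_1T,0: context above the low 6 ESID bits */
	return (context << ESID_BITS_1T) |
	       (esid_1t & ((1UL << ESID_BITS_1T) - 1));
}

int main(void)
{
	uint64_t ea = 0xd000000000000000UL; /* vmalloc region, for example */
	printf("0x%" PRIx64 "\n",
	       proto_vsid_1t(0x7fffdUL, ea >> SID_SHIFT)); /* context id assumed */
	return 0;
}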
