author		Vineet Gupta <vgupta@synopsys.com>	2013-06-17 08:42:13 -0400
committer	Vineet Gupta <vgupta@synopsys.com>	2013-08-29 08:21:36 -0400
commit		64b703ef276964b160a5e88df0764f254460cafb (patch)
tree		686e1a89cebef90413cb3a283828012de59ba2da /arch/arc/mm
parent		4b06ff35fb1dcafbcbdcbe9ce794ab0770f2a843 (diff)
ARC: MMUv4 preps/1 - Fold PTE K/U access flags
The current ARC VM code has 13 flags in the Page Table entry: some software
(accessed/dirty/non-linear-maps) and the rest hardware specific. With an 8k
MMU page, we need 19 bits for addressing the page frame, so the remaining 13
bits are just about enough to accommodate the current flags.

In MMUv4 there are 2 additional flags, SZ (normal or super page) and WT
(cache access mode write-thru) - and additionally the PFN is 20 bits (vs. 19
before for 8k). Thus these can't be held in the current PTE w/o making each
entry 64-bit wide.

It seems there is some scope for compressing the current PTE flags (and
freeing up a few bits). Currently the PTE contains fully orthogonal, distinct
access permissions for kernel and user mode (Kr, Kw, Kx / Ur, Uw, Ux) which
can be folded into one set (R, W, X). The translation of 3 PTE bits into 6
TLB bits (when programming the MMU) can be done based on the following
pre-requisites/assumptions:

1. For kernel-mode-only translations (vmalloc: 0x7000_0000 to 0x7FFF_FFFF),
   the PTE additionally has the PAGE_GLOBAL flag set (and user space entries
   can never be global). Thus such a PTE can translate to Kr, Kw, Kx (as
   appropriate) and zero for the User mode counterparts.

2. For non-global entries, the PTE flags can be used to create mirrored K and
   U TLB bits. This is true after commit a950549c675f2c8c504 "ARC:
   copy_(to|from)_user() to honor usermode-access permissions", which ensured
   that user-space translations _MUST_ have the same access permissions for
   both U/K mode accesses, so that copy_{to,from}_user() plays fair with
   fault-based CoW break and such...

There is no such thing as a free lunch - the cost is slightly inflated
TLB-Miss handlers.

Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
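[Editor's note: a minimal standalone C sketch of the 3-bit to 6-bit fold
described above, not part of the commit. The bit positions chosen for
PTE_BITS_RWX and _PAGE_GLOBAL here are assumptions for illustration only;
the real definitions live in the ARC pgtable headers.]

/*
 * Sketch of the PTE -> TLB permission fold performed by create_tlb()
 * and CONV_PTE_TO_TLB in the diff below. Bit positions are assumed.
 */
#include <stdio.h>

#define PTE_BITS_RWX	0x7	/* assumed: X, W, R in the low 3 bits */
#define _PAGE_GLOBAL	0x8	/* assumed position, illustration only */

static unsigned int fold_rwx(unsigned int pte)
{
	unsigned int rwx = pte & PTE_BITS_RWX;

	if (pte & _PAGE_GLOBAL)
		rwx <<= 3;		/* r w x -> Kr Kw Kx 0 0 0 */
	else
		rwx |= rwx << 3;	/* r w x -> Kr Kw Kx Ur Uw Ux */

	return rwx;
}

int main(void)
{
	/* global (vmalloc) PTE: kernel bits only */
	printf("%#x\n", fold_rwx(0x7 | _PAGE_GLOBAL));	/* prints 0x38 */
	/* non-global (user) PTE: K and U bits mirrored */
	printf("%#x\n", fold_rwx(0x7));			/* prints 0x3f */
	return 0;
}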
Diffstat (limited to 'arch/arc/mm')
-rw-r--r--	arch/arc/mm/tlb.c	19
-rw-r--r--	arch/arc/mm/tlbex.S	44
2 files changed, 40 insertions, 23 deletions
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 7957dc4e4d4a..f9908341e8a7 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -341,7 +341,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
-	unsigned int idx, asid_or_sasid;
+	unsigned int idx, asid_or_sasid, rwx;
 	unsigned long pd0_flags;
 
 	/*
@@ -393,8 +393,23 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 
 	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
 
+	/*
+	 * ARC MMU provides fully orthogonal access bits for K/U mode,
+	 * however Linux only saves 1 set to save PTE real-estate
+	 * Here we convert 3 PTE bits into 6 MMU bits:
+	 *   -Kernel only entries have Kr Kw Kx 0 0 0
+	 *   -User entries have mirrored K and U bits
+	 */
+	rwx = pte_val(*ptep) & PTE_BITS_RWX;
+
+	if (pte_val(*ptep) & _PAGE_GLOBAL)
+		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
+	else
+		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
+
 	/* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
-	write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
+	write_aux_reg(ARC_REG_TLBPD1,
+			rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1));
 
 	/* First verify if entry for this vaddr+ASID already exists */
 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 9cce00e94b43..ec382e59d681 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -218,8 +218,15 @@ ex_saved_reg1:
 ; IN: r0 = PTE, r1 = ptr to PTE
 
 .macro CONV_PTE_TO_TLB
-	and r3, r0, PTE_BITS_IN_PD1	; Extract permission flags+PFN from PTE
-	sr  r3, [ARC_REG_TLBPD1]	; these go in PD1
+	and   r3, r0, PTE_BITS_RWX	;          r w x
+	lsl   r2, r3, 3			; r w x 0 0 0
+	and.f 0,  r0, _PAGE_GLOBAL
+	or.z  r2, r2, r3		; r w x r w x
+
+	and r3, r0, PTE_BITS_NON_RWX_IN_PD1	; Extract PFN+cache bits from PTE
+	or  r3, r3, r2
+
+	sr  r3, [ARC_REG_TLBPD1]	; these go in PD1
 
 	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb
 #if (CONFIG_ARC_MMU_VER <= 2)	/* Neednot be done with v3 onwards */
@@ -272,8 +279,8 @@ ARC_ENTRY EV_TLBMissI
 	;----------------------------------------------------------------
 	; VERIFY_PTE: Check if PTE permissions approp for executing code
 	cmp_s   r2, VMALLOC_START
-	mov.lo  r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE)
-	mov.hs  r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE)
+	mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
+	or.hs   r2, r2, _PAGE_GLOBAL
 
 	and     r3, r0, r2	; Mask out NON Flag bits from PTE
 	xor.f   r3, r3, r2	; check ( ( pte & flags_test ) == flags_test )
@@ -312,26 +319,21 @@ ARC_ENTRY EV_TLBMissD
 	;----------------------------------------------------------------
 	; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
 
-	mov_s   r2, 0
+	cmp_s   r2, VMALLOC_START
+	mov_s   r2, _PAGE_PRESENT	; common bit for K/U PTE
+	or.hs   r2, r2, _PAGE_GLOBAL	; kernel PTE only
+
+	; Linux PTE [RWX] bits are semantically overloaded:
+	; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
+	; -Otherwise they are user-mode permissions, and those are exactly
+	;  same for kernel mode as well (e.g. copy_(to|from)_user)
+
 	lr      r3, [ecr]
 	btst_s  r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
-	or.nz   r2, r2, _PAGE_U_READ		; chk for Read flag in PTE
+	or.nz   r2, r2, _PAGE_READ		; chk for Read flag in PTE
 	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
-	or.nz   r2, r2, _PAGE_U_WRITE		; chk for Write flag in PTE
-	; Above laddering takes care of XCHG access
-	;   which is both Read and Write
-
-	; If kernel mode access, ; make _PAGE_xx flags as _PAGE_K_xx
-	; For copy_(to|from)_user, despite exception taken in kernel mode,
-	; this code is not hit, because EFA would still be the user mode
-	; address (EFA < 0x6000_0000).
-	; This code is for legit kernel mode faults, vmalloc specifically
-	; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
-
-	lr      r3, [efa]
-	cmp     r3, VMALLOC_START - 1	; If kernel mode access
-	asl.hi  r2, r2, 3		; make _PAGE_xx flags as _PAGE_K_xx
-	or      r2, r2, _PAGE_PRESENT	; Common flag for K/U mode
+	or.nz   r2, r2, _PAGE_WRITE		; chk for Write flag in PTE
+	; Above laddering takes care of XCHG access (both R and W)
 
 	; By now, r2 setup with all the Flags we need to check in PTE
 	and     r3, r0, r2	; Mask out NON Flag bits from PTE