 arch/arc/include/asm/pgtable.h | 39
 arch/arc/mm/tlb.c              | 19
 arch/arc/mm/tlbex.S            | 44
 3 files changed, 54 insertions(+), 48 deletions(-)
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 4749a0eee1cf..99799c91ca00 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -57,27 +57,21 @@
 
 #define _PAGE_ACCESSED      (1<<1)	/* Page is accessed (S) */
 #define _PAGE_CACHEABLE     (1<<2)	/* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<3)	/* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<4)	/* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<5)	/* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<6)	/* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<7)	/* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<8)	/* Page has kernel perm (H) */
+#define _PAGE_EXECUTE       (1<<3)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<4)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<5)	/* Page has user read perm (H) */
 #define _PAGE_GLOBAL        (1<<9)	/* Page is global (H) */
 #define _PAGE_MODIFIED      (1<<10)	/* Page modified (dirty) (S) */
 #define _PAGE_FILE          (1<<10)	/* page cache/ swap (S) */
 #define _PAGE_PRESENT       (1<<11)	/* TLB entry is valid (H) */
 
-#else
+#else	/* MMU v3 onwards */
 
 /* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)	/* Page is cached (H) */
-#define _PAGE_U_EXECUTE     (1<<1)	/* Page has user execute perm (H) */
-#define _PAGE_U_WRITE       (1<<2)	/* Page has user write perm (H) */
-#define _PAGE_U_READ        (1<<3)	/* Page has user read perm (H) */
-#define _PAGE_K_EXECUTE     (1<<4)	/* Page has kernel execute perm (H) */
-#define _PAGE_K_WRITE       (1<<5)	/* Page has kernel write perm (H) */
-#define _PAGE_K_READ        (1<<6)	/* Page has kernel perm (H) */
+#define _PAGE_EXECUTE       (1<<1)	/* Page has user execute perm (H) */
+#define _PAGE_WRITE         (1<<2)	/* Page has user write perm (H) */
+#define _PAGE_READ          (1<<3)	/* Page has user read perm (H) */
 #define _PAGE_ACCESSED      (1<<7)	/* Page is accessed (S) */
 
 /* PD0 */
@@ -92,8 +86,8 @@
 #define _PAGE_SHARED_CODE_H (1<<31)	/* Hardware counterpart of above */
 #endif
 
-/* Kernel allowed all permissions for all pages */
-#define _K_PAGE_PERMS  (_PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ | \
+/* vmalloc permissions */
+#define _K_PAGE_PERMS  (_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
 			_PAGE_GLOBAL | _PAGE_PRESENT)
 
 #ifdef CONFIG_ARC_CACHE_PAGES
@@ -109,10 +103,6 @@
  */
 #define ___DEF (_PAGE_PRESENT | _PAGE_DEF_CACHEABLE)
 
-#define _PAGE_READ	(_PAGE_U_READ | _PAGE_K_READ)
-#define _PAGE_WRITE	(_PAGE_U_WRITE | _PAGE_K_WRITE)
-#define _PAGE_EXECUTE	(_PAGE_U_EXECUTE | _PAGE_K_EXECUTE)
-
 /* Set of bits not changed in pte_modify */
 #define _PAGE_CHG_MASK	(PAGE_MASK | _PAGE_ACCESSED | _PAGE_MODIFIED)
 
@@ -126,8 +116,8 @@
 
 #define PAGE_SHARED	PAGE_U_W_R
 
-/* While kernel runs out of unstrslated space, vmalloc/modules use a chunk of
- * kernel vaddr space - visible in all addr spaces, but kernel mode only
+/* While kernel runs out of unstranslated space, vmalloc/modules use a chunk of
+ * user vaddr space - visible in all addr spaces, but kernel mode only
  * Thus Global, all-kernel-access, no-user-access, cached
  */
 #define PAGE_KERNEL          __pgprot(_K_PAGE_PERMS | _PAGE_DEF_CACHEABLE)
@@ -136,10 +126,9 @@
 #define PAGE_KERNEL_NO_CACHE __pgprot(_K_PAGE_PERMS)
 
 /* Masks for actual TLB "PD"s */
 #define PTE_BITS_IN_PD0		(_PAGE_GLOBAL | _PAGE_PRESENT)
-#define PTE_BITS_IN_PD1		(PAGE_MASK | _PAGE_CACHEABLE | \
-				 _PAGE_U_EXECUTE | _PAGE_U_WRITE | _PAGE_U_READ | \
-				 _PAGE_K_EXECUTE | _PAGE_K_WRITE | _PAGE_K_READ)
+#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
+#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)
 
 /**************************************************************************
  * Mapping of vm_flags (Generic VM) to PTE flags (arch specific)
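The pgtable.h hunks above collapse the separate user/kernel permission bits into one R/W/X set: a kernel-only (vmalloc) mapping is now distinguished solely by _PAGE_GLOBAL, and PD1 is described by two disjoint masks instead of one. Below is a minimal user-space sketch of those relationships; the MMU v3 R/W/X and cache bit positions are taken from the hunk, while _PAGE_GLOBAL, _PAGE_PRESENT and the 4K PAGE_MASK are placeholders for definitions outside the lines shown.

#include <assert.h>

#define PAGE_MASK		(~0xfffUL)	/* placeholder: 4K pages */
#define _PAGE_CACHEABLE		(1 << 0)
#define _PAGE_EXECUTE		(1 << 1)
#define _PAGE_WRITE		(1 << 2)
#define _PAGE_READ		(1 << 3)
#define _PAGE_GLOBAL		(1 << 8)	/* placeholder position (PD0 section) */
#define _PAGE_PRESENT		(1 << 9)	/* placeholder position (PD0 section) */

/* one R/W/X set; "kernel only" is conveyed by _PAGE_GLOBAL rather than a
 * parallel set of _PAGE_K_* bits */
#define _K_PAGE_PERMS		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ | \
				 _PAGE_GLOBAL | _PAGE_PRESENT)

#define PTE_BITS_RWX		(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)
#define PTE_BITS_NON_RWX_IN_PD1	(PAGE_MASK | _PAGE_CACHEABLE)

int main(void)
{
	/* the two new PD1 masks are disjoint, so PD1 can later be rebuilt
	 * as "expanded rwx | (pte & non-rwx)", as the tlb.c hunk below does */
	assert((PTE_BITS_RWX & PTE_BITS_NON_RWX_IN_PD1) == 0);

	/* a vmalloc PTE built from _K_PAGE_PERMS is recognisable purely by
	 * _PAGE_GLOBAL; user PTEs never carry it */
	assert(_K_PAGE_PERMS & _PAGE_GLOBAL);
	return 0;
}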
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index 7957dc4e4d4a..f9908341e8a7 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -341,7 +341,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
 void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
 	unsigned long flags;
-	unsigned int idx, asid_or_sasid;
+	unsigned int idx, asid_or_sasid, rwx;
 	unsigned long pd0_flags;
 
 	/*
@@ -393,8 +393,23 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 
 	write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
 
+	/*
+	 * ARC MMU provides fully orthogonal access bits for K/U mode,
+	 * however Linux only saves 1 set to save PTE real-estate
+	 * Here we convert 3 PTE bits into 6 MMU bits:
+	 * -Kernel only entries have Kr Kw Kx 0 0 0
+	 * -User entries have mirrored K and U bits
+	 */
+	rwx = pte_val(*ptep) & PTE_BITS_RWX;
+
+	if (pte_val(*ptep) & _PAGE_GLOBAL)
+		rwx <<= 3;		/* r w x => Kr Kw Kx 0 0 0 */
+	else
+		rwx |= (rwx << 3);	/* r w x => Kr Kw Kx Ur Uw Ux */
+
 	/* Load remaining info in PD1 (Page Frame Addr and Kx/Kw/Kr Flags) */
-	write_aux_reg(ARC_REG_TLBPD1, (pte_val(*ptep) & PTE_BITS_IN_PD1));
+	write_aux_reg(ARC_REG_TLBPD1,
+		      rwx | (pte_val(*ptep) & PTE_BITS_NON_RWX_IN_PD1));
 
 	/* First verify if entry for this vaddr+ASID already exists */
 	write_aux_reg(ARC_REG_TLBCOMMAND, TLBProbe);
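The new create_tlb() logic above expands the single Linux R/W/X set into the six hardware permission bits. Here is a stand-alone model of just that expansion, assuming (as in the MMU v3 PD1 layout) that the kernel half sits three bits above the user half; _PAGE_GLOBAL's position below is a placeholder.

/* Stand-alone model of the rwx expansion in create_tlb() above. The flag
 * values are illustrative; only the shift-by-3 relationship matters, i.e.
 * the hardware keeps kernel (Kr Kw Kx) and user (Ur Uw Ux) bits 3 apart. */
#include <assert.h>

#define _PAGE_EXECUTE	(1 << 1)
#define _PAGE_WRITE	(1 << 2)
#define _PAGE_READ	(1 << 3)
#define _PAGE_GLOBAL	(1 << 8)	/* placeholder position */
#define PTE_BITS_RWX	(_PAGE_EXECUTE | _PAGE_WRITE | _PAGE_READ)

static unsigned int expand_rwx(unsigned int pte)
{
	unsigned int rwx = pte & PTE_BITS_RWX;

	if (pte & _PAGE_GLOBAL)
		rwx <<= 3;		/* kernel-only: Kr Kw Kx 0 0 0 */
	else
		rwx |= rwx << 3;	/* user page: Kr Kw Kx Ur Uw Ux */

	return rwx;
}

int main(void)
{
	/* global (vmalloc) page: user half of the permission bits stays clear */
	assert(expand_rwx(_PAGE_GLOBAL | PTE_BITS_RWX) == (PTE_BITS_RWX << 3));
	/* user page: kernel half mirrors the user half */
	assert(expand_rwx(_PAGE_READ) == (_PAGE_READ | (_PAGE_READ << 3)));
	return 0;
}

The same two cases (shift for global pages, mirror for user pages) are what the and.f/or.z pair implements in the CONV_PTE_TO_TLB hunk below.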
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index 9cce00e94b43..ec382e59d681 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -218,8 +218,15 @@ ex_saved_reg1:
 ; IN: r0 = PTE, r1 = ptr to PTE
 
 .macro CONV_PTE_TO_TLB
-	and r3, r0, PTE_BITS_IN_PD1	; Extract permission flags+PFN from PTE
-	sr  r3, [ARC_REG_TLBPD1]	; these go in PD1
+	and    r3, r0, PTE_BITS_RWX	;          r w x
+	lsl    r2, r3, 3		; r w x 0 0 0
+	and.f  0,  r0, _PAGE_GLOBAL
+	or.z   r2, r2, r3		; r w x r w x
+
+	and r3, r0, PTE_BITS_NON_RWX_IN_PD1	; Extract PFN+cache bits from PTE
+	or  r3, r3, r2
+
+	sr  r3, [ARC_REG_TLBPD1]	; these go in PD1
 
 	and r2, r0, PTE_BITS_IN_PD0	; Extract other PTE flags: (V)alid, (G)lb
 #if (CONFIG_ARC_MMU_VER <= 2)   /* Neednot be done with v3 onwards */
@@ -272,8 +279,8 @@ ARC_ENTRY EV_TLBMissI
 	;----------------------------------------------------------------
 	; VERIFY_PTE: Check if PTE permissions approp for executing code
 	cmp_s   r2, VMALLOC_START
-	mov.lo  r2, (_PAGE_PRESENT | _PAGE_U_EXECUTE)
-	mov.hs  r2, (_PAGE_PRESENT | _PAGE_K_EXECUTE)
+	mov_s   r2, (_PAGE_PRESENT | _PAGE_EXECUTE)
+	or.hs   r2, r2, _PAGE_GLOBAL
 
 	and     r3, r0, r2	; Mask out NON Flag bits from PTE
 	xor.f   r3, r3, r2	; check ( ( pte & flags_test ) == flags_test )
@@ -312,26 +319,21 @@ ARC_ENTRY EV_TLBMissD
 	;----------------------------------------------------------------
 	; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W)
 
-	mov_s   r2, 0
+	cmp_s   r2, VMALLOC_START
+	mov_s   r2, _PAGE_PRESENT	; common bit for K/U PTE
+	or.hs   r2, r2, _PAGE_GLOBAL	; kernel PTE only
+
+	; Linux PTE [RWX] bits are semantically overloaded:
+	; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc)
+	; -Otherwise they are user-mode permissions, and those are exactly
+	;  same for kernel mode as well (e.g. copy_(to|from)_user)
+
 	lr      r3, [ecr]
 	btst_s  r3, ECR_C_BIT_DTLB_LD_MISS	; Read Access
-	or.nz   r2, r2, _PAGE_U_READ	; chk for Read flag in PTE
+	or.nz   r2, r2, _PAGE_READ	; chk for Read flag in PTE
 	btst_s  r3, ECR_C_BIT_DTLB_ST_MISS	; Write Access
-	or.nz   r2, r2, _PAGE_U_WRITE	; chk for Write flag in PTE
-	; Above laddering takes care of XCHG access
-	;   which is both Read and Write
-
-	; If kernel mode access, ; make _PAGE_xx flags as _PAGE_K_xx
-	; For copy_(to|from)_user, despite exception taken in kernel mode,
-	; this code is not hit, because EFA would still be the user mode
-	; address (EFA < 0x6000_0000).
-	; This code is for legit kernel mode faults, vmalloc specifically
-	; (EFA: 0x7000_0000 to 0x7FFF_FFFF)
-
-	lr      r3, [efa]
-	cmp     r3, VMALLOC_START - 1	; If kernel mode access
-	asl.hi  r2, r2, 3		; make _PAGE_xx flags as _PAGE_K_xx
-	or      r2, r2, _PAGE_PRESENT	; Common flag for K/U mode
+	or.nz   r2, r2, _PAGE_WRITE	; chk for Write flag in PTE
+	; Above laddering takes care of XCHG access (both R and W)
 
 	; By now, r2 setup with all the Flags we need to check in PTE
 	and     r3, r0, r2	; Mask out NON Flag bits from PTE
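For the D-TLB miss path, the rewritten VERIFY_PTE sequence first builds the set of flags the PTE must carry (always _PAGE_PRESENT, plus _PAGE_GLOBAL for vmalloc addresses, plus _PAGE_READ and/or _PAGE_WRITE from the ECR cause bits) and then tests them with one and/xor.f pair. A hedged C rendering of that check follows; the flag values, VMALLOC_START and the reduction of ECR decoding to two booleans are simplifications, not the kernel's definitions.

/* User-space model of the D-TLB VERIFY_PTE logic above: build the set of
 * flags the PTE must contain, then test (pte & mask) == mask, mirroring the
 * "and; xor.f" pair in the assembly. Flag values are placeholders. */
#include <assert.h>
#include <stdbool.h>

#define _PAGE_WRITE	(1 << 2)
#define _PAGE_READ	(1 << 3)
#define _PAGE_GLOBAL	(1 << 8)	/* placeholder position */
#define _PAGE_PRESENT	(1 << 9)	/* placeholder position */
#define VMALLOC_START	0x70000000UL	/* placeholder, per the old comment */

static bool dtlb_perms_ok(unsigned long efa, unsigned int pte,
			  bool is_load, bool is_store)
{
	unsigned int mask = _PAGE_PRESENT;	/* common to K/U PTEs */

	if (efa >= VMALLOC_START)		/* cmp_s/or.hs: kernel PTE only */
		mask |= _PAGE_GLOBAL;
	if (is_load)				/* btst/or.nz laddering; an XCHG */
		mask |= _PAGE_READ;		/* access sets both booleans */
	if (is_store)
		mask |= _PAGE_WRITE;

	return (pte & mask) == mask;		/* and; xor.f == 0 */
}

int main(void)
{
	/* vmalloc read fault: needs PRESENT | GLOBAL | READ */
	assert(dtlb_perms_ok(0x70001000UL,
			     _PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READ,
			     true, false));
	/* user write fault against a read-only PTE must fail */
	assert(!dtlb_perms_ok(0x10000000UL, _PAGE_PRESENT | _PAGE_READ,
			      false, true));
	return 0;
}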