author      Vineet Gupta <vgupta@synopsys.com>    2013-06-17 10:14:06 -0400
committer   Vineet Gupta <vgupta@synopsys.com>    2013-08-30 00:49:12 -0400
commit      d091fcb97ff48a5cb6de19ad0881fb2c8e76dbc0 (patch)
tree        c54e1ce880e399ed6a72c054f6cf244bc3752071 /arch
parent      64b703ef276964b160a5e88df0764f254460cafb (diff)
ARC: MMUv4 preps/2 - Reshuffle PTE bits
With the previous commit freeing up PTE bits, reassign them so as to:
- Match each bit to its H/w counterpart where possible
  (e.g. MMUv2 GLOBAL/PRESENT; this avoids a shift in create_tlb(), as sketched below)
- Avoid holes in _PAGE_xxx definitions
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
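To make the first bullet concrete, here is a minimal user-space sketch of why matching the software bits to their PD0 positions lets create_tlb() drop the MMUv2-only shift. The mk_pd0_old()/mk_pd0_new() helpers and PD0_BITS_* masks are hypothetical names used only for illustration; the only facts taken from the patch are the old/new _PAGE_GLOBAL and _PAGE_PRESENT values and the removed ">> 1".

#include <stdio.h>

/* Sketch, not kernel code: old vs new way of folding the PD0-resident
 * PTE flags into the TLBPD0 value in create_tlb(). */
#define PD0_BITS_OLD   ((1UL << 9) | (1UL << 11))  /* old _PAGE_GLOBAL | _PAGE_PRESENT */
#define PD0_BITS_NEW   ((1UL << 8) | (1UL << 10))  /* new _PAGE_GLOBAL | _PAGE_PRESENT */

/* Before: the software bits sat one position above the hardware PD0 bits,
 * so the masked value needed an extra right shift on MMUv1/v2. */
static unsigned long mk_pd0_old(unsigned long vaddr, unsigned long pte, unsigned long asid)
{
        return vaddr | asid | ((pte & PD0_BITS_OLD) >> 1);
}

/* After: the software bits already match the hardware layout, a plain OR suffices. */
static unsigned long mk_pd0_new(unsigned long vaddr, unsigned long pte, unsigned long asid)
{
        return vaddr | asid | (pte & PD0_BITS_NEW);
}

int main(void)
{
        /* The same present + global page, expressed in each layout. */
        unsigned long old_pte = (1UL << 9) | (1UL << 11);
        unsigned long new_pte = (1UL << 8) | (1UL << 10);

        printf("old scheme PD0: %#lx\n", mk_pd0_old(0x2000, old_pte, 0x42));
        printf("new scheme PD0: %#lx\n", mk_pd0_new(0x2000, new_pte, 0x42));
        return 0;
}

Both calls produce the same PD0 value, which is exactly what the reshuffle buys: the masked PTE bits can now be OR'ed straight into TLBPD0 on every MMU version.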
Diffstat (limited to 'arch')
-rw-r--r--   arch/arc/include/asm/pgtable.h   22
-rw-r--r--   arch/arc/mm/tlb.c                11
-rw-r--r--   arch/arc/mm/tlbex.S               3
3 files changed, 11 insertions, 25 deletions
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 99799c91ca00..6b0b7f7ef783 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -60,30 +60,24 @@
 #define _PAGE_EXECUTE       (1<<3)   /* Page has user execute perm (H) */
 #define _PAGE_WRITE         (1<<4)   /* Page has user write perm (H) */
 #define _PAGE_READ          (1<<5)   /* Page has user read perm (H) */
-#define _PAGE_GLOBAL        (1<<9)   /* Page is global (H) */
-#define _PAGE_MODIFIED      (1<<10)  /* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<10)  /* page cache/ swap (S) */
-#define _PAGE_PRESENT       (1<<11)  /* TLB entry is valid (H) */
+#define _PAGE_MODIFIED      (1<<6)   /* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<7)   /* page cache/ swap (S) */
+#define _PAGE_GLOBAL        (1<<8)   /* Page is global (H) */
+#define _PAGE_PRESENT       (1<<10)  /* TLB entry is valid (H) */
 
 #else	/* MMU v3 onwards */
 
-/* PD1 */
 #define _PAGE_CACHEABLE     (1<<0)   /* Page is cached (H) */
 #define _PAGE_EXECUTE       (1<<1)   /* Page has user execute perm (H) */
 #define _PAGE_WRITE         (1<<2)   /* Page has user write perm (H) */
 #define _PAGE_READ          (1<<3)   /* Page has user read perm (H) */
-#define _PAGE_ACCESSED      (1<<7)   /* Page is accessed (S) */
-
-/* PD0 */
+#define _PAGE_ACCESSED      (1<<4)   /* Page is accessed (S) */
+#define _PAGE_MODIFIED      (1<<5)   /* Page modified (dirty) (S) */
+#define _PAGE_FILE          (1<<6)   /* page cache/ swap (S) */
 #define _PAGE_GLOBAL        (1<<8)   /* Page is global (H) */
 #define _PAGE_PRESENT       (1<<9)   /* TLB entry is valid (H) */
-#define _PAGE_SHARED_CODE   (1<<10)  /* Shared Code page with cmn vaddr
+#define _PAGE_SHARED_CODE   (1<<11)  /* Shared Code page with cmn vaddr
                                         usable for shared TLB entries (H) */
-
-#define _PAGE_MODIFIED      (1<<11)  /* Page modified (dirty) (S) */
-#define _PAGE_FILE          (1<<12)  /* page cache/ swap (S) */
-
-#define _PAGE_SHARED_CODE_H (1<<31)  /* Hardware counterpart of above */
 #endif
 
 /* vmalloc permissions */
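For a quick sanity check of the reshuffled pre-MMUv3 layout, here is a small self-contained sketch (plain C, not kernel code) that decodes a PTE value using only the new bit positions from the hunk above; dump_pte() is a hypothetical helper.

#include <stdio.h>

/* Reshuffled MMUv1/v2 software PTE bits, copied from the hunk above. */
#define _PAGE_EXECUTE   (1 << 3)
#define _PAGE_WRITE     (1 << 4)
#define _PAGE_READ      (1 << 5)
#define _PAGE_MODIFIED  (1 << 6)
#define _PAGE_FILE      (1 << 7)
#define _PAGE_GLOBAL    (1 << 8)
#define _PAGE_PRESENT   (1 << 10)

/* Print which of the reshuffled flags are set in a raw PTE value. */
static void dump_pte(unsigned long pte)
{
        printf("pte %#05lx:%s%s%s%s%s%s\n", pte,
               (pte & _PAGE_PRESENT)  ? " present" : "",
               (pte & _PAGE_GLOBAL)   ? " global"  : "",
               (pte & _PAGE_READ)     ? " read"    : "",
               (pte & _PAGE_WRITE)    ? " write"   : "",
               (pte & _PAGE_EXECUTE)  ? " exec"    : "",
               (pte & _PAGE_MODIFIED) ? " dirty"   : "");
}

int main(void)
{
        /* e.g. a present, global, readable/writable, dirty page */
        dump_pte(_PAGE_PRESENT | _PAGE_GLOBAL | _PAGE_READ | _PAGE_WRITE | _PAGE_MODIFIED);
        return 0;
}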
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index f9908341e8a7..85a8716e6028 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -342,7 +342,6 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
 {
        unsigned long flags;
        unsigned int idx, asid_or_sasid, rwx;
-       unsigned long pd0_flags;
 
        /*
         * create_tlb() assumes that current->mm == vma->mm, since
@@ -381,17 +380,13 @@ void create_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
        /* update this PTE credentials */
        pte_val(*ptep) |= (_PAGE_PRESENT | _PAGE_ACCESSED);
 
-       /* Create HW TLB entry Flags (in PD0) from PTE Flags */
-#if (CONFIG_ARC_MMU_VER <= 2)
-       pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0) >> 1);
-#else
-       pd0_flags = ((pte_val(*ptep) & PTE_BITS_IN_PD0));
-#endif
+       /* Create HW TLB(PD0,PD1) from PTE */
 
        /* ASID for this task */
        asid_or_sasid = read_aux_reg(ARC_REG_PID) & 0xff;
 
-       write_aux_reg(ARC_REG_TLBPD0, address | pd0_flags | asid_or_sasid);
+       write_aux_reg(ARC_REG_TLBPD0, address | asid_or_sasid |
+                     (pte_val(*ptep) & PTE_BITS_IN_PD0));
 
        /*
         * ARC MMU provides fully orthogonal access bits for K/U mode,
diff --git a/arch/arc/mm/tlbex.S b/arch/arc/mm/tlbex.S
index ec382e59d681..50e83ca96b96 100644
--- a/arch/arc/mm/tlbex.S
+++ b/arch/arc/mm/tlbex.S
@@ -229,9 +229,6 @@ ex_saved_reg1:
        sr  r3, [ARC_REG_TLBPD1]        ; these go in PD1
 
        and r2, r0, PTE_BITS_IN_PD0     ; Extract other PTE flags: (V)alid, (G)lb
-#if (CONFIG_ARC_MMU_VER <= 2)  /* Neednot be done with v3 onwards */
-       lsr r2, r2                      ; shift PTE flags to match layout in PD0
-#endif
 
        lr  r3,[ARC_REG_TLBPD0]         ; MMU prepares PD0 with vaddr and asid
 