diff options
author:    Matt Fleming <matt@console-pimps.org>  2009-11-17 16:05:31 -0500
committer: Matt Fleming <matt@console-pimps.org>  2010-01-16 09:28:57 -0500
commit:    8eda55142080f0373b1f0268fe6d6807f193e713 (patch)
tree:      6d103af69153dc5bfd78ebe89930cf3c66ec5b2b /arch
parent:    7dcaa8e8e67b2cfbe0097c9bb52e23aed5443b8b (diff)
sh: New extended page flag to wire/unwire TLB entries
Provide a new extended page flag, _PAGE_WIRED, and an SH4 implementation
for wiring TLB entries, and use it in the fixmap code path so that we can
wire the fixmap TLB entry.
Signed-off-by: Matt Fleming <matt@console-pimps.org>
Diffstat (limited to 'arch')
 arch/sh/include/asm/pgtable_32.h          |  4 ++++
 arch/sh/include/asm/tlb.h                 | 16 ++++++++++++++++
 arch/sh/include/cpu-sh4/cpu/mmu_context.h |  4 ++++
 arch/sh/mm/tlb-pteaex.c                   | 66 ++++++++++++++++++++++++++++++
 arch/sh/mm/tlb-sh4.c                      | 66 ++++++++++++++++++++++++++++++
 5 files changed, 156 insertions(+), 0 deletions(-)
diff --git a/arch/sh/include/asm/pgtable_32.h b/arch/sh/include/asm/pgtable_32.h
index 5003ee86f67b..c573d45f1286 100644
--- a/arch/sh/include/asm/pgtable_32.h
+++ b/arch/sh/include/asm/pgtable_32.h
@@ -71,6 +71,8 @@
 #define _PAGE_EXT_KERN_WRITE	0x1000	/* EPR4-bit: Kernel space writable */
 #define _PAGE_EXT_KERN_READ	0x2000	/* EPR5-bit: Kernel space readable */
 
+#define _PAGE_EXT_WIRED	0x4000	/* software: Wire TLB entry */
+
 /* Wrapper for extended mode pgprot twiddling */
 #define _PAGE_EXT(x)		((unsigned long long)(x) << 32)
 
@@ -164,6 +166,8 @@ static inline unsigned long copy_ptea_attributes(unsigned long x)
 	(PTE_MASK | _PAGE_ACCESSED | _PAGE_CACHABLE | \
 	 _PAGE_DIRTY | _PAGE_SPECIAL)
 
+#define _PAGE_WIRED	(_PAGE_EXT(_PAGE_EXT_WIRED))
+
 #ifndef __ASSEMBLY__
 
 #if defined(CONFIG_X2TLB)		/* SH-X2 TLB */
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index da8fe7ab8728..3ed2f7a05416 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -97,6 +97,22 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 #define tlb_migrate_finish(mm) do { } while (0)
 
+#ifdef CONFIG_CPU_SH4
+extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
+extern void tlb_unwire_entry(void);
+#else
+static inline void tlb_wire_entry(struct vm_area_struct *vma,
+				  unsigned long addr, pte_t pte)
+{
+	BUG();
+}
+
+static inline void tlb_unwire_entry(void)
+{
+	BUG();
+}
+#endif /* CONFIG_CPU_SH4 */
+
 #else /* CONFIG_MMU */
 
 #define tlb_start_vma(tlb, vma)				do { } while (0)
diff --git a/arch/sh/include/cpu-sh4/cpu/mmu_context.h b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
index 3ce7ef6c2978..03ea75c5315d 100644
--- a/arch/sh/include/cpu-sh4/cpu/mmu_context.h
+++ b/arch/sh/include/cpu-sh4/cpu/mmu_context.h
@@ -25,6 +25,10 @@
 
 #define MMUCR_TI		(1<<2)
 
+#define MMUCR_URB		0x00FC0000
+#define MMUCR_URB_SHIFT		18
+#define MMUCR_URB_NENTRIES	64
+
 #if defined(CONFIG_32BIT) && defined(CONFIG_CPU_SUBTYPE_ST40)
 #define MMUCR_SE		(1 << 4)
 #else
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 409b7c2b4b9d..88c8bb05e16d 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -76,3 +76,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 8cf550e2570f..4c6234743318 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -81,3 +81,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	ctrl_outl(data, addr);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}