author     Paul Mundt <lethal@linux-sh.org>    2010-01-19 01:20:35 -0500
committer  Paul Mundt <lethal@linux-sh.org>    2010-01-19 01:20:35 -0500
commit     bb29c677b366fdf4f6522cd82228a32567aa98c7 (patch)
tree       0235c7477ed635c8c21131b90094d151663ae889 /arch/sh
parent     046581f9623b53f551a93864bb74e15ad2514f0c (diff)
sh: Split out MMUCR.URB based entry wiring in to shared helper.
Presently this is duplicated between tlb-sh4 and tlb-pteaex. Split the helpers out into a generic tlb-urb that can be used by any parts equipped with MMUCR.URB. At the same time, move the SH-5 code out of line, as we require a single global state for DTLB entry wiring.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
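For orientation, an editor's sketch (not part of the patch) of how the consolidated interface is used: a caller wires the translation for a page it is about to touch, does its work, and then unwires it again. tlb_wire_entry() and tlb_unwire_entry() are the helpers this commit moves into tlb-urb.c; the surrounding function and its vma/addr/pte arguments are hypothetical.

	/*
	 * Illustrative only: pin the UTLB entry for one already-resolved page
	 * around a critical access.  'vma', 'addr' and 'pte' are assumed to
	 * describe a page that has been faulted in.
	 */
	static void access_with_wired_tlb(struct vm_area_struct *vma,
					  unsigned long addr, pte_t pte)
	{
		tlb_wire_entry(vma, addr, pte);	/* load the entry and wire it */

		/* ... touch 'addr' without the entry being replaced ... */

		tlb_unwire_entry();		/* drop the most recently wired entry */
	}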
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/include/asm/tlb.h  | 44
-rw-r--r--  arch/sh/mm/Makefile        |  4
-rw-r--r--  arch/sh/mm/tlb-pteaex.c    | 66
-rw-r--r--  arch/sh/mm/tlb-sh4.c       | 66
-rw-r--r--  arch/sh/mm/tlb-sh5.c       | 39
-rw-r--r--  arch/sh/mm/tlb-urb.c       | 81
6 files changed, 124 insertions(+), 176 deletions(-)
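Background note (editorial, not from the commit): on parts with MMUCR.URB, the URB field bounds the range of UTLB slots eligible for replacement, so the shared helper wires an entry by shrinking that window by one slot and unwires by growing it back. A minimal sketch of the wire-side read-modify-write, using the macro names that appear in the patch; the exact MMUCR layout is whatever the part's <asm/mmu_context.h> defines.

	unsigned long status;
	int urb;

	status = __raw_readl(MMUCR);			/* current MMU control register */
	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;	/* slots still open to replacement */

	status &= ~MMUCR_URB;				/* clear the old URB field ... */
	status |= ((urb - 1) << MMUCR_URB_SHIFT);	/* ... and shrink it by one slot */

	__raw_writel(status, MMUCR);			/* one more entry is now wired */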
diff --git a/arch/sh/include/asm/tlb.h b/arch/sh/include/asm/tlb.h
index dfc8fcd8ee50..75abb38dffd5 100644
--- a/arch/sh/include/asm/tlb.h
+++ b/arch/sh/include/asm/tlb.h
@@ -98,49 +98,9 @@ tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
 
 #define tlb_migrate_finish(mm)	do { } while (0)
 
-#ifdef CONFIG_CPU_SH4
+#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SUPERH64)
 extern void tlb_wire_entry(struct vm_area_struct *, unsigned long, pte_t);
 extern void tlb_unwire_entry(void);
-#elif defined(CONFIG_SUPERH64)
-static int dtlb_entry;
-static unsigned long long dtlb_entries[64];
-
-static inline void tlb_wire_entry(struct vm_area_struct *vma,
-				  unsigned long addr, pte_t pte)
-{
-	unsigned long long entry;
-	unsigned long paddr, flags;
-
-	BUG_ON(dtlb_entry == 64);
-
-	local_irq_save(flags);
-
-	entry = sh64_get_wired_dtlb_entry();
-	dtlb_entries[dtlb_entry++] = entry;
-
-	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
-	paddr &= ~PAGE_MASK;
-
-	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
-
-	local_irq_restore(flags);
-}
-
-static inline void tlb_unwire_entry(void)
-{
-	unsigned long long entry;
-	unsigned long flags;
-
-	BUG_ON(!dtlb_entry);
-
-	local_irq_save(flags);
-	entry = dtlb_entries[dtlb_entry--];
-
-	sh64_teardown_tlb_slot(entry);
-	sh64_put_wired_dtlb_entry(entry);
-
-	local_irq_restore(flags);
-}
 #else
 static inline void tlb_wire_entry(struct vm_area_struct *vma ,
 				  unsigned long addr, pte_t pte)
@@ -152,7 +112,7 @@ static inline void tlb_unwire_entry(void)
 {
 	BUG();
 }
-#endif /* CONFIG_CPU_SH4 */
+#endif
 
 #else /* CONFIG_MMU */
 
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index 0027cdea2c20..de714cbd961a 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -26,9 +26,9 @@ endif
 
 ifdef CONFIG_MMU
 tlb-$(CONFIG_CPU_SH3)		:= tlb-sh3.o
-tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o
+tlb-$(CONFIG_CPU_SH4)		:= tlb-sh4.o tlb-urb.o
 tlb-$(CONFIG_CPU_SH5)		:= tlb-sh5.o
-tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o
+tlb-$(CONFIG_CPU_HAS_PTEAEX)	:= tlb-pteaex.o tlb-urb.o
 obj-y				+= $(tlb-y)
 endif
 
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 88c8bb05e16d..409b7c2b4b9d 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -76,69 +76,3 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
 	back_to_cached();
 }
-
-/*
- * Load the entry for 'addr' into the TLB and wire the entry.
- */
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
-	unsigned long status, flags;
-	int urb;
-
-	local_irq_save(flags);
-
-	/* Load the entry into the TLB */
-	__update_tlb(vma, addr, pte);
-
-	/* ... and wire it up. */
-	status = ctrl_inl(MMUCR);
-	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
-	status &= ~MMUCR_URB;
-
-	/*
-	 * Make sure we're not trying to wire the last TLB entry slot.
-	 */
-	BUG_ON(!--urb);
-
-	urb = urb % MMUCR_URB_NENTRIES;
-
-	status |= (urb << MMUCR_URB_SHIFT);
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-
-	local_irq_restore(flags);
-}
-
-/*
- * Unwire the last wired TLB entry.
- *
- * It should also be noted that it is not possible to wire and unwire
- * TLB entries in an arbitrary order. If you wire TLB entry N, followed
- * by entry N+1, you must unwire entry N+1 first, then entry N. In this
- * respect, it works like a stack or LIFO queue.
- */
-void tlb_unwire_entry(void)
-{
-	unsigned long status, flags;
-	int urb;
-
-	local_irq_save(flags);
-
-	status = ctrl_inl(MMUCR);
-	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
-	status &= ~MMUCR_URB;
-
-	/*
-	 * Make sure we're not trying to unwire a TLB entry when none
-	 * have been wired.
-	 */
-	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
-
-	urb = urb % MMUCR_URB_NENTRIES;
-
-	status |= (urb << MMUCR_URB_SHIFT);
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-
-	local_irq_restore(flags);
-}
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 4c6234743318..8cf550e2570f 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -81,69 +81,3 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	ctrl_outl(data, addr);
 	back_to_cached();
 }
-
-/*
- * Load the entry for 'addr' into the TLB and wire the entry.
- */
-void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
-{
-	unsigned long status, flags;
-	int urb;
-
-	local_irq_save(flags);
-
-	/* Load the entry into the TLB */
-	__update_tlb(vma, addr, pte);
-
-	/* ... and wire it up. */
-	status = ctrl_inl(MMUCR);
-	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
-	status &= ~MMUCR_URB;
-
-	/*
-	 * Make sure we're not trying to wire the last TLB entry slot.
-	 */
-	BUG_ON(!--urb);
-
-	urb = urb % MMUCR_URB_NENTRIES;
-
-	status |= (urb << MMUCR_URB_SHIFT);
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-
-	local_irq_restore(flags);
-}
-
-/*
- * Unwire the last wired TLB entry.
- *
- * It should also be noted that it is not possible to wire and unwire
- * TLB entries in an arbitrary order. If you wire TLB entry N, followed
- * by entry N+1, you must unwire entry N+1 first, then entry N. In this
- * respect, it works like a stack or LIFO queue.
- */
-void tlb_unwire_entry(void)
-{
-	unsigned long status, flags;
-	int urb;
-
-	local_irq_save(flags);
-
-	status = ctrl_inl(MMUCR);
-	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
-	status &= ~MMUCR_URB;
-
-	/*
-	 * Make sure we're not trying to unwire a TLB entry when none
-	 * have been wired.
-	 */
-	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
-
-	urb = urb % MMUCR_URB_NENTRIES;
-
-	status |= (urb << MMUCR_URB_SHIFT);
-	ctrl_outl(status, MMUCR);
-	ctrl_barrier();
-
-	local_irq_restore(flags);
-}
diff --git a/arch/sh/mm/tlb-sh5.c b/arch/sh/mm/tlb-sh5.c
index fdb64e41ec50..f27dbe1c1599 100644
--- a/arch/sh/mm/tlb-sh5.c
+++ b/arch/sh/mm/tlb-sh5.c
@@ -143,3 +143,42 @@ void sh64_setup_tlb_slot(unsigned long long config_addr, unsigned long eaddr,
  */
 void sh64_teardown_tlb_slot(unsigned long long config_addr)
 	__attribute__ ((alias("__flush_tlb_slot")));
+
+static int dtlb_entry;
+static unsigned long long dtlb_entries[64];
+
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long long entry;
+	unsigned long paddr, flags;
+
+	BUG_ON(dtlb_entry == ARRAY_SIZE(dtlb_entries));
+
+	local_irq_save(flags);
+
+	entry = sh64_get_wired_dtlb_entry();
+	dtlb_entries[dtlb_entry++] = entry;
+
+	paddr = pte_val(pte) & _PAGE_FLAGS_HARDWARE_MASK;
+	paddr &= ~PAGE_MASK;
+
+	sh64_setup_tlb_slot(entry, addr, get_asid(), paddr);
+
+	local_irq_restore(flags);
+}
+
+void tlb_unwire_entry(void)
+{
+	unsigned long long entry;
+	unsigned long flags;
+
+	BUG_ON(!dtlb_entry);
+
+	local_irq_save(flags);
+	entry = dtlb_entries[dtlb_entry--];
+
+	sh64_teardown_tlb_slot(entry);
+	sh64_put_wired_dtlb_entry(entry);
+
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-urb.c b/arch/sh/mm/tlb-urb.c
new file mode 100644
index 000000000000..bb5b9098956d
--- /dev/null
+++ b/arch/sh/mm/tlb-urb.c
@@ -0,0 +1,81 @@
+/*
+ * arch/sh/mm/tlb-urb.c
+ *
+ * TLB entry wiring helpers for URB-equipped parts.
+ *
+ * Copyright (C) 2010 Matt Fleming
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <asm/tlb.h>
+#include <asm/mmu_context.h>
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = __raw_readl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	__raw_writel(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = __raw_readl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	__raw_writel(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
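As the comment above tlb_unwire_entry() spells out, wired entries behave like a stack. Below is an editorial usage sketch (not part of the patch) of the required ordering when more than one entry is wired; the vma/addr/pte values are hypothetical.

	/* Wire two translations. */
	tlb_wire_entry(vma_a, addr_a, pte_a);	/* wires entry N */
	tlb_wire_entry(vma_b, addr_b, pte_b);	/* wires entry N + 1 */

	/* ... */

	/* Unwire strictly in reverse (LIFO) order. */
	tlb_unwire_entry();			/* releases entry N + 1 first */
	tlb_unwire_entry();			/* then entry N */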