author     Paul Mundt <lethal@linux-sh.org>   2010-01-18 06:42:39 -0500
committer  Paul Mundt <lethal@linux-sh.org>   2010-01-18 06:42:39 -0500
commit     8faba6121566248330e738d25a2c43d7500fb9f0
tree       9cb09b2ec00b504dd24e1272126a22cd365e7282 /arch/sh/mm
parent     4291b730cd0f0cf98a90d946b6cabbd804397350
parent     78bf04fc96f509474c6b443b515d6b79bb7bf584
Merge branch 'sh/ioremap-fixed'
Diffstat (limited to 'arch/sh/mm')
 arch/sh/mm/Kconfig         |   4
 arch/sh/mm/Makefile        |   1
 arch/sh/mm/init.c          |  44
 arch/sh/mm/ioremap_32.c    |  28
 arch/sh/mm/ioremap_64.c    | 287
 arch/sh/mm/ioremap_fixed.c | 144
 arch/sh/mm/tlb-pteaex.c    |  66
 arch/sh/mm/tlb-sh4.c       |  66
 arch/sh/mm/tlbflush_64.c   |   2
9 files changed, 350 insertions, 292 deletions
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 7a4ebc8cbadd..b89075256b70 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -169,6 +169,10 @@ config ARCH_MEMORY_PROBE
 	def_bool y
 	depends on MEMORY_HOTPLUG
 
+config IOREMAP_FIXED
+	def_bool y
+	depends on X2TLB || SUPERH64
+
 choice
 	prompt "Kernel page size"
 	default PAGE_SIZE_4KB
diff --git a/arch/sh/mm/Makefile b/arch/sh/mm/Makefile
index edde8bdd681d..89ba56c20ade 100644
--- a/arch/sh/mm/Makefile
+++ b/arch/sh/mm/Makefile
@@ -35,6 +35,7 @@ endif
 obj-$(CONFIG_HUGETLB_PAGE)	+= hugetlbpage.o
 obj-$(CONFIG_PMB)		+= pmb.o
 obj-$(CONFIG_NUMA)		+= numa.o
+obj-$(CONFIG_IOREMAP_FIXED)	+= ioremap_fixed.o
 
 # Special flags for fault_64.o. This puts restrictions on the number of
 # caller-save registers that the compiler can target when building this file.
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index d5fb014279ad..30a9b530d456 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -39,7 +39,7 @@ unsigned long cached_to_uncached = P2SEG - P1SEG;
 #endif
 
 #ifdef CONFIG_MMU
-static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+static pte_t *__get_pte_phys(unsigned long addr)
 {
 	pgd_t *pgd;
 	pud_t *pud;
@@ -49,22 +49,30 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 	pgd = pgd_offset_k(addr);
 	if (pgd_none(*pgd)) {
 		pgd_ERROR(*pgd);
-		return;
+		return NULL;
 	}
 
 	pud = pud_alloc(NULL, pgd, addr);
 	if (unlikely(!pud)) {
 		pud_ERROR(*pud);
-		return;
+		return NULL;
 	}
 
 	pmd = pmd_alloc(NULL, pud, addr);
 	if (unlikely(!pmd)) {
 		pmd_ERROR(*pmd);
-		return;
+		return NULL;
 	}
 
 	pte = pte_offset_kernel(pmd, addr);
+	return pte;
+}
+
+static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
 	if (!pte_none(*pte)) {
 		pte_ERROR(*pte);
 		return;
@@ -72,6 +80,22 @@ static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot)
 
 	set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot));
 	local_flush_tlb_one(get_asid(), addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_wire_entry(NULL, addr, *pte);
+}
+
+static void clear_pte_phys(unsigned long addr, pgprot_t prot)
+{
+	pte_t *pte;
+
+	pte = __get_pte_phys(addr);
+
+	if (pgprot_val(prot) & _PAGE_WIRED)
+		tlb_unwire_entry();
+
+	set_pte(pte, pfn_pte(0, __pgprot(0)));
+	local_flush_tlb_one(get_asid(), addr);
 }
 
 /*
@@ -101,6 +125,18 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 	set_pte_phys(address, phys, prot);
 }
 
+void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot)
+{
+	unsigned long address = __fix_to_virt(idx);
+
+	if (idx >= __end_of_fixed_addresses) {
+		BUG();
+		return;
+	}
+
+	clear_pte_phys(address, prot);
+}
+
 void __init page_table_range_init(unsigned long start, unsigned long end,
 				  pgd_t *pgd_base)
 {
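
Taken together, the init.c changes let a fixmap slot be backed by a wired TLB entry and torn down again symmetrically. A minimal sketch of the intended pairing; FIX_EXAMPLE and the physical address are hypothetical, but the _PAGE_WIRED handling is exactly what set_pte_phys()/clear_pte_phys() do above:

static void example_wired_fixmap(void)
{
	pgprot_t prot = PAGE_KERNEL;

	/* set_pte_phys() sees _PAGE_WIRED and calls tlb_wire_entry() */
	pgprot_val(prot) |= _PAGE_WIRED;
	__set_fixmap(FIX_EXAMPLE, 0x10000000UL, prot);

	/* ... access the device through __fix_to_virt(FIX_EXAMPLE) ... */

	/* clear_pte_phys() unwires the TLB entry first, then clears the PTE */
	__clear_fixmap(FIX_EXAMPLE, prot);
}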
diff --git a/arch/sh/mm/ioremap_32.c b/arch/sh/mm/ioremap_32.c
index 2141befb4f91..c80a8166fbb0 100644
--- a/arch/sh/mm/ioremap_32.c
+++ b/arch/sh/mm/ioremap_32.c
@@ -105,15 +105,35 @@ void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
 }
 EXPORT_SYMBOL(__ioremap_caller);
 
+/*
+ * Simple checks for non-translatable mappings.
+ */
+static inline int iomapping_nontranslatable(unsigned long offset)
+{
+#ifdef CONFIG_29BIT
+	/*
+	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
+	 * parts of P3.
+	 */
+	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
+		return 1;
+#endif
+
+	if (is_pci_memory_fixed_range(offset, 0))
+		return 1;
+
+	return 0;
+}
+
 void __iounmap(void __iomem *addr)
 {
 	unsigned long vaddr = (unsigned long __force)addr;
-	unsigned long seg = PXSEG(vaddr);
 	struct vm_struct *p;
 
-	if (seg < P3SEG || vaddr >= P3_ADDR_MAX)
-		return;
-	if (is_pci_memory_fixed_range(vaddr, 0))
+	/*
+	 * Nothing to do if there is no translatable mapping.
+	 */
+	if (iomapping_nontranslatable(vaddr))
 		return;
 
 #ifdef CONFIG_PMB
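
The new helper makes the iounmap() fast path explicit: pointers into the fixed 29-bit segments were never entered into the page tables, so there is nothing to unmap. A sketch of the resulting behaviour, with illustrative addresses (on 29-bit parts P2SEG starts at 0xa0000000 and P3SEG at 0xc0000000):

static void example_iounmap_paths(void)
{
	/* P2SEG pointer: iomapping_nontranslatable() returns 1, no-op */
	iounmap((void __iomem *)0xa4000000);

	/* P3SEG pointer: translatable, falls through to the real unmap path */
	iounmap((void __iomem *)0xc0001000);
}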
diff --git a/arch/sh/mm/ioremap_64.c b/arch/sh/mm/ioremap_64.c
index ef434657d428..fb0aa457c71e 100644
--- a/arch/sh/mm/ioremap_64.c
+++ b/arch/sh/mm/ioremap_64.c
@@ -28,299 +28,20 @@
 #include <asm/tlbflush.h>
 #include <asm/mmu.h>
 
-static struct resource shmedia_iomap = {
-	.name = "shmedia_iomap",
-	.start = IOBASE_VADDR + PAGE_SIZE,
-	.end = IOBASE_END - 1,
-};
-
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
-			      unsigned long flags);
-static void shmedia_unmapioaddr(unsigned long vaddr);
-static void __iomem *shmedia_ioremap(struct resource *res, u32 pa,
-				     int sz, unsigned long flags);
-
-/*
- * We have the same problem as the SPARC, so lets have the same comment:
- * Our mini-allocator...
- * Boy this is gross! We need it because we must map I/O for
- * timers and interrupt controller before the kmalloc is available.
- */
-
-#define XNMLN  15
-#define XNRES  10
-
-struct xresource {
-	struct resource xres;	/* Must be first */
-	int xflag;		/* 1 == used */
-	char xname[XNMLN+1];
-};
-
-static struct xresource xresv[XNRES];
-
-static struct xresource *xres_alloc(void)
-{
-	struct xresource *xrp;
-	int n;
-
-	xrp = xresv;
-	for (n = 0; n < XNRES; n++) {
-		if (xrp->xflag == 0) {
-			xrp->xflag = 1;
-			return xrp;
-		}
-		xrp++;
-	}
-	return NULL;
-}
-
-static void xres_free(struct xresource *xrp)
-{
-	xrp->xflag = 0;
-}
-
-static struct resource *shmedia_find_resource(struct resource *root,
-					      unsigned long vaddr)
-{
-	struct resource *res;
-
-	for (res = root->child; res; res = res->sibling)
-		if (res->start <= vaddr && res->end >= vaddr)
-			return res;
-
-	return NULL;
-}
-
-static void __iomem *shmedia_alloc_io(unsigned long phys, unsigned long size,
-				      const char *name, unsigned long flags)
-{
-	struct xresource *xres;
-	struct resource *res;
-	char *tack;
-	int tlen;
-
-	if (name == NULL)
-		name = "???";
-
-	xres = xres_alloc();
-	if (xres != 0) {
-		tack = xres->xname;
-		res = &xres->xres;
-	} else {
-		printk_once(KERN_NOTICE "%s: done with statics, "
-			    "switching to kmalloc\n", __func__);
-		tlen = strlen(name);
-		tack = kmalloc(sizeof(struct resource) + tlen + 1, GFP_KERNEL);
-		if (!tack)
-			return NULL;
-		memset(tack, 0, sizeof(struct resource));
-		res = (struct resource *) tack;
-		tack += sizeof(struct resource);
-	}
-
-	strncpy(tack, name, XNMLN);
-	tack[XNMLN] = 0;
-	res->name = tack;
-
-	return shmedia_ioremap(res, phys, size, flags);
-}
-
-static void __iomem *shmedia_ioremap(struct resource *res, u32 pa, int sz,
-				     unsigned long flags)
-{
-	unsigned long offset = ((unsigned long) pa) & (~PAGE_MASK);
-	unsigned long round_sz = (offset + sz + PAGE_SIZE-1) & PAGE_MASK;
-	unsigned long va;
-	unsigned int psz;
-
-	if (allocate_resource(&shmedia_iomap, res, round_sz,
-			      shmedia_iomap.start, shmedia_iomap.end,
-			      PAGE_SIZE, NULL, NULL) != 0) {
-		panic("alloc_io_res(%s): cannot occupy\n",
-		      (res->name != NULL) ? res->name : "???");
-	}
-
-	va = res->start;
-	pa &= PAGE_MASK;
-
-	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-
-	for (psz = res->end - res->start + 1; psz != 0; psz -= PAGE_SIZE) {
-		shmedia_mapioaddr(pa, va, flags);
-		va += PAGE_SIZE;
-		pa += PAGE_SIZE;
-	}
-
-	return (void __iomem *)(unsigned long)(res->start + offset);
-}
-
-static void shmedia_free_io(struct resource *res)
-{
-	unsigned long len = res->end - res->start + 1;
-
-	BUG_ON((len & (PAGE_SIZE - 1)) != 0);
-
-	while (len) {
-		len -= PAGE_SIZE;
-		shmedia_unmapioaddr(res->start + len);
-	}
-
-	release_resource(res);
-}
-
-static __init_refok void *sh64_get_page(void)
-{
-	void *page;
-
-	if (slab_is_available())
-		page = (void *)get_zeroed_page(GFP_KERNEL);
-	else
-		page = alloc_bootmem_pages(PAGE_SIZE);
-
-	if (!page || ((unsigned long)page & ~PAGE_MASK))
-		panic("sh64_get_page: Out of memory already?\n");
-
-	return page;
-}
-
-static void shmedia_mapioaddr(unsigned long pa, unsigned long va,
-			      unsigned long flags)
+void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
+			       unsigned long flags, void *caller)
 {
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep, pte;
 	pgprot_t prot;
 
-	pr_debug("shmedia_mapiopage pa %08lx va %08lx\n", pa, va);
-
-	if (!flags)
-		flags = 1; /* 1 = CB0-1 device */
-
-	pgdp = pgd_offset_k(va);
-	if (pgd_none(*pgdp) || !pgd_present(*pgdp)) {
-		pudp = (pud_t *)sh64_get_page();
-		set_pgd(pgdp, __pgd((unsigned long)pudp | _KERNPG_TABLE));
-	}
-
-	pudp = pud_offset(pgdp, va);
-	if (pud_none(*pudp) || !pud_present(*pudp)) {
-		pmdp = (pmd_t *)sh64_get_page();
-		set_pud(pudp, __pud((unsigned long)pmdp | _KERNPG_TABLE));
-	}
-
-	pmdp = pmd_offset(pudp, va);
-	if (pmd_none(*pmdp) || !pmd_present(*pmdp)) {
-		ptep = (pte_t *)sh64_get_page();
-		set_pmd(pmdp, __pmd((unsigned long)ptep + _PAGE_TABLE));
-	}
-
 	prot = __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE |
 			_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SHARED | flags);
 
-	pte = pfn_pte(pa >> PAGE_SHIFT, prot);
-	ptep = pte_offset_kernel(pmdp, va);
-
-	if (!pte_none(*ptep) &&
-	    pte_val(*ptep) != pte_val(pte))
-		pte_ERROR(*ptep);
-
-	set_pte(ptep, pte);
-
-	flush_tlb_kernel_range(va, PAGE_SIZE);
-}
-
-static void shmedia_unmapioaddr(unsigned long vaddr)
-{
-	pgd_t *pgdp;
-	pud_t *pudp;
-	pmd_t *pmdp;
-	pte_t *ptep;
-
-	pgdp = pgd_offset_k(vaddr);
-	if (pgd_none(*pgdp) || pgd_bad(*pgdp))
-		return;
-
-	pudp = pud_offset(pgdp, vaddr);
-	if (pud_none(*pudp) || pud_bad(*pudp))
-		return;
-
-	pmdp = pmd_offset(pudp, vaddr);
-	if (pmd_none(*pmdp) || pmd_bad(*pmdp))
-		return;
-
-	ptep = pte_offset_kernel(pmdp, vaddr);
-
-	if (pte_none(*ptep) || !pte_present(*ptep))
-		return;
-
-	clear_page((void *)ptep);
-	pte_clear(&init_mm, vaddr, ptep);
-}
-
-void __iomem *__ioremap_caller(unsigned long offset, unsigned long size,
-			       unsigned long flags, void *caller)
-{
-	char name[14];
-
-	sprintf(name, "phys_%08x", (u32)offset);
-	return shmedia_alloc_io(offset, size, name, flags);
+	return ioremap_fixed(offset, size, prot);
 }
 EXPORT_SYMBOL(__ioremap_caller);
 
 void __iounmap(void __iomem *virtual)
 {
-	unsigned long vaddr = (unsigned long)virtual & PAGE_MASK;
-	struct resource *res;
-	unsigned int psz;
-
-	res = shmedia_find_resource(&shmedia_iomap, vaddr);
-	if (!res) {
-		printk(KERN_ERR "%s: Failed to free 0x%08lx\n",
-		       __func__, vaddr);
-		return;
-	}
-
-	psz = (res->end - res->start + (PAGE_SIZE - 1)) / PAGE_SIZE;
-
-	shmedia_free_io(res);
-
-	if ((char *)res >= (char *)xresv &&
-	    (char *)res < (char *)&xresv[XNRES]) {
-		xres_free((struct xresource *)res);
-	} else {
-		kfree(res);
-	}
+	iounmap_fixed(virtual);
 }
 EXPORT_SYMBOL(__iounmap);
-
-static int
-ioremap_proc_info(char *buf, char **start, off_t fpos, int length, int *eof,
-		  void *data)
-{
-	char *p = buf, *e = buf + length;
-	struct resource *r;
-	const char *nm;
-
-	for (r = ((struct resource *)data)->child; r != NULL; r = r->sibling) {
-		if (p + 32 >= e)	/* Better than nothing */
-			break;
-		nm = r->name;
-		if (nm == NULL)
-			nm = "???";
-
-		p += sprintf(p, "%08lx-%08lx: %s\n",
-			     (unsigned long)r->start,
-			     (unsigned long)r->end, nm);
-	}
-
-	return p-buf;
-}
-
-static int __init register_proc_onchip(void)
-{
-	create_proc_read_entry("io_map", 0, 0, ioremap_proc_info,
-			       &shmedia_iomap);
-	return 0;
-}
-late_initcall(register_proc_onchip);
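
With the mini-allocator gone, SH-5 ioremap() is just a pgprot computation in front of the common fixmap code, and the /proc/io_map interface disappears with it. A sketch of what a driver-level mapping now resolves to (the device address is made up for illustration):

static void __iomem *example_sh5_map(void)
{
	/* ioremap() -> __ioremap_caller() -> ioremap_fixed() */
	return ioremap(0xfe4c0000, 0x100);
}

Teardown is correspondingly just iounmap() -> __iounmap() -> iounmap_fixed().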
diff --git a/arch/sh/mm/ioremap_fixed.c b/arch/sh/mm/ioremap_fixed.c
new file mode 100644
index 000000000000..3a9d3d88fe8d
--- /dev/null
+++ b/arch/sh/mm/ioremap_fixed.c
@@ -0,0 +1,144 @@
+/*
+ * Re-map IO memory to kernel address space so that we can access it.
+ *
+ * These functions should only be used when it is necessary to map a
+ * physical address space into the kernel address space before ioremap()
+ * can be used, e.g. early in boot before paging_init().
+ *
+ * Copyright (C) 2009  Matt Fleming
+ */
+
+#include <linux/vmalloc.h>
+#include <linux/ioport.h>
+#include <linux/module.h>
+#include <linux/mm.h>
+#include <linux/io.h>
+#include <linux/bootmem.h>
+#include <linux/proc_fs.h>
+#include <linux/slab.h>
+#include <asm/fixmap.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/addrspace.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mmu.h>
+#include <asm/mmu_context.h>
+
+struct ioremap_map {
+	void __iomem *addr;
+	unsigned long size;
+	unsigned long fixmap_addr;
+};
+
+static struct ioremap_map ioremap_maps[FIX_N_IOREMAPS];
+
+void __init ioremap_fixed_init(void)
+{
+	struct ioremap_map *map;
+	int i;
+
+	for (i = 0; i < FIX_N_IOREMAPS; i++) {
+		map = &ioremap_maps[i];
+		map->fixmap_addr = __fix_to_virt(FIX_IOREMAP_BEGIN + i);
+	}
+}
+
+void __init __iomem *
+ioremap_fixed(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
+{
+	enum fixed_addresses idx0, idx;
+	resource_size_t last_addr;
+	struct ioremap_map *map;
+	unsigned long offset;
+	unsigned int nrpages;
+	int i, slot;
+
+	slot = -1;
+	for (i = 0; i < FIX_N_IOREMAPS; i++) {
+		map = &ioremap_maps[i];
+		if (!map->addr) {
+			map->size = size;
+			slot = i;
+			break;
+		}
+	}
+
+	if (slot < 0)
+		return NULL;
+
+	/* Don't allow wraparound or zero size */
+	last_addr = phys_addr + size - 1;
+	if (!size || last_addr < phys_addr)
+		return NULL;
+
+	/*
+	 * Fixmap mappings have to be page-aligned
+	 */
+	offset = phys_addr & ~PAGE_MASK;
+	phys_addr &= PAGE_MASK;
+	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
+
+	/*
+	 * Mappings have to fit in the FIX_IOREMAP area.
+	 */
+	nrpages = size >> PAGE_SHIFT;
+	if (nrpages > FIX_N_IOREMAPS)
+		return NULL;
+
+	/*
+	 * Ok, go for it..
+	 */
+	idx0 = FIX_IOREMAP_BEGIN + slot;
+	idx = idx0;
+	while (nrpages > 0) {
+		pgprot_val(prot) |= _PAGE_WIRED;
+		__set_fixmap(idx, phys_addr, prot);
+		phys_addr += PAGE_SIZE;
+		idx++;
+		--nrpages;
+	}
+
+	map->addr = (void __iomem *)(offset + map->fixmap_addr);
+	return map->addr;
+}
+
+void __init iounmap_fixed(void __iomem *addr)
+{
+	enum fixed_addresses idx;
+	unsigned long virt_addr;
+	struct ioremap_map *map;
+	unsigned long offset;
+	unsigned int nrpages;
+	int i, slot;
+	pgprot_t prot;
+
+	slot = -1;
+	for (i = 0; i < FIX_N_IOREMAPS; i++) {
+		map = &ioremap_maps[i];
+		if (map->addr == addr) {
+			slot = i;
+			break;
+		}
+	}
+
+	if (slot < 0)
+		return;
+
+	virt_addr = (unsigned long)addr;
+
+	offset = virt_addr & ~PAGE_MASK;
+	nrpages = PAGE_ALIGN(offset + map->size - 1) >> PAGE_SHIFT;
+
+	pgprot_val(prot) = _PAGE_WIRED;
+
+	idx = FIX_IOREMAP_BEGIN + slot + nrpages;
+	while (nrpages > 0) {
+		__clear_fixmap(idx, prot);
+		--idx;
+		--nrpages;
+	}
+
+	map->size = 0;
+	map->addr = NULL;
+}
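
ioremap_fixed() exists for the window before paging_init()/ioremap() are usable: it hands out one of FIX_N_IOREMAPS fixed slots and wires each page's TLB entry so the mapping survives without a working fault path. A minimal early-boot usage sketch, assuming a hypothetical device address and that an uncached kernel protection such as PAGE_KERNEL_NOCACHE is wanted:

static void __iomem *early_regs;

static void __init example_early_ioremap(void)
{
	/* prime the slot table before first use */
	ioremap_fixed_init();

	/* 0x1f000000 is a made-up device address */
	early_regs = ioremap_fixed(0x1f000000, PAGE_SIZE,
				   PAGE_KERNEL_NOCACHE);
	if (!early_regs)
		return;

	/* ... readb()/writeb() on early_regs ... */

	iounmap_fixed(early_regs);
}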
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index 409b7c2b4b9d..88c8bb05e16d 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -76,3 +76,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	__raw_writel(asid, MMU_UTLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
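
Because each wire moves the URB replacement boundary by one slot, wired entries behave like a stack, as the comment above notes; teardown must mirror setup in reverse. A short sketch (addresses and ptes hypothetical; the same applies verbatim to the tlb-sh4.c copy below):

static void example_wire_order(unsigned long addr_a, pte_t pte_a,
			       unsigned long addr_b, pte_t pte_b)
{
	tlb_wire_entry(NULL, addr_a, pte_a);	/* wires entry N */
	tlb_wire_entry(NULL, addr_b, pte_b);	/* wires entry N+1 */

	/* ... both translations are now safe from TLB replacement ... */

	tlb_unwire_entry();	/* must release addr_b's entry (N+1) first */
	tlb_unwire_entry();	/* then addr_a's entry (N) */
}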
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index 8cf550e2570f..4c6234743318 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -81,3 +81,69 @@ void __uses_jump_to_uncached local_flush_tlb_one(unsigned long asid,
 	ctrl_outl(data, addr);
 	back_to_cached();
 }
+
+/*
+ * Load the entry for 'addr' into the TLB and wire the entry.
+ */
+void tlb_wire_entry(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	/* Load the entry into the TLB */
+	__update_tlb(vma, addr, pte);
+
+	/* ... and wire it up. */
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to wire the last TLB entry slot.
+	 */
+	BUG_ON(!--urb);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
+
+/*
+ * Unwire the last wired TLB entry.
+ *
+ * It should also be noted that it is not possible to wire and unwire
+ * TLB entries in an arbitrary order. If you wire TLB entry N, followed
+ * by entry N+1, you must unwire entry N+1 first, then entry N. In this
+ * respect, it works like a stack or LIFO queue.
+ */
+void tlb_unwire_entry(void)
+{
+	unsigned long status, flags;
+	int urb;
+
+	local_irq_save(flags);
+
+	status = ctrl_inl(MMUCR);
+	urb = (status & MMUCR_URB) >> MMUCR_URB_SHIFT;
+	status &= ~MMUCR_URB;
+
+	/*
+	 * Make sure we're not trying to unwire a TLB entry when none
+	 * have been wired.
+	 */
+	BUG_ON(urb++ == MMUCR_URB_NENTRIES);
+
+	urb = urb % MMUCR_URB_NENTRIES;
+
+	status |= (urb << MMUCR_URB_SHIFT);
+	ctrl_outl(status, MMUCR);
+	ctrl_barrier();
+
+	local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlbflush_64.c b/arch/sh/mm/tlbflush_64.c
index de0b0e881823..706da1d3a67a 100644
--- a/arch/sh/mm/tlbflush_64.c
+++ b/arch/sh/mm/tlbflush_64.c
@@ -36,7 +36,7 @@ extern void die(const char *,struct pt_regs *,long);
 
 static inline void print_prots(pgprot_t prot)
 {
-	printk("prot is 0x%08lx\n",pgprot_val(prot));
+	printk("prot is 0x%016llx\n",pgprot_val(prot));
 
 	printk("%s %s %s %s %s\n",PPROT(_PAGE_SHARED),PPROT(_PAGE_READ),
 	PPROT(_PAGE_EXECUTE),PPROT(_PAGE_WRITE),PPROT(_PAGE_USER));