author    David Howells <dhowells@redhat.com>  2008-02-08 07:19:31 -0500
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2008-02-08 12:22:30 -0500
commit    b920de1b77b72ca9432ac3f97edb26541e65e5dd (patch)
tree      40fa9be1470e929c47927dea7eddf184c0204229 /arch/mn10300/mm
parent    ef3d534754f31fed9c3b976fee1ece1b3bc38282 (diff)
mn10300: add the MN10300/AM33 architecture to the kernel
Add architecture support for the MN10300/AM33 CPUs produced by MEI to the
kernel.

This patch also adds board support for the ASB2303 with the ASB2308 daughter
board, and the ASB2305.  The only processor supported is the MN103E010, which
is an AM33v2 core plus on-chip devices.

[akpm@linux-foundation.org: nuke cvs control strings]
Signed-off-by: Masakazu Urade <urade.masakazu@jp.panasonic.com>
Signed-off-by: Koichi Yasutake <yasutake.koichi@jp.panasonic.com>
Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/mn10300/mm')
-rw-r--r--  arch/mn10300/mm/Makefile               |  14
-rw-r--r--  arch/mn10300/mm/cache-flush-mn10300.S  | 192
-rw-r--r--  arch/mn10300/mm/cache-mn10300.S        | 289
-rw-r--r--  arch/mn10300/mm/cache.c                | 121
-rw-r--r--  arch/mn10300/mm/dma-alloc.c            |  56
-rw-r--r--  arch/mn10300/mm/extable.c              |  26
-rw-r--r--  arch/mn10300/mm/fault.c                | 405
-rw-r--r--  arch/mn10300/mm/init.c                 | 160
-rw-r--r--  arch/mn10300/mm/misalignment.c         | 661
-rw-r--r--  arch/mn10300/mm/mmu-context.c          |  80
-rw-r--r--  arch/mn10300/mm/pgtable.c              | 197
-rw-r--r--  arch/mn10300/mm/tlb-mn10300.S          | 207
12 files changed, 2408 insertions, 0 deletions
diff --git a/arch/mn10300/mm/Makefile b/arch/mn10300/mm/Makefile
new file mode 100644
index 000000000000..28b9d983db0c
--- /dev/null
+++ b/arch/mn10300/mm/Makefile
@@ -0,0 +1,14 @@
#
# Makefile for the MN10300-specific memory management code
#

obj-y := \
	init.o fault.o pgtable.o extable.o tlb-mn10300.o mmu-context.o \
	misalignment.o dma-alloc.o

ifneq ($(CONFIG_MN10300_CACHE_DISABLED),y)
obj-y	+= cache.o cache-mn10300.o
ifeq ($(CONFIG_MN10300_CACHE_WBACK),y)
obj-y	+= cache-flush-mn10300.o
endif
endif
diff --git a/arch/mn10300/mm/cache-flush-mn10300.S b/arch/mn10300/mm/cache-flush-mn10300.S
new file mode 100644
index 000000000000..c8ed1cbac107
--- /dev/null
+++ b/arch/mn10300/mm/cache-flush-mn10300.S
@@ -0,0 +1,192 @@
/* MN10300 CPU core caching routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>

	.am33_2
	.globl mn10300_dcache_flush
	.globl mn10300_dcache_flush_page
	.globl mn10300_dcache_flush_range
	.globl mn10300_dcache_flush_range2
	.globl mn10300_dcache_flush_inv
	.globl mn10300_dcache_flush_inv_page
	.globl mn10300_dcache_flush_inv_range
	.globl mn10300_dcache_flush_inv_range2

###############################################################################
#
# void mn10300_dcache_flush(void)
# Flush the entire data cache back to RAM
#
###############################################################################
	ALIGN
mn10300_dcache_flush:
	movhu	(CHCTR),d0
	btst	CHCTR_DCEN,d0
	beq	mn10300_dcache_flush_end

	# read the addresses tagged in the cache's tag RAM and attempt to flush
	# those addresses specifically
	# - we rely on the hardware to filter out invalid tag entry addresses
	mov	DCACHE_TAG(0,0),a0		# dcache tag RAM access address
	mov	DCACHE_PURGE(0,0),a1		# dcache purge request address
	mov	L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries

mn10300_dcache_flush_loop:
	mov	(a0),d0
	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
	or	L1_CACHE_TAG_VALID,d0		# retain valid entries in the
						# cache
	mov	d0,(a1)				# conditional purge

mn10300_dcache_flush_skip:
	add	L1_CACHE_BYTES,a0
	add	L1_CACHE_BYTES,a1
	add	-1,d1
	bne	mn10300_dcache_flush_loop

mn10300_dcache_flush_end:
	ret	[],0

###############################################################################
#
# void mn10300_dcache_flush_page(unsigned start)
# void mn10300_dcache_flush_range(unsigned start, unsigned end)
# void mn10300_dcache_flush_range2(unsigned start, unsigned size)
# Flush a range of addresses on a page in the dcache
#
###############################################################################
	ALIGN
mn10300_dcache_flush_page:
	mov	PAGE_SIZE,d1
mn10300_dcache_flush_range2:
	add	d0,d1
mn10300_dcache_flush_range:
	movm	[d2,d3],(sp)

	movhu	(CHCTR),d2
	btst	CHCTR_DCEN,d2
	beq	mn10300_dcache_flush_range_end

	# round start addr down
	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0
	mov	d0,a1

	add	L1_CACHE_BYTES,d1		# round end addr up
	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1

	# write a request to flush all instances of an address from the cache
	mov	DCACHE_PURGE(0,0),a0
	mov	a1,d0
	and	L1_CACHE_TAG_ENTRY,d0
	add	d0,a0				# starting dcache purge control
						# reg address

	sub	a1,d1
	lsr	L1_CACHE_SHIFT,d1		# total number of entries to
						# examine

	or	L1_CACHE_TAG_VALID,a1		# retain valid entries in the
						# cache

mn10300_dcache_flush_range_loop:
	mov	a1,(L1_CACHE_WAYDISP*0,a0)	# conditionally purge this line
						# all ways

	add	L1_CACHE_BYTES,a0
	add	L1_CACHE_BYTES,a1
	and	~L1_CACHE_WAYDISP,a0		# make sure we stay on way 0
	add	-1,d1
	bne	mn10300_dcache_flush_range_loop

mn10300_dcache_flush_range_end:
	ret	[d2,d3],8

###############################################################################
#
# void mn10300_dcache_flush_inv(void)
# Flush the entire data cache and invalidate all entries
#
###############################################################################
	ALIGN
mn10300_dcache_flush_inv:
	movhu	(CHCTR),d0
	btst	CHCTR_DCEN,d0
	beq	mn10300_dcache_flush_inv_end

	# hit each line in the dcache with an unconditional purge
	mov	DCACHE_PURGE(0,0),a1		# dcache purge request address
	mov	L1_CACHE_NWAYS*L1_CACHE_NENTRIES,d1  # total number of entries

mn10300_dcache_flush_inv_loop:
	mov	(a1),d0				# unconditional purge

	add	L1_CACHE_BYTES,a1
	add	-1,d1
	bne	mn10300_dcache_flush_inv_loop

mn10300_dcache_flush_inv_end:
	ret	[],0

###############################################################################
#
# void mn10300_dcache_flush_inv_page(unsigned start)
# void mn10300_dcache_flush_inv_range(unsigned start, unsigned end)
# void mn10300_dcache_flush_inv_range2(unsigned start, unsigned size)
# Flush and invalidate a range of addresses on a page in the dcache
#
###############################################################################
	ALIGN
mn10300_dcache_flush_inv_page:
	mov	PAGE_SIZE,d1
mn10300_dcache_flush_inv_range2:
	add	d0,d1
mn10300_dcache_flush_inv_range:
	movm	[d2,d3],(sp)
	movhu	(CHCTR),d2
	btst	CHCTR_DCEN,d2
	beq	mn10300_dcache_flush_inv_range_end

	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0  # round start
							    # addr down
	mov	d0,a1

	add	L1_CACHE_BYTES,d1		# round end addr up
	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1

	# write a request to flush and invalidate all instances of an address
	# from the cache
	mov	DCACHE_PURGE(0,0),a0
	mov	a1,d0
	and	L1_CACHE_TAG_ENTRY,d0
	add	d0,a0				# starting dcache purge control
						# reg address

	sub	a1,d1
	lsr	L1_CACHE_SHIFT,d1		# total number of entries to
						# examine

mn10300_dcache_flush_inv_range_loop:
	mov	a1,(L1_CACHE_WAYDISP*0,a0)	# conditionally purge this line
						# in all ways

	add	L1_CACHE_BYTES,a0
	add	L1_CACHE_BYTES,a1
	and	~L1_CACHE_WAYDISP,a0		# make sure we stay on way 0
	add	-1,d1
	bne	mn10300_dcache_flush_inv_range_loop

mn10300_dcache_flush_inv_range_end:
	ret	[d2,d3],8
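
For readers following the assembly, the flush walk is easier to see in C. The sketch below is an editorial aid, not part of the patch: the cache geometry constants and the mmio helpers are assumptions standing in for the raw loads and stores above.

/* Hedged C sketch of mn10300_dcache_flush: visit every dcache tag-RAM
 * slot, keep the address/entry bits of the tag, mark it valid and post
 * it to the purge register; the hardware only flushes a line whose tag
 * actually matches.  Geometry and bit masks are assumed, not taken
 * from asm/cache.h. */
#include <stdint.h>

#define L1_CACHE_NWAYS      4            /* assumption */
#define L1_CACHE_NENTRIES   256          /* assumption */
#define L1_CACHE_BYTES      16           /* assumption */
#define L1_CACHE_TAG_VALID  0x00000001u  /* assumption */
#define L1_CACHE_TAG_MASK   0xfffffff0u  /* TAG_ADDRESS|TAG_ENTRY, assumed */

static inline uint32_t mmio_read32(uintptr_t a)
{
	return *(volatile uint32_t *) a;
}

static inline void mmio_write32(uintptr_t a, uint32_t v)
{
	*(volatile uint32_t *) a = v;
}

static void dcache_flush_sketch(uintptr_t tag_base, uintptr_t purge_base)
{
	unsigned int n = L1_CACHE_NWAYS * L1_CACHE_NENTRIES;

	for (unsigned int i = 0; i < n; i++) {
		uint32_t tag = mmio_read32(tag_base + i * L1_CACHE_BYTES);

		tag &= L1_CACHE_TAG_MASK;	/* keep address + entry bits */
		tag |= L1_CACHE_TAG_VALID;	/* retain valid entries */
		mmio_write32(purge_base + i * L1_CACHE_BYTES, tag);
	}
}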
diff --git a/arch/mn10300/mm/cache-mn10300.S b/arch/mn10300/mm/cache-mn10300.S
new file mode 100644
index 000000000000..e839d0aedd69
--- /dev/null
+++ b/arch/mn10300/mm/cache-mn10300.S
@@ -0,0 +1,289 @@
/* MN10300 CPU core caching routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/smp.h>
#include <asm/page.h>
#include <asm/cache.h>

#define mn10300_dcache_inv_range_intr_interval \
	+((1 << MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL) - 1)

#if mn10300_dcache_inv_range_intr_interval > 0xff
#error MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL must be 8 or less
#endif

	.am33_2

	.globl mn10300_icache_inv
	.globl mn10300_dcache_inv
	.globl mn10300_dcache_inv_range
	.globl mn10300_dcache_inv_range2
	.globl mn10300_dcache_inv_page

###############################################################################
#
# void mn10300_icache_inv(void)
# Invalidate the entire icache
#
###############################################################################
	ALIGN
mn10300_icache_inv:
	mov	CHCTR,a0

	movhu	(a0),d0
	btst	CHCTR_ICEN,d0
	beq	mn10300_icache_inv_end

	mov	epsw,d1
	and	~EPSW_IE,epsw
	nop
	nop

	# disable the icache
	and	~CHCTR_ICEN,d0
	movhu	d0,(a0)

	# and wait for it to calm down
	setlb
	movhu	(a0),d0
	btst	CHCTR_ICBUSY,d0
	lne

	# invalidate
	or	CHCTR_ICINV,d0
	movhu	d0,(a0)

	# wait for the cache to finish
	mov	CHCTR,a0
	setlb
	movhu	(a0),d0
	btst	CHCTR_ICBUSY,d0
	lne

	# and reenable it
	and	~CHCTR_ICINV,d0
	or	CHCTR_ICEN,d0
	movhu	d0,(a0)
	movhu	(a0),d0

	mov	d1,epsw

mn10300_icache_inv_end:
	ret	[],0

###############################################################################
#
# void mn10300_dcache_inv(void)
# Invalidate the entire dcache
#
###############################################################################
	ALIGN
mn10300_dcache_inv:
	mov	CHCTR,a0

	movhu	(a0),d0
	btst	CHCTR_DCEN,d0
	beq	mn10300_dcache_inv_end

	mov	epsw,d1
	and	~EPSW_IE,epsw
	nop
	nop

	# disable the dcache
	and	~CHCTR_DCEN,d0
	movhu	d0,(a0)

	# and wait for it to calm down
	setlb
	movhu	(a0),d0
	btst	CHCTR_DCBUSY,d0
	lne

	# invalidate
	or	CHCTR_DCINV,d0
	movhu	d0,(a0)

	# wait for the cache to finish
	mov	CHCTR,a0
	setlb
	movhu	(a0),d0
	btst	CHCTR_DCBUSY,d0
	lne

	# and reenable it
	and	~CHCTR_DCINV,d0
	or	CHCTR_DCEN,d0
	movhu	d0,(a0)
	movhu	(a0),d0

	mov	d1,epsw

mn10300_dcache_inv_end:
	ret	[],0

###############################################################################
#
# void mn10300_dcache_inv_range(unsigned start, unsigned end)
# void mn10300_dcache_inv_range2(unsigned start, unsigned size)
# void mn10300_dcache_inv_page(unsigned start)
# Invalidate a range of addresses on a page in the dcache
#
###############################################################################
	ALIGN
mn10300_dcache_inv_page:
	mov	PAGE_SIZE,d1
mn10300_dcache_inv_range2:
	add	d0,d1
mn10300_dcache_inv_range:
	movm	[d2,d3,a2],(sp)
	mov	CHCTR,a2

	movhu	(a2),d2
	btst	CHCTR_DCEN,d2
	beq	mn10300_dcache_inv_range_end

	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d0  # round start
							    # addr down
	mov	d0,a1

	add	L1_CACHE_BYTES,d1		# round end addr up
	and	L1_CACHE_TAG_ADDRESS|L1_CACHE_TAG_ENTRY,d1

	clr	d2				# we're going to clear tag ram
						# entries

	# read the tags from the tag RAM, and if they indicate a valid dirty
	# cache line then invalidate that line
	mov	DCACHE_TAG(0,0),a0
	mov	a1,d0
	and	L1_CACHE_TAG_ENTRY,d0
	add	d0,a0				# starting dcache tag RAM
						# access address

	sub	a1,d1
	lsr	L1_CACHE_SHIFT,d1		# total number of entries to
						# examine

	and	~(L1_CACHE_DISPARITY-1),a1	# determine comparator base

mn10300_dcache_inv_range_outer_loop:
	# disable interrupts
	mov	epsw,d3
	and	~EPSW_IE,epsw
	nop					# note that reading CHCTR and
						# AND'ing D0 occupy two delay
						# slots after disabling
						# interrupts

	# disable the dcache
	movhu	(a2),d0
	and	~CHCTR_DCEN,d0
	movhu	d0,(a2)

	# and wait for it to calm down
	setlb
	movhu	(a2),d0
	btst	CHCTR_DCBUSY,d0
	lne

mn10300_dcache_inv_range_loop:

	# process the way 0 slot
	mov	(L1_CACHE_WAYDISP*0,a0),d0	# read the tag in the way 0 slot
	btst	L1_CACHE_TAG_VALID,d0
	beq	mn10300_dcache_inv_range_skip_0	# jump if this cacheline is not
						# valid

	xor	a1,d0
	lsr	12,d0
	bne	mn10300_dcache_inv_range_skip_0	# jump if not this cacheline

	mov	d2,(a0)				# kill the tag

mn10300_dcache_inv_range_skip_0:

	# process the way 1 slot
	mov	(L1_CACHE_WAYDISP*1,a0),d0	# read the tag in the way 1 slot
	btst	L1_CACHE_TAG_VALID,d0
	beq	mn10300_dcache_inv_range_skip_1	# jump if this cacheline is not
						# valid

	xor	a1,d0
	lsr	12,d0
	bne	mn10300_dcache_inv_range_skip_1	# jump if not this cacheline

	mov	d2,(a0)				# kill the tag

mn10300_dcache_inv_range_skip_1:

	# process the way 2 slot
	mov	(L1_CACHE_WAYDISP*2,a0),d0	# read the tag in the way 2 slot
	btst	L1_CACHE_TAG_VALID,d0
	beq	mn10300_dcache_inv_range_skip_2	# jump if this cacheline is not
						# valid

	xor	a1,d0
	lsr	12,d0
	bne	mn10300_dcache_inv_range_skip_2	# jump if not this cacheline

	mov	d2,(a0)				# kill the tag

mn10300_dcache_inv_range_skip_2:

	# process the way 3 slot
	mov	(L1_CACHE_WAYDISP*3,a0),d0	# read the tag in the way 3 slot
	btst	L1_CACHE_TAG_VALID,d0
	beq	mn10300_dcache_inv_range_skip_3	# jump if this cacheline is not
						# valid

	xor	a1,d0
	lsr	12,d0
	bne	mn10300_dcache_inv_range_skip_3	# jump if not this cacheline

	mov	d2,(a0)				# kill the tag

mn10300_dcache_inv_range_skip_3:

	# approx every N steps we re-enable the cache and see if there are any
	# interrupts to be processed
	# we also break out if we've reached the end of the loop
	# (the bottom nibble of the count is zero in both cases)
	add	L1_CACHE_BYTES,a0
	add	L1_CACHE_BYTES,a1
	add	-1,d1
	btst	mn10300_dcache_inv_range_intr_interval,d1
	bne	mn10300_dcache_inv_range_loop

	# wait for the cache to finish what it's doing
	setlb
	movhu	(a2),d0
	btst	CHCTR_DCBUSY,d0
	lne

	# and reenable it
	or	CHCTR_DCEN,d0
	movhu	d0,(a2)
	movhu	(a2),d0

	# re-enable interrupts
	# - we don't bother with delay NOPs as we'll have enough instructions
	#   before we disable interrupts again to give the interrupts a chance
	#   to happen
	mov	d3,epsw

	# go around again if the counter hasn't yet reached zero
	add	0,d1
	bne	mn10300_dcache_inv_range_outer_loop

mn10300_dcache_inv_range_end:
	ret	[d2,d3,a2],12
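
The distinctive part of mn10300_dcache_inv_range is its interrupt windowing: tags can only be rewritten while the dcache is disabled, so the walk runs in bursts of 2^MN10300_DCACHE_INV_RANGE_INTR_LOG2_INTERVAL lines with interrupts allowed in between. A hedged C rendering of just that control structure follows; every helper named here is a hypothetical stand-in for an instruction sequence above.

/* Hedged sketch of the chunked invalidation loop.  The helpers are
 * hypothetical stand-ins: irq_save_disable()/irq_restore() model the
 * EPSW_IE manipulation, the dcache_* helpers model the CHCTR
 * disable/poll/enable sequences. */
#define INV_BURST_MASK ((1u << 4) - 1)	/* cf. the LOG2_INTERVAL build check */

extern unsigned long irq_save_disable(void);		/* hypothetical */
extern void irq_restore(unsigned long flags);		/* hypothetical */
extern void dcache_disable_and_wait_idle(void);		/* hypothetical */
extern void dcache_enable_and_wait_idle(void);		/* hypothetical */
extern void invalidate_line_in_all_ways(void);		/* hypothetical */

static void dcache_inv_range_sketch(unsigned int nr_lines)
{
	while (nr_lines) {
		unsigned long flags = irq_save_disable();

		dcache_disable_and_wait_idle();

		do {
			invalidate_line_in_all_ways();
			nr_lines--;
			/* keep going until the low bits of the counter hit
			 * zero: end of a burst or end of the whole range */
		} while (nr_lines & INV_BURST_MASK);

		dcache_enable_and_wait_idle();
		irq_restore(flags);	/* pending interrupts get a chance */
	}
}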
diff --git a/arch/mn10300/mm/cache.c b/arch/mn10300/mm/cache.c
new file mode 100644
index 000000000000..1b76719ec1c3
--- /dev/null
+++ b/arch/mn10300/mm/cache.c
@@ -0,0 +1,121 @@
/* MN10300 Cache flushing routines
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>

EXPORT_SYMBOL(mn10300_icache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv_range);
EXPORT_SYMBOL(mn10300_dcache_inv_range2);
EXPORT_SYMBOL(mn10300_dcache_inv_page);

#ifdef CONFIG_MN10300_CACHE_WBACK
EXPORT_SYMBOL(mn10300_dcache_flush);
EXPORT_SYMBOL(mn10300_dcache_flush_inv);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_range);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_range2);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_page);
EXPORT_SYMBOL(mn10300_dcache_flush_range);
EXPORT_SYMBOL(mn10300_dcache_flush_range2);
EXPORT_SYMBOL(mn10300_dcache_flush_page);
#endif

/*
 * write a page back from the dcache and invalidate the icache so that we can
 * run code from it that we've just written into it
 */
void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
	mn10300_dcache_flush_page(page_to_phys(page));
	mn10300_icache_inv();
}
EXPORT_SYMBOL(flush_icache_page);

/*
 * write some code we've just written back from the dcache and invalidate the
 * icache so that we can run that code
 */
void flush_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_MN10300_CACHE_WBACK
	unsigned long addr, size, off;
	struct page *page;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *ppte, pte;

	for (; start < end; start += size) {
		/* work out how much of the page to flush */
		off = start & (PAGE_SIZE - 1);

		size = end - start;
		if (size > PAGE_SIZE - off)
			size = PAGE_SIZE - off;

		/* get the physical address the page is mapped to from the page
		 * tables */
		pgd = pgd_offset(current->mm, start);
		if (!pgd || !pgd_val(*pgd))
			continue;

		pud = pud_offset(pgd, start);
		if (!pud || !pud_val(*pud))
			continue;

		pmd = pmd_offset(pud, start);
		if (!pmd || !pmd_val(*pmd))
			continue;

		ppte = pte_offset_map(pmd, start);
		if (!ppte)
			continue;
		pte = *ppte;
		pte_unmap(ppte);

		if (pte_none(pte))
			continue;

		page = pte_page(pte);
		if (!page)
			continue;

		addr = page_to_phys(page);

		/* flush the dcache and invalidate the icache coverage on that
		 * region */
		mn10300_dcache_flush_range2(addr + off, size);
	}
#endif

	mn10300_icache_inv();
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * allow userspace to flush the instruction cache
 */
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
{
	if (end < start)
		return -EINVAL;

	flush_icache_range(start, end);
	return 0;
}
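
sys_cacheflush is the userspace entry point to this machinery; anything that writes instructions (a JIT, a dynamic linker) must call it before executing them. A hedged usage sketch, assuming the port wires the syscall up as __NR_cacheflush:

/* Hedged userspace sketch: make freshly written code executable.
 * __NR_cacheflush is assumed to be provided by the port's unistd.h. */
#include <unistd.h>
#include <sys/syscall.h>

static int flush_jit_code(void *code, unsigned long len)
{
	unsigned long start = (unsigned long) code;

	/* sys_cacheflush(start, end); returns -EINVAL if end < start */
	return syscall(__NR_cacheflush, start, start + len);
}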
diff --git a/arch/mn10300/mm/dma-alloc.c b/arch/mn10300/mm/dma-alloc.c
new file mode 100644
index 000000000000..f3649d8f50e3
--- /dev/null
+++ b/arch/mn10300/mm/dma-alloc.c
@@ -0,0 +1,56 @@
/* MN10300 Dynamic DMA mapping support
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 * Derived from: arch/i386/kernel/pci-dma.c
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <asm/io.h>

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, int gfp)
{
	unsigned long addr;
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	addr = __get_free_pages(gfp, get_order(size));
	if (!addr)
		return NULL;

	/* map the coherent memory through the uncached memory window */
	ret = (void *) (addr | 0x20000000);

	/* fill the memory with obvious rubbish */
	memset((void *) addr, 0xfb, size);

	/* write back and evict all cache lines covering this region */
	mn10300_dcache_flush_inv_range2(virt_to_phys((void *) addr), PAGE_SIZE);

	*dma_handle = virt_to_bus((void *) addr);
	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		       dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr & ~0x20000000;

	free_pages(addr, get_order(size));
}
EXPORT_SYMBOL(dma_free_coherent);
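
A hedged sketch of the consumer side of this allocator. The device structure, register offset and ring size are illustrative only; the point is that memory returned through the 0x20000000 uncached window needs no further flushing before the device sees CPU writes.

/* Hedged driver-side sketch: allocate a descriptor ring with
 * dma_alloc_coherent() and hand the bus address to the hardware.
 * 'struct mydev', MYDEV_RING_BASE and RING_BYTES are hypothetical. */
#include <linux/dma-mapping.h>
#include <linux/io.h>

#define RING_BYTES	4096
#define MYDEV_RING_BASE	0x10		/* hypothetical register offset */

struct mydev {
	void __iomem *regs;
	void *ring;
	dma_addr_t ring_dma;
};

static int mydev_alloc_ring(struct mydev *md, struct device *dev)
{
	md->ring = dma_alloc_coherent(dev, RING_BYTES, &md->ring_dma,
				      GFP_KERNEL);
	if (!md->ring)
		return -ENOMEM;

	/* the mapping is uncached, so the device sees CPU writes to the
	 * ring without any explicit dcache flushing */
	writel(md->ring_dma, md->regs + MYDEV_RING_BASE);
	return 0;
}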
diff --git a/arch/mn10300/mm/extable.c b/arch/mn10300/mm/extable.c
new file mode 100644
index 000000000000..25e5485ab87d
--- /dev/null
+++ b/arch/mn10300/mm/extable.c
@@ -0,0 +1,26 @@
/* MN10300 In-kernel exception handling
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/uaccess.h>

int fixup_exception(struct pt_regs *regs)
{
	const struct exception_table_entry *fixup;

	fixup = search_exception_tables(regs->pc);
	if (fixup) {
		regs->pc = fixup->fixup;
		return 1;
	}

	return 0;
}
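
fixup_exception() only works because every potentially faulting userspace access records a (faulting instruction, recovery address) pair in the kernel's exception table. A simplified illustration of that contract; the real search_exception_tables() does a sorted binary search over __ex_table rather than the linear scan shown here.

/* Simplified sketch of the exception-table contract: map a faulting
 * instruction address to the address execution should resume at. */
struct extable_entry_sketch {
	unsigned long insn;	/* address of the instruction that may fault */
	unsigned long fixup;	/* address to jump to if it does */
};

static const struct extable_entry_sketch *
find_fixup_sketch(const struct extable_entry_sketch *tbl, unsigned int n,
		  unsigned long faulting_pc)
{
	for (unsigned int i = 0; i < n; i++)
		if (tbl[i].insn == faulting_pc)
			return &tbl[i];
	return 0;	/* no fixup: a kernel-mode fault here is fatal */
}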
diff --git a/arch/mn10300/mm/fault.c b/arch/mn10300/mm/fault.c
new file mode 100644
index 000000000000..78f092ca0316
--- /dev/null
+++ b/arch/mn10300/mm/fault.c
@@ -0,0 +1,405 @@
/* MN10300 MMU Fault handler
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/vt_kern.h>		/* For unblank_screen() */

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/hardirq.h>
#include <asm/gdb-stub.h>
#include <asm/cpu-regs.h>

/*
 * Unlock any spinlocks which will prevent us from getting the
 * message out
 */
void bust_spinlocks(int yes)
{
	if (yes) {
		oops_in_progress = 1;
#ifdef CONFIG_SMP
		/* Many serial drivers do __global_cli() */
		global_irq_lock = 0;
#endif
	} else {
		int loglevel_save = console_loglevel;
#ifdef CONFIG_VT
		unblank_screen();
#endif
		oops_in_progress = 0;
		/*
		 * OK, the message is on the console.  Now we call printk()
		 * without oops_in_progress set so that printk will give klogd
		 * a poke.  Hold onto your hats...
		 */
		console_loglevel = 15;	/* NMI oopser may have shut the console
					 * up */
		printk(" ");
		console_loglevel = loglevel_save;
	}
}

void do_BUG(const char *file, int line)
{
	bust_spinlocks(1);
	printk(KERN_EMERG "------------[ cut here ]------------\n");
	printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
}

#if 0
static void print_pagetable_entries(pgd_t *pgdir, unsigned long address)
{
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;

	pgd = pgdir + __pgd_offset(address);
	printk(KERN_DEBUG "pgd entry %p: %016Lx\n",
	       pgd, (long long) pgd_val(*pgd));

	if (!pgd_present(*pgd)) {
		printk(KERN_DEBUG "... pgd not present!\n");
		return;
	}
	pmd = pmd_offset(pgd, address);
	printk(KERN_DEBUG "pmd entry %p: %016Lx\n",
	       pmd, (long long) pmd_val(*pmd));

	if (!pmd_present(*pmd)) {
		printk(KERN_DEBUG "... pmd not present!\n");
		return;
	}
	pte = pte_offset(pmd, address);
	printk(KERN_DEBUG "pte entry %p: %016Lx\n",
	       pte, (long long) pte_val(*pte));

	if (!pte_present(*pte))
		printk(KERN_DEBUG "... pte not present!\n");
}
#endif

asmlinkage void monitor_signal(struct pt_regs *);

/*
 * This routine handles page faults.  It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 *
 * fault_code:
 * - LSW: either MMUFCR_IFC or MMUFCR_DFC as appropriate
 * - MSW: 0 if data access, 1 if instruction access
 * - bit 0: TLB miss flag
 * - bit 1: initial write
 * - bit 2: page invalid
 * - bit 3: protection violation
 * - bit 4: accessor (0=user 1=kernel)
 * - bit 5: 0=read 1=write
 * - bit 6-8: page protection spec
 * - bit 9: illegal address
 * - bit 16: 0=data 1=ins
 */
asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
			      unsigned long address)
{
	struct vm_area_struct *vma;
	struct task_struct *tsk;
	struct mm_struct *mm;
	unsigned long page;
	siginfo_t info;
	int write, fault;

#ifdef CONFIG_GDBSTUB
	/* handle GDB stub causing a fault */
	if (gdbstub_busy) {
		gdbstub_exception(regs, TBR & TBR_INT_CODE);
		return;
	}
#endif

#if 0
	printk(KERN_DEBUG "--- do_page_fault(%p,%s:%04lx,%08lx)\n",
	       regs,
	       fault_code & 0x10000 ? "ins" : "data",
	       fault_code & 0xffff, address);
#endif

	tsk = current;

	/*
	 * We fault-in kernel-space virtual memory on-demand.  The
	 * 'reference' page table is init_mm.pgd.
	 *
	 * NOTE! We MUST NOT take any locks for this case.  We may
	 * be in an interrupt or a critical region, and should
	 * only copy the information from the master page table,
	 * nothing more.
	 *
	 * This verifies that the fault happens in kernel space
	 * and that the fault was a page not present (invalid) error
	 */
	if (address >= VMALLOC_START && address < VMALLOC_END &&
	    (fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR &&
	    (fault_code & MMUFCR_xFC_PGINVAL) == MMUFCR_xFC_PGINVAL
	    )
		goto vmalloc_fault;

	mm = tsk->mm;
	info.si_code = SEGV_MAPERR;

	/*
	 * If we're in an interrupt or have no user
	 * context, we must not take the fault..
	 */
	if (in_interrupt() || !mm)
		goto no_context;

	down_read(&mm->mmap_sem);

	vma = find_vma(mm, address);
	if (!vma)
		goto bad_area;
	if (vma->vm_start <= address)
		goto good_area;
	if (!(vma->vm_flags & VM_GROWSDOWN))
		goto bad_area;

	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
		/* accessing the stack below the stack pointer is always a
		 * bug */
		if ((address & PAGE_MASK) + 2 * PAGE_SIZE < regs->sp) {
#if 0
			printk(KERN_WARNING
			       "[%d] ### Access below stack @%lx (sp=%lx)\n",
			       current->pid, address, regs->sp);
			printk(KERN_WARNING
			       "vma [%08x - %08x]\n",
			       vma->vm_start, vma->vm_end);
			show_registers(regs);
			printk(KERN_WARNING
			       "[%d] ### Code: [%08lx]"
			       " %02x %02x %02x %02x %02x %02x %02x %02x\n",
			       current->pid,
			       regs->pc,
			       ((u8 *) regs->pc)[0],
			       ((u8 *) regs->pc)[1],
			       ((u8 *) regs->pc)[2],
			       ((u8 *) regs->pc)[3],
			       ((u8 *) regs->pc)[4],
			       ((u8 *) regs->pc)[5],
			       ((u8 *) regs->pc)[6],
			       ((u8 *) regs->pc)[7]
			       );
#endif
			goto bad_area;
		}
	}

	if (expand_stack(vma, address))
		goto bad_area;

/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
	info.si_code = SEGV_ACCERR;
	write = 0;
	switch (fault_code & (MMUFCR_xFC_PGINVAL|MMUFCR_xFC_TYPE)) {
	default:	/* 3: write, present */
	case MMUFCR_xFC_TYPE_WRITE:
#ifdef TEST_VERIFY_AREA
		if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
			printk(KERN_DEBUG "WP fault at %08lx\n", regs->pc);
#endif
		/* write to absent page */
	case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_WRITE:
		if (!(vma->vm_flags & VM_WRITE))
			goto bad_area;
		write++;
		break;

		/* read from protected page */
	case MMUFCR_xFC_TYPE_READ:
		goto bad_area;

		/* read from absent page */
	case MMUFCR_xFC_PGINVAL | MMUFCR_xFC_TYPE_READ:
		if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
			goto bad_area;
		break;
	}

	/*
	 * If for any reason at all we couldn't handle the fault,
	 * make sure we exit gracefully rather than endlessly redo
	 * the fault.
	 */
	fault = handle_mm_fault(mm, vma, address, write);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		if (fault & VM_FAULT_OOM)
			goto out_of_memory;
		else if (fault & VM_FAULT_SIGBUS)
			goto do_sigbus;
		BUG();
	}
	if (fault & VM_FAULT_MAJOR)
		current->maj_flt++;
	else
		current->min_flt++;

	up_read(&mm->mmap_sem);
	return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
	up_read(&mm->mmap_sem);
	monitor_signal(regs);

	/* User mode accesses just cause a SIGSEGV */
	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR) {
		info.si_signo = SIGSEGV;
		info.si_errno = 0;
		/* info.si_code has been set above */
		info.si_addr = (void *)address;
		force_sig_info(SIGSEGV, &info, tsk);
		return;
	}

no_context:
	monitor_signal(regs);
	/* Are we prepared to handle this kernel fault?  */
	if (fixup_exception(regs))
		return;

/*
 * Oops. The kernel tried to access some bad page. We'll have to
 * terminate things with extreme prejudice.
 */

	bust_spinlocks(1);

	if (address < PAGE_SIZE)
		printk(KERN_ALERT
		       "Unable to handle kernel NULL pointer dereference");
	else
		printk(KERN_ALERT
		       "Unable to handle kernel paging request");
	printk(" at virtual address %08lx\n", address);
	printk(" printing pc:\n");
	printk(KERN_ALERT "%08lx\n", regs->pc);

#ifdef CONFIG_GDBSTUB
	gdbstub_intercept(
		regs, fault_code & 0x00010000 ? EXCEP_IAERROR : EXCEP_DAERROR);
#endif

	page = PTBR;
	page = ((unsigned long *) __va(page))[address >> 22];
	printk(KERN_ALERT "*pde = %08lx\n", page);
	if (page & 1) {
		page &= PAGE_MASK;
		address &= 0x003ff000;
		page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
		printk(KERN_ALERT "*pte = %08lx\n", page);
	}

	die("Oops", regs, fault_code);
	do_exit(SIGKILL);

/*
 * We ran out of memory, or some other thing happened to us that made
 * us unable to handle the page fault gracefully.
 */
out_of_memory:
	up_read(&mm->mmap_sem);
	monitor_signal(regs);
	printk(KERN_ALERT "VM: killing process %s\n", tsk->comm);
	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
		do_exit(SIGKILL);
	goto no_context;

do_sigbus:
	up_read(&mm->mmap_sem);
	monitor_signal(regs);

	/*
	 * Send a sigbus, regardless of whether we were in kernel
	 * or user mode.
	 */
	info.si_signo = SIGBUS;
	info.si_errno = 0;
	info.si_code = BUS_ADRERR;
	info.si_addr = (void *)address;
	force_sig_info(SIGBUS, &info, tsk);

	/* Kernel mode? Handle exceptions or die */
	if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_SR)
		goto no_context;
	return;

vmalloc_fault:
	{
		/*
		 * Synchronize this task's top level page-table
		 * with the 'reference' page table.
		 *
		 * Do _not_ use "tsk" here. We might be inside
		 * an interrupt in the middle of a task switch..
		 */
		int index = pgd_index(address);
		pgd_t *pgd, *pgd_k;
		pud_t *pud, *pud_k;
		pmd_t *pmd, *pmd_k;
		pte_t *pte_k;

		pgd_k = init_mm.pgd + index;

		if (!pgd_present(*pgd_k))
			goto no_context;

		pud_k = pud_offset(pgd_k, address);
		if (!pud_present(*pud_k))
			goto no_context;

		pmd_k = pmd_offset(pud_k, address);
		if (!pmd_present(*pmd_k))
			goto no_context;

		pgd = (pgd_t *) PTBR + index;
		pud = pud_offset(pgd, address);
		pmd = pmd_offset(pud, address);
		set_pmd(pmd, *pmd_k);

		pte_k = pte_offset_kernel(pmd_k, address);
		if (!pte_present(*pte_k))
			goto no_context;
		return;
	}
}
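
The fault_code layout documented above do_page_fault() packs several flags into one word; a hedged decode helper spelling those bits out. The masks below are transcribed from the comment block, not from the MMUFCR_* macros in cpu-regs.h, so treat them as assumptions.

/* Hedged decode of fault_code; bit positions follow the comment block
 * above do_page_fault(), not verified register definitions. */
#include <linux/kernel.h>

static void decode_fault_code_sketch(unsigned long fault_code)
{
	printk(KERN_DEBUG "fault: %s %s in %s mode%s%s%s%s\n",
	       (fault_code & 0x10000) ? "insn" : "data",	/* bit 16 */
	       (fault_code & 0x0020) ? "write" : "read",	/* bit 5 */
	       (fault_code & 0x0010) ? "kernel" : "user",	/* bit 4 */
	       (fault_code & 0x0001) ? ", TLB miss" : "",	/* bit 0 */
	       (fault_code & 0x0002) ? ", initial write" : "",	/* bit 1 */
	       (fault_code & 0x0004) ? ", page invalid" : "",	/* bit 2 */
	       (fault_code & 0x0008) ? ", protection violation" : ""); /* bit 3 */
}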
diff --git a/arch/mn10300/mm/init.c b/arch/mn10300/mm/init.c
new file mode 100644
index 000000000000..8c5d88c7b90a
--- /dev/null
+++ b/arch/mn10300/mm/init.c
@@ -0,0 +1,160 @@
/* MN10300 Memory management initialisation
 *
 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Modified by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>

#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/tlb.h>
#include <asm/sections.h>

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

unsigned long highstart_pfn, highend_pfn;

/*
 * set up paging
 */
void __init paging_init(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = {0,};
	pte_t *ppte;
	int loop;

	/* main kernel space -> RAM mapping is handled as 1:1 transparent by
	 * the MMU */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
	memset(kernel_vmalloc_ptes, 0, sizeof(kernel_vmalloc_ptes));

	/* load the VMALLOC area PTE table addresses into the kernel PGD */
	ppte = kernel_vmalloc_ptes;
	for (loop = VMALLOC_START / (PAGE_SIZE * PTRS_PER_PTE);
	     loop < VMALLOC_END / (PAGE_SIZE * PTRS_PER_PTE);
	     loop++
	     ) {
		set_pgd(swapper_pg_dir + loop, __pgd(__pa(ppte) | _PAGE_TABLE));
		ppte += PAGE_SIZE / sizeof(pte_t);
	}

	/* declare the sizes of the RAM zones (only use the normal zone) */
	zones_size[ZONE_NORMAL] =
		(contig_page_data.bdata->node_low_pfn) -
		(contig_page_data.bdata->node_boot_start >> PAGE_SHIFT);

	/* pass the memory from the bootmem allocator to the main allocator */
	free_area_init(zones_size);

	__flush_tlb_all();
}

/*
 * transfer all the memory from the bootmem allocator to the runtime allocator
 */
void __init mem_init(void)
{
	int codesize, reservedpages, datasize, initsize;
	int tmp;

	if (!mem_map)
		BUG();

#define START_PFN	(contig_page_data.bdata->node_boot_start >> PAGE_SHIFT)
#define MAX_LOW_PFN	(contig_page_data.bdata->node_low_pfn)

	max_mapnr = num_physpages = MAX_LOW_PFN - START_PFN;
	high_memory = (void *) __va(MAX_LOW_PFN * PAGE_SIZE);

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	totalram_pages += free_all_bootmem();

	reservedpages = 0;
	for (tmp = 0; tmp < num_physpages; tmp++)
		if (PageReserved(&mem_map[tmp]))
			reservedpages++;

	codesize = (unsigned long) &_etext - (unsigned long) &_stext;
	datasize = (unsigned long) &_edata - (unsigned long) &_etext;
	initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;

	printk(KERN_INFO
	       "Memory: %luk/%luk available"
	       " (%dk kernel code, %dk reserved, %dk data, %dk init,"
	       " %ldk highmem)\n",
	       (unsigned long) nr_free_pages() << (PAGE_SHIFT - 10),
	       max_mapnr << (PAGE_SHIFT - 10),
	       codesize >> 10,
	       reservedpages << (PAGE_SHIFT - 10),
	       datasize >> 10,
	       initsize >> 10,
	       (unsigned long) (totalhigh_pages << (PAGE_SHIFT - 10))
	       );
}

/*
 * free and poison a range of pages that were only needed during
 * initialisation
 */
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
	unsigned long addr;

	for (addr = begin; addr < end; addr += PAGE_SIZE) {
		ClearPageReserved(virt_to_page(addr));
		init_page_count(virt_to_page(addr));
		memset((void *) addr, 0xcc, PAGE_SIZE);
		free_page(addr);
		totalram_pages++;
	}
	printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}

/*
 * recycle memory containing stuff only required for initialisation
 */
void free_initmem(void)
{
	free_init_pages("unused kernel memory",
			(unsigned long) &__init_begin,
			(unsigned long) &__init_end);
}

/*
 * dispose of the memory on which the initial ramdisk resided
 */
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_init_pages("initrd memory", start, end);
}
#endif
diff --git a/arch/mn10300/mm/misalignment.c b/arch/mn10300/mm/misalignment.c
new file mode 100644
index 000000000000..32aa89dc3848
--- /dev/null
+++ b/arch/mn10300/mm/misalignment.c
@@ -0,0 +1,661 @@
/* MN10300 Misalignment fixup handler
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include <asm/smp.h>
#include <asm/pgalloc.h>
#include <asm/cpu-regs.h>
#include <asm/busctl-regs.h>
#include <asm/fpu.h>
#include <asm/gdb-stub.h>
#include <asm/asm-offsets.h>

#if 0
#define kdebug(FMT, ...) printk(KERN_DEBUG FMT, ##__VA_ARGS__)
#else
#define kdebug(FMT, ...) do {} while (0)
#endif

static int misalignment_addr(unsigned long *registers, unsigned params,
			     unsigned opcode, unsigned disp,
			     void **_address, unsigned long **_postinc);

static int misalignment_reg(unsigned long *registers, unsigned params,
			    unsigned opcode, unsigned disp,
			    unsigned long **_register);

static inline unsigned int_log2(unsigned x)
{
	unsigned y;
	asm("bsch %1,%0" : "=r"(y) : "r"(x), "0"(0));
	return y;
}
#define log2(x) int_log2(x)

static const unsigned Dreg_index[] = {
	REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
};

static const unsigned Areg_index[] = {
	REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2
};

static const unsigned Rreg_index[] = {
	REG_E0 >> 2, REG_E1 >> 2, REG_E2 >> 2, REG_E3 >> 2,
	REG_E4 >> 2, REG_E5 >> 2, REG_E6 >> 2, REG_E7 >> 2,
	REG_A0 >> 2, REG_A1 >> 2, REG_A2 >> 2, REG_A3 >> 2,
	REG_D0 >> 2, REG_D1 >> 2, REG_D2 >> 2, REG_D3 >> 2
};

enum format_id {
	FMT_S0,
	FMT_S1,
	FMT_S2,
	FMT_S4,
	FMT_D0,
	FMT_D1,
	FMT_D2,
	FMT_D4,
	FMT_D6,
	FMT_D7,
	FMT_D8,
	FMT_D9,
};

struct {
	u_int8_t opsz, dispsz;
} format_tbl[16] = {
	[FMT_S0]	= { 8,	0  },
	[FMT_S1]	= { 8,	8  },
	[FMT_S2]	= { 8,	16 },
	[FMT_S4]	= { 8,	32 },
	[FMT_D0]	= { 16,	0  },
	[FMT_D1]	= { 16,	8  },
	[FMT_D2]	= { 16,	16 },
	[FMT_D4]	= { 16,	32 },
	[FMT_D6]	= { 24,	0  },
	[FMT_D7]	= { 24,	8  },
	[FMT_D8]	= { 24,	24 },
	[FMT_D9]	= { 24,	32 },
};

enum value_id {
	DM0,		/* data reg in opcode in bits 0-1 */
	DM1,		/* data reg in opcode in bits 2-3 */
	DM2,		/* data reg in opcode in bits 4-5 */
	AM0,		/* addr reg in opcode in bits 0-1 */
	AM1,		/* addr reg in opcode in bits 2-3 */
	AM2,		/* addr reg in opcode in bits 4-5 */
	RM0,		/* reg in opcode in bits 0-3 */
	RM1,		/* reg in opcode in bits 2-5 */
	RM2,		/* reg in opcode in bits 4-7 */
	RM4,		/* reg in opcode in bits 8-11 */
	RM6,		/* reg in opcode in bits 12-15 */

	RD0,		/* reg in displacement in bits 0-3 */
	RD2,		/* reg in displacement in bits 4-7 */

	SP,		/* stack pointer */

	SD8,		/* 8-bit signed displacement */
	SD16,		/* 16-bit signed displacement */
	SD24,		/* 24-bit signed displacement */
	SIMM4_2,	/* 4-bit signed displacement in opcode bits 4-7 */
	SIMM8,		/* 8-bit signed immediate */
	IMM24,		/* 24-bit unsigned immediate */
	IMM32,		/* 32-bit unsigned immediate */
	IMM32_HIGH8,	/* 32-bit unsigned immediate, high 8-bits in opcode */

	DN0	= DM0,
	DN1	= DM1,
	DN2	= DM2,
	AN0	= AM0,
	AN1	= AM1,
	AN2	= AM2,
	RN0	= RM0,
	RN1	= RM1,
	RN2	= RM2,
	RN4	= RM4,
	RN6	= RM6,
	DI	= DM1,
	RI	= RM2,

};

struct mn10300_opcode {
	const char	*name;
	u_int32_t	opcode;
	u_int32_t	opmask;
	unsigned	exclusion;

	enum format_id	format;

	unsigned	cpu_mask;
#define AM33	330

	unsigned	params[2];
#define MEM(ADDR)		(0x80000000 | (ADDR))
#define MEM2(ADDR1, ADDR2)	(0x80000000 | (ADDR1) << 8 | (ADDR2))
#define MEMINC(ADDR)		(0x81000000 | (ADDR))
#define MEMINC2(ADDR, INC)	(0x81000000 | (ADDR) << 8 | (INC))
};

/* LIBOPCODES EXCERPT
   Assemble Matsushita MN10300 instructions.
   Copyright 1996, 1997, 1998, 1999, 2000 Free Software Foundation, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public Licence as published by
   the Free Software Foundation; either version 2 of the Licence, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public Licence for more details.

   You should have received a copy of the GNU General Public Licence
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
static const struct mn10300_opcode mn10300_opcodes[] = {
{ "mov",	0x60,		0xf0,		0, FMT_S0, 0,	{DM1, MEM(AN0)}},
{ "mov",	0x70,		0xf0,		0, FMT_S0, 0,	{MEM(AM0), DN1}},
{ "mov",	0xf000,		0xfff0,		0, FMT_D0, 0,	{MEM(AM0), AN1}},
{ "mov",	0xf010,		0xfff0,		0, FMT_D0, 0,	{AM1, MEM(AN0)}},
{ "mov",	0xf300,		0xffc0,		0, FMT_D0, 0,	{MEM2(DI, AM0), DN2}},
{ "mov",	0xf340,		0xffc0,		0, FMT_D0, 0,	{DM2, MEM2(DI, AN0)}},
{ "mov",	0xf380,		0xffc0,		0, FMT_D0, 0,	{MEM2(DI, AM0), AN2}},
{ "mov",	0xf3c0,		0xffc0,		0, FMT_D0, 0,	{AM2, MEM2(DI, AN0)}},
{ "mov",	0xf80000,	0xfff000,	0, FMT_D1, 0,	{MEM2(SD8, AM0), DN1}},
{ "mov",	0xf81000,	0xfff000,	0, FMT_D1, 0,	{DM1, MEM2(SD8, AN0)}},
{ "mov",	0xf82000,	0xfff000,	0, FMT_D1, 0,	{MEM2(SD8,AM0), AN1}},
{ "mov",	0xf83000,	0xfff000,	0, FMT_D1, 0,	{AM1, MEM2(SD8, AN0)}},
{ "mov",	0xf8f000,	0xfffc00,	0, FMT_D1, AM33,	{MEM2(SD8, AM0), SP}},
{ "mov",	0xf8f400,	0xfffc00,	0, FMT_D1, AM33,	{SP, MEM2(SD8, AN0)}},
{ "mov",	0xf90a00,	0xffff00,	0, FMT_D6, AM33,	{MEM(RM0), RN2}},
{ "mov",	0xf91a00,	0xffff00,	0, FMT_D6, AM33,	{RM2, MEM(RN0)}},
{ "mov",	0xf96a00,	0xffff00,	0x12, FMT_D6, AM33,	{MEMINC(RM0), RN2}},
{ "mov",	0xf97a00,	0xffff00,	0, FMT_D6, AM33,	{RM2, MEMINC(RN0)}},
{ "mov",	0xfa000000,	0xfff00000,	0, FMT_D2, 0,	{MEM2(SD16, AM0), DN1}},
{ "mov",	0xfa100000,	0xfff00000,	0, FMT_D2, 0,	{DM1, MEM2(SD16, AN0)}},
{ "mov",	0xfa200000,	0xfff00000,	0, FMT_D2, 0,	{MEM2(SD16, AM0), AN1}},
{ "mov",	0xfa300000,	0xfff00000,	0, FMT_D2, 0,	{AM1, MEM2(SD16, AN0)}},
{ "mov",	0xfb0a0000,	0xffff0000,	0, FMT_D7, AM33,	{MEM2(SD8, RM0), RN2}},
{ "mov",	0xfb1a0000,	0xffff0000,	0, FMT_D7, AM33,	{RM2, MEM2(SD8, RN0)}},
{ "mov",	0xfb6a0000,	0xffff0000,	0x22, FMT_D7, AM33,	{MEMINC2 (RM0, SIMM8), RN2}},
{ "mov",	0xfb7a0000,	0xffff0000,	0, FMT_D7, AM33,	{RM2, MEMINC2 (RN0, SIMM8)}},
{ "mov",	0xfb8e0000,	0xffff000f,	0, FMT_D7, AM33,	{MEM2(RI, RM0), RD2}},
{ "mov",	0xfb9e0000,	0xffff000f,	0, FMT_D7, AM33,	{RD2, MEM2(RI, RN0)}},
{ "mov",	0xfc000000,	0xfff00000,	0, FMT_D4, 0,	{MEM2(IMM32,AM0), DN1}},
{ "mov",	0xfc100000,	0xfff00000,	0, FMT_D4, 0,	{DM1, MEM2(IMM32,AN0)}},
{ "mov",	0xfc200000,	0xfff00000,	0, FMT_D4, 0,	{MEM2(IMM32,AM0), AN1}},
{ "mov",	0xfc300000,	0xfff00000,	0, FMT_D4, 0,	{AM1, MEM2(IMM32,AN0)}},
{ "mov",	0xfd0a0000,	0xffff0000,	0, FMT_D8, AM33,	{MEM2(SD24, RM0), RN2}},
{ "mov",	0xfd1a0000,	0xffff0000,	0, FMT_D8, AM33,	{RM2, MEM2(SD24, RN0)}},
{ "mov",	0xfd6a0000,	0xffff0000,	0x22, FMT_D8, AM33,	{MEMINC2 (RM0, IMM24), RN2}},
{ "mov",	0xfd7a0000,	0xffff0000,	0, FMT_D8, AM33,	{RM2, MEMINC2 (RN0, IMM24)}},
{ "mov",	0xfe0a0000,	0xffff0000,	0, FMT_D9, AM33,	{MEM2(IMM32_HIGH8,RM0), RN2}},
{ "mov",	0xfe1a0000,	0xffff0000,	0, FMT_D9, AM33,	{RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "mov",	0xfe6a0000,	0xffff0000,	0x22, FMT_D9, AM33,	{MEMINC2 (RM0, IMM32_HIGH8), RN2}},
{ "mov",	0xfe7a0000,	0xffff0000,	0, FMT_D9, AM33,	{RN2, MEMINC2 (RM0, IMM32_HIGH8)}},

{ "movhu",	0xf060,		0xfff0,		0, FMT_D0, 0,	{MEM(AM0), DN1}},
{ "movhu",	0xf070,		0xfff0,		0, FMT_D0, 0,	{DM1, MEM(AN0)}},
{ "movhu",	0xf480,		0xffc0,		0, FMT_D0, 0,	{MEM2(DI, AM0), DN2}},
{ "movhu",	0xf4c0,		0xffc0,		0, FMT_D0, 0,	{DM2, MEM2(DI, AN0)}},
{ "movhu",	0xf86000,	0xfff000,	0, FMT_D1, 0,	{MEM2(SD8, AM0), DN1}},
{ "movhu",	0xf87000,	0xfff000,	0, FMT_D1, 0,	{DM1, MEM2(SD8, AN0)}},
{ "movhu",	0xf94a00,	0xffff00,	0, FMT_D6, AM33,	{MEM(RM0), RN2}},
{ "movhu",	0xf95a00,	0xffff00,	0, FMT_D6, AM33,	{RM2, MEM(RN0)}},
{ "movhu",	0xf9ea00,	0xffff00,	0x12, FMT_D6, AM33,	{MEMINC(RM0), RN2}},
{ "movhu",	0xf9fa00,	0xffff00,	0, FMT_D6, AM33,	{RM2, MEMINC(RN0)}},
{ "movhu",	0xfa600000,	0xfff00000,	0, FMT_D2, 0,	{MEM2(SD16, AM0), DN1}},
{ "movhu",	0xfa700000,	0xfff00000,	0, FMT_D2, 0,	{DM1, MEM2(SD16, AN0)}},
{ "movhu",	0xfb4a0000,	0xffff0000,	0, FMT_D7, AM33,	{MEM2(SD8, RM0), RN2}},
{ "movhu",	0xfb5a0000,	0xffff0000,	0, FMT_D7, AM33,	{RM2, MEM2(SD8, RN0)}},
{ "movhu",	0xfbce0000,	0xffff000f,	0, FMT_D7, AM33,	{MEM2(RI, RM0), RD2}},
{ "movhu",	0xfbde0000,	0xffff000f,	0, FMT_D7, AM33,	{RD2, MEM2(RI, RN0)}},
{ "movhu",	0xfbea0000,	0xffff0000,	0x22, FMT_D7, AM33,	{MEMINC2 (RM0, SIMM8), RN2}},
{ "movhu",	0xfbfa0000,	0xffff0000,	0, FMT_D7, AM33,	{RM2, MEMINC2 (RN0, SIMM8)}},
{ "movhu",	0xfc600000,	0xfff00000,	0, FMT_D4, 0,	{MEM2(IMM32,AM0), DN1}},
{ "movhu",	0xfc700000,	0xfff00000,	0, FMT_D4, 0,	{DM1, MEM2(IMM32,AN0)}},
{ "movhu",	0xfd4a0000,	0xffff0000,	0, FMT_D8, AM33,	{MEM2(SD24, RM0), RN2}},
{ "movhu",	0xfd5a0000,	0xffff0000,	0, FMT_D8, AM33,	{RM2, MEM2(SD24, RN0)}},
{ "movhu",	0xfdea0000,	0xffff0000,	0x22, FMT_D8, AM33,	{MEMINC2 (RM0, IMM24), RN2}},
{ "movhu",	0xfdfa0000,	0xffff0000,	0, FMT_D8, AM33,	{RM2, MEMINC2 (RN0, IMM24)}},
{ "movhu",	0xfe4a0000,	0xffff0000,	0, FMT_D9, AM33,	{MEM2(IMM32_HIGH8,RM0), RN2}},
{ "movhu",	0xfe5a0000,	0xffff0000,	0, FMT_D9, AM33,	{RM2, MEM2(IMM32_HIGH8, RN0)}},
{ "movhu",	0xfeea0000,	0xffff0000,	0x22, FMT_D9, AM33,	{MEMINC2 (RM0, IMM32_HIGH8), RN2}},
{ "movhu",	0xfefa0000,	0xffff0000,	0, FMT_D9, AM33,	{RN2, MEMINC2 (RM0, IMM32_HIGH8)}},
{ 0, 0, 0, 0, 0, 0, {0}},
};

/*
 * fix up misalignment problems where possible
 */
asmlinkage void misalignment(struct pt_regs *regs, enum exception_code code)
{
	const struct exception_table_entry *fixup;
	const struct mn10300_opcode *pop;
	unsigned long *registers = (unsigned long *) regs;
	unsigned long data, *store, *postinc;
	mm_segment_t seg;
	siginfo_t info;
	uint32_t opcode, disp, noc, xo, xm;
	uint8_t *pc, byte;
	void *address;
	unsigned tmp, npop;

	kdebug("MISALIGN at %lx\n", regs->pc);

	if (in_interrupt())
		die("Misalignment trap in interrupt context", regs, code);

	if (regs->epsw & EPSW_IE)
		asm volatile("or %0,epsw" : : "i"(EPSW_IE));

	seg = get_fs();
	set_fs(KERNEL_DS);

	fixup = search_exception_tables(regs->pc);

	/* first thing to do is to match the opcode */
	pc = (u_int8_t *) regs->pc;

	if (__get_user(byte, pc) != 0)
		goto fetch_error;
	opcode = byte;
	noc = 8;

	for (pop = mn10300_opcodes; pop->name; pop++) {
		npop = log2(pop->opcode | pop->opmask);
		if (npop <= 0 || npop > 31)
			continue;
		npop = (npop + 8) & ~7;

	got_more_bits:
		if (npop == noc) {
			if ((opcode & pop->opmask) == pop->opcode)
				goto found_opcode;
		} else if (npop > noc) {
			xo = pop->opcode >> (npop - noc);
			xm = pop->opmask >> (npop - noc);

			if ((opcode & xm) != xo)
				continue;

			/* we've got a partial match (an exact match on the
			 * first N bytes), so we need to get some more data */
			pc++;
			if (__get_user(byte, pc) != 0)
				goto fetch_error;
			opcode = opcode << 8 | byte;
			noc += 8;
			goto got_more_bits;
		} else {
			/* there's already been a partial match as long as the
			 * complete match we're now considering, so this one
			 * shouldn't match */
326 continue;
327 }
328 }
329
330 /* didn't manage to find a fixup */
331 if (!user_mode(regs))
332 printk(KERN_CRIT "MISALIGN: %lx: unsupported instruction %x\n",
333 regs->pc, opcode);
334
335failed:
336 set_fs(seg);
337 if (die_if_no_fixup("misalignment error", regs, code))
338 return;
339
340 info.si_signo = SIGBUS;
341 info.si_errno = 0;
342 info.si_code = BUS_ADRALN;
343 info.si_addr = (void *) regs->pc;
344 force_sig_info(SIGBUS, &info, current);
345 return;
346
347 /* error reading opcodes */
348fetch_error:
349 if (!user_mode(regs))
350 printk(KERN_CRIT
351 "MISALIGN: %p: fault whilst reading instruction data\n",
352 pc);
353 goto failed;
354
355bad_addr_mode:
356 if (!user_mode(regs))
357 printk(KERN_CRIT
358 "MISALIGN: %lx: unsupported addressing mode %x\n",
359 regs->pc, opcode);
360 goto failed;
361
362bad_reg_mode:
363 if (!user_mode(regs))
364 printk(KERN_CRIT
365 "MISALIGN: %lx: unsupported register mode %x\n",
366 regs->pc, opcode);
367 goto failed;
368
369unsupported_instruction:
370 if (!user_mode(regs))
371 printk(KERN_CRIT
372 "MISALIGN: %lx: unsupported instruction %x (%s)\n",
373 regs->pc, opcode, pop->name);
374 goto failed;
375
376transfer_failed:
377 set_fs(seg);
378 if (fixup) {
379 regs->pc = fixup->fixup;
380 return;
381 }
382 if (die_if_no_fixup("misalignment fixup", regs, code))
383 return;
384
385 info.si_signo = SIGSEGV;
386 info.si_errno = 0;
387 info.si_code = 0;
388 info.si_addr = (void *) regs->pc;
389 force_sig_info(SIGSEGV, &info, current);
390 return;
391
392 /* we matched the opcode */
393found_opcode:
394 kdebug("MISALIGN: %lx: %x==%x { %x, %x }\n",
395 regs->pc, opcode, pop->opcode, pop->params[0], pop->params[1]);
396
397 tmp = format_tbl[pop->format].opsz;
398 if (tmp > noc)
399 BUG(); /* match was less complete than it ought to have been */
400
401 if (tmp < noc) {
402 tmp = noc - tmp;
403 opcode >>= tmp;
404 pc -= tmp >> 3;
405 }
406
407 /* grab the extra displacement (note it's LSB first) */
408 disp = 0;
409 tmp = format_tbl[pop->format].dispsz >> 3;
410 while (tmp > 0) {
411 tmp--;
412 disp <<= 8;
413
414 pc++;
415 if (__get_user(byte, pc) != 0)
416 goto fetch_error;
417 disp |= byte;
418 }
419
420 set_fs(KERNEL_XDS);
421 if (fixup || regs->epsw & EPSW_nSL)
422 set_fs(seg);
423
424 tmp = (pop->params[0] ^ pop->params[1]) & 0x80000000;
425 if (!tmp) {
426 if (!user_mode(regs))
427 printk(KERN_CRIT
428 "MISALIGN: %lx:"
429 " insn not move to/from memory %x\n",
430 regs->pc, opcode);
431 goto failed;
432 }
433
434 if (pop->params[0] & 0x80000000) {
435 /* move memory to register */
436 if (!misalignment_addr(registers, pop->params[0], opcode, disp,
437 &address, &postinc))
438 goto bad_addr_mode;
439
440 if (!misalignment_reg(registers, pop->params[1], opcode, disp,
441 &store))
442 goto bad_reg_mode;
443
444 if (strcmp(pop->name, "mov") == 0) {
445 kdebug("FIXUP: mov (%p),DARn\n", address);
446 if (copy_from_user(&data, (void *) address, 4) != 0)
447 goto transfer_failed;
448 if (pop->params[0] & 0x1000000)
449 *postinc += 4;
450 } else if (strcmp(pop->name, "movhu") == 0) {
451 kdebug("FIXUP: movhu (%p),DARn\n", address);
452 data = 0;
453 if (copy_from_user(&data, (void *) address, 2) != 0)
454 goto transfer_failed;
455 if (pop->params[0] & 0x1000000)
456 *postinc += 2;
457 } else {
458 goto unsupported_instruction;
459 }
460
461 *store = data;
462 } else {
463 /* move register to memory */
464 if (!misalignment_reg(registers, pop->params[0], opcode, disp,
465 &store))
466 goto bad_reg_mode;
467
468 if (!misalignment_addr(registers, pop->params[1], opcode, disp,
469 &address, &postinc))
470 goto bad_addr_mode;
471
472 data = *store;
473
474 if (strcmp(pop->name, "mov") == 0) {
475 kdebug("FIXUP: mov %lx,(%p)\n", data, address);
476 if (copy_to_user((void *) address, &data, 4) != 0)
477 goto transfer_failed;
478 if (pop->params[1] & 0x1000000)
479 *postinc += 4;
480 } else if (strcmp(pop->name, "movhu") == 0) {
481 kdebug("FIXUP: movhu %hx,(%p)\n",
482 (uint16_t) data, address);
483 if (copy_to_user((void *) address, &data, 2) != 0)
484 goto transfer_failed;
485 if (pop->params[1] & 0x1000000)
486 *postinc += 2;
487 } else {
488 goto unsupported_instruction;
489 }
490 }
491
492 tmp = format_tbl[pop->format].opsz + format_tbl[pop->format].dispsz;
493 regs->pc += tmp >> 3;
494
495 set_fs(seg);
496 return;
497}
498
499/*
500 * determine the address that was being accessed
501 */
502static int misalignment_addr(unsigned long *registers, unsigned params,
503 unsigned opcode, unsigned disp,
504 void **_address, unsigned long **_postinc)
505{
506 unsigned long *postinc = NULL, address = 0, tmp;
507
508 params &= 0x7fffffff;
509
510 do {
511 switch (params & 0xff) {
512 case DM0:
513 postinc = &registers[Dreg_index[opcode & 0x03]];
514 address += *postinc;
515 break;
516 case DM1:
517 postinc = &registers[Dreg_index[opcode >> 2 & 0x0c]];
518 address += *postinc;
519 break;
520 case DM2:
521 postinc = &registers[Dreg_index[opcode >> 4 & 0x30]];
522 address += *postinc;
523 break;
524 case AM0:
525 postinc = &registers[Areg_index[opcode & 0x03]];
526 address += *postinc;
527 break;
528 case AM1:
529 postinc = &registers[Areg_index[opcode >> 2 & 0x0c]];
530 address += *postinc;
531 break;
532 case AM2:
533 postinc = &registers[Areg_index[opcode >> 4 & 0x30]];
534 address += *postinc;
535 break;
536 case RM0:
537 postinc = &registers[Rreg_index[opcode & 0x0f]];
538 address += *postinc;
539 break;
540 case RM1:
541 postinc = &registers[Rreg_index[opcode >> 2 & 0x0f]];
542 address += *postinc;
543 break;
544 case RM2:
545 postinc = &registers[Rreg_index[opcode >> 4 & 0x0f]];
546 address += *postinc;
547 break;
548 case RM4:
549 postinc = &registers[Rreg_index[opcode >> 8 & 0x0f]];
550 address += *postinc;
551 break;
552 case RM6:
553 postinc = &registers[Rreg_index[opcode >> 12 & 0x0f]];
554 address += *postinc;
555 break;
556 case RD0:
557 postinc = &registers[Rreg_index[disp & 0x0f]];
558 address += *postinc;
559 break;
560 case RD2:
561 postinc = &registers[Rreg_index[disp >> 4 & 0x0f]];
562 address += *postinc;
563 break;
564
565 case SD8:
566 case SIMM8:
567 address += (int32_t) (int8_t) (disp & 0xff);
568 break;
569 case SD16:
570 address += (int32_t) (int16_t) (disp & 0xffff);
571 break;
572 case SD24:
573 tmp = disp << 8;
574 asm("asr 8,%0" : "=r"(tmp) : "0"(tmp));
575 address += tmp;
576 break;
577 case SIMM4_2:
578 tmp = opcode >> 4 & 0x0f;
579 tmp <<= 28;
580 asm("asr 28,%0" : "=r"(tmp) : "0"(tmp));
581 address += tmp;
582 break;
583 case IMM24:
584 address += disp & 0x00ffffff;
585 break;
586 case IMM32:
587 case IMM32_HIGH8:
588 address += disp;
589 break;
590 default:
591 return 0;
592 }
593 } while ((params >>= 8));
594
595 *_address = (void *) address;
596 *_postinc = postinc;
597 return 1;
598}
599
600/*
601 * determine the register that is acting as source/dest
602 */
603static int misalignment_reg(unsigned long *registers, unsigned params,
604 unsigned opcode, unsigned disp,
605 unsigned long **_register)
606{
607 params &= 0x7fffffff;
608
609 if (params & 0xffffff00)
610 return 0;
611
612 switch (params & 0xff) {
613 case DM0:
614 *_register = &registers[Dreg_index[opcode & 0x03]];
615 break;
616 case DM1:
617 *_register = &registers[Dreg_index[opcode >> 2 & 0x03]];
618 break;
619 case DM2:
620 *_register = &registers[Dreg_index[opcode >> 4 & 0x03]];
621 break;
622 case AM0:
623 *_register = &registers[Areg_index[opcode & 0x03]];
624 break;
625 case AM1:
626 *_register = &registers[Areg_index[opcode >> 2 & 0x03]];
627 break;
628 case AM2:
629 *_register = &registers[Areg_index[opcode >> 4 & 0x03]];
630 break;
631 case RM0:
632 *_register = &registers[Rreg_index[opcode & 0x0f]];
633 break;
634 case RM1:
635 *_register = &registers[Rreg_index[opcode >> 2 & 0x0f]];
636 break;
637 case RM2:
638 *_register = &registers[Rreg_index[opcode >> 4 & 0x0f]];
639 break;
640 case RM4:
641 *_register = &registers[Rreg_index[opcode >> 8 & 0x0f]];
642 break;
643 case RM6:
644 *_register = &registers[Rreg_index[opcode >> 12 & 0x0f]];
645 break;
646 case RD0:
647 *_register = &registers[Rreg_index[disp & 0x0f]];
648 break;
649 case RD2:
650 *_register = &registers[Rreg_index[disp >> 4 & 0x0f]];
651 break;
652 case SP:
653 *_register = &registers[REG_SP >> 2];
654 break;
655
656 default:
657 return 0;
658 }
659
660 return 1;
661}
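
The SD24 and SIMM4_2 cases above sign-extend by shifting the field to the top of the word and using the CPU's asr instruction, since right-shifting a signed value in C is implementation-defined. A portable sketch of the same operation (the helper name is invented for illustration):

	#include <stdint.h>

	/* Sign-extend the low 'bits' bits of 'value' (1 <= bits <= 31)
	 * without relying on signed right shift being arithmetic.
	 * sign_extend(disp, 24) corresponds to the SD24 case and
	 * sign_extend(opcode >> 4 & 0x0f, 4) to SIMM4_2. */
	static inline int32_t sign_extend(uint32_t value, unsigned bits)
	{
		uint32_t sign = 1u << (bits - 1);

		value &= (sign << 1) - 1;	/* keep only the low bits */
		return (int32_t)(value ^ sign) - (int32_t)sign;
	}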
diff --git a/arch/mn10300/mm/mmu-context.c b/arch/mn10300/mm/mmu-context.c
new file mode 100644
index 000000000000..31c9d27a75ae
--- /dev/null
+++ b/arch/mn10300/mm/mmu-context.c
@@ -0,0 +1,80 @@
1/* MN10300 MMU context allocation and management
2 *
3 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
4 * Written by David Howells (dhowells@redhat.com)
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public Licence
8 * as published by the Free Software Foundation; either version
9 * 2 of the Licence, or (at your option) any later version.
10 */
11#include <linux/sched.h>
12#include <linux/mm.h>
13#include <asm/mmu_context.h>
14#include <asm/tlbflush.h>
15
16/*
17 * list of the MMU contexts last allocated on each CPU
18 */
19unsigned long mmu_context_cache[NR_CPUS] = {
20 [0 ... NR_CPUS - 1] = MMU_CONTEXT_FIRST_VERSION * 2 - 1,
21};
22
23/*
24 * flush the specified TLB entry
25 */
26void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
27{
28 unsigned long pteu, cnx, flags;
29
30 addr &= PAGE_MASK;
31
32 /* make sure the context doesn't migrate and defend against
33 * interference from vmalloc'd regions */
34 local_irq_save(flags);
35
36 cnx = mm_context(vma->vm_mm);
37
38 if (cnx != MMU_NO_CONTEXT) {
39 pteu = addr | (cnx & 0x000000ffUL);
40 IPTEU = pteu;
41 DPTEU = pteu;
42 if (IPTEL & xPTEL_V)
43 IPTEL = 0;
44 if (DPTEL & xPTEL_V)
45 DPTEL = 0;
46 }
47
48 local_irq_restore(flags);
49}
50
51/*
52 * preemptively set a TLB entry
53 */
54void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
55{
56 unsigned long pteu, ptel, cnx, flags;
57
58 addr &= PAGE_MASK;
59 ptel = pte_val(pte) & ~(xPTEL_UNUSED1 | xPTEL_UNUSED2);
60
61 /* make sure the context doesn't migrate and defend against
62 * interference from vmalloc'd regions */
63 local_irq_save(flags);
64
65 cnx = mm_context(vma->vm_mm);
66
67 if (cnx != MMU_NO_CONTEXT) {
68 pteu = addr | (cnx & 0x000000ffUL);
69 if (!(pte_val(pte) & _PAGE_NX)) {
70 IPTEU = pteu;
71 if (IPTEL & xPTEL_V)
72 IPTEL = ptel;
73 }
74 DPTEU = pteu;
75 if (DPTEL & xPTEL_V)
76 DPTEL = ptel;
77 }
78
79 local_irq_restore(flags);
80}
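
Only the per-CPU context cache and the probe-based flush live in this file; allocation is in asm/mmu_context.h. The probe works by writing the page address plus the 8-bit PID into IPTEU/DPTEU, which performs a TLB lookup whose result appears in IPTEL/DPTEL; a valid result is then overwritten to kill the entry. The cnx & 0x000000ffUL masking and the MMU_CONTEXT_FIRST_VERSION * 2 - 1 initializer fit the common version-tagged scheme: the low 8 bits of a context number are the hardware TLB PID, the upper bits a generation counter, and the cache is primed at the last PID of the first generation so the first allocation rolls straight into a fresh one. A sketch of such an allocator (the constant value and helper name are assumptions, not taken from the header):

	#define TLBPID_MASK	0x000000ffUL	/* hardware PID field (assumed) */

	extern void local_flush_tlb(void);	/* arch TLB wipe (assumed name) */

	/* Illustrative per-CPU context allocator; 'cache' points at this
	 * CPU's slot in mmu_context_cache. */
	static unsigned long alloc_context(unsigned long *cache)
	{
		unsigned long cnx = ++*cache;

		if (!(cnx & TLBPID_MASK)) {
			/* PID space exhausted: entries tagged with older
			 * generations are unreachable, but their PIDs are
			 * about to be reused, so wipe the TLB */
			local_flush_tlb();
		}
		return cnx;
	}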
diff --git a/arch/mn10300/mm/pgtable.c b/arch/mn10300/mm/pgtable.c
new file mode 100644
index 000000000000..a477038752ba
--- /dev/null
+++ b/arch/mn10300/mm/pgtable.c
@@ -0,0 +1,197 @@
1/* MN10300 Page table management
2 *
3 * Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
4 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
5 * Modified by David Howells (dhowells@redhat.com)
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public Licence
9 * as published by the Free Software Foundation; either version
10 * 2 of the Licence, or (at your option) any later version.
11 */
12#include <linux/sched.h>
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/mm.h>
16#include <linux/swap.h>
17#include <linux/smp.h>
18#include <linux/highmem.h>
19#include <linux/slab.h>
20#include <linux/pagemap.h>
21#include <linux/spinlock.h>
22#include <linux/quicklist.h>
23
24#include <asm/system.h>
25#include <asm/pgtable.h>
26#include <asm/pgalloc.h>
27#include <asm/tlb.h>
28#include <asm/tlbflush.h>
29
30void show_mem(void)
31{
32 unsigned long i;
33	int free = 0, total = 0, reserved = 0, shared = 0;
34	int cached = 0;
35
36 printk(KERN_INFO "Mem-info:\n");
37 show_free_areas();
38 i = max_mapnr;
39 while (i-- > 0) {
40 total++;
41 if (PageReserved(mem_map + i))
42 reserved++;
43 else if (PageSwapCache(mem_map + i))
44 cached++;
45 else if (!page_count(mem_map + i))
46 free++;
47 else
48 shared += page_count(mem_map + i) - 1;
49 }
50 printk(KERN_INFO "%d pages of RAM\n", total);
51 printk(KERN_INFO "%d free pages\n", free);
52 printk(KERN_INFO "%d reserved pages\n", reserved);
53 printk(KERN_INFO "%d pages shared\n", shared);
54 printk(KERN_INFO "%d pages swap cached\n", cached);
55}
56
57/*
58 * Associate a large virtual page frame with a given physical page frame
59 * and protection flags for that frame. pfn is for the base of the page,
60 * vaddr is what the page gets mapped to - both must be properly aligned.
61 * The pmd must already be instantiated. Assumes PAE mode.
62 */
63void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
64{
65 pgd_t *pgd;
66 pud_t *pud;
67 pmd_t *pmd;
68
69 if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
70 printk(KERN_ERR "set_pmd_pfn: vaddr misaligned\n");
71 return; /* BUG(); */
72 }
73 if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
74 printk(KERN_ERR "set_pmd_pfn: pfn misaligned\n");
75 return; /* BUG(); */
76 }
77 pgd = swapper_pg_dir + pgd_index(vaddr);
78 if (pgd_none(*pgd)) {
79 printk(KERN_ERR "set_pmd_pfn: pgd_none\n");
80 return; /* BUG(); */
81 }
82 pud = pud_offset(pgd, vaddr);
83 pmd = pmd_offset(pud, vaddr);
84 set_pmd(pmd, pfn_pmd(pfn, flags));
85 /*
86 * It's enough to flush this one mapping.
87 * (PGE mappings get flushed as well)
88 */
89 __flush_tlb_one(vaddr);
90}
91
92pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
93{
94 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
95 if (pte)
96 clear_page(pte);
97 return pte;
98}
99
100struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
101{
102 struct page *pte;
103
104#ifdef CONFIG_HIGHPTE
105 pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
106#else
107 pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
108#endif
109 if (pte)
110 clear_highpage(pte);
111 return pte;
112}
113
114/*
115 * List of all pgd's needed for non-PAE so it can invalidate entries
116 * in both cached and uncached pgd's; not needed for PAE since the
117 * kernel pmd is shared. If PAE were not to share the pmd a similar
118 * tactic would be needed. This is essentially codepath-based locking
119 * against pageattr.c; it is the unique case in which a valid change
120 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
121 * vmalloc faults work because attached pagetables are never freed.
122 * If the locking proves to be non-performant, a ticketing scheme with
123 * checks at dup_mmap(), exec(), and other mmlist addition points
124 * could be used. The locking scheme was chosen on the basis of
125 * manfred's recommendations and having no core impact whatsoever.
126 * -- wli
127 */
128DEFINE_SPINLOCK(pgd_lock);
129struct page *pgd_list;
130
131static inline void pgd_list_add(pgd_t *pgd)
132{
133 struct page *page = virt_to_page(pgd);
134 page->index = (unsigned long) pgd_list;
135 if (pgd_list)
136 set_page_private(pgd_list, (unsigned long) &page->index);
137 pgd_list = page;
138 set_page_private(page, (unsigned long) &pgd_list);
139}
140
141static inline void pgd_list_del(pgd_t *pgd)
142{
143 struct page *next, **pprev, *page = virt_to_page(pgd);
144 next = (struct page *) page->index;
145 pprev = (struct page **) page_private(page);
146 *pprev = next;
147 if (next)
148 set_page_private(next, (unsigned long) pprev);
149}
150
151void pgd_ctor(void *pgd)
152{
153 unsigned long flags;
154
155 if (PTRS_PER_PMD == 1)
156 spin_lock_irqsave(&pgd_lock, flags);
157
158 memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
159 swapper_pg_dir + USER_PTRS_PER_PGD,
160 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
161
162 if (PTRS_PER_PMD > 1)
163 return;
164
165 pgd_list_add(pgd);
166 spin_unlock_irqrestore(&pgd_lock, flags);
167 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
168}
169
170/* never called when PTRS_PER_PMD > 1 */
171void pgd_dtor(void *pgd)
172{
173 unsigned long flags; /* can be called from interrupt context */
174
175 spin_lock_irqsave(&pgd_lock, flags);
176 pgd_list_del(pgd);
177 spin_unlock_irqrestore(&pgd_lock, flags);
178}
179
180pgd_t *pgd_alloc(struct mm_struct *mm)
181{
182 return quicklist_alloc(0, GFP_KERNEL, pgd_ctor);
183}
184
185void pgd_free(struct mm_struct *mm, pgd_t *pgd)
186{
187 quicklist_free(0, pgd_dtor, pgd);
188}
189
190void __init pgtable_cache_init(void)
191{
192}
193
194void check_pgt_cache(void)
195{
196 quicklist_trim(0, pgd_dtor, 25, 16);
197}
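
pgd_list above is an intrusive list threaded through struct page itself: page->index holds the next page, while page_private() holds the address of whichever pointer currently points at this page, which is why pgd_list_del can unlink without knowing whether its predecessor is the list head or another page's ->index. As the comment block explains, the list lets code that edits kernel page tables visit every live pgd. A hypothetical consumer would walk it like this (the function name is invented, and a real caller would hold pgd_lock):

	/* Sketch: propagate one kernel pgd slot into every tracked pgd. */
	static void sync_kernel_pgd_entry(unsigned int index, pgd_t entry)
	{
		struct page *page;

		for (page = pgd_list; page;
		     page = (struct page *) page->index) {
			pgd_t *pgd = (pgd_t *) page_address(page);

			pgd[index] = entry;
		}
	}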
diff --git a/arch/mn10300/mm/tlb-mn10300.S b/arch/mn10300/mm/tlb-mn10300.S
new file mode 100644
index 000000000000..789208094e98
--- /dev/null
+++ b/arch/mn10300/mm/tlb-mn10300.S
@@ -0,0 +1,207 @@
1###############################################################################
2#
3# TLB loading functions
4#
5# Copyright (C) 2007 Matsushita Electric Industrial Co., Ltd.
6# Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
7# Modified by David Howells (dhowells@redhat.com)
8#
9# This program is free software; you can redistribute it and/or
10# modify it under the terms of the GNU General Public Licence
11# as published by the Free Software Foundation; either version
12# 2 of the Licence, or (at your option) any later version.
13#
14###############################################################################
15#include <linux/sys.h>
16#include <linux/linkage.h>
17#include <asm/smp.h>
18#include <asm/intctl-regs.h>
19#include <asm/frame.inc>
20#include <asm/page.h>
21#include <asm/pgtable.h>
22
23###############################################################################
24#
25# Instruction TLB Miss handler entry point
26#
27###############################################################################
28 .type itlb_miss,@function
29ENTRY(itlb_miss)
30 and ~EPSW_NMID,epsw
31#ifdef CONFIG_GDBSTUB
32 movm [d2,d3,a2],(sp)
33#else
34 or EPSW_nAR,epsw # switch D0-D3 & A0-A3 to the alternate
35 # register bank
36 nop
37 nop
38 nop
39#endif
40
41 mov (IPTEU),d3
42 mov (PTBR),a2
43 mov d3,d2
44 and 0xffc00000,d2
45 lsr 20,d2
46 mov (a2,d2),a2 # PTD *ptd = PGD[addr 31..22]
47 btst _PAGE_VALID,a2
48	beq itlb_miss_fault		# jump if it doesn't point anywhere
49
50 and ~(PAGE_SIZE-1),a2
51 mov d3,d2
52 and 0x003ff000,d2
53 lsr 10,d2
54 add d2,a2
55 mov (a2),d2 # get pte from PTD[addr 21..12]
56 btst _PAGE_VALID,d2
57	beq itlb_miss_fault		# jump if it doesn't point to a page
58 # (might be a swap id)
59 bset _PAGE_ACCESSED,(0,a2)
60 and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
61itlb_miss_set:
62 mov d2,(IPTEL) # change the TLB
63#ifdef CONFIG_GDBSTUB
64 movm (sp),[d2,d3,a2]
65#endif
66 rti
67
68itlb_miss_fault:
69 mov _PAGE_VALID,d2 # force address error handler to be
70 # invoked
71 bra itlb_miss_set
72
73 .size itlb_miss, . - itlb_miss
74
75###############################################################################
76#
77# Data TLB Miss handler entry point
78#
79###############################################################################
80 .type dtlb_miss,@function
81ENTRY(dtlb_miss)
82 and ~EPSW_NMID,epsw
83#ifdef CONFIG_GDBSTUB
84 movm [d2,d3,a2],(sp)
85#else
86 or EPSW_nAR,epsw # switch D0-D3 & A0-A3 to the alternate
87 # register bank
88 nop
89 nop
90 nop
91#endif
92
93 mov (DPTEU),d3
94 mov (PTBR),a2
95 mov d3,d2
96 and 0xffc00000,d2
97 lsr 20,d2
98 mov (a2,d2),a2 # PTD *ptd = PGD[addr 31..22]
99 btst _PAGE_VALID,a2
100	beq dtlb_miss_fault		# jump if it doesn't point anywhere
101
102 and ~(PAGE_SIZE-1),a2
103 mov d3,d2
104 and 0x003ff000,d2
105 lsr 10,d2
106 add d2,a2
107 mov (a2),d2 # get pte from PTD[addr 21..12]
108 btst _PAGE_VALID,d2
109	beq dtlb_miss_fault		# jump if it doesn't point to a page
110 # (might be a swap id)
111 bset _PAGE_ACCESSED,(0,a2)
112 and ~(xPTEL_UNUSED1|xPTEL_UNUSED2),d2
113dtlb_miss_set:
114 mov d2,(DPTEL) # change the TLB
115#ifdef CONFIG_GDBSTUB
116 movm (sp),[d2,d3,a2]
117#endif
118 rti
119
120dtlb_miss_fault:
121 mov _PAGE_VALID,d2 # force address error handler to be
122 # invoked
123 bra dtlb_miss_set
124 .size dtlb_miss, . - dtlb_miss
125
126###############################################################################
127#
128# Instruction TLB Address Error handler entry point
129#
130###############################################################################
131 .type itlb_aerror,@function
132ENTRY(itlb_aerror)
133 and ~EPSW_NMID,epsw
134 add -4,sp
135 SAVE_ALL
136 add -4,sp # need to pass three params
137
138 # calculate the fault code
139 movhu (MMUFCR_IFC),d1
140 or 0x00010000,d1 # it's an instruction fetch
141
142 # determine the page address
143 mov (IPTEU),a2
144 mov a2,d0
145 and PAGE_MASK,d0
146 mov d0,(12,sp)
147
148 clr d0
149 mov d0,(IPTEL)
150
151 and ~EPSW_NMID,epsw
152 or EPSW_IE,epsw
153 mov fp,d0
154	call do_page_fault[],0		# do_page_fault(regs,code,addr)
155
156 jmp ret_from_exception
157 .size itlb_aerror, . - itlb_aerror
158
159###############################################################################
160#
161# Data TLB Address Error handler entry point
162#
163###############################################################################
164 .type dtlb_aerror,@function
165ENTRY(dtlb_aerror)
166 and ~EPSW_NMID,epsw
167 add -4,sp
168 mov d1,(sp)
169
170 movhu (MMUFCR_DFC),d1 # is it the initial valid write
171 # to this page?
172 and MMUFCR_xFC_INITWR,d1
173 beq dtlb_pagefault # jump if not
174
175 mov (DPTEL),d1 # set the dirty bit
176 # (don't replace with BSET!)
177 or _PAGE_DIRTY,d1
178 mov d1,(DPTEL)
179 mov (sp),d1
180 add 4,sp
181 rti
182
183 ALIGN
184dtlb_pagefault:
185 mov (sp),d1
186 SAVE_ALL
187 add -4,sp # need to pass three params
188
189 # calculate the fault code
190 movhu (MMUFCR_DFC),d1
191
192 # determine the page address
193 mov (DPTEU),a2
194 mov a2,d0
195 and PAGE_MASK,d0
196 mov d0,(12,sp)
197
198 clr d0
199 mov d0,(DPTEL)
200
201 and ~EPSW_NMID,epsw
202 or EPSW_IE,epsw
203 mov fp,d0
204	call do_page_fault[],0		# do_page_fault(regs,code,addr)
205
206 jmp ret_from_exception
207 .size dtlb_aerror, . - dtlb_aerror
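
Both miss handlers perform the same two-level walk: index the PGD with address bits 31..22, test _PAGE_VALID, index the resulting page table with bits 21..12, set _PAGE_ACCESSED, strip the unused PTE bits, and load the result into IPTEL/DPTEL; on failure they load _PAGE_VALID alone so the retried access takes the address-error path and ends up in do_page_fault(). A C rendering of that walk, assuming 4KiB pages, 32-bit PTEs, and illustrative bit values:

	#include <stdint.h>

	#define PAGE_VALID	0x001u	/* assumed position of _PAGE_VALID */
	#define PAGE_ACCESSED	0x020u	/* illustrative */

	/* Sketch of itlb_miss/dtlb_miss: returns the value the handler
	 * would write to xPTEL for faulting address 'addr', given the
	 * page directory loaded from PTBR. */
	static uint32_t tlb_miss_walk(uint32_t addr, uint32_t *pgd)
	{
		uint32_t ptd = pgd[addr >> 22];		/* bits 31..22 */
		uint32_t *pte;

		if (!(ptd & PAGE_VALID))
			return PAGE_VALID;	/* force the aerror path */

		pte = (uint32_t *)(ptd & ~0xfffu);	/* page-align PTD */
		pte += (addr >> 12) & 0x3ff;		/* bits 21..12 */

		if (!(*pte & PAGE_VALID))
			return PAGE_VALID;	/* may be a swap entry */

		*pte |= PAGE_ACCESSED;			/* the bset */
		return *pte;	/* the asm also clears xPTEL_UNUSED1|2 */
	}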