path: root/arch/arm/mm/proc-mohawk.S
author    Eric Miao <eric.miao@marvell.com>  2009-01-20 01:15:18 -0500
committer Eric Miao <eric.miao@marvell.com>  2009-03-22 22:11:34 -0400
commit    49cbe78637eb0503f45fc9b556ec08918a616534
tree      96de29959c5ef512d8f1e0bea7eae2245b7cc3f9  /arch/arm/mm/proc-mohawk.S
parent    f8dec04d33b94a4cfa9358fd9666c01480bb164d
[ARM] pxa: add base support for Marvell's PXA168 processor line
"""The MarvellĀ® PXA168 processor is the first in a family of application processors targeted at mass market opportunities in computing and consumer devices. It balances high computing and multimedia performance with low power consumption to support extended battery life, and includes a wealth of integrated peripherals to reduce overall BOM cost .... """ See http://www.marvell.com/featured/pxa168.jsp for more information. 1. Marvell Mohawk core is a hybrid of xscale3 and its own ARM core, there are many enhancements like instructions for flushing the whole D-cache, and so on 2. Clock reuses Russell's common clkdev, and added the basic support for UART1/2. 3. Devices are a bit different from the 'mach-pxa' way, the platform devices are now dynamically allocated only when necessary (i.e. when pxa_register_device() is called). Description for each device are stored in an array of 'struct pxa_device_desc'. Now that: a. this array of device description is marked with __initdata and can be freed up system is fully up b. which means board code has to add all needed devices early in his initializing function c. platform specific data can now be marked as __initdata since they are allocated and copied by platform_device_add_data() 4. only the basic UART1/2/3 are added, more devices will come later. Signed-off-by: Jason Chagas <chagas@marvell.com> Signed-off-by: Eric Miao <eric.miao@marvell.com>
Diffstat (limited to 'arch/arm/mm/proc-mohawk.S')
-rw-r--r--  arch/arm/mm/proc-mohawk.S | 416
1 file changed, 416 insertions(+), 0 deletions(-)
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S
new file mode 100644
index 000000000000..540f5078496b
--- /dev/null
+++ b/arch/arm/mm/proc-mohawk.S
@@ -0,0 +1,416 @@
/*
 * linux/arch/arm/mm/proc-mohawk.S: MMU functions for Marvell PJ1 core
 *
 * PJ1 (codename Mohawk) is a hybrid of the xscale3 and Marvell's own core.
 *
 * Heavily based on proc-arm926.S and proc-xsc3.S
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/linkage.h>
#include <linux/init.h>
#include <asm/assembler.h>
#include <asm/hwcap.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include "proc-macros.S"

/*
 * This is the maximum size of an area which will be flushed. If the
 * area is larger than this, then we flush the whole cache.
 */
#define CACHE_DLIMIT	32768

/*
 * The cache line size of the L1 D cache.
 */
#define CACHE_DLINESIZE	32

/*
 * cpu_mohawk_proc_init()
 */
ENTRY(cpu_mohawk_proc_init)
	mov pc, lr

/*
 * cpu_mohawk_proc_fin()
 */
ENTRY(cpu_mohawk_proc_fin)
	stmfd sp!, {lr}
	mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr cpsr_c, ip
	bl mohawk_flush_kern_cache_all
	mrc p15, 0, r0, c1, c0, 0	@ ctrl register
	bic r0, r0, #0x1800		@ ...iz...........
	bic r0, r0, #0x0006		@ .............ca.
	mcr p15, 0, r0, c1, c0, 0	@ disable caches
	ldmfd sp!, {pc}

/*
 * cpu_mohawk_reset(loc)
 *
 * Perform a soft reset of the system. Put the CPU into the
 * same state as it would be if it had been reset, and branch
 * to what would be the reset vector.
 *
 * loc: location to jump to for soft reset
 *
 * (same as arm926)
 */
	.align 5
ENTRY(cpu_mohawk_reset)
	mov ip, #0
	mcr p15, 0, ip, c7, c7, 0	@ invalidate I,D caches
	mcr p15, 0, ip, c7, c10, 4	@ drain WB
	mcr p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mrc p15, 0, ip, c1, c0, 0	@ ctrl register
	bic ip, ip, #0x0007		@ .............cam
	bic ip, ip, #0x1100		@ ...i...s........
	mcr p15, 0, ip, c1, c0, 0	@ ctrl register
	mov pc, r0

/*
 * cpu_mohawk_do_idle()
 *
 * Called with IRQs disabled
 */
	.align 5
ENTRY(cpu_mohawk_do_idle)
	mov r0, #0
	mcr p15, 0, r0, c7, c10, 4	@ drain write buffer
	mcr p15, 0, r0, c7, c0, 4	@ wait for interrupt
	mov pc, lr

/*
 * flush_user_cache_all()
 *
 * Clean and invalidate all cache entries in a particular
 * address space.
 */
ENTRY(mohawk_flush_user_cache_all)
	/* FALLTHROUGH */

/*
 * flush_kern_cache_all()
 *
 * Clean and invalidate the entire cache.
 */
ENTRY(mohawk_flush_kern_cache_all)
	mov r2, #VM_EXEC
	mov ip, #0
__flush_whole_cache:
	mcr p15, 0, ip, c7, c14, 0	@ clean & invalidate all D cache
	tst r2, #VM_EXEC
	mcrne p15, 0, ip, c7, c5, 0	@ invalidate I cache
	mcrne p15, 0, ip, c7, c10, 0	@ drain write buffer
	mov pc, lr

/*
 * flush_user_cache_range(start, end, flags)
 *
 * Clean and invalidate a range of cache entries in the
 * specified address range.
 *
 * - start - start address (inclusive)
 * - end - end address (exclusive)
 * - flags - vm_flags describing address space
 *
 * (same as arm926)
 */
ENTRY(mohawk_flush_user_cache_range)
	mov ip, #0
	sub r3, r1, r0			@ calculate total size
	cmp r3, #CACHE_DLIMIT
	bgt __flush_whole_cache
1:	tst r2, #VM_EXEC
	mcr p15, 0, r0, c7, c14, 1	@ clean and invalidate D entry
	mcrne p15, 0, r0, c7, c5, 1	@ invalidate I entry
	add r0, r0, #CACHE_DLINESIZE
	mcr p15, 0, r0, c7, c14, 1	@ clean and invalidate D entry
	mcrne p15, 0, r0, c7, c5, 1	@ invalidate I entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	tst r2, #VM_EXEC
	mcrne p15, 0, ip, c7, c10, 4	@ drain WB
	mov pc, lr

/*
 * coherent_kern_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(mohawk_coherent_kern_range)
	/* FALLTHROUGH */

/*
 * coherent_user_range(start, end)
 *
 * Ensure coherency between the Icache and the Dcache in the
 * region described by start, end. If you have non-snooping
 * Harvard caches, you need to implement this function.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as arm926)
 */
ENTRY(mohawk_coherent_user_range)
	bic r0, r0, #CACHE_DLINESIZE - 1
1:	mcr p15, 0, r0, c7, c10, 1	@ clean D entry
	mcr p15, 0, r0, c7, c5, 1	@ invalidate I entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	mov pc, lr

/*
 * flush_kern_dcache_page(void *page)
 *
 * Ensure no D cache aliasing occurs, either with itself or
 * the I cache
 *
 * - addr - page aligned address
 */
ENTRY(mohawk_flush_kern_dcache_page)
	add r1, r0, #PAGE_SZ
1:	mcr p15, 0, r0, c7, c14, 1	@ clean+invalidate D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mov r0, #0
	mcr p15, 0, r0, c7, c5, 0	@ invalidate I cache
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	mov pc, lr

/*
 * dma_inv_range(start, end)
 *
 * Invalidate (discard) the specified virtual address range.
 * May not write back any entries. If 'start' or 'end'
 * are not cache line aligned, those lines must be written
 * back.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(mohawk_dma_inv_range)
	tst r0, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r0, c7, c10, 1	@ clean D entry
	tst r1, #CACHE_DLINESIZE - 1
	mcrne p15, 0, r1, c7, c10, 1	@ clean D entry
	bic r0, r0, #CACHE_DLINESIZE - 1
1:	mcr p15, 0, r0, c7, c6, 1	@ invalidate D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	mov pc, lr

/*
 * dma_clean_range(start, end)
 *
 * Clean the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 *
 * (same as v4wb)
 */
ENTRY(mohawk_dma_clean_range)
	bic r0, r0, #CACHE_DLINESIZE - 1
1:	mcr p15, 0, r0, c7, c10, 1	@ clean D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	mov pc, lr

/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 *
 * - start - virtual start address
 * - end - virtual end address
 */
ENTRY(mohawk_dma_flush_range)
	bic r0, r0, #CACHE_DLINESIZE - 1
1:
	mcr p15, 0, r0, c7, c14, 1	@ clean+invalidate D entry
	add r0, r0, #CACHE_DLINESIZE
	cmp r0, r1
	blo 1b
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	mov pc, lr

ENTRY(mohawk_cache_fns)
	.long mohawk_flush_kern_cache_all
	.long mohawk_flush_user_cache_all
	.long mohawk_flush_user_cache_range
	.long mohawk_coherent_kern_range
	.long mohawk_coherent_user_range
	.long mohawk_flush_kern_dcache_page
	.long mohawk_dma_inv_range
	.long mohawk_dma_clean_range
	.long mohawk_dma_flush_range

ENTRY(cpu_mohawk_dcache_clean_area)
1:	mcr p15, 0, r0, c7, c10, 1	@ clean D entry
	add r0, r0, #CACHE_DLINESIZE
	subs r1, r1, #CACHE_DLINESIZE
	bhi 1b
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	mov pc, lr

/*
 * cpu_mohawk_switch_mm(pgd)
 *
 * Set the translation base pointer to be as described by pgd.
 *
 * pgd: new page tables
 */
	.align 5
ENTRY(cpu_mohawk_switch_mm)
	mov ip, #0
	mcr p15, 0, ip, c7, c14, 0	@ clean & invalidate all D cache
	mcr p15, 0, ip, c7, c5, 0	@ invalidate I cache
	mcr p15, 0, ip, c7, c10, 4	@ drain WB
	orr r0, r0, #0x18		@ cache the page table in L2
	mcr p15, 0, r0, c2, c0, 0	@ load page table pointer
	mcr p15, 0, ip, c8, c7, 0	@ invalidate I & D TLBs
	mov pc, lr

/*
 * cpu_mohawk_set_pte_ext(ptep, pte, ext)
 *
 * Set a PTE and flush it out
 */
	.align 5
ENTRY(cpu_mohawk_set_pte_ext)
	armv3_set_pte_ext
	mov r0, r0
	mcr p15, 0, r0, c7, c10, 1	@ clean D entry
	mcr p15, 0, r0, c7, c10, 4	@ drain WB
	mov pc, lr

	__INIT

	.type __mohawk_setup, #function
__mohawk_setup:
	mov r0, #0
	mcr p15, 0, r0, c7, c7		@ invalidate I,D caches
	mcr p15, 0, r0, c7, c10, 4	@ drain write buffer
	mcr p15, 0, r0, c8, c7		@ invalidate I,D TLBs
	orr r4, r4, #0x18		@ cache the page table in L2
	mcr p15, 0, r4, c2, c0, 0	@ load page table pointer

	mov r0, #0			@ don't allow CP access
	mcr p15, 0, r0, c15, c1, 0	@ write CP access register

	adr r5, mohawk_crval
	ldmia r5, {r5, r6}
	mrc p15, 0, r0, c1, c0		@ get control register
	bic r0, r0, r5
	orr r0, r0, r6
	mov pc, lr

	.size __mohawk_setup, . - __mohawk_setup

	/*
	 *  R
	 * .RVI ZFRS BLDP WCAM
	 * .011 1001 ..00 0101
	 *
	 */
	.type mohawk_crval, #object
mohawk_crval:
	crval clear=0x00007f3f, mmuset=0x00003905, ucset=0x00001134

	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 * come through these
 */
	.type mohawk_processor_functions, #object
mohawk_processor_functions:
	.word v5t_early_abort
	.word pabort_noifar
	.word cpu_mohawk_proc_init
	.word cpu_mohawk_proc_fin
	.word cpu_mohawk_reset
	.word cpu_mohawk_do_idle
	.word cpu_mohawk_dcache_clean_area
	.word cpu_mohawk_switch_mm
	.word cpu_mohawk_set_pte_ext
	.size mohawk_processor_functions, . - mohawk_processor_functions

	.section ".rodata"

	.type cpu_arch_name, #object
cpu_arch_name:
	.asciz "armv5te"
	.size cpu_arch_name, . - cpu_arch_name

	.type cpu_elf_name, #object
cpu_elf_name:
	.asciz "v5"
	.size cpu_elf_name, . - cpu_elf_name

	.type cpu_mohawk_name, #object
cpu_mohawk_name:
	.asciz "Marvell 88SV331x"
	.size cpu_mohawk_name, . - cpu_mohawk_name

	.align

	.section ".proc.info.init", #alloc, #execinstr

	.type __88sv331x_proc_info,#object
__88sv331x_proc_info:
	.long 0x56158000		@ Marvell 88SV331x (MOHAWK)
	.long 0xfffff000
	.long PMD_TYPE_SECT | \
		PMD_SECT_BUFFERABLE | \
		PMD_SECT_CACHEABLE | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	.long PMD_TYPE_SECT | \
		PMD_BIT4 | \
		PMD_SECT_AP_WRITE | \
		PMD_SECT_AP_READ
	b __mohawk_setup
	.long cpu_arch_name
	.long cpu_elf_name
	.long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
	.long cpu_mohawk_name
	.long mohawk_processor_functions
	.long v4wbi_tlb_fns
	.long v4wb_user_fns
	.long mohawk_cache_fns
	.size __88sv331x_proc_info, . - __88sv331x_proc_info