Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig            154
-rw-r--r--  arch/arm/mm/Makefile            10
-rw-r--r--  arch/arm/mm/abort-lv4t.S         7
-rw-r--r--  arch/arm/mm/abort-nommu.S       19
-rw-r--r--  arch/arm/mm/alignment.c          2
-rw-r--r--  arch/arm/mm/cache-v4.S          10
-rw-r--r--  arch/arm/mm/context.c           45
-rw-r--r--  arch/arm/mm/copypage-v4mc.c      4
-rw-r--r--  arch/arm/mm/copypage-v6.c        4
-rw-r--r--  arch/arm/mm/copypage-xscale.c    4
-rw-r--r--  arch/arm/mm/fault.c             15
-rw-r--r--  arch/arm/mm/fault.h              5
-rw-r--r--  arch/arm/mm/flush.c              6
-rw-r--r--  arch/arm/mm/init.c             224
-rw-r--r--  arch/arm/mm/mm-armv.c          663
-rw-r--r--  arch/arm/mm/mm.h                22
-rw-r--r--  arch/arm/mm/mmap.c              22
-rw-r--r--  arch/arm/mm/mmu.c              768
-rw-r--r--  arch/arm/mm/nommu.c             43
-rw-r--r--  arch/arm/mm/pgd.c              101
-rw-r--r--  arch/arm/mm/proc-arm740.S      174
-rw-r--r--  arch/arm/mm/proc-arm7tdmi.S    249
-rw-r--r--  arch/arm/mm/proc-arm940.S      369
-rw-r--r--  arch/arm/mm/proc-arm946.S      424
-rw-r--r--  arch/arm/mm/proc-arm9tdmi.S    134
-rw-r--r--  arch/arm/mm/proc-xscale.S       58
26 files changed, 2604 insertions, 932 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index b4f220dd5eb8..c0bfb8212b77 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -15,6 +15,7 @@ config CPU_ARM610
15 select CPU_32v3 15 select CPU_32v3
16 select CPU_CACHE_V3 16 select CPU_CACHE_V3
17 select CPU_CACHE_VIVT 17 select CPU_CACHE_VIVT
18 select CPU_CP15_MMU
18 select CPU_COPY_V3 if MMU 19 select CPU_COPY_V3 if MMU
19 select CPU_TLB_V3 if MMU 20 select CPU_TLB_V3 if MMU
20 help 21 help
@@ -24,6 +25,20 @@ config CPU_ARM610
24 Say Y if you want support for the ARM610 processor. 25 Say Y if you want support for the ARM610 processor.
25 Otherwise, say N. 26 Otherwise, say N.
26 27
28# ARM7TDMI
29config CPU_ARM7TDMI
30 bool "Support ARM7TDMI processor"
31 depends on !MMU
32 select CPU_32v4T
33 select CPU_ABRT_LV4T
34 select CPU_CACHE_V4
35 help
 36	  A 32-bit RISC microprocessor based on the ARM7 processor core;
 37	  it has no memory management unit (MMU) and no cache.
38
39 Say Y if you want support for the ARM7TDMI processor.
40 Otherwise, say N.
41
27# ARM710 42# ARM710
28config CPU_ARM710 43config CPU_ARM710
29 bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC 44 bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC
@@ -31,6 +46,7 @@ config CPU_ARM710
31 select CPU_32v3 46 select CPU_32v3
32 select CPU_CACHE_V3 47 select CPU_CACHE_V3
33 select CPU_CACHE_VIVT 48 select CPU_CACHE_VIVT
49 select CPU_CP15_MMU
34 select CPU_COPY_V3 if MMU 50 select CPU_COPY_V3 if MMU
35 select CPU_TLB_V3 if MMU 51 select CPU_TLB_V3 if MMU
36 help 52 help
@@ -50,6 +66,7 @@ config CPU_ARM720T
50 select CPU_ABRT_LV4T 66 select CPU_ABRT_LV4T
51 select CPU_CACHE_V4 67 select CPU_CACHE_V4
52 select CPU_CACHE_VIVT 68 select CPU_CACHE_VIVT
69 select CPU_CP15_MMU
53 select CPU_COPY_V4WT if MMU 70 select CPU_COPY_V4WT if MMU
54 select CPU_TLB_V4WT if MMU 71 select CPU_TLB_V4WT if MMU
55 help 72 help
@@ -59,6 +76,36 @@ config CPU_ARM720T
59 Say Y if you want support for the ARM720T processor. 76 Say Y if you want support for the ARM720T processor.
60 Otherwise, say N. 77 Otherwise, say N.
61 78
79# ARM740T
80config CPU_ARM740T
81 bool "Support ARM740T processor" if ARCH_INTEGRATOR
82 depends on !MMU
83 select CPU_32v4T
84 select CPU_ABRT_LV4T
85 select CPU_CACHE_V3 # although the core is v4t
86 select CPU_CP15_MPU
87 help
 88	  A 32-bit RISC processor with an 8KB cache (4KB on some variants),
 89	  a write buffer and an MPU (Protection Unit), built around
 90	  an ARM7TDMI core.
91
92 Say Y if you want support for the ARM740T processor.
93 Otherwise, say N.
94
95# ARM9TDMI
96config CPU_ARM9TDMI
97 bool "Support ARM9TDMI processor"
98 depends on !MMU
99 select CPU_32v4T
100 select CPU_ABRT_NOMMU
101 select CPU_CACHE_V4
102 help
 103	  A 32-bit RISC microprocessor based on the ARM9 processor core;
 104	  it has no memory management unit (MMU) and no cache.
105
106 Say Y if you want support for the ARM9TDMI processor.
107 Otherwise, say N.
108
62# ARM920T 109# ARM920T
63config CPU_ARM920T 110config CPU_ARM920T
64 bool "Support ARM920T processor" 111 bool "Support ARM920T processor"
@@ -68,6 +115,7 @@ config CPU_ARM920T
68 select CPU_ABRT_EV4T 115 select CPU_ABRT_EV4T
69 select CPU_CACHE_V4WT 116 select CPU_CACHE_V4WT
70 select CPU_CACHE_VIVT 117 select CPU_CACHE_VIVT
118 select CPU_CP15_MMU
71 select CPU_COPY_V4WB if MMU 119 select CPU_COPY_V4WB if MMU
72 select CPU_TLB_V4WBI if MMU 120 select CPU_TLB_V4WBI if MMU
73 help 121 help
@@ -89,6 +137,7 @@ config CPU_ARM922T
89 select CPU_ABRT_EV4T 137 select CPU_ABRT_EV4T
90 select CPU_CACHE_V4WT 138 select CPU_CACHE_V4WT
91 select CPU_CACHE_VIVT 139 select CPU_CACHE_VIVT
140 select CPU_CP15_MMU
92 select CPU_COPY_V4WB if MMU 141 select CPU_COPY_V4WB if MMU
93 select CPU_TLB_V4WBI if MMU 142 select CPU_TLB_V4WBI if MMU
94 help 143 help
@@ -108,6 +157,7 @@ config CPU_ARM925T
108 select CPU_ABRT_EV4T 157 select CPU_ABRT_EV4T
109 select CPU_CACHE_V4WT 158 select CPU_CACHE_V4WT
110 select CPU_CACHE_VIVT 159 select CPU_CACHE_VIVT
160 select CPU_CP15_MMU
111 select CPU_COPY_V4WB if MMU 161 select CPU_COPY_V4WB if MMU
112 select CPU_TLB_V4WBI if MMU 162 select CPU_TLB_V4WBI if MMU
113 help 163 help
@@ -126,6 +176,7 @@ config CPU_ARM926T
126 select CPU_32v5 176 select CPU_32v5
127 select CPU_ABRT_EV5TJ 177 select CPU_ABRT_EV5TJ
128 select CPU_CACHE_VIVT 178 select CPU_CACHE_VIVT
179 select CPU_CP15_MMU
129 select CPU_COPY_V4WB if MMU 180 select CPU_COPY_V4WB if MMU
130 select CPU_TLB_V4WBI if MMU 181 select CPU_TLB_V4WBI if MMU
131 help 182 help
@@ -136,6 +187,39 @@ config CPU_ARM926T
136 Say Y if you want support for the ARM926T processor. 187 Say Y if you want support for the ARM926T processor.
137 Otherwise, say N. 188 Otherwise, say N.
138 189
190# ARM940T
191config CPU_ARM940T
192 bool "Support ARM940T processor" if ARCH_INTEGRATOR
193 depends on !MMU
194 select CPU_32v4T
195 select CPU_ABRT_NOMMU
196 select CPU_CACHE_VIVT
197 select CPU_CP15_MPU
198 help
199 ARM940T is a member of the ARM9TDMI family of general-
 200	  purpose microprocessors with MPU and separate 4KB
 201	  instruction and 4KB data caches, each with a 4-word line
202 length.
203
204 Say Y if you want support for the ARM940T processor.
205 Otherwise, say N.
206
207# ARM946E-S
208config CPU_ARM946E
209 bool "Support ARM946E-S processor" if ARCH_INTEGRATOR
210 depends on !MMU
211 select CPU_32v5
212 select CPU_ABRT_NOMMU
213 select CPU_CACHE_VIVT
214 select CPU_CP15_MPU
215 help
216 ARM946E-S is a member of the ARM9E-S family of high-
217 performance, 32-bit system-on-chip processor solutions.
 218	  TCM and the ARMv5TE 32-bit instruction set are supported.
219
220 Say Y if you want support for the ARM946E-S processor.
221 Otherwise, say N.
222
139# ARM1020 - needs validating 223# ARM1020 - needs validating
140config CPU_ARM1020 224config CPU_ARM1020
141 bool "Support ARM1020T (rev 0) processor" 225 bool "Support ARM1020T (rev 0) processor"
@@ -144,6 +228,7 @@ config CPU_ARM1020
144 select CPU_ABRT_EV4T 228 select CPU_ABRT_EV4T
145 select CPU_CACHE_V4WT 229 select CPU_CACHE_V4WT
146 select CPU_CACHE_VIVT 230 select CPU_CACHE_VIVT
231 select CPU_CP15_MMU
147 select CPU_COPY_V4WB if MMU 232 select CPU_COPY_V4WB if MMU
148 select CPU_TLB_V4WBI if MMU 233 select CPU_TLB_V4WBI if MMU
149 help 234 help
@@ -161,6 +246,7 @@ config CPU_ARM1020E
161 select CPU_ABRT_EV4T 246 select CPU_ABRT_EV4T
162 select CPU_CACHE_V4WT 247 select CPU_CACHE_V4WT
163 select CPU_CACHE_VIVT 248 select CPU_CACHE_VIVT
249 select CPU_CP15_MMU
164 select CPU_COPY_V4WB if MMU 250 select CPU_COPY_V4WB if MMU
165 select CPU_TLB_V4WBI if MMU 251 select CPU_TLB_V4WBI if MMU
166 depends on n 252 depends on n
@@ -172,6 +258,7 @@ config CPU_ARM1022
172 select CPU_32v5 258 select CPU_32v5
173 select CPU_ABRT_EV4T 259 select CPU_ABRT_EV4T
174 select CPU_CACHE_VIVT 260 select CPU_CACHE_VIVT
261 select CPU_CP15_MMU
175 select CPU_COPY_V4WB if MMU # can probably do better 262 select CPU_COPY_V4WB if MMU # can probably do better
176 select CPU_TLB_V4WBI if MMU 263 select CPU_TLB_V4WBI if MMU
177 help 264 help
@@ -189,6 +276,7 @@ config CPU_ARM1026
189 select CPU_32v5 276 select CPU_32v5
190 select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10 277 select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10
191 select CPU_CACHE_VIVT 278 select CPU_CACHE_VIVT
279 select CPU_CP15_MMU
192 select CPU_COPY_V4WB if MMU # can probably do better 280 select CPU_COPY_V4WB if MMU # can probably do better
193 select CPU_TLB_V4WBI if MMU 281 select CPU_TLB_V4WBI if MMU
194 help 282 help
@@ -207,6 +295,7 @@ config CPU_SA110
207 select CPU_ABRT_EV4 295 select CPU_ABRT_EV4
208 select CPU_CACHE_V4WB 296 select CPU_CACHE_V4WB
209 select CPU_CACHE_VIVT 297 select CPU_CACHE_VIVT
298 select CPU_CP15_MMU
210 select CPU_COPY_V4WB if MMU 299 select CPU_COPY_V4WB if MMU
211 select CPU_TLB_V4WB if MMU 300 select CPU_TLB_V4WB if MMU
212 help 301 help
@@ -227,16 +316,18 @@ config CPU_SA1100
227 select CPU_ABRT_EV4 316 select CPU_ABRT_EV4
228 select CPU_CACHE_V4WB 317 select CPU_CACHE_V4WB
229 select CPU_CACHE_VIVT 318 select CPU_CACHE_VIVT
319 select CPU_CP15_MMU
230 select CPU_TLB_V4WB if MMU 320 select CPU_TLB_V4WB if MMU
231 321
232# XScale 322# XScale
233config CPU_XSCALE 323config CPU_XSCALE
234 bool 324 bool
235 depends on ARCH_IOP3XX || ARCH_PXA || ARCH_IXP4XX || ARCH_IXP2000 325 depends on ARCH_IOP32X || ARCH_IOP33X || ARCH_PXA || ARCH_IXP4XX || ARCH_IXP2000
236 default y 326 default y
237 select CPU_32v5 327 select CPU_32v5
238 select CPU_ABRT_EV5T 328 select CPU_ABRT_EV5T
239 select CPU_CACHE_VIVT 329 select CPU_CACHE_VIVT
330 select CPU_CP15_MMU
240 select CPU_TLB_V4WBI if MMU 331 select CPU_TLB_V4WBI if MMU
241 332
242# XScale Core Version 3 333# XScale Core Version 3
@@ -247,6 +338,7 @@ config CPU_XSC3
247 select CPU_32v5 338 select CPU_32v5
248 select CPU_ABRT_EV5T 339 select CPU_ABRT_EV5T
249 select CPU_CACHE_VIVT 340 select CPU_CACHE_VIVT
341 select CPU_CP15_MMU
250 select CPU_TLB_V4WBI if MMU 342 select CPU_TLB_V4WBI if MMU
251 select IO_36 343 select IO_36
252 344
@@ -258,6 +350,7 @@ config CPU_V6
258 select CPU_ABRT_EV6 350 select CPU_ABRT_EV6
259 select CPU_CACHE_V6 351 select CPU_CACHE_V6
260 select CPU_CACHE_VIPT 352 select CPU_CACHE_VIPT
353 select CPU_CP15_MMU
261 select CPU_COPY_V6 if MMU 354 select CPU_COPY_V6 if MMU
262 select CPU_TLB_V6 if MMU 355 select CPU_TLB_V6 if MMU
263 356
@@ -299,6 +392,9 @@ config CPU_32v6
299 bool 392 bool
300 393
301# The abort model 394# The abort model
395config CPU_ABRT_NOMMU
396 bool
397
302config CPU_ABRT_EV4 398config CPU_ABRT_EV4
303 bool 399 bool
304 400
@@ -380,6 +476,23 @@ config CPU_TLB_V6
380 476
381endif 477endif
382 478
479config CPU_CP15
480 bool
481 help
482 Processor has the CP15 register.
483
484config CPU_CP15_MMU
485 bool
486 select CPU_CP15
487 help
488 Processor has the CP15 register, which has MMU related registers.
489
490config CPU_CP15_MPU
491 bool
492 select CPU_CP15
493 help
494 Processor has the CP15 register, which has MPU related registers.
495
383# 496#
384# CPU supports 36-bit I/O 497# CPU supports 36-bit I/O
385# 498#
@@ -390,7 +503,7 @@ comment "Processor Features"
390 503
391config ARM_THUMB 504config ARM_THUMB
392 bool "Support Thumb user binaries" 505 bool "Support Thumb user binaries"
393 depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6 506 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6
394 default y 507 default y
395 help 508 help
396 Say Y if you want to include kernel support for running user space 509 Say Y if you want to include kernel support for running user space
@@ -411,23 +524,48 @@ config CPU_BIG_ENDIAN
411 port must properly enable any big-endian related features 524 port must properly enable any big-endian related features
412 of your chipset/board/processor. 525 of your chipset/board/processor.
413 526
527config CPU_HIGH_VECTOR
 528	depends on !MMU && CPU_CP15 && !CPU_ARM740T
529 bool "Select the High exception vector"
530 default n
531 help
 532	  Say Y here to select the high exception vector (0xFFFF0000~).
 533	  The exception vector location can vary with the platform
 534	  design in nommu mode. If your platform needs the high
 535	  exception vector, say Y.
 536	  Otherwise, or if you are unsure, say N, and the low exception
 537	  vector (0x00000000~) will be used.
538
414config CPU_ICACHE_DISABLE 539config CPU_ICACHE_DISABLE
415 bool "Disable I-Cache" 540 bool "Disable I-Cache (I-bit)"
416 depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6 541 depends on CPU_CP15 && !(CPU_ARM610 || CPU_ARM710 || CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)
417 help 542 help
418 Say Y here to disable the processor instruction cache. Unless 543 Say Y here to disable the processor instruction cache. Unless
419 you have a reason not to or are unsure, say N. 544 you have a reason not to or are unsure, say N.
420 545
421config CPU_DCACHE_DISABLE 546config CPU_DCACHE_DISABLE
422 bool "Disable D-Cache" 547 bool "Disable D-Cache (C-bit)"
423 depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6 548 depends on CPU_CP15
424 help 549 help
425 Say Y here to disable the processor data cache. Unless 550 Say Y here to disable the processor data cache. Unless
426 you have a reason not to or are unsure, say N. 551 you have a reason not to or are unsure, say N.
427 552
553config CPU_DCACHE_SIZE
554 hex
555 depends on CPU_ARM740T || CPU_ARM946E
556 default 0x00001000 if CPU_ARM740T
557 default 0x00002000 # default size for ARM946E-S
558 help
 559	  Some cores can be synthesized with various cache sizes; for
 560	  the ARM946E-S the size can range from 0KB to 1MB.
 561	  The cache maintenance routines are more efficient when the size
 562	  is known at compile time.
 563	  If your SoC is configured with a different size, set the value
 564	  here under the proper conditions.
565
428config CPU_DCACHE_WRITETHROUGH 566config CPU_DCACHE_WRITETHROUGH
429 bool "Force write through D-cache" 567 bool "Force write through D-cache"
430 depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE 568 depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE
431 default y if CPU_ARM925T 569 default y if CPU_ARM925T
432 help 570 help
433 Say Y here to use the data cache in writethrough mode. Unless you 571 Say Y here to use the data cache in writethrough mode. Unless you
@@ -435,7 +573,7 @@ config CPU_DCACHE_WRITETHROUGH
435 573
436config CPU_CACHE_ROUND_ROBIN 574config CPU_CACHE_ROUND_ROBIN
437 bool "Round robin I and D cache replacement algorithm" 575 bool "Round robin I and D cache replacement algorithm"
438 depends on (CPU_ARM926T || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE) 576 depends on (CPU_ARM926T || CPU_ARM946E || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE)
439 help 577 help
440 Say Y here to use the predictable round-robin cache replacement 578 Say Y here to use the predictable round-robin cache replacement
441 policy. Unless you specifically require this or are unsure, say N. 579 policy. Unless you specifically require this or are unsure, say N.
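
The CPU_CP15 / CPU_CP15_MMU / CPU_CP15_MPU symbols introduced above let low-level
code compile out accesses to coprocessor 15 registers on cores that lack them.
As a hedged illustration (not part of this patch; the function name is made up),
a C-level guard using the generated CONFIG_ macro could look like this:

#ifdef CONFIG_CPU_CP15_MMU
static inline unsigned int read_fsr(void)
{
	unsigned int fsr;

	/* CP15 c5: fault status register, only present when an MMU is fitted */
	asm volatile("mrc p15, 0, %0, c5, c0, 0" : "=r" (fsr));
	return fsr;
}
#else
static inline unsigned int read_fsr(void)
{
	return 0;	/* no FSR on CP15-less or MPU-only cores */
}
#endif
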
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 21a2770226ee..d2f5672ecf62 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := consistent.o extable.o fault.o init.o \
6 iomap.o 6 iomap.o
7 7
8obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \ 8obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \
9 mm-armv.o 9 pgd.o mmu.o
10 10
11ifneq ($(CONFIG_MMU),y) 11ifneq ($(CONFIG_MMU),y)
12obj-y += nommu.o 12obj-y += nommu.o
@@ -17,6 +17,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o
17obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o 17obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
18obj-$(CONFIG_DISCONTIGMEM) += discontig.o 18obj-$(CONFIG_DISCONTIGMEM) += discontig.o
19 19
20obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
20obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o 21obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o
21obj-$(CONFIG_CPU_ABRT_EV4T) += abort-ev4t.o 22obj-$(CONFIG_CPU_ABRT_EV4T) += abort-ev4t.o
22obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o 23obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o
@@ -33,7 +34,7 @@ obj-$(CONFIG_CPU_CACHE_V6) += cache-v6.o
33obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o 34obj-$(CONFIG_CPU_COPY_V3) += copypage-v3.o
34obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o 35obj-$(CONFIG_CPU_COPY_V4WT) += copypage-v4wt.o
35obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o 36obj-$(CONFIG_CPU_COPY_V4WB) += copypage-v4wb.o
36obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o mmu.o 37obj-$(CONFIG_CPU_COPY_V6) += copypage-v6.o context.o
37obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o 38obj-$(CONFIG_CPU_SA1100) += copypage-v4mc.o
38obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o 39obj-$(CONFIG_CPU_XSCALE) += copypage-xscale.o
39obj-$(CONFIG_CPU_XSC3) += copypage-xsc3.o 40obj-$(CONFIG_CPU_XSC3) += copypage-xsc3.o
@@ -46,11 +47,16 @@ obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
46 47
47obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o 48obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o
48obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o 49obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o
50obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o
49obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o 51obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o
52obj-$(CONFIG_CPU_ARM740T) += proc-arm740.o
53obj-$(CONFIG_CPU_ARM9TDMI) += proc-arm9tdmi.o
50obj-$(CONFIG_CPU_ARM920T) += proc-arm920.o 54obj-$(CONFIG_CPU_ARM920T) += proc-arm920.o
51obj-$(CONFIG_CPU_ARM922T) += proc-arm922.o 55obj-$(CONFIG_CPU_ARM922T) += proc-arm922.o
52obj-$(CONFIG_CPU_ARM925T) += proc-arm925.o 56obj-$(CONFIG_CPU_ARM925T) += proc-arm925.o
53obj-$(CONFIG_CPU_ARM926T) += proc-arm926.o 57obj-$(CONFIG_CPU_ARM926T) += proc-arm926.o
58obj-$(CONFIG_CPU_ARM940T) += proc-arm940.o
59obj-$(CONFIG_CPU_ARM946E) += proc-arm946.o
54obj-$(CONFIG_CPU_ARM1020) += proc-arm1020.o 60obj-$(CONFIG_CPU_ARM1020) += proc-arm1020.o
55obj-$(CONFIG_CPU_ARM1020E) += proc-arm1020e.o 61obj-$(CONFIG_CPU_ARM1020E) += proc-arm1020e.o
56obj-$(CONFIG_CPU_ARM1022) += proc-arm1022.o 62obj-$(CONFIG_CPU_ARM1022) += proc-arm1022.o
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index db743e510214..9fb7b0e25ea1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -19,11 +19,16 @@
19 */ 19 */
20ENTRY(v4t_late_abort) 20ENTRY(v4t_late_abort)
21 tst r3, #PSR_T_BIT @ check for thumb mode 21 tst r3, #PSR_T_BIT @ check for thumb mode
22#ifdef CONFIG_CPU_CP15_MMU
22 mrc p15, 0, r1, c5, c0, 0 @ get FSR 23 mrc p15, 0, r1, c5, c0, 0 @ get FSR
23 mrc p15, 0, r0, c6, c0, 0 @ get FAR 24 mrc p15, 0, r0, c6, c0, 0 @ get FAR
25 bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
26#else
27 mov r0, #0 @ clear r0, r1 (no FSR/FAR)
28 mov r1, #0
29#endif
24 bne .data_thumb_abort 30 bne .data_thumb_abort
25 ldr r8, [r2] @ read arm instruction 31 ldr r8, [r2] @ read arm instruction
26 bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
27 tst r8, #1 << 20 @ L = 1 -> write? 32 tst r8, #1 << 20 @ L = 1 -> write?
28 orreq r1, r1, #1 << 11 @ yes. 33 orreq r1, r1, #1 << 11 @ yes.
29 and r7, r8, #15 << 24 34 and r7, r8, #15 << 24
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S
new file mode 100644
index 000000000000..a7cc7f9ee45d
--- /dev/null
+++ b/arch/arm/mm/abort-nommu.S
@@ -0,0 +1,19 @@
1#include <linux/linkage.h>
2#include <asm/assembler.h>
3/*
4 * Function: nommu_early_abort
5 *
6 * Params : r2 = address of aborted instruction
7 * : r3 = saved SPSR
8 *
9 * Returns : r0 = 0 (abort address)
10 * : r1 = 0 (FSR)
11 *
12 * Note: There is no FSR/FAR on !CPU_CP15_MMU cores.
13 * Just fill zero into the registers.
14 */
15 .align 5
16ENTRY(nommu_early_abort)
17 mov r0, #0 @ clear r0, r1 (no FSR/FAR)
18 mov r1, #0
19 mov pc, lr
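
For reference, the zeroed r0/r1 pair produced by nommu_early_abort becomes the
(addr, fsr) argument pair of the C-level abort handler. A simplified,
illustrative sketch of that consumer follows; the real entry point does more
decoding, so treat the body as an assumption made for clarity:

asmlinkage void do_DataAbort(unsigned long addr, unsigned int fsr,
			     struct pt_regs *regs)
{
	/*
	 * On !CPU_CP15_MMU cores both addr and fsr arrive as zero, so the
	 * fault cannot be decoded further and is simply reported as a bad area.
	 */
	do_bad_area(addr, fsr, regs);
}
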
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index e0d21bbbe7d7..aa109f074dd9 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -735,7 +735,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
735 /* 735 /*
736 * We got a fault - fix it up, or die. 736 * We got a fault - fix it up, or die.
737 */ 737 */
738 do_bad_area(current, current->mm, addr, fsr, regs); 738 do_bad_area(addr, fsr, regs);
739 return 0; 739 return 0;
740 740
741 swp: 741 swp:
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index b8ad5d58ebe2..b2908063ed6a 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -29,9 +29,13 @@ ENTRY(v4_flush_user_cache_all)
29 * Clean and invalidate the entire cache. 29 * Clean and invalidate the entire cache.
30 */ 30 */
31ENTRY(v4_flush_kern_cache_all) 31ENTRY(v4_flush_kern_cache_all)
 32#ifdef CONFIG_CPU_CP15
32 mov r0, #0 33 mov r0, #0
33 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 34 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
34 mov pc, lr 35 mov pc, lr
36#else
37 /* FALLTHROUGH */
38#endif
35 39
36/* 40/*
37 * flush_user_cache_range(start, end, flags) 41 * flush_user_cache_range(start, end, flags)
@@ -44,9 +48,13 @@ ENTRY(v4_flush_kern_cache_all)
44 * - flags - vma_area_struct flags describing address space 48 * - flags - vma_area_struct flags describing address space
45 */ 49 */
46ENTRY(v4_flush_user_cache_range) 50ENTRY(v4_flush_user_cache_range)
 51#ifdef CONFIG_CPU_CP15
47 mov ip, #0 52 mov ip, #0
48 mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache 53 mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
49 mov pc, lr 54 mov pc, lr
55#else
56 /* FALLTHROUGH */
57#endif
50 58
51/* 59/*
52 * coherent_kern_range(start, end) 60 * coherent_kern_range(start, end)
@@ -108,8 +116,10 @@ ENTRY(v4_dma_inv_range)
108 * - end - virtual end address 116 * - end - virtual end address
109 */ 117 */
110ENTRY(v4_dma_flush_range) 118ENTRY(v4_dma_flush_range)
 119#ifdef CONFIG_CPU_CP15
111 mov r0, #0 120 mov r0, #0
112 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 121 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
122#endif
113 /* FALLTHROUGH */ 123 /* FALLTHROUGH */
114 124
115/* 125/*
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
new file mode 100644
index 000000000000..79e800202424
--- /dev/null
+++ b/arch/arm/mm/context.c
@@ -0,0 +1,45 @@
1/*
2 * linux/arch/arm/mm/context.c
3 *
4 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/init.h>
11#include <linux/sched.h>
12#include <linux/mm.h>
13
14#include <asm/mmu_context.h>
15#include <asm/tlbflush.h>
16
17unsigned int cpu_last_asid = { 1 << ASID_BITS };
18
19/*
20 * We fork()ed a process, and we need a new context for the child
21 * to run in. We reserve version 0 for initial tasks so we will
22 * always allocate an ASID.
23 */
24void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
25{
26 mm->context.id = 0;
27}
28
29void __new_context(struct mm_struct *mm)
30{
31 unsigned int asid;
32
33 asid = ++cpu_last_asid;
34 if (asid == 0)
35 asid = cpu_last_asid = 1 << ASID_BITS;
36
37 /*
38 * If we've used up all our ASIDs, we need
39 * to start a new version and flush the TLB.
40 */
41 if ((asid & ~ASID_MASK) == 0)
42 flush_tlb_all();
43
44 mm->context.id = asid;
45}
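
A hedged usage sketch for the new context.c: a caller on the switch_mm path
allocates an ASID lazily by comparing the generation bits of mm->context.id
against cpu_last_asid, roughly along these lines (the helper name and its
placement in asm-arm/mmu_context.h are assumptions, not part of this file):

static inline void check_context(struct mm_struct *mm)
{
	/*
	 * The bits above ASID_BITS act as a generation counter; if they do
	 * not match the current generation, the mm needs a fresh ASID.
	 */
	if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
		__new_context(mm);
}
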
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index fc69dccdace1..df1645e14b4c 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -20,6 +20,8 @@
20#include <asm/pgtable.h> 20#include <asm/pgtable.h>
21#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
22 22
23#include "mm.h"
24
23/* 25/*
24 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture 26 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
25 * specific hacks for copying pages efficiently. 27 * specific hacks for copying pages efficiently.
@@ -27,8 +29,6 @@
27#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 29#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
28 L_PTE_CACHEABLE) 30 L_PTE_CACHEABLE)
29 31
30#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
31
32static DEFINE_SPINLOCK(minicache_lock); 32static DEFINE_SPINLOCK(minicache_lock);
33 33
34/* 34/*
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 269ce6913ee9..3d0d3a963d20 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -17,6 +17,8 @@
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18#include <asm/cacheflush.h> 18#include <asm/cacheflush.h>
19 19
20#include "mm.h"
21
20#if SHMLBA > 16384 22#if SHMLBA > 16384
21#error FIX ME 23#error FIX ME
22#endif 24#endif
@@ -24,8 +26,6 @@
24#define from_address (0xffff8000) 26#define from_address (0xffff8000)
25#define to_address (0xffffc000) 27#define to_address (0xffffc000)
26 28
27#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
28
29static DEFINE_SPINLOCK(v6_lock); 29static DEFINE_SPINLOCK(v6_lock);
30 30
31/* 31/*
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 42a6ee255ce0..84ebe0aa379e 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -20,6 +20,8 @@
20#include <asm/pgtable.h> 20#include <asm/pgtable.h>
21#include <asm/tlbflush.h> 21#include <asm/tlbflush.h>
22 22
23#include "mm.h"
24
23/* 25/*
24 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture 26 * 0xffff8000 to 0xffffffff is reserved for any ARM architecture
25 * specific hacks for copying pages efficiently. 27 * specific hacks for copying pages efficiently.
@@ -29,8 +31,6 @@
29#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \ 31#define minicache_pgprot __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | \
30 L_PTE_CACHEABLE) 32 L_PTE_CACHEABLE)
31 33
32#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
33
34static DEFINE_SPINLOCK(minicache_lock); 34static DEFINE_SPINLOCK(minicache_lock);
35 35
36/* 36/*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index c5e0622c7765..f0943d160ffe 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -131,10 +131,11 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
131 force_sig_info(sig, &si, tsk); 131 force_sig_info(sig, &si, tsk);
132} 132}
133 133
134void 134void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
135do_bad_area(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr,
136 unsigned int fsr, struct pt_regs *regs)
137{ 135{
136 struct task_struct *tsk = current;
137 struct mm_struct *mm = tsk->active_mm;
138
138 /* 139 /*
139 * If we are in kernel mode at this point, we 140 * If we are in kernel mode at this point, we
140 * have no context to handle this fault with. 141 * have no context to handle this fault with.
@@ -319,7 +320,6 @@ static int
319do_translation_fault(unsigned long addr, unsigned int fsr, 320do_translation_fault(unsigned long addr, unsigned int fsr,
320 struct pt_regs *regs) 321 struct pt_regs *regs)
321{ 322{
322 struct task_struct *tsk;
323 unsigned int index; 323 unsigned int index;
324 pgd_t *pgd, *pgd_k; 324 pgd_t *pgd, *pgd_k;
325 pmd_t *pmd, *pmd_k; 325 pmd_t *pmd, *pmd_k;
@@ -351,9 +351,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
351 return 0; 351 return 0;
352 352
353bad_area: 353bad_area:
354 tsk = current; 354 do_bad_area(addr, fsr, regs);
355
356 do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
357 return 0; 355 return 0;
358} 356}
359 357
@@ -364,8 +362,7 @@ bad_area:
364static int 362static int
365do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) 363do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
366{ 364{
367 struct task_struct *tsk = current; 365 do_bad_area(addr, fsr, regs);
368 do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
369 return 0; 366 return 0;
370} 367}
371 368
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 73b59e83227f..49e9e3804de4 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -1,6 +1,3 @@
1void do_bad_area(struct task_struct *tsk, struct mm_struct *mm, 1void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
2 unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3
4void show_pte(struct mm_struct *mm, unsigned long addr);
5 2
6unsigned long search_exception_table(unsigned long addr); 3unsigned long search_exception_table(unsigned long addr);
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index d438ce41cdd5..454205b789d5 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -15,12 +15,12 @@
15#include <asm/system.h> 15#include <asm/system.h>
16#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
17 17
18#include "mm.h"
19
18#ifdef CONFIG_CPU_CACHE_VIPT 20#ifdef CONFIG_CPU_CACHE_VIPT
19 21
20#define ALIAS_FLUSH_START 0xffff4000 22#define ALIAS_FLUSH_START 0xffff4000
21 23
22#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
23
24static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr) 24static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
25{ 25{
26 unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT); 26 unsigned long to = ALIAS_FLUSH_START + (CACHE_COLOUR(vaddr) << PAGE_SHIFT);
@@ -107,7 +107,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
107 107
108 /* VIPT non-aliasing cache */ 108 /* VIPT non-aliasing cache */
109 if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) && 109 if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
110 vma->vm_flags | VM_EXEC) { 110 vma->vm_flags & VM_EXEC) {
111 unsigned long addr = (unsigned long)kaddr; 111 unsigned long addr = (unsigned long)kaddr;
112 /* only flushing the kernel mapping on non-aliasing VIPT */ 112 /* only flushing the kernel mapping on non-aliasing VIPT */
113 __cpuc_coherent_kern_range(addr, addr + len); 113 __cpuc_coherent_kern_range(addr, addr + len);
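
The flush.c hunk above replaces a bitwise OR with a bitwise AND in the VM_EXEC
test. Because VM_EXEC is a non-zero constant, "vm_flags | VM_EXEC" is always
true, so the old condition could never skip the flush for non-executable
mappings. A standalone demonstration in plain C (the flag value mirrors the
kernel's VM_EXEC and is assumed here for illustration):

#include <stdio.h>

#define VM_EXEC 0x00000004UL

int main(void)
{
	unsigned long vm_flags = 0;	/* a mapping without execute permission */

	printf("buggy test (flags | VM_EXEC): %d\n", (vm_flags | VM_EXEC) != 0);	/* prints 1 */
	printf("fixed test (flags & VM_EXEC): %d\n", (vm_flags & VM_EXEC) != 0);	/* prints 0 */
	return 0;
}
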
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index fe3f7f625008..22217fe2650b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -25,10 +25,9 @@
25#include <asm/mach/arch.h> 25#include <asm/mach/arch.h>
26#include <asm/mach/map.h> 26#include <asm/mach/map.h>
27 27
28DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 28#include "mm.h"
29 29
30extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; 30extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
31extern void _stext, _text, _etext, __data_start, _end, __init_begin, __init_end;
32extern unsigned long phys_initrd_start; 31extern unsigned long phys_initrd_start;
33extern unsigned long phys_initrd_size; 32extern unsigned long phys_initrd_size;
34 33
@@ -38,12 +37,6 @@ extern unsigned long phys_initrd_size;
38 */ 37 */
39static struct meminfo meminfo __initdata = { 0, }; 38static struct meminfo meminfo __initdata = { 0, };
40 39
41/*
42 * empty_zero_page is a special page that is used for
43 * zero-initialized data and COW.
44 */
45struct page *empty_zero_page;
46
47void show_mem(void) 40void show_mem(void)
48{ 41{
49 int free = 0, total = 0, reserved = 0; 42 int free = 0, total = 0, reserved = 0;
@@ -83,16 +76,6 @@ void show_mem(void)
83 printk("%d pages swap cached\n", cached); 76 printk("%d pages swap cached\n", cached);
84} 77}
85 78
86static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
87{
88 return pmd_offset(pgd, virt);
89}
90
91static inline pmd_t *pmd_off_k(unsigned long virt)
92{
93 return pmd_off(pgd_offset_k(virt), virt);
94}
95
96#define for_each_nodebank(iter,mi,no) \ 79#define for_each_nodebank(iter,mi,no) \
97 for (iter = 0; iter < mi->nr_banks; iter++) \ 80 for (iter = 0; iter < mi->nr_banks; iter++) \
98 if (mi->bank[iter].node == no) 81 if (mi->bank[iter].node == no)
@@ -176,62 +159,20 @@ static int __init check_initrd(struct meminfo *mi)
176 return initrd_node; 159 return initrd_node;
177} 160}
178 161
179/* 162static inline void map_memory_bank(struct membank *bank)
180 * Reserve the various regions of node 0
181 */
182static __init void reserve_node_zero(pg_data_t *pgdat)
183{ 163{
184 unsigned long res_size = 0; 164#ifdef CONFIG_MMU
185 165 struct map_desc map;
186 /*
187 * Register the kernel text and data with bootmem.
188 * Note that this can only be in node 0.
189 */
190#ifdef CONFIG_XIP_KERNEL
191 reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
192#else
193 reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
194#endif
195
196 /*
197 * Reserve the page tables. These are already in use,
198 * and can only be in node 0.
199 */
200 reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
201 PTRS_PER_PGD * sizeof(pgd_t));
202
203 /*
204 * Hmm... This should go elsewhere, but we really really need to
205 * stop things allocating the low memory; ideally we need a better
206 * implementation of GFP_DMA which does not assume that DMA-able
207 * memory starts at zero.
208 */
209 if (machine_is_integrator() || machine_is_cintegrator())
210 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
211 166
212 /* 167 map.pfn = __phys_to_pfn(bank->start);
213 * These should likewise go elsewhere. They pre-reserve the 168 map.virtual = __phys_to_virt(bank->start);
214 * screen memory region at the start of main system memory. 169 map.length = bank->size;
215 */ 170 map.type = MT_MEMORY;
216 if (machine_is_edb7211())
217 res_size = 0x00020000;
218 if (machine_is_p720t())
219 res_size = 0x00014000;
220 171
221#ifdef CONFIG_SA1111 172 create_mapping(&map);
222 /*
223 * Because of the SA1111 DMA bug, we want to preserve our
224 * precious DMA-able memory...
225 */
226 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
227#endif 173#endif
228 if (res_size)
229 reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
230} 174}
231 175
232void __init build_mem_type_table(void);
233void __init create_mapping(struct map_desc *md);
234
235static unsigned long __init 176static unsigned long __init
236bootmem_init_node(int node, int initrd_node, struct meminfo *mi) 177bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
237{ 178{
@@ -248,23 +189,18 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
248 * Calculate the pfn range, and map the memory banks for this node. 189 * Calculate the pfn range, and map the memory banks for this node.
249 */ 190 */
250 for_each_nodebank(i, mi, node) { 191 for_each_nodebank(i, mi, node) {
192 struct membank *bank = &mi->bank[i];
251 unsigned long start, end; 193 unsigned long start, end;
252 struct map_desc map;
253 194
254 start = mi->bank[i].start >> PAGE_SHIFT; 195 start = bank->start >> PAGE_SHIFT;
255 end = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT; 196 end = (bank->start + bank->size) >> PAGE_SHIFT;
256 197
257 if (start_pfn > start) 198 if (start_pfn > start)
258 start_pfn = start; 199 start_pfn = start;
259 if (end_pfn < end) 200 if (end_pfn < end)
260 end_pfn = end; 201 end_pfn = end;
261 202
262 map.pfn = __phys_to_pfn(mi->bank[i].start); 203 map_memory_bank(bank);
263 map.virtual = __phys_to_virt(mi->bank[i].start);
264 map.length = mi->bank[i].size;
265 map.type = MT_MEMORY;
266
267 create_mapping(&map);
268 } 204 }
269 205
270 /* 206 /*
@@ -346,9 +282,9 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
346 return end_pfn; 282 return end_pfn;
347} 283}
348 284
349static void __init bootmem_init(struct meminfo *mi) 285void __init bootmem_init(struct meminfo *mi)
350{ 286{
351 unsigned long addr, memend_pfn = 0; 287 unsigned long memend_pfn = 0;
352 int node, initrd_node, i; 288 int node, initrd_node, i;
353 289
354 /* 290 /*
@@ -361,26 +297,6 @@ static void __init bootmem_init(struct meminfo *mi)
361 memcpy(&meminfo, mi, sizeof(meminfo)); 297 memcpy(&meminfo, mi, sizeof(meminfo));
362 298
363 /* 299 /*
364 * Clear out all the mappings below the kernel image.
365 */
366 for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
367 pmd_clear(pmd_off_k(addr));
368#ifdef CONFIG_XIP_KERNEL
369 /* The XIP kernel is mapped in the module area -- skip over it */
370 addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
371#endif
372 for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
373 pmd_clear(pmd_off_k(addr));
374
375 /*
376 * Clear out all the kernel space mappings, except for the first
377 * memory bank, up to the end of the vmalloc region.
378 */
379 for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
380 addr < VMALLOC_END; addr += PGDIR_SIZE)
381 pmd_clear(pmd_off_k(addr));
382
383 /*
384 * Locate which node contains the ramdisk image, if any. 300 * Locate which node contains the ramdisk image, if any.
385 */ 301 */
386 initrd_node = check_initrd(mi); 302 initrd_node = check_initrd(mi);
@@ -413,114 +329,6 @@ static void __init bootmem_init(struct meminfo *mi)
413 max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; 329 max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
414} 330}
415 331
416/*
417 * Set up device the mappings. Since we clear out the page tables for all
418 * mappings above VMALLOC_END, we will remove any debug device mappings.
419 * This means you have to be careful how you debug this function, or any
420 * called function. This means you can't use any function or debugging
421 * method which may touch any device, otherwise the kernel _will_ crash.
422 */
423static void __init devicemaps_init(struct machine_desc *mdesc)
424{
425 struct map_desc map;
426 unsigned long addr;
427 void *vectors;
428
429 /*
430 * Allocate the vector page early.
431 */
432 vectors = alloc_bootmem_low_pages(PAGE_SIZE);
433 BUG_ON(!vectors);
434
435 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
436 pmd_clear(pmd_off_k(addr));
437
438 /*
439 * Map the kernel if it is XIP.
440 * It is always first in the modulearea.
441 */
442#ifdef CONFIG_XIP_KERNEL
443 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & PGDIR_MASK);
444 map.virtual = MODULE_START;
445 map.length = ((unsigned long)&_etext - map.virtual + ~PGDIR_MASK) & PGDIR_MASK;
446 map.type = MT_ROM;
447 create_mapping(&map);
448#endif
449
450 /*
451 * Map the cache flushing regions.
452 */
453#ifdef FLUSH_BASE
454 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
455 map.virtual = FLUSH_BASE;
456 map.length = SZ_1M;
457 map.type = MT_CACHECLEAN;
458 create_mapping(&map);
459#endif
460#ifdef FLUSH_BASE_MINICACHE
461 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
462 map.virtual = FLUSH_BASE_MINICACHE;
463 map.length = SZ_1M;
464 map.type = MT_MINICLEAN;
465 create_mapping(&map);
466#endif
467
468 /*
469 * Create a mapping for the machine vectors at the high-vectors
470 * location (0xffff0000). If we aren't using high-vectors, also
471 * create a mapping at the low-vectors virtual address.
472 */
473 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
474 map.virtual = 0xffff0000;
475 map.length = PAGE_SIZE;
476 map.type = MT_HIGH_VECTORS;
477 create_mapping(&map);
478
479 if (!vectors_high()) {
480 map.virtual = 0;
481 map.type = MT_LOW_VECTORS;
482 create_mapping(&map);
483 }
484
485 /*
486 * Ask the machine support to map in the statically mapped devices.
487 */
488 if (mdesc->map_io)
489 mdesc->map_io();
490
491 /*
492 * Finally flush the caches and tlb to ensure that we're in a
493 * consistent state wrt the writebuffer. This also ensures that
494 * any write-allocated cache lines in the vector page are written
495 * back. After this point, we can start to touch devices again.
496 */
497 local_flush_tlb_all();
498 flush_cache_all();
499}
500
501/*
502 * paging_init() sets up the page tables, initialises the zone memory
503 * maps, and sets up the zero page, bad page and bad page tables.
504 */
505void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
506{
507 void *zero_page;
508
509 build_mem_type_table();
510 bootmem_init(mi);
511 devicemaps_init(mdesc);
512
513 top_pmd = pmd_off_k(0xffff0000);
514
515 /*
516 * allocate the zero page. Note that we count on this going ok.
517 */
518 zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
519 memzero(zero_page, PAGE_SIZE);
520 empty_zero_page = virt_to_page(zero_page);
521 flush_dcache_page(empty_zero_page);
522}
523
524static inline void free_area(unsigned long addr, unsigned long end, char *s) 332static inline void free_area(unsigned long addr, unsigned long end, char *s)
525{ 333{
526 unsigned int size = (end - addr) >> 10; 334 unsigned int size = (end - addr) >> 10;
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mm-armv.c
deleted file mode 100644
index 38769f5862bc..000000000000
--- a/arch/arm/mm/mm-armv.c
+++ /dev/null
@@ -1,663 +0,0 @@
1/*
2 * linux/arch/arm/mm/mm-armv.c
3 *
4 * Copyright (C) 1998-2005 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Page table sludge for ARM v3 and v4 processor architectures.
11 */
12#include <linux/module.h>
13#include <linux/mm.h>
14#include <linux/init.h>
15#include <linux/bootmem.h>
16#include <linux/highmem.h>
17#include <linux/nodemask.h>
18
19#include <asm/pgalloc.h>
20#include <asm/page.h>
21#include <asm/setup.h>
22#include <asm/tlbflush.h>
23
24#include <asm/mach/map.h>
25
26#define CPOLICY_UNCACHED 0
27#define CPOLICY_BUFFERED 1
28#define CPOLICY_WRITETHROUGH 2
29#define CPOLICY_WRITEBACK 3
30#define CPOLICY_WRITEALLOC 4
31
32static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
33static unsigned int ecc_mask __initdata = 0;
34pgprot_t pgprot_kernel;
35
36EXPORT_SYMBOL(pgprot_kernel);
37
38pmd_t *top_pmd;
39
40struct cachepolicy {
41 const char policy[16];
42 unsigned int cr_mask;
43 unsigned int pmd;
44 unsigned int pte;
45};
46
47static struct cachepolicy cache_policies[] __initdata = {
48 {
49 .policy = "uncached",
50 .cr_mask = CR_W|CR_C,
51 .pmd = PMD_SECT_UNCACHED,
52 .pte = 0,
53 }, {
54 .policy = "buffered",
55 .cr_mask = CR_C,
56 .pmd = PMD_SECT_BUFFERED,
57 .pte = PTE_BUFFERABLE,
58 }, {
59 .policy = "writethrough",
60 .cr_mask = 0,
61 .pmd = PMD_SECT_WT,
62 .pte = PTE_CACHEABLE,
63 }, {
64 .policy = "writeback",
65 .cr_mask = 0,
66 .pmd = PMD_SECT_WB,
67 .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
68 }, {
69 .policy = "writealloc",
70 .cr_mask = 0,
71 .pmd = PMD_SECT_WBWA,
72 .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
73 }
74};
75
76/*
77 * These are useful for identifing cache coherency
78 * problems by allowing the cache or the cache and
79 * writebuffer to be turned off. (Note: the write
80 * buffer should not be on and the cache off).
81 */
82static void __init early_cachepolicy(char **p)
83{
84 int i;
85
86 for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
87 int len = strlen(cache_policies[i].policy);
88
89 if (memcmp(*p, cache_policies[i].policy, len) == 0) {
90 cachepolicy = i;
91 cr_alignment &= ~cache_policies[i].cr_mask;
92 cr_no_alignment &= ~cache_policies[i].cr_mask;
93 *p += len;
94 break;
95 }
96 }
97 if (i == ARRAY_SIZE(cache_policies))
98 printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
99 flush_cache_all();
100 set_cr(cr_alignment);
101}
102
103static void __init early_nocache(char **__unused)
104{
105 char *p = "buffered";
106 printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
107 early_cachepolicy(&p);
108}
109
110static void __init early_nowrite(char **__unused)
111{
112 char *p = "uncached";
113 printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
114 early_cachepolicy(&p);
115}
116
117static void __init early_ecc(char **p)
118{
119 if (memcmp(*p, "on", 2) == 0) {
120 ecc_mask = PMD_PROTECTION;
121 *p += 2;
122 } else if (memcmp(*p, "off", 3) == 0) {
123 ecc_mask = 0;
124 *p += 3;
125 }
126}
127
128__early_param("nocache", early_nocache);
129__early_param("nowb", early_nowrite);
130__early_param("cachepolicy=", early_cachepolicy);
131__early_param("ecc=", early_ecc);
132
133static int __init noalign_setup(char *__unused)
134{
135 cr_alignment &= ~CR_A;
136 cr_no_alignment &= ~CR_A;
137 set_cr(cr_alignment);
138 return 1;
139}
140
141__setup("noalign", noalign_setup);
142
143#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
144
145static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
146{
147 return pmd_offset(pgd, virt);
148}
149
150static inline pmd_t *pmd_off_k(unsigned long virt)
151{
152 return pmd_off(pgd_offset_k(virt), virt);
153}
154
155/*
156 * need to get a 16k page for level 1
157 */
158pgd_t *get_pgd_slow(struct mm_struct *mm)
159{
160 pgd_t *new_pgd, *init_pgd;
161 pmd_t *new_pmd, *init_pmd;
162 pte_t *new_pte, *init_pte;
163
164 new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
165 if (!new_pgd)
166 goto no_pgd;
167
168 memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
169
170 /*
171 * Copy over the kernel and IO PGD entries
172 */
173 init_pgd = pgd_offset_k(0);
174 memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
175 (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
176
177 clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
178
179 if (!vectors_high()) {
180 /*
181 * On ARM, first page must always be allocated since it
182 * contains the machine vectors.
183 */
184 new_pmd = pmd_alloc(mm, new_pgd, 0);
185 if (!new_pmd)
186 goto no_pmd;
187
188 new_pte = pte_alloc_map(mm, new_pmd, 0);
189 if (!new_pte)
190 goto no_pte;
191
192 init_pmd = pmd_offset(init_pgd, 0);
193 init_pte = pte_offset_map_nested(init_pmd, 0);
194 set_pte(new_pte, *init_pte);
195 pte_unmap_nested(init_pte);
196 pte_unmap(new_pte);
197 }
198
199 return new_pgd;
200
201no_pte:
202 pmd_free(new_pmd);
203no_pmd:
204 free_pages((unsigned long)new_pgd, 2);
205no_pgd:
206 return NULL;
207}
208
209void free_pgd_slow(pgd_t *pgd)
210{
211 pmd_t *pmd;
212 struct page *pte;
213
214 if (!pgd)
215 return;
216
217 /* pgd is always present and good */
218 pmd = pmd_off(pgd, 0);
219 if (pmd_none(*pmd))
220 goto free;
221 if (pmd_bad(*pmd)) {
222 pmd_ERROR(*pmd);
223 pmd_clear(pmd);
224 goto free;
225 }
226
227 pte = pmd_page(*pmd);
228 pmd_clear(pmd);
229 dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
230 pte_lock_deinit(pte);
231 pte_free(pte);
232 pmd_free(pmd);
233free:
234 free_pages((unsigned long) pgd, 2);
235}
236
237/*
238 * Create a SECTION PGD between VIRT and PHYS in domain
239 * DOMAIN with protection PROT. This operates on half-
240 * pgdir entry increments.
241 */
242static inline void
243alloc_init_section(unsigned long virt, unsigned long phys, int prot)
244{
245 pmd_t *pmdp = pmd_off_k(virt);
246
247 if (virt & (1 << 20))
248 pmdp++;
249
250 *pmdp = __pmd(phys | prot);
251 flush_pmd_entry(pmdp);
252}
253
254/*
255 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
256 */
257static inline void
258alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
259{
260 int i;
261
262 for (i = 0; i < 16; i += 1) {
263 alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
264
265 virt += (PGDIR_SIZE / 2);
266 }
267}
268
269/*
270 * Add a PAGE mapping between VIRT and PHYS in domain
271 * DOMAIN with protection PROT. Note that due to the
272 * way we map the PTEs, we must allocate two PTE_SIZE'd
273 * blocks - one for the Linux pte table, and one for
274 * the hardware pte table.
275 */
276static inline void
277alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
278{
279 pmd_t *pmdp = pmd_off_k(virt);
280 pte_t *ptep;
281
282 if (pmd_none(*pmdp)) {
283 ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
284 sizeof(pte_t));
285
286 __pmd_populate(pmdp, __pa(ptep) | prot_l1);
287 }
288 ptep = pte_offset_kernel(pmdp, virt);
289
290 set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
291}
292
293struct mem_types {
294 unsigned int prot_pte;
295 unsigned int prot_l1;
296 unsigned int prot_sect;
297 unsigned int domain;
298};
299
300static struct mem_types mem_types[] __initdata = {
301 [MT_DEVICE] = {
302 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
303 L_PTE_WRITE,
304 .prot_l1 = PMD_TYPE_TABLE,
305 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
306 PMD_SECT_AP_WRITE,
307 .domain = DOMAIN_IO,
308 },
309 [MT_CACHECLEAN] = {
310 .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
311 .domain = DOMAIN_KERNEL,
312 },
313 [MT_MINICLEAN] = {
314 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
315 .domain = DOMAIN_KERNEL,
316 },
317 [MT_LOW_VECTORS] = {
318 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
319 L_PTE_EXEC,
320 .prot_l1 = PMD_TYPE_TABLE,
321 .domain = DOMAIN_USER,
322 },
323 [MT_HIGH_VECTORS] = {
324 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
325 L_PTE_USER | L_PTE_EXEC,
326 .prot_l1 = PMD_TYPE_TABLE,
327 .domain = DOMAIN_USER,
328 },
329 [MT_MEMORY] = {
330 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
331 .domain = DOMAIN_KERNEL,
332 },
333 [MT_ROM] = {
334 .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
335 .domain = DOMAIN_KERNEL,
336 },
337 [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
338 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
339 L_PTE_WRITE,
340 .prot_l1 = PMD_TYPE_TABLE,
341 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
342 PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
343 PMD_SECT_TEX(1),
344 .domain = DOMAIN_IO,
345 },
346 [MT_NONSHARED_DEVICE] = {
347 .prot_l1 = PMD_TYPE_TABLE,
348 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
349 PMD_SECT_AP_WRITE,
350 .domain = DOMAIN_IO,
351 }
352};
353
354/*
355 * Adjust the PMD section entries according to the CPU in use.
356 */
357void __init build_mem_type_table(void)
358{
359 struct cachepolicy *cp;
360 unsigned int cr = get_cr();
361 unsigned int user_pgprot, kern_pgprot;
362 int cpu_arch = cpu_architecture();
363 int i;
364
365#if defined(CONFIG_CPU_DCACHE_DISABLE)
366 if (cachepolicy > CPOLICY_BUFFERED)
367 cachepolicy = CPOLICY_BUFFERED;
368#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
369 if (cachepolicy > CPOLICY_WRITETHROUGH)
370 cachepolicy = CPOLICY_WRITETHROUGH;
371#endif
372 if (cpu_arch < CPU_ARCH_ARMv5) {
373 if (cachepolicy >= CPOLICY_WRITEALLOC)
374 cachepolicy = CPOLICY_WRITEBACK;
375 ecc_mask = 0;
376 }
377
378 /*
379 * Xscale must not have PMD bit 4 set for section mappings.
380 */
381 if (cpu_is_xscale())
382 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
383 mem_types[i].prot_sect &= ~PMD_BIT4;
384
385 /*
386 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
387 * page tables.
388 */
389 if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
390 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
391 if (mem_types[i].prot_l1)
392 mem_types[i].prot_l1 |= PMD_BIT4;
393
394 cp = &cache_policies[cachepolicy];
395 kern_pgprot = user_pgprot = cp->pte;
396
397 /*
398 * Enable CPU-specific coherency if supported.
399 * (Only available on XSC3 at the moment.)
400 */
401 if (arch_is_coherent()) {
402 if (cpu_is_xsc3()) {
403 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
404 mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
405 }
406 }
407
408 /*
409 * ARMv6 and above have extended page tables.
410 */
411 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
412 /*
413 * bit 4 becomes XN which we must clear for the
414 * kernel memory mapping.
415 */
416 mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
417 mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
418
419 /*
420 * Mark cache clean areas and XIP ROM read only
421 * from SVC mode and no access from userspace.
422 */
423 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
424 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
425 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
426
427 /*
428 * Mark the device area as "shared device"
429 */
430 mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
431 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
432
433 /*
434 * User pages need to be mapped with the ASID
435 * (iow, non-global)
436 */
437 user_pgprot |= L_PTE_ASID;
438
439#ifdef CONFIG_SMP
440 /*
441 * Mark memory with the "shared" attribute for SMP systems
442 */
443 user_pgprot |= L_PTE_SHARED;
444 kern_pgprot |= L_PTE_SHARED;
445 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
446#endif
447 }
448
449 for (i = 0; i < 16; i++) {
450 unsigned long v = pgprot_val(protection_map[i]);
451 v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
452 protection_map[i] = __pgprot(v);
453 }
454
455 mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
456 mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
457
458 if (cpu_arch >= CPU_ARCH_ARMv5) {
459#ifndef CONFIG_SMP
460 /*
461 * Only use write-through for non-SMP systems
462 */
463 mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
464 mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
465#endif
466 } else {
467 mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
468 }
469
470 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
471 L_PTE_DIRTY | L_PTE_WRITE |
472 L_PTE_EXEC | kern_pgprot);
473
474 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
475 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
476 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
477 mem_types[MT_ROM].prot_sect |= cp->pmd;
478
479 switch (cp->pmd) {
480 case PMD_SECT_WT:
481 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
482 break;
483 case PMD_SECT_WB:
484 case PMD_SECT_WBWA:
485 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
486 break;
487 }
488 printk("Memory policy: ECC %sabled, Data cache %s\n",
489 ecc_mask ? "en" : "dis", cp->policy);
490}
491
492#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
493
494/*
495 * Create the page directory entries and any necessary
496 * page tables for the mapping specified by `md'. We
497 * are able to cope here with varying sizes and address
498 * offsets, and we take full advantage of sections and
499 * supersections.
500 */
501void __init create_mapping(struct map_desc *md)
502{
503 unsigned long virt, length;
504 int prot_sect, prot_l1, domain;
505 pgprot_t prot_pte;
506 unsigned long off = (u32)__pfn_to_phys(md->pfn);
507
508 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
509 printk(KERN_WARNING "BUG: not creating mapping for "
510 "0x%08llx at 0x%08lx in user region\n",
511 __pfn_to_phys((u64)md->pfn), md->virtual);
512 return;
513 }
514
515 if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
516 md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
517 printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
518 "overlaps vmalloc space\n",
519 __pfn_to_phys((u64)md->pfn), md->virtual);
520 }
521
522 domain = mem_types[md->type].domain;
523 prot_pte = __pgprot(mem_types[md->type].prot_pte);
524 prot_l1 = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
525 prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
526
527 /*
528 * Catch 36-bit addresses
529 */
530 if(md->pfn >= 0x100000) {
531 if(domain) {
532 printk(KERN_ERR "MM: invalid domain in supersection "
533 "mapping for 0x%08llx at 0x%08lx\n",
534 __pfn_to_phys((u64)md->pfn), md->virtual);
535 return;
536 }
537 if((md->virtual | md->length | __pfn_to_phys(md->pfn))
538 & ~SUPERSECTION_MASK) {
539 printk(KERN_ERR "MM: cannot create mapping for "
540 "0x%08llx at 0x%08lx invalid alignment\n",
541 __pfn_to_phys((u64)md->pfn), md->virtual);
542 return;
543 }
544
545 /*
546 * Shift bits [35:32] of address into bits [23:20] of PMD
547 * (See ARMv6 spec).
548 */
549 off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
550 }
551
552 virt = md->virtual;
553 off -= virt;
554 length = md->length;
555
556 if (mem_types[md->type].prot_l1 == 0 &&
557 (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
558 printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
559 "be mapped using pages, ignoring.\n",
560 __pfn_to_phys(md->pfn), md->virtual);
561 return;
562 }
563
564 while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
565 alloc_init_page(virt, virt + off, prot_l1, prot_pte);
566
567 virt += PAGE_SIZE;
568 length -= PAGE_SIZE;
569 }
570
571 /* N.B. ARMv6 supersections are only defined to work with domain 0.
572 * Since domain assignments can in fact be arbitrary, the
573 * 'domain == 0' check below is required to insure that ARMv6
574 * supersections are only allocated for domain 0 regardless
575 * of the actual domain assignments in use.
576 */
577 if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
578 && domain == 0) {
579 /*
580 * Align to supersection boundary if !high pages.
581 * High pages have already been checked for proper
582 * alignment above and they will fail the SUPSERSECTION_MASK
583 * check because of the way the address is encoded into
584 * offset.
585 */
586 if (md->pfn <= 0x100000) {
587 while ((virt & ~SUPERSECTION_MASK ||
588 (virt + off) & ~SUPERSECTION_MASK) &&
589 length >= (PGDIR_SIZE / 2)) {
590 alloc_init_section(virt, virt + off, prot_sect);
591
592 virt += (PGDIR_SIZE / 2);
593 length -= (PGDIR_SIZE / 2);
594 }
595 }
596
597 while (length >= SUPERSECTION_SIZE) {
598 alloc_init_supersection(virt, virt + off, prot_sect);
599
600 virt += SUPERSECTION_SIZE;
601 length -= SUPERSECTION_SIZE;
602 }
603 }
604
605 /*
606 * A section mapping covers half a "pgdir" entry.
607 */
608 while (length >= (PGDIR_SIZE / 2)) {
609 alloc_init_section(virt, virt + off, prot_sect);
610
611 virt += (PGDIR_SIZE / 2);
612 length -= (PGDIR_SIZE / 2);
613 }
614
615 while (length >= PAGE_SIZE) {
616 alloc_init_page(virt, virt + off, prot_l1, prot_pte);
617
618 virt += PAGE_SIZE;
619 length -= PAGE_SIZE;
620 }
621}
622
623/*
624 * In order to soft-boot, we need to insert a 1:1 mapping in place of
625 * the user-mode pages. This will then ensure that we have predictable
626 * results when turning the mmu off
627 */
628void setup_mm_for_reboot(char mode)
629{
630 unsigned long base_pmdval;
631 pgd_t *pgd;
632 int i;
633
634 if (current->mm && current->mm->pgd)
635 pgd = current->mm->pgd;
636 else
637 pgd = init_mm.pgd;
638
639 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
640 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
641 base_pmdval |= PMD_BIT4;
642
643 for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
644 unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
645 pmd_t *pmd;
646
647 pmd = pmd_off(pgd, i << PGDIR_SHIFT);
648 pmd[0] = __pmd(pmdval);
649 pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
650 flush_pmd_entry(pmd);
651 }
652}
653
654/*
655 * Create the architecture specific mappings
656 */
657void __init iotable_init(struct map_desc *io_desc, int nr)
658{
659 int i;
660
661 for (i = 0; i < nr; i++)
662 create_mapping(io_desc + i);
663}
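
Note on usage: iotable_init() above just walks an array of map_desc entries and hands each one to create_mapping(). A minimal sketch of how a machine's map_io callback might feed it is shown below; the my_board_* names and the physical/virtual addresses are invented for illustration and are not taken from this patch.

	#include <linux/kernel.h>
	#include <linux/init.h>
	#include <asm/sizes.h>
	#include <asm/mach/map.h>

	/* Hypothetical static device window: 1MB of registers at 0x10000000. */
	static struct map_desc my_board_io_desc[] __initdata = {
		{
			.virtual	= 0xf8000000,			/* assumed VA */
			.pfn		= __phys_to_pfn(0x10000000),	/* assumed PA */
			.length		= SZ_1M,
			.type		= MT_DEVICE,
		},
	};

	static void __init my_board_map_io(void)
	{
		/* Invoked via mdesc->map_io() from devicemaps_init(). */
		iotable_init(my_board_io_desc, ARRAY_SIZE(my_board_io_desc));
	}
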
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
new file mode 100644
index 000000000000..bb2bc9ab6bd3
--- /dev/null
+++ b/arch/arm/mm/mm.h
@@ -0,0 +1,22 @@
1/* the upper-most page table pointer */
2extern pmd_t *top_pmd;
3
4#define TOP_PTE(x) pte_offset_kernel(top_pmd, x)
5
6static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
7{
8 return pmd_offset(pgd, virt);
9}
10
11static inline pmd_t *pmd_off_k(unsigned long virt)
12{
13 return pmd_off(pgd_offset_k(virt), virt);
14}
15
16struct map_desc;
17struct meminfo;
18struct pglist_data;
19
20void __init create_mapping(struct map_desc *md);
21void __init bootmem_init(struct meminfo *mi);
22void reserve_node_zero(struct pglist_data *pgdat);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 29e54807c5bc..b0b5f4694070 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -114,3 +114,25 @@ full_search:
114 } 114 }
115} 115}
116 116
117
118/*
119 * You really shouldn't be using read() or write() on /dev/mem. This
120 * might go away in the future.
121 */
122int valid_phys_addr_range(unsigned long addr, size_t size)
123{
124 if (addr + size > __pa(high_memory))
125 return 0;
126
127 return 1;
128}
129
130/*
131 * We don't use supersection mappings for mmap() on /dev/mem, which
132 * means that we can't map the memory area above the 4G barrier into
133 * userspace.
134 */
135int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
136{
137 return !(pfn + (size >> PAGE_SHIFT) > 0x00100000);
138}
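
The pfn limit of 0x00100000 used in valid_mmap_phys_addr_range() is simply the 4GiB boundary expressed in 4KB pages (0x00100000 << 12 == 0x100000000). A small user-space sketch of the same arithmetic, with made-up request values, is shown below.

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define PFN_4G		0x00100000UL	/* 4GiB / 4KB pages */

	/* Mirrors the kernel check above, for illustration only. */
	static int mmap_range_ok(unsigned long pfn, unsigned long size)
	{
		return !(pfn + (size >> PAGE_SHIFT) > PFN_4G);
	}

	int main(void)
	{
		/* 1MB ending exactly at 4GiB is accepted ... */
		printf("%d\n", mmap_range_ok(0xfff00000UL >> PAGE_SHIFT, 0x100000));
		/* ... but a request whose end crosses 4GiB is refused. */
		printf("%d\n", mmap_range_ok(PFN_4G - 1, 2 * 4096));
		return 0;
	}
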
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 0d90227a0a32..e566cbe4b222 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -1,45 +1,771 @@
1/* 1/*
2 * linux/arch/arm/mm/mmu.c 2 * linux/arch/arm/mm/mmu.c
3 * 3 *
4 * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. 4 * Copyright (C) 1995-2005 Russell King
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#include <linux/module.h>
11#include <linux/kernel.h>
12#include <linux/errno.h>
10#include <linux/init.h> 13#include <linux/init.h>
11#include <linux/sched.h> 14#include <linux/bootmem.h>
12#include <linux/mm.h> 15#include <linux/mman.h>
16#include <linux/nodemask.h>
13 17
14#include <asm/mmu_context.h> 18#include <asm/mach-types.h>
15#include <asm/tlbflush.h> 19#include <asm/setup.h>
20#include <asm/sizes.h>
21#include <asm/tlb.h>
16 22
17unsigned int cpu_last_asid = { 1 << ASID_BITS }; 23#include <asm/mach/arch.h>
24#include <asm/mach/map.h>
25
26#include "mm.h"
27
28DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
29
30extern void _stext, __data_start, _end;
31extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
32
33/*
34 * empty_zero_page is a special page that is used for
35 * zero-initialized data and COW.
36 */
37struct page *empty_zero_page;
18 38
19/* 39/*
20 * We fork()ed a process, and we need a new context for the child 40 * The pmd table for the upper-most set of pages.
21 * to run in. We reserve version 0 for initial tasks so we will
22 * always allocate an ASID.
23 */ 41 */
24void __init_new_context(struct task_struct *tsk, struct mm_struct *mm) 42pmd_t *top_pmd;
43
44#define CPOLICY_UNCACHED 0
45#define CPOLICY_BUFFERED 1
46#define CPOLICY_WRITETHROUGH 2
47#define CPOLICY_WRITEBACK 3
48#define CPOLICY_WRITEALLOC 4
49
50static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
51static unsigned int ecc_mask __initdata = 0;
52pgprot_t pgprot_kernel;
53
54EXPORT_SYMBOL(pgprot_kernel);
55
56struct cachepolicy {
57 const char policy[16];
58 unsigned int cr_mask;
59 unsigned int pmd;
60 unsigned int pte;
61};
62
63static struct cachepolicy cache_policies[] __initdata = {
64 {
65 .policy = "uncached",
66 .cr_mask = CR_W|CR_C,
67 .pmd = PMD_SECT_UNCACHED,
68 .pte = 0,
69 }, {
70 .policy = "buffered",
71 .cr_mask = CR_C,
72 .pmd = PMD_SECT_BUFFERED,
73 .pte = PTE_BUFFERABLE,
74 }, {
75 .policy = "writethrough",
76 .cr_mask = 0,
77 .pmd = PMD_SECT_WT,
78 .pte = PTE_CACHEABLE,
79 }, {
80 .policy = "writeback",
81 .cr_mask = 0,
82 .pmd = PMD_SECT_WB,
83 .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
84 }, {
85 .policy = "writealloc",
86 .cr_mask = 0,
87 .pmd = PMD_SECT_WBWA,
88 .pte = PTE_BUFFERABLE|PTE_CACHEABLE,
89 }
90};
91
92/*
93 * These are useful for identifying cache coherency
94 * problems by allowing the cache or the cache and
95 * writebuffer to be turned off. (Note: the write
96 * buffer should not be on and the cache off).
97 */
98static void __init early_cachepolicy(char **p)
99{
100 int i;
101
102 for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
103 int len = strlen(cache_policies[i].policy);
104
105 if (memcmp(*p, cache_policies[i].policy, len) == 0) {
106 cachepolicy = i;
107 cr_alignment &= ~cache_policies[i].cr_mask;
108 cr_no_alignment &= ~cache_policies[i].cr_mask;
109 *p += len;
110 break;
111 }
112 }
113 if (i == ARRAY_SIZE(cache_policies))
114 printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
115 flush_cache_all();
116 set_cr(cr_alignment);
117}
118__early_param("cachepolicy=", early_cachepolicy);
119
120static void __init early_nocache(char **__unused)
121{
122 char *p = "buffered";
123 printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
124 early_cachepolicy(&p);
125}
126__early_param("nocache", early_nocache);
127
128static void __init early_nowrite(char **__unused)
129{
130 char *p = "uncached";
131 printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
132 early_cachepolicy(&p);
133}
134__early_param("nowb", early_nowrite);
135
136static void __init early_ecc(char **p)
137{
138 if (memcmp(*p, "on", 2) == 0) {
139 ecc_mask = PMD_PROTECTION;
140 *p += 2;
141 } else if (memcmp(*p, "off", 3) == 0) {
142 ecc_mask = 0;
143 *p += 3;
144 }
145}
146__early_param("ecc=", early_ecc);
147
148static int __init noalign_setup(char *__unused)
25{ 149{
26 mm->context.id = 0; 150 cr_alignment &= ~CR_A;
151 cr_no_alignment &= ~CR_A;
152 set_cr(cr_alignment);
153 return 1;
27} 154}
155__setup("noalign", noalign_setup);
156
157struct mem_types {
158 unsigned int prot_pte;
159 unsigned int prot_l1;
160 unsigned int prot_sect;
161 unsigned int domain;
162};
28 163
29void __new_context(struct mm_struct *mm) 164static struct mem_types mem_types[] __initdata = {
165 [MT_DEVICE] = {
166 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
167 L_PTE_WRITE,
168 .prot_l1 = PMD_TYPE_TABLE,
169 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
170 PMD_SECT_AP_WRITE,
171 .domain = DOMAIN_IO,
172 },
173 [MT_CACHECLEAN] = {
174 .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
175 .domain = DOMAIN_KERNEL,
176 },
177 [MT_MINICLEAN] = {
178 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_MINICACHE,
179 .domain = DOMAIN_KERNEL,
180 },
181 [MT_LOW_VECTORS] = {
182 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
183 L_PTE_EXEC,
184 .prot_l1 = PMD_TYPE_TABLE,
185 .domain = DOMAIN_USER,
186 },
187 [MT_HIGH_VECTORS] = {
188 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
189 L_PTE_USER | L_PTE_EXEC,
190 .prot_l1 = PMD_TYPE_TABLE,
191 .domain = DOMAIN_USER,
192 },
193 [MT_MEMORY] = {
194 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_AP_WRITE,
195 .domain = DOMAIN_KERNEL,
196 },
197 [MT_ROM] = {
198 .prot_sect = PMD_TYPE_SECT | PMD_BIT4,
199 .domain = DOMAIN_KERNEL,
200 },
201 [MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
202 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
203 L_PTE_WRITE,
204 .prot_l1 = PMD_TYPE_TABLE,
205 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_UNCACHED |
206 PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
207 PMD_SECT_TEX(1),
208 .domain = DOMAIN_IO,
209 },
210 [MT_NONSHARED_DEVICE] = {
211 .prot_l1 = PMD_TYPE_TABLE,
212 .prot_sect = PMD_TYPE_SECT | PMD_BIT4 | PMD_SECT_NONSHARED_DEV |
213 PMD_SECT_AP_WRITE,
214 .domain = DOMAIN_IO,
215 }
216};
217
218/*
219 * Adjust the PMD section entries according to the CPU in use.
220 */
221static void __init build_mem_type_table(void)
30{ 222{
31 unsigned int asid; 223 struct cachepolicy *cp;
224 unsigned int cr = get_cr();
225 unsigned int user_pgprot, kern_pgprot;
226 int cpu_arch = cpu_architecture();
227 int i;
32 228
33 asid = ++cpu_last_asid; 229#if defined(CONFIG_CPU_DCACHE_DISABLE)
34 if (asid == 0) 230 if (cachepolicy > CPOLICY_BUFFERED)
35 asid = cpu_last_asid = 1 << ASID_BITS; 231 cachepolicy = CPOLICY_BUFFERED;
232#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
233 if (cachepolicy > CPOLICY_WRITETHROUGH)
234 cachepolicy = CPOLICY_WRITETHROUGH;
235#endif
236 if (cpu_arch < CPU_ARCH_ARMv5) {
237 if (cachepolicy >= CPOLICY_WRITEALLOC)
238 cachepolicy = CPOLICY_WRITEBACK;
239 ecc_mask = 0;
240 }
241
242 /*
243 * Xscale must not have PMD bit 4 set for section mappings.
244 */
245 if (cpu_is_xscale())
246 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
247 mem_types[i].prot_sect &= ~PMD_BIT4;
36 248
37 /* 249 /*
38 * If we've used up all our ASIDs, we need 250 * ARMv5 and lower, excluding Xscale, bit 4 must be set for
39 * to start a new version and flush the TLB. 251 * page tables.
40 */ 252 */
41 if ((asid & ~ASID_MASK) == 0) 253 if (cpu_arch < CPU_ARCH_ARMv6 && !cpu_is_xscale())
42 flush_tlb_all(); 254 for (i = 0; i < ARRAY_SIZE(mem_types); i++)
255 if (mem_types[i].prot_l1)
256 mem_types[i].prot_l1 |= PMD_BIT4;
257
258 cp = &cache_policies[cachepolicy];
259 kern_pgprot = user_pgprot = cp->pte;
260
261 /*
262 * Enable CPU-specific coherency if supported.
263 * (Only available on XSC3 at the moment.)
264 */
265 if (arch_is_coherent()) {
266 if (cpu_is_xsc3()) {
267 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
268 mem_types[MT_MEMORY].prot_pte |= L_PTE_COHERENT;
269 }
270 }
271
272 /*
273 * ARMv6 and above have extended page tables.
274 */
275 if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
276 /*
277 * bit 4 becomes XN which we must clear for the
278 * kernel memory mapping.
279 */
280 mem_types[MT_MEMORY].prot_sect &= ~PMD_SECT_XN;
281 mem_types[MT_ROM].prot_sect &= ~PMD_SECT_XN;
282
283 /*
284 * Mark cache clean areas and XIP ROM read only
285 * from SVC mode and no access from userspace.
286 */
287 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
288 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
289 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
290
291 /*
292 * Mark the device area as "shared device"
293 */
294 mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
295 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
296
297 /*
298 * User pages need to be mapped with the ASID
299 * (iow, non-global)
300 */
301 user_pgprot |= L_PTE_ASID;
302
303#ifdef CONFIG_SMP
304 /*
305 * Mark memory with the "shared" attribute for SMP systems
306 */
307 user_pgprot |= L_PTE_SHARED;
308 kern_pgprot |= L_PTE_SHARED;
309 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
310#endif
311 }
312
313 for (i = 0; i < 16; i++) {
314 unsigned long v = pgprot_val(protection_map[i]);
315 v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
316 protection_map[i] = __pgprot(v);
317 }
318
319 mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
320 mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;
321
322 if (cpu_arch >= CPU_ARCH_ARMv5) {
323#ifndef CONFIG_SMP
324 /*
325 * Only use write-through for non-SMP systems
326 */
327 mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
328 mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
329#endif
330 } else {
331 mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
332 }
333
334 pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
335 L_PTE_DIRTY | L_PTE_WRITE |
336 L_PTE_EXEC | kern_pgprot);
337
338 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
339 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
340 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
341 mem_types[MT_ROM].prot_sect |= cp->pmd;
342
343 switch (cp->pmd) {
344 case PMD_SECT_WT:
345 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
346 break;
347 case PMD_SECT_WB:
348 case PMD_SECT_WBWA:
349 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
350 break;
351 }
352 printk("Memory policy: ECC %sabled, Data cache %s\n",
353 ecc_mask ? "en" : "dis", cp->policy);
354}
355
356#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
357
358/*
359 * Create a SECTION PGD between VIRT and PHYS in domain
360 * DOMAIN with protection PROT. This operates on half-
361 * pgdir entry increments.
362 */
363static inline void
364alloc_init_section(unsigned long virt, unsigned long phys, int prot)
365{
366 pmd_t *pmdp = pmd_off_k(virt);
367
368 if (virt & (1 << 20))
369 pmdp++;
370
371 *pmdp = __pmd(phys | prot);
372 flush_pmd_entry(pmdp);
373}
374
375/*
376 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
377 */
378static inline void
379alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
380{
381 int i;
382
383 for (i = 0; i < 16; i += 1) {
384 alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
385
386 virt += (PGDIR_SIZE / 2);
387 }
388}
389
390/*
391 * Add a PAGE mapping between VIRT and PHYS in domain
392 * DOMAIN with protection PROT. Note that due to the
393 * way we map the PTEs, we must allocate two PTE_SIZE'd
394 * blocks - one for the Linux pte table, and one for
395 * the hardware pte table.
396 */
397static inline void
398alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
399{
400 pmd_t *pmdp = pmd_off_k(virt);
401 pte_t *ptep;
402
403 if (pmd_none(*pmdp)) {
404 ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
405 sizeof(pte_t));
406
407 __pmd_populate(pmdp, __pa(ptep) | prot_l1);
408 }
409 ptep = pte_offset_kernel(pmdp, virt);
410
411 set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
412}
413
414/*
415 * Create the page directory entries and any necessary
416 * page tables for the mapping specified by `md'. We
417 * are able to cope here with varying sizes and address
418 * offsets, and we take full advantage of sections and
419 * supersections.
420 */
421void __init create_mapping(struct map_desc *md)
422{
423 unsigned long virt, length;
424 int prot_sect, prot_l1, domain;
425 pgprot_t prot_pte;
426 unsigned long off = (u32)__pfn_to_phys(md->pfn);
427
428 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
429 printk(KERN_WARNING "BUG: not creating mapping for "
430 "0x%08llx at 0x%08lx in user region\n",
431 __pfn_to_phys((u64)md->pfn), md->virtual);
432 return;
433 }
434
435 if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
436 md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
437 printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
438 "overlaps vmalloc space\n",
439 __pfn_to_phys((u64)md->pfn), md->virtual);
440 }
441
442 domain = mem_types[md->type].domain;
443 prot_pte = __pgprot(mem_types[md->type].prot_pte);
444 prot_l1 = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
445 prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);
446
447 /*
448 * Catch 36-bit addresses
449 */
450 if(md->pfn >= 0x100000) {
451 if(domain) {
452 printk(KERN_ERR "MM: invalid domain in supersection "
453 "mapping for 0x%08llx at 0x%08lx\n",
454 __pfn_to_phys((u64)md->pfn), md->virtual);
455 return;
456 }
457 if((md->virtual | md->length | __pfn_to_phys(md->pfn))
458 & ~SUPERSECTION_MASK) {
459 printk(KERN_ERR "MM: cannot create mapping for "
460 "0x%08llx at 0x%08lx invalid alignment\n",
461 __pfn_to_phys((u64)md->pfn), md->virtual);
462 return;
463 }
464
465 /*
466 * Shift bits [35:32] of address into bits [23:20] of PMD
467 * (See ARMv6 spec).
468 */
469 off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
470 }
471
472 virt = md->virtual;
473 off -= virt;
474 length = md->length;
475
476 if (mem_types[md->type].prot_l1 == 0 &&
477 (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
478 printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
479 "be mapped using pages, ignoring.\n",
480 __pfn_to_phys(md->pfn), md->virtual);
481 return;
482 }
483
484 while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
485 alloc_init_page(virt, virt + off, prot_l1, prot_pte);
486
487 virt += PAGE_SIZE;
488 length -= PAGE_SIZE;
489 }
490
491 /* N.B. ARMv6 supersections are only defined to work with domain 0.
492 * Since domain assignments can in fact be arbitrary, the
493 * 'domain == 0' check below is required to ensure that ARMv6
494 * supersections are only allocated for domain 0 regardless
495 * of the actual domain assignments in use.
496 */
497 if ((cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())
498 && domain == 0) {
499 /*
500 * Align to supersection boundary if !high pages.
501 * High pages have already been checked for proper
502 * alignment above and they will fail the SUPERSECTION_MASK
503 * check because of the way the address is encoded into
504 * offset.
505 */
506 if (md->pfn <= 0x100000) {
507 while ((virt & ~SUPERSECTION_MASK ||
508 (virt + off) & ~SUPERSECTION_MASK) &&
509 length >= (PGDIR_SIZE / 2)) {
510 alloc_init_section(virt, virt + off, prot_sect);
511
512 virt += (PGDIR_SIZE / 2);
513 length -= (PGDIR_SIZE / 2);
514 }
515 }
516
517 while (length >= SUPERSECTION_SIZE) {
518 alloc_init_supersection(virt, virt + off, prot_sect);
519
520 virt += SUPERSECTION_SIZE;
521 length -= SUPERSECTION_SIZE;
522 }
523 }
524
525 /*
526 * A section mapping covers half a "pgdir" entry.
527 */
528 while (length >= (PGDIR_SIZE / 2)) {
529 alloc_init_section(virt, virt + off, prot_sect);
530
531 virt += (PGDIR_SIZE / 2);
532 length -= (PGDIR_SIZE / 2);
533 }
534
535 while (length >= PAGE_SIZE) {
536 alloc_init_page(virt, virt + off, prot_l1, prot_pte);
537
538 virt += PAGE_SIZE;
539 length -= PAGE_SIZE;
540 }
541}
542
543/*
544 * Create the architecture specific mappings
545 */
546void __init iotable_init(struct map_desc *io_desc, int nr)
547{
548 int i;
549
550 for (i = 0; i < nr; i++)
551 create_mapping(io_desc + i);
552}
553
554static inline void prepare_page_table(struct meminfo *mi)
555{
556 unsigned long addr;
557
558 /*
559 * Clear out all the mappings below the kernel image.
560 */
561 for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
562 pmd_clear(pmd_off_k(addr));
563
564#ifdef CONFIG_XIP_KERNEL
565 /* The XIP kernel is mapped in the module area -- skip over it */
566 addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
567#endif
568 for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
569 pmd_clear(pmd_off_k(addr));
570
571 /*
572 * Clear out all the kernel space mappings, except for the first
573 * memory bank, up to the end of the vmalloc region.
574 */
575 for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
576 addr < VMALLOC_END; addr += PGDIR_SIZE)
577 pmd_clear(pmd_off_k(addr));
578}
579
580/*
581 * Reserve the various regions of node 0
582 */
583void __init reserve_node_zero(pg_data_t *pgdat)
584{
585 unsigned long res_size = 0;
586
587 /*
588 * Register the kernel text and data with bootmem.
589 * Note that this can only be in node 0.
590 */
591#ifdef CONFIG_XIP_KERNEL
592 reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
593#else
594 reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
595#endif
596
597 /*
598 * Reserve the page tables. These are already in use,
599 * and can only be in node 0.
600 */
601 reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
602 PTRS_PER_PGD * sizeof(pgd_t));
603
604 /*
605 * Hmm... This should go elsewhere, but we really really need to
606 * stop things allocating the low memory; ideally we need a better
607 * implementation of GFP_DMA which does not assume that DMA-able
608 * memory starts at zero.
609 */
610 if (machine_is_integrator() || machine_is_cintegrator())
611 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
612
613 /*
614 * These should likewise go elsewhere. They pre-reserve the
615 * screen memory region at the start of main system memory.
616 */
617 if (machine_is_edb7211())
618 res_size = 0x00020000;
619 if (machine_is_p720t())
620 res_size = 0x00014000;
621
622#ifdef CONFIG_SA1111
623 /*
624 * Because of the SA1111 DMA bug, we want to preserve our
625 * precious DMA-able memory...
626 */
627 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
628#endif
629 if (res_size)
630 reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
631}
632
633/*
634 * Set up the device mappings. Since we clear out the page tables for all
635 * mappings above VMALLOC_END, we will remove any debug device mappings.
636 * This means you have to be careful how you debug this function, or any
637 * called function. This means you can't use any function or debugging
638 * method which may touch any device, otherwise the kernel _will_ crash.
639 */
640static void __init devicemaps_init(struct machine_desc *mdesc)
641{
642 struct map_desc map;
643 unsigned long addr;
644 void *vectors;
645
646 /*
647 * Allocate the vector page early.
648 */
649 vectors = alloc_bootmem_low_pages(PAGE_SIZE);
650 BUG_ON(!vectors);
651
652 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
653 pmd_clear(pmd_off_k(addr));
654
655 /*
656 * Map the kernel if it is XIP.
658 * It is always first in the module area.
658 */
659#ifdef CONFIG_XIP_KERNEL
660 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
661 map.virtual = MODULE_START;
662 map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
663 map.type = MT_ROM;
664 create_mapping(&map);
665#endif
666
667 /*
668 * Map the cache flushing regions.
669 */
670#ifdef FLUSH_BASE
671 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
672 map.virtual = FLUSH_BASE;
673 map.length = SZ_1M;
674 map.type = MT_CACHECLEAN;
675 create_mapping(&map);
676#endif
677#ifdef FLUSH_BASE_MINICACHE
678 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
679 map.virtual = FLUSH_BASE_MINICACHE;
680 map.length = SZ_1M;
681 map.type = MT_MINICLEAN;
682 create_mapping(&map);
683#endif
684
685 /*
686 * Create a mapping for the machine vectors at the high-vectors
687 * location (0xffff0000). If we aren't using high-vectors, also
688 * create a mapping at the low-vectors virtual address.
689 */
690 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
691 map.virtual = 0xffff0000;
692 map.length = PAGE_SIZE;
693 map.type = MT_HIGH_VECTORS;
694 create_mapping(&map);
695
696 if (!vectors_high()) {
697 map.virtual = 0;
698 map.type = MT_LOW_VECTORS;
699 create_mapping(&map);
700 }
701
702 /*
703 * Ask the machine support to map in the statically mapped devices.
704 */
705 if (mdesc->map_io)
706 mdesc->map_io();
707
708 /*
709 * Finally flush the caches and tlb to ensure that we're in a
710 * consistent state wrt the writebuffer. This also ensures that
711 * any write-allocated cache lines in the vector page are written
712 * back. After this point, we can start to touch devices again.
713 */
714 local_flush_tlb_all();
715 flush_cache_all();
716}
717
718/*
719 * paging_init() sets up the page tables, initialises the zone memory
720 * maps, and sets up the zero page, bad page and bad page tables.
721 */
722void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
723{
724 void *zero_page;
725
726 build_mem_type_table();
727 prepare_page_table(mi);
728 bootmem_init(mi);
729 devicemaps_init(mdesc);
730
731 top_pmd = pmd_off_k(0xffff0000);
732
733 /*
734 * allocate the zero page. Note that we count on this going ok.
735 */
736 zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
737 memzero(zero_page, PAGE_SIZE);
738 empty_zero_page = virt_to_page(zero_page);
739 flush_dcache_page(empty_zero_page);
740}
741
742/*
743 * In order to soft-boot, we need to insert a 1:1 mapping in place of
744 * the user-mode pages. This will then ensure that we have predictable
745 * results when turning the mmu off
746 */
747void setup_mm_for_reboot(char mode)
748{
749 unsigned long base_pmdval;
750 pgd_t *pgd;
751 int i;
752
753 if (current->mm && current->mm->pgd)
754 pgd = current->mm->pgd;
755 else
756 pgd = init_mm.pgd;
757
758 base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
759 if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
760 base_pmdval |= PMD_BIT4;
761
762 for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
763 unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
764 pmd_t *pmd;
43 765
44 mm->context.id = asid; 766 pmd = pmd_off(pgd, i << PGDIR_SHIFT);
767 pmd[0] = __pmd(pmdval);
768 pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
769 flush_pmd_entry(pmd);
770 }
45} 771}
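
The 36-bit path in create_mapping() folds bits [35:32] of the physical address into bits [23:20] of each supersection descriptor, as the in-code comment says. The sketch below walks that encoding for an assumed physical address of 0x2_4000_0000; the value is purely illustrative.

	#include <stdio.h>

	#define PAGE_SHIFT	12

	int main(void)
	{
		unsigned long long phys = 0x240000000ULL;	/* assumed 36-bit address */
		unsigned long pfn = phys >> PAGE_SHIFT;		/* 0x240000 */

		/* Low 32 bits of the physical address ... */
		unsigned long off = (unsigned long)(phys & 0xffffffffULL);

		/* ... plus bits [35:32] re-encoded into bits [23:20], exactly as
		 * the "off |= ..." statement in create_mapping() does. */
		off |= ((pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20;

		printf("pfn=0x%lx off=0x%lx\n", pfn, off);	/* off = 0x40200000 */
		return 0;
	}
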
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1464ed817b5d..d0e66424a597 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -11,6 +11,49 @@
11#include <asm/io.h> 11#include <asm/io.h>
12#include <asm/page.h> 12#include <asm/page.h>
13 13
14#include "mm.h"
15
16extern void _stext, __data_start, _end;
17
18/*
19 * Reserve the various regions of node 0
20 */
21void __init reserve_node_zero(pg_data_t *pgdat)
22{
23 /*
24 * Register the kernel text and data with bootmem.
25 * Note that this can only be in node 0.
26 */
27#ifdef CONFIG_XIP_KERNEL
28 reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
29#else
30 reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
31#endif
32
33 /*
34 * Register the exception vector page.
35 * On architectures where the exception vectors live in DRAM, alloc_page()
36 * could hand out this page as address "0", which then looks like a failure.
37 */
38 reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE);
39}
40
41/*
42 * paging_init() sets up the page tables, initialises the zone memory
43 * maps, and sets up the zero page, bad page and bad page tables.
44 */
45void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
46{
47 bootmem_init(mi);
48}
49
50/*
51 * We don't need to do anything here for nommu machines.
52 */
53void setup_mm_for_reboot(char mode)
54{
55}
56
14void flush_dcache_page(struct page *page) 57void flush_dcache_page(struct page *page)
15{ 58{
16 __cpuc_flush_dcache_page(page_address(page)); 59 __cpuc_flush_dcache_page(page_address(page));
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
new file mode 100644
index 000000000000..20c1b0df75f2
--- /dev/null
+++ b/arch/arm/mm/pgd.c
@@ -0,0 +1,101 @@
1/*
2 * linux/arch/arm/mm/pgd.c
3 *
4 * Copyright (C) 1998-2005 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/mm.h>
11#include <linux/highmem.h>
12
13#include <asm/pgalloc.h>
14#include <asm/page.h>
15#include <asm/tlbflush.h>
16
17#include "mm.h"
18
19#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
20
21/*
22 * need to get a 16k page for level 1
23 */
24pgd_t *get_pgd_slow(struct mm_struct *mm)
25{
26 pgd_t *new_pgd, *init_pgd;
27 pmd_t *new_pmd, *init_pmd;
28 pte_t *new_pte, *init_pte;
29
30 new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
31 if (!new_pgd)
32 goto no_pgd;
33
34 memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
35
36 /*
37 * Copy over the kernel and IO PGD entries
38 */
39 init_pgd = pgd_offset_k(0);
40 memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
41 (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
42
43 clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
44
45 if (!vectors_high()) {
46 /*
47 * On ARM, first page must always be allocated since it
48 * contains the machine vectors.
49 */
50 new_pmd = pmd_alloc(mm, new_pgd, 0);
51 if (!new_pmd)
52 goto no_pmd;
53
54 new_pte = pte_alloc_map(mm, new_pmd, 0);
55 if (!new_pte)
56 goto no_pte;
57
58 init_pmd = pmd_offset(init_pgd, 0);
59 init_pte = pte_offset_map_nested(init_pmd, 0);
60 set_pte(new_pte, *init_pte);
61 pte_unmap_nested(init_pte);
62 pte_unmap(new_pte);
63 }
64
65 return new_pgd;
66
67no_pte:
68 pmd_free(new_pmd);
69no_pmd:
70 free_pages((unsigned long)new_pgd, 2);
71no_pgd:
72 return NULL;
73}
74
75void free_pgd_slow(pgd_t *pgd)
76{
77 pmd_t *pmd;
78 struct page *pte;
79
80 if (!pgd)
81 return;
82
83 /* pgd is always present and good */
84 pmd = pmd_off(pgd, 0);
85 if (pmd_none(*pmd))
86 goto free;
87 if (pmd_bad(*pmd)) {
88 pmd_ERROR(*pmd);
89 pmd_clear(pmd);
90 goto free;
91 }
92
93 pte = pmd_page(*pmd);
94 pmd_clear(pmd);
95 dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
96 pte_lock_deinit(pte);
97 pte_free(pte);
98 pmd_free(pmd);
99free:
100 free_pages((unsigned long) pgd, 2);
101}
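
get_pgd_slow() asks __get_free_pages() for an order-2 block because the ARM level 1 translation table is 16KB: 4096 word-sized entries, i.e. four 4KB pages. A short sketch of that arithmetic is shown below; the numbers are the classic-ARM values and nothing else is assumed.

	#include <stdio.h>

	int main(void)
	{
		unsigned long l1_size = 4096 * 4;	/* 4096 entries x 4 bytes = 16KB */
		unsigned long page_size = 4096;
		int order = 0;

		/* Smallest power-of-two number of pages covering the L1 table. */
		while ((page_size << order) < l1_size)
			order++;

		/* Matches the __get_free_pages(GFP_KERNEL, 2) call above. */
		printf("L1 table = %lu bytes -> order %d\n", l1_size, order);
		return 0;
	}
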
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
new file mode 100644
index 000000000000..40713818a87b
--- /dev/null
+++ b/arch/arm/mm/proc-arm740.S
@@ -0,0 +1,174 @@
1/*
2 * linux/arch/arm/mm/arm740.S: utility functions for ARM740
3 *
4 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <asm/assembler.h>
14#include <asm/asm-offsets.h>
15#include <asm/pgtable-hwdef.h>
16#include <asm/pgtable.h>
17#include <asm/procinfo.h>
18#include <asm/ptrace.h>
19
20 .text
21/*
22 * cpu_arm740_proc_init()
23 * cpu_arm740_do_idle()
24 * cpu_arm740_dcache_clean_area()
25 * cpu_arm740_switch_mm()
26 *
27 * These are not required.
28 */
29ENTRY(cpu_arm740_proc_init)
30ENTRY(cpu_arm740_do_idle)
31ENTRY(cpu_arm740_dcache_clean_area)
32ENTRY(cpu_arm740_switch_mm)
33 mov pc, lr
34
35/*
36 * cpu_arm740_proc_fin()
37 */
38ENTRY(cpu_arm740_proc_fin)
39 stmfd sp!, {lr}
40 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
41 msr cpsr_c, ip
42 mrc p15, 0, r0, c1, c0, 0
43 bic r0, r0, #0x3f000000 @ bank/f/lock/s
44 bic r0, r0, #0x0000000c @ w-buffer/cache
45 mcr p15, 0, r0, c1, c0, 0 @ disable caches
46 mcr p15, 0, r0, c7, c0, 0 @ invalidate cache
47 ldmfd sp!, {pc}
48
49/*
50 * cpu_arm740_reset(loc)
51 * Params : r0 = address to jump to
52 * Notes : This sets up everything for a reset
53 */
54ENTRY(cpu_arm740_reset)
55 mov ip, #0
56 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
57 mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
58 bic ip, ip, #0x0000000c @ ............wc..
59 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
60 mov pc, r0
61
62 __INIT
63
64 .type __arm740_setup, #function
65__arm740_setup:
66 mov r0, #0
67 mcr p15, 0, r0, c7, c0, 0 @ invalidate caches
68
69 mcr p15, 0, r0, c6, c3 @ disable area 3~7
70 mcr p15, 0, r0, c6, c4
71 mcr p15, 0, r0, c6, c5
72 mcr p15, 0, r0, c6, c6
73 mcr p15, 0, r0, c6, c7
74
75 mov r0, #0x0000003F @ base = 0, size = 4GB
76 mcr p15, 0, r0, c6, c0 @ set area 0, default
77
78 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
79 ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
80 mov r2, #10 @ 11 is the minimum (4KB)
811: add r2, r2, #1 @ area size *= 2
82 mov r1, r1, lsr #1
83 bne 1b @ count not zero r-shift
84 orr r0, r0, r2, lsl #1 @ the area register value
85 orr r0, r0, #1 @ set enable bit
86 mcr p15, 0, r0, c6, c1 @ set area 1, RAM
87
88 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
89 ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
90 mov r2, #10 @ 11 is the minimum (4KB)
911: add r2, r2, #1 @ area size *= 2
92 mov r1, r1, lsr #1
93 bne 1b @ count not zero r-shift
94 orr r0, r0, r2, lsl #1 @ the area register value
95 orr r0, r0, #1 @ set enable bit
96 mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
97
98 mov r0, #0x06
99 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
100#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
101 mov r0, #0x00 @ disable whole write buffer
102#else
103 mov r0, #0x02 @ Region 1 write buffered
104#endif
105 mcr p15, 0, r0, c3, c0
106
107 mov r0, #0x10000
108 sub r0, r0, #1 @ r0 = 0xffff
109 mcr p15, 0, r0, c5, c0 @ all read/write access
110
111 mrc p15, 0, r0, c1, c0 @ get control register
112 bic r0, r0, #0x3F000000 @ set to standard caching mode
113 @ need some benchmark
114 orr r0, r0, #0x0000000d @ MPU/Cache/WB
115
116 mov pc, lr
117
118 .size __arm740_setup, . - __arm740_setup
119
120 __INITDATA
121
122/*
123 * Purpose : Function pointers used to access above functions - all calls
124 * come through these
125 */
126 .type arm740_processor_functions, #object
127ENTRY(arm740_processor_functions)
128 .word v4t_late_abort
129 .word cpu_arm740_proc_init
130 .word cpu_arm740_proc_fin
131 .word cpu_arm740_reset
132 .word cpu_arm740_do_idle
133 .word cpu_arm740_dcache_clean_area
134 .word cpu_arm740_switch_mm
135 .word 0 @ cpu_*_set_pte
136 .size arm740_processor_functions, . - arm740_processor_functions
137
138 .section ".rodata"
139
140 .type cpu_arch_name, #object
141cpu_arch_name:
142 .asciz "armv4"
143 .size cpu_arch_name, . - cpu_arch_name
144
145 .type cpu_elf_name, #object
146cpu_elf_name:
147 .asciz "v4"
148 .size cpu_elf_name, . - cpu_elf_name
149
150 .type cpu_arm740_name, #object
151cpu_arm740_name:
152 .ascii "ARM740T"
153 .size cpu_arm740_name, . - cpu_arm740_name
154
155 .align
156
157 .section ".proc.info.init", #alloc, #execinstr
158 .type __arm740_proc_info,#object
159__arm740_proc_info:
160 .long 0x41807400
161 .long 0xfffffff0
162 .long 0
163 b __arm740_setup
164 .long cpu_arch_name
165 .long cpu_elf_name
166 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
167 .long cpu_arm740_name
168 .long arm740_processor_functions
169 .long 0
170 .long 0
171 .long v3_cache_fns @ cache model
172 .size __arm740_proc_info, . - __arm740_proc_info
173
174
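
The region-size loop in __arm740_setup encodes a protection region as base[31:12] | (size_encoding << 1) | enable, where an encoding of 11 is the 4KB minimum and each increment doubles the region. The C sketch below reproduces what that loop computes; the 64MB-at-0xC0000000 example is an assumption, not a value from this patch.

	#include <stdio.h>

	/* Mirror of the "area size *= 2" loop in __arm740_setup. */
	static unsigned long mpu_region(unsigned long base, unsigned long size)
	{
		unsigned long pages = size >> 12;	/* size in 4KB units */
		unsigned int enc = 10;			/* 11 is the minimum (4KB) */

		while (pages) {
			enc++;
			pages >>= 1;
		}

		return (base & 0xFFFFF000UL) | (enc << 1) | 1;	/* bit 0 = enable */
	}

	int main(void)
	{
		/* Assumed board: 64MB of RAM at 0xC0000000 -> prints 0xc0000033. */
		printf("region 1 = 0x%08lx\n", mpu_region(0xC0000000UL, 64UL << 20));
		return 0;
	}
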
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
new file mode 100644
index 000000000000..22d7e3100ea6
--- /dev/null
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -0,0 +1,249 @@
1/*
2 * linux/arch/arm/mm/proc-arm7tdmi.S: utility functions for ARM7TDMI
3 *
4 * Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <asm/assembler.h>
14#include <asm/asm-offsets.h>
15#include <asm/pgtable-hwdef.h>
16#include <asm/pgtable.h>
17#include <asm/procinfo.h>
18#include <asm/ptrace.h>
19
20 .text
21/*
22 * cpu_arm7tdmi_proc_init()
23 * cpu_arm7tdmi_do_idle()
24 * cpu_arm7tdmi_dcache_clean_area()
25 * cpu_arm7tdmi_switch_mm()
26 *
27 * These are not required.
28 */
29ENTRY(cpu_arm7tdmi_proc_init)
30ENTRY(cpu_arm7tdmi_do_idle)
31ENTRY(cpu_arm7tdmi_dcache_clean_area)
32ENTRY(cpu_arm7tdmi_switch_mm)
33 mov pc, lr
34
35/*
36 * cpu_arm7tdmi_proc_fin()
37 */
38ENTRY(cpu_arm7tdmi_proc_fin)
39 mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
40 msr cpsr_c, r0
41 mov pc, lr
42
43/*
44 * Function: cpu_arm7tdmi_reset(loc)
45 * Params : loc(r0) address to jump to
46 * Purpose : Sets up everything for a reset and jump to the location for soft reset.
47 */
48ENTRY(cpu_arm7tdmi_reset)
49 mov pc, r0
50
51 __INIT
52
53 .type __arm7tdmi_setup, #function
54__arm7tdmi_setup:
55 mov pc, lr
56 .size __arm7tdmi_setup, . - __arm7tdmi_setup
57
58 __INITDATA
59
60/*
61 * Purpose : Function pointers used to access above functions - all calls
62 * come through these
63 */
64 .type arm7tdmi_processor_functions, #object
65ENTRY(arm7tdmi_processor_functions)
66 .word v4t_late_abort
67 .word cpu_arm7tdmi_proc_init
68 .word cpu_arm7tdmi_proc_fin
69 .word cpu_arm7tdmi_reset
70 .word cpu_arm7tdmi_do_idle
71 .word cpu_arm7tdmi_dcache_clean_area
72 .word cpu_arm7tdmi_switch_mm
73 .word 0 @ cpu_*_set_pte
74 .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions
75
76 .section ".rodata"
77
78 .type cpu_arch_name, #object
79cpu_arch_name:
80 .asciz "armv4t"
81 .size cpu_arch_name, . - cpu_arch_name
82
83 .type cpu_elf_name, #object
84cpu_elf_name:
85 .asciz "v4"
86 .size cpu_elf_name, . - cpu_elf_name
87
88 .type cpu_arm7tdmi_name, #object
89cpu_arm7tdmi_name:
90 .asciz "ARM7TDMI"
91 .size cpu_arm7tdmi_name, . - cpu_arm7tdmi_name
92
93 .type cpu_triscenda7_name, #object
94cpu_triscenda7_name:
95 .asciz "Triscend-A7x"
96 .size cpu_triscenda7_name, . - cpu_triscenda7_name
97
98 .type cpu_at91_name, #object
99cpu_at91_name:
100 .asciz "Atmel-AT91M40xxx"
101 .size cpu_at91_name, . - cpu_at91_name
102
103 .type cpu_s3c3410_name, #object
104cpu_s3c3410_name:
105 .asciz "Samsung-S3C3410"
106 .size cpu_s3c3410_name, . - cpu_s3c3410_name
107
108 .type cpu_s3c44b0x_name, #object
109cpu_s3c44b0x_name:
110 .asciz "Samsung-S3C44B0x"
111 .size cpu_s3c44b0x_name, . - cpu_s3c44b0x_name
112
113 .type cpu_s3c4510b_name, #object
114cpu_s3c4510b_name:
115 .asciz "Samsung-S3C4510B"
116 .size cpu_s3c4510b_name, . - cpu_s3c4510b_name
117
118 .type cpu_s3c4530_name, #object
119cpu_s3c4530_name:
120 .asciz "Samsung-S3C4530"
121 .size cpu_s3c4530_name, . - cpu_s3c4530_name
122
123 .type cpu_netarm_name, #object
124cpu_netarm_name:
125 .asciz "NETARM"
126 .size cpu_netarm_name, . - cpu_netarm_name
127
128 .align
129
130 .section ".proc.info.init", #alloc, #execinstr
131
132 .type __arm7tdmi_proc_info, #object
133__arm7tdmi_proc_info:
134 .long 0x41007700
135 .long 0xfff8ff00
136 .long 0
137 .long 0
138 b __arm7tdmi_setup
139 .long cpu_arch_name
140 .long cpu_elf_name
141 .long HWCAP_SWP | HWCAP_26BIT
142 .long cpu_arm7tdmi_name
143 .long arm7tdmi_processor_functions
144 .long 0
145 .long 0
146 .long v4_cache_fns
147 .size __arm7tdmi_proc_info, . - __arm7tdmi_proc_info
148
149 .type __triscenda7_proc_info, #object
150__triscenda7_proc_info:
151 .long 0x0001d2ff
152 .long 0x0001ffff
153 .long 0
154 .long 0
155 b __arm7tdmi_setup
156 .long cpu_arch_name
157 .long cpu_elf_name
158 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
159 .long cpu_triscenda7_name
160 .long arm7tdmi_processor_functions
161 .long 0
162 .long 0
163 .long v4_cache_fns
164 .size __triscenda7_proc_info, . - __triscenda7_proc_info
165
166 .type __at91_proc_info, #object
167__at91_proc_info:
168 .long 0x14000040
169 .long 0xfff000e0
170 .long 0
171 .long 0
172 b __arm7tdmi_setup
173 .long cpu_arch_name
174 .long cpu_elf_name
175 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
176 .long cpu_at91_name
177 .long arm7tdmi_processor_functions
178 .long 0
179 .long 0
180 .long v4_cache_fns
181 .size __at91_proc_info, . - __at91_proc_info
182
183 .type __s3c4510b_proc_info, #object
184__s3c4510b_proc_info:
185 .long 0x36365000
186 .long 0xfffff000
187 .long 0
188 .long 0
189 b __arm7tdmi_setup
190 .long cpu_arch_name
191 .long cpu_elf_name
192 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
193 .long cpu_s3c4510b_name
194 .long arm7tdmi_processor_functions
195 .long 0
196 .long 0
197 .long v4_cache_fns
198 .size __s3c4510b_proc_info, . - __s3c4510b_proc_info
199
200 .type __s3c4530_proc_info, #object
201__s3c4530_proc_info:
202 .long 0x4c000000
203 .long 0xfff000e0
204 .long 0
205 .long 0
206 b __arm7tdmi_setup
207 .long cpu_arch_name
208 .long cpu_elf_name
209 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
210 .long cpu_s3c4530_name
211 .long arm7tdmi_processor_functions
212 .long 0
213 .long 0
214 .long v4_cache_fns
215 .size __s3c4530_proc_info, . - __s3c4530_proc_info
216
217 .type __s3c3410_proc_info, #object
218__s3c3410_proc_info:
219 .long 0x34100000
220 .long 0xffff0000
221 .long 0
222 .long 0
223 b __arm7tdmi_setup
224 .long cpu_arch_name
225 .long cpu_elf_name
226 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
227 .long cpu_s3c3410_name
228 .long arm7tdmi_processor_functions
229 .long 0
230 .long 0
231 .long v4_cache_fns
232 .size __s3c3410_proc_info, . - __s3c3410_proc_info
233
234 .type __s3c44b0x_proc_info, #object
235__s3c44b0x_proc_info:
236 .long 0x44b00000
237 .long 0xffff0000
238 .long 0
239 .long 0
240 b __arm7tdmi_setup
241 .long cpu_arch_name
242 .long cpu_elf_name
243 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
244 .long cpu_s3c44b0x_name
245 .long arm7tdmi_processor_functions
246 .long 0
247 .long 0
248 .long v4_cache_fns
249 .size __s3c44b0x_proc_info, . - __s3c44b0x_proc_info
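
Each __*_proc_info record above starts with a CPU ID value and a mask; at boot, the lookup code scans the .proc.info.init section and selects the first record for which (cpu_id & mask) == value. A simplified user-space rendering of that match, using the ID/mask pairs from this file and an assumed CPU ID, is shown below.

	#include <stdio.h>

	struct proc_info_match {
		unsigned long	cpu_val;
		unsigned long	cpu_mask;
		const char	*name;
	};

	/* Value/mask pairs copied from the proc_info records above. */
	static const struct proc_info_match table[] = {
		{ 0x41007700, 0xfff8ff00, "ARM7TDMI" },
		{ 0x0001d2ff, 0x0001ffff, "Triscend-A7x" },
		{ 0x14000040, 0xfff000e0, "Atmel-AT91M40xxx" },
		{ 0x36365000, 0xfffff000, "Samsung-S3C4510B" },
	};

	int main(void)
	{
		unsigned long id = 0x41007700;	/* assumed CPU ID, for illustration */
		unsigned int i;

		for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
			if ((id & table[i].cpu_mask) == table[i].cpu_val) {
				printf("matched %s\n", table[i].name);
				break;
			}
		}
		return 0;
	}
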
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
new file mode 100644
index 000000000000..2397f4b6e151
--- /dev/null
+++ b/arch/arm/mm/proc-arm940.S
@@ -0,0 +1,369 @@
1/*
2 * linux/arch/arm/mm/arm940.S: utility functions for ARM940T
3 *
4 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <asm/assembler.h>
14#include <asm/pgtable-hwdef.h>
15#include <asm/pgtable.h>
16#include <asm/procinfo.h>
17#include <asm/ptrace.h>
18
19/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
20#define CACHE_DLINESIZE 16
21#define CACHE_DSEGMENTS 4
22#define CACHE_DENTRIES 64
23
24 .text
25/*
26 * cpu_arm940_proc_init()
27 * cpu_arm940_switch_mm()
28 *
29 * These are not required.
30 */
31ENTRY(cpu_arm940_proc_init)
32ENTRY(cpu_arm940_switch_mm)
33 mov pc, lr
34
35/*
36 * cpu_arm940_proc_fin()
37 */
38ENTRY(cpu_arm940_proc_fin)
39 stmfd sp!, {lr}
40 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
41 msr cpsr_c, ip
42 bl arm940_flush_kern_cache_all
43 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
44 bic r0, r0, #0x00001000 @ i-cache
45 bic r0, r0, #0x00000004 @ d-cache
46 mcr p15, 0, r0, c1, c0, 0 @ disable caches
47 ldmfd sp!, {pc}
48
49/*
50 * cpu_arm940_reset(loc)
51 * Params : r0 = address to jump to
52 * Notes : This sets up everything for a reset
53 */
54ENTRY(cpu_arm940_reset)
55 mov ip, #0
56 mcr p15, 0, ip, c7, c5, 0 @ flush I cache
57 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
58 mcr p15, 0, ip, c7, c10, 4 @ drain WB
59 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
60 bic ip, ip, #0x00000005 @ .............c.p
61 bic ip, ip, #0x00001000 @ i-cache
62 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
63 mov pc, r0
64
65/*
66 * cpu_arm940_do_idle()
67 */
68 .align 5
69ENTRY(cpu_arm940_do_idle)
70 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
71 mov pc, lr
72
73/*
74 * flush_user_cache_all()
75 */
76ENTRY(arm940_flush_user_cache_all)
77 /* FALLTHROUGH */
78
79/*
80 * flush_kern_cache_all()
81 *
82 * Clean and invalidate the entire cache.
83 */
84ENTRY(arm940_flush_kern_cache_all)
85 mov r2, #VM_EXEC
86 /* FALLTHROUGH */
87
88/*
89 * flush_user_cache_range(start, end, flags)
90 *
91 * There is no efficient way to flush a range of cache entries
92 * in the specified address range. Thus, flushes all.
93 *
94 * - start - start address (inclusive)
95 * - end - end address (exclusive)
96 * - flags - vm_flags describing address space
97 */
98ENTRY(arm940_flush_user_cache_range)
99 mov ip, #0
100#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
101 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
102#else
103 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1041: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1052: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
106 subs r3, r3, #1 << 26
107 bcs 2b @ entries 63 to 0
108 subs r1, r1, #1 << 4
109 bcs 1b @ segments 3 to 0
110#endif
111 tst r2, #VM_EXEC
112 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
113 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
114 mov pc, lr
115
116/*
117 * coherent_kern_range(start, end)
118 *
119 * Ensure coherency between the Icache and the Dcache in the
120 * region described by start, end. If you have non-snooping
121 * Harvard caches, you need to implement this function.
122 *
123 * - start - virtual start address
124 * - end - virtual end address
125 */
126ENTRY(arm940_coherent_kern_range)
127 /* FALLTHROUGH */
128
129/*
130 * coherent_user_range(start, end)
131 *
132 * Ensure coherency between the Icache and the Dcache in the
133 * region described by start, end. If you have non-snooping
134 * Harvard caches, you need to implement this function.
135 *
136 * - start - virtual start address
137 * - end - virtual end address
138 */
139ENTRY(arm940_coherent_user_range)
140 /* FALLTHROUGH */
141
142/*
143 * flush_kern_dcache_page(void *page)
144 *
145 * Ensure no D cache aliasing occurs, either with itself or
146 * the I cache
147 *
148 * - addr - page aligned address
149 */
150ENTRY(arm940_flush_kern_dcache_page)
151 mov ip, #0
152 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1531: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1542: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
155 subs r3, r3, #1 << 26
156 bcs 2b @ entries 63 to 0
157 subs r1, r1, #1 << 4
158 bcs 1b @ segments 3 to 0
159 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
160 mcr p15, 0, ip, c7, c10, 4 @ drain WB
161 mov pc, lr
162
163/*
164 * dma_inv_range(start, end)
165 *
166 * There is no efficient way to invalidate a specified virtual
167 * address range. Thus, invalidates all.
168 *
169 * - start - virtual start address
170 * - end - virtual end address
171 */
172ENTRY(arm940_dma_inv_range)
173 mov ip, #0
174 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1751: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1762: mcr p15, 0, r3, c7, c6, 2 @ flush D entry
177 subs r3, r3, #1 << 26
178 bcs 2b @ entries 63 to 0
179 subs r1, r1, #1 << 4
180 bcs 1b @ segments 3 to 0
181 mcr p15, 0, ip, c7, c10, 4 @ drain WB
182 mov pc, lr
183
184/*
185 * dma_clean_range(start, end)
186 *
187 * There is no efficient way to clean a specified virtual
188 * address range. Thus, cleans all.
189 *
190 * - start - virtual start address
191 * - end - virtual end address
192 */
193ENTRY(arm940_dma_clean_range)
194ENTRY(cpu_arm940_dcache_clean_area)
195 mov ip, #0
196#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
197 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1981: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1992: mcr p15, 0, r3, c7, c10, 2 @ clean D entry
200 subs r3, r3, #1 << 26
201 bcs 2b @ entries 63 to 0
202 subs r1, r1, #1 << 4
203 bcs 1b @ segments 3 to 0
204#endif
205 mcr p15, 0, ip, c7, c10, 4 @ drain WB
206 mov pc, lr
207
208/*
209 * dma_flush_range(start, end)
210 *
211 * There is no efficient way to clean and invalidate a specified
212 * virtual address range.
213 *
214 * - start - virtual start address
215 * - end - virtual end address
216 */
217ENTRY(arm940_dma_flush_range)
218 mov ip, #0
219 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
2201: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2212:
222#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
223 mcr p15, 0, r3, c7, c14, 2 @ clean/flush D entry
224#else
225 mcr p15, 0, r3, c7, c10, 2 @ clean D entry
226#endif
227 subs r3, r3, #1 << 26
228 bcs 2b @ entries 63 to 0
229 subs r1, r1, #1 << 4
230 bcs 1b @ segments 3 to 0
231 mcr p15, 0, ip, c7, c10, 4 @ drain WB
232 mov pc, lr
233
234ENTRY(arm940_cache_fns)
235 .long arm940_flush_kern_cache_all
236 .long arm940_flush_user_cache_all
237 .long arm940_flush_user_cache_range
238 .long arm940_coherent_kern_range
239 .long arm940_coherent_user_range
240 .long arm940_flush_kern_dcache_page
241 .long arm940_dma_inv_range
242 .long arm940_dma_clean_range
243 .long arm940_dma_flush_range
244
245 __INIT
246
247 .type __arm940_setup, #function
248__arm940_setup:
249 mov r0, #0
250 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
251 mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache
252 mcr p15, 0, r0, c7, c10, 4 @ drain WB
253
254 mcr p15, 0, r0, c6, c3, 0 @ disable data area 3~7
255 mcr p15, 0, r0, c6, c4, 0
256 mcr p15, 0, r0, c6, c5, 0
257 mcr p15, 0, r0, c6, c6, 0
258 mcr p15, 0, r0, c6, c7, 0
259
260 mcr p15, 0, r0, c6, c3, 1 @ disable instruction area 3~7
261 mcr p15, 0, r0, c6, c4, 1
262 mcr p15, 0, r0, c6, c5, 1
263 mcr p15, 0, r0, c6, c6, 1
264 mcr p15, 0, r0, c6, c7, 1
265
266 mov r0, #0x0000003F @ base = 0, size = 4GB
267 mcr p15, 0, r0, c6, c0, 0 @ set area 0, default
268 mcr p15, 0, r0, c6, c0, 1
269
270 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
271 ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
272 mov r2, #10 @ 11 is the minimum (4KB)
2731: add r2, r2, #1 @ area size *= 2
274 mov r1, r1, lsr #1
275 bne 1b @ count not zero r-shift
276 orr r0, r0, r2, lsl #1 @ the area register value
277 orr r0, r0, #1 @ set enable bit
278 mcr p15, 0, r0, c6, c1, 0 @ set area 1, RAM
279 mcr p15, 0, r0, c6, c1, 1
280
281 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
282 ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
283 mov r2, #10 @ 11 is the minimum (4KB)
2841: add r2, r2, #1 @ area size *= 2
285 mov r1, r1, lsr #1
286 bne 1b @ count not zero r-shift
287 orr r0, r0, r2, lsl #1 @ the area register value
288 orr r0, r0, #1 @ set enable bit
289 mcr p15, 0, r0, c6, c2, 0 @ set area 2, ROM/FLASH
290 mcr p15, 0, r0, c6, c2, 1
291
292 mov r0, #0x06
293 mcr p15, 0, r0, c2, c0, 0 @ Region 1&2 cacheable
294 mcr p15, 0, r0, c2, c0, 1
295#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
296 mov r0, #0x00 @ disable whole write buffer
297#else
298 mov r0, #0x02 @ Region 1 write buffered
299#endif
300 mcr p15, 0, r0, c3, c0, 0
301
302 mov r0, #0x10000
303 sub r0, r0, #1 @ r0 = 0xffff
304 mcr p15, 0, r0, c5, c0, 0 @ all read/write access
305 mcr p15, 0, r0, c5, c0, 1
306
307 mrc p15, 0, r0, c1, c0 @ get control register
308 orr r0, r0, #0x00001000 @ I-cache
309 orr r0, r0, #0x00000005 @ MPU/D-cache
310
311 mov pc, lr
312
313 .size __arm940_setup, . - __arm940_setup
314
315 __INITDATA
316
317/*
318 * Purpose : Function pointers used to access above functions - all calls
319 * come through these
320 */
321 .type arm940_processor_functions, #object
322ENTRY(arm940_processor_functions)
323 .word nommu_early_abort
324 .word cpu_arm940_proc_init
325 .word cpu_arm940_proc_fin
326 .word cpu_arm940_reset
327 .word cpu_arm940_do_idle
328 .word cpu_arm940_dcache_clean_area
329 .word cpu_arm940_switch_mm
330 .word 0 @ cpu_*_set_pte
331 .size arm940_processor_functions, . - arm940_processor_functions
332
333 .section ".rodata"
334
335.type cpu_arch_name, #object
336cpu_arch_name:
337 .asciz "armv4t"
338 .size cpu_arch_name, . - cpu_arch_name
339
340 .type cpu_elf_name, #object
341cpu_elf_name:
342 .asciz "v4"
343 .size cpu_elf_name, . - cpu_elf_name
344
345 .type cpu_arm940_name, #object
346cpu_arm940_name:
347 .ascii "ARM940T"
348 .size cpu_arm940_name, . - cpu_arm940_name
349
350 .align
351
352 .section ".proc.info.init", #alloc, #execinstr
353
354 .type __arm940_proc_info,#object
355__arm940_proc_info:
356 .long 0x41009400
357 .long 0xff00fff0
358 .long 0
359 b __arm940_setup
360 .long cpu_arch_name
361 .long cpu_elf_name
362 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
363 .long cpu_arm940_name
364 .long arm940_processor_functions
365 .long 0
366 .long 0
367 .long arm940_cache_fns
368 .size __arm940_proc_info, . - __arm940_proc_info
369
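
The clean/invalidate loops used throughout proc-arm940.S walk the cache by segment and index: the segment number sits in bits [5:4] and the cache index in bits [31:26] of the value given to the c7,c14,2 (clean/flush D by index) operation, for 4 x 64 = 256 operations per full pass. The sketch below reproduces that iteration in C for illustration only.

	#include <stdio.h>

	#define CACHE_DSEGMENTS	4
	#define CACHE_DENTRIES	64

	int main(void)
	{
		unsigned long ops = 0;
		int seg, idx;

		/* Same walk as arm940_flush_kern_dcache_page and friends:
		 * segment in bits [5:4], index in bits [31:26]. */
		for (seg = CACHE_DSEGMENTS - 1; seg >= 0; seg--) {
			for (idx = CACHE_DENTRIES - 1; idx >= 0; idx--) {
				unsigned long val = ((unsigned long)seg << 4) |
						    ((unsigned long)idx << 26);
				/* kernel equivalent: mcr p15, 0, val, c7, c14, 2 */
				(void)val;
				ops++;
			}
		}

		printf("%lu index operations per full clean\n", ops);	/* 256 */
		return 0;
	}
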
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
new file mode 100644
index 000000000000..e18617564421
--- /dev/null
+++ b/arch/arm/mm/proc-arm946.S
@@ -0,0 +1,424 @@
1/*
2 * linux/arch/arm/mm/arm946.S: utility functions for ARM946E-S
3 *
4 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
5 *
6 * (Many of the cache routines are taken from proc-arm926.S)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13#include <linux/linkage.h>
14#include <linux/init.h>
15#include <asm/assembler.h>
16#include <asm/pgtable-hwdef.h>
17#include <asm/pgtable.h>
18#include <asm/procinfo.h>
19#include <asm/ptrace.h>
20
21/*
22 * ARM946E-S is synthesizable to have 0KB to 1MB sized D-Cache,
23 * comprising 256 lines of 32 bytes (8 words).
24 */
25#define CACHE_DSIZE (CONFIG_CPU_DCACHE_SIZE) /* typically 8KB. */
26#define CACHE_DLINESIZE 32 /* fixed */
27#define CACHE_DSEGMENTS 4 /* fixed */
28#define CACHE_DENTRIES (CACHE_DSIZE / CACHE_DSEGMENTS / CACHE_DLINESIZE)
29#define CACHE_DLIMIT (CACHE_DSIZE * 4) /* benchmark needed */
30
31 .text
32/*
33 * cpu_arm946_proc_init()
34 * cpu_arm946_switch_mm()
35 *
36 * These are not required.
37 */
38ENTRY(cpu_arm946_proc_init)
39ENTRY(cpu_arm946_switch_mm)
40 mov pc, lr
41
42/*
43 * cpu_arm946_proc_fin()
44 */
45ENTRY(cpu_arm946_proc_fin)
46 stmfd sp!, {lr}
47 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
48 msr cpsr_c, ip
49 bl arm946_flush_kern_cache_all
50 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
51 bic r0, r0, #0x00001000 @ i-cache
52 bic r0, r0, #0x00000004 @ d-cache
53 mcr p15, 0, r0, c1, c0, 0 @ disable caches
54 ldmfd sp!, {pc}
55
56/*
57 * cpu_arm946_reset(loc)
58 * Params : r0 = address to jump to
59 * Notes : This sets up everything for a reset
60 */
61ENTRY(cpu_arm946_reset)
62 mov ip, #0
63 mcr p15, 0, ip, c7, c5, 0 @ flush I cache
64 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
65 mcr p15, 0, ip, c7, c10, 4 @ drain WB
66 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
67 bic ip, ip, #0x00000005 @ .............c.p
68 bic ip, ip, #0x00001000 @ i-cache
69 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
70 mov pc, r0
71
72/*
73 * cpu_arm946_do_idle()
74 */
75 .align 5
76ENTRY(cpu_arm946_do_idle)
77 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
78 mov pc, lr
79
80/*
81 * flush_user_cache_all()
82 */
83ENTRY(arm946_flush_user_cache_all)
84 /* FALLTHROUGH */
85
86/*
87 * flush_kern_cache_all()
88 *
89 * Clean and invalidate the entire cache.
90 */
91ENTRY(arm946_flush_kern_cache_all)
92 mov r2, #VM_EXEC
93 mov ip, #0
94__flush_whole_cache:
95#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
96 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
97#else
98 mov r1, #(CACHE_DSEGMENTS - 1) << 29 @ 4 segments
991: orr r3, r1, #(CACHE_DENTRIES - 1) << 4 @ n entries
1002: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
101 subs r3, r3, #1 << 4
102 bcs 2b @ entries n to 0
103 subs r1, r1, #1 << 29
104 bcs 1b @ segments 3 to 0
105#endif
106 tst r2, #VM_EXEC
107 mcrne p15, 0, ip, c7, c5, 0 @ flush I cache
108 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
109 mov pc, lr
110
111/*
112 * flush_user_cache_range(start, end, flags)
113 *
114 * Clean and invalidate a range of cache entries in the
115 * specified address range.
116 *
117 * - start - start address (inclusive)
118 * - end - end address (exclusive)
119 * - flags - vm_flags describing address space
120 * (same as arm926)
121 */
122ENTRY(arm946_flush_user_cache_range)
123 mov ip, #0
124 sub r3, r1, r0 @ calculate total size
125 cmp r3, #CACHE_DLIMIT
126 bhs __flush_whole_cache
127
1281: tst r2, #VM_EXEC
129#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
130 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
131 mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
132 add r0, r0, #CACHE_DLINESIZE
133 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
134 mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
135 add r0, r0, #CACHE_DLINESIZE
136#else
137 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
138 mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
139 add r0, r0, #CACHE_DLINESIZE
140 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
141 mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
142 add r0, r0, #CACHE_DLINESIZE
143#endif
144 cmp r0, r1
145 blo 1b
146 tst r2, #VM_EXEC
147 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
148 mov pc, lr
149
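The routine above trades per-line maintenance for a whole-cache flush once the range reaches CACHE_DLIMIT, and only touches the I-cache and write buffer when the VMA is executable. A control-flow sketch in C, with placeholder helpers standing in for the per-line coprocessor operations (the write-through build invalidates instead of clean+invalidate):

    #define CACHE_DLINESIZE 32
    #define CACHE_DLIMIT    (0x2000 * 4)           /* assumes the typical 8KB D-cache */
    #define VM_EXEC         0x00000004

    static void clean_inv_dline(unsigned long a)  { (void)a; /* c7, c14, 1 */ }
    static void inv_iline(unsigned long a)        { (void)a; /* c7, c5, 1  */ }
    static void drain_write_buffer(void)          {          /* c7, c10, 4 */ }
    static void flush_whole_cache(unsigned long vm_flags) { (void)vm_flags; }

    static void flush_user_cache_range(unsigned long start, unsigned long end,
                                       unsigned long vm_flags)
    {
        if (end - start >= CACHE_DLIMIT) {
            flush_whole_cache(vm_flags);           /* large range: do the whole cache */
            return;
        }
        for (unsigned long a = start; a < end; a += CACHE_DLINESIZE) {
            clean_inv_dline(a);
            if (vm_flags & VM_EXEC)
                inv_iline(a);
        }
        if (vm_flags & VM_EXEC)
            drain_write_buffer();
    }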
150/*
151 * coherent_kern_range(start, end)
152 *
153 * Ensure coherency between the Icache and the Dcache in the
154 * region described by start, end. If you have non-snooping
155 * Harvard caches, you need to implement this function.
156 *
157 * - start - virtual start address
158 * - end - virtual end address
159 */
160ENTRY(arm946_coherent_kern_range)
161 /* FALLTHROUGH */
162
163/*
164 * coherent_user_range(start, end)
165 *
166 * Ensure coherency between the Icache and the Dcache in the
167 * region described by start, end. If you have non-snooping
168 * Harvard caches, you need to implement this function.
169 *
170 * - start - virtual start address
171 * - end - virtual end address
172 * (same as arm926)
173 */
174ENTRY(arm946_coherent_user_range)
175 bic r0, r0, #CACHE_DLINESIZE - 1
1761: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
177 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
178 add r0, r0, #CACHE_DLINESIZE
179 cmp r0, r1
180 blo 1b
181 mcr p15, 0, r0, c7, c10, 4 @ drain WB
182 mov pc, lr
183
184/*
185 * flush_kern_dcache_page(void *page)
186 *
187 * Ensure no D cache aliasing occurs, either with itself or
188 * the I cache
189 *
190 * - addr - page aligned address
191 * (same as arm926)
192 */
193ENTRY(arm946_flush_kern_dcache_page)
194 add r1, r0, #PAGE_SZ
1951: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
196 add r0, r0, #CACHE_DLINESIZE
197 cmp r0, r1
198 blo 1b
199 mov r0, #0
200 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
201 mcr p15, 0, r0, c7, c10, 4 @ drain WB
202 mov pc, lr
203
204/*
205 * dma_inv_range(start, end)
206 *
207 * Invalidate (discard) the specified virtual address range.
208 * May not write back any entries. If 'start' or 'end'
209 * are not cache line aligned, those lines must be written
210 * back.
211 *
212 * - start - virtual start address
213 * - end - virtual end address
214 * (same as arm926)
215 */
216ENTRY(arm946_dma_inv_range)
217#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
218 tst r0, #CACHE_DLINESIZE - 1
219 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
220 tst r1, #CACHE_DLINESIZE - 1
221 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
222#endif
223 bic r0, r0, #CACHE_DLINESIZE - 1
2241: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
225 add r0, r0, #CACHE_DLINESIZE
226 cmp r0, r1
227 blo 1b
228 mcr p15, 0, r0, c7, c10, 4 @ drain WB
229 mov pc, lr
230
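On a write-back configuration the two conditional cleans at the top preserve dirty data that merely shares a cache line with the start or end of the buffer; only lines wholly inside the range are discarded. A C sketch of that boundary handling (helper names are placeholders for the coprocessor operations; the write-through build skips the clean step entirely):

    #define CACHE_DLINESIZE 32

    static void clean_dline(unsigned long a) { (void)a; /* c7, c10, 1 */ }
    static void inv_dline(unsigned long a)   { (void)a; /* c7, c6, 1  */ }
    static void drain_wb(void)               {          /* c7, c10, 4 */ }

    static void dma_inv_range(unsigned long start, unsigned long end)
    {
        if (start & (CACHE_DLINESIZE - 1))
            clean_dline(start);                    /* partial line at the start */
        if (end & (CACHE_DLINESIZE - 1))
            clean_dline(end);                      /* partial line at the end */

        for (unsigned long a = start & ~(unsigned long)(CACHE_DLINESIZE - 1);
             a < end; a += CACHE_DLINESIZE)
            inv_dline(a);                          /* discard fully covered lines */

        drain_wb();
    }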
231/*
232 * dma_clean_range(start, end)
233 *
234 * Clean the specified virtual address range.
235 *
236 * - start - virtual start address
237 * - end - virtual end address
238 *
239 * (same as arm926)
240 */
241ENTRY(arm946_dma_clean_range)
242#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
243 bic r0, r0, #CACHE_DLINESIZE - 1
2441: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
245 add r0, r0, #CACHE_DLINESIZE
246 cmp r0, r1
247 blo 1b
248#endif
249 mcr p15, 0, r0, c7, c10, 4 @ drain WB
250 mov pc, lr
251
252/*
253 * dma_flush_range(start, end)
254 *
255 * Clean and invalidate the specified virtual address range.
256 *
257 * - start - virtual start address
258 * - end - virtual end address
259 *
260 * (same as arm926)
261 */
262ENTRY(arm946_dma_flush_range)
263 bic r0, r0, #CACHE_DLINESIZE - 1
2641:
265#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
266 mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
267#else
268 mcr p15, 0, r0, c7, c10, 1 @ clean D entry
269#endif
270 add r0, r0, #CACHE_DLINESIZE
271 cmp r0, r1
272 blo 1b
273 mcr p15, 0, r0, c7, c10, 4 @ drain WB
274 mov pc, lr
275
276ENTRY(arm946_cache_fns)
277 .long arm946_flush_kern_cache_all
278 .long arm946_flush_user_cache_all
279 .long arm946_flush_user_cache_range
280 .long arm946_coherent_kern_range
281 .long arm946_coherent_user_range
282 .long arm946_flush_kern_dcache_page
283 .long arm946_dma_inv_range
284 .long arm946_dma_clean_range
285 .long arm946_dma_flush_range
286
287
288ENTRY(cpu_arm946_dcache_clean_area)
289#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
2901: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
291 add r0, r0, #CACHE_DLINESIZE
292 subs r1, r1, #CACHE_DLINESIZE
293 bhi 1b
294#endif
295 mcr p15, 0, r0, c7, c10, 4 @ drain WB
296 mov pc, lr
297
298 __INIT
299
300 .type __arm946_setup, #function
301__arm946_setup:
302 mov r0, #0
303 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
304 mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache
305 mcr p15, 0, r0, c7, c10, 4 @ drain WB
306
307 mcr p15, 0, r0, c6, c3, 0 @ disable memory regions 3~7
308 mcr p15, 0, r0, c6, c4, 0
309 mcr p15, 0, r0, c6, c5, 0
310 mcr p15, 0, r0, c6, c6, 0
311 mcr p15, 0, r0, c6, c7, 0
312
313 mov r0, #0x0000003F @ base = 0, size = 4GB
314 mcr p15, 0, r0, c6, c0, 0 @ set region 0, default
315
316 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
317 ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
318 mov r2, #10 @ 11 is the minimum (4KB)
3191: add r2, r2, #1 @ area size *= 2
320 movs r1, r1, lsr #1
321 bne 1b @ loop until the shifted size reaches zero
322 orr r0, r0, r2, lsl #1 @ the region register value
323 orr r0, r0, #1 @ set enable bit
324 mcr p15, 0, r0, c6, c1, 0 @ set region 1, RAM
325
326 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
327 ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
328 mov r2, #10 @ 11 is the minimum (4KB)
3291: add r2, r2, #1 @ area size *= 2
330 movs r1, r1, lsr #1
331 bne 1b @ loop until the shifted size reaches zero
332 orr r0, r0, r2, lsl #1 @ the region register value
333 orr r0, r0, #1 @ set enable bit
334 mcr p15, 0, r0, c6, c2, 0 @ set region 2, ROM/FLASH
335
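Each protection-region register packs the 4KB-aligned base into bits [31:12], a size exponent N into bits [5:1] (the region spans 2^(N+1) bytes, so N = 11 is the 4KB minimum) and the enable flag into bit 0; the shift loop above derives N from the size expressed in 4KB units. A C sketch of the same encoding, assuming a power-of-two size as the loop does:

    #include <stdint.h>

    static uint32_t mpu_region_val(uint32_t base, uint32_t size)
    {
        uint32_t pages = size >> 12;               /* size in 4KB units, must be >= 1 */
        uint32_t n = 10;                           /* one pass yields 11, the 4KB minimum */

        do {                                       /* mirrors the shift loop above */
            n++;
            pages >>= 1;
        } while (pages);

        return (base & 0xFFFFF000u) | (n << 1) | 1;   /* bit 0 enables the region */
    }

    /* e.g. a 64MB RAM region at an assumed base of 0x0C000000:
     * mpu_region_val(0x0C000000, 64 << 20) == 0x0C000000 | (25 << 1) | 1 */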
336 mov r0, #0x06
337 mcr p15, 0, r0, c2, c0, 0 @ region 1,2 d-cacheable
338 mcr p15, 0, r0, c2, c0, 1 @ region 1,2 i-cacheable
339#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
340 mov r0, #0x00 @ disable whole write buffer
341#else
342 mov r0, #0x02 @ region 1 write buffered
343#endif
344 mcr p15, 0, r0, c3, c0, 0
345
346/*
347 * Access Permission Settings for future permission control by PU.
348 *
349 * priv. user
350 * region 0 (whole) rw -- : b0001
351 * region 1 (RAM) rw rw : b0011
352 * region 2 (FLASH) rw r- : b0010
353 * region 3~7 (none) -- -- : b0000
354 */
355 mov r0, #0x00000031
356 orr r0, r0, #0x00000200
357 mcr p15, 0, r0, c5, c0, 2 @ set data access permission
358 mcr p15, 0, r0, c5, c0, 3 @ set inst. access permission
359
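The two writes above load the data and instruction access-permission registers with one 4-bit field per region: region 0 in bits [3:0], region 1 in bits [7:4], and so on, so the table's b0001/b0011/b0010 settings pack to 0x231, i.e. the 0x31 | 0x200 built above. A small C illustration of the packing:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t pack_region_ap(const uint8_t ap[8])
    {
        uint32_t val = 0;

        for (int region = 0; region < 8; region++)
            val |= (uint32_t)(ap[region] & 0xf) << (region * 4);
        return val;
    }

    int main(void)
    {
        /* region 0: priv rw / user --, region 1: rw/rw, region 2: rw/r-, rest off */
        const uint8_t ap[8] = { 0x1, 0x3, 0x2, 0, 0, 0, 0, 0 };

        printf("0x%x\n", pack_region_ap(ap));      /* prints 0x231 */
        return 0;
    }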
360 mrc p15, 0, r0, c1, c0 @ get control register
361 orr r0, r0, #0x00001000 @ I-cache
362 orr r0, r0, #0x00000005 @ MPU/D-cache
363#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
364 orr r0, r0, #0x00004000 @ .1.. .... .... ....
365#endif
366 mov pc, lr
367
368 .size __arm946_setup, . - __arm946_setup
369
370 __INITDATA
371
372/*
373 * Purpose : Function pointers used to access above functions - all calls
374 * come through these
375 */
376 .type arm946_processor_functions, #object
377ENTRY(arm946_processor_functions)
378 .word nommu_early_abort
379 .word cpu_arm946_proc_init
380 .word cpu_arm946_proc_fin
381 .word cpu_arm946_reset
382 .word cpu_arm946_do_idle
383
384 .word cpu_arm946_dcache_clean_area
385 .word cpu_arm946_switch_mm
386 .word 0 @ cpu_*_set_pte
387 .size arm946_processor_functions, . - arm946_processor_functions
388
389 .section ".rodata"
390
391 .type cpu_arch_name, #object
392cpu_arch_name:
393 .asciz "armv5te"
394 .size cpu_arch_name, . - cpu_arch_name
395
396 .type cpu_elf_name, #object
397cpu_elf_name:
398 .asciz "v5t"
399 .size cpu_elf_name, . - cpu_elf_name
400
401 .type cpu_arm946_name, #object
402cpu_arm946_name:
403 .asciz "ARM946E-S"
404 .size cpu_arm946_name, . - cpu_arm946_name
405
406 .align
407
408 .section ".proc.info.init", #alloc, #execinstr
409 .type __arm946_proc_info,#object
410__arm946_proc_info:
411 .long 0x41009460
412 .long 0xff00fff0
413 .long 0
414 b __arm946_setup
415 .long cpu_arch_name
416 .long cpu_elf_name
417 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
418 .long cpu_arm946_name
419 .long arm946_processor_functions
420 .long 0
421 .long 0
422 .long arm946_cache_fns
423 .size __arm946_proc_info, . - __arm946_proc_info
424
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
new file mode 100644
index 000000000000..918ebf65d4f6
--- /dev/null
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -0,0 +1,134 @@
1/*
2 * linux/arch/arm/mm/proc-arm9tdmi.S: utility functions for ARM9TDMI
3 *
4 * Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <asm/assembler.h>
14#include <asm/asm-offsets.h>
15#include <asm/pgtable-hwdef.h>
16#include <asm/pgtable.h>
17#include <asm/procinfo.h>
18#include <asm/ptrace.h>
19
20 .text
21/*
22 * cpu_arm9tdmi_proc_init()
23 * cpu_arm9tdmi_do_idle()
24 * cpu_arm9tdmi_dcache_clean_area()
25 * cpu_arm9tdmi_switch_mm()
26 *
27 * These are not required.
28 */
29ENTRY(cpu_arm9tdmi_proc_init)
30ENTRY(cpu_arm9tdmi_do_idle)
31ENTRY(cpu_arm9tdmi_dcache_clean_area)
32ENTRY(cpu_arm9tdmi_switch_mm)
33 mov pc, lr
34
35/*
36 * cpu_arm9tdmi_proc_fin()
37 */
38ENTRY(cpu_arm9tdmi_proc_fin)
39 mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
40 msr cpsr_c, r0
41 mov pc, lr
42
43/*
44 * Function: cpu_arm9tdmi_reset(loc)
45 * Params : loc(r0) address to jump to
46 * Purpose : Sets up everything for a reset and jumps to the given location for a soft reset.
47 */
48ENTRY(cpu_arm9tdmi_reset)
49 mov pc, r0
50
51 __INIT
52
53 .type __arm9tdmi_setup, #function
54__arm9tdmi_setup:
55 mov pc, lr
56 .size __arm9tdmi_setup, . - __arm9tdmi_setup
57
58 __INITDATA
59
60/*
61 * Purpose : Function pointers used to access above functions - all calls
62 * come through these
63 */
64 .type arm9tdmi_processor_functions, #object
65ENTRY(arm9tdmi_processor_functions)
66 .word nommu_early_abort
67 .word cpu_arm9tdmi_proc_init
68 .word cpu_arm9tdmi_proc_fin
69 .word cpu_arm9tdmi_reset
70 .word cpu_arm9tdmi_do_idle
71 .word cpu_arm9tdmi_dcache_clean_area
72 .word cpu_arm9tdmi_switch_mm
73 .word 0 @ cpu_*_set_pte
74 .size arm9tdmi_processor_functions, . - arm9tdmi_processor_functions
75
76 .section ".rodata"
77
78 .type cpu_arch_name, #object
79cpu_arch_name:
80 .asciz "armv4t"
81 .size cpu_arch_name, . - cpu_arch_name
82
83 .type cpu_elf_name, #object
84cpu_elf_name:
85 .asciz "v4"
86 .size cpu_elf_name, . - cpu_elf_name
87
88 .type cpu_arm9tdmi_name, #object
89cpu_arm9tdmi_name:
90 .asciz "ARM9TDMI"
91 .size cpu_arm9tdmi_name, . - cpu_arm9tdmi_name
92
93 .type cpu_p2001_name, #object
94cpu_p2001_name:
95 .asciz "P2001"
96 .size cpu_p2001_name, . - cpu_p2001_name
97
98 .align
99
100 .section ".proc.info.init", #alloc, #execinstr
101
102 .type __arm9tdmi_proc_info, #object
103__arm9tdmi_proc_info:
104 .long 0x41009900
105 .long 0xfff8ff00
106 .long 0
107 .long 0
108 b __arm9tdmi_setup
109 .long cpu_arch_name
110 .long cpu_elf_name
111 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
112 .long cpu_arm9tdmi_name
113 .long arm9tdmi_processor_functions
114 .long 0
115 .long 0
116 .long v4_cache_fns
117 .size __arm9tdmi_proc_info, . - __arm9tdmi_proc_info
118
119 .type __p2001_proc_info, #object
120__p2001_proc_info:
121 .long 0x41029000
122 .long 0xffffffff
123 .long 0
124 .long 0
125 b __arm9tdmi_setup
126 .long cpu_arch_name
127 .long cpu_elf_name
128 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
129 .long cpu_p2001_name
130 .long arm9tdmi_processor_functions
131 .long 0
132 .long 0
133 .long v4_cache_fns
134 .size __p2001_proc_info, . - __p2001_proc_info
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 3ca0c92e98a2..e8b377d637f6 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -311,12 +311,6 @@ ENTRY(xscale_flush_kern_dcache_page)
311 * - end - virtual end address 311 * - end - virtual end address
312 */ 312 */
313ENTRY(xscale_dma_inv_range) 313ENTRY(xscale_dma_inv_range)
314 mrc p15, 0, r2, c0, c0, 0 @ read ID
315 eor r2, r2, #0x69000000
316 eor r2, r2, #0x00052000
317 bics r2, r2, #1
318 beq xscale_dma_flush_range
319
320 tst r0, #CACHELINESIZE - 1 314 tst r0, #CACHELINESIZE - 1
321 bic r0, r0, #CACHELINESIZE - 1 315 bic r0, r0, #CACHELINESIZE - 1
322 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry 316 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
@@ -375,6 +369,30 @@ ENTRY(xscale_cache_fns)
375 .long xscale_dma_clean_range 369 .long xscale_dma_clean_range
376 .long xscale_dma_flush_range 370 .long xscale_dma_flush_range
377 371
372/*
373 * On stepping A0/A1 of the 80200, invalidating D-cache by line doesn't
374 * clear the dirty bits, which means that if we invalidate a dirty line,
375 * the dirty data can still be written back to external memory later on.
376 *
377 * The recommended workaround is to always do a clean D-cache line before
378 * doing an invalidate D-cache line, so on the affected processors,
379 * dma_inv_range() is implemented as dma_flush_range().
380 *
381 * See erratum #25 of "Intel 80200 Processor Specification Update",
382 * revision January 22, 2003, available at:
383 * http://www.intel.com/design/iio/specupdt/273415.htm
384 */
385ENTRY(xscale_80200_A0_A1_cache_fns)
386 .long xscale_flush_kern_cache_all
387 .long xscale_flush_user_cache_all
388 .long xscale_flush_user_cache_range
389 .long xscale_coherent_kern_range
390 .long xscale_coherent_user_range
391 .long xscale_flush_kern_dcache_page
392 .long xscale_dma_flush_range
393 .long xscale_dma_clean_range
394 .long xscale_dma_flush_range
395
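Schematically, this table differs from the normal xscale_cache_fns only in routing the dma_inv_range slot to the flush (clean+invalidate) implementation, so every invalidation is preceded by the clean that erratum #25 requires. A C analogue of that substitution (the struct and its field names here are illustrative, not the kernel's exact definitions):

    struct dma_cache_ops {
        void (*dma_inv_range)(unsigned long start, unsigned long end);
        void (*dma_clean_range)(unsigned long start, unsigned long end);
        void (*dma_flush_range)(unsigned long start, unsigned long end);
    };

    extern void xscale_dma_clean_range(unsigned long start, unsigned long end);
    extern void xscale_dma_flush_range(unsigned long start, unsigned long end);

    /* On 80200 A0/A1, "invalidate" is really clean+invalidate. */
    static const struct dma_cache_ops xscale_80200_a0_a1_dma_ops = {
        .dma_inv_range   = xscale_dma_flush_range,
        .dma_clean_range = xscale_dma_clean_range,
        .dma_flush_range = xscale_dma_flush_range,
    };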
378ENTRY(cpu_xscale_dcache_clean_area) 396ENTRY(cpu_xscale_dcache_clean_area)
3791: mcr p15, 0, r0, c7, c10, 1 @ clean D entry 3971: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
380 add r0, r0, #CACHELINESIZE 398 add r0, r0, #CACHELINESIZE
@@ -531,6 +549,11 @@ cpu_elf_name:
531 .asciz "v5" 549 .asciz "v5"
532 .size cpu_elf_name, . - cpu_elf_name 550 .size cpu_elf_name, . - cpu_elf_name
533 551
552 .type cpu_80200_A0_A1_name, #object
553cpu_80200_A0_A1_name:
554 .asciz "XScale-80200 A0/A1"
555 .size cpu_80200_A0_A1_name, . - cpu_80200_A0_A1_name
556
534 .type cpu_80200_name, #object 557 .type cpu_80200_name, #object
535cpu_80200_name: 558cpu_80200_name:
536 .asciz "XScale-80200" 559 .asciz "XScale-80200"
@@ -595,6 +618,29 @@ cpu_pxa270_name:
595 618
596 .section ".proc.info.init", #alloc, #execinstr 619 .section ".proc.info.init", #alloc, #execinstr
597 620
621 .type __80200_A0_A1_proc_info,#object
622__80200_A0_A1_proc_info:
623 .long 0x69052000
624 .long 0xfffffffe
625 .long PMD_TYPE_SECT | \
626 PMD_SECT_BUFFERABLE | \
627 PMD_SECT_CACHEABLE | \
628 PMD_SECT_AP_WRITE | \
629 PMD_SECT_AP_READ
630 .long PMD_TYPE_SECT | \
631 PMD_SECT_AP_WRITE | \
632 PMD_SECT_AP_READ
633 b __xscale_setup
634 .long cpu_arch_name
635 .long cpu_elf_name
636 .long HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
637 .long cpu_80200_A0_A1_name
638 .long xscale_processor_functions
639 .long v4wbi_tlb_fns
640 .long xscale_mc_user_fns
641 .long xscale_80200_A0_A1_cache_fns
642 .size __80200_A0_A1_proc_info, . - __80200_A0_A1_proc_info
643
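At boot the processor-type lookup masks the CP15 ID register and compares the result against each .proc.info.init entry in order, so with mask 0xfffffffe this entry claims exactly the A0 (0x69052000) and A1 (0x69052001) steppings before the generic 80200 entry below can match. A C sketch of that selection; the generic entry's mask is shown here only as an illustrative wider match:

    #include <stdint.h>
    #include <stddef.h>

    struct proc_match {
        uint32_t cpu_val;
        uint32_t cpu_mask;
        const char *name;
    };

    static const struct proc_match table[] = {
        { 0x69052000, 0xfffffffe, "XScale-80200 A0/A1" },   /* steppings A0 and A1 only */
        { 0x69052000, 0xfffffff0, "XScale-80200" },          /* illustrative wider mask */
    };

    static const struct proc_match *lookup_proc(uint32_t cpu_id)
    {
        for (size_t i = 0; i < sizeof(table) / sizeof(table[0]); i++)
            if ((cpu_id & table[i].cpu_mask) == table[i].cpu_val)
                return &table[i];                  /* first match wins */
        return NULL;
    }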
598 .type __80200_proc_info,#object 644 .type __80200_proc_info,#object
599__80200_proc_info: 645__80200_proc_info:
600 .long 0x69052000 646 .long 0x69052000