aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorRussell King <rmk@dyn-67.arm.linux.org.uk>2006-09-28 17:20:39 -0400
committerRussell King <rmk+kernel@arm.linux.org.uk>2006-09-28 17:20:39 -0400
commit250d375d1da45a5e08ab8baf5eaa7eb258afd82b (patch)
treefb5dc6df00065f11578f837835c6d5a99530b223 /arch
parent84904d0ead0a8c419abd45c7b2ac8d76d50a0d48 (diff)
parent6afd6fae1d5f7e7129a10c4f3e32018966eeac1c (diff)
Merge nommu branch
Diffstat (limited to 'arch')
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/Kconfig-nommu8
-rw-r--r--arch/arm/Makefile5
-rw-r--r--arch/arm/boot/compressed/Makefile4
-rw-r--r--arch/arm/boot/compressed/head.S6
-rw-r--r--arch/arm/kernel/head-nommu.S12
-rw-r--r--arch/arm/kernel/module.c8
-rw-r--r--arch/arm/kernel/process.c22
-rw-r--r--arch/arm/mm/Kconfig152
-rw-r--r--arch/arm/mm/Makefile8
-rw-r--r--arch/arm/mm/abort-lv4t.S7
-rw-r--r--arch/arm/mm/abort-nommu.S19
-rw-r--r--arch/arm/mm/alignment.c2
-rw-r--r--arch/arm/mm/cache-v4.S10
-rw-r--r--arch/arm/mm/fault.c15
-rw-r--r--arch/arm/mm/fault.h5
-rw-r--r--arch/arm/mm/init.c216
-rw-r--r--arch/arm/mm/mm.h5
-rw-r--r--arch/arm/mm/mmu.c (renamed from arch/arm/mm/mm-armv.c)450
-rw-r--r--arch/arm/mm/nommu.c43
-rw-r--r--arch/arm/mm/pgd.c101
-rw-r--r--arch/arm/mm/proc-arm740.S174
-rw-r--r--arch/arm/mm/proc-arm7tdmi.S249
-rw-r--r--arch/arm/mm/proc-arm940.S369
-rw-r--r--arch/arm/mm/proc-arm946.S424
-rw-r--r--arch/arm/mm/proc-arm9tdmi.S134
26 files changed, 2053 insertions, 400 deletions
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 147abe0383d8..f9362ee9955f 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -99,7 +99,7 @@ config ARCH_MTD_XIP
99 99
100config VECTORS_BASE 100config VECTORS_BASE
101 hex 101 hex
102 default 0xffff0000 if MMU 102 default 0xffff0000 if MMU || CPU_HIGH_VECTOR
103 default DRAM_BASE if REMAP_VECTORS_TO_RAM 103 default DRAM_BASE if REMAP_VECTORS_TO_RAM
104 default 0x00000000 104 default 0x00000000
105 help 105 help
@@ -626,6 +626,7 @@ config LEDS_CPU
626 626
627config ALIGNMENT_TRAP 627config ALIGNMENT_TRAP
628 bool 628 bool
629 depends on CPU_CP15_MMU
629 default y if !ARCH_EBSA110 630 default y if !ARCH_EBSA110
630 help 631 help
631 ARM processors can not fetch/store information which is not 632 ARM processors can not fetch/store information which is not
@@ -857,7 +858,7 @@ source "drivers/base/Kconfig"
857 858
858source "drivers/connector/Kconfig" 859source "drivers/connector/Kconfig"
859 860
860if ALIGNMENT_TRAP 861if ALIGNMENT_TRAP || !CPU_CP15_MMU
861source "drivers/mtd/Kconfig" 862source "drivers/mtd/Kconfig"
862endif 863endif
863 864
diff --git a/arch/arm/Kconfig-nommu b/arch/arm/Kconfig-nommu
index e1574be2ded6..f087376748d1 100644
--- a/arch/arm/Kconfig-nommu
+++ b/arch/arm/Kconfig-nommu
@@ -25,6 +25,14 @@ config FLASH_SIZE
25 hex 'FLASH Size' if SET_MEM_PARAM 25 hex 'FLASH Size' if SET_MEM_PARAM
26 default 0x00400000 26 default 0x00400000
27 27
28config PROCESSOR_ID
29 hex
30 default 0x00007700
31 depends on !CPU_CP15
32 help
33 If processor has no CP15 register, this processor ID is
34 used instead of the auto-probing which utilizes the register.
35
28config REMAP_VECTORS_TO_RAM 36config REMAP_VECTORS_TO_RAM
29 bool 'Install vectors to the beginning of RAM' if DRAM_BASE 37 bool 'Install vectors to the beginning of RAM' if DRAM_BASE
30 depends on DRAM_BASE 38 depends on DRAM_BASE
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 80cee786500c..2a0b2c8a1fe0 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -55,7 +55,12 @@ arch-$(CONFIG_CPU_32v3) :=-D__LINUX_ARM_ARCH__=3 -march=armv3
55# This selects how we optimise for the processor. 55# This selects how we optimise for the processor.
56tune-$(CONFIG_CPU_ARM610) :=-mtune=arm610 56tune-$(CONFIG_CPU_ARM610) :=-mtune=arm610
57tune-$(CONFIG_CPU_ARM710) :=-mtune=arm710 57tune-$(CONFIG_CPU_ARM710) :=-mtune=arm710
58tune-$(CONFIG_CPU_ARM7TDMI) :=-mtune=arm7tdmi
58tune-$(CONFIG_CPU_ARM720T) :=-mtune=arm7tdmi 59tune-$(CONFIG_CPU_ARM720T) :=-mtune=arm7tdmi
60tune-$(CONFIG_CPU_ARM740T) :=-mtune=arm7tdmi
61tune-$(CONFIG_CPU_ARM9TDMI) :=-mtune=arm9tdmi
62tune-$(CONFIG_CPU_ARM940T) :=-mtune=arm9tdmi
63tune-$(CONFIG_CPU_ARM946T) :=$(call cc-option,-mtune=arm9e,-mtune=arm9tdmi)
59tune-$(CONFIG_CPU_ARM920T) :=-mtune=arm9tdmi 64tune-$(CONFIG_CPU_ARM920T) :=-mtune=arm9tdmi
60tune-$(CONFIG_CPU_ARM922T) :=-mtune=arm9tdmi 65tune-$(CONFIG_CPU_ARM922T) :=-mtune=arm9tdmi
61tune-$(CONFIG_CPU_ARM925T) :=-mtune=arm9tdmi 66tune-$(CONFIG_CPU_ARM925T) :=-mtune=arm9tdmi
diff --git a/arch/arm/boot/compressed/Makefile b/arch/arm/boot/compressed/Makefile
index 2adc1527e0eb..adddc7131685 100644
--- a/arch/arm/boot/compressed/Makefile
+++ b/arch/arm/boot/compressed/Makefile
@@ -51,7 +51,11 @@ OBJS += head-at91rm9200.o
51endif 51endif
52 52
53ifeq ($(CONFIG_CPU_BIG_ENDIAN),y) 53ifeq ($(CONFIG_CPU_BIG_ENDIAN),y)
54ifeq ($(CONFIG_CPU_CP15),y)
54OBJS += big-endian.o 55OBJS += big-endian.o
56else
57# The endian should be set by h/w design.
58endif
55endif 59endif
56 60
57# 61#
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 75df1f764a10..e5ab51b9cceb 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -82,9 +82,11 @@
82 kphex r6, 8 /* processor id */ 82 kphex r6, 8 /* processor id */
83 kputc #':' 83 kputc #':'
84 kphex r7, 8 /* architecture id */ 84 kphex r7, 8 /* architecture id */
85#ifdef CONFIG_CPU_CP15
85 kputc #':' 86 kputc #':'
86 mrc p15, 0, r0, c1, c0 87 mrc p15, 0, r0, c1, c0
87 kphex r0, 8 /* control reg */ 88 kphex r0, 8 /* control reg */
89#endif
88 kputc #'\n' 90 kputc #'\n'
89 kphex r5, 8 /* decompressed kernel start */ 91 kphex r5, 8 /* decompressed kernel start */
90 kputc #'-' 92 kputc #'-'
@@ -507,7 +509,11 @@ call_kernel: bl cache_clean_flush
507 */ 509 */
508 510
509call_cache_fn: adr r12, proc_types 511call_cache_fn: adr r12, proc_types
512#ifdef CONFIG_CPU_CP15
510 mrc p15, 0, r6, c0, c0 @ get processor ID 513 mrc p15, 0, r6, c0, c0 @ get processor ID
514#else
515 ldr r6, =CONFIG_PROCESSOR_ID
516#endif
5111: ldr r1, [r12, #0] @ get value 5171: ldr r1, [r12, #0] @ get value
512 ldr r2, [r12, #4] @ get mask 518 ldr r2, [r12, #4] @ get mask
513 eor r1, r1, r6 @ (real ^ match) 519 eor r1, r1, r6 @ (real ^ match)
diff --git a/arch/arm/kernel/head-nommu.S b/arch/arm/kernel/head-nommu.S
index ac9eb3d30518..f359a189dcf2 100644
--- a/arch/arm/kernel/head-nommu.S
+++ b/arch/arm/kernel/head-nommu.S
@@ -9,7 +9,6 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 * 10 *
11 * Common kernel startup code (non-paged MM) 11 * Common kernel startup code (non-paged MM)
12 * for 32-bit CPUs which has a process ID register(CP15).
13 * 12 *
14 */ 13 */
15#include <linux/linkage.h> 14#include <linux/linkage.h>
@@ -40,7 +39,11 @@
40ENTRY(stext) 39ENTRY(stext)
41 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode 40 msr cpsr_c, #PSR_F_BIT | PSR_I_BIT | SVC_MODE @ ensure svc mode
42 @ and irqs disabled 41 @ and irqs disabled
42#ifndef CONFIG_CPU_CP15
43 ldr r9, =CONFIG_PROCESSOR_ID
44#else
43 mrc p15, 0, r9, c0, c0 @ get processor id 45 mrc p15, 0, r9, c0, c0 @ get processor id
46#endif
44 bl __lookup_processor_type @ r5=procinfo r9=cpuid 47 bl __lookup_processor_type @ r5=procinfo r9=cpuid
45 movs r10, r5 @ invalid processor (r5=0)? 48 movs r10, r5 @ invalid processor (r5=0)?
46 beq __error_p @ yes, error 'p' 49 beq __error_p @ yes, error 'p'
@@ -58,6 +61,7 @@ ENTRY(stext)
58 */ 61 */
59 .type __after_proc_init, %function 62 .type __after_proc_init, %function
60__after_proc_init: 63__after_proc_init:
64#ifdef CONFIG_CPU_CP15
61 mrc p15, 0, r0, c1, c0, 0 @ read control reg 65 mrc p15, 0, r0, c1, c0, 0 @ read control reg
62#ifdef CONFIG_ALIGNMENT_TRAP 66#ifdef CONFIG_ALIGNMENT_TRAP
63 orr r0, r0, #CR_A 67 orr r0, r0, #CR_A
@@ -73,7 +77,13 @@ __after_proc_init:
73#ifdef CONFIG_CPU_ICACHE_DISABLE 77#ifdef CONFIG_CPU_ICACHE_DISABLE
74 bic r0, r0, #CR_I 78 bic r0, r0, #CR_I
75#endif 79#endif
80#ifdef CONFIG_CPU_HIGH_VECTOR
81 orr r0, r0, #CR_V
82#else
83 bic r0, r0, #CR_V
84#endif
76 mcr p15, 0, r0, c1, c0, 0 @ write control reg 85 mcr p15, 0, r0, c1, c0, 0 @ write control reg
86#endif /* CONFIG_CPU_CP15 */
77 87
78 mov pc, r13 @ clear the BSS and jump 88 mov pc, r13 @ clear the BSS and jump
79 @ to start_kernel 89 @ to start_kernel
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 298363d97047..1b061583408e 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -2,6 +2,7 @@
2 * linux/arch/arm/kernel/module.c 2 * linux/arch/arm/kernel/module.c
3 * 3 *
4 * Copyright (C) 2002 Russell King. 4 * Copyright (C) 2002 Russell King.
5 * Modified for nommu by Hyok S. Choi
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -32,6 +33,7 @@ extern void _etext;
32#define MODULE_START (((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK) 33#define MODULE_START (((unsigned long)&_etext + ~PGDIR_MASK) & PGDIR_MASK)
33#endif 34#endif
34 35
36#ifdef CONFIG_MMU
35void *module_alloc(unsigned long size) 37void *module_alloc(unsigned long size)
36{ 38{
37 struct vm_struct *area; 39 struct vm_struct *area;
@@ -46,6 +48,12 @@ void *module_alloc(unsigned long size)
46 48
47 return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL); 49 return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
48} 50}
51#else /* CONFIG_MMU */
52void *module_alloc(unsigned long size)
53{
54 return size == 0 ? NULL : vmalloc(size);
55}
56#endif /* !CONFIG_MMU */
49 57
50void module_free(struct module *module, void *region) 58void module_free(struct module *module, void *region)
51{ 59{
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index 3079535afccd..bf35c178a877 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -221,16 +221,26 @@ void __show_regs(struct pt_regs *regs)
221 processor_modes[processor_mode(regs)], 221 processor_modes[processor_mode(regs)],
222 thumb_mode(regs) ? " (T)" : "", 222 thumb_mode(regs) ? " (T)" : "",
223 get_fs() == get_ds() ? "kernel" : "user"); 223 get_fs() == get_ds() ? "kernel" : "user");
224#ifdef CONFIG_CPU_CP15
224 { 225 {
225 unsigned int ctrl, transbase, dac; 226 unsigned int ctrl;
226 __asm__ ( 227 __asm__ (
227 " mrc p15, 0, %0, c1, c0\n" 228 " mrc p15, 0, %0, c1, c0\n"
228 " mrc p15, 0, %1, c2, c0\n" 229 : "=r" (ctrl));
229 " mrc p15, 0, %2, c3, c0\n" 230 printk("Control: %04X\n", ctrl);
230 : "=r" (ctrl), "=r" (transbase), "=r" (dac));
231 printk("Control: %04X Table: %08X DAC: %08X\n",
232 ctrl, transbase, dac);
233 } 231 }
232#ifdef CONFIG_CPU_CP15_MMU
233 {
234 unsigned int transbase, dac;
235 __asm__ (
236 " mrc p15, 0, %0, c2, c0\n"
237 " mrc p15, 0, %1, c3, c0\n"
238 : "=r" (transbase), "=r" (dac));
239 printk("Table: %08X DAC: %08X\n",
240 transbase, dac);
241 }
242#endif
243#endif
234} 244}
235 245
236void show_regs(struct pt_regs * regs) 246void show_regs(struct pt_regs * regs)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index b59c74100a84..c0bfb8212b77 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -15,6 +15,7 @@ config CPU_ARM610
15 select CPU_32v3 15 select CPU_32v3
16 select CPU_CACHE_V3 16 select CPU_CACHE_V3
17 select CPU_CACHE_VIVT 17 select CPU_CACHE_VIVT
18 select CPU_CP15_MMU
18 select CPU_COPY_V3 if MMU 19 select CPU_COPY_V3 if MMU
19 select CPU_TLB_V3 if MMU 20 select CPU_TLB_V3 if MMU
20 help 21 help
@@ -24,6 +25,20 @@ config CPU_ARM610
24 Say Y if you want support for the ARM610 processor. 25 Say Y if you want support for the ARM610 processor.
25 Otherwise, say N. 26 Otherwise, say N.
26 27
28# ARM7TDMI
29config CPU_ARM7TDMI
30 bool "Support ARM7TDMI processor"
31 depends on !MMU
32 select CPU_32v4T
33 select CPU_ABRT_LV4T
34 select CPU_CACHE_V4
35 help
36 A 32-bit RISC microprocessor based on the ARM7 processor core
37 which has no memory control unit and cache.
38
39 Say Y if you want support for the ARM7TDMI processor.
40 Otherwise, say N.
41
27# ARM710 42# ARM710
28config CPU_ARM710 43config CPU_ARM710
29 bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC 44 bool "Support ARM710 processor" if !ARCH_CLPS7500 && ARCH_RPC
@@ -31,6 +46,7 @@ config CPU_ARM710
31 select CPU_32v3 46 select CPU_32v3
32 select CPU_CACHE_V3 47 select CPU_CACHE_V3
33 select CPU_CACHE_VIVT 48 select CPU_CACHE_VIVT
49 select CPU_CP15_MMU
34 select CPU_COPY_V3 if MMU 50 select CPU_COPY_V3 if MMU
35 select CPU_TLB_V3 if MMU 51 select CPU_TLB_V3 if MMU
36 help 52 help
@@ -50,6 +66,7 @@ config CPU_ARM720T
50 select CPU_ABRT_LV4T 66 select CPU_ABRT_LV4T
51 select CPU_CACHE_V4 67 select CPU_CACHE_V4
52 select CPU_CACHE_VIVT 68 select CPU_CACHE_VIVT
69 select CPU_CP15_MMU
53 select CPU_COPY_V4WT if MMU 70 select CPU_COPY_V4WT if MMU
54 select CPU_TLB_V4WT if MMU 71 select CPU_TLB_V4WT if MMU
55 help 72 help
@@ -59,6 +76,36 @@ config CPU_ARM720T
59 Say Y if you want support for the ARM720T processor. 76 Say Y if you want support for the ARM720T processor.
60 Otherwise, say N. 77 Otherwise, say N.
61 78
79# ARM740T
80config CPU_ARM740T
81 bool "Support ARM740T processor" if ARCH_INTEGRATOR
82 depends on !MMU
83 select CPU_32v4T
84 select CPU_ABRT_LV4T
85 select CPU_CACHE_V3 # although the core is v4t
86 select CPU_CP15_MPU
87 help
88 A 32-bit RISC processor with 8KB cache or 4KB variants,
89 write buffer and MPU(Protection Unit) built around
90 an ARM7TDMI core.
91
92 Say Y if you want support for the ARM740T processor.
93 Otherwise, say N.
94
95# ARM9TDMI
96config CPU_ARM9TDMI
97 bool "Support ARM9TDMI processor"
98 depends on !MMU
99 select CPU_32v4T
100 select CPU_ABRT_NOMMU
101 select CPU_CACHE_V4
102 help
103 A 32-bit RISC microprocessor based on the ARM9 processor core
104 which has no memory control unit and cache.
105
106 Say Y if you want support for the ARM9TDMI processor.
107 Otherwise, say N.
108
62# ARM920T 109# ARM920T
63config CPU_ARM920T 110config CPU_ARM920T
64 bool "Support ARM920T processor" 111 bool "Support ARM920T processor"
@@ -68,6 +115,7 @@ config CPU_ARM920T
68 select CPU_ABRT_EV4T 115 select CPU_ABRT_EV4T
69 select CPU_CACHE_V4WT 116 select CPU_CACHE_V4WT
70 select CPU_CACHE_VIVT 117 select CPU_CACHE_VIVT
118 select CPU_CP15_MMU
71 select CPU_COPY_V4WB if MMU 119 select CPU_COPY_V4WB if MMU
72 select CPU_TLB_V4WBI if MMU 120 select CPU_TLB_V4WBI if MMU
73 help 121 help
@@ -89,6 +137,7 @@ config CPU_ARM922T
89 select CPU_ABRT_EV4T 137 select CPU_ABRT_EV4T
90 select CPU_CACHE_V4WT 138 select CPU_CACHE_V4WT
91 select CPU_CACHE_VIVT 139 select CPU_CACHE_VIVT
140 select CPU_CP15_MMU
92 select CPU_COPY_V4WB if MMU 141 select CPU_COPY_V4WB if MMU
93 select CPU_TLB_V4WBI if MMU 142 select CPU_TLB_V4WBI if MMU
94 help 143 help
@@ -108,6 +157,7 @@ config CPU_ARM925T
108 select CPU_ABRT_EV4T 157 select CPU_ABRT_EV4T
109 select CPU_CACHE_V4WT 158 select CPU_CACHE_V4WT
110 select CPU_CACHE_VIVT 159 select CPU_CACHE_VIVT
160 select CPU_CP15_MMU
111 select CPU_COPY_V4WB if MMU 161 select CPU_COPY_V4WB if MMU
112 select CPU_TLB_V4WBI if MMU 162 select CPU_TLB_V4WBI if MMU
113 help 163 help
@@ -126,6 +176,7 @@ config CPU_ARM926T
126 select CPU_32v5 176 select CPU_32v5
127 select CPU_ABRT_EV5TJ 177 select CPU_ABRT_EV5TJ
128 select CPU_CACHE_VIVT 178 select CPU_CACHE_VIVT
179 select CPU_CP15_MMU
129 select CPU_COPY_V4WB if MMU 180 select CPU_COPY_V4WB if MMU
130 select CPU_TLB_V4WBI if MMU 181 select CPU_TLB_V4WBI if MMU
131 help 182 help
@@ -136,6 +187,39 @@ config CPU_ARM926T
136 Say Y if you want support for the ARM926T processor. 187 Say Y if you want support for the ARM926T processor.
137 Otherwise, say N. 188 Otherwise, say N.
138 189
190# ARM940T
191config CPU_ARM940T
192 bool "Support ARM940T processor" if ARCH_INTEGRATOR
193 depends on !MMU
194 select CPU_32v4T
195 select CPU_ABRT_NOMMU
196 select CPU_CACHE_VIVT
197 select CPU_CP15_MPU
198 help
199 ARM940T is a member of the ARM9TDMI family of general-
200 purpose microprocessors with MPU and separate 4KB
201 instruction and 4KB data caches, each with a 4-word line
202 length.
203
204 Say Y if you want support for the ARM940T processor.
205 Otherwise, say N.
206
207# ARM946E-S
208config CPU_ARM946E
209 bool "Support ARM946E-S processor" if ARCH_INTEGRATOR
210 depends on !MMU
211 select CPU_32v5
212 select CPU_ABRT_NOMMU
213 select CPU_CACHE_VIVT
214 select CPU_CP15_MPU
215 help
216 ARM946E-S is a member of the ARM9E-S family of high-
217 performance, 32-bit system-on-chip processor solutions.
218 The TCM and ARMv5TE 32-bit instruction set is supported.
219
220 Say Y if you want support for the ARM946E-S processor.
221 Otherwise, say N.
222
139# ARM1020 - needs validating 223# ARM1020 - needs validating
140config CPU_ARM1020 224config CPU_ARM1020
141 bool "Support ARM1020T (rev 0) processor" 225 bool "Support ARM1020T (rev 0) processor"
@@ -144,6 +228,7 @@ config CPU_ARM1020
144 select CPU_ABRT_EV4T 228 select CPU_ABRT_EV4T
145 select CPU_CACHE_V4WT 229 select CPU_CACHE_V4WT
146 select CPU_CACHE_VIVT 230 select CPU_CACHE_VIVT
231 select CPU_CP15_MMU
147 select CPU_COPY_V4WB if MMU 232 select CPU_COPY_V4WB if MMU
148 select CPU_TLB_V4WBI if MMU 233 select CPU_TLB_V4WBI if MMU
149 help 234 help
@@ -161,6 +246,7 @@ config CPU_ARM1020E
161 select CPU_ABRT_EV4T 246 select CPU_ABRT_EV4T
162 select CPU_CACHE_V4WT 247 select CPU_CACHE_V4WT
163 select CPU_CACHE_VIVT 248 select CPU_CACHE_VIVT
249 select CPU_CP15_MMU
164 select CPU_COPY_V4WB if MMU 250 select CPU_COPY_V4WB if MMU
165 select CPU_TLB_V4WBI if MMU 251 select CPU_TLB_V4WBI if MMU
166 depends on n 252 depends on n
@@ -172,6 +258,7 @@ config CPU_ARM1022
172 select CPU_32v5 258 select CPU_32v5
173 select CPU_ABRT_EV4T 259 select CPU_ABRT_EV4T
174 select CPU_CACHE_VIVT 260 select CPU_CACHE_VIVT
261 select CPU_CP15_MMU
175 select CPU_COPY_V4WB if MMU # can probably do better 262 select CPU_COPY_V4WB if MMU # can probably do better
176 select CPU_TLB_V4WBI if MMU 263 select CPU_TLB_V4WBI if MMU
177 help 264 help
@@ -189,6 +276,7 @@ config CPU_ARM1026
189 select CPU_32v5 276 select CPU_32v5
190 select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10 277 select CPU_ABRT_EV5T # But need Jazelle, but EV5TJ ignores bit 10
191 select CPU_CACHE_VIVT 278 select CPU_CACHE_VIVT
279 select CPU_CP15_MMU
192 select CPU_COPY_V4WB if MMU # can probably do better 280 select CPU_COPY_V4WB if MMU # can probably do better
193 select CPU_TLB_V4WBI if MMU 281 select CPU_TLB_V4WBI if MMU
194 help 282 help
@@ -207,6 +295,7 @@ config CPU_SA110
207 select CPU_ABRT_EV4 295 select CPU_ABRT_EV4
208 select CPU_CACHE_V4WB 296 select CPU_CACHE_V4WB
209 select CPU_CACHE_VIVT 297 select CPU_CACHE_VIVT
298 select CPU_CP15_MMU
210 select CPU_COPY_V4WB if MMU 299 select CPU_COPY_V4WB if MMU
211 select CPU_TLB_V4WB if MMU 300 select CPU_TLB_V4WB if MMU
212 help 301 help
@@ -227,6 +316,7 @@ config CPU_SA1100
227 select CPU_ABRT_EV4 316 select CPU_ABRT_EV4
228 select CPU_CACHE_V4WB 317 select CPU_CACHE_V4WB
229 select CPU_CACHE_VIVT 318 select CPU_CACHE_VIVT
319 select CPU_CP15_MMU
230 select CPU_TLB_V4WB if MMU 320 select CPU_TLB_V4WB if MMU
231 321
232# XScale 322# XScale
@@ -237,6 +327,7 @@ config CPU_XSCALE
237 select CPU_32v5 327 select CPU_32v5
238 select CPU_ABRT_EV5T 328 select CPU_ABRT_EV5T
239 select CPU_CACHE_VIVT 329 select CPU_CACHE_VIVT
330 select CPU_CP15_MMU
240 select CPU_TLB_V4WBI if MMU 331 select CPU_TLB_V4WBI if MMU
241 332
242# XScale Core Version 3 333# XScale Core Version 3
@@ -247,6 +338,7 @@ config CPU_XSC3
247 select CPU_32v5 338 select CPU_32v5
248 select CPU_ABRT_EV5T 339 select CPU_ABRT_EV5T
249 select CPU_CACHE_VIVT 340 select CPU_CACHE_VIVT
341 select CPU_CP15_MMU
250 select CPU_TLB_V4WBI if MMU 342 select CPU_TLB_V4WBI if MMU
251 select IO_36 343 select IO_36
252 344
@@ -258,6 +350,7 @@ config CPU_V6
258 select CPU_ABRT_EV6 350 select CPU_ABRT_EV6
259 select CPU_CACHE_V6 351 select CPU_CACHE_V6
260 select CPU_CACHE_VIPT 352 select CPU_CACHE_VIPT
353 select CPU_CP15_MMU
261 select CPU_COPY_V6 if MMU 354 select CPU_COPY_V6 if MMU
262 select CPU_TLB_V6 if MMU 355 select CPU_TLB_V6 if MMU
263 356
@@ -299,6 +392,9 @@ config CPU_32v6
299 bool 392 bool
300 393
301# The abort model 394# The abort model
395config CPU_ABRT_NOMMU
396 bool
397
302config CPU_ABRT_EV4 398config CPU_ABRT_EV4
303 bool 399 bool
304 400
@@ -380,6 +476,23 @@ config CPU_TLB_V6
380 476
381endif 477endif
382 478
479config CPU_CP15
480 bool
481 help
482 Processor has the CP15 register.
483
484config CPU_CP15_MMU
485 bool
486 select CPU_CP15
487 help
488 Processor has the CP15 register, which has MMU related registers.
489
490config CPU_CP15_MPU
491 bool
492 select CPU_CP15
493 help
494 Processor has the CP15 register, which has MPU related registers.
495
383# 496#
384# CPU supports 36-bit I/O 497# CPU supports 36-bit I/O
385# 498#
@@ -390,7 +503,7 @@ comment "Processor Features"
390 503
391config ARM_THUMB 504config ARM_THUMB
392 bool "Support Thumb user binaries" 505 bool "Support Thumb user binaries"
393 depends on CPU_ARM720T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6 506 depends on CPU_ARM720T || CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_ARM1020E || CPU_ARM1022 || CPU_ARM1026 || CPU_XSCALE || CPU_XSC3 || CPU_V6
394 default y 507 default y
395 help 508 help
396 Say Y if you want to include kernel support for running user space 509 Say Y if you want to include kernel support for running user space
@@ -411,23 +524,48 @@ config CPU_BIG_ENDIAN
411 port must properly enable any big-endian related features 524 port must properly enable any big-endian related features
412 of your chipset/board/processor. 525 of your chipset/board/processor.
413 526
527config CPU_HIGH_VECTOR
528 depends on !MMU && CPU_CP15 && !CPU_ARM740T
529 bool "Select the High exception vector"
530 default n
531 help
532 Say Y here to select high exception vector(0xFFFF0000~).
533 The exception vector can vary depending on the platform
534 design in nommu mode. If your platform needs to select
535 high exception vector, say Y.
536 Otherwise or if you are unsure, say N, and the low exception
537 vector (0x00000000~) will be used.
538
414config CPU_ICACHE_DISABLE 539config CPU_ICACHE_DISABLE
415 bool "Disable I-Cache" 540 bool "Disable I-Cache (I-bit)"
416 depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6 541 depends on CPU_CP15 && !(CPU_ARM610 || CPU_ARM710 || CPU_ARM720T || CPU_ARM740T || CPU_XSCALE || CPU_XSC3)
417 help 542 help
418 Say Y here to disable the processor instruction cache. Unless 543 Say Y here to disable the processor instruction cache. Unless
419 you have a reason not to or are unsure, say N. 544 you have a reason not to or are unsure, say N.
420 545
421config CPU_DCACHE_DISABLE 546config CPU_DCACHE_DISABLE
422 bool "Disable D-Cache" 547 bool "Disable D-Cache (C-bit)"
423 depends on CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6 548 depends on CPU_CP15
424 help 549 help
425 Say Y here to disable the processor data cache. Unless 550 Say Y here to disable the processor data cache. Unless
426 you have a reason not to or are unsure, say N. 551 you have a reason not to or are unsure, say N.
427 552
553config CPU_DCACHE_SIZE
554 hex
555 depends on CPU_ARM740T || CPU_ARM946E
556 default 0x00001000 if CPU_ARM740T
557 default 0x00002000 # default size for ARM946E-S
558 help
559 Some cores are synthesizable to have various sized cache. For
560 ARM946E-S case, it can vary from 0KB to 1MB.
561 To support such cache operations, it is efficient to know the size
562 before compile time.
563 If your SoC is configured to have a different size, define the value
564 here with proper conditions.
565
428config CPU_DCACHE_WRITETHROUGH 566config CPU_DCACHE_WRITETHROUGH
429 bool "Force write through D-cache" 567 bool "Force write through D-cache"
430 depends on (CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE 568 depends on (CPU_ARM740T || CPU_ARM920T || CPU_ARM922T || CPU_ARM925T || CPU_ARM926T || CPU_ARM940T || CPU_ARM946E || CPU_ARM1020 || CPU_V6) && !CPU_DCACHE_DISABLE
431 default y if CPU_ARM925T 569 default y if CPU_ARM925T
432 help 570 help
433 Say Y here to use the data cache in writethrough mode. Unless you 571 Say Y here to use the data cache in writethrough mode. Unless you
@@ -435,7 +573,7 @@ config CPU_DCACHE_WRITETHROUGH
435 573
436config CPU_CACHE_ROUND_ROBIN 574config CPU_CACHE_ROUND_ROBIN
437 bool "Round robin I and D cache replacement algorithm" 575 bool "Round robin I and D cache replacement algorithm"
438 depends on (CPU_ARM926T || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE) 576 depends on (CPU_ARM926T || CPU_ARM946E || CPU_ARM1020) && (!CPU_ICACHE_DISABLE || !CPU_DCACHE_DISABLE)
439 help 577 help
440 Say Y here to use the predictable round-robin cache replacement 578 Say Y here to use the predictable round-robin cache replacement
441 policy. Unless you specifically require this or are unsure, say N. 579 policy. Unless you specifically require this or are unsure, say N.
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 1a1563f859af..d2f5672ecf62 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := consistent.o extable.o fault.o init.o \
6 iomap.o 6 iomap.o
7 7
8obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \ 8obj-$(CONFIG_MMU) += fault-armv.o flush.o ioremap.o mmap.o \
9 mm-armv.o 9 pgd.o mmu.o
10 10
11ifneq ($(CONFIG_MMU),y) 11ifneq ($(CONFIG_MMU),y)
12obj-y += nommu.o 12obj-y += nommu.o
@@ -17,6 +17,7 @@ obj-$(CONFIG_MODULES) += proc-syms.o
17obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o 17obj-$(CONFIG_ALIGNMENT_TRAP) += alignment.o
18obj-$(CONFIG_DISCONTIGMEM) += discontig.o 18obj-$(CONFIG_DISCONTIGMEM) += discontig.o
19 19
20obj-$(CONFIG_CPU_ABRT_NOMMU) += abort-nommu.o
20obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o 21obj-$(CONFIG_CPU_ABRT_EV4) += abort-ev4.o
21obj-$(CONFIG_CPU_ABRT_EV4T) += abort-ev4t.o 22obj-$(CONFIG_CPU_ABRT_EV4T) += abort-ev4t.o
22obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o 23obj-$(CONFIG_CPU_ABRT_LV4T) += abort-lv4t.o
@@ -46,11 +47,16 @@ obj-$(CONFIG_CPU_TLB_V6) += tlb-v6.o
46 47
47obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o 48obj-$(CONFIG_CPU_ARM610) += proc-arm6_7.o
48obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o 49obj-$(CONFIG_CPU_ARM710) += proc-arm6_7.o
50obj-$(CONFIG_CPU_ARM7TDMI) += proc-arm7tdmi.o
49obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o 51obj-$(CONFIG_CPU_ARM720T) += proc-arm720.o
52obj-$(CONFIG_CPU_ARM740T) += proc-arm740.o
53obj-$(CONFIG_CPU_ARM9TDMI) += proc-arm9tdmi.o
50obj-$(CONFIG_CPU_ARM920T) += proc-arm920.o 54obj-$(CONFIG_CPU_ARM920T) += proc-arm920.o
51obj-$(CONFIG_CPU_ARM922T) += proc-arm922.o 55obj-$(CONFIG_CPU_ARM922T) += proc-arm922.o
52obj-$(CONFIG_CPU_ARM925T) += proc-arm925.o 56obj-$(CONFIG_CPU_ARM925T) += proc-arm925.o
53obj-$(CONFIG_CPU_ARM926T) += proc-arm926.o 57obj-$(CONFIG_CPU_ARM926T) += proc-arm926.o
58obj-$(CONFIG_CPU_ARM940T) += proc-arm940.o
59obj-$(CONFIG_CPU_ARM946E) += proc-arm946.o
54obj-$(CONFIG_CPU_ARM1020) += proc-arm1020.o 60obj-$(CONFIG_CPU_ARM1020) += proc-arm1020.o
55obj-$(CONFIG_CPU_ARM1020E) += proc-arm1020e.o 61obj-$(CONFIG_CPU_ARM1020E) += proc-arm1020e.o
56obj-$(CONFIG_CPU_ARM1022) += proc-arm1022.o 62obj-$(CONFIG_CPU_ARM1022) += proc-arm1022.o
diff --git a/arch/arm/mm/abort-lv4t.S b/arch/arm/mm/abort-lv4t.S
index db743e510214..9fb7b0e25ea1 100644
--- a/arch/arm/mm/abort-lv4t.S
+++ b/arch/arm/mm/abort-lv4t.S
@@ -19,11 +19,16 @@
19 */ 19 */
20ENTRY(v4t_late_abort) 20ENTRY(v4t_late_abort)
21 tst r3, #PSR_T_BIT @ check for thumb mode 21 tst r3, #PSR_T_BIT @ check for thumb mode
22#ifdef CONFIG_CPU_CP15_MMU
22 mrc p15, 0, r1, c5, c0, 0 @ get FSR 23 mrc p15, 0, r1, c5, c0, 0 @ get FSR
23 mrc p15, 0, r0, c6, c0, 0 @ get FAR 24 mrc p15, 0, r0, c6, c0, 0 @ get FAR
25 bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
26#else
27 mov r0, #0 @ clear r0, r1 (no FSR/FAR)
28 mov r1, #0
29#endif
24 bne .data_thumb_abort 30 bne .data_thumb_abort
25 ldr r8, [r2] @ read arm instruction 31 ldr r8, [r2] @ read arm instruction
26 bic r1, r1, #1 << 11 | 1 << 10 @ clear bits 11 and 10 of FSR
27 tst r8, #1 << 20 @ L = 1 -> write? 32 tst r8, #1 << 20 @ L = 1 -> write?
28 orreq r1, r1, #1 << 11 @ yes. 33 orreq r1, r1, #1 << 11 @ yes.
29 and r7, r8, #15 << 24 34 and r7, r8, #15 << 24
diff --git a/arch/arm/mm/abort-nommu.S b/arch/arm/mm/abort-nommu.S
new file mode 100644
index 000000000000..a7cc7f9ee45d
--- /dev/null
+++ b/arch/arm/mm/abort-nommu.S
@@ -0,0 +1,19 @@
1#include <linux/linkage.h>
2#include <asm/assembler.h>
3/*
4 * Function: nommu_early_abort
5 *
6 * Params : r2 = address of aborted instruction
7 * : r3 = saved SPSR
8 *
9 * Returns : r0 = 0 (abort address)
10 * : r1 = 0 (FSR)
11 *
12 * Note: There is no FSR/FAR on !CPU_CP15_MMU cores.
13 * Just fill zero into the registers.
14 */
15 .align 5
16ENTRY(nommu_early_abort)
17 mov r0, #0 @ clear r0, r1 (no FSR/FAR)
18 mov r1, #0
19 mov pc, lr
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index e0d21bbbe7d7..aa109f074dd9 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -735,7 +735,7 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
735 /* 735 /*
736 * We got a fault - fix it up, or die. 736 * We got a fault - fix it up, or die.
737 */ 737 */
738 do_bad_area(current, current->mm, addr, fsr, regs); 738 do_bad_area(addr, fsr, regs);
739 return 0; 739 return 0;
740 740
741 swp: 741 swp:
diff --git a/arch/arm/mm/cache-v4.S b/arch/arm/mm/cache-v4.S
index b8ad5d58ebe2..b2908063ed6a 100644
--- a/arch/arm/mm/cache-v4.S
+++ b/arch/arm/mm/cache-v4.S
@@ -29,9 +29,13 @@ ENTRY(v4_flush_user_cache_all)
29 * Clean and invalidate the entire cache. 29 * Clean and invalidate the entire cache.
30 */ 30 */
31ENTRY(v4_flush_kern_cache_all) 31ENTRY(v4_flush_kern_cache_all)
32#ifdef CONFIG_CPU_CP15
32 mov r0, #0 33 mov r0, #0
33 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 34 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
34 mov pc, lr 35 mov pc, lr
36#else
37 /* FALLTHROUGH */
38#endif
35 39
36/* 40/*
37 * flush_user_cache_range(start, end, flags) 41 * flush_user_cache_range(start, end, flags)
@@ -44,9 +48,13 @@ ENTRY(v4_flush_kern_cache_all)
44 * - flags - vma_area_struct flags describing address space 48 * - flags - vma_area_struct flags describing address space
45 */ 49 */
46ENTRY(v4_flush_user_cache_range) 50ENTRY(v4_flush_user_cache_range)
51#ifdef CONFIG_CPU_CP15
47 mov ip, #0 52 mov ip, #0
48 mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache 53 mcreq p15, 0, ip, c7, c7, 0 @ flush ID cache
49 mov pc, lr 54 mov pc, lr
55#else
56 /* FALLTHROUGH */
57#endif
50 58
51/* 59/*
52 * coherent_kern_range(start, end) 60 * coherent_kern_range(start, end)
@@ -108,8 +116,10 @@ ENTRY(v4_dma_inv_range)
108 * - end - virtual end address 116 * - end - virtual end address
109 */ 117 */
110ENTRY(v4_dma_flush_range) 118ENTRY(v4_dma_flush_range)
119#ifdef CONFIG_CPU_CP15
111 mov r0, #0 120 mov r0, #0
112 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache 121 mcr p15, 0, r0, c7, c7, 0 @ flush ID cache
122#endif
113 /* FALLTHROUGH */ 123 /* FALLTHROUGH */
114 124
115/* 125/*
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index c5e0622c7765..f0943d160ffe 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -131,10 +131,11 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
131 force_sig_info(sig, &si, tsk); 131 force_sig_info(sig, &si, tsk);
132} 132}
133 133
134void 134void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
135do_bad_area(struct task_struct *tsk, struct mm_struct *mm, unsigned long addr,
136 unsigned int fsr, struct pt_regs *regs)
137{ 135{
136 struct task_struct *tsk = current;
137 struct mm_struct *mm = tsk->active_mm;
138
138 /* 139 /*
139 * If we are in kernel mode at this point, we 140 * If we are in kernel mode at this point, we
140 * have no context to handle this fault with. 141 * have no context to handle this fault with.
@@ -319,7 +320,6 @@ static int
319do_translation_fault(unsigned long addr, unsigned int fsr, 320do_translation_fault(unsigned long addr, unsigned int fsr,
320 struct pt_regs *regs) 321 struct pt_regs *regs)
321{ 322{
322 struct task_struct *tsk;
323 unsigned int index; 323 unsigned int index;
324 pgd_t *pgd, *pgd_k; 324 pgd_t *pgd, *pgd_k;
325 pmd_t *pmd, *pmd_k; 325 pmd_t *pmd, *pmd_k;
@@ -351,9 +351,7 @@ do_translation_fault(unsigned long addr, unsigned int fsr,
351 return 0; 351 return 0;
352 352
353bad_area: 353bad_area:
354 tsk = current; 354 do_bad_area(addr, fsr, regs);
355
356 do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
357 return 0; 355 return 0;
358} 356}
359 357
@@ -364,8 +362,7 @@ bad_area:
364static int 362static int
365do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) 363do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
366{ 364{
367 struct task_struct *tsk = current; 365 do_bad_area(addr, fsr, regs);
368 do_bad_area(tsk, tsk->active_mm, addr, fsr, regs);
369 return 0; 366 return 0;
370} 367}
371 368
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index 73b59e83227f..49e9e3804de4 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -1,6 +1,3 @@
1void do_bad_area(struct task_struct *tsk, struct mm_struct *mm, 1void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
2 unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3
4void show_pte(struct mm_struct *mm, unsigned long addr);
5 2
6unsigned long search_exception_table(unsigned long addr); 3unsigned long search_exception_table(unsigned long addr);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 64262bda8e54..22217fe2650b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -27,10 +27,7 @@
27 27
28#include "mm.h" 28#include "mm.h"
29 29
30DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); 30extern void _text, _etext, __data_start, _end, __init_begin, __init_end;
31
32extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
33extern void _stext, _text, _etext, __data_start, _end, __init_begin, __init_end;
34extern unsigned long phys_initrd_start; 31extern unsigned long phys_initrd_start;
35extern unsigned long phys_initrd_size; 32extern unsigned long phys_initrd_size;
36 33
@@ -40,17 +37,6 @@ extern unsigned long phys_initrd_size;
40 */ 37 */
41static struct meminfo meminfo __initdata = { 0, }; 38static struct meminfo meminfo __initdata = { 0, };
42 39
43/*
44 * empty_zero_page is a special page that is used for
45 * zero-initialized data and COW.
46 */
47struct page *empty_zero_page;
48
49/*
50 * The pmd table for the upper-most set of pages.
51 */
52pmd_t *top_pmd;
53
54void show_mem(void) 40void show_mem(void)
55{ 41{
56 int free = 0, total = 0, reserved = 0; 42 int free = 0, total = 0, reserved = 0;
@@ -173,57 +159,18 @@ static int __init check_initrd(struct meminfo *mi)
173 return initrd_node; 159 return initrd_node;
174} 160}
175 161
176/* 162static inline void map_memory_bank(struct membank *bank)
177 * Reserve the various regions of node 0
178 */
179static __init void reserve_node_zero(pg_data_t *pgdat)
180{ 163{
181 unsigned long res_size = 0; 164#ifdef CONFIG_MMU
182 165 struct map_desc map;
183 /*
184 * Register the kernel text and data with bootmem.
185 * Note that this can only be in node 0.
186 */
187#ifdef CONFIG_XIP_KERNEL
188 reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
189#else
190 reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
191#endif
192
193 /*
194 * Reserve the page tables. These are already in use,
195 * and can only be in node 0.
196 */
197 reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
198 PTRS_PER_PGD * sizeof(pgd_t));
199 166
200 /* 167 map.pfn = __phys_to_pfn(bank->start);
201 * Hmm... This should go elsewhere, but we really really need to 168 map.virtual = __phys_to_virt(bank->start);
202 * stop things allocating the low memory; ideally we need a better 169 map.length = bank->size;
203 * implementation of GFP_DMA which does not assume that DMA-able 170 map.type = MT_MEMORY;
204 * memory starts at zero.
205 */
206 if (machine_is_integrator() || machine_is_cintegrator())
207 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
208 171
209 /* 172 create_mapping(&map);
210 * These should likewise go elsewhere. They pre-reserve the
211 * screen memory region at the start of main system memory.
212 */
213 if (machine_is_edb7211())
214 res_size = 0x00020000;
215 if (machine_is_p720t())
216 res_size = 0x00014000;
217
218#ifdef CONFIG_SA1111
219 /*
220 * Because of the SA1111 DMA bug, we want to preserve our
221 * precious DMA-able memory...
222 */
223 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
224#endif 173#endif
225 if (res_size)
226 reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
227} 174}
228 175
229static unsigned long __init 176static unsigned long __init
@@ -242,23 +189,18 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
242 * Calculate the pfn range, and map the memory banks for this node. 189 * Calculate the pfn range, and map the memory banks for this node.
243 */ 190 */
244 for_each_nodebank(i, mi, node) { 191 for_each_nodebank(i, mi, node) {
192 struct membank *bank = &mi->bank[i];
245 unsigned long start, end; 193 unsigned long start, end;
246 struct map_desc map;
247 194
248 start = mi->bank[i].start >> PAGE_SHIFT; 195 start = bank->start >> PAGE_SHIFT;
249 end = (mi->bank[i].start + mi->bank[i].size) >> PAGE_SHIFT; 196 end = (bank->start + bank->size) >> PAGE_SHIFT;
250 197
251 if (start_pfn > start) 198 if (start_pfn > start)
252 start_pfn = start; 199 start_pfn = start;
253 if (end_pfn < end) 200 if (end_pfn < end)
254 end_pfn = end; 201 end_pfn = end;
255 202
256 map.pfn = __phys_to_pfn(mi->bank[i].start); 203 map_memory_bank(bank);
257 map.virtual = __phys_to_virt(mi->bank[i].start);
258 map.length = mi->bank[i].size;
259 map.type = MT_MEMORY;
260
261 create_mapping(&map);
262 } 204 }
263 205
264 /* 206 /*
@@ -340,9 +282,9 @@ bootmem_init_node(int node, int initrd_node, struct meminfo *mi)
340 return end_pfn; 282 return end_pfn;
341} 283}
342 284
343static void __init bootmem_init(struct meminfo *mi) 285void __init bootmem_init(struct meminfo *mi)
344{ 286{
345 unsigned long addr, memend_pfn = 0; 287 unsigned long memend_pfn = 0;
346 int node, initrd_node, i; 288 int node, initrd_node, i;
347 289
348 /* 290 /*
@@ -355,26 +297,6 @@ static void __init bootmem_init(struct meminfo *mi)
355 memcpy(&meminfo, mi, sizeof(meminfo)); 297 memcpy(&meminfo, mi, sizeof(meminfo));
356 298
357 /* 299 /*
358 * Clear out all the mappings below the kernel image.
359 */
360 for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
361 pmd_clear(pmd_off_k(addr));
362#ifdef CONFIG_XIP_KERNEL
363 /* The XIP kernel is mapped in the module area -- skip over it */
364 addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
365#endif
366 for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
367 pmd_clear(pmd_off_k(addr));
368
369 /*
370 * Clear out all the kernel space mappings, except for the first
371 * memory bank, up to the end of the vmalloc region.
372 */
373 for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
374 addr < VMALLOC_END; addr += PGDIR_SIZE)
375 pmd_clear(pmd_off_k(addr));
376
377 /*
378 * Locate which node contains the ramdisk image, if any. 300 * Locate which node contains the ramdisk image, if any.
379 */ 301 */
380 initrd_node = check_initrd(mi); 302 initrd_node = check_initrd(mi);
@@ -407,114 +329,6 @@ static void __init bootmem_init(struct meminfo *mi)
407 max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET; 329 max_pfn = max_low_pfn = memend_pfn - PHYS_PFN_OFFSET;
408} 330}
409 331
410/*
411 * Set up device the mappings. Since we clear out the page tables for all
412 * mappings above VMALLOC_END, we will remove any debug device mappings.
413 * This means you have to be careful how you debug this function, or any
414 * called function. This means you can't use any function or debugging
415 * method which may touch any device, otherwise the kernel _will_ crash.
416 */
417static void __init devicemaps_init(struct machine_desc *mdesc)
418{
419 struct map_desc map;
420 unsigned long addr;
421 void *vectors;
422
423 /*
424 * Allocate the vector page early.
425 */
426 vectors = alloc_bootmem_low_pages(PAGE_SIZE);
427 BUG_ON(!vectors);
428
429 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
430 pmd_clear(pmd_off_k(addr));
431
432 /*
433 * Map the kernel if it is XIP.
434 * It is always first in the modulearea.
435 */
436#ifdef CONFIG_XIP_KERNEL
437 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
438 map.virtual = MODULE_START;
439 map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
440 map.type = MT_ROM;
441 create_mapping(&map);
442#endif
443
444 /*
445 * Map the cache flushing regions.
446 */
447#ifdef FLUSH_BASE
448 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
449 map.virtual = FLUSH_BASE;
450 map.length = SZ_1M;
451 map.type = MT_CACHECLEAN;
452 create_mapping(&map);
453#endif
454#ifdef FLUSH_BASE_MINICACHE
455 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
456 map.virtual = FLUSH_BASE_MINICACHE;
457 map.length = SZ_1M;
458 map.type = MT_MINICLEAN;
459 create_mapping(&map);
460#endif
461
462 /*
463 * Create a mapping for the machine vectors at the high-vectors
464 * location (0xffff0000). If we aren't using high-vectors, also
465 * create a mapping at the low-vectors virtual address.
466 */
467 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
468 map.virtual = 0xffff0000;
469 map.length = PAGE_SIZE;
470 map.type = MT_HIGH_VECTORS;
471 create_mapping(&map);
472
473 if (!vectors_high()) {
474 map.virtual = 0;
475 map.type = MT_LOW_VECTORS;
476 create_mapping(&map);
477 }
478
479 /*
480 * Ask the machine support to map in the statically mapped devices.
481 */
482 if (mdesc->map_io)
483 mdesc->map_io();
484
485 /*
486 * Finally flush the caches and tlb to ensure that we're in a
487 * consistent state wrt the writebuffer. This also ensures that
488 * any write-allocated cache lines in the vector page are written
489 * back. After this point, we can start to touch devices again.
490 */
491 local_flush_tlb_all();
492 flush_cache_all();
493}
494
495/*
496 * paging_init() sets up the page tables, initialises the zone memory
497 * maps, and sets up the zero page, bad page and bad page tables.
498 */
499void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
500{
501 void *zero_page;
502
503 build_mem_type_table();
504 bootmem_init(mi);
505 devicemaps_init(mdesc);
506
507 top_pmd = pmd_off_k(0xffff0000);
508
509 /*
510 * allocate the zero page. Note that we count on this going ok.
511 */
512 zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
513 memzero(zero_page, PAGE_SIZE);
514 empty_zero_page = virt_to_page(zero_page);
515 flush_dcache_page(empty_zero_page);
516}
517
518static inline void free_area(unsigned long addr, unsigned long end, char *s) 332static inline void free_area(unsigned long addr, unsigned long end, char *s)
519{ 333{
520 unsigned int size = (end - addr) >> 10; 334 unsigned int size = (end - addr) >> 10;
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 8d73ffbce8df..bb2bc9ab6bd3 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -14,6 +14,9 @@ static inline pmd_t *pmd_off_k(unsigned long virt)
14} 14}
15 15
16struct map_desc; 16struct map_desc;
17struct meminfo;
18struct pglist_data;
17 19
18void __init build_mem_type_table(void);
19void __init create_mapping(struct map_desc *md); 20void __init create_mapping(struct map_desc *md);
21void __init bootmem_init(struct meminfo *mi);
22void reserve_node_zero(struct pglist_data *pgdat);
diff --git a/arch/arm/mm/mm-armv.c b/arch/arm/mm/mmu.c
index ee9647823fad..e566cbe4b222 100644
--- a/arch/arm/mm/mm-armv.c
+++ b/arch/arm/mm/mmu.c
@@ -1,30 +1,46 @@
1/* 1/*
2 * linux/arch/arm/mm/mm-armv.c 2 * linux/arch/arm/mm/mmu.c
3 * 3 *
4 * Copyright (C) 1998-2005 Russell King 4 * Copyright (C) 1995-2005 Russell King
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify 6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as 7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 *
10 * Page table sludge for ARM v3 and v4 processor architectures.
11 */ 9 */
12#include <linux/module.h> 10#include <linux/module.h>
13#include <linux/mm.h> 11#include <linux/kernel.h>
12#include <linux/errno.h>
14#include <linux/init.h> 13#include <linux/init.h>
15#include <linux/bootmem.h> 14#include <linux/bootmem.h>
16#include <linux/highmem.h> 15#include <linux/mman.h>
17#include <linux/nodemask.h> 16#include <linux/nodemask.h>
18 17
19#include <asm/pgalloc.h> 18#include <asm/mach-types.h>
20#include <asm/page.h>
21#include <asm/setup.h> 19#include <asm/setup.h>
22#include <asm/tlbflush.h> 20#include <asm/sizes.h>
21#include <asm/tlb.h>
23 22
23#include <asm/mach/arch.h>
24#include <asm/mach/map.h> 24#include <asm/mach/map.h>
25 25
26#include "mm.h" 26#include "mm.h"
27 27
28DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
29
30extern void _stext, __data_start, _end;
31extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
32
33/*
34 * empty_zero_page is a special page that is used for
35 * zero-initialized data and COW.
36 */
37struct page *empty_zero_page;
38
39/*
40 * The pmd table for the upper-most set of pages.
41 */
42pmd_t *top_pmd;
43
28#define CPOLICY_UNCACHED 0 44#define CPOLICY_UNCACHED 0
29#define CPOLICY_BUFFERED 1 45#define CPOLICY_BUFFERED 1
30#define CPOLICY_WRITETHROUGH 2 46#define CPOLICY_WRITETHROUGH 2
@@ -99,6 +115,7 @@ static void __init early_cachepolicy(char **p)
99 flush_cache_all(); 115 flush_cache_all();
100 set_cr(cr_alignment); 116 set_cr(cr_alignment);
101} 117}
118__early_param("cachepolicy=", early_cachepolicy);
102 119
103static void __init early_nocache(char **__unused) 120static void __init early_nocache(char **__unused)
104{ 121{
@@ -106,6 +123,7 @@ static void __init early_nocache(char **__unused)
106 printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); 123 printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
107 early_cachepolicy(&p); 124 early_cachepolicy(&p);
108} 125}
126__early_param("nocache", early_nocache);
109 127
110static void __init early_nowrite(char **__unused) 128static void __init early_nowrite(char **__unused)
111{ 129{
@@ -113,6 +131,7 @@ static void __init early_nowrite(char **__unused)
113 printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); 131 printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
114 early_cachepolicy(&p); 132 early_cachepolicy(&p);
115} 133}
134__early_param("nowb", early_nowrite);
116 135
117static void __init early_ecc(char **p) 136static void __init early_ecc(char **p)
118{ 137{
@@ -124,10 +143,6 @@ static void __init early_ecc(char **p)
124 *p += 3; 143 *p += 3;
125 } 144 }
126} 145}
127
128__early_param("nocache", early_nocache);
129__early_param("nowb", early_nowrite);
130__early_param("cachepolicy=", early_cachepolicy);
131__early_param("ecc=", early_ecc); 146__early_param("ecc=", early_ecc);
132 147
133static int __init noalign_setup(char *__unused) 148static int __init noalign_setup(char *__unused)
@@ -137,149 +152,8 @@ static int __init noalign_setup(char *__unused)
137 set_cr(cr_alignment); 152 set_cr(cr_alignment);
138 return 1; 153 return 1;
139} 154}
140
141__setup("noalign", noalign_setup); 155__setup("noalign", noalign_setup);
142 156
143#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
144
145/*
146 * need to get a 16k page for level 1
147 */
148pgd_t *get_pgd_slow(struct mm_struct *mm)
149{
150 pgd_t *new_pgd, *init_pgd;
151 pmd_t *new_pmd, *init_pmd;
152 pte_t *new_pte, *init_pte;
153
154 new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
155 if (!new_pgd)
156 goto no_pgd;
157
158 memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
159
160 /*
161 * Copy over the kernel and IO PGD entries
162 */
163 init_pgd = pgd_offset_k(0);
164 memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
165 (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
166
167 clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
168
169 if (!vectors_high()) {
170 /*
171 * On ARM, first page must always be allocated since it
172 * contains the machine vectors.
173 */
174 new_pmd = pmd_alloc(mm, new_pgd, 0);
175 if (!new_pmd)
176 goto no_pmd;
177
178 new_pte = pte_alloc_map(mm, new_pmd, 0);
179 if (!new_pte)
180 goto no_pte;
181
182 init_pmd = pmd_offset(init_pgd, 0);
183 init_pte = pte_offset_map_nested(init_pmd, 0);
184 set_pte(new_pte, *init_pte);
185 pte_unmap_nested(init_pte);
186 pte_unmap(new_pte);
187 }
188
189 return new_pgd;
190
191no_pte:
192 pmd_free(new_pmd);
193no_pmd:
194 free_pages((unsigned long)new_pgd, 2);
195no_pgd:
196 return NULL;
197}
198
199void free_pgd_slow(pgd_t *pgd)
200{
201 pmd_t *pmd;
202 struct page *pte;
203
204 if (!pgd)
205 return;
206
207 /* pgd is always present and good */
208 pmd = pmd_off(pgd, 0);
209 if (pmd_none(*pmd))
210 goto free;
211 if (pmd_bad(*pmd)) {
212 pmd_ERROR(*pmd);
213 pmd_clear(pmd);
214 goto free;
215 }
216
217 pte = pmd_page(*pmd);
218 pmd_clear(pmd);
219 dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
220 pte_lock_deinit(pte);
221 pte_free(pte);
222 pmd_free(pmd);
223free:
224 free_pages((unsigned long) pgd, 2);
225}
226
227/*
228 * Create a SECTION PGD between VIRT and PHYS in domain
229 * DOMAIN with protection PROT. This operates on half-
230 * pgdir entry increments.
231 */
232static inline void
233alloc_init_section(unsigned long virt, unsigned long phys, int prot)
234{
235 pmd_t *pmdp = pmd_off_k(virt);
236
237 if (virt & (1 << 20))
238 pmdp++;
239
240 *pmdp = __pmd(phys | prot);
241 flush_pmd_entry(pmdp);
242}
243
244/*
245 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
246 */
247static inline void
248alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
249{
250 int i;
251
252 for (i = 0; i < 16; i += 1) {
253 alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
254
255 virt += (PGDIR_SIZE / 2);
256 }
257}
258
259/*
260 * Add a PAGE mapping between VIRT and PHYS in domain
261 * DOMAIN with protection PROT. Note that due to the
262 * way we map the PTEs, we must allocate two PTE_SIZE'd
263 * blocks - one for the Linux pte table, and one for
264 * the hardware pte table.
265 */
266static inline void
267alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
268{
269 pmd_t *pmdp = pmd_off_k(virt);
270 pte_t *ptep;
271
272 if (pmd_none(*pmdp)) {
273 ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
274 sizeof(pte_t));
275
276 __pmd_populate(pmdp, __pa(ptep) | prot_l1);
277 }
278 ptep = pte_offset_kernel(pmdp, virt);
279
280 set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
281}
282
283struct mem_types { 157struct mem_types {
284 unsigned int prot_pte; 158 unsigned int prot_pte;
285 unsigned int prot_l1; 159 unsigned int prot_l1;
@@ -344,7 +218,7 @@ static struct mem_types mem_types[] __initdata = {
344/* 218/*
345 * Adjust the PMD section entries according to the CPU in use. 219 * Adjust the PMD section entries according to the CPU in use.
346 */ 220 */
347void __init build_mem_type_table(void) 221static void __init build_mem_type_table(void)
348{ 222{
349 struct cachepolicy *cp; 223 struct cachepolicy *cp;
350 unsigned int cr = get_cr(); 224 unsigned int cr = get_cr();
@@ -482,6 +356,62 @@ void __init build_mem_type_table(void)
482#define vectors_base() (vectors_high() ? 0xffff0000 : 0) 356#define vectors_base() (vectors_high() ? 0xffff0000 : 0)
483 357
484/* 358/*
359 * Create a SECTION PGD between VIRT and PHYS in domain
360 * DOMAIN with protection PROT. This operates on half-
361 * pgdir entry increments.
362 */
363static inline void
364alloc_init_section(unsigned long virt, unsigned long phys, int prot)
365{
366 pmd_t *pmdp = pmd_off_k(virt);
367
368 if (virt & (1 << 20))
369 pmdp++;
370
371 *pmdp = __pmd(phys | prot);
372 flush_pmd_entry(pmdp);
373}
374
375/*
376 * Create a SUPER SECTION PGD between VIRT and PHYS with protection PROT
377 */
378static inline void
379alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
380{
381 int i;
382
383 for (i = 0; i < 16; i += 1) {
384 alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);
385
386 virt += (PGDIR_SIZE / 2);
387 }
388}
389
390/*
391 * Add a PAGE mapping between VIRT and PHYS in domain
392 * DOMAIN with protection PROT. Note that due to the
393 * way we map the PTEs, we must allocate two PTE_SIZE'd
394 * blocks - one for the Linux pte table, and one for
395 * the hardware pte table.
396 */
397static inline void
398alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
399{
400 pmd_t *pmdp = pmd_off_k(virt);
401 pte_t *ptep;
402
403 if (pmd_none(*pmdp)) {
404 ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
405 sizeof(pte_t));
406
407 __pmd_populate(pmdp, __pa(ptep) | prot_l1);
408 }
409 ptep = pte_offset_kernel(pmdp, virt);
410
411 set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
412}
413
414/*
485 * Create the page directory entries and any necessary 415 * Create the page directory entries and any necessary
486 * page tables for the mapping specified by `md'. We 416 * page tables for the mapping specified by `md'. We
487 * are able to cope here with varying sizes and address 417 * are able to cope here with varying sizes and address
@@ -611,6 +541,205 @@ void __init create_mapping(struct map_desc *md)
611} 541}
612 542
613/* 543/*
544 * Create the architecture specific mappings
545 */
546void __init iotable_init(struct map_desc *io_desc, int nr)
547{
548 int i;
549
550 for (i = 0; i < nr; i++)
551 create_mapping(io_desc + i);
552}
553
554static inline void prepare_page_table(struct meminfo *mi)
555{
556 unsigned long addr;
557
558 /*
559 * Clear out all the mappings below the kernel image.
560 */
561 for (addr = 0; addr < MODULE_START; addr += PGDIR_SIZE)
562 pmd_clear(pmd_off_k(addr));
563
564#ifdef CONFIG_XIP_KERNEL
565 /* The XIP kernel is mapped in the module area -- skip over it */
566 addr = ((unsigned long)&_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
567#endif
568 for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
569 pmd_clear(pmd_off_k(addr));
570
571 /*
572 * Clear out all the kernel space mappings, except for the first
573 * memory bank, up to the end of the vmalloc region.
574 */
575 for (addr = __phys_to_virt(mi->bank[0].start + mi->bank[0].size);
576 addr < VMALLOC_END; addr += PGDIR_SIZE)
577 pmd_clear(pmd_off_k(addr));
578}
579
580/*
581 * Reserve the various regions of node 0
582 */
583void __init reserve_node_zero(pg_data_t *pgdat)
584{
585 unsigned long res_size = 0;
586
587 /*
588 * Register the kernel text and data with bootmem.
589 * Note that this can only be in node 0.
590 */
591#ifdef CONFIG_XIP_KERNEL
592 reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
593#else
594 reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
595#endif
596
597 /*
598 * Reserve the page tables. These are already in use,
599 * and can only be in node 0.
600 */
601 reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
602 PTRS_PER_PGD * sizeof(pgd_t));
603
604 /*
605 * Hmm... This should go elsewhere, but we really really need to
606 * stop things allocating the low memory; ideally we need a better
607 * implementation of GFP_DMA which does not assume that DMA-able
608 * memory starts at zero.
609 */
610 if (machine_is_integrator() || machine_is_cintegrator())
611 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
612
613 /*
614 * These should likewise go elsewhere. They pre-reserve the
615 * screen memory region at the start of main system memory.
616 */
617 if (machine_is_edb7211())
618 res_size = 0x00020000;
619 if (machine_is_p720t())
620 res_size = 0x00014000;
621
622#ifdef CONFIG_SA1111
623 /*
624 * Because of the SA1111 DMA bug, we want to preserve our
625 * precious DMA-able memory...
626 */
627 res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
628#endif
629 if (res_size)
630 reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size);
631}
632
633/*
634 * Set up device the mappings. Since we clear out the page tables for all
635 * mappings above VMALLOC_END, we will remove any debug device mappings.
636 * This means you have to be careful how you debug this function, or any
637 * called function. This means you can't use any function or debugging
638 * method which may touch any device, otherwise the kernel _will_ crash.
639 */
640static void __init devicemaps_init(struct machine_desc *mdesc)
641{
642 struct map_desc map;
643 unsigned long addr;
644 void *vectors;
645
646 /*
647 * Allocate the vector page early.
648 */
649 vectors = alloc_bootmem_low_pages(PAGE_SIZE);
650 BUG_ON(!vectors);
651
652 for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
653 pmd_clear(pmd_off_k(addr));
654
655 /*
656 * Map the kernel if it is XIP.
657 * It is always first in the modulearea.
658 */
659#ifdef CONFIG_XIP_KERNEL
660 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
661 map.virtual = MODULE_START;
662 map.length = ((unsigned long)&_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
663 map.type = MT_ROM;
664 create_mapping(&map);
665#endif
666
667 /*
668 * Map the cache flushing regions.
669 */
670#ifdef FLUSH_BASE
671 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
672 map.virtual = FLUSH_BASE;
673 map.length = SZ_1M;
674 map.type = MT_CACHECLEAN;
675 create_mapping(&map);
676#endif
677#ifdef FLUSH_BASE_MINICACHE
678 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
679 map.virtual = FLUSH_BASE_MINICACHE;
680 map.length = SZ_1M;
681 map.type = MT_MINICLEAN;
682 create_mapping(&map);
683#endif
684
685 /*
686 * Create a mapping for the machine vectors at the high-vectors
687 * location (0xffff0000). If we aren't using high-vectors, also
688 * create a mapping at the low-vectors virtual address.
689 */
690 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
691 map.virtual = 0xffff0000;
692 map.length = PAGE_SIZE;
693 map.type = MT_HIGH_VECTORS;
694 create_mapping(&map);
695
696 if (!vectors_high()) {
697 map.virtual = 0;
698 map.type = MT_LOW_VECTORS;
699 create_mapping(&map);
700 }
701
702 /*
703 * Ask the machine support to map in the statically mapped devices.
704 */
705 if (mdesc->map_io)
706 mdesc->map_io();
707
708 /*
709 * Finally flush the caches and tlb to ensure that we're in a
710 * consistent state wrt the writebuffer. This also ensures that
711 * any write-allocated cache lines in the vector page are written
712 * back. After this point, we can start to touch devices again.
713 */
714 local_flush_tlb_all();
715 flush_cache_all();
716}
717
718/*
719 * paging_init() sets up the page tables, initialises the zone memory
720 * maps, and sets up the zero page, bad page and bad page tables.
721 */
722void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
723{
724 void *zero_page;
725
726 build_mem_type_table();
727 prepare_page_table(mi);
728 bootmem_init(mi);
729 devicemaps_init(mdesc);
730
731 top_pmd = pmd_off_k(0xffff0000);
732
733 /*
734 * allocate the zero page. Note that we count on this going ok.
735 */
736 zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
737 memzero(zero_page, PAGE_SIZE);
738 empty_zero_page = virt_to_page(zero_page);
739 flush_dcache_page(empty_zero_page);
740}
741
742/*
614 * In order to soft-boot, we need to insert a 1:1 mapping in place of 743 * In order to soft-boot, we need to insert a 1:1 mapping in place of
615 * the user-mode pages. This will then ensure that we have predictable 744 * the user-mode pages. This will then ensure that we have predictable
616 * results when turning the mmu off 745 * results when turning the mmu off
@@ -640,14 +769,3 @@ void setup_mm_for_reboot(char mode)
640 flush_pmd_entry(pmd); 769 flush_pmd_entry(pmd);
641 } 770 }
642} 771}
643
644/*
645 * Create the architecture specific mappings
646 */
647void __init iotable_init(struct map_desc *io_desc, int nr)
648{
649 int i;
650
651 for (i = 0; i < nr; i++)
652 create_mapping(io_desc + i);
653}
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 1464ed817b5d..d0e66424a597 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -11,6 +11,49 @@
11#include <asm/io.h> 11#include <asm/io.h>
12#include <asm/page.h> 12#include <asm/page.h>
13 13
14#include "mm.h"
15
16extern void _stext, __data_start, _end;
17
18/*
19 * Reserve the various regions of node 0
20 */
21void __init reserve_node_zero(pg_data_t *pgdat)
22{
23 /*
24 * Register the kernel text and data with bootmem.
25 * Note that this can only be in node 0.
26 */
27#ifdef CONFIG_XIP_KERNEL
28 reserve_bootmem_node(pgdat, __pa(&__data_start), &_end - &__data_start);
29#else
30 reserve_bootmem_node(pgdat, __pa(&_stext), &_end - &_stext);
31#endif
32
33 /*
34 * Register the exception vector page.
35 * some architectures which the DRAM is the exception vector to trap,
36 * alloc_page breaks with error, although it is not NULL, but "0."
37 */
38 reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE);
39}
40
41/*
42 * paging_init() sets up the page tables, initialises the zone memory
43 * maps, and sets up the zero page, bad page and bad page tables.
44 */
45void __init paging_init(struct meminfo *mi, struct machine_desc *mdesc)
46{
47 bootmem_init(mi);
48}
49
50/*
51 * We don't need to do anything here for nommu machines.
52 */
53void setup_mm_for_reboot(char mode)
54{
55}
56
14void flush_dcache_page(struct page *page) 57void flush_dcache_page(struct page *page)
15{ 58{
16 __cpuc_flush_dcache_page(page_address(page)); 59 __cpuc_flush_dcache_page(page_address(page));
diff --git a/arch/arm/mm/pgd.c b/arch/arm/mm/pgd.c
new file mode 100644
index 000000000000..20c1b0df75f2
--- /dev/null
+++ b/arch/arm/mm/pgd.c
@@ -0,0 +1,101 @@
1/*
2 * linux/arch/arm/mm/pgd.c
3 *
4 * Copyright (C) 1998-2005 Russell King
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10#include <linux/mm.h>
11#include <linux/highmem.h>
12
13#include <asm/pgalloc.h>
14#include <asm/page.h>
15#include <asm/tlbflush.h>
16
17#include "mm.h"
18
19#define FIRST_KERNEL_PGD_NR (FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
20
21/*
22 * need to get a 16k page for level 1
23 */
24pgd_t *get_pgd_slow(struct mm_struct *mm)
25{
26 pgd_t *new_pgd, *init_pgd;
27 pmd_t *new_pmd, *init_pmd;
28 pte_t *new_pte, *init_pte;
29
30 new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
31 if (!new_pgd)
32 goto no_pgd;
33
34 memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));
35
36 /*
37 * Copy over the kernel and IO PGD entries
38 */
39 init_pgd = pgd_offset_k(0);
40 memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
41 (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));
42
43 clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));
44
45 if (!vectors_high()) {
46 /*
47 * On ARM, first page must always be allocated since it
48 * contains the machine vectors.
49 */
50 new_pmd = pmd_alloc(mm, new_pgd, 0);
51 if (!new_pmd)
52 goto no_pmd;
53
54 new_pte = pte_alloc_map(mm, new_pmd, 0);
55 if (!new_pte)
56 goto no_pte;
57
58 init_pmd = pmd_offset(init_pgd, 0);
59 init_pte = pte_offset_map_nested(init_pmd, 0);
60 set_pte(new_pte, *init_pte);
61 pte_unmap_nested(init_pte);
62 pte_unmap(new_pte);
63 }
64
65 return new_pgd;
66
67no_pte:
68 pmd_free(new_pmd);
69no_pmd:
70 free_pages((unsigned long)new_pgd, 2);
71no_pgd:
72 return NULL;
73}
74
75void free_pgd_slow(pgd_t *pgd)
76{
77 pmd_t *pmd;
78 struct page *pte;
79
80 if (!pgd)
81 return;
82
83 /* pgd is always present and good */
84 pmd = pmd_off(pgd, 0);
85 if (pmd_none(*pmd))
86 goto free;
87 if (pmd_bad(*pmd)) {
88 pmd_ERROR(*pmd);
89 pmd_clear(pmd);
90 goto free;
91 }
92
93 pte = pmd_page(*pmd);
94 pmd_clear(pmd);
95 dec_zone_page_state(virt_to_page((unsigned long *)pgd), NR_PAGETABLE);
96 pte_lock_deinit(pte);
97 pte_free(pte);
98 pmd_free(pmd);
99free:
100 free_pages((unsigned long) pgd, 2);
101}
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S
new file mode 100644
index 000000000000..40713818a87b
--- /dev/null
+++ b/arch/arm/mm/proc-arm740.S
@@ -0,0 +1,174 @@
1/*
2 * linux/arch/arm/mm/arm740.S: utility functions for ARM740
3 *
4 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <asm/assembler.h>
14#include <asm/asm-offsets.h>
15#include <asm/pgtable-hwdef.h>
16#include <asm/pgtable.h>
17#include <asm/procinfo.h>
18#include <asm/ptrace.h>
19
20 .text
21/*
22 * cpu_arm740_proc_init()
23 * cpu_arm740_do_idle()
24 * cpu_arm740_dcache_clean_area()
25 * cpu_arm740_switch_mm()
26 *
27 * These are not required.
28 */
29ENTRY(cpu_arm740_proc_init)
30ENTRY(cpu_arm740_do_idle)
31ENTRY(cpu_arm740_dcache_clean_area)
32ENTRY(cpu_arm740_switch_mm)
33 mov pc, lr
34
35/*
36 * cpu_arm740_proc_fin()
37 */
38ENTRY(cpu_arm740_proc_fin)
39 stmfd sp!, {lr}
40 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
41 msr cpsr_c, ip
42 mrc p15, 0, r0, c1, c0, 0
43 bic r0, r0, #0x3f000000 @ bank/f/lock/s
44 bic r0, r0, #0x0000000c @ w-buffer/cache
45 mcr p15, 0, r0, c1, c0, 0 @ disable caches
46 mcr p15, 0, r0, c7, c0, 0 @ invalidate cache
47 ldmfd sp!, {pc}
48
49/*
50 * cpu_arm740_reset(loc)
51 * Params : r0 = address to jump to
52 * Notes : This sets up everything for a reset
53 */
54ENTRY(cpu_arm740_reset)
55 mov ip, #0
56 mcr p15, 0, ip, c7, c0, 0 @ invalidate cache
57 mrc p15, 0, ip, c1, c0, 0 @ get ctrl register
58 bic ip, ip, #0x0000000c @ ............wc..
59 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
60 mov pc, r0
61
62 __INIT
63
64 .type __arm740_setup, #function
65__arm740_setup:
66 mov r0, #0
67 mcr p15, 0, r0, c7, c0, 0 @ invalidate caches
68
69 mcr p15, 0, r0, c6, c3 @ disable area 3~7
70 mcr p15, 0, r0, c6, c4
71 mcr p15, 0, r0, c6, c5
72 mcr p15, 0, r0, c6, c6
73 mcr p15, 0, r0, c6, c7
74
75 mov r0, #0x0000003F @ base = 0, size = 4GB
76 mcr p15, 0, r0, c6, c0 @ set area 0, default
77
78 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
79 ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
80 mov r2, #10 @ 11 is the minimum (4KB)
811: add r2, r2, #1 @ area size *= 2
82	movs	r1, r1, lsr #1
83 bne 1b @ count not zero r-shift
84 orr r0, r0, r2, lsl #1 @ the area register value
85 orr r0, r0, #1 @ set enable bit
86 mcr p15, 0, r0, c6, c1 @ set area 1, RAM
87
88 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
89 ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
90 mov r2, #10 @ 11 is the minimum (4KB)
911: add r2, r2, #1 @ area size *= 2
92	movs	r1, r1, lsr #1
93 bne 1b @ count not zero r-shift
94 orr r0, r0, r2, lsl #1 @ the area register value
95 orr r0, r0, #1 @ set enable bit
96 mcr p15, 0, r0, c6, c2 @ set area 2, ROM/FLASH
97
98 mov r0, #0x06
99 mcr p15, 0, r0, c2, c0 @ Region 1&2 cacheable
100#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
101 mov r0, #0x00 @ disable whole write buffer
102#else
103	mov	r0, #0x02			@ Region 1 write buffered
104#endif
105 mcr p15, 0, r0, c3, c0
106
107 mov r0, #0x10000
108 sub r0, r0, #1 @ r0 = 0xffff
109 mcr p15, 0, r0, c5, c0 @ all read/write access
110
111 mrc p15, 0, r0, c1, c0 @ get control register
112 bic r0, r0, #0x3F000000 @ set to standard caching mode
113 @ need some benchmark
114 orr r0, r0, #0x0000000d @ MPU/Cache/WB
115
116 mov pc, lr
117
118 .size __arm740_setup, . - __arm740_setup
119
120 __INITDATA
121
122/*
123 * Purpose : Function pointers used to access above functions - all calls
124 * come through these
125 */
126 .type arm740_processor_functions, #object
127ENTRY(arm740_processor_functions)
128 .word v4t_late_abort
129 .word cpu_arm740_proc_init
130 .word cpu_arm740_proc_fin
131 .word cpu_arm740_reset
132 .word cpu_arm740_do_idle
133 .word cpu_arm740_dcache_clean_area
134 .word cpu_arm740_switch_mm
135 .word 0 @ cpu_*_set_pte
136 .size arm740_processor_functions, . - arm740_processor_functions
137
138 .section ".rodata"
139
140 .type cpu_arch_name, #object
141cpu_arch_name:
142 .asciz "armv4"
143 .size cpu_arch_name, . - cpu_arch_name
144
145 .type cpu_elf_name, #object
146cpu_elf_name:
147 .asciz "v4"
148 .size cpu_elf_name, . - cpu_elf_name
149
150 .type cpu_arm740_name, #object
151cpu_arm740_name:
152 .ascii "ARM740T"
153 .size cpu_arm740_name, . - cpu_arm740_name
154
155 .align
156
157 .section ".proc.info.init", #alloc, #execinstr
158 .type __arm740_proc_info,#object
159__arm740_proc_info:
160 .long 0x41807400
161 .long 0xfffffff0
162 .long 0
163 b __arm740_setup
164 .long cpu_arch_name
165 .long cpu_elf_name
166 .long HWCAP_SWP | HWCAP_HALF | HWCAP_26BIT
167 .long cpu_arm740_name
168 .long arm740_processor_functions
169 .long 0
170 .long 0
171 .long v3_cache_fns @ cache model
172 .size __arm740_proc_info, . - __arm740_proc_info
173
174
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S
new file mode 100644
index 000000000000..22d7e3100ea6
--- /dev/null
+++ b/arch/arm/mm/proc-arm7tdmi.S
@@ -0,0 +1,249 @@
1/*
2 * linux/arch/arm/mm/proc-arm7tdmi.S: utility functions for ARM7TDMI
3 *
4 * Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <asm/assembler.h>
14#include <asm/asm-offsets.h>
15#include <asm/pgtable-hwdef.h>
16#include <asm/pgtable.h>
17#include <asm/procinfo.h>
18#include <asm/ptrace.h>
19
20 .text
21/*
22 * cpu_arm7tdmi_proc_init()
23 * cpu_arm7tdmi_do_idle()
24 * cpu_arm7tdmi_dcache_clean_area()
25 * cpu_arm7tdmi_switch_mm()
26 *
27 * These are not required.
28 */
29ENTRY(cpu_arm7tdmi_proc_init)
30ENTRY(cpu_arm7tdmi_do_idle)
31ENTRY(cpu_arm7tdmi_dcache_clean_area)
32ENTRY(cpu_arm7tdmi_switch_mm)
33 mov pc, lr
34
35/*
36 * cpu_arm7tdmi_proc_fin()
37 */
38ENTRY(cpu_arm7tdmi_proc_fin)
39 mov r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
40 msr cpsr_c, r0
41 mov pc, lr
42
43/*
44 * Function: cpu_arm7tdmi_reset(loc)
45 * Params : loc(r0) address to jump to
46 * Purpose : Sets up everything for a reset and jump to the location for soft reset.
47 */
48ENTRY(cpu_arm7tdmi_reset)
49 mov pc, r0
50
51 __INIT
52
53 .type __arm7tdmi_setup, #function
54__arm7tdmi_setup:
55 mov pc, lr
56 .size __arm7tdmi_setup, . - __arm7tdmi_setup
57
58 __INITDATA
59
60/*
61 * Purpose : Function pointers used to access above functions - all calls
62 * come through these
63 */
64 .type arm7tdmi_processor_functions, #object
65ENTRY(arm7tdmi_processor_functions)
66 .word v4t_late_abort
67 .word cpu_arm7tdmi_proc_init
68 .word cpu_arm7tdmi_proc_fin
69 .word cpu_arm7tdmi_reset
70 .word cpu_arm7tdmi_do_idle
71 .word cpu_arm7tdmi_dcache_clean_area
72 .word cpu_arm7tdmi_switch_mm
73 .word 0 @ cpu_*_set_pte
74 .size arm7tdmi_processor_functions, . - arm7tdmi_processor_functions
75
76 .section ".rodata"
77
78 .type cpu_arch_name, #object
79cpu_arch_name:
80 .asciz "armv4t"
81 .size cpu_arch_name, . - cpu_arch_name
82
83 .type cpu_elf_name, #object
84cpu_elf_name:
85 .asciz "v4"
86 .size cpu_elf_name, . - cpu_elf_name
87
88 .type cpu_arm7tdmi_name, #object
89cpu_arm7tdmi_name:
90 .asciz "ARM7TDMI"
91 .size cpu_arm7tdmi_name, . - cpu_arm7tdmi_name
92
93 .type cpu_triscenda7_name, #object
94cpu_triscenda7_name:
95 .asciz "Triscend-A7x"
96 .size cpu_triscenda7_name, . - cpu_triscenda7_name
97
98 .type cpu_at91_name, #object
99cpu_at91_name:
100 .asciz "Atmel-AT91M40xxx"
101 .size cpu_at91_name, . - cpu_at91_name
102
103 .type cpu_s3c3410_name, #object
104cpu_s3c3410_name:
105 .asciz "Samsung-S3C3410"
106 .size cpu_s3c3410_name, . - cpu_s3c3410_name
107
108 .type cpu_s3c44b0x_name, #object
109cpu_s3c44b0x_name:
110 .asciz "Samsung-S3C44B0x"
111 .size cpu_s3c44b0x_name, . - cpu_s3c44b0x_name
112
113	.type	cpu_s3c4510b_name, #object
114cpu_s3c4510b_name:
115 .asciz "Samsung-S3C4510B"
116 .size cpu_s3c4510b_name, . - cpu_s3c4510b_name
117
118 .type cpu_s3c4530_name, #object
119cpu_s3c4530_name:
120 .asciz "Samsung-S3C4530"
121 .size cpu_s3c4530_name, . - cpu_s3c4530_name
122
123 .type cpu_netarm_name, #object
124cpu_netarm_name:
125 .asciz "NETARM"
126 .size cpu_netarm_name, . - cpu_netarm_name
127
128 .align
129
130 .section ".proc.info.init", #alloc, #execinstr
131
132 .type __arm7tdmi_proc_info, #object
133__arm7tdmi_proc_info:
134 .long 0x41007700
135 .long 0xfff8ff00
136 .long 0
137 .long 0
138 b __arm7tdmi_setup
139 .long cpu_arch_name
140 .long cpu_elf_name
141 .long HWCAP_SWP | HWCAP_26BIT
142 .long cpu_arm7tdmi_name
143 .long arm7tdmi_processor_functions
144 .long 0
145 .long 0
146 .long v4_cache_fns
147	.size	__arm7tdmi_proc_info, . - __arm7tdmi_proc_info
148
149 .type __triscenda7_proc_info, #object
150__triscenda7_proc_info:
151 .long 0x0001d2ff
152 .long 0x0001ffff
153 .long 0
154 .long 0
155 b __arm7tdmi_setup
156 .long cpu_arch_name
157 .long cpu_elf_name
158 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
159 .long cpu_triscenda7_name
160 .long arm7tdmi_processor_functions
161 .long 0
162 .long 0
163 .long v4_cache_fns
164 .size __triscenda7_proc_info, . - __triscenda7_proc_info
165
166 .type __at91_proc_info, #object
167__at91_proc_info:
168 .long 0x14000040
169 .long 0xfff000e0
170 .long 0
171 .long 0
172 b __arm7tdmi_setup
173 .long cpu_arch_name
174 .long cpu_elf_name
175 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
176 .long cpu_at91_name
177 .long arm7tdmi_processor_functions
178 .long 0
179 .long 0
180 .long v4_cache_fns
181 .size __at91_proc_info, . - __at91_proc_info
182
183 .type __s3c4510b_proc_info, #object
184__s3c4510b_proc_info:
185 .long 0x36365000
186 .long 0xfffff000
187 .long 0
188 .long 0
189 b __arm7tdmi_setup
190 .long cpu_arch_name
191 .long cpu_elf_name
192 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
193 .long cpu_s3c4510b_name
194 .long arm7tdmi_processor_functions
195 .long 0
196 .long 0
197 .long v4_cache_fns
198 .size __s3c4510b_proc_info, . - __s3c4510b_proc_info
199
200 .type __s3c4530_proc_info, #object
201__s3c4530_proc_info:
202 .long 0x4c000000
203 .long 0xfff000e0
204 .long 0
205 .long 0
206 b __arm7tdmi_setup
207 .long cpu_arch_name
208 .long cpu_elf_name
209 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
210 .long cpu_s3c4530_name
211 .long arm7tdmi_processor_functions
212 .long 0
213 .long 0
214 .long v4_cache_fns
215 .size __s3c4530_proc_info, . - __s3c4530_proc_info
216
217 .type __s3c3410_proc_info, #object
218__s3c3410_proc_info:
219 .long 0x34100000
220 .long 0xffff0000
221 .long 0
222 .long 0
223 b __arm7tdmi_setup
224 .long cpu_arch_name
225 .long cpu_elf_name
226 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
227 .long cpu_s3c3410_name
228 .long arm7tdmi_processor_functions
229 .long 0
230 .long 0
231 .long v4_cache_fns
232 .size __s3c3410_proc_info, . - __s3c3410_proc_info
233
234 .type __s3c44b0x_proc_info, #object
235__s3c44b0x_proc_info:
236 .long 0x44b00000
237 .long 0xffff0000
238 .long 0
239 .long 0
240 b __arm7tdmi_setup
241 .long cpu_arch_name
242 .long cpu_elf_name
243 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
244 .long cpu_s3c44b0x_name
245 .long arm7tdmi_processor_functions
246 .long 0
247 .long 0
248 .long v4_cache_fns
249 .size __s3c44b0x_proc_info, . - __s3c44b0x_proc_info
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S
new file mode 100644
index 000000000000..2397f4b6e151
--- /dev/null
+++ b/arch/arm/mm/proc-arm940.S
@@ -0,0 +1,369 @@
1/*
2 * linux/arch/arm/mm/arm940.S: utility functions for ARM940T
3 *
4 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <asm/assembler.h>
14#include <asm/pgtable-hwdef.h>
15#include <asm/pgtable.h>
16#include <asm/procinfo.h>
17#include <asm/ptrace.h>
18
19/* ARM940T has a 4KB DCache comprising 256 lines of 4 words */
20#define CACHE_DLINESIZE 16
21#define CACHE_DSEGMENTS 4
22#define CACHE_DENTRIES 64
23
24 .text
25/*
26 * cpu_arm940_proc_init()
27 * cpu_arm940_switch_mm()
28 *
29 * These are not required.
30 */
31ENTRY(cpu_arm940_proc_init)
32ENTRY(cpu_arm940_switch_mm)
33 mov pc, lr
34
35/*
36 * cpu_arm940_proc_fin()
37 */
38ENTRY(cpu_arm940_proc_fin)
39 stmfd sp!, {lr}
40 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
41 msr cpsr_c, ip
42 bl arm940_flush_kern_cache_all
43 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
44 bic r0, r0, #0x00001000 @ i-cache
45 bic r0, r0, #0x00000004 @ d-cache
46 mcr p15, 0, r0, c1, c0, 0 @ disable caches
47 ldmfd sp!, {pc}
48
49/*
50 * cpu_arm940_reset(loc)
51 * Params : r0 = address to jump to
52 * Notes : This sets up everything for a reset
53 */
54ENTRY(cpu_arm940_reset)
55 mov ip, #0
56 mcr p15, 0, ip, c7, c5, 0 @ flush I cache
57 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
58 mcr p15, 0, ip, c7, c10, 4 @ drain WB
59 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
60 bic ip, ip, #0x00000005 @ .............c.p
61 bic ip, ip, #0x00001000 @ i-cache
62 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
63 mov pc, r0
64
65/*
66 * cpu_arm940_do_idle()
67 */
68 .align 5
69ENTRY(cpu_arm940_do_idle)
70 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
71 mov pc, lr
72
73/*
74 * flush_user_cache_all()
75 */
76ENTRY(arm940_flush_user_cache_all)
77 /* FALLTHROUGH */
78
79/*
80 * flush_kern_cache_all()
81 *
82 * Clean and invalidate the entire cache.
83 */
84ENTRY(arm940_flush_kern_cache_all)
85 mov r2, #VM_EXEC
86 /* FALLTHROUGH */
87
88/*
89 * flush_user_cache_range(start, end, flags)
90 *
91 * There is no efficient way to flush a range of cache entries
92 * in the specified address range. Thus, flushes all.
93 *
94 * - start - start address (inclusive)
95 * - end - end address (exclusive)
96 * - flags - vm_flags describing address space
97 */
98ENTRY(arm940_flush_user_cache_range)
99 mov ip, #0
100#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
101 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
102#else
103 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1041: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1052: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
106 subs r3, r3, #1 << 26
107 bcs 2b @ entries 63 to 0
108 subs r1, r1, #1 << 4
109 bcs 1b @ segments 3 to 0
110#endif
111 tst r2, #VM_EXEC
112 mcrne p15, 0, ip, c7, c5, 0 @ invalidate I cache
113 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
114 mov pc, lr
115
116/*
117 * coherent_kern_range(start, end)
118 *
119 * Ensure coherency between the Icache and the Dcache in the
120 * region described by start, end. If you have non-snooping
121 * Harvard caches, you need to implement this function.
122 *
123 * - start - virtual start address
124 * - end - virtual end address
125 */
126ENTRY(arm940_coherent_kern_range)
127 /* FALLTHROUGH */
128
129/*
130 * coherent_user_range(start, end)
131 *
132 * Ensure coherency between the Icache and the Dcache in the
133 * region described by start, end. If you have non-snooping
134 * Harvard caches, you need to implement this function.
135 *
136 * - start - virtual start address
137 * - end - virtual end address
138 */
139ENTRY(arm940_coherent_user_range)
140 /* FALLTHROUGH */
141
142/*
143 * flush_kern_dcache_page(void *page)
144 *
145 * Ensure no D cache aliasing occurs, either with itself or
146 * the I cache
147 *
148 * - addr - page aligned address
149 */
150ENTRY(arm940_flush_kern_dcache_page)
151 mov ip, #0
152 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1531: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1542: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
155 subs r3, r3, #1 << 26
156 bcs 2b @ entries 63 to 0
157 subs r1, r1, #1 << 4
158	bcs	1b				@ segments 3 to 0
159 mcr p15, 0, ip, c7, c5, 0 @ invalidate I cache
160 mcr p15, 0, ip, c7, c10, 4 @ drain WB
161 mov pc, lr
162
163/*
164 * dma_inv_range(start, end)
165 *
166 * There is no efficient way to invalidate a specified virtual
167 * address range. Thus, invalidates all.
168 *
169 * - start - virtual start address
170 * - end - virtual end address
171 */
172ENTRY(arm940_dma_inv_range)
173 mov ip, #0
174 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1751: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1762: mcr p15, 0, r3, c7, c6, 2 @ flush D entry
177 subs r3, r3, #1 << 26
178 bcs 2b @ entries 63 to 0
179 subs r1, r1, #1 << 4
180	bcs	1b				@ segments 3 to 0
181 mcr p15, 0, ip, c7, c10, 4 @ drain WB
182 mov pc, lr
183
184/*
185 * dma_clean_range(start, end)
186 *
187 * There is no efficient way to clean a specified virtual
188 * address range. Thus, cleans all.
189 *
190 * - start - virtual start address
191 * - end - virtual end address
192 */
193ENTRY(arm940_dma_clean_range)
194ENTRY(cpu_arm940_dcache_clean_area)
195 mov ip, #0
196#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
197 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
1981: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
1992: mcr p15, 0, r3, c7, c10, 2 @ clean D entry
200 subs r3, r3, #1 << 26
201 bcs 2b @ entries 63 to 0
202 subs r1, r1, #1 << 4
203	bcs	1b				@ segments 3 to 0
204#endif
205 mcr p15, 0, ip, c7, c10, 4 @ drain WB
206 mov pc, lr
207
208/*
209 * dma_flush_range(start, end)
210 *
211 * There is no efficient way to clean and invalidate a specified
212 * virtual address range.
213 *
214 * - start - virtual start address
215 * - end - virtual end address
216 */
217ENTRY(arm940_dma_flush_range)
218 mov ip, #0
219 mov r1, #(CACHE_DSEGMENTS - 1) << 4 @ 4 segments
2201: orr r3, r1, #(CACHE_DENTRIES - 1) << 26 @ 64 entries
2212:
222#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
223 mcr p15, 0, r3, c7, c14, 2 @ clean/flush D entry
224#else
225 mcr p15, 0, r3, c7, c10, 2 @ clean D entry
226#endif
227 subs r3, r3, #1 << 26
228 bcs 2b @ entries 63 to 0
229 subs r1, r1, #1 << 4
230	bcs	1b				@ segments 3 to 0
231 mcr p15, 0, ip, c7, c10, 4 @ drain WB
232 mov pc, lr
233
234ENTRY(arm940_cache_fns)
235 .long arm940_flush_kern_cache_all
236 .long arm940_flush_user_cache_all
237 .long arm940_flush_user_cache_range
238 .long arm940_coherent_kern_range
239 .long arm940_coherent_user_range
240 .long arm940_flush_kern_dcache_page
241 .long arm940_dma_inv_range
242 .long arm940_dma_clean_range
243 .long arm940_dma_flush_range
244
245 __INIT
246
247 .type __arm940_setup, #function
248__arm940_setup:
249 mov r0, #0
250 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
251 mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache
252 mcr p15, 0, r0, c7, c10, 4 @ drain WB
253
254 mcr p15, 0, r0, c6, c3, 0 @ disable data area 3~7
255 mcr p15, 0, r0, c6, c4, 0
256 mcr p15, 0, r0, c6, c5, 0
257 mcr p15, 0, r0, c6, c6, 0
258 mcr p15, 0, r0, c6, c7, 0
259
260 mcr p15, 0, r0, c6, c3, 1 @ disable instruction area 3~7
261 mcr p15, 0, r0, c6, c4, 1
262 mcr p15, 0, r0, c6, c5, 1
263 mcr p15, 0, r0, c6, c6, 1
264 mcr p15, 0, r0, c6, c7, 1
265
266 mov r0, #0x0000003F @ base = 0, size = 4GB
267 mcr p15, 0, r0, c6, c0, 0 @ set area 0, default
268 mcr p15, 0, r0, c6, c0, 1
269
270 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
271 ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
272 mov r2, #10 @ 11 is the minimum (4KB)
2731: add r2, r2, #1 @ area size *= 2
274	movs	r1, r1, lsr #1
275 bne 1b @ count not zero r-shift
276 orr r0, r0, r2, lsl #1 @ the area register value
277 orr r0, r0, #1 @ set enable bit
278 mcr p15, 0, r0, c6, c1, 0 @ set area 1, RAM
279 mcr p15, 0, r0, c6, c1, 1
280
281 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
282 ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
283 mov r2, #10 @ 11 is the minimum (4KB)
2841: add r2, r2, #1 @ area size *= 2
285	movs	r1, r1, lsr #1
286 bne 1b @ count not zero r-shift
287 orr r0, r0, r2, lsl #1 @ the area register value
288 orr r0, r0, #1 @ set enable bit
289 mcr p15, 0, r0, c6, c2, 0 @ set area 2, ROM/FLASH
290 mcr p15, 0, r0, c6, c2, 1
291
292 mov r0, #0x06
293 mcr p15, 0, r0, c2, c0, 0 @ Region 1&2 cacheable
294 mcr p15, 0, r0, c2, c0, 1
295#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
296 mov r0, #0x00 @ disable whole write buffer
297#else
298	mov	r0, #0x02			@ Region 1 write buffered
299#endif
300 mcr p15, 0, r0, c3, c0, 0
301
302 mov r0, #0x10000
303 sub r0, r0, #1 @ r0 = 0xffff
304 mcr p15, 0, r0, c5, c0, 0 @ all read/write access
305 mcr p15, 0, r0, c5, c0, 1
306
307 mrc p15, 0, r0, c1, c0 @ get control register
308 orr r0, r0, #0x00001000 @ I-cache
309 orr r0, r0, #0x00000005 @ MPU/D-cache
310
311 mov pc, lr
312
313 .size __arm940_setup, . - __arm940_setup
314
315 __INITDATA
316
317/*
318 * Purpose : Function pointers used to access above functions - all calls
319 * come through these
320 */
321 .type arm940_processor_functions, #object
322ENTRY(arm940_processor_functions)
323 .word nommu_early_abort
324 .word cpu_arm940_proc_init
325 .word cpu_arm940_proc_fin
326 .word cpu_arm940_reset
327 .word cpu_arm940_do_idle
328 .word cpu_arm940_dcache_clean_area
329 .word cpu_arm940_switch_mm
330 .word 0 @ cpu_*_set_pte
331 .size arm940_processor_functions, . - arm940_processor_functions
332
333 .section ".rodata"
334
335.type cpu_arch_name, #object
336cpu_arch_name:
337 .asciz "armv4t"
338 .size cpu_arch_name, . - cpu_arch_name
339
340 .type cpu_elf_name, #object
341cpu_elf_name:
342 .asciz "v4"
343 .size cpu_elf_name, . - cpu_elf_name
344
345 .type cpu_arm940_name, #object
346cpu_arm940_name:
347 .ascii "ARM940T"
348 .size cpu_arm940_name, . - cpu_arm940_name
349
350 .align
351
352 .section ".proc.info.init", #alloc, #execinstr
353
354 .type __arm940_proc_info,#object
355__arm940_proc_info:
356 .long 0x41009400
357 .long 0xff00fff0
358 .long 0
359 b __arm940_setup
360 .long cpu_arch_name
361 .long cpu_elf_name
362 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
363 .long cpu_arm940_name
364 .long arm940_processor_functions
365 .long 0
366 .long 0
367 .long arm940_cache_fns
368 .size __arm940_proc_info, . - __arm940_proc_info
369
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S
new file mode 100644
index 000000000000..e18617564421
--- /dev/null
+++ b/arch/arm/mm/proc-arm946.S
@@ -0,0 +1,424 @@
1/*
2 * linux/arch/arm/mm/arm946.S: utility functions for ARM946E-S
3 *
4 * Copyright (C) 2004-2006 Hyok S. Choi (hyok.choi@samsung.com)
5 *
6 * (Many of cache codes are from proc-arm926.S)
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 */
13#include <linux/linkage.h>
14#include <linux/init.h>
15#include <asm/assembler.h>
16#include <asm/pgtable-hwdef.h>
17#include <asm/pgtable.h>
18#include <asm/procinfo.h>
19#include <asm/ptrace.h>
20
21/*
22 * ARM946E-S is synthesizable to have 0KB to 1MB sized D-Cache,
23 * comprising 256 lines of 32 bytes (8 words).
24 */
25#define CACHE_DSIZE (CONFIG_CPU_DCACHE_SIZE) /* typically 8KB. */
26#define CACHE_DLINESIZE 32 /* fixed */
27#define CACHE_DSEGMENTS 4 /* fixed */
28#define CACHE_DENTRIES (CACHE_DSIZE / CACHE_DSEGMENTS / CACHE_DLINESIZE)
29#define CACHE_DLIMIT (CACHE_DSIZE * 4) /* benchmark needed */
30
31 .text
32/*
33 * cpu_arm946_proc_init()
34 * cpu_arm946_switch_mm()
35 *
36 * These are not required.
37 */
38ENTRY(cpu_arm946_proc_init)
39ENTRY(cpu_arm946_switch_mm)
40 mov pc, lr
41
42/*
43 * cpu_arm946_proc_fin()
44 */
45ENTRY(cpu_arm946_proc_fin)
46 stmfd sp!, {lr}
47 mov ip, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
48 msr cpsr_c, ip
49 bl arm946_flush_kern_cache_all
50 mrc p15, 0, r0, c1, c0, 0 @ ctrl register
51 bic r0, r0, #0x00001000 @ i-cache
52 bic r0, r0, #0x00000004 @ d-cache
53 mcr p15, 0, r0, c1, c0, 0 @ disable caches
54 ldmfd sp!, {pc}
55
56/*
57 * cpu_arm946_reset(loc)
58 * Params : r0 = address to jump to
59 * Notes : This sets up everything for a reset
60 */
61ENTRY(cpu_arm946_reset)
62 mov ip, #0
63 mcr p15, 0, ip, c7, c5, 0 @ flush I cache
64 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
65 mcr p15, 0, ip, c7, c10, 4 @ drain WB
66 mrc p15, 0, ip, c1, c0, 0 @ ctrl register
67 bic ip, ip, #0x00000005 @ .............c.p
68 bic ip, ip, #0x00001000 @ i-cache
69 mcr p15, 0, ip, c1, c0, 0 @ ctrl register
70 mov pc, r0
71
72/*
73 * cpu_arm946_do_idle()
74 */
75 .align 5
76ENTRY(cpu_arm946_do_idle)
77 mcr p15, 0, r0, c7, c0, 4 @ Wait for interrupt
78 mov pc, lr
79
80/*
81 * flush_user_cache_all()
82 */
83ENTRY(arm946_flush_user_cache_all)
84 /* FALLTHROUGH */
85
86/*
87 * flush_kern_cache_all()
88 *
89 * Clean and invalidate the entire cache.
90 */
91ENTRY(arm946_flush_kern_cache_all)
92 mov r2, #VM_EXEC
93 mov ip, #0
94__flush_whole_cache:
95#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
96 mcr p15, 0, ip, c7, c6, 0 @ flush D cache
97#else
98 mov r1, #(CACHE_DSEGMENTS - 1) << 29 @ 4 segments
991: orr r3, r1, #(CACHE_DENTRIES - 1) << 4 @ n entries
1002: mcr p15, 0, r3, c7, c14, 2 @ clean/flush D index
101 subs r3, r3, #1 << 4
102 bcs 2b @ entries n to 0
103 subs r1, r1, #1 << 29
104 bcs 1b @ segments 3 to 0
105#endif
106 tst r2, #VM_EXEC
107 mcrne p15, 0, ip, c7, c5, 0 @ flush I cache
108 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
109 mov pc, lr
110
111/*
112 * flush_user_cache_range(start, end, flags)
113 *
114 * Clean and invalidate a range of cache entries in the
115 * specified address range.
116 *
117 * - start - start address (inclusive)
118 * - end - end address (exclusive)
119 * - flags - vm_flags describing address space
120 * (same as arm926)
121 */
122ENTRY(arm946_flush_user_cache_range)
123 mov ip, #0
124 sub r3, r1, r0 @ calculate total size
125 cmp r3, #CACHE_DLIMIT
126 bhs __flush_whole_cache
127
1281: tst r2, #VM_EXEC
129#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
130 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
131 mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
132 add r0, r0, #CACHE_DLINESIZE
133 mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
134 mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
135 add r0, r0, #CACHE_DLINESIZE
136#else
137 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
138 mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
139 add r0, r0, #CACHE_DLINESIZE
140 mcr p15, 0, r0, c7, c14, 1 @ clean and invalidate D entry
141 mcrne p15, 0, r0, c7, c5, 1 @ invalidate I entry
142 add r0, r0, #CACHE_DLINESIZE
143#endif
144 cmp r0, r1
145 blo 1b
146 tst r2, #VM_EXEC
147 mcrne p15, 0, ip, c7, c10, 4 @ drain WB
148 mov pc, lr
149
150/*
151 * coherent_kern_range(start, end)
152 *
153 * Ensure coherency between the Icache and the Dcache in the
154 * region described by start, end. If you have non-snooping
155 * Harvard caches, you need to implement this function.
156 *
157 * - start - virtual start address
158 * - end - virtual end address
159 */
160ENTRY(arm946_coherent_kern_range)
161 /* FALLTHROUGH */
162
163/*
164 * coherent_user_range(start, end)
165 *
166 * Ensure coherency between the Icache and the Dcache in the
167 * region described by start, end. If you have non-snooping
168 * Harvard caches, you need to implement this function.
169 *
170 * - start - virtual start address
171 * - end - virtual end address
172 * (same as arm926)
173 */
174ENTRY(arm946_coherent_user_range)
175 bic r0, r0, #CACHE_DLINESIZE - 1
1761: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
177 mcr p15, 0, r0, c7, c5, 1 @ invalidate I entry
178 add r0, r0, #CACHE_DLINESIZE
179 cmp r0, r1
180 blo 1b
181 mcr p15, 0, r0, c7, c10, 4 @ drain WB
182 mov pc, lr
183
184/*
185 * flush_kern_dcache_page(void *page)
186 *
187 * Ensure no D cache aliasing occurs, either with itself or
188 * the I cache
189 *
190 * - addr - page aligned address
191 * (same as arm926)
192 */
193ENTRY(arm946_flush_kern_dcache_page)
194 add r1, r0, #PAGE_SZ
1951: mcr p15, 0, r0, c7, c14, 1 @ clean+invalidate D entry
196 add r0, r0, #CACHE_DLINESIZE
197 cmp r0, r1
198 blo 1b
199 mov r0, #0
200 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
201 mcr p15, 0, r0, c7, c10, 4 @ drain WB
202 mov pc, lr
203
204/*
205 * dma_inv_range(start, end)
206 *
207 * Invalidate (discard) the specified virtual address range.
208 * May not write back any entries. If 'start' or 'end'
209 * are not cache line aligned, those lines must be written
210 * back.
211 *
212 * - start - virtual start address
213 * - end - virtual end address
214 * (same as arm926)
215 */
216ENTRY(arm946_dma_inv_range)
217#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
218 tst r0, #CACHE_DLINESIZE - 1
219 mcrne p15, 0, r0, c7, c10, 1 @ clean D entry
220 tst r1, #CACHE_DLINESIZE - 1
221 mcrne p15, 0, r1, c7, c10, 1 @ clean D entry
222#endif
223 bic r0, r0, #CACHE_DLINESIZE - 1
2241: mcr p15, 0, r0, c7, c6, 1 @ invalidate D entry
225 add r0, r0, #CACHE_DLINESIZE
226 cmp r0, r1
227 blo 1b
228 mcr p15, 0, r0, c7, c10, 4 @ drain WB
229 mov pc, lr
230
231/*
232 * dma_clean_range(start, end)
233 *
234 * Clean the specified virtual address range.
235 *
236 * - start - virtual start address
237 * - end - virtual end address
238 *
239 * (same as arm926)
240 */
241ENTRY(arm946_dma_clean_range)
242#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
243 bic r0, r0, #CACHE_DLINESIZE - 1
2441: mcr p15, 0, r0, c7, c10, 1 @ clean D entry
245 add r0, r0, #CACHE_DLINESIZE
246 cmp r0, r1
247 blo 1b
248#endif
249 mcr p15, 0, r0, c7, c10, 4 @ drain WB
250 mov pc, lr
251
/*
 * dma_flush_range(start, end)
 *
 * Clean and invalidate the specified virtual address range.
 * With a write-through D-cache only a clean is issued; there is
 * nothing dirty to lose by not invalidating.
 *
 * - start - virtual start address (r0)
 * - end   - virtual end address (r1)
 *
 * (same as arm926)
 */
ENTRY(arm946_dma_flush_range)
	bic	r0, r0, #CACHE_DLINESIZE - 1	@ align start down to a line
1:
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
	mcr	p15, 0, r0, c7, c14, 1		@ clean+invalidate D entry
#else
	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
#endif
	add	r0, r0, #CACHE_DLINESIZE
	cmp	r0, r1
	blo	1b
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
275
/*
 * arm946_cache_fns - table of this CPU's cache operations, in the
 * slot order expected by the generic struct cpu_cache_fns users.
 */
ENTRY(arm946_cache_fns)
	.long	arm946_flush_kern_cache_all
	.long	arm946_flush_user_cache_all
	.long	arm946_flush_user_cache_range
	.long	arm946_coherent_kern_range
	.long	arm946_coherent_user_range
	.long	arm946_flush_kern_dcache_page
	.long	arm946_dma_inv_range
	.long	arm946_dma_clean_range
	.long	arm946_dma_flush_range
286
287
/*
 * cpu_arm946_dcache_clean_area(addr, size)
 *
 * Clean (write back) 'size' (r1) bytes of D-cache starting at
 * 'addr' (r0), then drain the write buffer.  The clean loop is
 * compiled out for a write-through D-cache (nothing can be dirty).
 */
ENTRY(cpu_arm946_dcache_clean_area)
#ifndef CONFIG_CPU_DCACHE_WRITETHROUGH
1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D entry
	add	r0, r0, #CACHE_DLINESIZE
	subs	r1, r1, #CACHE_DLINESIZE	@ bytes remaining
	bhi	1b
#endif
	mcr	p15, 0, r0, c7, c10, 4		@ drain WB
	mov	pc, lr
297
298 __INIT
299
300 .type __arm946_setup, #function
301__arm946_setup:
302 mov r0, #0
303 mcr p15, 0, r0, c7, c5, 0 @ invalidate I cache
304 mcr p15, 0, r0, c7, c6, 0 @ invalidate D cache
305 mcr p15, 0, r0, c7, c10, 4 @ drain WB
306
307 mcr p15, 0, r0, c6, c3, 0 @ disable memory region 3~7
308 mcr p15, 0, r0, c6, c4, 0
309 mcr p15, 0, r0, c6, c5, 0
310 mcr p15, 0, r0, c6, c6, 0
311 mcr p15, 0, r0, c6, c7, 0
312
313 mov r0, #0x0000003F @ base = 0, size = 4GB
314 mcr p15, 0, r0, c6, c0, 0 @ set region 0, default
315
316 ldr r0, =(CONFIG_DRAM_BASE & 0xFFFFF000) @ base[31:12] of RAM
317 ldr r1, =(CONFIG_DRAM_SIZE >> 12) @ size of RAM (must be >= 4KB)
318 mov r2, #10 @ 11 is the minimum (4KB)
3191: add r2, r2, #1 @ area size *= 2
320 mov r1, r1, lsr #1
321 bne 1b @ count not zero r-shift
322 orr r0, r0, r2, lsl #1 @ the region register value
323 orr r0, r0, #1 @ set enable bit
324 mcr p15, 0, r0, c6, c1, 0 @ set region 1, RAM
325
326 ldr r0, =(CONFIG_FLASH_MEM_BASE & 0xFFFFF000) @ base[31:12] of FLASH
327 ldr r1, =(CONFIG_FLASH_SIZE >> 12) @ size of FLASH (must be >= 4KB)
328 mov r2, #10 @ 11 is the minimum (4KB)
3291: add r2, r2, #1 @ area size *= 2
330 mov r1, r1, lsr #1
331 bne 1b @ count not zero r-shift
332 orr r0, r0, r2, lsl #1 @ the region register value
333 orr r0, r0, #1 @ set enable bit
334 mcr p15, 0, r0, c6, c2, 0 @ set region 2, ROM/FLASH
335
336 mov r0, #0x06
337 mcr p15, 0, r0, c2, c0, 0 @ region 1,2 d-cacheable
338 mcr p15, 0, r0, c2, c0, 1 @ region 1,2 i-cacheable
339#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
340 mov r0, #0x00 @ disable whole write buffer
341#else
342 mov r0, #0x02 @ region 1 write bufferred
343#endif
344 mcr p15, 0, r0, c3, c0, 0
345
346/*
347 * Access Permission Settings for future permission control by PU.
348 *
349 * priv. user
350 * region 0 (whole) rw -- : b0001
351 * region 1 (RAM) rw rw : b0011
352 * region 2 (FLASH) rw r- : b0010
353 * region 3~7 (none) -- -- : b0000
354 */
355 mov r0, #0x00000031
356 orr r0, r0, #0x00000200
357 mcr p15, 0, r0, c5, c0, 2 @ set data access permission
358 mcr p15, 0, r0, c5, c0, 3 @ set inst. access permission
359
360 mrc p15, 0, r0, c1, c0 @ get control register
361 orr r0, r0, #0x00001000 @ I-cache
362 orr r0, r0, #0x00000005 @ MPU/D-cache
363#ifdef CONFIG_CPU_CACHE_ROUND_ROBIN
364 orr r0, r0, #0x00004000 @ .1.. .... .... ....
365#endif
366 mov pc, lr
367
368 .size __arm946_setup, . - __arm946_setup
369
	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm946_processor_functions, #object
ENTRY(arm946_processor_functions)
	.word	nommu_early_abort		@ cpu_*_data_abort
	.word	cpu_arm946_proc_init
	.word	cpu_arm946_proc_fin
	.word	cpu_arm946_reset
	.word	cpu_arm946_do_idle

	.word	cpu_arm946_dcache_clean_area
	.word	cpu_arm946_switch_mm
	.word	0 @ cpu_*_set_pte		@ no page tables on this nommu core
	.size	arm946_processor_functions, . - arm946_processor_functions
388
389 .section ".rodata"
390
391 .type cpu_arch_name, #object
392cpu_arch_name:
393 .asciz "armv5te"
394 .size cpu_arch_name, . - cpu_arch_name
395
396 .type cpu_elf_name, #object
397cpu_elf_name:
398 .asciz "v5t"
399 .size cpu_elf_name, . - cpu_elf_name
400
401 .type cpu_arm946_name, #object
402cpu_arm946_name:
403 .ascii "ARM946E-S"
404 .size cpu_arm946_name, . - cpu_arm946_name
405
406 .align
407
408 .section ".proc.info.init", #alloc, #execinstr
409 .type __arm946_proc_info,#object
410__arm946_proc_info:
411 .long 0x41009460
412 .long 0xff00fff0
413 .long 0
414 b __arm946_setup
415 .long cpu_arch_name
416 .long cpu_elf_name
417 .long HWCAP_SWP | HWCAP_HALF | HWCAP_THUMB
418 .long cpu_arm946_name
419 .long arm946_processor_functions
420 .long 0
421 .long 0
422 .long arm940_cache_fns
423 .size __arm946_proc_info, . - __arm946_proc_info
424
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S
new file mode 100644
index 000000000000..918ebf65d4f6
--- /dev/null
+++ b/arch/arm/mm/proc-arm9tdmi.S
@@ -0,0 +1,134 @@
1/*
2 * linux/arch/arm/mm/proc-arm9tdmi.S: utility functions for ARM9TDMI
3 *
4 * Copyright (C) 2003-2006 Hyok S. Choi <hyok.choi@samsung.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11#include <linux/linkage.h>
12#include <linux/init.h>
13#include <asm/assembler.h>
14#include <asm/asm-offsets.h>
15#include <asm/pgtable-hwdef.h>
16#include <asm/pgtable.h>
17#include <asm/procinfo.h>
18#include <asm/ptrace.h>
19
	.text
/*
 * cpu_arm9tdmi_proc_init()
 * cpu_arm9tdmi_do_idle()
 * cpu_arm9tdmi_dcache_clean_area()
 * cpu_arm9tdmi_switch_mm()
 *
 * These are not required on this core, so all four entry points
 * share a single return instruction.
 */
ENTRY(cpu_arm9tdmi_proc_init)
ENTRY(cpu_arm9tdmi_do_idle)
ENTRY(cpu_arm9tdmi_dcache_clean_area)
ENTRY(cpu_arm9tdmi_switch_mm)
		mov	pc, lr
34
/*
 * cpu_arm9tdmi_proc_fin()
 *
 * Mask IRQs and FIQs and force SVC mode before returning.
 */
ENTRY(cpu_arm9tdmi_proc_fin)
	mov	r0, #PSR_F_BIT | PSR_I_BIT | SVC_MODE
	msr	cpsr_c, r0			@ interrupts off, SVC mode
	mov	pc, lr
42
/*
 * Function: cpu_arm9tdmi_reset(loc)
 * Params  : loc(r0)	address to jump to
 * Purpose : Sets up everything for a reset and jump to the location for soft reset.
 *	     (No caches or MMU state to tear down here - just branch.)
 */
ENTRY(cpu_arm9tdmi_reset)
	mov	pc, r0				@ jump to the reset vector/loc
50
	__INIT

	@ __arm9tdmi_setup: no CP15 setup required for this core; the
	@ proc_info record still needs a setup entry point to branch to.
	.type	__arm9tdmi_setup, #function
__arm9tdmi_setup:
	mov	pc, lr
	.size	__arm9tdmi_setup, . - __arm9tdmi_setup
57
	__INITDATA

/*
 * Purpose : Function pointers used to access above functions - all calls
 *	     come through these
 */
	.type	arm9tdmi_processor_functions, #object
ENTRY(arm9tdmi_processor_functions)
	.word	nommu_early_abort		@ cpu_*_data_abort
	.word	cpu_arm9tdmi_proc_init
	.word	cpu_arm9tdmi_proc_fin
	.word	cpu_arm9tdmi_reset
	.word	cpu_arm9tdmi_do_idle
	.word	cpu_arm9tdmi_dcache_clean_area
	.word	cpu_arm9tdmi_switch_mm
	.word	0 @ cpu_*_set_pte		@ no page tables on this nommu core
	.size	arm9tdmi_processor_functions, . - arm9tdmi_processor_functions
75
	.section ".rodata"

	@ Architecture / ELF / CPU name strings referenced by the
	@ proc_info records below.
	.type	cpu_arch_name, #object
cpu_arch_name:
	.asciz	"armv4t"
	.size	cpu_arch_name, . - cpu_arch_name

	.type	cpu_elf_name, #object
cpu_elf_name:
	.asciz	"v4"
	.size	cpu_elf_name, . - cpu_elf_name

	.type	cpu_arm9tdmi_name, #object
cpu_arm9tdmi_name:
	.asciz	"ARM9TDMI"
	.size	cpu_arm9tdmi_name, . - cpu_arm9tdmi_name

	@ P2001 is an ARM9TDMI-based part sharing this file's routines.
	.type	cpu_p2001_name, #object
cpu_p2001_name:
	.asciz	"P2001"
	.size	cpu_p2001_name, . - cpu_p2001_name

	.align
99
100 .section ".proc.info.init", #alloc, #execinstr
101
102 .type __arm9tdmi_proc_info, #object
103__arm9tdmi_proc_info:
104 .long 0x41009900
105 .long 0xfff8ff00
106 .long 0
107 .long 0
108 b __arm9tdmi_setup
109 .long cpu_arch_name
110 .long cpu_elf_name
111 .long HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
112 .long cpu_arm9tdmi_name
113 .long arm9tdmi_processor_functions
114 .long 0
115 .long 0
116 .long v4_cache_fns
117 .size __arm9tdmi_proc_info, . - __arm9dmi_proc_info
118
	@ proc_info record for the P2001 (exact-match CPU id, mask all-ones);
	@ shares the ARM9TDMI setup and processor function table.
	@ NOTE(review): carries two zero words before the setup branch, like
	@ __arm9tdmi_proc_info above but unlike __arm946_proc_info - verify
	@ against struct proc_info_list.
	.type	__p2001_proc_info, #object
__p2001_proc_info:
	.long	0x41029000			@ CPU id value
	.long	0xffffffff			@ CPU id mask
	.long	0
	.long	0
	b	__arm9tdmi_setup
	.long	cpu_arch_name
	.long	cpu_elf_name
	.long	HWCAP_SWP | HWCAP_THUMB | HWCAP_26BIT
	.long	cpu_p2001_name
	.long	arm9tdmi_processor_functions
	.long	0
	.long	0
	.long	v4_cache_fns
	.size	__p2001_proc_info, . - __p2001_proc_info