Diffstat (limited to 'arch/arm/mm')
43 files changed, 512 insertions, 168 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 83c025e72ceb..e993140edd88 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -17,7 +17,7 @@ config CPU_ARM610
 	select CPU_CP15_MMU
 	select CPU_COPY_V3 if MMU
 	select CPU_TLB_V3 if MMU
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	help
 	  The ARM610 is the successor to the ARM3 processor
 	  and was produced by VLSI Technology Inc.
@@ -31,7 +31,7 @@ config CPU_ARM7TDMI
 	depends on !MMU
 	select CPU_32v4T
 	select CPU_ABRT_LV4T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4
 	help
 	  A 32-bit RISC microprocessor based on the ARM7 processor core
@@ -49,7 +49,7 @@ config CPU_ARM710
 	select CPU_CP15_MMU
 	select CPU_COPY_V3 if MMU
 	select CPU_TLB_V3 if MMU
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	help
 	  A 32-bit RISC microprocessor based on the ARM7 processor core
 	  designed by Advanced RISC Machines Ltd. The ARM710 is the
@@ -64,7 +64,7 @@ config CPU_ARM720T
 	bool "Support ARM720T processor" if ARCH_INTEGRATOR
 	select CPU_32v4T
 	select CPU_ABRT_LV4T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
@@ -83,7 +83,7 @@ config CPU_ARM740T
 	depends on !MMU
 	select CPU_32v4T
 	select CPU_ABRT_LV4T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V3	# although the core is v4t
 	select CPU_CP15_MPU
 	help
@@ -100,7 +100,7 @@ config CPU_ARM9TDMI
 	depends on !MMU
 	select CPU_32v4T
 	select CPU_ABRT_NOMMU
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4
 	help
 	  A 32-bit RISC microprocessor based on the ARM9 processor core
@@ -114,7 +114,7 @@ config CPU_ARM920T
 	bool "Support ARM920T processor" if ARCH_INTEGRATOR
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
@@ -135,7 +135,7 @@ config CPU_ARM922T
 	bool "Support ARM922T processor" if ARCH_INTEGRATOR
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
@@ -154,7 +154,7 @@ config CPU_ARM925T
 	bool "Support ARM925T processor" if ARCH_OMAP1
 	select CPU_32v4T
 	select CPU_ABRT_EV4T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
@@ -173,7 +173,7 @@ config CPU_ARM926T
 	bool "Support ARM926T processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB
 	select CPU_32v5
 	select CPU_ABRT_EV5TJ
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU
@@ -191,7 +191,7 @@ config CPU_FA526
 	bool
 	select CPU_32v4
 	select CPU_ABRT_EV4
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
 	select CPU_CACHE_FA
@@ -210,7 +210,7 @@ config CPU_ARM940T
 	depends on !MMU
 	select CPU_32v4T
 	select CPU_ABRT_NOMMU
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MPU
 	help
@@ -228,7 +228,7 @@ config CPU_ARM946E
 	depends on !MMU
 	select CPU_32v5
 	select CPU_ABRT_NOMMU
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MPU
 	help
@@ -244,7 +244,7 @@ config CPU_ARM1020
 	bool "Support ARM1020T (rev 0) processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV4T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
@@ -262,7 +262,7 @@ config CPU_ARM1020E
 	bool "Support ARM1020E processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV4T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4WT
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
@@ -275,7 +275,7 @@ config CPU_ARM1022
 	bool "Support ARM1022E processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV4T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU # can probably do better
@@ -293,7 +293,7 @@ config CPU_ARM1026
 	bool "Support ARM1026EJ-S processor" if ARCH_INTEGRATOR
 	select CPU_32v5
 	select CPU_ABRT_EV5T	# But need Jazelle, but EV5TJ ignores bit 10
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
 	select CPU_COPY_V4WB if MMU # can probably do better
@@ -311,7 +311,7 @@ config CPU_SA110
 	select CPU_32v3 if ARCH_RPC
 	select CPU_32v4 if !ARCH_RPC
 	select CPU_ABRT_EV4
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
@@ -331,7 +331,7 @@ config CPU_SA1100
 	bool
 	select CPU_32v4
 	select CPU_ABRT_EV4
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_V4WB
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
@@ -342,7 +342,7 @@ config CPU_XSCALE
 	bool
 	select CPU_32v5
 	select CPU_ABRT_EV5T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
 	select CPU_TLB_V4WBI if MMU
@@ -352,7 +352,7 @@ config CPU_XSC3
 	bool
 	select CPU_32v5
 	select CPU_ABRT_EV5T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
 	select CPU_TLB_V4WBI if MMU
@@ -363,7 +363,7 @@ config CPU_MOHAWK
 	bool
 	select CPU_32v5
 	select CPU_ABRT_EV5T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
 	select CPU_TLB_V4WBI if MMU
@@ -374,7 +374,7 @@ config CPU_FEROCEON
 	bool
 	select CPU_32v5
 	select CPU_ABRT_EV5T
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_LEGACY
 	select CPU_CACHE_VIVT
 	select CPU_CP15_MMU
 	select CPU_COPY_FEROCEON if MMU
@@ -394,7 +394,7 @@ config CPU_V6
 	bool "Support ARM V6 processor" if ARCH_INTEGRATOR || MACH_REALVIEW_EB || MACH_REALVIEW_PBX
 	select CPU_32v6
 	select CPU_ABRT_EV6
-	select CPU_PABRT_NOIFAR
+	select CPU_PABRT_V6
 	select CPU_CACHE_V6
 	select CPU_CACHE_VIPT
 	select CPU_CP15_MMU
@@ -420,7 +420,7 @@ config CPU_V7
 	select CPU_32v6K
 	select CPU_32v7
 	select CPU_ABRT_EV7
-	select CPU_PABRT_IFAR
+	select CPU_PABRT_V7
 	select CPU_CACHE_V7
 	select CPU_CACHE_VIPT
 	select CPU_CP15_MMU
@@ -482,10 +482,13 @@ config CPU_ABRT_EV6
 config CPU_ABRT_EV7
 	bool
 
-config CPU_PABRT_IFAR
+config CPU_PABRT_LEGACY
 	bool
 
-config CPU_PABRT_NOIFAR
+config CPU_PABRT_V6
+	bool
+
+config CPU_PABRT_V7
 	bool
 
 # The cache model
@@ -758,7 +761,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
 	default y
 	select OUTER_CACHE
 	help
@@ -771,3 +774,8 @@ config CACHE_XSC3L2
 	select OUTER_CACHE
 	help
 	  This option enables the L2 cache on XScale3.
+
+config ARM_L1_CACHE_SHIFT
+	int
+	default 6 if ARCH_OMAP3
+	default 5
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 63e3f6dd0e21..055cb2aa8134 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -27,6 +27,10 @@ obj-$(CONFIG_CPU_ABRT_EV5TJ)	+= abort-ev5tj.o
 obj-$(CONFIG_CPU_ABRT_EV6)	+= abort-ev6.o
 obj-$(CONFIG_CPU_ABRT_EV7)	+= abort-ev7.o
 
+obj-$(CONFIG_CPU_PABRT_LEGACY)	+= pabort-legacy.o
+obj-$(CONFIG_CPU_PABRT_V6)	+= pabort-v6.o
+obj-$(CONFIG_CPU_PABRT_V7)	+= pabort-v7.o
+
 obj-$(CONFIG_CPU_CACHE_V3)	+= cache-v3.o
 obj-$(CONFIG_CPU_CACHE_V4)	+= cache-v4.o
 obj-$(CONFIG_CPU_CACHE_V4WT)	+= cache-v4wt.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 03cd27d917b9..b270d6228fe2 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -159,7 +159,9 @@ union offset_union {
 
 #define __get8_unaligned_check(ins,val,addr,err)	\
 	__asm__(					\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
@@ -215,7 +217,9 @@ union offset_union {
 	do {						\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_16			\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
 	"2:	"ins"	%1, [%2]\n"			\
 	"3:\n"						\
@@ -245,11 +249,17 @@ union offset_union {
 	do {						\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_32			\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
-	"2:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"2:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"2:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
-	"3:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"3:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"3:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
 	"4:	"ins"	%1, [%2]\n"			\
 	"5:\n"						\
diff --git a/arch/arm/mm/cache-v6.S b/arch/arm/mm/cache-v6.S
index 8f5c13f4c936..295e25dd6381 100644
--- a/arch/arm/mm/cache-v6.S
+++ b/arch/arm/mm/cache-v6.S
@@ -12,6 +12,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
 #include "proc-macros.S"
 
@@ -121,11 +122,13 @@ ENTRY(v6_coherent_kern_range)
  *	- the Icache does not read data from the write buffer
  */
ENTRY(v6_coherent_user_range)
-
+ UNWIND(.fnstart		)
 #ifdef HARVARD_CACHE
 	bic	r0, r0, #CACHE_LINE_SIZE - 1
-1:	mcr	p15, 0, r0, c7, c10, 1		@ clean D line
+1:
+ USER(	mcr	p15, 0, r0, c7, c10, 1	)	@ clean D line
 	add	r0, r0, #CACHE_LINE_SIZE
+2:
 	cmp	r0, r1
 	blo	1b
 #endif
@@ -143,6 +146,19 @@ ENTRY(v6_coherent_user_range)
 	mov	pc, lr
 
 /*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+	mov	r0, r0, lsr #12
+	mov	r0, r0, lsl #12
+	add	r0, r0, #4096
+	b	2b
+ UNWIND(.fnend		)
+ENDPROC(v6_coherent_user_range)
+ENDPROC(v6_coherent_kern_range)
+
+/*
  * v6_flush_kern_dcache_page(kaddr)
  *
  *	Ensure that the data held in the page kaddr is written back
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index be93ff02a98d..e1bd9759617f 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -13,6 +13,7 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 #include <asm/assembler.h>
+#include <asm/unwind.h>
 
 #include "proc-macros.S"
 
@@ -21,7 +22,7 @@
 *
 *	Flush the whole D-cache.
 *
- *	Corrupted registers: r0-r5, r7, r9-r11
+ *	Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
 *
 *	- mm	- mm_struct describing address space
 */
@@ -51,8 +52,12 @@ loop1:
loop2:
 	mov	r9, r4			@ create working copy of max way size
loop3:
-	orr	r11, r10, r9, lsl r5	@ factor way and cache number into r11
-	orr	r11, r11, r7, lsl r2	@ factor index number into r11
+ ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
+ THUMB(	lsl	r6, r9, r5		)
+ THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
+ ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
+ THUMB(	lsl	r6, r7, r2		)
+ THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
 	mcr	p15, 0, r11, c7, c14, 2	@ clean & invalidate by set/way
 	subs	r9, r9, #1		@ decrement the way
 	bge	loop3
@@ -82,11 +87,13 @@ ENDPROC(v7_flush_dcache_all)
 *
 */
ENTRY(v7_flush_kern_cache_all)
-	stmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bl	v7_flush_dcache_all
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
-	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	mov	pc, lr
ENDPROC(v7_flush_kern_cache_all)
 
@@ -147,13 +154,16 @@ ENTRY(v7_coherent_kern_range)
 *	- the Icache does not read data from the write buffer
 */
ENTRY(v7_coherent_user_range)
+ UNWIND(.fnstart		)
 	dcache_line_size r2, r3
 	sub	r3, r2, #1
 	bic	r0, r0, r3
-1:	mcr	p15, 0, r0, c7, c11, 1		@ clean D line to the point of unification
+1:
+ USER(	mcr	p15, 0, r0, c7, c11, 1	)	@ clean D line to the point of unification
 	dsb
-	mcr	p15, 0, r0, c7, c5, 1		@ invalidate I line
+ USER(	mcr	p15, 0, r0, c7, c5, 1	)	@ invalidate I line
 	add	r0, r0, r2
+2:
 	cmp	r0, r1
 	blo	1b
 	mov	r0, #0
@@ -161,6 +171,17 @@ ENTRY(v7_coherent_user_range)
 	dsb
 	isb
 	mov	pc, lr
+
+/*
+ * Fault handling for the cache operation above. If the virtual address in r0
+ * isn't mapped, just try the next page.
+ */
+9001:
+	mov	r0, r0, lsr #12
+	mov	r0, r0, lsl #12
+	add	r0, r0, #4096
+	b	2b
+ UNWIND(.fnend		)
ENDPROC(v7_coherent_kern_range)
ENDPROC(v7_coherent_user_range)
 
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index fc84fcc74380..6bda76a43199 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -59,6 +59,6 @@ void __new_context(struct mm_struct *mm)
 	}
 	spin_unlock(&cpu_asid_lock);
 
-	mm->cpu_vm_mask = cpumask_of_cpu(smp_processor_id());
+	cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
 	mm->context.id = asid;
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 510c179b0ac8..b30925fcbcdc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -36,7 +36,34 @@
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
 #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
 
+static u64 get_coherent_dma_mask(struct device *dev)
+{
+	u64 mask = ISA_DMA_THRESHOLD;
+
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			return 0;
+		}
+
+		if ((~mask) & ISA_DMA_THRESHOLD) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				 "than system GFP_DMA mask %#llx\n",
+				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+			return 0;
+		}
+	}
 
+	return mask;
+}
+
+#ifdef CONFIG_MMU
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
@@ -152,7 +179,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	struct page *page;
 	struct arm_vm_region *c;
 	unsigned long order;
-	u64 mask = ISA_DMA_THRESHOLD, limit;
+	u64 mask = get_coherent_dma_mask(dev);
+	u64 limit;
 
 	if (!consistent_pte[0]) {
 		printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -160,25 +188,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		return NULL;
 	}
 
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			goto no_page;
-		}
-
-		if ((~mask) & ISA_DMA_THRESHOLD) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				 "than system GFP_DMA mask %#llx\n",
-				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
-			goto no_page;
-		}
-	}
+	if (!mask)
+		goto no_page;
 
 	/*
 	 * Sanity check the allocation size.
@@ -267,6 +278,31 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	*handle = ~0;
 	return NULL;
 }
+#else	/* !CONFIG_MMU */
+static void *
+__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
+	    pgprot_t prot)
+{
+	void *virt;
+	u64 mask = get_coherent_dma_mask(dev);
+
+	if (!mask)
+		goto error;
+
+	if (mask != 0xffffffff)
+		gfp |= GFP_DMA;
+	virt = kmalloc(size, gfp);
+	if (!virt)
+		goto error;
+
+	*handle = virt_to_dma(dev, virt);
+	return virt;
+
+error:
+	*handle = ~0;
+	return NULL;
+}
+#endif	/* CONFIG_MMU */
 
 /*
  * Allocate DMA-coherent memory space and return both the kernel remapped
@@ -311,9 +347,10 @@ EXPORT_SYMBOL(dma_alloc_writecombine);
 static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
+	int ret = -ENXIO;
+#ifdef CONFIG_MMU
 	unsigned long flags, user_size, kern_size;
 	struct arm_vm_region *c;
-	int ret = -ENXIO;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
@@ -334,6 +371,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 				      vma->vm_page_prot);
 		}
 	}
+#endif	/* CONFIG_MMU */
 
 	return ret;
 }
@@ -358,6 +396,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
+#ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
 	struct arm_vm_region *c;
@@ -444,6 +483,14 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr
 			__func__, cpu_addr);
 		dump_stack();
 	}
+#else	/* !CONFIG_MMU */
+void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+{
+	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+		return;
+	kfree(cpu_addr);
+}
+#endif	/* CONFIG_MMU */
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*
@@ -451,10 +498,12 @@ EXPORT_SYMBOL(dma_free_coherent);
 */
 static int __init consistent_init(void)
 {
+	int ret = 0;
+#ifdef CONFIG_MMU
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	int ret = 0, i = 0;
+	int i = 0;
 	u32 base = CONSISTENT_BASE;
 
 	do {
@@ -477,6 +526,7 @@ static int __init consistent_init(void)
 		consistent_pte[i++] = pte;
 		base += (1 << PGDIR_SHIFT);
 	} while (base < CONSISTENT_END);
+#endif	/* !CONFIG_MMU */
 
 	return ret;
 }
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index bc0099d5ae85..d0d17b6a3703 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -153,14 +153,11 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 
 	page = pfn_to_page(pfn);
 	mapping = page_mapping(page);
-	if (mapping) {
 #ifndef CONFIG_SMP
-		int dirty = test_and_clear_bit(PG_dcache_dirty, &page->flags);
-
-		if (dirty)
-			__flush_dcache_page(mapping, page);
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(mapping, page);
 #endif
-
+	if (mapping) {
 		if (cache_is_vivt())
 			make_coherent(mapping, vma, addr, pfn);
 		else if (vma->vm_flags & VM_EXEC)
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 6fdcbb709827..10e06801afb3 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -16,6 +16,8 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/page-flags.h>
+#include <linux/sched.h>
+#include <linux/highmem.h>
 
 #include <asm/system.h>
 #include <asm/pgtable.h>
@@ -23,6 +25,20 @@
 
 #include "fault.h"
 
+/*
+ * Fault status register encodings. We steal bit 31 for our own purposes.
+ */
+#define FSR_LNX_PF		(1 << 31)
+#define FSR_WRITE		(1 << 11)
+#define FSR_FS4			(1 << 10)
+#define FSR_FS3_0		(15)
+
+static inline int fsr_fs(unsigned int fsr)
+{
+	return (fsr & FSR_FS3_0) | (fsr & FSR_FS4) >> 6;
+}
+
+#ifdef CONFIG_MMU
 
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
@@ -97,6 +113,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 
 	printk("\n");
 }
+#else	/* CONFIG_MMU */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{ }
+#endif	/* CONFIG_MMU */
 
 /*
  * Oops. The kernel tried to access some page that wasn't present.
@@ -171,21 +191,39 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		__do_kernel_fault(mm, addr, fsr, regs);
 }
 
+#ifdef CONFIG_MMU
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
-static int
+/*
+ * Check that the permissions on the VMA allow for the fault which occurred.
+ * If we encountered a write fault, we must have write permission, otherwise
+ * we allow any permission.
+ */
+static inline bool access_error(unsigned int fsr, struct vm_area_struct *vma)
+{
+	unsigned int mask = VM_READ | VM_WRITE | VM_EXEC;
+
+	if (fsr & FSR_WRITE)
+		mask = VM_WRITE;
+	if (fsr & FSR_LNX_PF)
+		mask = VM_EXEC;
+
+	return vma->vm_flags & mask ? false : true;
+}
+
+static int __kprobes
 __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 		struct task_struct *tsk)
 {
 	struct vm_area_struct *vma;
-	int fault, mask;
+	int fault;
 
 	vma = find_vma(mm, addr);
 	fault = VM_FAULT_BADMAP;
-	if (!vma)
+	if (unlikely(!vma))
 		goto out;
-	if (vma->vm_start > addr)
+	if (unlikely(vma->vm_start > addr))
 		goto check_stack;
 
 	/*
@@ -193,47 +231,24 @@ __do_page_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	 * memory access, so we can handle it.
 	 */
 good_area:
-	if (fsr & (1 << 11)) /* write? */
-		mask = VM_WRITE;
-	else
-		mask = VM_READ|VM_EXEC|VM_WRITE;
-
-	fault = VM_FAULT_BADACCESS;
-	if (!(vma->vm_flags & mask))
+	if (access_error(fsr, vma)) {
+		fault = VM_FAULT_BADACCESS;
 		goto out;
+	}
 
 	/*
-	 * If for any reason at all we couldn't handle
-	 * the fault, make sure we exit gracefully rather
-	 * than endlessly redo the fault.
+	 * If for any reason at all we couldn't handle the fault, make
+	 * sure we exit gracefully rather than endlessly redo the fault.
 	 */
-survive:
-	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & (1 << 11)) ? FAULT_FLAG_WRITE : 0);
-	if (unlikely(fault & VM_FAULT_ERROR)) {
-		if (fault & VM_FAULT_OOM)
-			goto out_of_memory;
-		else if (fault & VM_FAULT_SIGBUS)
-			return fault;
-		BUG();
-	}
+	fault = handle_mm_fault(mm, vma, addr & PAGE_MASK, (fsr & FSR_WRITE) ? FAULT_FLAG_WRITE : 0);
+	if (unlikely(fault & VM_FAULT_ERROR))
+		return fault;
 	if (fault & VM_FAULT_MAJOR)
 		tsk->maj_flt++;
 	else
 		tsk->min_flt++;
 	return fault;
 
-out_of_memory:
-	if (!is_global_init(tsk))
-		goto out;
-
-	/*
-	 * If we are out of memory for pid1, sleep for a while and retry
-	 */
-	up_read(&mm->mmap_sem);
-	yield();
-	down_read(&mm->mmap_sem);
-	goto survive;
-
 check_stack:
 	if (vma->vm_flags & VM_GROWSDOWN && !expand_stack(vma, addr))
 		goto good_area;
@@ -270,6 +285,18 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		if (!user_mode(regs) && !search_exception_tables(regs->ARM_pc))
 			goto no_context;
 		down_read(&mm->mmap_sem);
+	} else {
+		/*
+		 * The above down_read_trylock() might have succeeded in
+		 * which case, we'll have missed the might_sleep() from
+		 * down_read()
+		 */
+		might_sleep();
+#ifdef CONFIG_DEBUG_VM
+		if (!user_mode(regs) &&
+		    !search_exception_tables(regs->ARM_pc))
+			goto no_context;
+#endif
 	}
 
 	fault = __do_page_fault(mm, addr, fsr, tsk);
@@ -281,6 +308,16 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
 		return 0;
 
+	if (fault & VM_FAULT_OOM) {
+		/*
+		 * We ran out of memory, call the OOM killer, and return to
+		 * userspace (which will retry the fault, or kill us if we
+		 * got oom-killed)
+		 */
+		pagefault_out_of_memory();
+		return 0;
+	}
+
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
@@ -288,16 +325,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!user_mode(regs))
 		goto no_context;
 
-	if (fault & VM_FAULT_OOM) {
-		/*
-		 * We ran out of memory, or some other thing
-		 * happened to us that made us unable to handle
-		 * the page fault gracefully.
-		 */
-		printk("VM: killing process %s\n", tsk->comm);
-		do_group_exit(SIGKILL);
-		return 0;
-	}
 	if (fault & VM_FAULT_SIGBUS) {
 		/*
 		 * We had some memory, but were unable to
@@ -322,6 +349,13 @@ no_context:
 	__do_kernel_fault(mm, addr, fsr, regs);
 	return 0;
 }
+#else	/* CONFIG_MMU */
+static int
+do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+	return 0;
+}
+#endif	/* CONFIG_MMU */
 
 /*
  * First Level Translation Fault Handler
@@ -340,6 +374,7 @@ no_context:
 * interrupt or a critical region, and should only copy the information
 * from the master page table, nothing more.
 */
+#ifdef CONFIG_MMU
 static int __kprobes
 do_translation_fault(unsigned long addr, unsigned int fsr,
 		     struct pt_regs *regs)
@@ -378,6 +413,14 @@ bad_area:
 	do_bad_area(addr, fsr, regs);
 	return 0;
 }
+#else	/* CONFIG_MMU */
+static int
+do_translation_fault(unsigned long addr, unsigned int fsr,
+		     struct pt_regs *regs)
+{
+	return 0;
+}
+#endif	/* CONFIG_MMU */
 
 /*
  * Some section permission faults need to be handled gracefully.
@@ -465,10 +508,10 @@ hook_fault_code(int nr, int (*fn)(unsigned long, unsigned int, struct pt_regs *)
 asmlinkage void __exception
 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
-	const struct fsr_info *inf = fsr_info + (fsr & 15) + ((fsr & (1 << 10)) >> 6);
+	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
 	struct siginfo info;
 
-	if (!inf->fn(addr, fsr, regs))
+	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
 	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
@@ -481,9 +524,58 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	arm_notify_die("", regs, &info, fsr, 0);
 }
 
+
+static struct fsr_info ifsr_info[] = {
+	{ do_bad,		SIGBUS,  0, "unknown 0" },
+	{ do_bad,		SIGBUS,  0, "unknown 1" },
+	{ do_bad,		SIGBUS,  0, "debug event" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR, "section access flag fault" },
+	{ do_bad,		SIGBUS,  0, "unknown 4" },
+	{ do_translation_fault,	SIGSEGV, SEGV_MAPERR, "section translation fault" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR, "page access flag fault" },
+	{ do_page_fault,	SIGSEGV, SEGV_MAPERR, "page translation fault" },
+	{ do_bad,		SIGBUS,  0, "external abort on non-linefetch" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR, "section domain fault" },
+	{ do_bad,		SIGBUS,  0, "unknown 10" },
+	{ do_bad,		SIGSEGV, SEGV_ACCERR, "page domain fault" },
+	{ do_bad,		SIGBUS,  0, "external abort on translation" },
+	{ do_sect_fault,	SIGSEGV, SEGV_ACCERR, "section permission fault" },
+	{ do_bad,		SIGBUS,  0, "external abort on translation" },
+	{ do_page_fault,	SIGSEGV, SEGV_ACCERR, "page permission fault" },
+	{ do_bad,		SIGBUS,  0, "unknown 16" },
+	{ do_bad,		SIGBUS,  0, "unknown 17" },
+	{ do_bad,		SIGBUS,  0, "unknown 18" },
+	{ do_bad,		SIGBUS,  0, "unknown 19" },
+	{ do_bad,		SIGBUS,  0, "unknown 20" },
+	{ do_bad,		SIGBUS,  0, "unknown 21" },
+	{ do_bad,		SIGBUS,  0, "unknown 22" },
+	{ do_bad,		SIGBUS,  0, "unknown 23" },
+	{ do_bad,		SIGBUS,  0, "unknown 24" },
+	{ do_bad,		SIGBUS,  0, "unknown 25" },
+	{ do_bad,		SIGBUS,  0, "unknown 26" },
+	{ do_bad,		SIGBUS,  0, "unknown 27" },
+	{ do_bad,		SIGBUS,  0, "unknown 28" },
+	{ do_bad,		SIGBUS,  0, "unknown 29" },
+	{ do_bad,		SIGBUS,  0, "unknown 30" },
+	{ do_bad,		SIGBUS,  0, "unknown 31" },
+};
+
 asmlinkage void __exception
-do_PrefetchAbort(unsigned long addr, struct pt_regs *regs)
+do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
-	do_translation_fault(addr, 0, regs);
+	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+	struct siginfo info;
+
+	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
+		return;
+
+	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
+		inf->name, ifsr, addr);
+
+	info.si_signo = inf->sig;
+	info.si_errno = 0;
+	info.si_code  = inf->code;
+	info.si_addr  = (void __user *)addr;
+	arm_notify_die("", regs, &info, ifsr, 0);
 }
 
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index c07222eb5ce0..b27942909b23 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -50,7 +50,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
 			__cpuc_flush_user_all();
 		return;
 	}
@@ -73,7 +73,7 @@ void flush_cache_mm(struct mm_struct *mm)
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask))
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
 			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
 						vma->vm_flags);
 		return;
@@ -97,7 +97,7 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 			unsigned long addr = user_addr & PAGE_MASK;
 			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
 		}
@@ -113,7 +113,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask)) {
+		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
 			unsigned long addr = (unsigned long)kaddr;
 			__cpuc_coherent_kern_range(addr, addr + len);
 		}
@@ -126,7 +126,7 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 	}
 
 	/* VIPT non-aliasing cache */
-	if (cpu_isset(smp_processor_id(), vma->vm_mm->cpu_vm_mask) &&
+	if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)) &&
 	    vma->vm_flags & VM_EXEC) {
 		unsigned long addr = (unsigned long)kaddr;
 		/* only flushing the kernel mapping on non-aliasing VIPT */
@@ -144,7 +144,14 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * page. This ensures that data in the physical page is mutually
 	 * coherent with the kernels mapping.
 	 */
-	__cpuc_flush_dcache_page(page_address(page));
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * kmap_atomic() doesn't set the page virtual address, and
+	 * kunmap_atomic() takes care of cache flushing already.
+	 */
+	if (page_address(page))
+#endif
+		__cpuc_flush_dcache_page(page_address(page));
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index a34954d9df7d..30f82fb5918c 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -40,11 +40,18 @@ void *kmap_atomic(struct page *page, enum km_type type)
 {
 	unsigned int idx;
 	unsigned long vaddr;
+	void *kmap;
 
 	pagefault_disable();
 	if (!PageHighMem(page))
 		return page_address(page);
 
+	debug_kmap_atomic(type);
+
+	kmap = kmap_high_get(page);
+	if (kmap)
+		return kmap;
+
 	idx = type + KM_TYPE_NR * smp_processor_id();
 	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
 #ifdef CONFIG_DEBUG_HIGHMEM
@@ -80,6 +87,9 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
 #else
 		(void) idx;  /* to kill a warning */
 #endif
+	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
+		/* this address was obtained through kmap_high_get() */
+		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
 	}
 	pagefault_enable();
 }
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c index 3a7279c1ce5e..40940d7ce4ff 100644 --- a/arch/arm/mm/init.c +++ b/arch/arm/mm/init.c | |||
| @@ -15,6 +15,7 @@ | |||
| 15 | #include <linux/mman.h> | 15 | #include <linux/mman.h> |
| 16 | #include <linux/nodemask.h> | 16 | #include <linux/nodemask.h> |
| 17 | #include <linux/initrd.h> | 17 | #include <linux/initrd.h> |
| 18 | #include <linux/sort.h> | ||
| 18 | #include <linux/highmem.h> | 19 | #include <linux/highmem.h> |
| 19 | 20 | ||
| 20 | #include <asm/mach-types.h> | 21 | #include <asm/mach-types.h> |
| @@ -349,12 +350,43 @@ static void __init bootmem_free_node(int node, struct meminfo *mi) | |||
| 349 | free_area_init_node(node, zone_size, min, zhole_size); | 350 | free_area_init_node(node, zone_size, min, zhole_size); |
| 350 | } | 351 | } |
| 351 | 352 | ||
| 353 | #ifndef CONFIG_SPARSEMEM | ||
| 354 | int pfn_valid(unsigned long pfn) | ||
| 355 | { | ||
| 356 | struct meminfo *mi = &meminfo; | ||
| 357 | unsigned int left = 0, right = mi->nr_banks; | ||
| 358 | |||
| 359 | do { | ||
| 360 | unsigned int mid = (right + left) / 2; | ||
| 361 | struct membank *bank = &mi->bank[mid]; | ||
| 362 | |||
| 363 | if (pfn < bank_pfn_start(bank)) | ||
| 364 | right = mid; | ||
| 365 | else if (pfn >= bank_pfn_end(bank)) | ||
| 366 | left = mid + 1; | ||
| 367 | else | ||
| 368 | return 1; | ||
| 369 | } while (left < right); | ||
| 370 | return 0; | ||
| 371 | } | ||
| 372 | EXPORT_SYMBOL(pfn_valid); | ||
| 373 | #endif | ||
| 374 | |||
| 375 | static int __init meminfo_cmp(const void *_a, const void *_b) | ||
| 376 | { | ||
| 377 | const struct membank *a = _a, *b = _b; | ||
| 378 | long cmp = bank_pfn_start(a) - bank_pfn_start(b); | ||
| 379 | return cmp < 0 ? -1 : cmp > 0 ? 1 : 0; | ||
| 380 | } | ||
| 381 | |||
| 352 | void __init bootmem_init(void) | 382 | void __init bootmem_init(void) |
| 353 | { | 383 | { |
| 354 | struct meminfo *mi = &meminfo; | 384 | struct meminfo *mi = &meminfo; |
| 355 | unsigned long min, max_low, max_high; | 385 | unsigned long min, max_low, max_high; |
| 356 | int node, initrd_node; | 386 | int node, initrd_node; |
| 357 | 387 | ||
| 388 | sort(&mi->bank, mi->nr_banks, sizeof(mi->bank[0]), meminfo_cmp, NULL); | ||
| 389 | |||
| 358 | /* | 390 | /* |
| 359 | * Locate which node contains the ramdisk image, if any. | 391 | * Locate which node contains the ramdisk image, if any. |
| 360 | */ | 392 | */ |
| @@ -451,7 +483,7 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn) | |||
| 451 | /* | 483 | /* |
| 452 | * Convert start_pfn/end_pfn to a struct page pointer. | 484 | * Convert start_pfn/end_pfn to a struct page pointer. |
| 453 | */ | 485 | */ |
| 454 | start_pg = pfn_to_page(start_pfn); | 486 | start_pg = pfn_to_page(start_pfn - 1) + 1; |
| 455 | end_pg = pfn_to_page(end_pfn); | 487 | end_pg = pfn_to_page(end_pfn); |
| 456 | 488 | ||
| 457 | /* | 489 | /* |
| @@ -564,8 +596,8 @@ void __init mem_init(void) | |||
| 564 | 596 | ||
| 565 | printk(KERN_NOTICE "Memory: %luKB available (%dK code, " | 597 | printk(KERN_NOTICE "Memory: %luKB available (%dK code, " |
| 566 | "%dK data, %dK init, %luK highmem)\n", | 598 | "%dK data, %dK init, %luK highmem)\n", |
| 567 | (unsigned long) nr_free_pages() << (PAGE_SHIFT-10), | 599 | nr_free_pages() << (PAGE_SHIFT-10), codesize >> 10, |
| 568 | codesize >> 10, datasize >> 10, initsize >> 10, | 600 | datasize >> 10, initsize >> 10, |
| 569 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); | 601 | (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))); |
| 570 | 602 | ||
| 571 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { | 603 | if (PAGE_SIZE >= 16384 && num_physpages <= 128) { |
| @@ -581,6 +613,14 @@ void __init mem_init(void) | |||
| 581 | 613 | ||
| 582 | void free_initmem(void) | 614 | void free_initmem(void) |
| 583 | { | 615 | { |
| 616 | #ifdef CONFIG_HAVE_TCM | ||
| 617 | extern char *__tcm_start, *__tcm_end; | ||
| 618 | |||
| 619 | totalram_pages += free_area(__phys_to_pfn(__pa(__tcm_start)), | ||
| 620 | __phys_to_pfn(__pa(__tcm_end)), | ||
| 621 | "TCM link"); | ||
| 622 | #endif | ||
| 623 | |||
| 584 | if (!machine_is_integrator() && !machine_is_cintegrator()) | 624 | if (!machine_is_integrator() && !machine_is_cintegrator()) |
| 585 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), | 625 | totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)), |
| 586 | __phys_to_pfn(__pa(__init_end)), | 626 | __phys_to_pfn(__pa(__init_end)), |
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c index f7457fea6de8..2b7996401b0f 100644 --- a/arch/arm/mm/mmap.c +++ b/arch/arm/mm/mmap.c | |||
| @@ -124,7 +124,7 @@ int valid_phys_addr_range(unsigned long addr, size_t size) | |||
| 124 | { | 124 | { |
| 125 | if (addr < PHYS_OFFSET) | 125 | if (addr < PHYS_OFFSET) |
| 126 | return 0; | 126 | return 0; |
| 127 | if (addr + size >= __pa(high_memory - 1)) | 127 | if (addr + size > __pa(high_memory - 1) + 1) |
| 128 | return 0; | 128 | return 0; |
| 129 | 129 | ||
| 130 | return 1; | 130 | return 1; |
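
In the mmap.c hunk above, addr + size is an exclusive end and __pa(high_memory - 1) is the last valid physical byte, so the corrected test accepts a range that runs right up to the top of lowmem while the old test rejected it. A small arithmetic check, with a made-up memory top, illustrates the boundary:

/*
 * Boundary check for the valid_phys_addr_range() fix. "last" stands in for
 * __pa(high_memory - 1) and is a hypothetical value; addr + size is an
 * exclusive end, so a range ending exactly at last + 1 should be allowed.
 */
#include <stdio.h>

int main(void)
{
	unsigned long last = 0x2fffffffUL;      /* hypothetical last valid physical byte */
	unsigned long addr = 0x2ffff000UL, size = 0x1000;

	int old_ok = !(addr + size >= last);    /* old test: rejects this range */
	int new_ok = !(addr + size > last + 1); /* new test: accepts it         */

	printf("range [%#lx, %#lx): old=%d new=%d\n", addr, addr + size, old_ok, new_ok);
	return 0;
}
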
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c index 4426ee67ceca..02243eeccf50 100644 --- a/arch/arm/mm/mmu.c +++ b/arch/arm/mm/mmu.c | |||
| @@ -21,6 +21,7 @@ | |||
| 21 | #include <asm/cachetype.h> | 21 | #include <asm/cachetype.h> |
| 22 | #include <asm/setup.h> | 22 | #include <asm/setup.h> |
| 23 | #include <asm/sizes.h> | 23 | #include <asm/sizes.h> |
| 24 | #include <asm/smp_plat.h> | ||
| 24 | #include <asm/tlb.h> | 25 | #include <asm/tlb.h> |
| 25 | #include <asm/highmem.h> | 26 | #include <asm/highmem.h> |
| 26 | 27 | ||
| @@ -709,10 +710,6 @@ static void __init sanity_check_meminfo(void) | |||
| 709 | if (meminfo.nr_banks >= NR_BANKS) { | 710 | if (meminfo.nr_banks >= NR_BANKS) { |
| 710 | printk(KERN_CRIT "NR_BANKS too low, " | 711 | printk(KERN_CRIT "NR_BANKS too low, " |
| 711 | "ignoring high memory\n"); | 712 | "ignoring high memory\n"); |
| 712 | } else if (cache_is_vipt_aliasing()) { | ||
| 713 | printk(KERN_CRIT "HIGHMEM is not yet supported " | ||
| 714 | "with VIPT aliasing cache, " | ||
| 715 | "ignoring high memory\n"); | ||
| 716 | } else { | 713 | } else { |
| 717 | memmove(bank + 1, bank, | 714 | memmove(bank + 1, bank, |
| 718 | (meminfo.nr_banks - i) * sizeof(*bank)); | 715 | (meminfo.nr_banks - i) * sizeof(*bank)); |
| @@ -726,6 +723,8 @@ static void __init sanity_check_meminfo(void) | |||
| 726 | bank->size = VMALLOC_MIN - __va(bank->start); | 723 | bank->size = VMALLOC_MIN - __va(bank->start); |
| 727 | } | 724 | } |
| 728 | #else | 725 | #else |
| 726 | bank->highmem = highmem; | ||
| 727 | |||
| 729 | /* | 728 | /* |
| 730 | * Check whether this memory bank would entirely overlap | 729 | * Check whether this memory bank would entirely overlap |
| 731 | * the vmalloc area. | 730 | * the vmalloc area. |
| @@ -754,6 +753,38 @@ static void __init sanity_check_meminfo(void) | |||
| 754 | #endif | 753 | #endif |
| 755 | j++; | 754 | j++; |
| 756 | } | 755 | } |
| 756 | #ifdef CONFIG_HIGHMEM | ||
| 757 | if (highmem) { | ||
| 758 | const char *reason = NULL; | ||
| 759 | |||
| 760 | if (cache_is_vipt_aliasing()) { | ||
| 761 | /* | ||
| 762 | * Interactions between kmap and other mappings | ||
| 763 | * make highmem support with aliasing VIPT caches | ||
| 764 | * rather difficult. | ||
| 765 | */ | ||
| 766 | reason = "with VIPT aliasing cache"; | ||
| 767 | #ifdef CONFIG_SMP | ||
| 768 | } else if (tlb_ops_need_broadcast()) { | ||
| 769 | /* | ||
| 770 | * kmap_high needs to occasionally flush TLB entries, | ||
| 771 | * however, if the TLB entries need to be broadcast | ||
| 772 | * we may deadlock: | ||
| 773 | * kmap_high(irqs off)->flush_all_zero_pkmaps-> | ||
| 774 | * flush_tlb_kernel_range->smp_call_function_many | ||
| 775 | * (must not be called with irqs off) | ||
| 776 | */ | ||
| 777 | reason = "without hardware TLB ops broadcasting"; | ||
| 778 | #endif | ||
| 779 | } | ||
| 780 | if (reason) { | ||
| 781 | printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n", | ||
| 782 | reason); | ||
| 783 | while (j > 0 && meminfo.bank[j - 1].highmem) | ||
| 784 | j--; | ||
| 785 | } | ||
| 786 | } | ||
| 787 | #endif | ||
| 757 | meminfo.nr_banks = j; | 788 | meminfo.nr_banks = j; |
| 758 | } | 789 | } |
| 759 | 790 | ||
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c index ad7bacc693b2..900811cc9130 100644 --- a/arch/arm/mm/nommu.c +++ b/arch/arm/mm/nommu.c | |||
| @@ -12,6 +12,7 @@ | |||
| 12 | #include <asm/cacheflush.h> | 12 | #include <asm/cacheflush.h> |
| 13 | #include <asm/sections.h> | 13 | #include <asm/sections.h> |
| 14 | #include <asm/page.h> | 14 | #include <asm/page.h> |
| 15 | #include <asm/setup.h> | ||
| 15 | #include <asm/mach/arch.h> | 16 | #include <asm/mach/arch.h> |
| 16 | 17 | ||
| 17 | #include "mm.h" | 18 | #include "mm.h" |
diff --git a/arch/arm/mm/pabort-legacy.S b/arch/arm/mm/pabort-legacy.S new file mode 100644 index 000000000000..87970eba88ea --- /dev/null +++ b/arch/arm/mm/pabort-legacy.S | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | #include <linux/linkage.h> | ||
| 2 | #include <asm/assembler.h> | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Function: legacy_pabort | ||
| 6 | * | ||
| 7 | * Params : r0 = address of aborted instruction | ||
| 8 | * | ||
| 9 | * Returns : r0 = address of abort | ||
| 10 | * : r1 = Simulated IFSR with section translation fault status | ||
| 11 | * | ||
| 12 | * Purpose : obtain information about current prefetch abort. | ||
| 13 | */ | ||
| 14 | |||
| 15 | .align 5 | ||
| 16 | ENTRY(legacy_pabort) | ||
| 17 | mov r1, #5 | ||
| 18 | mov pc, lr | ||
| 19 | ENDPROC(legacy_pabort) | ||
diff --git a/arch/arm/mm/pabort-v6.S b/arch/arm/mm/pabort-v6.S new file mode 100644 index 000000000000..06e3d1ef2115 --- /dev/null +++ b/arch/arm/mm/pabort-v6.S | |||
| @@ -0,0 +1,19 @@ | |||
| 1 | #include <linux/linkage.h> | ||
| 2 | #include <asm/assembler.h> | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Function: v6_pabort | ||
| 6 | * | ||
| 7 | * Params : r0 = address of aborted instruction | ||
| 8 | * | ||
| 9 | * Returns : r0 = address of abort | ||
| 10 | * : r1 = IFSR | ||
| 11 | * | ||
| 12 | * Purpose : obtain information about current prefetch abort. | ||
| 13 | */ | ||
| 14 | |||
| 15 | .align 5 | ||
| 16 | ENTRY(v6_pabort) | ||
| 17 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR | ||
| 18 | mov pc, lr | ||
| 19 | ENDPROC(v6_pabort) | ||
diff --git a/arch/arm/mm/pabort-v7.S b/arch/arm/mm/pabort-v7.S new file mode 100644 index 000000000000..a8b3b300a18d --- /dev/null +++ b/arch/arm/mm/pabort-v7.S | |||
| @@ -0,0 +1,20 @@ | |||
| 1 | #include <linux/linkage.h> | ||
| 2 | #include <asm/assembler.h> | ||
| 3 | |||
| 4 | /* | ||
| 5 | * Function: v7_pabort | ||
| 6 | * | ||
| 7 | * Params : r0 = address of aborted instruction | ||
| 8 | * | ||
| 9 | * Returns : r0 = address of abort | ||
| 10 | * : r1 = IFSR | ||
| 11 | * | ||
| 12 | * Purpose : obtain information about current prefetch abort. | ||
| 13 | */ | ||
| 14 | |||
| 15 | .align 5 | ||
| 16 | ENTRY(v7_pabort) | ||
| 17 | mrc p15, 0, r0, c6, c0, 2 @ get IFAR | ||
| 18 | mrc p15, 0, r1, c5, c0, 1 @ get IFSR | ||
| 19 | mov pc, lr | ||
| 20 | ENDPROC(v7_pabort) | ||
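
The three pabort-*.S files above provide per-architecture prefetch-abort helpers that hand back the aborted address and an IFSR value (legacy cores, having no IFSR, fake a section translation fault status of 5), and the processor_functions tables in the remaining hunks each select the matching entry. A rough C model of that table-driven dispatch, with made-up register values standing in for the real CP15 reads:

/*
 * C model (not the kernel's actual structures) of how the prefetch-abort
 * entry is chosen: each CPU family fills one slot of a function table, and
 * the abort path calls through it to obtain (address, IFSR).
 */
#include <stdio.h>

struct pabort_info { unsigned long addr, ifsr; };

typedef struct pabort_info (*pabort_fn)(unsigned long aborted_pc);

/* Legacy cores have no IFSR/IFAR: report the PC and a faked status of 5. */
static struct pabort_info legacy_pabort(unsigned long pc)
{
	return (struct pabort_info){ .addr = pc, .ifsr = 5 };
}

/* v7 cores read IFAR and IFSR from CP15; the values here are made up. */
static struct pabort_info v7_pabort(unsigned long pc)
{
	(void)pc;
	return (struct pabort_info){ .addr = 0x8010beefUL, .ifsr = 0x0d };
}

int main(void)
{
	pabort_fn table[] = { legacy_pabort, v7_pabort };

	for (int i = 0; i < 2; i++) {
		struct pabort_info info = table[i](0x80100040UL);
		printf("handler %d: addr=%#lx ifsr=%#lx\n", i, info.addr, info.ifsr);
	}
	return 0;
}
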
diff --git a/arch/arm/mm/proc-arm1020.S b/arch/arm/mm/proc-arm1020.S index b5551bf010aa..d9fb4b98c49f 100644 --- a/arch/arm/mm/proc-arm1020.S +++ b/arch/arm/mm/proc-arm1020.S | |||
| @@ -449,7 +449,7 @@ arm1020_crval: | |||
| 449 | .type arm1020_processor_functions, #object | 449 | .type arm1020_processor_functions, #object |
| 450 | arm1020_processor_functions: | 450 | arm1020_processor_functions: |
| 451 | .word v4t_early_abort | 451 | .word v4t_early_abort |
| 452 | .word pabort_noifar | 452 | .word legacy_pabort |
| 453 | .word cpu_arm1020_proc_init | 453 | .word cpu_arm1020_proc_init |
| 454 | .word cpu_arm1020_proc_fin | 454 | .word cpu_arm1020_proc_fin |
| 455 | .word cpu_arm1020_reset | 455 | .word cpu_arm1020_reset |
diff --git a/arch/arm/mm/proc-arm1020e.S b/arch/arm/mm/proc-arm1020e.S index 8bc6740c29eb..7453b75dcea5 100644 --- a/arch/arm/mm/proc-arm1020e.S +++ b/arch/arm/mm/proc-arm1020e.S | |||
| @@ -430,7 +430,7 @@ arm1020e_crval: | |||
| 430 | .type arm1020e_processor_functions, #object | 430 | .type arm1020e_processor_functions, #object |
| 431 | arm1020e_processor_functions: | 431 | arm1020e_processor_functions: |
| 432 | .word v4t_early_abort | 432 | .word v4t_early_abort |
| 433 | .word pabort_noifar | 433 | .word legacy_pabort |
| 434 | .word cpu_arm1020e_proc_init | 434 | .word cpu_arm1020e_proc_init |
| 435 | .word cpu_arm1020e_proc_fin | 435 | .word cpu_arm1020e_proc_fin |
| 436 | .word cpu_arm1020e_reset | 436 | .word cpu_arm1020e_reset |
diff --git a/arch/arm/mm/proc-arm1022.S b/arch/arm/mm/proc-arm1022.S index 2cd03e66c0a3..8eb72d75a8b6 100644 --- a/arch/arm/mm/proc-arm1022.S +++ b/arch/arm/mm/proc-arm1022.S | |||
| @@ -413,7 +413,7 @@ arm1022_crval: | |||
| 413 | .type arm1022_processor_functions, #object | 413 | .type arm1022_processor_functions, #object |
| 414 | arm1022_processor_functions: | 414 | arm1022_processor_functions: |
| 415 | .word v4t_early_abort | 415 | .word v4t_early_abort |
| 416 | .word pabort_noifar | 416 | .word legacy_pabort |
| 417 | .word cpu_arm1022_proc_init | 417 | .word cpu_arm1022_proc_init |
| 418 | .word cpu_arm1022_proc_fin | 418 | .word cpu_arm1022_proc_fin |
| 419 | .word cpu_arm1022_reset | 419 | .word cpu_arm1022_reset |
diff --git a/arch/arm/mm/proc-arm1026.S b/arch/arm/mm/proc-arm1026.S index ad961a897f6e..3b59f0d67139 100644 --- a/arch/arm/mm/proc-arm1026.S +++ b/arch/arm/mm/proc-arm1026.S | |||
| @@ -408,7 +408,7 @@ arm1026_crval: | |||
| 408 | .type arm1026_processor_functions, #object | 408 | .type arm1026_processor_functions, #object |
| 409 | arm1026_processor_functions: | 409 | arm1026_processor_functions: |
| 410 | .word v5t_early_abort | 410 | .word v5t_early_abort |
| 411 | .word pabort_noifar | 411 | .word legacy_pabort |
| 412 | .word cpu_arm1026_proc_init | 412 | .word cpu_arm1026_proc_init |
| 413 | .word cpu_arm1026_proc_fin | 413 | .word cpu_arm1026_proc_fin |
| 414 | .word cpu_arm1026_reset | 414 | .word cpu_arm1026_reset |
diff --git a/arch/arm/mm/proc-arm6_7.S b/arch/arm/mm/proc-arm6_7.S index 80d6e1de069a..3f9cd3d8f6d5 100644 --- a/arch/arm/mm/proc-arm6_7.S +++ b/arch/arm/mm/proc-arm6_7.S | |||
| @@ -278,7 +278,7 @@ __arm7_setup: mov r0, #0 | |||
| 278 | .type arm6_processor_functions, #object | 278 | .type arm6_processor_functions, #object |
| 279 | ENTRY(arm6_processor_functions) | 279 | ENTRY(arm6_processor_functions) |
| 280 | .word cpu_arm6_data_abort | 280 | .word cpu_arm6_data_abort |
| 281 | .word pabort_noifar | 281 | .word legacy_pabort |
| 282 | .word cpu_arm6_proc_init | 282 | .word cpu_arm6_proc_init |
| 283 | .word cpu_arm6_proc_fin | 283 | .word cpu_arm6_proc_fin |
| 284 | .word cpu_arm6_reset | 284 | .word cpu_arm6_reset |
| @@ -295,7 +295,7 @@ ENTRY(arm6_processor_functions) | |||
| 295 | .type arm7_processor_functions, #object | 295 | .type arm7_processor_functions, #object |
| 296 | ENTRY(arm7_processor_functions) | 296 | ENTRY(arm7_processor_functions) |
| 297 | .word cpu_arm7_data_abort | 297 | .word cpu_arm7_data_abort |
| 298 | .word pabort_noifar | 298 | .word legacy_pabort |
| 299 | .word cpu_arm7_proc_init | 299 | .word cpu_arm7_proc_init |
| 300 | .word cpu_arm7_proc_fin | 300 | .word cpu_arm7_proc_fin |
| 301 | .word cpu_arm7_reset | 301 | .word cpu_arm7_reset |
diff --git a/arch/arm/mm/proc-arm720.S b/arch/arm/mm/proc-arm720.S index 85ae18695f10..0b62de244666 100644 --- a/arch/arm/mm/proc-arm720.S +++ b/arch/arm/mm/proc-arm720.S | |||
| @@ -181,7 +181,7 @@ arm720_crval: | |||
| 181 | .type arm720_processor_functions, #object | 181 | .type arm720_processor_functions, #object |
| 182 | ENTRY(arm720_processor_functions) | 182 | ENTRY(arm720_processor_functions) |
| 183 | .word v4t_late_abort | 183 | .word v4t_late_abort |
| 184 | .word pabort_noifar | 184 | .word legacy_pabort |
| 185 | .word cpu_arm720_proc_init | 185 | .word cpu_arm720_proc_init |
| 186 | .word cpu_arm720_proc_fin | 186 | .word cpu_arm720_proc_fin |
| 187 | .word cpu_arm720_reset | 187 | .word cpu_arm720_reset |
diff --git a/arch/arm/mm/proc-arm740.S b/arch/arm/mm/proc-arm740.S index 4f95bee63e95..01860cdeb2ec 100644 --- a/arch/arm/mm/proc-arm740.S +++ b/arch/arm/mm/proc-arm740.S | |||
| @@ -126,7 +126,7 @@ __arm740_setup: | |||
| 126 | .type arm740_processor_functions, #object | 126 | .type arm740_processor_functions, #object |
| 127 | ENTRY(arm740_processor_functions) | 127 | ENTRY(arm740_processor_functions) |
| 128 | .word v4t_late_abort | 128 | .word v4t_late_abort |
| 129 | .word pabort_noifar | 129 | .word legacy_pabort |
| 130 | .word cpu_arm740_proc_init | 130 | .word cpu_arm740_proc_init |
| 131 | .word cpu_arm740_proc_fin | 131 | .word cpu_arm740_proc_fin |
| 132 | .word cpu_arm740_reset | 132 | .word cpu_arm740_reset |
diff --git a/arch/arm/mm/proc-arm7tdmi.S b/arch/arm/mm/proc-arm7tdmi.S index 93e05fa7bed4..1201b9863829 100644 --- a/arch/arm/mm/proc-arm7tdmi.S +++ b/arch/arm/mm/proc-arm7tdmi.S | |||
| @@ -64,7 +64,7 @@ __arm7tdmi_setup: | |||
| 64 | .type arm7tdmi_processor_functions, #object | 64 | .type arm7tdmi_processor_functions, #object |
| 65 | ENTRY(arm7tdmi_processor_functions) | 65 | ENTRY(arm7tdmi_processor_functions) |
| 66 | .word v4t_late_abort | 66 | .word v4t_late_abort |
| 67 | .word pabort_noifar | 67 | .word legacy_pabort |
| 68 | .word cpu_arm7tdmi_proc_init | 68 | .word cpu_arm7tdmi_proc_init |
| 69 | .word cpu_arm7tdmi_proc_fin | 69 | .word cpu_arm7tdmi_proc_fin |
| 70 | .word cpu_arm7tdmi_reset | 70 | .word cpu_arm7tdmi_reset |
diff --git a/arch/arm/mm/proc-arm920.S b/arch/arm/mm/proc-arm920.S index 914d688394fc..2b7c197cc58d 100644 --- a/arch/arm/mm/proc-arm920.S +++ b/arch/arm/mm/proc-arm920.S | |||
| @@ -395,7 +395,7 @@ arm920_crval: | |||
| 395 | .type arm920_processor_functions, #object | 395 | .type arm920_processor_functions, #object |
| 396 | arm920_processor_functions: | 396 | arm920_processor_functions: |
| 397 | .word v4t_early_abort | 397 | .word v4t_early_abort |
| 398 | .word pabort_noifar | 398 | .word legacy_pabort |
| 399 | .word cpu_arm920_proc_init | 399 | .word cpu_arm920_proc_init |
| 400 | .word cpu_arm920_proc_fin | 400 | .word cpu_arm920_proc_fin |
| 401 | .word cpu_arm920_reset | 401 | .word cpu_arm920_reset |
diff --git a/arch/arm/mm/proc-arm922.S b/arch/arm/mm/proc-arm922.S index 51c9c9859e58..06a1aa4e3398 100644 --- a/arch/arm/mm/proc-arm922.S +++ b/arch/arm/mm/proc-arm922.S | |||
| @@ -399,7 +399,7 @@ arm922_crval: | |||
| 399 | .type arm922_processor_functions, #object | 399 | .type arm922_processor_functions, #object |
| 400 | arm922_processor_functions: | 400 | arm922_processor_functions: |
| 401 | .word v4t_early_abort | 401 | .word v4t_early_abort |
| 402 | .word pabort_noifar | 402 | .word legacy_pabort |
| 403 | .word cpu_arm922_proc_init | 403 | .word cpu_arm922_proc_init |
| 404 | .word cpu_arm922_proc_fin | 404 | .word cpu_arm922_proc_fin |
| 405 | .word cpu_arm922_reset | 405 | .word cpu_arm922_reset |
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S index 2724526d89c1..cb53435a85ae 100644 --- a/arch/arm/mm/proc-arm925.S +++ b/arch/arm/mm/proc-arm925.S | |||
| @@ -462,7 +462,7 @@ arm925_crval: | |||
| 462 | .type arm925_processor_functions, #object | 462 | .type arm925_processor_functions, #object |
| 463 | arm925_processor_functions: | 463 | arm925_processor_functions: |
| 464 | .word v4t_early_abort | 464 | .word v4t_early_abort |
| 465 | .word pabort_noifar | 465 | .word legacy_pabort |
| 466 | .word cpu_arm925_proc_init | 466 | .word cpu_arm925_proc_init |
| 467 | .word cpu_arm925_proc_fin | 467 | .word cpu_arm925_proc_fin |
| 468 | .word cpu_arm925_reset | 468 | .word cpu_arm925_reset |
diff --git a/arch/arm/mm/proc-arm926.S b/arch/arm/mm/proc-arm926.S index 54466937bff9..1c4848704bb3 100644 --- a/arch/arm/mm/proc-arm926.S +++ b/arch/arm/mm/proc-arm926.S | |||
| @@ -415,7 +415,7 @@ arm926_crval: | |||
| 415 | .type arm926_processor_functions, #object | 415 | .type arm926_processor_functions, #object |
| 416 | arm926_processor_functions: | 416 | arm926_processor_functions: |
| 417 | .word v5tj_early_abort | 417 | .word v5tj_early_abort |
| 418 | .word pabort_noifar | 418 | .word legacy_pabort |
| 419 | .word cpu_arm926_proc_init | 419 | .word cpu_arm926_proc_init |
| 420 | .word cpu_arm926_proc_fin | 420 | .word cpu_arm926_proc_fin |
| 421 | .word cpu_arm926_reset | 421 | .word cpu_arm926_reset |
diff --git a/arch/arm/mm/proc-arm940.S b/arch/arm/mm/proc-arm940.S index f595117caf55..5b0f8464c8f2 100644 --- a/arch/arm/mm/proc-arm940.S +++ b/arch/arm/mm/proc-arm940.S | |||
| @@ -322,7 +322,7 @@ __arm940_setup: | |||
| 322 | .type arm940_processor_functions, #object | 322 | .type arm940_processor_functions, #object |
| 323 | ENTRY(arm940_processor_functions) | 323 | ENTRY(arm940_processor_functions) |
| 324 | .word nommu_early_abort | 324 | .word nommu_early_abort |
| 325 | .word pabort_noifar | 325 | .word legacy_pabort |
| 326 | .word cpu_arm940_proc_init | 326 | .word cpu_arm940_proc_init |
| 327 | .word cpu_arm940_proc_fin | 327 | .word cpu_arm940_proc_fin |
| 328 | .word cpu_arm940_reset | 328 | .word cpu_arm940_reset |
diff --git a/arch/arm/mm/proc-arm946.S b/arch/arm/mm/proc-arm946.S index e03f6ff1fb26..40c0449a139b 100644 --- a/arch/arm/mm/proc-arm946.S +++ b/arch/arm/mm/proc-arm946.S | |||
| @@ -377,7 +377,7 @@ __arm946_setup: | |||
| 377 | .type arm946_processor_functions, #object | 377 | .type arm946_processor_functions, #object |
| 378 | ENTRY(arm946_processor_functions) | 378 | ENTRY(arm946_processor_functions) |
| 379 | .word nommu_early_abort | 379 | .word nommu_early_abort |
| 380 | .word pabort_noifar | 380 | .word legacy_pabort |
| 381 | .word cpu_arm946_proc_init | 381 | .word cpu_arm946_proc_init |
| 382 | .word cpu_arm946_proc_fin | 382 | .word cpu_arm946_proc_fin |
| 383 | .word cpu_arm946_reset | 383 | .word cpu_arm946_reset |
diff --git a/arch/arm/mm/proc-arm9tdmi.S b/arch/arm/mm/proc-arm9tdmi.S index be6c11d2b3fb..28545c29dbcd 100644 --- a/arch/arm/mm/proc-arm9tdmi.S +++ b/arch/arm/mm/proc-arm9tdmi.S | |||
| @@ -64,7 +64,7 @@ __arm9tdmi_setup: | |||
| 64 | .type arm9tdmi_processor_functions, #object | 64 | .type arm9tdmi_processor_functions, #object |
| 65 | ENTRY(arm9tdmi_processor_functions) | 65 | ENTRY(arm9tdmi_processor_functions) |
| 66 | .word nommu_early_abort | 66 | .word nommu_early_abort |
| 67 | .word pabort_noifar | 67 | .word legacy_pabort |
| 68 | .word cpu_arm9tdmi_proc_init | 68 | .word cpu_arm9tdmi_proc_init |
| 69 | .word cpu_arm9tdmi_proc_fin | 69 | .word cpu_arm9tdmi_proc_fin |
| 70 | .word cpu_arm9tdmi_reset | 70 | .word cpu_arm9tdmi_reset |
diff --git a/arch/arm/mm/proc-fa526.S b/arch/arm/mm/proc-fa526.S index 08b8a955d5d7..08f5ac237ad4 100644 --- a/arch/arm/mm/proc-fa526.S +++ b/arch/arm/mm/proc-fa526.S | |||
| @@ -191,7 +191,7 @@ fa526_cr1_set: | |||
| 191 | .type fa526_processor_functions, #object | 191 | .type fa526_processor_functions, #object |
| 192 | fa526_processor_functions: | 192 | fa526_processor_functions: |
| 193 | .word v4_early_abort | 193 | .word v4_early_abort |
| 194 | .word pabort_noifar | 194 | .word legacy_pabort |
| 195 | .word cpu_fa526_proc_init | 195 | .word cpu_fa526_proc_init |
| 196 | .word cpu_fa526_proc_fin | 196 | .word cpu_fa526_proc_fin |
| 197 | .word cpu_fa526_reset | 197 | .word cpu_fa526_reset |
diff --git a/arch/arm/mm/proc-feroceon.S b/arch/arm/mm/proc-feroceon.S index 0fe1f8fc3488..d0d7795200fc 100644 --- a/arch/arm/mm/proc-feroceon.S +++ b/arch/arm/mm/proc-feroceon.S | |||
| @@ -499,7 +499,7 @@ feroceon_crval: | |||
| 499 | .type feroceon_processor_functions, #object | 499 | .type feroceon_processor_functions, #object |
| 500 | feroceon_processor_functions: | 500 | feroceon_processor_functions: |
| 501 | .word v5t_early_abort | 501 | .word v5t_early_abort |
| 502 | .word pabort_noifar | 502 | .word legacy_pabort |
| 503 | .word cpu_feroceon_proc_init | 503 | .word cpu_feroceon_proc_init |
| 504 | .word cpu_feroceon_proc_fin | 504 | .word cpu_feroceon_proc_fin |
| 505 | .word cpu_feroceon_reset | 505 | .word cpu_feroceon_reset |
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S index 54b1f721dec8..7d63beaf9745 100644 --- a/arch/arm/mm/proc-macros.S +++ b/arch/arm/mm/proc-macros.S | |||
| @@ -77,19 +77,15 @@ | |||
| 77 | * Sanity check the PTE configuration for the code below - which makes | 77 | * Sanity check the PTE configuration for the code below - which makes |
| 78 | * certain assumptions about how these bits are laid out. | 78 | * certain assumptions about how these bits are laid out. |
| 79 | */ | 79 | */ |
| 80 | #ifdef CONFIG_MMU | ||
| 80 | #if L_PTE_SHARED != PTE_EXT_SHARED | 81 | #if L_PTE_SHARED != PTE_EXT_SHARED |
| 81 | #error PTE shared bit mismatch | 82 | #error PTE shared bit mismatch |
| 82 | #endif | 83 | #endif |
| 83 | #if L_PTE_BUFFERABLE != PTE_BUFFERABLE | ||
| 84 | #error PTE bufferable bit mismatch | ||
| 85 | #endif | ||
| 86 | #if L_PTE_CACHEABLE != PTE_CACHEABLE | ||
| 87 | #error PTE cacheable bit mismatch | ||
| 88 | #endif | ||
| 89 | #if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\ | 84 | #if (L_PTE_EXEC+L_PTE_USER+L_PTE_WRITE+L_PTE_DIRTY+L_PTE_YOUNG+\ |
| 90 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED | 85 | L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED |
| 91 | #error Invalid Linux PTE bit settings | 86 | #error Invalid Linux PTE bit settings |
| 92 | #endif | 87 | #endif |
| 88 | #endif /* CONFIG_MMU */ | ||
| 93 | 89 | ||
| 94 | /* | 90 | /* |
| 95 | * The ARMv6 and ARMv7 set_pte_ext translation function. | 91 | * The ARMv6 and ARMv7 set_pte_ext translation function. |
diff --git a/arch/arm/mm/proc-mohawk.S b/arch/arm/mm/proc-mohawk.S index 540f5078496b..52b5fd74fbb3 100644 --- a/arch/arm/mm/proc-mohawk.S +++ b/arch/arm/mm/proc-mohawk.S | |||
| @@ -359,7 +359,7 @@ mohawk_crval: | |||
| 359 | .type mohawk_processor_functions, #object | 359 | .type mohawk_processor_functions, #object |
| 360 | mohawk_processor_functions: | 360 | mohawk_processor_functions: |
| 361 | .word v5t_early_abort | 361 | .word v5t_early_abort |
| 362 | .word pabort_noifar | 362 | .word legacy_pabort |
| 363 | .word cpu_mohawk_proc_init | 363 | .word cpu_mohawk_proc_init |
| 364 | .word cpu_mohawk_proc_fin | 364 | .word cpu_mohawk_proc_fin |
| 365 | .word cpu_mohawk_reset | 365 | .word cpu_mohawk_reset |
diff --git a/arch/arm/mm/proc-sa110.S b/arch/arm/mm/proc-sa110.S index 90a7e5279f29..7b706b389906 100644 --- a/arch/arm/mm/proc-sa110.S +++ b/arch/arm/mm/proc-sa110.S | |||
| @@ -199,7 +199,7 @@ sa110_crval: | |||
| 199 | .type sa110_processor_functions, #object | 199 | .type sa110_processor_functions, #object |
| 200 | ENTRY(sa110_processor_functions) | 200 | ENTRY(sa110_processor_functions) |
| 201 | .word v4_early_abort | 201 | .word v4_early_abort |
| 202 | .word pabort_noifar | 202 | .word legacy_pabort |
| 203 | .word cpu_sa110_proc_init | 203 | .word cpu_sa110_proc_init |
| 204 | .word cpu_sa110_proc_fin | 204 | .word cpu_sa110_proc_fin |
| 205 | .word cpu_sa110_reset | 205 | .word cpu_sa110_reset |
diff --git a/arch/arm/mm/proc-sa1100.S b/arch/arm/mm/proc-sa1100.S index 451e2d953e2a..ee7700242c19 100644 --- a/arch/arm/mm/proc-sa1100.S +++ b/arch/arm/mm/proc-sa1100.S | |||
| @@ -214,7 +214,7 @@ sa1100_crval: | |||
| 214 | .type sa1100_processor_functions, #object | 214 | .type sa1100_processor_functions, #object |
| 215 | ENTRY(sa1100_processor_functions) | 215 | ENTRY(sa1100_processor_functions) |
| 216 | .word v4_early_abort | 216 | .word v4_early_abort |
| 217 | .word pabort_noifar | 217 | .word legacy_pabort |
| 218 | .word cpu_sa1100_proc_init | 218 | .word cpu_sa1100_proc_init |
| 219 | .word cpu_sa1100_proc_fin | 219 | .word cpu_sa1100_proc_fin |
| 220 | .word cpu_sa1100_reset | 220 | .word cpu_sa1100_reset |
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S index 524ddae92595..194737d60a22 100644 --- a/arch/arm/mm/proc-v6.S +++ b/arch/arm/mm/proc-v6.S | |||
| @@ -191,7 +191,7 @@ v6_crval: | |||
| 191 | .type v6_processor_functions, #object | 191 | .type v6_processor_functions, #object |
| 192 | ENTRY(v6_processor_functions) | 192 | ENTRY(v6_processor_functions) |
| 193 | .word v6_early_abort | 193 | .word v6_early_abort |
| 194 | .word pabort_noifar | 194 | .word v6_pabort |
| 195 | .word cpu_v6_proc_init | 195 | .word cpu_v6_proc_init |
| 196 | .word cpu_v6_proc_fin | 196 | .word cpu_v6_proc_fin |
| 197 | .word cpu_v6_reset | 197 | .word cpu_v6_reset |
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S index 180a08d03a03..23ebcf6eab9f 100644 --- a/arch/arm/mm/proc-v7.S +++ b/arch/arm/mm/proc-v7.S | |||
| @@ -127,7 +127,9 @@ ENDPROC(cpu_v7_switch_mm) | |||
| 127 | */ | 127 | */ |
| 128 | ENTRY(cpu_v7_set_pte_ext) | 128 | ENTRY(cpu_v7_set_pte_ext) |
| 129 | #ifdef CONFIG_MMU | 129 | #ifdef CONFIG_MMU |
| 130 | str r1, [r0], #-2048 @ linux version | 130 | ARM( str r1, [r0], #-2048 ) @ linux version |
| 131 | THUMB( str r1, [r0] ) @ linux version | ||
| 132 | THUMB( sub r0, r0, #2048 ) | ||
| 131 | 133 | ||
| 132 | bic r3, r1, #0x000003f0 | 134 | bic r3, r1, #0x000003f0 |
| 133 | bic r3, r3, #PTE_TYPE_MASK | 135 | bic r3, r3, #PTE_TYPE_MASK |
| @@ -232,7 +234,6 @@ __v7_setup: | |||
| 232 | mcr p15, 0, r4, c2, c0, 1 @ load TTB1 | 234 | mcr p15, 0, r4, c2, c0, 1 @ load TTB1 |
| 233 | mov r10, #0x1f @ domains 0, 1 = manager | 235 | mov r10, #0x1f @ domains 0, 1 = manager |
| 234 | mcr p15, 0, r10, c3, c0, 0 @ load domain access register | 236 | mcr p15, 0, r10, c3, c0, 0 @ load domain access register |
| 235 | #endif | ||
| 236 | /* | 237 | /* |
| 237 | * Memory region attributes with SCTLR.TRE=1 | 238 | * Memory region attributes with SCTLR.TRE=1 |
| 238 | * | 239 | * |
| @@ -265,6 +266,7 @@ __v7_setup: | |||
| 265 | ldr r6, =0x40e040e0 @ NMRR | 266 | ldr r6, =0x40e040e0 @ NMRR |
| 266 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR | 267 | mcr p15, 0, r5, c10, c2, 0 @ write PRRR |
| 267 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR | 268 | mcr p15, 0, r6, c10, c2, 1 @ write NMRR |
| 269 | #endif | ||
| 268 | adr r5, v7_crval | 270 | adr r5, v7_crval |
| 269 | ldmia r5, {r5, r6} | 271 | ldmia r5, {r5, r6} |
| 270 | #ifdef CONFIG_CPU_ENDIAN_BE8 | 272 | #ifdef CONFIG_CPU_ENDIAN_BE8 |
| @@ -273,6 +275,7 @@ __v7_setup: | |||
| 273 | mrc p15, 0, r0, c1, c0, 0 @ read control register | 275 | mrc p15, 0, r0, c1, c0, 0 @ read control register |
| 274 | bic r0, r0, r5 @ clear bits them | 276 | bic r0, r0, r5 @ clear bits them |
| 275 | orr r0, r0, r6 @ set them | 277 | orr r0, r0, r6 @ set them |
| 278 | THUMB( orr r0, r0, #1 << 30 ) @ Thumb exceptions | ||
| 276 | mov pc, lr @ return to head.S:__ret | 279 | mov pc, lr @ return to head.S:__ret |
| 277 | ENDPROC(__v7_setup) | 280 | ENDPROC(__v7_setup) |
| 278 | 281 | ||
| @@ -292,7 +295,7 @@ __v7_setup_stack: | |||
| 292 | .type v7_processor_functions, #object | 295 | .type v7_processor_functions, #object |
| 293 | ENTRY(v7_processor_functions) | 296 | ENTRY(v7_processor_functions) |
| 294 | .word v7_early_abort | 297 | .word v7_early_abort |
| 295 | .word pabort_ifar | 298 | .word v7_pabort |
| 296 | .word cpu_v7_proc_init | 299 | .word cpu_v7_proc_init |
| 297 | .word cpu_v7_proc_fin | 300 | .word cpu_v7_proc_fin |
| 298 | .word cpu_v7_reset | 301 | .word cpu_v7_reset |
diff --git a/arch/arm/mm/proc-xsc3.S b/arch/arm/mm/proc-xsc3.S index 33515c214b92..2028f3702881 100644 --- a/arch/arm/mm/proc-xsc3.S +++ b/arch/arm/mm/proc-xsc3.S | |||
| @@ -428,7 +428,7 @@ xsc3_crval: | |||
| 428 | .type xsc3_processor_functions, #object | 428 | .type xsc3_processor_functions, #object |
| 429 | ENTRY(xsc3_processor_functions) | 429 | ENTRY(xsc3_processor_functions) |
| 430 | .word v5t_early_abort | 430 | .word v5t_early_abort |
| 431 | .word pabort_noifar | 431 | .word legacy_pabort |
| 432 | .word cpu_xsc3_proc_init | 432 | .word cpu_xsc3_proc_init |
| 433 | .word cpu_xsc3_proc_fin | 433 | .word cpu_xsc3_proc_fin |
| 434 | .word cpu_xsc3_reset | 434 | .word cpu_xsc3_reset |
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S index 0cce37b93937..f056c283682d 100644 --- a/arch/arm/mm/proc-xscale.S +++ b/arch/arm/mm/proc-xscale.S | |||
| @@ -17,7 +17,7 @@ | |||
| 17 | * | 17 | * |
| 18 | * 2001 Sep 08: | 18 | * 2001 Sep 08: |
| 19 | * Completely revisited, many important fixes | 19 | * Completely revisited, many important fixes |
| 20 | * Nicolas Pitre <nico@cam.org> | 20 | * Nicolas Pitre <nico@fluxnic.net> |
| 21 | */ | 21 | */ |
| 22 | 22 | ||
| 23 | #include <linux/linkage.h> | 23 | #include <linux/linkage.h> |
| @@ -511,7 +511,7 @@ xscale_crval: | |||
| 511 | .type xscale_processor_functions, #object | 511 | .type xscale_processor_functions, #object |
| 512 | ENTRY(xscale_processor_functions) | 512 | ENTRY(xscale_processor_functions) |
| 513 | .word v5t_early_abort | 513 | .word v5t_early_abort |
| 514 | .word pabort_noifar | 514 | .word legacy_pabort |
| 515 | .word cpu_xscale_proc_init | 515 | .word cpu_xscale_proc_init |
| 516 | .word cpu_xscale_proc_fin | 516 | .word cpu_xscale_proc_fin |
| 517 | .word cpu_xscale_reset | 517 | .word cpu_xscale_reset |
