Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/Kconfig		  2
-rw-r--r--	arch/arm/mm/alignment.c		 20
-rw-r--r--	arch/arm/mm/cache-v7.S		 16
-rw-r--r--	arch/arm/mm/dma-mapping.c	 94
-rw-r--r--	arch/arm/mm/fault.c		 23
-rw-r--r--	arch/arm/mm/nommu.c		  1
-rw-r--r--	arch/arm/mm/proc-macros.S	  2
-rw-r--r--	arch/arm/mm/proc-v7.S		  7
8 files changed, 130 insertions, 35 deletions
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 83c025e72ceb..5fe595aeba69 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -758,7 +758,7 @@ config CACHE_FEROCEON_L2_WRITETHROUGH
 config CACHE_L2X0
 	bool "Enable the L2x0 outer cache controller"
 	depends on REALVIEW_EB_ARM11MP || MACH_REALVIEW_PB11MP || MACH_REALVIEW_PB1176 || \
-		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX
+		   REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK
 	default y
 	select OUTER_CACHE
 	help
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 03cd27d917b9..b270d6228fe2 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -159,7 +159,9 @@ union offset_union {
 
 #define __get8_unaligned_check(ins,val,addr,err)	\
 	__asm__(					\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"2:\n"						\
 	"	.section .fixup,\"ax\"\n"		\
 	"	.align	2\n"				\
@@ -215,7 +217,9 @@ union offset_union {
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_16				\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
 	"2:	"ins"	%1, [%2]\n"			\
 	"3:\n"						\
@@ -245,11 +249,17 @@ union offset_union {
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
 		__asm__( FIRST_BYTE_32				\
-	"1:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
-	"2:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"2:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"2:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
-	"3:	"ins"	%1, [%2], #1\n"			\
+ ARM(	"3:	"ins"	%1, [%2], #1\n"	)		\
+ THUMB(	"3:	"ins"	%1, [%2]\n"	)		\
+ THUMB(	"	add	%2, %2, #1\n"	)		\
 	"	mov	%1, %1, "NEXT_BYTE"\n"		\
 	"4:	"ins"	%1, [%2]\n"			\
 	"5:\n"						\
diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S
index be93ff02a98d..bda0ec31a4e2 100644
--- a/arch/arm/mm/cache-v7.S
+++ b/arch/arm/mm/cache-v7.S
@@ -21,7 +21,7 @@
  *
  *	Flush the whole D-cache.
  *
- *	Corrupted registers: r0-r5, r7, r9-r11
+ *	Corrupted registers: r0-r7, r9-r11 (r6 only in Thumb mode)
  *
  *	- mm    - mm_struct describing address space
  */
@@ -51,8 +51,12 @@ loop1:
 loop2:
 	mov	r9, r4				@ create working copy of max way size
 loop3:
-	orr	r11, r10, r9, lsl r5		@ factor way and cache number into r11
-	orr	r11, r11, r7, lsl r2		@ factor index number into r11
+ ARM(	orr	r11, r10, r9, lsl r5	)	@ factor way and cache number into r11
+ THUMB(	lsl	r6, r9, r5		)
+ THUMB(	orr	r11, r10, r6		)	@ factor way and cache number into r11
+ ARM(	orr	r11, r11, r7, lsl r2	)	@ factor index number into r11
+ THUMB(	lsl	r6, r7, r2		)
+ THUMB(	orr	r11, r11, r6		)	@ factor index number into r11
 	mcr	p15, 0, r11, c7, c14, 2		@ clean & invalidate by set/way
 	subs	r9, r9, #1			@ decrement the way
 	bge	loop3
@@ -82,11 +86,13 @@ ENDPROC(v7_flush_dcache_all)
  *
  */
 ENTRY(v7_flush_kern_cache_all)
-	stmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	stmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	stmfd	sp!, {r4-r7, r9-r11, lr}	)
 	bl	v7_flush_dcache_all
 	mov	r0, #0
 	mcr	p15, 0, r0, c7, c5, 0		@ I+BTB cache invalidate
-	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}
+ ARM(	ldmfd	sp!, {r4-r5, r7, r9-r11, lr}	)
+ THUMB(	ldmfd	sp!, {r4-r7, r9-r11, lr}	)
 	mov	pc, lr
 ENDPROC(v7_flush_kern_cache_all)
 
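Thumb-2 cannot encode a register-specified shift inside an ORR operand, so each "orr ..., lsl rN" becomes a separate lsl into the r6 scratch register followed by a plain orr; that is why r6 joins the corrupted-register list and the stmfd/ldmfd lists grow in Thumb mode. A loose C model of what one loop3 iteration writes as the set/way operand, with register roles taken from the surrounding v7_flush_dcache_all code (r10 = cache level bits, r9/r5 = way index and its shift, r7/r2 = set index and its shift); illustrative only:

	static inline u32 v7_set_way_op(u32 level_bits, u32 way, u32 way_shift,
					u32 set, u32 set_shift)
	{
		/* packed operand for "clean & invalidate by set/way" */
		return level_bits | (way << way_shift) | (set << set_shift);
	}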
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index 510c179b0ac8..b30925fcbcdc 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -36,7 +36,34 @@
 #define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
 #define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
 
+static u64 get_coherent_dma_mask(struct device *dev)
+{
+	u64 mask = ISA_DMA_THRESHOLD;
+
+	if (dev) {
+		mask = dev->coherent_dma_mask;
+
+		/*
+		 * Sanity check the DMA mask - it must be non-zero, and
+		 * must be able to be satisfied by a DMA allocation.
+		 */
+		if (mask == 0) {
+			dev_warn(dev, "coherent DMA mask is unset\n");
+			return 0;
+		}
+
+		if ((~mask) & ISA_DMA_THRESHOLD) {
+			dev_warn(dev, "coherent DMA mask %#llx is smaller "
+				 "than system GFP_DMA mask %#llx\n",
+				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
+			return 0;
+		}
+	}
 
+	return mask;
+}
+
+#ifdef CONFIG_MMU
 /*
  * These are the page tables (2MB each) covering uncached, DMA consistent allocations
  */
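The new helper centralises the coherent-mask sanity checks so the MMU and noMMU allocators below can share them; callers treat a zero return as "mask cannot be satisfied". The intended call pattern, mirroring the two __dma_alloc bodies later in this diff:

	/* Caller-side pattern (mirrors __dma_alloc below): */
	u64 mask = get_coherent_dma_mask(dev);

	if (!mask)
		return NULL;		/* mask unset or unsatisfiable */
	if (mask != 0xffffffff)
		gfp |= GFP_DMA;		/* constrain allocation to the DMA zone */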
@@ -152,7 +179,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	struct page *page;
 	struct arm_vm_region *c;
 	unsigned long order;
-	u64 mask = ISA_DMA_THRESHOLD, limit;
+	u64 mask = get_coherent_dma_mask(dev);
+	u64 limit;
 
 	if (!consistent_pte[0]) {
 		printk(KERN_ERR "%s: not initialised\n", __func__);
@@ -160,25 +188,8 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 		return NULL;
 	}
 
-	if (dev) {
-		mask = dev->coherent_dma_mask;
-
-		/*
-		 * Sanity check the DMA mask - it must be non-zero, and
-		 * must be able to be satisfied by a DMA allocation.
-		 */
-		if (mask == 0) {
-			dev_warn(dev, "coherent DMA mask is unset\n");
-			goto no_page;
-		}
-
-		if ((~mask) & ISA_DMA_THRESHOLD) {
-			dev_warn(dev, "coherent DMA mask %#llx is smaller "
-				"than system GFP_DMA mask %#llx\n",
-				mask, (unsigned long long)ISA_DMA_THRESHOLD);
-			goto no_page;
-		}
-	}
+	if (!mask)
+		goto no_page;
 
 	/*
 	 * Sanity check the allocation size.
@@ -267,6 +278,31 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	*handle = ~0;
 	return NULL;
 }
+#else	/* !CONFIG_MMU */
+static void *
+__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
+	    pgprot_t prot)
+{
+	void *virt;
+	u64 mask = get_coherent_dma_mask(dev);
+
+	if (!mask)
+		goto error;
+
+	if (mask != 0xffffffff)
+		gfp |= GFP_DMA;
+	virt = kmalloc(size, gfp);
+	if (!virt)
+		goto error;
+
+	*handle = virt_to_dma(dev, virt);
+	return virt;
+
+error:
+	*handle = ~0;
+	return NULL;
+}
+#endif	/* CONFIG_MMU */
 
 /*
  * Allocate DMA-coherent memory space and return both the kernel remapped
@@ -311,9 +347,10 @@ EXPORT_SYMBOL(dma_alloc_writecombine);
 static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
 {
+	int ret = -ENXIO;
+#ifdef CONFIG_MMU
 	unsigned long flags, user_size, kern_size;
 	struct arm_vm_region *c;
-	int ret = -ENXIO;
 
 	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
 
@@ -334,6 +371,7 @@ static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
 				      vma->vm_page_prot);
 		}
 	}
+#endif	/* CONFIG_MMU */
 
 	return ret;
 }
@@ -358,6 +396,7 @@ EXPORT_SYMBOL(dma_mmap_writecombine);
  * free a page as defined by the above mapping.
  * Must not be called with IRQs disabled.
  */
+#ifdef CONFIG_MMU
 void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 {
 	struct arm_vm_region *c;
@@ -444,6 +483,14 @@ void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
 	       __func__, cpu_addr);
 	dump_stack();
 }
+#else	/* !CONFIG_MMU */
+void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
+{
+	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
+		return;
+	kfree(cpu_addr);
+}
+#endif	/* CONFIG_MMU */
 EXPORT_SYMBOL(dma_free_coherent);
 
 /*
@@ -451,10 +498,12 @@ EXPORT_SYMBOL(dma_free_coherent);
  */
 static int __init consistent_init(void)
 {
+	int ret = 0;
+#ifdef CONFIG_MMU
 	pgd_t *pgd;
 	pmd_t *pmd;
 	pte_t *pte;
-	int ret = 0, i = 0;
+	int i = 0;
 	u32 base = CONSISTENT_BASE;
 
 	do {
@@ -477,6 +526,7 @@ static int __init consistent_init(void)
 		consistent_pte[i++] = pte;
 		base += (1 << PGDIR_SHIFT);
 	} while (base < CONSISTENT_END);
+#endif	/* !CONFIG_MMU */
 
 	return ret;
 }
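With the !CONFIG_MMU paths in place, a driver using the standard DMA API works unchanged on noMMU: the buffer comes straight from kmalloc() and the bus handle from virt_to_dma(). A hypothetical driver-side round trip (device pointer and size invented for illustration):

	dma_addr_t dma_handle;
	void *buf;

	buf = dma_alloc_coherent(dev, 4096, &dma_handle, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... program the device with dma_handle, touch buf from the CPU ... */
	dma_free_coherent(dev, 4096, buf, dma_handle);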
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5fa8dea5a371..cc8829d7e116 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -16,6 +16,7 @@
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
 #include <linux/page-flags.h>
+#include <linux/sched.h>
 #include <linux/highmem.h>
 
 #include <asm/system.h>
@@ -24,6 +25,7 @@
 
 #include "fault.h"
 
+#ifdef CONFIG_MMU
 
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, unsigned int fsr)
@@ -98,6 +100,10 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
 
 	printk("\n");
 }
+#else					/* CONFIG_MMU */
+void show_pte(struct mm_struct *mm, unsigned long addr)
+{ }
+#endif					/* CONFIG_MMU */
 
 /*
  * Oops. The kernel tried to access some page that wasn't present.
@@ -172,6 +178,7 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	__do_kernel_fault(mm, addr, fsr, regs);
 }
 
+#ifdef CONFIG_MMU
 #define VM_FAULT_BADMAP		0x010000
 #define VM_FAULT_BADACCESS	0x020000
 
@@ -323,6 +330,13 @@ no_context:
 	__do_kernel_fault(mm, addr, fsr, regs);
 	return 0;
 }
+#else					/* CONFIG_MMU */
+static int
+do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
+{
+	return 0;
+}
+#endif					/* CONFIG_MMU */
 
 /*
  * First Level Translation Fault Handler
@@ -341,6 +355,7 @@ no_context:
  * interrupt or a critical region, and should only copy the information
  * from the master page table, nothing more.
  */
+#ifdef CONFIG_MMU
 static int __kprobes
 do_translation_fault(unsigned long addr, unsigned int fsr,
 		     struct pt_regs *regs)
@@ -379,6 +394,14 @@ bad_area:
 	do_bad_area(addr, fsr, regs);
 	return 0;
 }
+#else					/* CONFIG_MMU */
+static int
+do_translation_fault(unsigned long addr, unsigned int fsr,
+		     struct pt_regs *regs)
+{
+	return 0;
+}
+#endif					/* CONFIG_MMU */
 
 /*
  * Some section permission faults need to be handled gracefully.
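The noMMU stubs exist so that the fault-dispatch table in this file still links: the handlers are invoked through a lookup table indexed by the fault status register. For context, that table has roughly this shape in the fault.c of this era (reconstructed from the surrounding file, not part of this patch; field names from memory, treat as illustrative):

	static struct fsr_info {
		int	(*fn)(unsigned long addr, unsigned int fsr,
			      struct pt_regs *regs);
		int	sig;
		int	code;
		const char *name;
	} fsr_info[] = {
		/* ... entries point at do_translation_fault,
		 * do_page_fault, etc., so the stubs above keep
		 * the table valid when CONFIG_MMU is off ... */
	};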
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index ad7bacc693b2..900811cc9130 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -12,6 +12,7 @@
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
 #include <asm/page.h>
+#include <asm/setup.h>
 #include <asm/mach/arch.h>
 
 #include "mm.h"
diff --git a/arch/arm/mm/proc-macros.S b/arch/arm/mm/proc-macros.S
index a0b3f36f3178..7d63beaf9745 100644
--- a/arch/arm/mm/proc-macros.S
+++ b/arch/arm/mm/proc-macros.S
@@ -77,6 +77,7 @@
  * Sanity check the PTE configuration for the code below - which makes
  * certain assumptions about how these bits are layed out.
  */
+#ifdef CONFIG_MMU
 #if L_PTE_SHARED != PTE_EXT_SHARED
 #error PTE shared bit mismatch
 #endif
@@ -84,6 +85,7 @@
 	 L_PTE_FILE+L_PTE_PRESENT) > L_PTE_SHARED
 #error Invalid Linux PTE bit settings
 #endif
+#endif	/* CONFIG_MMU */
 
 /*
  * The ARMv6 and ARMv7 set_pte_ext translation function.
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 180a08d03a03..f3fa1c32fe92 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -127,7 +127,9 @@ ENDPROC(cpu_v7_switch_mm)
  */
 ENTRY(cpu_v7_set_pte_ext)
 #ifdef CONFIG_MMU
-	str	r1, [r0], #-2048		@ linux version
+ ARM(	str	r1, [r0], #-2048	)	@ linux version
+ THUMB(	str	r1, [r0]	)		@ linux version
+ THUMB(	sub	r0, r0, #2048	)
 
 	bic	r3, r1, #0x000003f0
 	bic	r3, r3, #PTE_TYPE_MASK
@@ -232,7 +234,6 @@ __v7_setup:
 	mcr	p15, 0, r4, c2, c0, 1		@ load TTB1
 	mov	r10, #0x1f			@ domains 0, 1 = manager
 	mcr	p15, 0, r10, c3, c0, 0		@ load domain access register
-#endif
 	/*
 	 * Memory region attributes with SCTLR.TRE=1
 	 *
@@ -265,6 +266,7 @@ __v7_setup:
 	ldr	r6, =0x40e040e0			@ NMRR
 	mcr	p15, 0, r5, c10, c2, 0		@ write PRRR
 	mcr	p15, 0, r6, c10, c2, 1		@ write NMRR
+#endif
 	adr	r5, v7_crval
 	ldmia	r5, {r5, r6}
 #ifdef CONFIG_CPU_ENDIAN_BE8
@@ -273,6 +275,7 @@ __v7_setup:
 	mrc	p15, 0, r0, c1, c0, 0		@ read control register
 	bic	r0, r0, r5			@ clear bits them
 	orr	r0, r0, r6			@ set them
+ THUMB(	orr	r0, r0, #1 << 30	)	@ Thumb exceptions
 	mov	pc, lr				@ return to head.S:__ret
 ENDPROC(__v7_setup)
 
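SCTLR bit 30 is the TE (Thumb Exception enable) bit: with it set, the core enters exception handlers in Thumb state, which a Thumb-2 kernel needs because its vectors are Thumb code. A rough C rendering of the __v7_setup control-register fixup above, where the clear/set masks correspond to the v7_crval values loaded into r5/r6 (function name invented for illustration):

	#define SCTLR_TE	(1 << 30)	/* exceptions taken in Thumb state */

	static u32 v7_setup_ctrl(u32 sctlr, u32 clear, u32 set)
	{
		sctlr &= ~clear;		/* v7_crval clear mask (r5) */
		sctlr |= set;			/* v7_crval set mask (r6) */
	#ifdef CONFIG_THUMB2_KERNEL
		sctlr |= SCTLR_TE;		/* the new THUMB() line above */
	#endif
		return sctlr;
	}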