Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig       |  5
-rw-r--r--  arch/arm/mm/context.c     |  5
-rw-r--r--  arch/arm/mm/copypage-v6.c |  8
-rw-r--r--  arch/arm/mm/dma-mapping.c |  4
-rw-r--r--  arch/arm/mm/fault-armv.c  |  9
-rw-r--r--  arch/arm/mm/flush.c       | 74
-rw-r--r--  arch/arm/mm/init.c        | 20
-rw-r--r--  arch/arm/mm/mm.h          |  2
-rw-r--r--  arch/arm/mm/mmu.c         |  9
-rw-r--r--  arch/arm/mm/proc-v6.S     |  7
-rw-r--r--  arch/arm/mm/proc-v7.S     | 14
11 files changed, 94 insertions(+), 63 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 9cf7706e0be0..7b7d4c36c11c 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -122,10 +122,7 @@ config CPU_ARM920T
 	select CPU_TLB_V4WBI if MMU
 	help
 	  The ARM920T is licensed to be produced by numerous vendors,
-	  and is used in the Maverick EP9312 and the Samsung S3C2410.
-
-	  More information on the Maverick EP9312 at
-	  <http://linuxdevices.com/products/PD2382866068.html>.
+	  and is used in the Cirrus EP93xx and the Samsung S3C2410.
 
 	  Say Y if you want support for the ARM920T processor.
 	  Otherwise, say N.
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 6bda76a43199..a9e22e31eaa1 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -50,10 +50,7 @@ void __new_context(struct mm_struct *mm)
 		isb();
 		flush_tlb_all();
 		if (icache_is_vivt_asid_tagged()) {
-			asm("mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache\n"
-			    "mcr	p15, 0, %0, c7, c5, 6	@ flush BTAC/BTB\n"
-			    :
-			    : "r" (0));
+			__flush_icache_all();
 			dsb();
 		}
 	}
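
The hunk above replaces an open-coded CP15 I-cache/BTB invalidation with the generic __flush_icache_all() helper. As a rough sketch only (the in-tree definition lives in <asm/cacheflush.h> and also covers the ARM erratum 411920 case removed from flush.c below), such a helper boils down to a single CP15 write:

/*
 * Sketch only: flush_icache_all_sketch() is a made-up name for
 * illustration; the real helper is __flush_icache_all().  The
 * "c7, c5, 0" operation is the ARMv6/v7 "invalidate entire I-cache"
 * CP15 write; erratum-411920 handling (v6_icache_inval_all) is not
 * shown here.
 */
static inline void flush_icache_all_sketch(void)
{
	__asm__ __volatile__(
		"mcr	p15, 0, %0, c7, c5, 0	@ invalidate I-cache"
		:
		: "r" (0));
}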
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 4127a7bddfe5..841f355319bf 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -41,6 +41,14 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 	kfrom = kmap_atomic(from, KM_USER0);
 	kto = kmap_atomic(to, KM_USER1);
 	copy_page(kto, kfrom);
+#ifdef CONFIG_HIGHMEM
+	/*
+	 * kmap_atomic() doesn't set the page virtual address, and
+	 * kunmap_atomic() takes care of cache flushing already.
+	 */
+	if (page_address(to) != NULL)
+#endif
+		__cpuc_flush_dcache_page(kto);
 	kunmap_atomic(kto, KM_USER1);
 	kunmap_atomic(kfrom, KM_USER0);
 }
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index b30925fcbcdc..b9590a7085ca 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -205,7 +205,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 
 	order = get_order(size);
 
-	if (mask != 0xffffffff)
+	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 
 	page = alloc_pages(gfp, order);
@@ -289,7 +289,7 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	if (!mask)
 		goto error;
 
-	if (mask != 0xffffffff)
+	if (mask < 0xffffffffULL)
 		gfp |= GFP_DMA;
 	virt = kmalloc(size, gfp);
 	if (!virt)
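
The two dma-mapping.c hunks fix the GFP_DMA decision: the DMA mask is a 64-bit quantity, so the old test `mask != 0xffffffff` wrongly forces ZONE_DMA allocations for devices whose mask is wider than 32 bits, while `mask < 0xffffffffULL` only restricts devices that genuinely cannot address the full 32-bit range. A small user-space sketch (standalone C, not kernel code) of the behavioural difference:

#include <stdio.h>
#include <stdint.h>

/* Compare the old and new GFP_DMA tests for a few example device masks. */
int main(void)
{
	const uint64_t masks[] = {
		0x00ffffffULL,   /* 24-bit-capable device: really needs ZONE_DMA  */
		0xffffffffULL,   /* plain 32-bit device                           */
		0xffffffffffULL, /* 40-bit device: old test wrongly sets GFP_DMA  */
	};

	for (unsigned i = 0; i < sizeof(masks) / sizeof(masks[0]); i++) {
		uint64_t mask = masks[i];
		printf("mask=%#12llx  old(GFP_DMA)=%d  new(GFP_DMA)=%d\n",
		       (unsigned long long)mask,
		       mask != 0xffffffff,     /* old test  */
		       mask < 0xffffffffULL);  /* fixed test */
	}
	return 0;
}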
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index d0d17b6a3703..729602291958 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -23,6 +23,8 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#include "mm.h"
+
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
@@ -151,7 +153,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
 	if (!pfn_valid(pfn))
 		return;
 
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
 	page = pfn_to_page(pfn);
+	if (page == ZERO_PAGE(0))
+		return;
+
 	mapping = page_mapping(page);
 #ifndef CONFIG_SMP
 	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index b27942909b23..329594e760cd 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -18,10 +18,6 @@
 
 #include "mm.h"
 
-#ifdef CONFIG_ARM_ERRATA_411920
-extern void v6_icache_inval_all(void);
-#endif
-
 #ifdef CONFIG_CPU_CACHE_VIPT
 
 #define ALIAS_FLUSH_START	0xffff4000
@@ -35,77 +31,61 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
 	flush_tlb_kernel_page(to);
 
 	asm(	"mcrr	p15, 0, %1, %0, c14\n"
-	"	mcr	p15, 0, %2, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-	"	mcr	p15, 0, %2, c7, c5, 0\n"
-#endif
+	"	mcr	p15, 0, %2, c7, c10, 4"
 	    :
 	    : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
 	    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-	v6_icache_inval_all();
-#endif
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-			__cpuc_flush_user_all();
+		vivt_flush_cache_mm(mm);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-		"	mcr	p15, 0, %0, c7, c5, 0\n"
-#endif
+		"	mcr	p15, 0, %0, c7, c10, 4"
 		    :
 		    : "r" (0)
 		    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-		v6_icache_inval_all();
-#endif
 	}
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-			__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-						vma->vm_flags);
+		vivt_flush_cache_range(vma, start, end);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		asm(	"mcr	p15, 0, %0, c7, c14, 0\n"
-		"	mcr	p15, 0, %0, c7, c10, 4\n"
-#ifndef CONFIG_ARM_ERRATA_411920
-		"	mcr	p15, 0, %0, c7, c5, 0\n"
-#endif
+		"	mcr	p15, 0, %0, c7, c10, 4"
 		    :
 		    : "r" (0)
 		    : "cc");
-#ifdef CONFIG_ARM_ERRATA_411920
-		v6_icache_inval_all();
-#endif
 	}
+
+	if (vma->vm_flags & VM_EXEC)
+		__flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = user_addr & PAGE_MASK;
-			__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-		}
+		vivt_flush_cache_page(vma, user_addr, pfn);
 		return;
 	}
 
-	if (cache_is_vipt_aliasing())
+	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(pfn, user_addr);
+		__flush_icache_all();
+	}
+
+	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+		__flush_icache_all();
 }
 
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -113,15 +93,13 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 			 unsigned long len, int write)
 {
 	if (cache_is_vivt()) {
-		if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-			unsigned long addr = (unsigned long)kaddr;
-			__cpuc_coherent_kern_range(addr, addr + len);
-		}
+		vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
 		return;
 	}
 
 	if (cache_is_vipt_aliasing()) {
 		flush_pfn_alias(page_to_pfn(page), uaddr);
+		__flush_icache_all();
 		return;
 	}
 
@@ -139,6 +117,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
+	void *addr = page_address(page);
+
 	/*
 	 * Writeback any data associated with the kernel mapping of this
 	 * page.  This ensures that data in the physical page is mutually
@@ -149,9 +129,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
 	 * kmap_atomic() doesn't set the page virtual address, and
 	 * kunmap_atomic() takes care of cache flushing already.
 	 */
-	if (page_address(page))
+	if (addr)
 #endif
-		__cpuc_flush_dcache_page(page_address(page));
+		__cpuc_flush_dcache_page(addr);
 
 	/*
 	 * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -215,7 +195,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
  */
 void flush_dcache_page(struct page *page)
 {
-	struct address_space *mapping = page_mapping(page);
+	struct address_space *mapping;
+
+	/*
+	 * The zero page is never written to, so never has any dirty
+	 * cache lines, and therefore never needs to be flushed.
+	 */
+	if (page == ZERO_PAGE(0))
+		return;
+
+	mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
 	if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
@@ -261,6 +250,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
 		 * userspace address only.
 		 */
 		flush_pfn_alias(pfn, vmaddr);
+		__flush_icache_all();
 	}
 
 	/*
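
Taken together, the flush.c hunks move every VIPT-aliasing path onto the same shape: the VIVT cases are delegated to vivt_* wrappers, the D-side is cleaned through the kernel alias, and the I-cache is then invalidated with __flush_icache_all() instead of an extra mcr inside each asm block, which also keeps the erratum 411920 handling in one place. A condensed sketch of that pattern, using only the helper names that appear in the hunks above (not a standalone compilable unit outside flush.c):

/* Sketch of the flush_cache_page()-style flow after this series. */
static void flush_user_page_sketch(struct vm_area_struct *vma,
				   unsigned long user_addr, unsigned long pfn)
{
	if (cache_is_vivt()) {
		vivt_flush_cache_page(vma, user_addr, pfn); /* VIVT: old per-mm path */
		return;
	}

	if (cache_is_vipt_aliasing()) {
		flush_pfn_alias(pfn, user_addr); /* clean the D-side via the kernel alias */
		__flush_icache_all();            /* then invalidate the whole I-cache */
	}

	/* ASID-tagged VIVT I-caches still need flushing for executable vmas. */
	if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
		__flush_icache_all();
}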
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 40940d7ce4ff..52c40d155672 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -273,7 +273,6 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
 		struct membank *bank = &mi->bank[i];
 		if (!bank->highmem)
 			free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
-		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
 	}
 
 	/*
@@ -370,6 +369,19 @@ int pfn_valid(unsigned long pfn)
 	return 0;
 }
 EXPORT_SYMBOL(pfn_valid);
+
+static void arm_memory_present(struct meminfo *mi, int node)
+{
+}
+#else
+static void arm_memory_present(struct meminfo *mi, int node)
+{
+	int i;
+	for_each_nodebank(i, mi, node) {
+		struct membank *bank = &mi->bank[i];
+		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
+	}
+}
 #endif
 
 static int __init meminfo_cmp(const void *_a, const void *_b)
@@ -427,6 +439,12 @@ void __init bootmem_init(void)
 		 */
 		if (node == initrd_node)
 			bootmem_reserve_initrd(node);
+
+		/*
+		 * Sparsemem tries to allocate bootmem in memory_present(),
+		 * so must be done after the fixed reservations
+		 */
+		arm_memory_present(mi, node);
 	}
 
 	/*
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index c4f6f05198e0..a888363398f8 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -24,6 +24,8 @@ struct mem_type {
 
 const struct mem_type *get_mem_type(unsigned int type);
 
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 #endif
 
 struct map_desc;
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 02243eeccf50..2427cdcd9098 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -117,6 +117,13 @@ static void __init early_cachepolicy(char **p)
 	}
 	if (i == ARRAY_SIZE(cache_policies))
 		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
+	/*
+	 * This restriction is partly to do with the way we boot; it is
+	 * unpredictable to have memory mapped using two different sets of
+	 * memory attributes (shared, type, and cache attribs).  We can not
+	 * change these attributes once the initial assembly has setup the
+	 * page tables.
+	 */
 	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
 		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
 		cachepolicy = CPOLICY_WRITEBACK;
@@ -1029,7 +1036,7 @@ void __init paging_init(struct machine_desc *mdesc)
 	 */
 	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
 	empty_zero_page = virt_to_page(zero_page);
-	flush_dcache_page(empty_zero_page);
+	__flush_dcache_page(NULL, empty_zero_page);
 }
 
 /*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 194737d60a22..70f75d2e3ead 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -32,8 +32,10 @@
 
 #ifndef CONFIG_SMP
 #define TTB_FLAGS	TTB_RGN_WBWA
+#define PMD_FLAGS	PMD_SECT_WB
 #else
 #define TTB_FLAGS	TTB_RGN_WBWA|TTB_S
+#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
 #endif
 
 ENTRY(cpu_v6_proc_init)
@@ -222,10 +224,9 @@ __v6_proc_info:
 	.long	0x0007b000
 	.long	0x0007f000
 	.long   PMD_TYPE_SECT | \
-		PMD_SECT_BUFFERABLE | \
-		PMD_SECT_CACHEABLE | \
 		PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 23ebcf6eab9f..3a285218fd15 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -33,9 +33,11 @@
 #ifndef CONFIG_SMP
 /* PTWs cacheable, inner WB not shareable, outer WB not shareable */
 #define TTB_FLAGS	TTB_IRGN_WB|TTB_RGN_OC_WB
+#define PMD_FLAGS	PMD_SECT_WB
 #else
 /* PTWs cacheable, inner WBWA shareable, outer WBWA not shareable */
 #define TTB_FLAGS	TTB_IRGN_WBWA|TTB_S|TTB_NOS|TTB_RGN_OC_WBWA
+#define PMD_FLAGS	PMD_SECT_WBWA|PMD_SECT_S
 #endif
 
 ENTRY(cpu_v7_proc_init)
@@ -184,9 +186,10 @@ cpu_v7_name:
  */
 __v7_setup:
 #ifdef CONFIG_SMP
-	mrc	p15, 0, r0, c1, c0, 1		@ Enable SMP/nAMP mode and
-	orr	r0, r0, #(1 << 6) | (1 << 0)	@ TLB ops broadcasting
-	mcr	p15, 0, r0, c1, c0, 1
+	mrc	p15, 0, r0, c1, c0, 1
+	tst	r0, #(1 << 6)			@ SMP/nAMP mode enabled?
+	orreq	r0, r0, #(1 << 6) | (1 << 0)	@ Enable SMP/nAMP mode and
+	mcreq	p15, 0, r0, c1, c0, 1		@ TLB ops broadcasting
 #endif
 	adr	r12, __v7_setup_stack		@ the local stack
 	stmia	r12, {r0-r5, r7, r9, r11, lr}
@@ -326,10 +329,9 @@ __v7_proc_info:
 	.long	0x000f0000		@ Required ID value
 	.long	0x000f0000		@ Mask for ID
 	.long   PMD_TYPE_SECT | \
-		PMD_SECT_BUFFERABLE | \
-		PMD_SECT_CACHEABLE | \
 		PMD_SECT_AP_WRITE | \
-		PMD_SECT_AP_READ
+		PMD_SECT_AP_READ | \
+		PMD_FLAGS
 	.long   PMD_TYPE_SECT | \
 		PMD_SECT_XN | \
 		PMD_SECT_AP_WRITE | \