Diffstat (limited to 'arch/arm/mm')
-rw-r--r--  arch/arm/mm/Kconfig      11
-rw-r--r--  arch/arm/mm/Makefile      1
-rw-r--r--  arch/arm/mm/discontig.c  45
-rw-r--r--  arch/arm/mm/init.c      392
-rw-r--r--  arch/arm/mm/ioremap.c     6
-rw-r--r--  arch/arm/mm/mm.h          4
-rw-r--r--  arch/arm/mm/mmu.c       161
-rw-r--r--  arch/arm/mm/nommu.c      22
-rw-r--r--  arch/arm/mm/proc-v6.S     5
-rw-r--r--  arch/arm/mm/proc-v7.S     2
10 files changed, 202 insertions(+), 447 deletions(-)
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 101105e52610..87ec141fcaa6 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -717,17 +717,6 @@ config TLS_REG_EMUL
 	  a few prototypes like that in existence) and therefore access to
 	  that required register must be emulated.
 
-config HAS_TLS_REG
-	bool
-	depends on !TLS_REG_EMUL
-	default y if SMP || CPU_32v7
-	help
-	  This selects support for the CP15 thread register.
-	  It is defined to be available on some ARMv6 processors (including
-	  all SMP capable ARMv6's) or later processors.  User space may
-	  assume directly accessing that register and always obtain the
-	  expected value only on ARMv7 and above.
-
 config NEEDS_SYSCALL_FOR_CMPXCHG
 	bool
 	help
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index e8d34a80851c..d63b6c413758 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -15,7 +15,6 @@ endif
 obj-$(CONFIG_MODULES)		+= proc-syms.o
 
 obj-$(CONFIG_ALIGNMENT_TRAP)	+= alignment.o
-obj-$(CONFIG_DISCONTIGMEM)	+= discontig.o
 obj-$(CONFIG_HIGHMEM)		+= highmem.o
 
 obj-$(CONFIG_CPU_ABRT_NOMMU)	+= abort-nommu.o
diff --git a/arch/arm/mm/discontig.c b/arch/arm/mm/discontig.c
deleted file mode 100644
index c8c0c4b0f0a3..000000000000
--- a/arch/arm/mm/discontig.c
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * linux/arch/arm/mm/discontig.c
- *
- * Discontiguous memory support.
- *
- * Initial code: Copyright (C) 1999-2000 Nicolas Pitre
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-#include <linux/module.h>
-#include <linux/mmzone.h>
-#include <linux/bootmem.h>
-
-#if MAX_NUMNODES != 4 && MAX_NUMNODES != 16
-# error Fix Me Please
-#endif
-
-/*
- * Our node_data structure for discontiguous memory.
- */
-
-pg_data_t discontig_node_data[MAX_NUMNODES] = {
-  { .bdata = &bootmem_node_data[0] },
-  { .bdata = &bootmem_node_data[1] },
-  { .bdata = &bootmem_node_data[2] },
-  { .bdata = &bootmem_node_data[3] },
-#if MAX_NUMNODES == 16
-  { .bdata = &bootmem_node_data[4] },
-  { .bdata = &bootmem_node_data[5] },
-  { .bdata = &bootmem_node_data[6] },
-  { .bdata = &bootmem_node_data[7] },
-  { .bdata = &bootmem_node_data[8] },
-  { .bdata = &bootmem_node_data[9] },
-  { .bdata = &bootmem_node_data[10] },
-  { .bdata = &bootmem_node_data[11] },
-  { .bdata = &bootmem_node_data[12] },
-  { .bdata = &bootmem_node_data[13] },
-  { .bdata = &bootmem_node_data[14] },
-  { .bdata = &bootmem_node_data[15] },
-#endif
-};
-
-EXPORT_SYMBOL(discontig_node_data);
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index f6a999465323..599d121c81e7 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -17,6 +17,7 @@
 #include <linux/initrd.h>
 #include <linux/highmem.h>
 #include <linux/gfp.h>
+#include <linux/memblock.h>
 
 #include <asm/mach-types.h>
 #include <asm/sections.h>
@@ -79,38 +80,37 @@ struct meminfo meminfo;
 void show_mem(void)
 {
 	int free = 0, total = 0, reserved = 0;
-	int shared = 0, cached = 0, slab = 0, node, i;
+	int shared = 0, cached = 0, slab = 0, i;
 	struct meminfo * mi = &meminfo;
 
 	printk("Mem-info:\n");
 	show_free_areas();
-	for_each_online_node(node) {
-		for_each_nodebank (i,mi,node) {
+
+	for_each_bank (i, mi) {
 		struct membank *bank = &mi->bank[i];
 		unsigned int pfn1, pfn2;
 		struct page *page, *end;
 
 		pfn1 = bank_pfn_start(bank);
 		pfn2 = bank_pfn_end(bank);
 
 		page = pfn_to_page(pfn1);
 		end  = pfn_to_page(pfn2 - 1) + 1;
 
 		do {
 			total++;
 			if (PageReserved(page))
 				reserved++;
 			else if (PageSwapCache(page))
 				cached++;
 			else if (PageSlab(page))
 				slab++;
 			else if (!page_count(page))
 				free++;
 			else
 				shared += page_count(page) - 1;
 			page++;
 		} while (page < end);
-		}
 	}
 
 	printk("%d pages of RAM\n", total);
@@ -121,7 +121,7 @@ void show_mem(void)
 	printk("%d pages swap cached\n", cached);
 }
 
-static void __init find_node_limits(int node, struct meminfo *mi,
+static void __init find_limits(struct meminfo *mi,
 	unsigned long *min, unsigned long *max_low, unsigned long *max_high)
 {
 	int i;
@@ -129,7 +129,7 @@ static void __init find_node_limits(int node, struct meminfo *mi,
 	*min = -1UL;
 	*max_low = *max_high = 0;
 
-	for_each_nodebank(i, mi, node) {
+	for_each_bank (i, mi) {
 		struct membank *bank = &mi->bank[i];
 		unsigned long start, end;
 
@@ -147,155 +147,64 @@ static void __init find_node_limits(int node, struct meminfo *mi,
 	}
 }
 
-/*
- * FIXME: We really want to avoid allocating the bootmap bitmap
- * over the top of the initrd.  Hopefully, this is located towards
- * the start of a bank, so if we allocate the bootmap bitmap at
- * the end, we won't clash.
- */
-static unsigned int __init
-find_bootmap_pfn(int node, struct meminfo *mi, unsigned int bootmap_pages)
-{
-	unsigned int start_pfn, i, bootmap_pfn;
-
-	start_pfn = PAGE_ALIGN(__pa(_end)) >> PAGE_SHIFT;
-	bootmap_pfn = 0;
-
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-		unsigned int start, end;
-
-		start = bank_pfn_start(bank);
-		end = bank_pfn_end(bank);
-
-		if (end < start_pfn)
-			continue;
-
-		if (start < start_pfn)
-			start = start_pfn;
-
-		if (end <= start)
-			continue;
-
-		if (end - start >= bootmap_pages) {
-			bootmap_pfn = start;
-			break;
-		}
-	}
-
-	if (bootmap_pfn == 0)
-		BUG();
-
-	return bootmap_pfn;
-}
-
-static int __init check_initrd(struct meminfo *mi)
-{
-	int initrd_node = -2;
-#ifdef CONFIG_BLK_DEV_INITRD
-	unsigned long end = phys_initrd_start + phys_initrd_size;
-
-	/*
-	 * Make sure that the initrd is within a valid area of
-	 * memory.
-	 */
-	if (phys_initrd_size) {
-		unsigned int i;
-
-		initrd_node = -1;
-
-		for (i = 0; i < mi->nr_banks; i++) {
-			struct membank *bank = &mi->bank[i];
-			if (bank_phys_start(bank) <= phys_initrd_start &&
-			    end <= bank_phys_end(bank))
-				initrd_node = bank->node;
-		}
-	}
-
-	if (initrd_node == -1) {
-		printk(KERN_ERR "INITRD: 0x%08lx+0x%08lx extends beyond "
-		       "physical memory - disabling initrd\n",
-		       phys_initrd_start, phys_initrd_size);
-		phys_initrd_start = phys_initrd_size = 0;
-	}
-#endif
-
-	return initrd_node;
-}
-
-static void __init bootmem_init_node(int node, struct meminfo *mi,
+static void __init arm_bootmem_init(struct meminfo *mi,
 	unsigned long start_pfn, unsigned long end_pfn)
 {
-	unsigned long boot_pfn;
 	unsigned int boot_pages;
+	phys_addr_t bitmap;
 	pg_data_t *pgdat;
 	int i;
 
 	/*
-	 * Allocate the bootmem bitmap page.
+	 * Allocate the bootmem bitmap page.  This must be in a region
+	 * of memory which has already been mapped.
 	 */
 	boot_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-	boot_pfn = find_bootmap_pfn(node, mi, boot_pages);
+	bitmap = memblock_alloc_base(boot_pages << PAGE_SHIFT, L1_CACHE_BYTES,
+				__pfn_to_phys(end_pfn));
 
 	/*
-	 * Initialise the bootmem allocator for this node, handing the
+	 * Initialise the bootmem allocator, handing the
 	 * memory banks over to bootmem.
 	 */
-	node_set_online(node);
-	pgdat = NODE_DATA(node);
-	init_bootmem_node(pgdat, boot_pfn, start_pfn, end_pfn);
+	node_set_online(0);
+	pgdat = NODE_DATA(0);
+	init_bootmem_node(pgdat, __phys_to_pfn(bitmap), start_pfn, end_pfn);
 
-	for_each_nodebank(i, mi, node) {
+	for_each_bank(i, mi) {
 		struct membank *bank = &mi->bank[i];
 		if (!bank->highmem)
-			free_bootmem_node(pgdat, bank_phys_start(bank), bank_phys_size(bank));
+			free_bootmem(bank_phys_start(bank), bank_phys_size(bank));
 	}
 
 	/*
-	 * Reserve the bootmem bitmap for this node.
+	 * Reserve the memblock reserved regions in bootmem.
 	 */
-	reserve_bootmem_node(pgdat, boot_pfn << PAGE_SHIFT,
-			boot_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);
-}
-
-static void __init bootmem_reserve_initrd(int node)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
-	pg_data_t *pgdat = NODE_DATA(node);
-	int res;
-
-	res = reserve_bootmem_node(pgdat, phys_initrd_start,
-			phys_initrd_size, BOOTMEM_EXCLUSIVE);
-
-	if (res == 0) {
-		initrd_start = __phys_to_virt(phys_initrd_start);
-		initrd_end = initrd_start + phys_initrd_size;
-	} else {
-		printk(KERN_ERR
-			"INITRD: 0x%08lx+0x%08lx overlaps in-use "
-			"memory region - disabling initrd\n",
-			phys_initrd_start, phys_initrd_size);
+	for (i = 0; i < memblock.reserved.cnt; i++) {
+		phys_addr_t start = memblock_start_pfn(&memblock.reserved, i);
+		if (start >= start_pfn &&
+		    memblock_end_pfn(&memblock.reserved, i) <= end_pfn)
+			reserve_bootmem_node(pgdat, __pfn_to_phys(start),
+				memblock_size_bytes(&memblock.reserved, i),
+				BOOTMEM_DEFAULT);
 	}
-#endif
 }
 
-static void __init bootmem_free_node(int node, struct meminfo *mi)
+static void __init arm_bootmem_free(struct meminfo *mi, unsigned long min,
+	unsigned long max_low, unsigned long max_high)
 {
 	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
-	unsigned long min, max_low, max_high;
 	int i;
 
-	find_node_limits(node, mi, &min, &max_low, &max_high);
-
 	/*
-	 * initialise the zones within this node.
+	 * initialise the zones.
 	 */
 	memset(zone_size, 0, sizeof(zone_size));
 
 	/*
-	 * The size of this node has already been determined.  If we need
-	 * to do anything fancy with the allocation of this memory to the
-	 * zones, now is the time to do it.
+	 * The memory size has already been determined.  If we need
+	 * to do anything fancy with the allocation of this memory
+	 * to the zones, now is the time to do it.
 	 */
 	zone_size[0] = max_low - min;
 #ifdef CONFIG_HIGHMEM
@@ -303,11 +212,11 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 #endif
 
 	/*
-	 * For each bank in this node, calculate the size of the holes.
-	 *  holes = node_size - sum(bank_sizes_in_node)
+	 * Calculate the size of the holes.
+	 *  holes = node_size - sum(bank_sizes)
 	 */
 	memcpy(zhole_size, zone_size, sizeof(zhole_size));
-	for_each_nodebank(i, mi, node) {
+	for_each_bank(i, mi) {
 		int idx = 0;
 #ifdef CONFIG_HIGHMEM
 		if (mi->bank[i].highmem)
@@ -320,24 +229,23 @@ static void __init bootmem_free_node(int node, struct meminfo *mi)
 	 * Adjust the sizes according to any special requirements for
 	 * this machine type.
 	 */
-	arch_adjust_zones(node, zone_size, zhole_size);
+	arch_adjust_zones(zone_size, zhole_size);
 
-	free_area_init_node(node, zone_size, min, zhole_size);
+	free_area_init_node(0, zone_size, min, zhole_size);
 }
 
 #ifndef CONFIG_SPARSEMEM
 int pfn_valid(unsigned long pfn)
 {
-	struct meminfo *mi = &meminfo;
-	unsigned int left = 0, right = mi->nr_banks;
+	struct memblock_region *mem = &memblock.memory;
+	unsigned int left = 0, right = mem->cnt;
 
 	do {
 		unsigned int mid = (right + left) / 2;
-		struct membank *bank = &mi->bank[mid];
 
-		if (pfn < bank_pfn_start(bank))
+		if (pfn < memblock_start_pfn(mem, mid))
 			right = mid;
-		else if (pfn >= bank_pfn_end(bank))
+		else if (pfn >= memblock_end_pfn(mem, mid))
 			left = mid + 1;
 		else
 			return 1;
@@ -346,73 +254,69 @@ int pfn_valid(unsigned long pfn)
 }
 EXPORT_SYMBOL(pfn_valid);
 
-static void arm_memory_present(struct meminfo *mi, int node)
+static void arm_memory_present(void)
 {
 }
 #else
-static void arm_memory_present(struct meminfo *mi, int node)
+static void arm_memory_present(void)
 {
 	int i;
-	for_each_nodebank(i, mi, node) {
-		struct membank *bank = &mi->bank[i];
-		memory_present(node, bank_pfn_start(bank), bank_pfn_end(bank));
-	}
+	for (i = 0; i < memblock.memory.cnt; i++)
+		memory_present(0, memblock_start_pfn(&memblock.memory, i),
+				  memblock_end_pfn(&memblock.memory, i));
 }
 #endif
 
-void __init bootmem_init(void)
+void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
 {
-	struct meminfo *mi = &meminfo;
-	unsigned long min, max_low, max_high;
-	int node, initrd_node;
+	int i;
 
-	/*
-	 * Locate which node contains the ramdisk image, if any.
-	 */
-	initrd_node = check_initrd(mi);
+	memblock_init();
+	for (i = 0; i < mi->nr_banks; i++)
+		memblock_add(mi->bank[i].start, mi->bank[i].size);
 
-	max_low = max_high = 0;
+	/* Register the kernel text, kernel data and initrd with memblock. */
+#ifdef CONFIG_XIP_KERNEL
+	memblock_reserve(__pa(_data), _end - _data);
+#else
+	memblock_reserve(__pa(_stext), _end - _stext);
+#endif
+#ifdef CONFIG_BLK_DEV_INITRD
+	if (phys_initrd_size) {
+		memblock_reserve(phys_initrd_start, phys_initrd_size);
 
-	/*
-	 * Run through each node initialising the bootmem allocator.
-	 */
-	for_each_node(node) {
-		unsigned long node_low, node_high;
+		/* Now convert initrd to virtual addresses */
+		initrd_start = __phys_to_virt(phys_initrd_start);
+		initrd_end = initrd_start + phys_initrd_size;
+	}
+#endif
 
-		find_node_limits(node, mi, &min, &node_low, &node_high);
+	arm_mm_memblock_reserve();
 
-		if (node_low > max_low)
-			max_low = node_low;
-		if (node_high > max_high)
-			max_high = node_high;
+	/* reserve any platform specific memblock areas */
+	if (mdesc->reserve)
+		mdesc->reserve();
 
-		/*
-		 * If there is no memory in this node, ignore it.
-		 * (We can't have nodes which have no lowmem)
-		 */
-		if (node_low == 0)
-			continue;
+	memblock_analyze();
+	memblock_dump_all();
+}
 
-		bootmem_init_node(node, mi, min, node_low);
+void __init bootmem_init(void)
+{
+	struct meminfo *mi = &meminfo;
+	unsigned long min, max_low, max_high;
 
-		/*
-		 * Reserve any special node zero regions.
-		 */
-		if (node == 0)
-			reserve_node_zero(NODE_DATA(node));
+	max_low = max_high = 0;
 
-		/*
-		 * If the initrd is in this node, reserve its memory.
-		 */
-		if (node == initrd_node)
-			bootmem_reserve_initrd(node);
+	find_limits(mi, &min, &max_low, &max_high);
 
-		/*
-		 * Sparsemem tries to allocate bootmem in memory_present(),
-		 * so must be done after the fixed reservations
-		 */
-		arm_memory_present(mi, node);
-	}
+	arm_bootmem_init(mi, min, max_low);
+
+	/*
+	 * Sparsemem tries to allocate bootmem in memory_present(),
+	 * so must be done after the fixed reservations
+	 */
+	arm_memory_present();
 
 	/*
 	 * sparse_init() needs the bootmem allocator up and running.
@@ -420,12 +324,11 @@ void __init bootmem_init(void)
 	sparse_init();
 
 	/*
-	 * Now free memory in each node - free_area_init_node needs
+	 * Now free the memory - free_area_init_node needs
 	 * the sparse mem_map arrays initialized by sparse_init()
 	 * for memmap_init_zone(), otherwise all PFNs are invalid.
 	 */
-	for_each_node(node)
-		bootmem_free_node(node, mi);
+	arm_bootmem_free(mi, min, max_low, max_high);
 
 	high_memory = __va((max_low << PAGE_SHIFT) - 1) + 1;
 
@@ -460,7 +363,7 @@ static inline int free_area(unsigned long pfn, unsigned long end, char *s)
 }
 
 static inline void
-free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
+free_memmap(unsigned long start_pfn, unsigned long end_pfn)
 {
 	struct page *start_pg, *end_pg;
 	unsigned long pg, pgend;
@@ -483,13 +386,13 @@ free_memmap(int node, unsigned long start_pfn, unsigned long end_pfn)
 	 * free the section of the memmap array.
 	 */
 	if (pg < pgend)
-		free_bootmem_node(NODE_DATA(node), pg, pgend - pg);
+		free_bootmem(pg, pgend - pg);
 }
 
 /*
  * The mem_map array can get very big.  Free the unused area of the memory map.
  */
-static void __init free_unused_memmap_node(int node, struct meminfo *mi)
+static void __init free_unused_memmap(struct meminfo *mi)
 {
 	unsigned long bank_start, prev_bank_end = 0;
 	unsigned int i;
@@ -499,7 +402,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 	 * may not be the case, especially if the user has provided the
 	 * information on the command line.
 	 */
-	for_each_nodebank(i, mi, node) {
+	for_each_bank(i, mi) {
 		struct membank *bank = &mi->bank[i];
 
 		bank_start = bank_pfn_start(bank);
@@ -514,7 +417,7 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 		 * between the current bank and the previous, free it.
 		 */
 		if (prev_bank_end && prev_bank_end != bank_start)
-			free_memmap(node, prev_bank_end, bank_start);
+			free_memmap(prev_bank_end, bank_start);
 
 		prev_bank_end = bank_pfn_end(bank);
 	}
@@ -528,21 +431,14 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
 void __init mem_init(void)
 {
 	unsigned long reserved_pages, free_pages;
-	int i, node;
+	int i;
 
-#ifndef CONFIG_DISCONTIGMEM
 	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
-#endif
 
 	/* this will put all unused low memory onto the freelists */
-	for_each_online_node(node) {
-		pg_data_t *pgdat = NODE_DATA(node);
-
-		free_unused_memmap_node(node, &meminfo);
+	free_unused_memmap(&meminfo);
 
-		if (pgdat->node_spanned_pages != 0)
-			totalram_pages += free_all_bootmem_node(pgdat);
-	}
+	totalram_pages += free_all_bootmem();
 
 #ifdef CONFIG_SA1111
 	/* now that our DMA memory is actually so designated, we can free it */
@@ -552,39 +448,35 @@ void __init mem_init(void)
 
 #ifdef CONFIG_HIGHMEM
 	/* set highmem page free */
-	for_each_online_node(node) {
-		for_each_nodebank (i, &meminfo, node) {
-			unsigned long start = bank_pfn_start(&meminfo.bank[i]);
-			unsigned long end = bank_pfn_end(&meminfo.bank[i]);
-			if (start >= max_low_pfn + PHYS_PFN_OFFSET)
-				totalhigh_pages += free_area(start, end, NULL);
-		}
+	for_each_bank (i, &meminfo) {
+		unsigned long start = bank_pfn_start(&meminfo.bank[i]);
+		unsigned long end = bank_pfn_end(&meminfo.bank[i]);
+		if (start >= max_low_pfn + PHYS_PFN_OFFSET)
+			totalhigh_pages += free_area(start, end, NULL);
 	}
 	totalram_pages += totalhigh_pages;
 #endif
 
 	reserved_pages = free_pages = 0;
 
-	for_each_online_node(node) {
-		for_each_nodebank(i, &meminfo, node) {
-			struct membank *bank = &meminfo.bank[i];
-			unsigned int pfn1, pfn2;
-			struct page *page, *end;
-
-			pfn1 = bank_pfn_start(bank);
-			pfn2 = bank_pfn_end(bank);
-
-			page = pfn_to_page(pfn1);
-			end  = pfn_to_page(pfn2 - 1) + 1;
-
-			do {
-				if (PageReserved(page))
-					reserved_pages++;
-				else if (!page_count(page))
-					free_pages++;
-				page++;
-			} while (page < end);
-		}
+	for_each_bank(i, &meminfo) {
+		struct membank *bank = &meminfo.bank[i];
+		unsigned int pfn1, pfn2;
+		struct page *page, *end;
+
+		pfn1 = bank_pfn_start(bank);
+		pfn2 = bank_pfn_end(bank);
+
+		page = pfn_to_page(pfn1);
+		end  = pfn_to_page(pfn2 - 1) + 1;
+
+		do {
+			if (PageReserved(page))
+				reserved_pages++;
+			else if (!page_count(page))
+				free_pages++;
+			page++;
+		} while (page < end);
 	}
 
 	/*
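
The new pfn_valid() above relies on memblock.memory holding sorted, non-overlapping regions, which makes a binary search sufficient. A self-contained sketch of the same search outside the kernel; the type and function names here are illustrative, not kernel API:

#include <stdio.h>

struct region { unsigned long start, end; };	/* pfn range, end exclusive */

static int range_valid(const struct region *r, unsigned int cnt, unsigned long pfn)
{
	unsigned int left = 0, right = cnt;

	while (left < right) {
		unsigned int mid = (left + right) / 2;

		if (pfn < r[mid].start)
			right = mid;		/* pfn is below this region */
		else if (pfn >= r[mid].end)
			left = mid + 1;		/* pfn is above this region */
		else
			return 1;		/* pfn falls inside this region */
	}
	return 0;
}

int main(void)
{
	const struct region mem[] = { { 0x60000, 0x70000 }, { 0x80000, 0x90000 } };

	/* prints "1 0": the first pfn is in RAM, the second is in the hole */
	printf("%d %d\n", range_valid(mem, 2, 0x68000),
			  range_valid(mem, 2, 0x78000));
	return 0;
}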
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 28c8b950ef04..03f11935ed08 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -268,6 +268,12 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
 		return NULL;
 
+	/*
+	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
+	 */
+	if (WARN_ON(pfn_valid(pfn)))
+		return NULL;
+
 	type = get_mem_type(mtype);
 	if (!type)
 		return NULL;
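
The new WARN_ON(pfn_valid(pfn)) rejects ioremap() of system RAM: on ARMv6 and later, a second mapping with device attributes would alias the kernel's cacheable linear mapping, and mismatched memory types give unpredictable results. A hedged sketch of how a caller might cope; the helper name and the use of the linear mapping for RAM are illustrative, not an API from this patch:

static void __iomem *map_mmio_or_ram(phys_addr_t phys, size_t size)
{
	/* RAM: reuse the kernel's existing cacheable mapping */
	if (pfn_valid(__phys_to_pfn(phys)))
		return (void __iomem *)__va(phys);

	/* true MMIO: safe to create a device mapping */
	return ioremap(phys, size);
}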
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 815d08eecbb0..6630620380a4 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -28,7 +28,5 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 
 #endif
 
-struct pglist_data;
-
 void __init bootmem_init(void);
-void reserve_node_zero(struct pglist_data *pgdat);
+void arm_mm_memblock_reserve(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 285894171186..d5541adc3520 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -11,13 +11,12 @@
 #include <linux/kernel.h>
 #include <linux/errno.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/mman.h>
 #include <linux/nodemask.h>
+#include <linux/memblock.h>
 #include <linux/sort.h>
 
 #include <asm/cputype.h>
-#include <asm/mach-types.h>
 #include <asm/sections.h>
 #include <asm/cachetype.h>
 #include <asm/setup.h>
@@ -488,18 +487,28 @@ static void __init build_mem_type_table(void)
 
 #define vectors_base()	(vectors_high() ? 0xffff0000 : 0)
 
-static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
-				  unsigned long end, unsigned long pfn,
-				  const struct mem_type *type)
+static void __init *early_alloc(unsigned long sz)
 {
-	pte_t *pte;
+	void *ptr = __va(memblock_alloc(sz, sz));
+	memset(ptr, 0, sz);
+	return ptr;
+}
 
+static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
+{
 	if (pmd_none(*pmd)) {
-		pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
-		__pmd_populate(pmd, __pa(pte) | type->prot_l1);
+		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
+		__pmd_populate(pmd, __pa(pte) | prot);
 	}
+	BUG_ON(pmd_bad(*pmd));
+	return pte_offset_kernel(pmd, addr);
+}
 
-	pte = pte_offset_kernel(pmd, addr);
+static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
+				  unsigned long end, unsigned long pfn,
+				  const struct mem_type *type)
+{
+	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
 	do {
 		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
 		pfn++;
@@ -668,7 +677,7 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 		create_mapping(io_desc + i);
 }
 
-static unsigned long __initdata vmalloc_reserve = SZ_128M;
+static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);
 
 /*
  * vmalloc=size forces the vmalloc area to be exactly 'size'
@@ -677,7 +686,7 @@ static unsigned long __initdata vmalloc_reserve = SZ_128M;
  */
 static int __init early_vmalloc(char *arg)
 {
-	vmalloc_reserve = memparse(arg, NULL);
+	unsigned long vmalloc_reserve = memparse(arg, NULL);
 
 	if (vmalloc_reserve < SZ_16M) {
 		vmalloc_reserve = SZ_16M;
@@ -692,22 +701,26 @@ static int __init early_vmalloc(char *arg)
 			"vmalloc area is too big, limiting to %luMB\n",
 			vmalloc_reserve >> 20);
 	}
+
+	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
 	return 0;
 }
 early_param("vmalloc", early_vmalloc);
 
-#define VMALLOC_MIN	(void *)(VMALLOC_END - vmalloc_reserve)
+phys_addr_t lowmem_end_addr;
 
 static void __init sanity_check_meminfo(void)
 {
 	int i, j, highmem = 0;
 
+	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;
+
 	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
 		struct membank *bank = &meminfo.bank[j];
 		*bank = meminfo.bank[i];
 
 #ifdef CONFIG_HIGHMEM
-		if (__va(bank->start) > VMALLOC_MIN ||
+		if (__va(bank->start) > vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET)
 			highmem = 1;
 
@@ -717,8 +730,8 @@ static void __init sanity_check_meminfo(void)
 		 * Split those memory banks which are partially overlapping
 		 * the vmalloc area greatly simplifying things later.
 		 */
-		if (__va(bank->start) < VMALLOC_MIN &&
-		    bank->size > VMALLOC_MIN - __va(bank->start)) {
+		if (__va(bank->start) < vmalloc_min &&
+		    bank->size > vmalloc_min - __va(bank->start)) {
 			if (meminfo.nr_banks >= NR_BANKS) {
 				printk(KERN_CRIT "NR_BANKS too low, "
 				       "ignoring high memory\n");
@@ -727,12 +740,12 @@ static void __init sanity_check_meminfo(void)
 					(meminfo.nr_banks - i) * sizeof(*bank));
 				meminfo.nr_banks++;
 				i++;
-				bank[1].size -= VMALLOC_MIN - __va(bank->start);
-				bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
+				bank[1].size -= vmalloc_min - __va(bank->start);
+				bank[1].start = __pa(vmalloc_min - 1) + 1;
 				bank[1].highmem = highmem = 1;
 				j++;
 			}
-			bank->size = VMALLOC_MIN - __va(bank->start);
+			bank->size = vmalloc_min - __va(bank->start);
 		}
 #else
 		bank->highmem = highmem;
@@ -741,7 +754,7 @@ static void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would entirely overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start) >= VMALLOC_MIN ||
+		if (__va(bank->start) >= vmalloc_min ||
 		    __va(bank->start) < (void *)PAGE_OFFSET) {
 			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
 			       "(vmalloc region overlap).\n",
@@ -753,9 +766,9 @@ static void __init sanity_check_meminfo(void)
 		 * Check whether this memory bank would partially overlap
 		 * the vmalloc area.
 		 */
-		if (__va(bank->start + bank->size) > VMALLOC_MIN ||
+		if (__va(bank->start + bank->size) > vmalloc_min ||
 		    __va(bank->start + bank->size) < __va(bank->start)) {
-			unsigned long newsize = VMALLOC_MIN - __va(bank->start);
+			unsigned long newsize = vmalloc_min - __va(bank->start);
 			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
 			       "to -%.8lx (vmalloc region overlap).\n",
 			       bank->start, bank->start + bank->size - 1,
@@ -827,101 +840,23 @@ static inline void prepare_page_table(void)
 }
 
 /*
- * Reserve the various regions of node 0
+ * Reserve the special regions of memory
  */
-void __init reserve_node_zero(pg_data_t *pgdat)
+void __init arm_mm_memblock_reserve(void)
 {
-	unsigned long res_size = 0;
-
-	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
-			BOOTMEM_DEFAULT);
-#else
-	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
-			BOOTMEM_DEFAULT);
-#endif
-
 	/*
 	 * Reserve the page tables.  These are already in use,
 	 * and can only be in node 0.
 	 */
-	reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
-			PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);
-
-	/*
-	 * Hmm... This should go elsewhere, but we really really need to
-	 * stop things allocating the low memory; ideally we need a better
-	 * implementation of GFP_DMA which does not assume that DMA-able
-	 * memory starts at zero.
-	 */
-	if (machine_is_integrator() || machine_is_cintegrator())
-		res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
-
-	/*
-	 * These should likewise go elsewhere.  They pre-reserve the
-	 * screen memory region at the start of main system memory.
-	 */
-	if (machine_is_edb7211())
-		res_size = 0x00020000;
-	if (machine_is_p720t())
-		res_size = 0x00014000;
-
-	/* H1940, RX3715 and RX1950 need to reserve this for suspend */
-
-	if (machine_is_h1940() || machine_is_rx3715()
-		|| machine_is_rx1950()) {
-		reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
-			BOOTMEM_DEFAULT);
-		reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
-			BOOTMEM_DEFAULT);
-	}
-
-	if (machine_is_palmld() || machine_is_palmtx()) {
-		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-	}
-
-	if (machine_is_treo680() || machine_is_centro()) {
-		reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-		reserve_bootmem_node(pgdat, 0xa2000000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-	}
-
-	if (machine_is_palmt5())
-		reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
-				BOOTMEM_EXCLUSIVE);
-
-	/*
-	 * U300 - This platform family can share physical memory
-	 * between two ARM cpus, one running Linux and the other
-	 * running another OS.
-	 */
-	if (machine_is_u300()) {
-#ifdef CONFIG_MACH_U300_SINGLE_RAM
-#if ((CONFIG_MACH_U300_ACCESS_MEM_SIZE & 1) == 1) &&	\
-	CONFIG_MACH_U300_2MB_ALIGNMENT_FIX
-		res_size = 0x00100000;
-#endif
-#endif
-	}
+	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));
 
 #ifdef CONFIG_SA1111
 	/*
 	 * Because of the SA1111 DMA bug, we want to preserve our
 	 * precious DMA-able memory...
 	 */
-	res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
+	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
 #endif
-	if (res_size)
-		reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
-				BOOTMEM_DEFAULT);
 }
 
 /*
@@ -940,7 +875,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	/*
 	 * Allocate the vector page early.
 	 */
-	vectors = alloc_bootmem_low_pages(PAGE_SIZE);
+	vectors = early_alloc(PAGE_SIZE);
 
 	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -1011,11 +946,8 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 static void __init kmap_init(void)
 {
 #ifdef CONFIG_HIGHMEM
-	pmd_t *pmd = pmd_off_k(PKMAP_BASE);
-	pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
-	BUG_ON(!pmd_none(*pmd) || !pte);
-	__pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
-	pkmap_page_table = pte + PTRS_PER_PTE;
+	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
+		PKMAP_BASE, _PAGE_KERNEL_TABLE);
 #endif
 }
 
@@ -1066,17 +998,16 @@ void __init paging_init(struct machine_desc *mdesc)
 	sanity_check_meminfo();
 	prepare_page_table();
 	map_lowmem();
-	bootmem_init();
 	devicemaps_init(mdesc);
 	kmap_init();
 
 	top_pmd = pmd_off_k(0xffff0000);
 
-	/*
-	 * allocate the zero page.  Note that this always succeeds and
-	 * returns a zeroed result.
-	 */
-	zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
+	/* allocate the zero page. */
+	zero_page = early_alloc(PAGE_SIZE);
+
+	bootmem_init();
+
 	empty_zero_page = virt_to_page(zero_page);
 	__flush_dcache_page(NULL, empty_zero_page);
 }
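
All of the machine_is_*() special cases deleted from reserve_node_zero() above are meant to move into per-machine code via the new mdesc->reserve hook that arm_memblock_init() invokes (see the init.c hunks earlier in this diff). A sketch of what a converted board file might look like; the board name and address are made up for illustration, and the usual mach/arch.h boilerplate is omitted:

static void __init myboard_reserve(void)
{
	/* keep the bootloader's framebuffer away from the allocator */
	memblock_reserve(0xa0000000, SZ_1M);
}

MACHINE_START(MYBOARD, "My Board")
	/* ... existing fields ... */
	.reserve	= myboard_reserve,
MACHINE_END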
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index 33b327379f07..687d02319a41 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -6,8 +6,8 @@
 #include <linux/module.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
-#include <linux/bootmem.h>
 #include <linux/io.h>
+#include <linux/memblock.h>
 
 #include <asm/cacheflush.h>
 #include <asm/sections.h>
@@ -17,30 +17,14 @@
 
 #include "mm.h"
 
-/*
- * Reserve the various regions of node 0
- */
-void __init reserve_node_zero(pg_data_t *pgdat)
+void __init arm_mm_memblock_reserve(void)
 {
 	/*
-	 * Register the kernel text and data with bootmem.
-	 * Note that this can only be in node 0.
-	 */
-#ifdef CONFIG_XIP_KERNEL
-	reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
-			BOOTMEM_DEFAULT);
-#else
-	reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
-			BOOTMEM_DEFAULT);
-#endif
-
-	/*
 	 * Register the exception vector page.
 	 * some architectures which the DRAM is the exception vector to trap,
 	 * alloc_page breaks with error, although it is not NULL, but "0."
 	 */
-	reserve_bootmem_node(pgdat, CONFIG_VECTORS_BASE, PAGE_SIZE,
-			BOOTMEM_DEFAULT);
+	memblock_reserve(CONFIG_VECTORS_BASE, PAGE_SIZE);
 }
 
 /*
diff --git a/arch/arm/mm/proc-v6.S b/arch/arm/mm/proc-v6.S
index 7a5337ed7d68..2f5a3c23a0fe 100644
--- a/arch/arm/mm/proc-v6.S
+++ b/arch/arm/mm/proc-v6.S
@@ -239,7 +239,8 @@ __v6_proc_info:
 	b	__v6_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA
+	/* See also feat_v6_fixup() for HWCAP_TLS */
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_JAVA|HWCAP_TLS
 	.long	cpu_v6_name
 	.long	v6_processor_functions
 	.long	v6wbi_tlb_fns
@@ -262,7 +263,7 @@ __pj4_v6_proc_info:
 	b	__v6_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
 	.long	cpu_pj4_name
 	.long	v6_processor_functions
 	.long	v6wbi_tlb_fns
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 7aaf88a3b7aa..8071bcd4c995 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -344,7 +344,7 @@ __v7_proc_info:
 	b	__v7_setup
 	.long	cpu_arch_name
 	.long	cpu_elf_name
-	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
+	.long	HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP|HWCAP_TLS
 	.long	cpu_v7_name
 	.long	v7_processor_functions
 	.long	v7wbi_tlb_fns
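
With HWCAP_TLS now set in the v6/v7 proc_info hwcaps (and masked off again on early ARM1136 cores by feat_v6_fixup(), per the comment in the first proc-v6.S hunk), user space can test for the register before using it. A sketch using the ELF auxiliary vector; getauxval() is a later glibc convenience, and the HWCAP_TLS value is quoted from asm/hwcap.h as an assumption:

#include <stdio.h>
#include <sys/auxv.h>

#ifndef HWCAP_TLS
#define HWCAP_TLS	(1 << 15)	/* assumed to match asm/hwcap.h */
#endif

int main(void)
{
	if (getauxval(AT_HWCAP) & HWCAP_TLS)
		puts("CP15 TLS register is safe to read directly");
	else
		puts("use the 0xffff0fe0 kuser helper instead");
	return 0;
}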