author    Arnd Bergmann <arnd@arndb.de>  2012-10-04 16:57:00 -0400
committer Arnd Bergmann <arnd@arndb.de>  2012-10-04 16:57:51 -0400
commit    c37d6154c0b9163c27e53cc1d0be3867b4abd760 (patch)
tree      7a24522c56d1cb284dff1d3c225bbdaba0901bb5 /arch/arm64/mm/init.c
parent    e7a570ff7dff9af6e54ff5e580a61ec7652137a0 (diff)
parent    8a1ab3155c2ac7fbe5f2038d6e26efeb607a1498 (diff)
Merge branch 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers into asm-generic
Patches from David Howells <dhowells@redhat.com>:

This is to complete part of the UAPI disintegration for which the preparatory
patches were pulled recently. Note that there are some fixup patches which are
at the base of the branch aimed at you, plus all arches get the asm-generic
branch merged in too.

* 'disintegrate-asm-generic' of git://git.infradead.org/users/dhowells/linux-headers:
  UAPI: (Scripted) Disintegrate include/asm-generic
  UAPI: Fix conditional header installation handling (notably kvm_para.h on m68k)
  c6x: remove c6x signal.h
  UAPI: Split compound conditionals containing __KERNEL__ in Arm64
  UAPI: Fix the guards on various asm/unistd.h files

Signed-off-by: Arnd Bergmann <arnd@arndb.de>
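As background on the "Split compound conditionals containing __KERNEL__" item: the
scripted UAPI disintegration can only separate kernel-only code that sits under a
bare __KERNEL__ guard, so guards that combined __KERNEL__ with another condition
had to be split first. A schematic before/after sketch (the macro name below is
made up for illustration and is not taken from the patch):

	/* before: one compound conditional */
	#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
	#define __ARCH_WANT_EXAMPLE_COMPAT	/* hypothetical macro */
	#endif

	/* after: the __KERNEL__ test stands alone, so the kernel-only
	 * half can be separated from the exported header */
	#ifdef __KERNEL__
	#ifdef CONFIG_COMPAT
	#define __ARCH_WANT_EXAMPLE_COMPAT	/* hypothetical macro */
	#endif
	#endif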
Diffstat (limited to 'arch/arm64/mm/init.c')
-rw-r--r--   arch/arm64/mm/init.c   437
1 file changed, 437 insertions, 0 deletions
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
new file mode 100644
index 000000000000..5f719ba949bc
--- /dev/null
+++ b/arch/arm64/mm/init.c
@@ -0,0 +1,437 @@
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>

#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

phys_addr_t memstart_addr __read_mostly = 0;

void __init early_init_dt_setup_initrd_arch(unsigned long start,
					    unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}

static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma32 = min;

	memset(zone_size, 0, sizeof(zone_size));

#ifdef CONFIG_ZONE_DMA32
	/* 4GB maximum for 32-bit only capable devices */
	max_dma32 = min(max, MAX_DMA32_PFN);
	zone_size[ZONE_DMA32] = max_dma32 - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma32;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;
#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma32) {
			unsigned long dma_end = min(end, max_dma32);
			zhole_size[ZONE_DMA32] -= dma_end - start;
		}
#endif
		if (end > max_dma32) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma32);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void arm64_memory_present(void)
{
}
#else
static void arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

void __init arm64_memblock_init(void)
{
	u64 *reserve_map, base, size;

	/* Register the kernel text, kernel data and initrd with memblock */
	memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	/*
	 * Reserve the page tables. These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
	memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);

	/* Reserve the dtb region */
	memblock_reserve(virt_to_phys(initial_boot_params),
			 be32_to_cpu(initial_boot_params->totalsize));

	/*
	 * Process the reserve map. This will probably overlap the initrd
	 * and dtb locations which are already reserved, but overlapping
	 * doesn't hurt anything
	 */
	reserve_map = ((void*)initial_boot_params) +
			be32_to_cpu(initial_boot_params->off_mem_rsvmap);
	while (1) {
		base = be64_to_cpup(reserve_map++);
		size = be64_to_cpup(reserve_map++);
		if (!size)
			break;
		memblock_reserve(base, size);
	}

	memblock_allow_resize();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
	max_pfn = max_low_pfn = max;
}

static inline int free_area(unsigned long pfn, unsigned long end, char *s)
{
	unsigned int pages = 0, size = (end - pfn) << (PAGE_SHIFT - 10);

	for (; pfn < end; pfn++) {
		struct page *page = pfn_to_page(pfn);
		ClearPageReserved(page);
		init_page_count(page);
		__free_page(page);
		pages++;
	}

	if (size && s)
		pr_info("Freeing %s memory: %dK\n", s, size);

	return pages;
}

/*
 * Poison init memory with an undefined instruction (0x0).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	memset(s, 0, count);
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(start + __phys_to_pfn(reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;

#ifdef CONFIG_SWIOTLB
	extern void __init arm64_swiotlb_init(size_t max_size);
	arm64_swiotlb_init(max_pfn << PAGE_SHIFT);
#endif

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
#endif

	totalram_pages += free_all_bootmem();

	reserved_pages = free_pages = 0;

	for_each_memblock(memory, reg) {
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = __phys_to_pfn(reg->base);
		pfn2 = pfn1 + __phys_to_pfn(reg->size);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the real number
	 * of pages we have in this system.
	 */
	pr_info("Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	pr_notice("Memory: %luk/%luk available, %luk reserved\n",
		  nr_free_pages() << (PAGE_SHIFT-10),
		  free_pages << (PAGE_SHIFT-10),
		  reserved_pages << (PAGE_SHIFT-10));

#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
		  "    vmalloc : 0x%16lx - 0x%16lx   (%6ld MB)\n"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		  "    vmemmap : 0x%16lx - 0x%16lx   (%6ld MB)\n"
#endif
		  "    modules : 0x%16lx - 0x%16lx   (%6ld MB)\n"
		  "    memory  : 0x%16lx - 0x%16lx   (%6ld MB)\n"
		  "      .init : 0x%p" " - 0x%p" "   (%6ld kB)\n"
		  "      .text : 0x%p" " - 0x%p" "   (%6ld kB)\n"
		  "      .data : 0x%p" " - 0x%p" "   (%6ld kB)\n",
		  MLM(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
		      (unsigned long)virt_to_page(high_memory)),
#endif
		  MLM(MODULES_VADDR, MODULES_END),
		  MLM(PAGE_OFFSET, (unsigned long)high_memory),

		  MLK_ROUNDUP(__init_begin, __init_end),
		  MLK_ROUNDUP(_text, _etext),
		  MLK_ROUNDUP(_sdata, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
	BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
	BUG_ON(TASK_SIZE_64 > MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	poison_init_mem(__init_begin, __init_end - __init_begin);
	totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
				    __phys_to_pfn(__pa(__init_end)),
				    "init");
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
					    __phys_to_pfn(__pa(end)),
					    "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif
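
The hole accounting in zone_sizes_init() above can be checked in isolation: zone
sizes are first taken as whole PFN spans, then the pages actually covered by
memory regions are subtracted from a copy, leaving the per-zone hole counts that
are passed to free_area_init_node(). The following standalone sketch (plain
userspace C with a made-up bank layout, not part of the kernel file) walks two
hypothetical memory banks and derives the same span and hole numbers:

	/*
	 * Illustration only: mirrors the arithmetic of zone_sizes_init().
	 * Bank addresses are invented; assumes a 64-bit unsigned long, as on arm64.
	 */
	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define MAX_DMA32_PFN	((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)

	struct region { unsigned long start, end; };	/* PFNs, end exclusive */

	static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }
	static unsigned long max_ul(unsigned long a, unsigned long b) { return a > b ? a : b; }

	int main(void)
	{
		/* Two hypothetical banks: 1GB starting at 2GB, 2GB starting at 34GB. */
		struct region regs[] = {
			{ 0x080000, 0x0c0000 },
			{ 0x880000, 0x900000 },
		};
		unsigned long min = regs[0].start, max = regs[1].end;
		unsigned long max_dma32 = min_ul(max, MAX_DMA32_PFN);
		unsigned long size_dma32 = max_dma32 - min;
		unsigned long size_normal = max - max_dma32;
		unsigned long hole_dma32 = size_dma32, hole_normal = size_normal;

		for (unsigned int i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
			unsigned long start = regs[i].start, end = regs[i].end;

			if (start >= max)
				continue;
			/* pages the bank contributes below the 4GB line */
			if (start < max_dma32)
				hole_dma32 -= min_ul(end, max_dma32) - start;
			/* pages the bank contributes above the 4GB line */
			if (end > max_dma32)
				hole_normal -= min_ul(end, max) - max_ul(start, max_dma32);
		}

		printf("ZONE_DMA32 : span %lu pages, holes %lu pages\n", size_dma32, hole_dma32);
		printf("ZONE_NORMAL: span %lu pages, holes %lu pages\n", size_normal, hole_normal);
		return 0;
	}

With this layout the DMA32 zone spans 524288 pages with a 262144-page hole (the
gap between the end of the first bank and the 4GB boundary), and the NORMAL zone
spans 8388608 pages of which all but the 524288 pages of the second bank are holes.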