author		Tejun Heo <tj@kernel.org>		2011-07-12 05:16:06 -0400
committer	H. Peter Anvin <hpa@linux.intel.com>	2011-07-14 14:47:53 -0400
commit		24aa07882b672fff2da2f5c955759f0bd13d32d5 (patch)
tree		e6dad38048ede1dbb9ad3c7fffcc4b37e72274a8 /arch/x86/mm
parent		c378ddd53f9b8832a46fd4fec050a97fc2269858 (diff)
memblock, x86: Replace memblock_x86_reserve/free_range() with generic ones
Other than a sanity check and a debug message, the x86-specific memblock
reserve/free functions are simple wrappers around the generic
memblock_reserve()/memblock_free().

This patch adds debug messages with caller identification to the generic
versions, replaces the x86-specific calls with them at all call sites, and
removes the x86-specific functions. arch/x86/include/asm/memblock.h and
arch/x86/mm/memblock.c are left empty by this change and are removed as well.
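For illustration (an editor's sketch, not part of the patch text): the
conversion at each call site is mechanical, since the old x86 wrappers took
a [start, end) physical range plus a label string while the generic
interface takes a base address and a size, e.g.

	/* old x86-specific interface: [start, end) range plus a label */
	memblock_x86_reserve_range(start, end, "PGTABLE");
	memblock_x86_free_range(start, end);

	/* generic replacements: base address plus size */
	memblock_reserve(start, end - start);
	memblock_free(start, end - start);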
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/1310462166-31469-14-git-send-email-tj@kernel.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm')
 arch/x86/mm/Makefile         |  2
 arch/x86/mm/init.c           |  6
 arch/x86/mm/memblock.c       | 34
 arch/x86/mm/memtest.c        |  2
 arch/x86/mm/numa.c           |  5
 arch/x86/mm/numa_32.c        |  6
 arch/x86/mm/numa_emulation.c |  4
 7 files changed, 11 insertions, 48 deletions
diff --git a/arch/x86/mm/Makefile b/arch/x86/mm/Makefile
index 3d11327c9ab4..23d8e5fecf76 100644
--- a/arch/x86/mm/Makefile
+++ b/arch/x86/mm/Makefile
@@ -27,6 +27,4 @@ obj-$(CONFIG_AMD_NUMA)		+= amdtopology.o
 obj-$(CONFIG_ACPI_NUMA)		+= srat.o
 obj-$(CONFIG_NUMA_EMU)		+= numa_emulation.o
 
-obj-$(CONFIG_HAVE_MEMBLOCK)	+= memblock.o
-
 obj-$(CONFIG_MEMTEST)		+= memtest.o
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 13cf05a61605..0b736b99d925 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -81,7 +81,7 @@ static void __init find_early_table_space(unsigned long end, int use_pse,
 
 void __init native_pagetable_reserve(u64 start, u64 end)
 {
-	memblock_x86_reserve_range(start, end, "PGTABLE");
+	memblock_reserve(start, end - start);
 }
 
 struct map_range {
@@ -280,8 +280,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
	 * pgt_buf_end) and free the other ones (pgt_buf_end - pgt_buf_top)
	 * so that they can be reused for other purposes.
	 *
-	 * On native it just means calling memblock_x86_reserve_range, on Xen it
-	 * also means marking RW the pagetable pages that we allocated before
+	 * On native it just means calling memblock_reserve, on Xen it also
+	 * means marking RW the pagetable pages that we allocated before
	 * but that haven't been used.
	 *
	 * In fact on xen we mark RO the whole range pgt_buf_start -
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
deleted file mode 100644
index 7325c5d8ace5..000000000000
--- a/arch/x86/mm/memblock.c
+++ /dev/null
@@ -1,34 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/types.h>
-#include <linux/init.h>
-#include <linux/bitops.h>
-#include <linux/memblock.h>
-#include <linux/bootmem.h>
-#include <linux/mm.h>
-#include <linux/range.h>
-
-void __init memblock_x86_reserve_range(u64 start, u64 end, char *name)
-{
-	if (start == end)
-		return;
-
-	if (WARN_ONCE(start > end, "memblock_x86_reserve_range: wrong range [%#llx, %#llx)\n", start, end))
-		return;
-
-	memblock_dbg("    memblock_x86_reserve_range: [%#010llx-%#010llx] %16s\n", start, end - 1, name);
-
-	memblock_reserve(start, end - start);
-}
-
-void __init memblock_x86_free_range(u64 start, u64 end)
-{
-	if (start == end)
-		return;
-
-	if (WARN_ONCE(start > end, "memblock_x86_free_range: wrong range [%#llx, %#llx)\n", start, end))
-		return;
-
-	memblock_dbg("    memblock_x86_free_range: [%#010llx-%#010llx]\n", start, end - 1);
-
-	memblock_free(start, end - start);
-}
diff --git a/arch/x86/mm/memtest.c b/arch/x86/mm/memtest.c
index 46a5ff25eda4..c80b9fb95734 100644
--- a/arch/x86/mm/memtest.c
+++ b/arch/x86/mm/memtest.c
@@ -34,7 +34,7 @@ static void __init reserve_bad_mem(u64 pattern, u64 start_bad, u64 end_bad)
	       (unsigned long long) pattern,
	       (unsigned long long) start_bad,
	       (unsigned long long) end_bad);
-	memblock_x86_reserve_range(start_bad, end_bad, "BAD RAM");
+	memblock_reserve(start_bad, end_bad - start_bad);
 }
 
 static void __init memtest(u64 pattern, u64 start_phys, u64 size)
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 88e562729967..496f494593bf 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -364,8 +364,7 @@ void __init numa_reset_distance(void)
 
	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
-		memblock_x86_free_range(__pa(numa_distance),
-					__pa(numa_distance) + size);
+		memblock_free(__pa(numa_distance), size);
	numa_distance_cnt = 0;
	numa_distance = NULL;	/* enable table creation */
 }
@@ -394,7 +393,7 @@ static int __init numa_alloc_distance(void)
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
-	memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");
+	memblock_reserve(phys, size);
 
	numa_distance = __va(phys);
	numa_distance_cnt = cnt;
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 58878b536ef2..534255a36b6b 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -204,7 +204,7 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
			   size, nid);
		return;
	}
-	memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM");
+	memblock_reserve(node_pa, size);
 
	remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT,
					  max_low_pfn << PAGE_SHIFT,
@@ -212,10 +212,10 @@ void __init init_alloc_remap(int nid, u64 start, u64 end)
	if (!remap_pa) {
		pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n",
			   size, nid);
-		memblock_x86_free_range(node_pa, node_pa + size);
+		memblock_free(node_pa, size);
		return;
	}
-	memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG");
+	memblock_reserve(remap_pa, size);
	remap_va = phys_to_virt(remap_pa);
 
	/* perform actual remap */
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 971fe70549b3..46db56845f18 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -361,7 +361,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
		pr_warning("NUMA: Warning: can't allocate copy of distance table, disabling emulation\n");
		goto no_emu;
	}
-	memblock_x86_reserve_range(phys, phys + phys_size, "TMP NUMA DIST");
+	memblock_reserve(phys, phys_size);
	phys_dist = __va(phys);
 
	for (i = 0; i < numa_dist_cnt; i++)
@@ -430,7 +430,7 @@ void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt)
 
	/* free the copied physical distance table */
	if (phys_dist)
-		memblock_x86_free_range(__pa(phys_dist), __pa(phys_dist) + phys_size);
+		memblock_free(__pa(phys_dist), phys_size);
	return;
 
 no_emu: