author	Paul Mundt <lethal@linux-sh.org>	2010-08-04 00:52:34 -0400
committer	Paul Mundt <lethal@linux-sh.org>	2010-08-04 00:52:34 -0400
commit	baea90ea14b316e4599f000c713e446908a5aef6 (patch)
tree	17eae40f15832b466121d8f962028472c4568a13 /arch/sh
parent	36239c6704b71da7fb8e2a9429e159a84d0c5a3e (diff)
parent	3a09b1be53d23df780a0cd0e4087a05e2ca4a00c (diff)
Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6
Diffstat (limited to 'arch/sh')
-rw-r--r--  arch/sh/Kconfig                 |  2
-rw-r--r--  arch/sh/include/asm/lmb.h       |  6
-rw-r--r--  arch/sh/include/asm/memblock.h  |  6
-rw-r--r--  arch/sh/kernel/machine_kexec.c  | 18
-rw-r--r--  arch/sh/kernel/setup.c          |  8
-rw-r--r--  arch/sh/mm/init.c               | 40
-rw-r--r--  arch/sh/mm/numa.c               |  8
7 files changed, 44 insertions(+), 44 deletions(-)
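Note (editorial summary, not part of the commit): the hunks below are a mechanical pick-up of the tree-wide LMB to memblock rename for arch/sh; as far as these hunks show, only identifiers change. The mapping used throughout is:

    #include <linux/lmb.h>               ->  #include <linux/memblock.h>
    asm/lmb.h / LMB_REAL_LIMIT           ->  asm/memblock.h / MEMBLOCK_REAL_LIMIT
    HAVE_LMB (Kconfig)                   ->  HAVE_MEMBLOCK
    lmb_init() / lmb_analyze()           ->  memblock_init() / memblock_analyze()
    lmb_add() / lmb_reserve()            ->  memblock_add() / memblock_reserve()
    lmb_alloc() / lmb_alloc_base() /
    __lmb_alloc_base()                   ->  memblock_alloc() / memblock_alloc_base() /
                                             __memblock_alloc_base()
    lmb_end_of_DRAM() /
    lmb_phys_mem_size()                  ->  memblock_end_of_DRAM() /
                                             memblock_phys_mem_size()
    lmb_size_bytes() / lmb_size_pages()  ->  memblock_size_bytes() / memblock_size_pages()
    lmb.memory / lmb.reserved            ->  memblock.memory / memblock.reserved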
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 4b5a1d53704d..c91934186896 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -10,7 +10,7 @@ config SUPERH
 	select EMBEDDED
 	select HAVE_CLK
 	select HAVE_IDE if HAS_IOPORT
-	select HAVE_LMB
+	select HAVE_MEMBLOCK
 	select HAVE_OPROFILE
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_ARCH_TRACEHOOK
diff --git a/arch/sh/include/asm/lmb.h b/arch/sh/include/asm/lmb.h
deleted file mode 100644
index 9b437f657ffa..000000000000
--- a/arch/sh/include/asm/lmb.h
+++ /dev/null
@@ -1,6 +0,0 @@
-#ifndef __ASM_SH_LMB_H
-#define __ASM_SH_LMB_H
-
-#define LMB_REAL_LIMIT	0
-
-#endif /* __ASM_SH_LMB_H */
diff --git a/arch/sh/include/asm/memblock.h b/arch/sh/include/asm/memblock.h
new file mode 100644
index 000000000000..dfe683b88075
--- /dev/null
+++ b/arch/sh/include/asm/memblock.h
@@ -0,0 +1,6 @@
+#ifndef __ASM_SH_MEMBLOCK_H
+#define __ASM_SH_MEMBLOCK_H
+
+#define MEMBLOCK_REAL_LIMIT	0
+
+#endif /* __ASM_SH_MEMBLOCK_H */
diff --git a/arch/sh/kernel/machine_kexec.c b/arch/sh/kernel/machine_kexec.c
index 5a559e666eb3..e2a3af31ff99 100644
--- a/arch/sh/kernel/machine_kexec.c
+++ b/arch/sh/kernel/machine_kexec.c
@@ -15,7 +15,7 @@
 #include <linux/numa.h>
 #include <linux/ftrace.h>
 #include <linux/suspend.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
@@ -157,10 +157,10 @@ void __init reserve_crashkernel(void)
 	unsigned long long crash_size, crash_base;
 	int ret;
 
-	/* this is necessary because of lmb_phys_mem_size() */
-	lmb_analyze();
+	/* this is necessary because of memblock_phys_mem_size() */
+	memblock_analyze();
 
-	ret = parse_crashkernel(boot_command_line, lmb_phys_mem_size(),
+	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
 			&crash_size, &crash_base);
 	if (ret == 0 && crash_size > 0) {
 		crashk_res.start = crash_base;
@@ -172,14 +172,14 @@ void __init reserve_crashkernel(void)
 
 	crash_size = PAGE_ALIGN(crashk_res.end - crashk_res.start + 1);
 	if (!crashk_res.start) {
-		unsigned long max = lmb_end_of_DRAM() - memory_limit;
-		crashk_res.start = __lmb_alloc_base(crash_size, PAGE_SIZE, max);
+		unsigned long max = memblock_end_of_DRAM() - memory_limit;
+		crashk_res.start = __memblock_alloc_base(crash_size, PAGE_SIZE, max);
 		if (!crashk_res.start) {
 			pr_err("crashkernel allocation failed\n");
 			goto disable;
 		}
 	} else {
-		ret = lmb_reserve(crashk_res.start, crash_size);
+		ret = memblock_reserve(crashk_res.start, crash_size);
 		if (unlikely(ret < 0)) {
 			pr_err("crashkernel reservation failed - "
 			       "memory is in use\n");
@@ -192,7 +192,7 @@ void __init reserve_crashkernel(void)
 	/*
 	 * Crash kernel trumps memory limit
 	 */
-	if ((lmb_end_of_DRAM() - memory_limit) <= crashk_res.end) {
+	if ((memblock_end_of_DRAM() - memory_limit) <= crashk_res.end) {
 		memory_limit = 0;
 		pr_info("Disabled memory limit for crashkernel\n");
 	}
@@ -201,7 +201,7 @@ void __init reserve_crashkernel(void)
201 "for crashkernel (System RAM: %ldMB)\n", 201 "for crashkernel (System RAM: %ldMB)\n",
202 (unsigned long)(crash_size >> 20), 202 (unsigned long)(crash_size >> 20),
203 (unsigned long)(crashk_res.start), 203 (unsigned long)(crashk_res.start),
204 (unsigned long)(lmb_phys_mem_size() >> 20)); 204 (unsigned long)(memblock_phys_mem_size() >> 20));
205 205
206 return; 206 return;
207 207
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 272734681d29..e769401a78ba 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -30,7 +30,7 @@
 #include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/platform_device.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/page.h>
@@ -141,10 +141,10 @@ void __init check_for_initrd(void)
 		goto disable;
 	}
 
-	if (unlikely(end > lmb_end_of_DRAM())) {
+	if (unlikely(end > memblock_end_of_DRAM())) {
 		pr_err("initrd extends beyond end of memory "
 		       "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
-		       end, (unsigned long)lmb_end_of_DRAM());
+		       end, (unsigned long)memblock_end_of_DRAM());
 		goto disable;
 	}
 
@@ -161,7 +161,7 @@ void __init check_for_initrd(void)
 	initrd_start = (unsigned long)__va(__pa(start));
 	initrd_end = initrd_start + INITRD_SIZE;
 
-	lmb_reserve(__pa(initrd_start), INITRD_SIZE);
+	memblock_reserve(__pa(initrd_start), INITRD_SIZE);
 
 	return;
 
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index 82d46096e531..105f559d946d 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -16,7 +16,7 @@
 #include <linux/pagemap.h>
 #include <linux/percpu.h>
 #include <linux/io.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/dma-mapping.h>
 #include <asm/mmu_context.h>
 #include <asm/mmzone.h>
@@ -33,7 +33,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
 void __init generic_mem_init(void)
 {
-	lmb_add(__MEMORY_START, __MEMORY_SIZE);
+	memblock_add(__MEMORY_START, __MEMORY_SIZE);
 }
 
 void __init __weak plat_mem_setup(void)
@@ -200,12 +200,12 @@ void __init allocate_pgdat(unsigned int nid)
 	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
-	phys = __lmb_alloc_base(sizeof(struct pglist_data),
+	phys = __memblock_alloc_base(sizeof(struct pglist_data),
 				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
 	/* Retry with all of system memory */
 	if (!phys)
-		phys = __lmb_alloc_base(sizeof(struct pglist_data),
-					SMP_CACHE_BYTES, lmb_end_of_DRAM());
+		phys = __memblock_alloc_base(sizeof(struct pglist_data),
+					SMP_CACHE_BYTES, memblock_end_of_DRAM());
 	if (!phys)
 		panic("Can't allocate pgdat for node %d\n", nid);
 
@@ -236,7 +236,7 @@ static void __init bootmem_init_one_node(unsigned int nid)
 
 	total_pages = bootmem_bootmap_pages(p->node_spanned_pages);
 
-	paddr = lmb_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
+	paddr = memblock_alloc(total_pages << PAGE_SHIFT, PAGE_SIZE);
 	if (!paddr)
 		panic("Can't allocate bootmap for nid[%d]\n", nid);
 
@@ -251,9 +251,9 @@ static void __init bootmem_init_one_node(unsigned int nid)
 	 */
 	if (nid == 0) {
 		/* Reserve the sections we're already using. */
-		for (i = 0; i < lmb.reserved.cnt; i++)
-			reserve_bootmem(lmb.reserved.region[i].base,
-					lmb_size_bytes(&lmb.reserved, i),
+		for (i = 0; i < memblock.reserved.cnt; i++)
+			reserve_bootmem(memblock.reserved.region[i].base,
+					memblock_size_bytes(&memblock.reserved, i),
 					BOOTMEM_DEFAULT);
 	}
 
@@ -265,10 +265,10 @@ static void __init do_init_bootmem(void)
 	int i;
 
 	/* Add active regions with valid PFNs. */
-	for (i = 0; i < lmb.memory.cnt; i++) {
+	for (i = 0; i < memblock.memory.cnt; i++) {
 		unsigned long start_pfn, end_pfn;
-		start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT;
-		end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i);
+		start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT;
+		end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i);
 		__add_active_range(0, start_pfn, end_pfn);
 	}
 
@@ -300,7 +300,7 @@ static void __init early_reserve_mem(void)
 	 * this catches the (definitely buggy) case of us accidentally
 	 * initializing the bootmem allocator with an invalid RAM area.
 	 */
-	lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
+	memblock_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
 		    (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) -
 		    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));
 
@@ -308,7 +308,7 @@ static void __init early_reserve_mem(void)
 	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
 	 */
 	if (CONFIG_ZERO_PAGE_OFFSET != 0)
-		lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
+		memblock_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);
 
 	/*
 	 * Handle additional early reservations
@@ -323,27 +323,27 @@ void __init paging_init(void)
 	unsigned long vaddr, end;
 	int nid;
 
-	lmb_init();
+	memblock_init();
 
 	sh_mv.mv_mem_init();
 
 	early_reserve_mem();
 
-	lmb_enforce_memory_limit(memory_limit);
-	lmb_analyze();
+	memblock_enforce_memory_limit(memory_limit);
+	memblock_analyze();
 
-	lmb_dump_all();
+	memblock_dump_all();
 
 	/*
 	 * Determine low and high memory ranges:
 	 */
-	max_low_pfn = max_pfn = lmb_end_of_DRAM() >> PAGE_SHIFT;
+	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
 	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
 
 	nodes_clear(node_online_map);
 
 	memory_start = (unsigned long)__va(__MEMORY_START);
-	memory_end = memory_start + (memory_limit ?: lmb_phys_mem_size());
+	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());
 
 	uncached_init();
 	pmb_init();
diff --git a/arch/sh/mm/numa.c b/arch/sh/mm/numa.c
index a2e645f64a37..3d85225b9e95 100644
--- a/arch/sh/mm/numa.c
+++ b/arch/sh/mm/numa.c
@@ -9,7 +9,7 @@
  */
 #include <linux/module.h>
 #include <linux/bootmem.h>
-#include <linux/lmb.h>
+#include <linux/memblock.h>
 #include <linux/mm.h>
 #include <linux/numa.h>
 #include <linux/pfn.h>
@@ -39,12 +39,12 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 	pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
 			 PAGE_KERNEL);
 
-	lmb_add(start, end - start);
+	memblock_add(start, end - start);
 
 	__add_active_range(nid, start_pfn, end_pfn);
 
 	/* Node-local pgdat */
-	NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
+	NODE_DATA(nid) = __va(memblock_alloc_base(sizeof(struct pglist_data),
 				     SMP_CACHE_BYTES, end));
 	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
@@ -54,7 +54,7 @@ void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
 
 	/* Node-local bootmap */
 	bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
-	bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
+	bootmem_paddr = memblock_alloc_base(bootmap_pages << PAGE_SHIFT,
 				       PAGE_SIZE, end);
 	init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
 			  start_pfn, end_pfn);