author    Paul Mundt <lethal@linux-sh.org>    2007-03-28 03:38:13 -0400
committer Paul Mundt <lethal@hera.kernel.org> 2007-05-06 22:10:54 -0400
commit    01066625e9ae39742c92e21163f7f2a818e02762 (patch)
tree      c5b8a2a2c9de29ed13094891fce2b7f5769ffca7 /arch
parent    759ab068c4d4216c4ad247bfa851601dfb6500dc (diff)
sh: bootmem tidying for discontig/sparsemem preparation.
This reworks some of the node 0 bootmem initialization in preparation
for discontigmem and sparsemem support. ARCH_POPULATES_NODE_MAP is
switched to as a result of this.

Signed-off-by: Paul Mundt <lethal@linux-sh.org>
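For context on the ARCH_POPULATES_NODE_MAP switch: under that option the
architecture no longer hands per-node zones_size[] arrays to
free_area_init_node(); instead each node registers the PFN range it actually
covers with add_active_range(), and the core mm sizes and initialises the
zones from free_area_init_nodes(). The sketch below is illustrative only
(the helper name example_populate_node_map is made up); in this patch the
equivalent calls live in the reworked sh paging_init() further down.

/*
 * Illustrative sketch only -- not part of the patch. Shows the
 * ARCH_POPULATES_NODE_MAP registration model that the reworked
 * paging_init() below follows.
 */
static void __init example_populate_node_map(void)
{
        unsigned long max_zone_pfns[MAX_NR_ZONES];
        int nid;

        memset(max_zone_pfns, 0, sizeof(max_zone_pfns));

        for_each_online_node(nid) {
                pg_data_t *pgdat = NODE_DATA(nid);
                unsigned long start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
                unsigned long end_pfn = pgdat->bdata->node_low_pfn;

                /* Tell the core mm which PFNs this node covers. */
                add_active_range(nid, start_pfn, end_pfn);

                if (end_pfn > max_zone_pfns[ZONE_NORMAL])
                        max_zone_pfns[ZONE_NORMAL] = end_pfn;
        }

        /* The core mm builds the zones from the registered ranges. */
        free_area_init_nodes(max_zone_pfns);
}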
Diffstat (limited to 'arch')
-rw-r--r--  arch/sh/kernel/setup.c | 164
-rw-r--r--  arch/sh/mm/Kconfig     |  11
-rw-r--r--  arch/sh/mm/init.c      | 197
3 files changed, 198 insertions(+), 174 deletions(-)
diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
index 4d6d89115194..60cc2161438f 100644
--- a/arch/sh/kernel/setup.c
+++ b/arch/sh/kernel/setup.c
@@ -4,7 +4,7 @@
  * This file handles the architecture-dependent parts of initialization
  *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002 - 2006 Paul Mundt
+ * Copyright (C) 2002 - 2007 Paul Mundt
  */
 #include <linux/screen_info.h>
 #include <linux/ioport.h>
@@ -15,15 +15,18 @@
 #include <linux/seq_file.h>
 #include <linux/root_dev.h>
 #include <linux/utsname.h>
+#include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/pfn.h>
 #include <linux/fs.h>
+#include <linux/mm.h>
 #include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/sections.h>
 #include <asm/irq.h>
 #include <asm/setup.h>
 #include <asm/clock.h>
+#include <asm/mmu_context.h>
 
 extern void * __rd_start, * __rd_end;
 
@@ -202,53 +205,33 @@ static int __init sh_mv_setup(char **cmdline_p)
         return 0;
 }
 
-void __init setup_arch(char **cmdline_p)
+/*
+ * Register fully available low RAM pages with the bootmem allocator.
+ */
+static void __init register_bootmem_low_pages(void)
 {
-        unsigned long bootmap_size;
-        unsigned long start_pfn, max_pfn, max_low_pfn;
-
-#ifdef CONFIG_CMDLINE_BOOL
-        strcpy(COMMAND_LINE, CONFIG_CMDLINE);
-#endif
-
-        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
-
-#ifdef CONFIG_BLK_DEV_RAM
-        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
-        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
-        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
-#endif
-
-        if (!MOUNT_ROOT_RDONLY)
-                root_mountflags &= ~MS_RDONLY;
-        init_mm.start_code = (unsigned long) _text;
-        init_mm.end_code = (unsigned long) _etext;
-        init_mm.end_data = (unsigned long) _edata;
-        init_mm.brk = (unsigned long) _end;
-
-        code_resource.start = (unsigned long)virt_to_phys(_text);
-        code_resource.end = (unsigned long)virt_to_phys(_etext)-1;
-        data_resource.start = (unsigned long)virt_to_phys(_etext);
-        data_resource.end = (unsigned long)virt_to_phys(_edata)-1;
-
-        sh_mv_setup(cmdline_p);
-
+        unsigned long curr_pfn, last_pfn, pages;
 
         /*
-         * Find the highest page frame number we have available
+         * We are rounding up the start address of usable memory:
          */
-        max_pfn = PFN_DOWN(__pa(memory_end));
+        curr_pfn = PFN_UP(__MEMORY_START);
 
         /*
-         * Determine low and high memory ranges:
+         * ... and at the end of the usable range downwards:
          */
-        max_low_pfn = max_pfn;
+        last_pfn = PFN_DOWN(__pa(memory_end));
 
-        /*
-         * Partially used pages are not usable - thus
-         * we are rounding upwards:
-         */
-        start_pfn = PFN_UP(__pa(_end));
+        if (last_pfn > max_low_pfn)
+                last_pfn = max_low_pfn;
+
+        pages = last_pfn - curr_pfn;
+        free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(pages));
+}
+
+void __init setup_bootmem_allocator(unsigned long start_pfn)
+{
+        unsigned long bootmap_size;
 
         /*
          * Find a proper area for the bootmem bitmap. After this
@@ -256,31 +239,11 @@ void __init setup_arch(char **cmdline_p)
          * is intact) must be done via bootmem_alloc().
          */
         bootmap_size = init_bootmem_node(NODE_DATA(0), start_pfn,
-                                         __MEMORY_START>>PAGE_SHIFT,
-                                         max_low_pfn);
-        /*
-         * Register fully available low RAM pages with the bootmem allocator.
-         */
-        {
-                unsigned long curr_pfn, last_pfn, pages;
-
-                /*
-                 * We are rounding up the start address of usable memory:
-                 */
-                curr_pfn = PFN_UP(__MEMORY_START);
-                /*
-                 * ... and at the end of the usable range downwards:
-                 */
-                last_pfn = PFN_DOWN(__pa(memory_end));
+                                         min_low_pfn, max_low_pfn);
 
-                if (last_pfn > max_low_pfn)
-                        last_pfn = max_low_pfn;
-
-                pages = last_pfn - curr_pfn;
-                free_bootmem_node(NODE_DATA(0), PFN_PHYS(curr_pfn),
-                                  PFN_PHYS(pages));
-        }
+        register_bootmem_low_pages();
 
+        node_set_online(0);
 
         /*
          * Reserve the kernel text and
@@ -289,14 +252,14 @@ void __init setup_arch(char **cmdline_p)
          * case of us accidentally initializing the bootmem allocator with
          * an invalid RAM area.
          */
-        reserve_bootmem_node(NODE_DATA(0), __MEMORY_START+PAGE_SIZE,
-                (PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);
+        reserve_bootmem(__MEMORY_START+PAGE_SIZE,
+                (PFN_PHYS(start_pfn)+bootmap_size+PAGE_SIZE-1)-__MEMORY_START);
 
         /*
          * reserve physical page 0 - it's a special BIOS page on many boxes,
          * enabling clean reboots, SMP operation, laptop functions.
          */
-        reserve_bootmem_node(NODE_DATA(0), __MEMORY_START, PAGE_SIZE);
+        reserve_bootmem(__MEMORY_START, PAGE_SIZE);
 
 #ifdef CONFIG_BLK_DEV_INITRD
         ROOT_DEV = MKDEV(RAMDISK_MAJOR, 0);
@@ -310,8 +273,8 @@ void __init setup_arch(char **cmdline_p)
 
         if (LOADER_TYPE && INITRD_START) {
                 if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
-                        reserve_bootmem_node(NODE_DATA(0), INITRD_START +
-                                                __MEMORY_START, INITRD_SIZE);
+                        reserve_bootmem(INITRD_START + __MEMORY_START,
+                                        INITRD_SIZE);
                         initrd_start = INITRD_START + PAGE_OFFSET +
                                         __MEMORY_START;
                         initrd_end = initrd_start + INITRD_SIZE;
@@ -324,6 +287,71 @@ void __init setup_arch(char **cmdline_p)
                 }
         }
 #endif
+}
+
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+static void __init setup_memory(void)
+{
+        unsigned long start_pfn;
+
+        /*
+         * Partially used pages are not usable - thus
+         * we are rounding upwards:
+         */
+        start_pfn = PFN_UP(__pa(_end));
+        setup_bootmem_allocator(start_pfn);
+}
+#else
+extern void __init setup_memory(void);
+#endif
+
+void __init setup_arch(char **cmdline_p)
+{
+        enable_mmu();
+
+#ifdef CONFIG_CMDLINE_BOOL
+        strcpy(COMMAND_LINE, CONFIG_CMDLINE);
+#endif
+
+        ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
+
+#ifdef CONFIG_BLK_DEV_RAM
+        rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
+        rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
+        rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
+#endif
+
+        if (!MOUNT_ROOT_RDONLY)
+                root_mountflags &= ~MS_RDONLY;
+        init_mm.start_code = (unsigned long) _text;
+        init_mm.end_code = (unsigned long) _etext;
+        init_mm.end_data = (unsigned long) _edata;
+        init_mm.brk = (unsigned long) _end;
+
+        code_resource.start = virt_to_phys(_text);
+        code_resource.end = virt_to_phys(_etext)-1;
+        data_resource.start = virt_to_phys(_etext);
+        data_resource.end = virt_to_phys(_edata)-1;
+
+        parse_early_param();
+
+        sh_mv_setup(cmdline_p);
+
+        /*
+         * Find the highest page frame number we have available
+         */
+        max_pfn = PFN_DOWN(__pa(memory_end));
+
+        /*
+         * Determine low and high memory ranges:
+         */
+        max_low_pfn = max_pfn;
+        min_low_pfn = __MEMORY_START >> PAGE_SHIFT;
+
+        nodes_clear(node_online_map);
+        setup_memory();
+        paging_init();
+        sparse_init();
 
 #ifdef CONFIG_DUMMY_CONSOLE
         conswitchp = &dummy_con;
@@ -332,8 +360,6 @@ void __init setup_arch(char **cmdline_p)
         /* Perform the machine specific initialisation */
         if (likely(sh_mv.mv_setup))
                 sh_mv.mv_setup(cmdline_p);
-
-        paging_init();
 }
 
 struct sh_machine_vector* __init get_mv_byname(const char* name)
diff --git a/arch/sh/mm/Kconfig b/arch/sh/mm/Kconfig
index 3cac22f50e15..5359f3dff93c 100644
--- a/arch/sh/mm/Kconfig
+++ b/arch/sh/mm/Kconfig
@@ -291,6 +291,17 @@ config VSYSCALL
           For systems with an MMU that can afford to give up a page,
           (the default value) say Y.
 
+config NODES_SHIFT
+        int
+        default "1"
+        depends on NEED_MULTIPLE_NODES
+
+config ARCH_FLATMEM_ENABLE
+        def_bool y
+
+config ARCH_POPULATES_NODE_MAP
+        def_bool y
+
 choice
         prompt "Kernel page size"
         default PAGE_SIZE_4KB
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index ae957a932375..4d030988b368 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -1,37 +1,20 @@
-/* $Id: init.c,v 1.19 2004/02/21 04:42:16 kkojima Exp $
- *
- * linux/arch/sh/mm/init.c
+/*
+ * linux/arch/sh/mm/init.c
  *
  * Copyright (C) 1999 Niibe Yutaka
- * Copyright (C) 2002, 2004 Paul Mundt
+ * Copyright (C) 2002 - 2007 Paul Mundt
  *
  * Based on linux/arch/i386/mm/init.c:
  * Copyright (C) 1995 Linus Torvalds
  */
-
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/string.h>
-#include <linux/types.h>
-#include <linux/ptrace.h>
-#include <linux/mman.h>
 #include <linux/mm.h>
 #include <linux/swap.h>
-#include <linux/smp.h>
 #include <linux/init.h>
-#include <linux/highmem.h>
 #include <linux/bootmem.h>
-#include <linux/pagemap.h>
 #include <linux/proc_fs.h>
-#include <asm/processor.h>
-#include <asm/system.h>
-#include <asm/uaccess.h>
-#include <asm/pgtable.h>
-#include <asm/pgalloc.h>
+#include <linux/percpu.h>
+#include <linux/io.h>
 #include <asm/mmu_context.h>
-#include <asm/io.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
 #include <asm/cache.h>
@@ -39,37 +22,51 @@
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 pgd_t swapper_pg_dir[PTRS_PER_PGD];
 
-#ifdef CONFIG_MMU
-/* It'd be good if these lines were in the standard header file. */
-#define START_PFN   (NODE_DATA(0)->bdata->node_boot_start >> PAGE_SHIFT)
-#define MAX_LOW_PFN (NODE_DATA(0)->bdata->node_low_pfn)
-#endif
-
 void (*copy_page)(void *from, void *to);
 void (*clear_page)(void *to);
 
 void show_mem(void)
 {
-        int i, total = 0, reserved = 0;
-        int shared = 0, cached = 0;
+        int total = 0, reserved = 0, free = 0;
+        int shared = 0, cached = 0, slab = 0;
+        pg_data_t *pgdat;
 
         printk("Mem-info:\n");
         show_free_areas();
-        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
-        i = max_mapnr;
-        while (i-- > 0) {
-                total++;
-                if (PageReserved(mem_map+i))
-                        reserved++;
-                else if (PageSwapCache(mem_map+i))
-                        cached++;
-                else if (page_count(mem_map+i))
-                        shared += page_count(mem_map+i) - 1;
+
+        for_each_online_pgdat(pgdat) {
+                struct page *page, *end;
+                unsigned long flags;
+
+                pgdat_resize_lock(pgdat, &flags);
+                page = pgdat->node_mem_map;
+                end = page + pgdat->node_spanned_pages;
+
+                do {
+                        total++;
+                        if (PageReserved(page))
+                                reserved++;
+                        else if (PageSwapCache(page))
+                                cached++;
+                        else if (PageSlab(page))
+                                slab++;
+                        else if (!page_count(page))
+                                free++;
+                        else
+                                shared += page_count(page) - 1;
+                        page++;
+                } while (page < end);
+
+                pgdat_resize_unlock(pgdat, &flags);
         }
-        printk("%d pages of RAM\n",total);
-        printk("%d reserved pages\n",reserved);
-        printk("%d pages shared\n",shared);
-        printk("%d pages swap cached\n",cached);
+
+        printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
+        printk("%d pages of RAM\n", total);
+        printk("%d free pages\n", free);
+        printk("%d reserved pages\n", reserved);
+        printk("%d slab pages\n", slab);
+        printk("%d pages shared\n", shared);
+        printk("%d pages swap cached\n", cached);
 }
 
 #ifdef CONFIG_MMU
@@ -147,52 +144,38 @@ extern char __init_begin, __init_end;
  */
 void __init paging_init(void)
 {
-        unsigned long zones_size[MAX_NR_ZONES] = { 0, };
+        int nid;
 
-        /*
-         * Setup some defaults for the zone sizes.. these should be safe
-         * regardless of distcontiguous memory or MMU settings.
-         */
-        zones_size[ZONE_NORMAL] = __MEMORY_SIZE >> PAGE_SHIFT;
-#ifdef CONFIG_HIGHMEM
-        zones_size[ZONE_HIGHMEM] = 0 >> PAGE_SHIFT;
-#endif
-
-#ifdef CONFIG_MMU
-        /*
-         * If we have an MMU, and want to be using it .. we need to adjust
-         * the zone sizes accordingly, in addition to turning it on.
-         */
-        {
-                /* We don't need to map the kernel through the TLB, as
-                 * it is permanatly mapped using P1. So clear the
-                 * entire pgd. */
-                memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
-
-                /* Turn on the MMU */
-                enable_mmu();
-                zones_size[ZONE_NORMAL] = MAX_LOW_PFN - START_PFN;
-        }
+        /* We don't need to map the kernel through the TLB, as
+         * it is permanatly mapped using P1. So clear the
+         * entire pgd. */
+        memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));
 
         /* Set an initial value for the MMU.TTB so we don't have to
          * check for a null value. */
         set_TTB(swapper_pg_dir);
 
-#elif defined(CONFIG_CPU_SH3) || defined(CONFIG_CPU_SH4)
-        /*
-         * If we don't have CONFIG_MMU set and the processor in question
-         * still has an MMU, care needs to be taken to make sure it doesn't
-         * stay on.. Since the boot loader could have potentially already
-         * turned it on, and we clearly don't want it, we simply turn it off.
-         *
-         * We don't need to do anything special for the zone sizes, since the
-         * default values that were already configured up above should be
-         * satisfactory.
-         */
-        disable_mmu();
-#endif
-        NODE_DATA(0)->node_mem_map = NULL;
-        free_area_init_node(0, NODE_DATA(0), zones_size, __MEMORY_START >> PAGE_SHIFT, 0);
+        for_each_online_node(nid) {
+                pg_data_t *pgdat = NODE_DATA(nid);
+                unsigned long max_zone_pfns[MAX_NR_ZONES];
+                unsigned long low, start_pfn;
+
+                memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
+                start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
+                low = pgdat->bdata->node_low_pfn;
+
+                max_zone_pfns[ZONE_NORMAL] = low;
+                add_active_range(nid, start_pfn, low);
+
+                printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
+                       nid, start_pfn, low);
+
+                free_area_init_nodes(max_zone_pfns);
+
+                printk("Node %u: mem_map starts at %p\n",
+                       pgdat->node_id, pgdat->node_mem_map);
+        }
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
@@ -200,18 +183,33 @@ static struct kcore_list kcore_mem, kcore_vmalloc;
 void __init mem_init(void)
 {
         int codesize, reservedpages, datasize, initsize;
-        int tmp;
-        extern unsigned long memory_start;
+        int nid;
 
-#ifdef CONFIG_MMU
-        high_memory = (void *)__va(MAX_LOW_PFN * PAGE_SIZE);
-#else
-        extern unsigned long memory_end;
+        reservedpages = 0;
 
-        high_memory = (void *)(memory_end & PAGE_MASK);
-#endif
+        for_each_online_node(nid) {
+                pg_data_t *pgdat = NODE_DATA(nid);
+                unsigned long node_pages = 0;
+                void *node_high_memory;
+                int i;
+
+                num_physpages += pgdat->node_present_pages;
+
+                if (pgdat->node_spanned_pages)
+                        node_pages = free_all_bootmem_node(pgdat);
+
+                totalram_pages += node_pages;
 
-        max_mapnr = num_physpages = MAP_NR(high_memory) - MAP_NR(memory_start);
+                for (i = 0; i < node_pages; i++)
+                        if (PageReserved(pgdat->node_mem_map + i))
+                                reservedpages++;
+
+                node_high_memory = (void *)((pgdat->node_start_pfn +
+                                             pgdat->node_spanned_pages) <<
+                                             PAGE_SHIFT);
+                if (node_high_memory > high_memory)
+                        high_memory = node_high_memory;
+        }
 
         /* clear the zero-page */
         memset(empty_zero_page, 0, PAGE_SIZE);
@@ -229,16 +227,6 @@ void __init mem_init(void)
         clear_page = clear_page_nommu;
 #endif
 
-        /* this will put all low memory onto the freelists */
-        totalram_pages += free_all_bootmem_node(NODE_DATA(0));
-        reservedpages = 0;
-        for (tmp = 0; tmp < num_physpages; tmp++)
-                /*
-                 * Only count reserved RAM pages
-                 */
-                if (PageReserved(mem_map+tmp))
-                        reservedpages++;
-
         codesize = (unsigned long) &_etext - (unsigned long) &_text;
         datasize = (unsigned long) &_edata - (unsigned long) &_etext;
         initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
@@ -250,7 +238,7 @@ void __init mem_init(void)
         printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
                "%dk reserved, %dk data, %dk init)\n",
                 (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-                max_mapnr << (PAGE_SHIFT-10),
+                totalram_pages << (PAGE_SHIFT-10),
                 codesize >> 10,
                 reservedpages << (PAGE_SHIFT-10),
                 datasize >> 10,
@@ -289,4 +277,3 @@ void free_initrd_mem(unsigned long start, unsigned long end)
         printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
 }
 #endif
-