Diffstat (limited to 'arch/sh/mm/init.c')
-rw-r--r--  arch/sh/mm/init.c | 107
1 file changed, 68 insertions(+), 39 deletions(-)
diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
index e0e644ff3204..82b68c789a5f 100644
--- a/arch/sh/mm/init.c
+++ b/arch/sh/mm/init.c
@@ -18,6 +18,7 @@
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
+#include <asm/sections.h>
 #include <asm/cache.h>
 
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
@@ -36,14 +37,11 @@ void show_mem(void)
 	show_free_areas();
 
 	for_each_online_pgdat(pgdat) {
-		struct page *page, *end;
-		unsigned long flags;
+		unsigned long flags, i;
 
 		pgdat_resize_lock(pgdat, &flags);
-		page = pgdat->node_mem_map;
-		end = page + pgdat->node_spanned_pages;
-
-		do {
+		for (i = 0; i < pgdat->node_spanned_pages; i++) {
+			struct page *page = pgdat_page_nr(pgdat, i);
 			total++;
 			if (PageReserved(page))
 				reserved++;
@@ -55,9 +53,7 @@ void show_mem(void)
 				free++;
 			else
 				shared += page_count(page) - 1;
-			page++;
-		} while (page < end);
-
+		}
 		pgdat_resize_unlock(pgdat, &flags);
 	}
 
@@ -137,16 +133,12 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
 }
 #endif /* CONFIG_MMU */
 
-/* References to section boundaries */
-
-extern char _text, _etext, _edata, __bss_start, _end;
-extern char __init_begin, __init_end;
-
 /*
  * paging_init() sets up the page tables
  */
 void __init paging_init(void)
 {
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 	int nid;
 
 	/* We don't need to map the kernel through the TLB, as
@@ -158,43 +150,39 @@ void __init paging_init(void)
 	 * check for a null value. */
 	set_TTB(swapper_pg_dir);
 
+	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
-		unsigned long max_zone_pfns[MAX_NR_ZONES];
 		unsigned long low, start_pfn;
 
-		memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-
 		start_pfn = pgdat->bdata->node_boot_start >> PAGE_SHIFT;
 		low = pgdat->bdata->node_low_pfn;
 
-		max_zone_pfns[ZONE_NORMAL] = low;
-		add_active_range(nid, start_pfn, low);
+		if (max_zone_pfns[ZONE_NORMAL] < low)
+			max_zone_pfns[ZONE_NORMAL] = low;
 
 		printk("Node %u: start_pfn = 0x%lx, low = 0x%lx\n",
 		       nid, start_pfn, low);
-
-		free_area_init_nodes(max_zone_pfns);
-
-		printk("Node %u: mem_map starts at %p\n",
-		       pgdat->node_id, pgdat->node_mem_map);
 	}
+
+	free_area_init_nodes(max_zone_pfns);
 }
 
 static struct kcore_list kcore_mem, kcore_vmalloc;
 
 void __init mem_init(void)
 {
-	int codesize, reservedpages, datasize, initsize;
+	int codesize, datasize, initsize;
 	int nid;
 
-	reservedpages = 0;
+	num_physpages = 0;
+	high_memory = NULL;
 
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
 		unsigned long node_pages = 0;
 		void *node_high_memory;
-		int i;
 
 		num_physpages += pgdat->node_present_pages;
 
@@ -203,13 +191,9 @@ void __init mem_init(void)
 
 		totalram_pages += node_pages;
 
-		for (i = 0; i < node_pages; i++)
-			if (PageReserved(pgdat->node_mem_map + i))
-				reservedpages++;
-
-		node_high_memory = (void *)((pgdat->node_start_pfn +
-					     pgdat->node_spanned_pages) <<
-					     PAGE_SHIFT);
+		node_high_memory = (void *)__va((pgdat->node_start_pfn +
+						 pgdat->node_spanned_pages) <<
+						 PAGE_SHIFT);
 		if (node_high_memory > high_memory)
 			high_memory = node_high_memory;
 	}
@@ -239,11 +223,10 @@ void __init mem_init(void)
 	       VMALLOC_END - VMALLOC_START);
 
 	printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
-	       "%dk reserved, %dk data, %dk init)\n",
+	       "%dk data, %dk init)\n",
 		(unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
-		totalram_pages << (PAGE_SHIFT-10),
+		num_physpages << (PAGE_SHIFT-10),
 		codesize >> 10,
-		reservedpages << (PAGE_SHIFT-10),
 		datasize >> 10,
 		initsize >> 10);
 
@@ -264,7 +247,9 @@ void free_initmem(void)
 		free_page(addr);
 		totalram_pages++;
 	}
-	printk ("Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
+	printk("Freeing unused kernel memory: %ldk freed\n",
+	       ((unsigned long)&__init_end -
+		(unsigned long)&__init_begin) >> 10);
 }
 
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -277,6 +262,50 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 		free_page(p);
 		totalram_pages++;
 	}
-	printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+	printk("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
+}
+#endif
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void online_page(struct page *page)
+{
+	ClearPageReserved(page);
+	init_page_count(page);
+	__free_page(page);
+	totalram_pages++;
+	num_physpages++;
 }
+
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+	pg_data_t *pgdat;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+	int ret;
+
+	pgdat = NODE_DATA(nid);
+
+	/* We only have ZONE_NORMAL, so this is easy.. */
+	ret = __add_pages(pgdat->node_zones + ZONE_NORMAL, start_pfn, nr_pages);
+	if (unlikely(ret))
+		printk("%s: Failed, __add_pages() == %d\n", __FUNCTION__, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(arch_add_memory);
+
+int remove_memory(u64 start, u64 size)
+{
+	return -EINVAL;
+}
+EXPORT_SYMBOL_GPL(remove_memory);
+
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 addr)
+{
+	/* Node 0 for now.. */
+	return 0;
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
 #endif