about summary refs log tree commit diff stats
path: root/arch/i386/mm
diff options
context:
space:
mode:
Diffstat (limited to 'arch/i386/mm')
-rw-r--r--  arch/i386/mm/discontig.c |  4 +-
-rw-r--r--  arch/i386/mm/fault.c     |  2 +-
-rw-r--r--  arch/i386/mm/init.c      | 62 +++++++++++++++++++++++++++---
-rw-r--r--  arch/i386/mm/ioremap.c   |  4 +-
-rw-r--r--  arch/i386/mm/pgtable.c   | 11 ++--
5 files changed, 68 insertions(+), 15 deletions(-)
diff --git a/arch/i386/mm/discontig.c b/arch/i386/mm/discontig.c
index 244d8ec66be2..c4af9638dbfa 100644
--- a/arch/i386/mm/discontig.c
+++ b/arch/i386/mm/discontig.c
@@ -98,7 +98,7 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
 
 extern unsigned long find_max_low_pfn(void);
 extern void find_max_pfn(void);
-extern void one_highpage_init(struct page *, int, int);
+extern void add_one_highpage_init(struct page *, int, int);
 
 extern struct e820map e820;
 extern unsigned long init_pg_tables_end;
@@ -427,7 +427,7 @@ void __init set_highmem_pages_init(int bad_ppro)
 			if (!pfn_valid(node_pfn))
 				continue;
 			page = pfn_to_page(node_pfn);
-			one_highpage_init(page, node_pfn, bad_ppro);
+			add_one_highpage_init(page, node_pfn, bad_ppro);
 		}
 	}
 	totalram_pages += totalhigh_pages;
diff --git a/arch/i386/mm/fault.c b/arch/i386/mm/fault.c
index 9edd4485b91e..cf572d9a3b6e 100644
--- a/arch/i386/mm/fault.c
+++ b/arch/i386/mm/fault.c
@@ -108,7 +108,7 @@ static inline unsigned long get_segment_eip(struct pt_regs *regs,
 		desc = (void *)desc + (seg & ~7);
 	} else {
 		/* Must disable preemption while reading the GDT. */
-		desc = (u32 *)&per_cpu(cpu_gdt_table, get_cpu());
+		desc = (u32 *)get_cpu_gdt_table(get_cpu());
 		desc = (void *)desc + (seg & ~7);
 	}
 
diff --git a/arch/i386/mm/init.c b/arch/i386/mm/init.c
index 2ebaf75f732e..542d9298da5e 100644
--- a/arch/i386/mm/init.c
+++ b/arch/i386/mm/init.c
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/proc_fs.h>
 #include <linux/efi.h>
+#include <linux/memory_hotplug.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -266,17 +267,46 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
 	pkmap_page_table = pte;
 }
 
-void __init one_highpage_init(struct page *page, int pfn, int bad_ppro)
+void __devinit free_new_highpage(struct page *page)
+{
+	set_page_count(page, 1);
+	__free_page(page);
+	totalhigh_pages++;
+}
+
+void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
 {
 	if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
 		ClearPageReserved(page);
-		set_page_count(page, 1);
-		__free_page(page);
-		totalhigh_pages++;
+		free_new_highpage(page);
 	} else
 		SetPageReserved(page);
 }
 
+static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
+{
+	free_new_highpage(page);
+	totalram_pages++;
+#ifdef CONFIG_FLATMEM
+	max_mapnr = max(pfn, max_mapnr);
+#endif
+	num_physpages++;
+	return 0;
+}
+
+/*
+ * Not currently handling the NUMA case.
+ * Assuming single node and all memory that
+ * has been added dynamically that would be
+ * onlined here is in HIGHMEM
+ */
+void online_page(struct page *page)
+{
+	ClearPageReserved(page);
+	add_one_highpage_hotplug(page, page_to_pfn(page));
+}
+
+
 #ifdef CONFIG_NUMA
 extern void set_highmem_pages_init(int);
 #else
@@ -284,7 +314,7 @@ static void __init set_highmem_pages_init(int bad_ppro)
 {
 	int pfn;
 	for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
-		one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
+		add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
 	totalram_pages += totalhigh_pages;
 }
 #endif /* CONFIG_FLATMEM */
@@ -615,6 +645,28 @@ void __init mem_init(void)
 #endif
 }
 
+/*
+ * this is for the non-NUMA, single node SMP system case.
+ * Specifically, in the case of x86, we will always add
+ * memory to the highmem for now.
+ */
+#ifndef CONFIG_NEED_MULTIPLE_NODES
+int add_memory(u64 start, u64 size)
+{
+	struct pglist_data *pgdata = &contig_page_data;
+	struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
+	unsigned long start_pfn = start >> PAGE_SHIFT;
+	unsigned long nr_pages = size >> PAGE_SHIFT;
+
+	return __add_pages(zone, start_pfn, nr_pages);
+}
+
+int remove_memory(u64 start, u64 size)
+{
+	return -EINVAL;
+}
+#endif
+
 kmem_cache_t *pgd_cache;
 kmem_cache_t *pmd_cache;
 
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
index f379b8d67558..5d09de8d1c6b 100644
--- a/arch/i386/mm/ioremap.c
+++ b/arch/i386/mm/ioremap.c
@@ -28,7 +28,7 @@ static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
 	unsigned long pfn;
 
 	pfn = phys_addr >> PAGE_SHIFT;
-	pte = pte_alloc_kernel(&init_mm, pmd, addr);
+	pte = pte_alloc_kernel(pmd, addr);
 	if (!pte)
 		return -ENOMEM;
 	do {
@@ -87,14 +87,12 @@ static int ioremap_page_range(unsigned long addr,
 	flush_cache_all();
 	phys_addr -= addr;
 	pgd = pgd_offset_k(addr);
-	spin_lock(&init_mm.page_table_lock);
 	do {
 		next = pgd_addr_end(addr, end);
 		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
 		if (err)
 			break;
 	} while (pgd++, addr = next, addr != end);
-	spin_unlock(&init_mm.page_table_lock);
 	flush_tlb_all();
 	return err;
 }
diff --git a/arch/i386/mm/pgtable.c b/arch/i386/mm/pgtable.c
index dcdce2c6c532..9db3242103be 100644
--- a/arch/i386/mm/pgtable.c
+++ b/arch/i386/mm/pgtable.c
@@ -31,11 +31,13 @@ void show_mem(void)
 	pg_data_t *pgdat;
 	unsigned long i;
 	struct page_state ps;
+	unsigned long flags;
 
 	printk(KERN_INFO "Mem-info:\n");
 	show_free_areas();
 	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
 	for_each_pgdat(pgdat) {
+		pgdat_resize_lock(pgdat, &flags);
 		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
 			page = pgdat_page_nr(pgdat, i);
 			total++;
@@ -48,6 +50,7 @@ void show_mem(void)
 			else if (page_count(page))
 				shared += page_count(page) - 1;
 		}
+		pgdat_resize_unlock(pgdat, &flags);
 	}
 	printk(KERN_INFO "%d pages of RAM\n", total);
 	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
@@ -188,19 +191,19 @@ static inline void pgd_list_add(pgd_t *pgd)
 	struct page *page = virt_to_page(pgd);
 	page->index = (unsigned long)pgd_list;
 	if (pgd_list)
-		pgd_list->private = (unsigned long)&page->index;
+		set_page_private(pgd_list, (unsigned long)&page->index);
 	pgd_list = page;
-	page->private = (unsigned long)&pgd_list;
+	set_page_private(page, (unsigned long)&pgd_list);
 }
 
 static inline void pgd_list_del(pgd_t *pgd)
 {
 	struct page *next, **pprev, *page = virt_to_page(pgd);
 	next = (struct page *)page->index;
-	pprev = (struct page **)page->private;
+	pprev = (struct page **)page_private(page);
 	*pprev = next;
 	if (next)
-		next->private = (unsigned long)pprev;
+		set_page_private(next, (unsigned long)pprev);
 }
 
 void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)