path: root/mm/memory_hotplug.c
author	Dave Hansen <haveblue@us.ibm.com>	2005-10-29 21:16:54 -0400
committer	Linus Torvalds <torvalds@g5.osdl.org>	2005-10-30 00:40:44 -0400
commit	3947be1969a9ce455ec30f60ef51efb10e4323d1 (patch)
tree	0b4b3b4c268beb7aa88cb685cce48b6bb5053c47 /mm/memory_hotplug.c
parent	bdc8cb984576ab5b550c8b24c6fa111a873503e3 (diff)
[PATCH] memory hotplug: sysfs and add/remove functions
This adds generic memory add/remove and supporting functions for memory
hotplug into a new file, as well as a memory hotplug kernel config option.
Individual architecture patches will follow.

For now, disable memory hotplug when swsusp is enabled. There's a lot of
churn there right now. We'll fix it up properly once it calms down.

Signed-off-by: Matt Tolentino <matthew.e.tolentino@intel.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
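The per-architecture hooks land in follow-on patches; as a rough sketch of how an arch entry point might wire into __add_pages(), consider the following. The arch_add_memory() name, the hard-coded node 0, and the ZONE_NORMAL choice are illustrative assumptions, not part of this commit:

	/* Hypothetical arch glue -- the real hooks arrive in later patches. */
	int arch_add_memory(u64 start, u64 size)
	{
		struct pglist_data *pgdat = NODE_DATA(0);	/* assume node 0 */
		struct zone *zone = pgdat->node_zones + ZONE_NORMAL;

		/* An arch would extend its kernel page tables for the new
		 * range here before handing it to the core. */
		return __add_pages(zone, start >> PAGE_SHIFT, size >> PAGE_SHIFT);
	}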
Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--	mm/memory_hotplug.c	178
1 file changed, 178 insertions, 0 deletions
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
new file mode 100644
index 000000000000..855e0fc928b3
--- /dev/null
+++ b/mm/memory_hotplug.c
@@ -0,0 +1,178 @@
/*
 *  linux/mm/memory_hotplug.c
 *
 *  Copyright (C)
 */

#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include <linux/sysctl.h>
#include <linux/cpu.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>

#include <asm/tlbflush.h>

static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	/* Try the linearly-mapped page allocator first... */
	page = alloc_pages(GFP_KERNEL, get_order(memmap_size));
	if (page)
		goto got_map_page;

	/* ...and fall back to vmalloc() if the high-order allocation fails. */
	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

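/*
 * A matching free path has to mirror the two allocators above. This
 * helper is a sketch, not part of the patch -- __add_section() below
 * open-codes the same vmalloc-vs-page-allocator test in its error path:
 */
static inline void __kfree_section_memmap(struct page *memmap,
					  unsigned long nr_pages)
{
	if (memmap >= (struct page *)VMALLOC_START &&
	    memmap < (struct page *)VMALLOC_END)
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}
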
extern void zonetable_add(struct zone *zone, int nid, int zid, unsigned long pfn,
			  unsigned long size);
/*
 * Grow the zone by one section's worth of pages: initialize the new
 * struct pages and register the pfn range with the zone table so that
 * page_zone() resolves to this zone.
 */
static void __add_zone(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	int nid = pgdat->node_id;
	int zone_type;

	zone_type = zone - pgdat->node_zones;
	memmap_init_zone(nr_pages, nid, zone_type, phys_start_pfn);
	zonetable_add(zone, nid, zone_type, phys_start_pfn, nr_pages);
}

extern int sparse_add_one_section(struct zone *, unsigned long,
				  struct page *mem_map);
static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
{
	struct pglist_data *pgdat = zone->zone_pgdat;
	int nr_pages = PAGES_PER_SECTION;
	struct page *memmap;
	unsigned long flags;
	int ret;

	/*
	 * This can potentially allocate memory, and does its own
	 * internal locking.
	 */
	sparse_index_init(pfn_to_section_nr(phys_start_pfn), pgdat->node_id);

	pgdat_resize_lock(pgdat, &flags);
	memmap = __kmalloc_section_memmap(nr_pages);
	ret = sparse_add_one_section(zone, phys_start_pfn, memmap);
	pgdat_resize_unlock(pgdat, &flags);

	if (ret <= 0) {
		/* the mem_map didn't get used */
		if (memmap >= (struct page *)VMALLOC_START &&
		    memmap < (struct page *)VMALLOC_END)
			vfree(memmap);
		else
			free_pages((unsigned long)memmap,
				   get_order(sizeof(struct page) * nr_pages));
	}

	if (ret < 0)
		return ret;

	__add_zone(zone, phys_start_pfn);
	return register_new_memory(__pfn_to_section(phys_start_pfn));
}

/*
 * Reasonably generic function for adding memory.  It is
 * expected that archs that support memory hotplug will
 * call this function after deciding the zone to which to
 * add the new pages.
 */
int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
		unsigned long nr_pages)
{
	unsigned long i;
	int err = 0;

	for (i = 0; i < nr_pages; i += PAGES_PER_SECTION) {
		err = __add_section(zone, phys_start_pfn + i);

		if (err)
			break;
	}

	return err;
}

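/*
 * Note that __add_pages() walks the range one section at a time, so
 * callers are expected to pass a section-aligned phys_start_pfn and a
 * section-multiple nr_pages. For example, on a configuration with
 * 128MB sections and 4k pages (PAGES_PER_SECTION == 32768), adding
 * 256MB means two trips through __add_section().
 */
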
static void grow_zone_span(struct zone *zone,
		unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_zone_end_pfn;

	zone_span_writelock(zone);

	old_zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
	if (start_pfn < zone->zone_start_pfn)
		zone->zone_start_pfn = start_pfn;

	if (end_pfn > old_zone_end_pfn)
		zone->spanned_pages = end_pfn - zone->zone_start_pfn;

	zone_span_writeunlock(zone);
}

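/*
 * Worked example for grow_zone_span(): a zone spanning pfns
 * [0x10000, 0x18000) that gains the section [0x18000, 0x1c000)
 * keeps zone_start_pfn at 0x10000 and grows spanned_pages to
 * 0xc000. The span only tracks outer bounds; it may contain holes.
 */
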
static void grow_pgdat_span(struct pglist_data *pgdat,
	unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long old_pgdat_end_pfn =
		pgdat->node_start_pfn + pgdat->node_spanned_pages;

	if (start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;

	if (end_pfn > old_pgdat_end_pfn)
		pgdat->node_spanned_pages = end_pfn - pgdat->node_start_pfn;
}

int online_pages(unsigned long pfn, unsigned long nr_pages)
{
	unsigned long i;
	unsigned long flags;
	unsigned long onlined_pages = 0;
	struct zone *zone;

	/*
	 * This doesn't need a lock to do pfn_to_page().
	 * The section can't be removed here because of the
	 * memory_block->state_sem.
	 */
	zone = page_zone(pfn_to_page(pfn));
	pgdat_resize_lock(zone->zone_pgdat, &flags);
	grow_zone_span(zone, pfn, pfn + nr_pages);
	grow_pgdat_span(zone->zone_pgdat, pfn, pfn + nr_pages);
	pgdat_resize_unlock(zone->zone_pgdat, &flags);

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pfn_to_page(pfn + i);
		online_page(page);
		onlined_pages++;
	}
	zone->present_pages += onlined_pages;

	return 0;
}
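
For context, online_pages() is driven from the memory sysfs code added alongside this patch under drivers/base/memory.c; the shape of the call is roughly the sketch below. The memory_block fields and the body are illustrative assumptions, not the actual driver code:

	/* Illustrative only -- see drivers/base/memory.c for the real logic. */
	static int memory_block_online(struct memory_block *mem)
	{
		unsigned long start_pfn = mem->phys_index << PFN_SECTION_SHIFT;
		int ret;

		down(&mem->state_sem);	/* holds off section removal */
		ret = online_pages(start_pfn, PAGES_PER_SECTION);
		up(&mem->state_sem);
		return ret;
	}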