Diffstat (limited to 'mm/memory_hotplug.c')
-rw-r--r--	mm/memory_hotplug.c	186
1 file changed, 185 insertions(+), 1 deletion(-)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0fb330271271..b17dca7249f8 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -29,6 +29,8 @@
 
 #include <asm/tlbflush.h>
 
+#include "internal.h"
+
 /* add this memory to iomem resource */
 static struct resource *register_memory_resource(u64 start, u64 size)
 {
@@ -58,8 +60,105 @@ static void release_memory_resource(struct resource *res)
 	return;
 }
 
-
 #ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+#ifndef CONFIG_SPARSEMEM_VMEMMAP
+static void get_page_bootmem(unsigned long info, struct page *page, int magic)
+{
+	atomic_set(&page->_mapcount, magic);
+	SetPagePrivate(page);
+	set_page_private(page, info);
+	atomic_inc(&page->_count);
+}
+
+void put_page_bootmem(struct page *page)
+{
+	int magic;
+
+	magic = atomic_read(&page->_mapcount);
+	BUG_ON(magic >= -1);
+
+	if (atomic_dec_return(&page->_count) == 1) {
+		ClearPagePrivate(page);
+		set_page_private(page, 0);
+		reset_page_mapcount(page);
+		__free_pages_bootmem(page, 0);
+	}
+
+}
+
+void register_page_bootmem_info_section(unsigned long start_pfn)
+{
+	unsigned long *usemap, mapsize, section_nr, i;
+	struct mem_section *ms;
+	struct page *page, *memmap;
+
+	if (!pfn_valid(start_pfn))
+		return;
+
+	section_nr = pfn_to_section_nr(start_pfn);
+	ms = __nr_to_section(section_nr);
+
+	/* Get section's memmap address */
+	memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
+
+	/*
+	 * Get page for the memmap's phys address
+	 * XXX: need more consideration for sparse_vmemmap...
+	 */
+	page = virt_to_page(memmap);
+	mapsize = sizeof(struct page) * PAGES_PER_SECTION;
+	mapsize = PAGE_ALIGN(mapsize) >> PAGE_SHIFT;
+
+	/* remember memmap's page */
+	for (i = 0; i < mapsize; i++, page++)
+		get_page_bootmem(section_nr, page, SECTION_INFO);
+
+	usemap = __nr_to_section(section_nr)->pageblock_flags;
+	page = virt_to_page(usemap);
+
+	mapsize = PAGE_ALIGN(usemap_size()) >> PAGE_SHIFT;
+
+	for (i = 0; i < mapsize; i++, page++)
+		get_page_bootmem(section_nr, page, MIX_INFO);
+
+}
+
+void register_page_bootmem_info_node(struct pglist_data *pgdat)
+{
+	unsigned long i, pfn, end_pfn, nr_pages;
+	int node = pgdat->node_id;
+	struct page *page;
+	struct zone *zone;
+
+	nr_pages = PAGE_ALIGN(sizeof(struct pglist_data)) >> PAGE_SHIFT;
+	page = virt_to_page(pgdat);
+
+	for (i = 0; i < nr_pages; i++, page++)
+		get_page_bootmem(node, page, NODE_INFO);
+
+	zone = &pgdat->node_zones[0];
+	for (; zone < pgdat->node_zones + MAX_NR_ZONES - 1; zone++) {
+		if (zone->wait_table) {
+			nr_pages = zone->wait_table_hash_nr_entries
+				* sizeof(wait_queue_head_t);
+			nr_pages = PAGE_ALIGN(nr_pages) >> PAGE_SHIFT;
+			page = virt_to_page(zone->wait_table);
+
+			for (i = 0; i < nr_pages; i++, page++)
+				get_page_bootmem(node, page, NODE_INFO);
+		}
+	}
+
+	pfn = pgdat->node_start_pfn;
+	end_pfn = pfn + pgdat->node_spanned_pages;
+
+	/* register section info */
+	for (; pfn < end_pfn; pfn += PAGES_PER_SECTION)
+		register_page_bootmem_info_section(pfn);
+
+}
+#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
+
 static int __add_zone(struct zone *zone, unsigned long phys_start_pfn)
 {
 	struct pglist_data *pgdat = zone->zone_pgdat;
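
The get_page_bootmem()/put_page_bootmem() pair above repurposes two fields that are otherwise unused on bootmem-allocated pages: the kind of data a page backs goes into page->_mapcount (using values below -1, since -1 already means "no mapping", which is what BUG_ON(magic >= -1) checks), the section or node number goes into page->private, and page->_count pins the page. The stand-alone sketch below models that bookkeeping in user space; struct fake_page, the magic values, and the harness are illustrative stand-ins, not kernel code:

/* User-space model of the bootmem page bookkeeping above.
 * "struct fake_page" is a simplified stand-in; the real kernel stores
 * the type in page->_mapcount and the section/node id in page->private. */
#include <assert.h>
#include <stdio.h>

#define SECTION_INFO	(-2)	/* below -1, the "unused" mapcount value */
#define MIX_INFO	(-3)
#define NODE_INFO	(-4)

struct fake_page {
	int mapcount;		/* stands in for page->_mapcount */
	long private;		/* stands in for page->private */
	int count;		/* stands in for page->_count */
};

static void get_page_bootmem(long info, struct fake_page *page, int magic)
{
	page->mapcount = magic;	/* remember what this page backs */
	page->private = info;	/* remember which section/node it belongs to */
	page->count++;		/* pin the page */
}

static void put_page_bootmem(struct fake_page *page)
{
	int magic = page->mapcount;

	assert(magic < -1);	/* must carry one of the magic type values */

	if (--page->count == 1) {
		/* last bootmem reference gone: reset and "free" the page */
		page->private = 0;
		page->mapcount = -1;
		printf("page freed back to the allocator\n");
	}
}

int main(void)
{
	struct fake_page page = { .mapcount = -1, .count = 1 };

	get_page_bootmem(42 /* section nr */, &page, SECTION_INFO);
	put_page_bootmem(&page);	/* count drops back to 1: freed */
	return 0;
}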
@@ -101,6 +200,36 @@ static int __add_section(struct zone *zone, unsigned long phys_start_pfn)
 	return register_new_memory(__pfn_to_section(phys_start_pfn));
 }
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+	/*
+	 * XXX: Freeing memmap with vmemmap is not implemented yet.
+	 * This should be removed later.
+	 */
+	return -EBUSY;
+}
+#else
+static int __remove_section(struct zone *zone, struct mem_section *ms)
+{
+	unsigned long flags;
+	struct pglist_data *pgdat = zone->zone_pgdat;
+	int ret = -EINVAL;
+
+	if (!valid_section(ms))
+		return ret;
+
+	ret = unregister_memory_section(ms);
+	if (ret)
+		return ret;
+
+	pgdat_resize_lock(pgdat, &flags);
+	sparse_remove_one_section(zone, ms);
+	pgdat_resize_unlock(pgdat, &flags);
+	return 0;
+}
+#endif
+
 /*
  * Reasonably generic function for adding memory. It is
  * expected that archs that support memory hotplug will
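
Both __remove_section() variants take a single mem_section: the vmemmap flavor just reports -EBUSY because freeing a vmemmap-backed memmap is not implemented yet, while the classic-sparsemem flavor first drops the sysfs entry via unregister_memory_section() and then removes the section under the pgdat resize lock. The section arithmetic the callers rely on reduces to shifts; a minimal sketch follows, with the geometry hard-coded to 128 MiB sections of 4 KiB pages (a common x86_64 layout, chosen only for illustration; real kernels derive these constants per-arch):

#include <stdio.h>

/* Illustrative values: 128 MiB sections with 4 KiB pages, i.e. 32768
 * pages per section. */
#define PAGE_SHIFT		12
#define SECTION_SIZE_BITS	27
#define PFN_SECTION_SHIFT	(SECTION_SIZE_BITS - PAGE_SHIFT)
#define PAGES_PER_SECTION	(1UL << PFN_SECTION_SHIFT)

static unsigned long pfn_to_section_nr(unsigned long pfn)
{
	return pfn >> PFN_SECTION_SHIFT;
}

int main(void)
{
	unsigned long pfn = 0x100000;	/* the pfn at 4 GiB with 4 KiB pages */

	printf("pfn %#lx -> section %lu (%lu pages/section)\n",
	       pfn, pfn_to_section_nr(pfn), PAGES_PER_SECTION);
	return 0;
}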
@@ -134,6 +263,42 @@ int __add_pages(struct zone *zone, unsigned long phys_start_pfn,
 }
 EXPORT_SYMBOL_GPL(__add_pages);
 
+/**
+ * __remove_pages() - remove sections of pages from a zone
+ * @zone: zone from which pages need to be removed
+ * @phys_start_pfn: starting pageframe (must be aligned to start of a section)
+ * @nr_pages: number of pages to remove (must be multiple of section size)
+ *
+ * Generic helper function to remove section mappings and sysfs entries
+ * for the section of the memory we are removing. Caller needs to make
+ * sure that pages are marked reserved and zones are adjusted properly by
+ * calling offline_pages().
+ */
+int __remove_pages(struct zone *zone, unsigned long phys_start_pfn,
+		 unsigned long nr_pages)
+{
+	unsigned long i, ret = 0;
+	int sections_to_remove;
+
+	/*
+	 * We can only remove entire sections
+	 */
+	BUG_ON(phys_start_pfn & ~PAGE_SECTION_MASK);
+	BUG_ON(nr_pages % PAGES_PER_SECTION);
+
+	release_mem_region(phys_start_pfn << PAGE_SHIFT, nr_pages * PAGE_SIZE);
+
+	sections_to_remove = nr_pages / PAGES_PER_SECTION;
+	for (i = 0; i < sections_to_remove; i++) {
+		unsigned long pfn = phys_start_pfn + i*PAGES_PER_SECTION;
+		ret = __remove_section(zone, __pfn_to_section(pfn));
+		if (ret)
+			break;
+	}
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__remove_pages);
+
 static void grow_zone_span(struct zone *zone,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
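
__remove_pages() only deals in whole sections: the two BUG_ON()s reject any range whose start or length is not section-aligned, and release_mem_region() gives back the iomem resource that register_memory_resource() claimed at add time. A user-space sketch of that alignment contract, reusing the illustrative 32768-pages-per-section geometry from the previous sketch:

/* Sketch of the alignment checks __remove_pages() enforces; the section
 * geometry is the same illustrative hard-coded value as above. */
#include <stdbool.h>
#include <stdio.h>

#define PAGES_PER_SECTION	32768UL
#define PAGE_SECTION_MASK	(~(PAGES_PER_SECTION - 1))

static bool range_is_removable(unsigned long start_pfn, unsigned long nr_pages)
{
	if (start_pfn & ~PAGE_SECTION_MASK)
		return false;	/* start not on a section boundary */
	if (nr_pages % PAGES_PER_SECTION)
		return false;	/* length not a whole number of sections */
	return true;
}

int main(void)
{
	printf("%d\n", range_is_removable(32768, 65536));	/* 1: two sections */
	printf("%d\n", range_is_removable(100, 32768));		/* 0: misaligned */
	return 0;
}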
@@ -164,6 +329,25 @@ static void grow_pgdat_span(struct pglist_data *pgdat,
 		pgdat->node_start_pfn;
 }
 
+void online_page(struct page *page)
+{
+	totalram_pages++;
+	num_physpages++;
+
+#ifdef CONFIG_HIGHMEM
+	if (PageHighMem(page))
+		totalhigh_pages++;
+#endif
+
+#ifdef CONFIG_FLATMEM
+	max_mapnr = max(page_to_pfn(page), max_mapnr);
+#endif
+
+	ClearPageReserved(page);
+	init_page_count(page);
+	__free_page(page);
+}
+
 static int online_pages_range(unsigned long start_pfn, unsigned long nr_pages,
 					void *arg)
 {
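
online_page() is the per-page handoff performed while onlining: bump the global accounting (totalram_pages, num_physpages, plus totalhigh_pages for highmem pages), then clear PG_reserved, reset the refcount to one, and feed the page to the buddy allocator with __free_page(). A rough user-space model of that ordering; the counter names match the kernel's, but the page structure and harness are stand-ins:

/* User-space model of the accounting online_page() performs. */
#include <stdio.h>

static unsigned long totalram_pages;
static unsigned long num_physpages;

struct fake_page {
	int reserved;		/* stands in for PG_reserved */
	int count;		/* stands in for page->_count */
};

static void online_page(struct fake_page *page)
{
	totalram_pages++;	/* one more page usable by the allocator */
	num_physpages++;

	page->reserved = 0;	/* ClearPageReserved() */
	page->count = 1;	/* init_page_count() */
	/* __free_page() would now drop the refcount and free it for real */
	printf("onlined: totalram_pages=%lu\n", totalram_pages);
}

int main(void)
{
	struct fake_page p = { .reserved = 1, .count = 0 };

	online_page(&p);
	return 0;
}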