path: root/fs/sysfs/mount.c
blob: 281c0c9bc39f84f85d9ab6f78adc2d93e56a4a40
/*
 * fs/sysfs/mount.c - operations for initializing and mounting sysfs
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007 Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 *
 * Please see Documentation/filesystems/sysfs.txt for more information.
 */

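/*
 * sysfs is normally mounted from userspace with something like
 * "mount -t sysfs sysfs /sys".  sysfs_init() below also pins an
 * internal mount via kern_mount(), so a superblock exists before
 * (and independent of) any userspace mount.
 */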
#define DEBUG

#include <linux/fs.h>
#include <linux/mount.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/magic.h>
#include <linux/slab.h>

#include "sysfs.h"


static struct vfsmount *sysfs_mount;
struct kmem_cache *sysfs_dir_cachep;

static const struct super_operations sysfs_ops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,
	.delete_inode	= sysfs_delete_inode,
};

struct sysfs_dirent sysfs_root = {
	.s_name		= "",
	.s_count	= ATOMIC_INIT(1),
	.s_flags	= SYSFS_DIR | (KOBJ_NS_TYPE_NONE << SYSFS_NS_TYPE_SHIFT),
	.s_mode		= S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
	.s_ino		= 1,
};

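/*
 * sysfs_fill_super - set up a freshly allocated sysfs superblock
 *
 * Fill in the basic superblock fields, look up the inode backing
 * sysfs_root under sysfs_mutex and hang a root dentry off it.
 * Called from sysfs_get_sb() only for superblocks that do not yet
 * have a root.
 */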
static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct inode *inode;
	struct dentry *root;

	sb->s_blocksize = PAGE_CACHE_SIZE;
	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
	sb->s_magic = SYSFS_MAGIC;
	sb->s_op = &sysfs_ops;
	sb->s_time_gran = 1;

	/* get root inode, initialize and unlock it */
	mutex_lock(&sysfs_mutex);
	inode = sysfs_get_inode(sb, &sysfs_root);
	mutex_unlock(&sysfs_mutex);
	if (!inode) {
		pr_debug("sysfs: could not get root inode\n");
		return -ENOMEM;
	}

	/* instantiate and link root dentry */
	root = d_alloc_root(inode);
	if (!root) {
		pr_debug("%s: could not get root dentry!\n",__func__);
		iput(inode);
		return -ENOMEM;
	}
	root->d_fsdata = &sysfs_root;
	sb->s_root = root;
	return 0;
}

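/*
 * sysfs_test_super - match an existing superblock against a set of
 * namespace tags
 *
 * Two mounts share a superblock only if they agree on every
 * kobj_ns_type tag, so each combination of namespaces sees its own
 * sysfs instance.
 */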
static int sysfs_test_super(struct super_block *sb, void *data)
{
	struct sysfs_super_info *sb_info = sysfs_info(sb);
	struct sysfs_super_info *info = data;
	enum kobj_ns_type type;
	int found = 1;

	for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++) {
		if (sb_info->ns[type] != info->ns[type])
			found = 0;
	}
	return found;
}

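/*
 * sysfs_set_super - initialize a newly allocated superblock with the
 * sysfs_super_info passed to sget(); the superblock takes ownership
 * of it via sb->s_fs_info.
 */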
static int sysfs_set_super(struct super_block *sb, void *data)
{
	int error;
	error = set_anon_super(sb, data);
	if (!error)
		sb->s_fs_info = data;
	return error;
}

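/*
 * sysfs_get_sb - mount entry point
 *
 * Snapshot the caller's current namespaces, then use sget() with
 * sysfs_test_super()/sysfs_set_super() to either reuse a matching
 * superblock or create and fill a new one, and attach it to the
 * passed-in vfsmount via simple_set_mnt().
 */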
static int sysfs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	struct sysfs_super_info *info;
	enum kobj_ns_type type;
	struct super_block *sb;
	int error;

	error = -ENOMEM;
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		goto out;

	for (type = KOBJ_NS_TYPE_NONE; type < KOBJ_NS_TYPES; type++)
		info->ns[type] = kobj_ns_current(type);

	sb = sget(fs_type, sysfs_test_super, sysfs_set_super, info);
	if (IS_ERR(sb) || sb->s_fs_info != info)
		kfree(info);
	if (IS_ERR(sb)) {
		error = PTR_ERR(sb);
		goto out;
	}
	if (!sb->s_root) {
		sb->s_flags = flags;
		error = sysfs_fill_super(sb, data, flags & MS_SILENT ? 1 : 0);
		if (error) {
			deactivate_locked_super(sb);
			goto out;
		}
		sb->s_flags |= MS_ACTIVE;
	}

	simple_set_mnt(mnt, sb);
	error = 0;
out:
	return error;
}

static void sysfs_kill_sb(struct super_block *sb)
{
	struct sysfs_super_info *info = sysfs_info(sb);

	/*
	 * Remove the superblock from fs_supers/s_instances so it can
	 * no longer be found, before freeing the sysfs_super_info.
	 */
	kill_anon_super(sb);
	kfree(info);
}

static struct file_system_type sysfs_fs_type = {
	.name		= "sysfs",
	.get_sb		= sysfs_get_sb,
	.kill_sb	= sysfs_kill_sb,
};

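/*
 * sysfs_exit_ns - a namespace of @type identified by @ns is going away
 *
 * Clear the stale tag from every live sysfs superblock so it can no
 * longer be matched by sysfs_test_super(); the superblocks themselves
 * stay around until they are unmounted.
 */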
void sysfs_exit_ns(enum kobj_ns_type type, const void *ns)
{
	struct super_block *sb;

	mutex_lock(&sysfs_mutex);
	spin_lock(&sb_lock);
	list_for_each_entry(sb, &sysfs_fs_type.fs_supers, s_instances) {
		struct sysfs_super_info *info = sysfs_info(sb);
		/*
		 * If we see a superblock on the fs_supers/s_instances
		 * list, the unmount has not completed and sb->s_fs_info
		 * points to a valid struct sysfs_super_info.
		 */
		/* Ignore superblocks with the wrong ns */
		if (info->ns[type] != ns)
			continue;
		info->ns[type] = NULL;
	}
	spin_unlock(&sb_lock);
	mutex_unlock(&sysfs_mutex);
}

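/*
 * sysfs_init - early initialization
 *
 * Create the sysfs_dirent cache, initialize the inode layer, register
 * the filesystem type and pin an internal mount via kern_mount().
 * Any failure after the cache has been created unwinds it.
 */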
int __init sysfs_init(void)
{
	int err = -ENOMEM;

	sysfs_dir_cachep = kmem_cache_create("sysfs_dir_cache",
					      sizeof(struct sysfs_dirent),
					      0, 0, NULL);
	if (!sysfs_dir_cachep)
		goto out;

	err = sysfs_inode_init();
	if (err)
		goto out_err;

	err = register_filesystem(&sysfs_fs_type);
	if (!err) {
		sysfs_mount = kern_mount(&sysfs_fs_type);
		if (IS_ERR(sysfs_mount)) {
			printk(KERN_ERR "sysfs: could not mount!\n");
			err = PTR_ERR(sysfs_mount);
			sysfs_mount = NULL;
			unregister_filesystem(&sysfs_fs_type);
			goto out_err;
		}
	} else
		goto out_err;
out:
	return err;
out_err:
	kmem_cache_destroy(sysfs_dir_cachep);
	sysfs_dir_cachep = NULL;
	goto out;
}

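/*
 * sysfs_get()/sysfs_put() are macros in sysfs.h wrapping the inline
 * __sysfs_get()/__sysfs_put() helpers.  Undefine them here to provide
 * real, exported functions that modules can link against.
 */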
#undef sysfs_get
struct sysfs_dirent *sysfs_get(struct sysfs_dirent *sd)
{
	return __sysfs_get(sd);
}
EXPORT_SYMBOL_GPL(sysfs_get);

#undef sysfs_put
void sysfs_put(struct sysfs_dirent *sd)
{
	__sysfs_put(sd);
}
EXPORT_SYMBOL_GPL(sysfs_put);