Diffstat (limited to 'mm')

 -rw-r--r--  mm/Kconfig            8
 -rw-r--r--  mm/Makefile           7
 -rw-r--r--  mm/bootmem.c         13
 -rw-r--r--  mm/memblock.c       837
 -rw-r--r--  mm/memory.c           2
 -rw-r--r--  mm/memory_hotplug.c   2
 -rw-r--r--  mm/page_alloc.c      86
 -rw-r--r--  mm/percpu-km.c        8
 -rw-r--r--  mm/percpu.c         401
 -rw-r--r--  mm/percpu_up.c       30
 -rw-r--r--  mm/sparse-vmemmap.c  11
 -rw-r--r--  mm/swapfile.c         6
 -rw-r--r--  mm/vmalloc.c         11

 13 files changed, 894 insertions, 528 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index f0fb9124e410..c2c8a4a11898 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -301,3 +301,11 @@ config NOMMU_INITIAL_TRIM_EXCESS | |||
301 | of 1 says that all excess pages should be trimmed. | 301 | of 1 says that all excess pages should be trimmed. |
302 | 302 | ||
303 | See Documentation/nommu-mmap.txt for more information. | 303 | See Documentation/nommu-mmap.txt for more information. |
304 | |||
305 | # | ||
306 | # UP and nommu archs use km based percpu allocator | ||
307 | # | ||
308 | config NEED_PER_CPU_KM | ||
309 | depends on !SMP | ||
310 | bool | ||
311 | default y | ||
diff --git a/mm/Makefile b/mm/Makefile
index 34b2546a9e37..f73f75a29f82 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -11,7 +11,7 @@ obj-y := bootmem.o filemap.o mempool.o oom_kill.o fadvise.o \ | |||
11 | maccess.o page_alloc.o page-writeback.o \ | 11 | maccess.o page_alloc.o page-writeback.o \ |
12 | readahead.o swap.o truncate.o vmscan.o shmem.o \ | 12 | readahead.o swap.o truncate.o vmscan.o shmem.o \ |
13 | prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ | 13 | prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ |
14 | page_isolation.o mm_init.o mmu_context.o \ | 14 | page_isolation.o mm_init.o mmu_context.o percpu.o \ |
15 | $(mmu-y) | 15 | $(mmu-y) |
16 | obj-y += init-mm.o | 16 | obj-y += init-mm.o |
17 | 17 | ||
@@ -36,11 +36,6 @@ obj-$(CONFIG_FAILSLAB) += failslab.o | |||
36 | obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o | 36 | obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o |
37 | obj-$(CONFIG_FS_XIP) += filemap_xip.o | 37 | obj-$(CONFIG_FS_XIP) += filemap_xip.o |
38 | obj-$(CONFIG_MIGRATION) += migrate.o | 38 | obj-$(CONFIG_MIGRATION) += migrate.o |
39 | ifdef CONFIG_SMP | ||
40 | obj-y += percpu.o | ||
41 | else | ||
42 | obj-y += percpu_up.o | ||
43 | endif | ||
44 | obj-$(CONFIG_QUICKLIST) += quicklist.o | 39 | obj-$(CONFIG_QUICKLIST) += quicklist.o |
45 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o | 40 | obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o |
46 | obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o | 41 | obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o |
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 142c84a54993..13b0caa9793c 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/module.h> | 15 | #include <linux/module.h> |
16 | #include <linux/kmemleak.h> | 16 | #include <linux/kmemleak.h> |
17 | #include <linux/range.h> | 17 | #include <linux/range.h> |
18 | #include <linux/memblock.h> | ||
18 | 19 | ||
19 | #include <asm/bug.h> | 20 | #include <asm/bug.h> |
20 | #include <asm/io.h> | 21 | #include <asm/io.h> |
@@ -434,7 +435,8 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | |||
434 | unsigned long size) | 435 | unsigned long size) |
435 | { | 436 | { |
436 | #ifdef CONFIG_NO_BOOTMEM | 437 | #ifdef CONFIG_NO_BOOTMEM |
437 | free_early(physaddr, physaddr + size); | 438 | kmemleak_free_part(__va(physaddr), size); |
439 | memblock_x86_free_range(physaddr, physaddr + size); | ||
438 | #else | 440 | #else |
439 | unsigned long start, end; | 441 | unsigned long start, end; |
440 | 442 | ||
@@ -459,7 +461,8 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr, | |||
459 | void __init free_bootmem(unsigned long addr, unsigned long size) | 461 | void __init free_bootmem(unsigned long addr, unsigned long size) |
460 | { | 462 | { |
461 | #ifdef CONFIG_NO_BOOTMEM | 463 | #ifdef CONFIG_NO_BOOTMEM |
462 | free_early(addr, addr + size); | 464 | kmemleak_free_part(__va(addr), size); |
465 | memblock_x86_free_range(addr, addr + size); | ||
463 | #else | 466 | #else |
464 | unsigned long start, end; | 467 | unsigned long start, end; |
465 | 468 | ||
@@ -526,6 +529,12 @@ int __init reserve_bootmem(unsigned long addr, unsigned long size, | |||
526 | } | 529 | } |
527 | 530 | ||
528 | #ifndef CONFIG_NO_BOOTMEM | 531 | #ifndef CONFIG_NO_BOOTMEM |
532 | int __weak __init reserve_bootmem_generic(unsigned long phys, unsigned long len, | ||
533 | int flags) | ||
534 | { | ||
535 | return reserve_bootmem(phys, len, flags); | ||
536 | } | ||
537 | |||
529 | static unsigned long __init align_idx(struct bootmem_data *bdata, | 538 | static unsigned long __init align_idx(struct bootmem_data *bdata, |
530 | unsigned long idx, unsigned long step) | 539 | unsigned long idx, unsigned long step) |
531 | { | 540 | { |
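The hunk above adds a __weak default for reserve_bootmem_generic(), so architectures that need special handling can supply a strong definition of their own. A minimal, purely illustrative sketch of such an override, assuming a hypothetical range check (the function name and signature come from the hunk above; the body is invented):

	#include <linux/bootmem.h>
	#include <linux/errno.h>
	#include <linux/init.h>

	/* Hypothetical arch-side override of the weak default added above. */
	int __init reserve_bootmem_generic(unsigned long phys, unsigned long len,
					   int flags)
	{
		/* illustrative sanity check: refuse ranges beyond lowmem */
		if (phys + len > (max_low_pfn << PAGE_SHIFT))
			return -EINVAL;

		return reserve_bootmem(phys, len, flags);
	}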
diff --git a/mm/memblock.c b/mm/memblock.c
index 43840b305ecb..400dc62697d7 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -11,237 +11,423 @@ | |||
11 | */ | 11 | */ |
12 | 12 | ||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/slab.h> | ||
14 | #include <linux/init.h> | 15 | #include <linux/init.h> |
15 | #include <linux/bitops.h> | 16 | #include <linux/bitops.h> |
17 | #include <linux/poison.h> | ||
18 | #include <linux/pfn.h> | ||
19 | #include <linux/debugfs.h> | ||
20 | #include <linux/seq_file.h> | ||
16 | #include <linux/memblock.h> | 21 | #include <linux/memblock.h> |
17 | 22 | ||
18 | #define MEMBLOCK_ALLOC_ANYWHERE 0 | 23 | struct memblock memblock __initdata_memblock; |
19 | 24 | ||
20 | struct memblock memblock; | 25 | int memblock_debug __initdata_memblock; |
26 | int memblock_can_resize __initdata_memblock; | ||
27 | static struct memblock_region memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; | ||
28 | static struct memblock_region memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS + 1] __initdata_memblock; | ||
21 | 29 | ||
22 | static int memblock_debug; | 30 | /* inline so we don't get a warning when pr_debug is compiled out */ |
31 | static inline const char *memblock_type_name(struct memblock_type *type) | ||
32 | { | ||
33 | if (type == &memblock.memory) | ||
34 | return "memory"; | ||
35 | else if (type == &memblock.reserved) | ||
36 | return "reserved"; | ||
37 | else | ||
38 | return "unknown"; | ||
39 | } | ||
23 | 40 | ||
24 | static int __init early_memblock(char *p) | 41 | /* |
42 | * Address comparison utilities | ||
43 | */ | ||
44 | |||
45 | static phys_addr_t __init_memblock memblock_align_down(phys_addr_t addr, phys_addr_t size) | ||
25 | { | 46 | { |
26 | if (p && strstr(p, "debug")) | 47 | return addr & ~(size - 1); |
27 | memblock_debug = 1; | 48 | } |
49 | |||
50 | static phys_addr_t __init_memblock memblock_align_up(phys_addr_t addr, phys_addr_t size) | ||
51 | { | ||
52 | return (addr + (size - 1)) & ~(size - 1); | ||
53 | } | ||
54 | |||
55 | static unsigned long __init_memblock memblock_addrs_overlap(phys_addr_t base1, phys_addr_t size1, | ||
56 | phys_addr_t base2, phys_addr_t size2) | ||
57 | { | ||
58 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); | ||
59 | } | ||
60 | |||
61 | static long __init_memblock memblock_addrs_adjacent(phys_addr_t base1, phys_addr_t size1, | ||
62 | phys_addr_t base2, phys_addr_t size2) | ||
63 | { | ||
64 | if (base2 == base1 + size1) | ||
65 | return 1; | ||
66 | else if (base1 == base2 + size2) | ||
67 | return -1; | ||
68 | |||
28 | return 0; | 69 | return 0; |
29 | } | 70 | } |
30 | early_param("memblock", early_memblock); | ||
31 | 71 | ||
32 | static void memblock_dump(struct memblock_region *region, char *name) | 72 | static long __init_memblock memblock_regions_adjacent(struct memblock_type *type, |
73 | unsigned long r1, unsigned long r2) | ||
33 | { | 74 | { |
34 | unsigned long long base, size; | 75 | phys_addr_t base1 = type->regions[r1].base; |
35 | int i; | 76 | phys_addr_t size1 = type->regions[r1].size; |
77 | phys_addr_t base2 = type->regions[r2].base; | ||
78 | phys_addr_t size2 = type->regions[r2].size; | ||
36 | 79 | ||
37 | pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); | 80 | return memblock_addrs_adjacent(base1, size1, base2, size2); |
81 | } | ||
38 | 82 | ||
39 | for (i = 0; i < region->cnt; i++) { | 83 | long __init_memblock memblock_overlaps_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) |
40 | base = region->region[i].base; | 84 | { |
41 | size = region->region[i].size; | 85 | unsigned long i; |
42 | 86 | ||
43 | pr_info(" %s[0x%x]\t0x%016llx - 0x%016llx, 0x%llx bytes\n", | 87 | for (i = 0; i < type->cnt; i++) { |
44 | name, i, base, base + size - 1, size); | 88 | phys_addr_t rgnbase = type->regions[i].base; |
89 | phys_addr_t rgnsize = type->regions[i].size; | ||
90 | if (memblock_addrs_overlap(base, size, rgnbase, rgnsize)) | ||
91 | break; | ||
45 | } | 92 | } |
93 | |||
94 | return (i < type->cnt) ? i : -1; | ||
46 | } | 95 | } |
47 | 96 | ||
48 | void memblock_dump_all(void) | 97 | /* |
98 | * Find, allocate, deallocate or reserve unreserved regions. All allocations | ||
99 | * are top-down. | ||
100 | */ | ||
101 | |||
102 | static phys_addr_t __init_memblock memblock_find_region(phys_addr_t start, phys_addr_t end, | ||
103 | phys_addr_t size, phys_addr_t align) | ||
49 | { | 104 | { |
50 | if (!memblock_debug) | 105 | phys_addr_t base, res_base; |
51 | return; | 106 | long j; |
52 | 107 | ||
53 | pr_info("MEMBLOCK configuration:\n"); | 108 | /* In case, huge size is requested */ |
54 | pr_info(" rmo_size = 0x%llx\n", (unsigned long long)memblock.rmo_size); | 109 | if (end < size) |
55 | pr_info(" memory.size = 0x%llx\n", (unsigned long long)memblock.memory.size); | 110 | return MEMBLOCK_ERROR; |
56 | 111 | ||
57 | memblock_dump(&memblock.memory, "memory"); | 112 | base = memblock_align_down((end - size), align); |
58 | memblock_dump(&memblock.reserved, "reserved"); | 113 | |
114 | /* Prevent allocations returning 0 as it's also used to | ||
115 | * indicate an allocation failure | ||
116 | */ | ||
117 | if (start == 0) | ||
118 | start = PAGE_SIZE; | ||
119 | |||
120 | while (start <= base) { | ||
121 | j = memblock_overlaps_region(&memblock.reserved, base, size); | ||
122 | if (j < 0) | ||
123 | return base; | ||
124 | res_base = memblock.reserved.regions[j].base; | ||
125 | if (res_base < size) | ||
126 | break; | ||
127 | base = memblock_align_down(res_base - size, align); | ||
128 | } | ||
129 | |||
130 | return MEMBLOCK_ERROR; | ||
59 | } | 131 | } |
60 | 132 | ||
61 | static unsigned long memblock_addrs_overlap(u64 base1, u64 size1, u64 base2, | 133 | static phys_addr_t __init_memblock memblock_find_base(phys_addr_t size, |
62 | u64 size2) | 134 | phys_addr_t align, phys_addr_t start, phys_addr_t end) |
63 | { | 135 | { |
64 | return ((base1 < (base2 + size2)) && (base2 < (base1 + size1))); | 136 | long i; |
137 | |||
138 | BUG_ON(0 == size); | ||
139 | |||
140 | size = memblock_align_up(size, align); | ||
141 | |||
142 | /* Pump up max_addr */ | ||
143 | if (end == MEMBLOCK_ALLOC_ACCESSIBLE) | ||
144 | end = memblock.current_limit; | ||
145 | |||
146 | /* We do a top-down search, this tends to limit memory | ||
147 | * fragmentation by keeping early boot allocs near the | ||
148 | * top of memory | ||
149 | */ | ||
150 | for (i = memblock.memory.cnt - 1; i >= 0; i--) { | ||
151 | phys_addr_t memblockbase = memblock.memory.regions[i].base; | ||
152 | phys_addr_t memblocksize = memblock.memory.regions[i].size; | ||
153 | phys_addr_t bottom, top, found; | ||
154 | |||
155 | if (memblocksize < size) | ||
156 | continue; | ||
157 | if ((memblockbase + memblocksize) <= start) | ||
158 | break; | ||
159 | bottom = max(memblockbase, start); | ||
160 | top = min(memblockbase + memblocksize, end); | ||
161 | if (bottom >= top) | ||
162 | continue; | ||
163 | found = memblock_find_region(bottom, top, size, align); | ||
164 | if (found != MEMBLOCK_ERROR) | ||
165 | return found; | ||
166 | } | ||
167 | return MEMBLOCK_ERROR; | ||
65 | } | 168 | } |
66 | 169 | ||
67 | static long memblock_addrs_adjacent(u64 base1, u64 size1, u64 base2, u64 size2) | 170 | /* |
171 | * Find a free area with specified alignment in a specific range. | ||
172 | */ | ||
173 | u64 __init_memblock memblock_find_in_range(u64 start, u64 end, u64 size, u64 align) | ||
68 | { | 174 | { |
69 | if (base2 == base1 + size1) | 175 | return memblock_find_base(size, align, start, end); |
70 | return 1; | 176 | } |
71 | else if (base1 == base2 + size2) | ||
72 | return -1; | ||
73 | 177 | ||
74 | return 0; | 178 | /* |
179 | * Free memblock.reserved.regions | ||
180 | */ | ||
181 | int __init_memblock memblock_free_reserved_regions(void) | ||
182 | { | ||
183 | if (memblock.reserved.regions == memblock_reserved_init_regions) | ||
184 | return 0; | ||
185 | |||
186 | return memblock_free(__pa(memblock.reserved.regions), | ||
187 | sizeof(struct memblock_region) * memblock.reserved.max); | ||
75 | } | 188 | } |
76 | 189 | ||
77 | static long memblock_regions_adjacent(struct memblock_region *rgn, | 190 | /* |
78 | unsigned long r1, unsigned long r2) | 191 | * Reserve memblock.reserved.regions |
192 | */ | ||
193 | int __init_memblock memblock_reserve_reserved_regions(void) | ||
79 | { | 194 | { |
80 | u64 base1 = rgn->region[r1].base; | 195 | if (memblock.reserved.regions == memblock_reserved_init_regions) |
81 | u64 size1 = rgn->region[r1].size; | 196 | return 0; |
82 | u64 base2 = rgn->region[r2].base; | ||
83 | u64 size2 = rgn->region[r2].size; | ||
84 | 197 | ||
85 | return memblock_addrs_adjacent(base1, size1, base2, size2); | 198 | return memblock_reserve(__pa(memblock.reserved.regions), |
199 | sizeof(struct memblock_region) * memblock.reserved.max); | ||
86 | } | 200 | } |
87 | 201 | ||
88 | static void memblock_remove_region(struct memblock_region *rgn, unsigned long r) | 202 | static void __init_memblock memblock_remove_region(struct memblock_type *type, unsigned long r) |
89 | { | 203 | { |
90 | unsigned long i; | 204 | unsigned long i; |
91 | 205 | ||
92 | for (i = r; i < rgn->cnt - 1; i++) { | 206 | for (i = r; i < type->cnt - 1; i++) { |
93 | rgn->region[i].base = rgn->region[i + 1].base; | 207 | type->regions[i].base = type->regions[i + 1].base; |
94 | rgn->region[i].size = rgn->region[i + 1].size; | 208 | type->regions[i].size = type->regions[i + 1].size; |
95 | } | 209 | } |
96 | rgn->cnt--; | 210 | type->cnt--; |
97 | } | 211 | } |
98 | 212 | ||
99 | /* Assumption: base addr of region 1 < base addr of region 2 */ | 213 | /* Assumption: base addr of region 1 < base addr of region 2 */ |
100 | static void memblock_coalesce_regions(struct memblock_region *rgn, | 214 | static void __init_memblock memblock_coalesce_regions(struct memblock_type *type, |
101 | unsigned long r1, unsigned long r2) | 215 | unsigned long r1, unsigned long r2) |
102 | { | 216 | { |
103 | rgn->region[r1].size += rgn->region[r2].size; | 217 | type->regions[r1].size += type->regions[r2].size; |
104 | memblock_remove_region(rgn, r2); | 218 | memblock_remove_region(type, r2); |
105 | } | 219 | } |
106 | 220 | ||
107 | void __init memblock_init(void) | 221 | /* Defined below but needed now */ |
222 | static long memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size); | ||
223 | |||
224 | static int __init_memblock memblock_double_array(struct memblock_type *type) | ||
108 | { | 225 | { |
109 | /* Create a dummy zero size MEMBLOCK which will get coalesced away later. | 226 | struct memblock_region *new_array, *old_array; |
110 | * This simplifies the memblock_add() code below... | 227 | phys_addr_t old_size, new_size, addr; |
228 | int use_slab = slab_is_available(); | ||
229 | |||
230 | /* We don't allow resizing until we know about the reserved regions | ||
231 | * of memory that aren't suitable for allocation | ||
111 | */ | 232 | */ |
112 | memblock.memory.region[0].base = 0; | 233 | if (!memblock_can_resize) |
113 | memblock.memory.region[0].size = 0; | 234 | return -1; |
114 | memblock.memory.cnt = 1; | ||
115 | 235 | ||
116 | /* Ditto. */ | 236 | /* Calculate new doubled size */ |
117 | memblock.reserved.region[0].base = 0; | 237 | old_size = type->max * sizeof(struct memblock_region); |
118 | memblock.reserved.region[0].size = 0; | 238 | new_size = old_size << 1; |
119 | memblock.reserved.cnt = 1; | 239 | |
120 | } | 240 | /* Try to find some space for it. |
241 | * | ||
242 | * WARNING: We assume that either slab_is_available() and we use it or | ||
243 | * we use MEMBLOCK for allocations. That means that this is unsafe to use | ||
244 | * when bootmem is currently active (unless bootmem itself is implemented | ||
245 | * on top of MEMBLOCK which isn't the case yet) | ||
246 | * | ||
247 | * This should however not be an issue for now, as we currently only | ||
248 | * call into MEMBLOCK while it's still active, or much later when slab is | ||
249 | * active for memory hotplug operations | ||
250 | */ | ||
251 | if (use_slab) { | ||
252 | new_array = kmalloc(new_size, GFP_KERNEL); | ||
253 | addr = new_array == NULL ? MEMBLOCK_ERROR : __pa(new_array); | ||
254 | } else | ||
255 | addr = memblock_find_base(new_size, sizeof(phys_addr_t), 0, MEMBLOCK_ALLOC_ACCESSIBLE); | ||
256 | if (addr == MEMBLOCK_ERROR) { | ||
257 | pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", | ||
258 | memblock_type_name(type), type->max, type->max * 2); | ||
259 | return -1; | ||
260 | } | ||
261 | new_array = __va(addr); | ||
121 | 262 | ||
122 | void __init memblock_analyze(void) | 263 | memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]", |
123 | { | 264 | memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1); |
124 | int i; | ||
125 | 265 | ||
126 | memblock.memory.size = 0; | 266 | /* Found space, we now need to move the array over before |
267 | * we add the reserved region since it may be our reserved | ||
268 | * array itself that is full. | ||
269 | */ | ||
270 | memcpy(new_array, type->regions, old_size); | ||
271 | memset(new_array + type->max, 0, old_size); | ||
272 | old_array = type->regions; | ||
273 | type->regions = new_array; | ||
274 | type->max <<= 1; | ||
275 | |||
276 | /* If we use SLAB that's it, we are done */ | ||
277 | if (use_slab) | ||
278 | return 0; | ||
127 | 279 | ||
128 | for (i = 0; i < memblock.memory.cnt; i++) | 280 | /* Add the new reserved region now. Should not fail ! */ |
129 | memblock.memory.size += memblock.memory.region[i].size; | 281 | BUG_ON(memblock_add_region(&memblock.reserved, addr, new_size) < 0); |
282 | |||
283 | /* If the array wasn't our static init one, then free it. We only do | ||
284 | * that before SLAB is available as later on, we don't know whether | ||
285 | * to use kfree or free_bootmem_pages(). Shouldn't be a big deal | ||
286 | * anyways | ||
287 | */ | ||
288 | if (old_array != memblock_memory_init_regions && | ||
289 | old_array != memblock_reserved_init_regions) | ||
290 | memblock_free(__pa(old_array), old_size); | ||
291 | |||
292 | return 0; | ||
130 | } | 293 | } |
131 | 294 | ||
132 | static long memblock_add_region(struct memblock_region *rgn, u64 base, u64 size) | 295 | extern int __init_memblock __weak memblock_memory_can_coalesce(phys_addr_t addr1, phys_addr_t size1, |
296 | phys_addr_t addr2, phys_addr_t size2) | ||
297 | { | ||
298 | return 1; | ||
299 | } | ||
300 | |||
301 | static long __init_memblock memblock_add_region(struct memblock_type *type, phys_addr_t base, phys_addr_t size) | ||
133 | { | 302 | { |
134 | unsigned long coalesced = 0; | 303 | unsigned long coalesced = 0; |
135 | long adjacent, i; | 304 | long adjacent, i; |
136 | 305 | ||
137 | if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { | 306 | if ((type->cnt == 1) && (type->regions[0].size == 0)) { |
138 | rgn->region[0].base = base; | 307 | type->regions[0].base = base; |
139 | rgn->region[0].size = size; | 308 | type->regions[0].size = size; |
140 | return 0; | 309 | return 0; |
141 | } | 310 | } |
142 | 311 | ||
143 | /* First try and coalesce this MEMBLOCK with another. */ | 312 | /* First try and coalesce this MEMBLOCK with another. */ |
144 | for (i = 0; i < rgn->cnt; i++) { | 313 | for (i = 0; i < type->cnt; i++) { |
145 | u64 rgnbase = rgn->region[i].base; | 314 | phys_addr_t rgnbase = type->regions[i].base; |
146 | u64 rgnsize = rgn->region[i].size; | 315 | phys_addr_t rgnsize = type->regions[i].size; |
147 | 316 | ||
148 | if ((rgnbase == base) && (rgnsize == size)) | 317 | if ((rgnbase == base) && (rgnsize == size)) |
149 | /* Already have this region, so we're done */ | 318 | /* Already have this region, so we're done */ |
150 | return 0; | 319 | return 0; |
151 | 320 | ||
152 | adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize); | 321 | adjacent = memblock_addrs_adjacent(base, size, rgnbase, rgnsize); |
322 | /* Check if arch allows coalescing */ | ||
323 | if (adjacent != 0 && type == &memblock.memory && | ||
324 | !memblock_memory_can_coalesce(base, size, rgnbase, rgnsize)) | ||
325 | break; | ||
153 | if (adjacent > 0) { | 326 | if (adjacent > 0) { |
154 | rgn->region[i].base -= size; | 327 | type->regions[i].base -= size; |
155 | rgn->region[i].size += size; | 328 | type->regions[i].size += size; |
156 | coalesced++; | 329 | coalesced++; |
157 | break; | 330 | break; |
158 | } else if (adjacent < 0) { | 331 | } else if (adjacent < 0) { |
159 | rgn->region[i].size += size; | 332 | type->regions[i].size += size; |
160 | coalesced++; | 333 | coalesced++; |
161 | break; | 334 | break; |
162 | } | 335 | } |
163 | } | 336 | } |
164 | 337 | ||
165 | if ((i < rgn->cnt - 1) && memblock_regions_adjacent(rgn, i, i+1)) { | 338 | /* If we plugged a hole, we may want to also coalesce with the |
166 | memblock_coalesce_regions(rgn, i, i+1); | 339 | * next region |
340 | */ | ||
341 | if ((i < type->cnt - 1) && memblock_regions_adjacent(type, i, i+1) && | ||
342 | ((type != &memblock.memory || memblock_memory_can_coalesce(type->regions[i].base, | ||
343 | type->regions[i].size, | ||
344 | type->regions[i+1].base, | ||
345 | type->regions[i+1].size)))) { | ||
346 | memblock_coalesce_regions(type, i, i+1); | ||
167 | coalesced++; | 347 | coalesced++; |
168 | } | 348 | } |
169 | 349 | ||
170 | if (coalesced) | 350 | if (coalesced) |
171 | return coalesced; | 351 | return coalesced; |
172 | if (rgn->cnt >= MAX_MEMBLOCK_REGIONS) | 352 | |
353 | /* If we are out of space, we fail. It's too late to resize the array | ||
354 | * but then this shouldn't have happened in the first place. | ||
355 | */ | ||
356 | if (WARN_ON(type->cnt >= type->max)) | ||
173 | return -1; | 357 | return -1; |
174 | 358 | ||
175 | /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ | 359 | /* Couldn't coalesce the MEMBLOCK, so add it to the sorted table. */ |
176 | for (i = rgn->cnt - 1; i >= 0; i--) { | 360 | for (i = type->cnt - 1; i >= 0; i--) { |
177 | if (base < rgn->region[i].base) { | 361 | if (base < type->regions[i].base) { |
178 | rgn->region[i+1].base = rgn->region[i].base; | 362 | type->regions[i+1].base = type->regions[i].base; |
179 | rgn->region[i+1].size = rgn->region[i].size; | 363 | type->regions[i+1].size = type->regions[i].size; |
180 | } else { | 364 | } else { |
181 | rgn->region[i+1].base = base; | 365 | type->regions[i+1].base = base; |
182 | rgn->region[i+1].size = size; | 366 | type->regions[i+1].size = size; |
183 | break; | 367 | break; |
184 | } | 368 | } |
185 | } | 369 | } |
186 | 370 | ||
187 | if (base < rgn->region[0].base) { | 371 | if (base < type->regions[0].base) { |
188 | rgn->region[0].base = base; | 372 | type->regions[0].base = base; |
189 | rgn->region[0].size = size; | 373 | type->regions[0].size = size; |
374 | } | ||
375 | type->cnt++; | ||
376 | |||
377 | /* The array is full ? Try to resize it. If that fails, we undo | ||
378 | * our allocation and return an error | ||
379 | */ | ||
380 | if (type->cnt == type->max && memblock_double_array(type)) { | ||
381 | type->cnt--; | ||
382 | return -1; | ||
190 | } | 383 | } |
191 | rgn->cnt++; | ||
192 | 384 | ||
193 | return 0; | 385 | return 0; |
194 | } | 386 | } |
195 | 387 | ||
196 | long memblock_add(u64 base, u64 size) | 388 | long __init_memblock memblock_add(phys_addr_t base, phys_addr_t size) |
197 | { | 389 | { |
198 | struct memblock_region *_rgn = &memblock.memory; | 390 | return memblock_add_region(&memblock.memory, base, size); |
199 | |||
200 | /* On pSeries LPAR systems, the first MEMBLOCK is our RMO region. */ | ||
201 | if (base == 0) | ||
202 | memblock.rmo_size = size; | ||
203 | |||
204 | return memblock_add_region(_rgn, base, size); | ||
205 | 391 | ||
206 | } | 392 | } |
207 | 393 | ||
208 | static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size) | 394 | static long __init_memblock __memblock_remove(struct memblock_type *type, phys_addr_t base, phys_addr_t size) |
209 | { | 395 | { |
210 | u64 rgnbegin, rgnend; | 396 | phys_addr_t rgnbegin, rgnend; |
211 | u64 end = base + size; | 397 | phys_addr_t end = base + size; |
212 | int i; | 398 | int i; |
213 | 399 | ||
214 | rgnbegin = rgnend = 0; /* supress gcc warnings */ | 400 | rgnbegin = rgnend = 0; /* supress gcc warnings */ |
215 | 401 | ||
216 | /* Find the region where (base, size) belongs to */ | 402 | /* Find the region where (base, size) belongs to */ |
217 | for (i=0; i < rgn->cnt; i++) { | 403 | for (i=0; i < type->cnt; i++) { |
218 | rgnbegin = rgn->region[i].base; | 404 | rgnbegin = type->regions[i].base; |
219 | rgnend = rgnbegin + rgn->region[i].size; | 405 | rgnend = rgnbegin + type->regions[i].size; |
220 | 406 | ||
221 | if ((rgnbegin <= base) && (end <= rgnend)) | 407 | if ((rgnbegin <= base) && (end <= rgnend)) |
222 | break; | 408 | break; |
223 | } | 409 | } |
224 | 410 | ||
225 | /* Didn't find the region */ | 411 | /* Didn't find the region */ |
226 | if (i == rgn->cnt) | 412 | if (i == type->cnt) |
227 | return -1; | 413 | return -1; |
228 | 414 | ||
229 | /* Check to see if we are removing entire region */ | 415 | /* Check to see if we are removing entire region */ |
230 | if ((rgnbegin == base) && (rgnend == end)) { | 416 | if ((rgnbegin == base) && (rgnend == end)) { |
231 | memblock_remove_region(rgn, i); | 417 | memblock_remove_region(type, i); |
232 | return 0; | 418 | return 0; |
233 | } | 419 | } |
234 | 420 | ||
235 | /* Check to see if region is matching at the front */ | 421 | /* Check to see if region is matching at the front */ |
236 | if (rgnbegin == base) { | 422 | if (rgnbegin == base) { |
237 | rgn->region[i].base = end; | 423 | type->regions[i].base = end; |
238 | rgn->region[i].size -= size; | 424 | type->regions[i].size -= size; |
239 | return 0; | 425 | return 0; |
240 | } | 426 | } |
241 | 427 | ||
242 | /* Check to see if the region is matching at the end */ | 428 | /* Check to see if the region is matching at the end */ |
243 | if (rgnend == end) { | 429 | if (rgnend == end) { |
244 | rgn->region[i].size -= size; | 430 | type->regions[i].size -= size; |
245 | return 0; | 431 | return 0; |
246 | } | 432 | } |
247 | 433 | ||
@@ -249,208 +435,189 @@ static long __memblock_remove(struct memblock_region *rgn, u64 base, u64 size) | |||
249 | * We need to split the entry - adjust the current one to the | 435 | * We need to split the entry - adjust the current one to the |
250 | * beginging of the hole and add the region after hole. | 436 | * beginging of the hole and add the region after hole. |
251 | */ | 437 | */ |
252 | rgn->region[i].size = base - rgn->region[i].base; | 438 | type->regions[i].size = base - type->regions[i].base; |
253 | return memblock_add_region(rgn, end, rgnend - end); | 439 | return memblock_add_region(type, end, rgnend - end); |
254 | } | 440 | } |
255 | 441 | ||
256 | long memblock_remove(u64 base, u64 size) | 442 | long __init_memblock memblock_remove(phys_addr_t base, phys_addr_t size) |
257 | { | 443 | { |
258 | return __memblock_remove(&memblock.memory, base, size); | 444 | return __memblock_remove(&memblock.memory, base, size); |
259 | } | 445 | } |
260 | 446 | ||
261 | long __init memblock_free(u64 base, u64 size) | 447 | long __init_memblock memblock_free(phys_addr_t base, phys_addr_t size) |
262 | { | 448 | { |
263 | return __memblock_remove(&memblock.reserved, base, size); | 449 | return __memblock_remove(&memblock.reserved, base, size); |
264 | } | 450 | } |
265 | 451 | ||
266 | long __init memblock_reserve(u64 base, u64 size) | 452 | long __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size) |
267 | { | 453 | { |
268 | struct memblock_region *_rgn = &memblock.reserved; | 454 | struct memblock_type *_rgn = &memblock.reserved; |
269 | 455 | ||
270 | BUG_ON(0 == size); | 456 | BUG_ON(0 == size); |
271 | 457 | ||
272 | return memblock_add_region(_rgn, base, size); | 458 | return memblock_add_region(_rgn, base, size); |
273 | } | 459 | } |
274 | 460 | ||
275 | long memblock_overlaps_region(struct memblock_region *rgn, u64 base, u64 size) | 461 | phys_addr_t __init __memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
276 | { | 462 | { |
277 | unsigned long i; | 463 | phys_addr_t found; |
278 | 464 | ||
279 | for (i = 0; i < rgn->cnt; i++) { | 465 | /* We align the size to limit fragmentation. Without this, a lot of |
280 | u64 rgnbase = rgn->region[i].base; | 466 | * small allocs quickly eat up the whole reserve array on sparc |
281 | u64 rgnsize = rgn->region[i].size; | 467 | */ |
282 | if (memblock_addrs_overlap(base, size, rgnbase, rgnsize)) | 468 | size = memblock_align_up(size, align); |
283 | break; | ||
284 | } | ||
285 | 469 | ||
286 | return (i < rgn->cnt) ? i : -1; | 470 | found = memblock_find_base(size, align, 0, max_addr); |
471 | if (found != MEMBLOCK_ERROR && | ||
472 | memblock_add_region(&memblock.reserved, found, size) >= 0) | ||
473 | return found; | ||
474 | |||
475 | return 0; | ||
287 | } | 476 | } |
288 | 477 | ||
289 | static u64 memblock_align_down(u64 addr, u64 size) | 478 | phys_addr_t __init memblock_alloc_base(phys_addr_t size, phys_addr_t align, phys_addr_t max_addr) |
290 | { | 479 | { |
291 | return addr & ~(size - 1); | 480 | phys_addr_t alloc; |
481 | |||
482 | alloc = __memblock_alloc_base(size, align, max_addr); | ||
483 | |||
484 | if (alloc == 0) | ||
485 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", | ||
486 | (unsigned long long) size, (unsigned long long) max_addr); | ||
487 | |||
488 | return alloc; | ||
292 | } | 489 | } |
293 | 490 | ||
294 | static u64 memblock_align_up(u64 addr, u64 size) | 491 | phys_addr_t __init memblock_alloc(phys_addr_t size, phys_addr_t align) |
295 | { | 492 | { |
296 | return (addr + (size - 1)) & ~(size - 1); | 493 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); |
297 | } | 494 | } |
298 | 495 | ||
299 | static u64 __init memblock_alloc_nid_unreserved(u64 start, u64 end, | 496 | |
300 | u64 size, u64 align) | 497 | /* |
498 | * Additional node-local allocators. Search for node memory is bottom up | ||
499 | * and walks memblock regions within that node bottom-up as well, but allocation | ||
500 | * within an memblock region is top-down. XXX I plan to fix that at some stage | ||
501 | * | ||
502 | * WARNING: Only available after early_node_map[] has been populated, | ||
503 | * on some architectures, that is after all the calls to add_active_range() | ||
504 | * have been done to populate it. | ||
505 | */ | ||
506 | |||
507 | phys_addr_t __weak __init memblock_nid_range(phys_addr_t start, phys_addr_t end, int *nid) | ||
301 | { | 508 | { |
302 | u64 base, res_base; | 509 | #ifdef CONFIG_ARCH_POPULATES_NODE_MAP |
303 | long j; | 510 | /* |
511 | * This code originates from sparc which really wants use to walk by addresses | ||
512 | * and returns the nid. This is not very convenient for early_pfn_map[] users | ||
513 | * as the map isn't sorted yet, and it really wants to be walked by nid. | ||
514 | * | ||
515 | * For now, I implement the inefficient method below which walks the early | ||
516 | * map multiple times. Eventually we may want to use an ARCH config option | ||
517 | * to implement a completely different method for both case. | ||
518 | */ | ||
519 | unsigned long start_pfn, end_pfn; | ||
520 | int i; | ||
304 | 521 | ||
305 | base = memblock_align_down((end - size), align); | 522 | for (i = 0; i < MAX_NUMNODES; i++) { |
306 | while (start <= base) { | 523 | get_pfn_range_for_nid(i, &start_pfn, &end_pfn); |
307 | j = memblock_overlaps_region(&memblock.reserved, base, size); | 524 | if (start < PFN_PHYS(start_pfn) || start >= PFN_PHYS(end_pfn)) |
308 | if (j < 0) { | 525 | continue; |
309 | /* this area isn't reserved, take it */ | 526 | *nid = i; |
310 | if (memblock_add_region(&memblock.reserved, base, size) < 0) | 527 | return min(end, PFN_PHYS(end_pfn)); |
311 | base = ~(u64)0; | ||
312 | return base; | ||
313 | } | ||
314 | res_base = memblock.reserved.region[j].base; | ||
315 | if (res_base < size) | ||
316 | break; | ||
317 | base = memblock_align_down(res_base - size, align); | ||
318 | } | 528 | } |
529 | #endif | ||
530 | *nid = 0; | ||
319 | 531 | ||
320 | return ~(u64)0; | 532 | return end; |
321 | } | 533 | } |
322 | 534 | ||
323 | static u64 __init memblock_alloc_nid_region(struct memblock_property *mp, | 535 | static phys_addr_t __init memblock_alloc_nid_region(struct memblock_region *mp, |
324 | u64 (*nid_range)(u64, u64, int *), | 536 | phys_addr_t size, |
325 | u64 size, u64 align, int nid) | 537 | phys_addr_t align, int nid) |
326 | { | 538 | { |
327 | u64 start, end; | 539 | phys_addr_t start, end; |
328 | 540 | ||
329 | start = mp->base; | 541 | start = mp->base; |
330 | end = start + mp->size; | 542 | end = start + mp->size; |
331 | 543 | ||
332 | start = memblock_align_up(start, align); | 544 | start = memblock_align_up(start, align); |
333 | while (start < end) { | 545 | while (start < end) { |
334 | u64 this_end; | 546 | phys_addr_t this_end; |
335 | int this_nid; | 547 | int this_nid; |
336 | 548 | ||
337 | this_end = nid_range(start, end, &this_nid); | 549 | this_end = memblock_nid_range(start, end, &this_nid); |
338 | if (this_nid == nid) { | 550 | if (this_nid == nid) { |
339 | u64 ret = memblock_alloc_nid_unreserved(start, this_end, | 551 | phys_addr_t ret = memblock_find_region(start, this_end, size, align); |
340 | size, align); | 552 | if (ret != MEMBLOCK_ERROR && |
341 | if (ret != ~(u64)0) | 553 | memblock_add_region(&memblock.reserved, ret, size) >= 0) |
342 | return ret; | 554 | return ret; |
343 | } | 555 | } |
344 | start = this_end; | 556 | start = this_end; |
345 | } | 557 | } |
346 | 558 | ||
347 | return ~(u64)0; | 559 | return MEMBLOCK_ERROR; |
348 | } | 560 | } |
349 | 561 | ||
350 | u64 __init memblock_alloc_nid(u64 size, u64 align, int nid, | 562 | phys_addr_t __init memblock_alloc_nid(phys_addr_t size, phys_addr_t align, int nid) |
351 | u64 (*nid_range)(u64 start, u64 end, int *nid)) | ||
352 | { | 563 | { |
353 | struct memblock_region *mem = &memblock.memory; | 564 | struct memblock_type *mem = &memblock.memory; |
354 | int i; | 565 | int i; |
355 | 566 | ||
356 | BUG_ON(0 == size); | 567 | BUG_ON(0 == size); |
357 | 568 | ||
569 | /* We align the size to limit fragmentation. Without this, a lot of | ||
570 | * small allocs quickly eat up the whole reserve array on sparc | ||
571 | */ | ||
358 | size = memblock_align_up(size, align); | 572 | size = memblock_align_up(size, align); |
359 | 573 | ||
574 | /* We do a bottom-up search for a region with the right | ||
575 | * nid since that's easier considering how memblock_nid_range() | ||
576 | * works | ||
577 | */ | ||
360 | for (i = 0; i < mem->cnt; i++) { | 578 | for (i = 0; i < mem->cnt; i++) { |
361 | u64 ret = memblock_alloc_nid_region(&mem->region[i], | 579 | phys_addr_t ret = memblock_alloc_nid_region(&mem->regions[i], |
362 | nid_range, | ||
363 | size, align, nid); | 580 | size, align, nid); |
364 | if (ret != ~(u64)0) | 581 | if (ret != MEMBLOCK_ERROR) |
365 | return ret; | 582 | return ret; |
366 | } | 583 | } |
367 | 584 | ||
368 | return memblock_alloc(size, align); | 585 | return 0; |
369 | } | ||
370 | |||
371 | u64 __init memblock_alloc(u64 size, u64 align) | ||
372 | { | ||
373 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); | ||
374 | } | 586 | } |
375 | 587 | ||
376 | u64 __init memblock_alloc_base(u64 size, u64 align, u64 max_addr) | 588 | phys_addr_t __init memblock_alloc_try_nid(phys_addr_t size, phys_addr_t align, int nid) |
377 | { | 589 | { |
378 | u64 alloc; | 590 | phys_addr_t res = memblock_alloc_nid(size, align, nid); |
379 | |||
380 | alloc = __memblock_alloc_base(size, align, max_addr); | ||
381 | 591 | ||
382 | if (alloc == 0) | 592 | if (res) |
383 | panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", | 593 | return res; |
384 | (unsigned long long) size, (unsigned long long) max_addr); | 594 | return memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ANYWHERE); |
385 | |||
386 | return alloc; | ||
387 | } | 595 | } |
388 | 596 | ||
389 | u64 __init __memblock_alloc_base(u64 size, u64 align, u64 max_addr) | ||
390 | { | ||
391 | long i, j; | ||
392 | u64 base = 0; | ||
393 | u64 res_base; | ||
394 | |||
395 | BUG_ON(0 == size); | ||
396 | 597 | ||
397 | size = memblock_align_up(size, align); | 598 | /* |
398 | 599 | * Remaining API functions | |
399 | /* On some platforms, make sure we allocate lowmem */ | 600 | */ |
400 | /* Note that MEMBLOCK_REAL_LIMIT may be MEMBLOCK_ALLOC_ANYWHERE */ | ||
401 | if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) | ||
402 | max_addr = MEMBLOCK_REAL_LIMIT; | ||
403 | |||
404 | for (i = memblock.memory.cnt - 1; i >= 0; i--) { | ||
405 | u64 memblockbase = memblock.memory.region[i].base; | ||
406 | u64 memblocksize = memblock.memory.region[i].size; | ||
407 | |||
408 | if (memblocksize < size) | ||
409 | continue; | ||
410 | if (max_addr == MEMBLOCK_ALLOC_ANYWHERE) | ||
411 | base = memblock_align_down(memblockbase + memblocksize - size, align); | ||
412 | else if (memblockbase < max_addr) { | ||
413 | base = min(memblockbase + memblocksize, max_addr); | ||
414 | base = memblock_align_down(base - size, align); | ||
415 | } else | ||
416 | continue; | ||
417 | |||
418 | while (base && memblockbase <= base) { | ||
419 | j = memblock_overlaps_region(&memblock.reserved, base, size); | ||
420 | if (j < 0) { | ||
421 | /* this area isn't reserved, take it */ | ||
422 | if (memblock_add_region(&memblock.reserved, base, size) < 0) | ||
423 | return 0; | ||
424 | return base; | ||
425 | } | ||
426 | res_base = memblock.reserved.region[j].base; | ||
427 | if (res_base < size) | ||
428 | break; | ||
429 | base = memblock_align_down(res_base - size, align); | ||
430 | } | ||
431 | } | ||
432 | return 0; | ||
433 | } | ||
434 | 601 | ||
435 | /* You must call memblock_analyze() before this. */ | 602 | /* You must call memblock_analyze() before this. */ |
436 | u64 __init memblock_phys_mem_size(void) | 603 | phys_addr_t __init memblock_phys_mem_size(void) |
437 | { | 604 | { |
438 | return memblock.memory.size; | 605 | return memblock.memory_size; |
439 | } | 606 | } |
440 | 607 | ||
441 | u64 memblock_end_of_DRAM(void) | 608 | phys_addr_t __init_memblock memblock_end_of_DRAM(void) |
442 | { | 609 | { |
443 | int idx = memblock.memory.cnt - 1; | 610 | int idx = memblock.memory.cnt - 1; |
444 | 611 | ||
445 | return (memblock.memory.region[idx].base + memblock.memory.region[idx].size); | 612 | return (memblock.memory.regions[idx].base + memblock.memory.regions[idx].size); |
446 | } | 613 | } |
447 | 614 | ||
448 | /* You must call memblock_analyze() after this. */ | 615 | /* You must call memblock_analyze() after this. */ |
449 | void __init memblock_enforce_memory_limit(u64 memory_limit) | 616 | void __init memblock_enforce_memory_limit(phys_addr_t memory_limit) |
450 | { | 617 | { |
451 | unsigned long i; | 618 | unsigned long i; |
452 | u64 limit; | 619 | phys_addr_t limit; |
453 | struct memblock_property *p; | 620 | struct memblock_region *p; |
454 | 621 | ||
455 | if (!memory_limit) | 622 | if (!memory_limit) |
456 | return; | 623 | return; |
@@ -458,24 +625,21 @@ void __init memblock_enforce_memory_limit(u64 memory_limit) | |||
458 | /* Truncate the memblock regions to satisfy the memory limit. */ | 625 | /* Truncate the memblock regions to satisfy the memory limit. */ |
459 | limit = memory_limit; | 626 | limit = memory_limit; |
460 | for (i = 0; i < memblock.memory.cnt; i++) { | 627 | for (i = 0; i < memblock.memory.cnt; i++) { |
461 | if (limit > memblock.memory.region[i].size) { | 628 | if (limit > memblock.memory.regions[i].size) { |
462 | limit -= memblock.memory.region[i].size; | 629 | limit -= memblock.memory.regions[i].size; |
463 | continue; | 630 | continue; |
464 | } | 631 | } |
465 | 632 | ||
466 | memblock.memory.region[i].size = limit; | 633 | memblock.memory.regions[i].size = limit; |
467 | memblock.memory.cnt = i + 1; | 634 | memblock.memory.cnt = i + 1; |
468 | break; | 635 | break; |
469 | } | 636 | } |
470 | 637 | ||
471 | if (memblock.memory.region[0].size < memblock.rmo_size) | ||
472 | memblock.rmo_size = memblock.memory.region[0].size; | ||
473 | |||
474 | memory_limit = memblock_end_of_DRAM(); | 638 | memory_limit = memblock_end_of_DRAM(); |
475 | 639 | ||
476 | /* And truncate any reserves above the limit also. */ | 640 | /* And truncate any reserves above the limit also. */ |
477 | for (i = 0; i < memblock.reserved.cnt; i++) { | 641 | for (i = 0; i < memblock.reserved.cnt; i++) { |
478 | p = &memblock.reserved.region[i]; | 642 | p = &memblock.reserved.regions[i]; |
479 | 643 | ||
480 | if (p->base > memory_limit) | 644 | if (p->base > memory_limit) |
481 | p->size = 0; | 645 | p->size = 0; |
@@ -489,53 +653,190 @@ void __init memblock_enforce_memory_limit(u64 memory_limit) | |||
489 | } | 653 | } |
490 | } | 654 | } |
491 | 655 | ||
492 | int __init memblock_is_reserved(u64 addr) | 656 | static int __init_memblock memblock_search(struct memblock_type *type, phys_addr_t addr) |
657 | { | ||
658 | unsigned int left = 0, right = type->cnt; | ||
659 | |||
660 | do { | ||
661 | unsigned int mid = (right + left) / 2; | ||
662 | |||
663 | if (addr < type->regions[mid].base) | ||
664 | right = mid; | ||
665 | else if (addr >= (type->regions[mid].base + | ||
666 | type->regions[mid].size)) | ||
667 | left = mid + 1; | ||
668 | else | ||
669 | return mid; | ||
670 | } while (left < right); | ||
671 | return -1; | ||
672 | } | ||
673 | |||
674 | int __init memblock_is_reserved(phys_addr_t addr) | ||
675 | { | ||
676 | return memblock_search(&memblock.reserved, addr) != -1; | ||
677 | } | ||
678 | |||
679 | int __init_memblock memblock_is_memory(phys_addr_t addr) | ||
680 | { | ||
681 | return memblock_search(&memblock.memory, addr) != -1; | ||
682 | } | ||
683 | |||
684 | int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) | ||
685 | { | ||
686 | int idx = memblock_search(&memblock.reserved, base); | ||
687 | |||
688 | if (idx == -1) | ||
689 | return 0; | ||
690 | return memblock.reserved.regions[idx].base <= base && | ||
691 | (memblock.reserved.regions[idx].base + | ||
692 | memblock.reserved.regions[idx].size) >= (base + size); | ||
693 | } | ||
694 | |||
695 | int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) | ||
696 | { | ||
697 | return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; | ||
698 | } | ||
699 | |||
700 | |||
701 | void __init_memblock memblock_set_current_limit(phys_addr_t limit) | ||
493 | { | 702 | { |
703 | memblock.current_limit = limit; | ||
704 | } | ||
705 | |||
706 | static void __init_memblock memblock_dump(struct memblock_type *region, char *name) | ||
707 | { | ||
708 | unsigned long long base, size; | ||
494 | int i; | 709 | int i; |
495 | 710 | ||
496 | for (i = 0; i < memblock.reserved.cnt; i++) { | 711 | pr_info(" %s.cnt = 0x%lx\n", name, region->cnt); |
497 | u64 upper = memblock.reserved.region[i].base + | 712 | |
498 | memblock.reserved.region[i].size - 1; | 713 | for (i = 0; i < region->cnt; i++) { |
499 | if ((addr >= memblock.reserved.region[i].base) && (addr <= upper)) | 714 | base = region->regions[i].base; |
500 | return 1; | 715 | size = region->regions[i].size; |
716 | |||
717 | pr_info(" %s[%#x]\t[%#016llx-%#016llx], %#llx bytes\n", | ||
718 | name, i, base, base + size - 1, size); | ||
501 | } | 719 | } |
502 | return 0; | ||
503 | } | 720 | } |
504 | 721 | ||
505 | int memblock_is_region_reserved(u64 base, u64 size) | 722 | void __init_memblock memblock_dump_all(void) |
506 | { | 723 | { |
507 | return memblock_overlaps_region(&memblock.reserved, base, size) >= 0; | 724 | if (!memblock_debug) |
725 | return; | ||
726 | |||
727 | pr_info("MEMBLOCK configuration:\n"); | ||
728 | pr_info(" memory size = 0x%llx\n", (unsigned long long)memblock.memory_size); | ||
729 | |||
730 | memblock_dump(&memblock.memory, "memory"); | ||
731 | memblock_dump(&memblock.reserved, "reserved"); | ||
508 | } | 732 | } |
509 | 733 | ||
510 | /* | 734 | void __init memblock_analyze(void) |
511 | * Given a <base, len>, find which memory regions belong to this range. | ||
512 | * Adjust the request and return a contiguous chunk. | ||
513 | */ | ||
514 | int memblock_find(struct memblock_property *res) | ||
515 | { | 735 | { |
516 | int i; | 736 | int i; |
517 | u64 rstart, rend; | ||
518 | 737 | ||
519 | rstart = res->base; | 738 | /* Check marker in the unused last array entry */ |
520 | rend = rstart + res->size - 1; | 739 | WARN_ON(memblock_memory_init_regions[INIT_MEMBLOCK_REGIONS].base |
740 | != (phys_addr_t)RED_INACTIVE); | ||
741 | WARN_ON(memblock_reserved_init_regions[INIT_MEMBLOCK_REGIONS].base | ||
742 | != (phys_addr_t)RED_INACTIVE); | ||
743 | |||
744 | memblock.memory_size = 0; | ||
745 | |||
746 | for (i = 0; i < memblock.memory.cnt; i++) | ||
747 | memblock.memory_size += memblock.memory.regions[i].size; | ||
748 | |||
749 | /* We allow resizing from there */ | ||
750 | memblock_can_resize = 1; | ||
751 | } | ||
752 | |||
753 | void __init memblock_init(void) | ||
754 | { | ||
755 | static int init_done __initdata = 0; | ||
756 | |||
757 | if (init_done) | ||
758 | return; | ||
759 | init_done = 1; | ||
760 | |||
761 | /* Hookup the initial arrays */ | ||
762 | memblock.memory.regions = memblock_memory_init_regions; | ||
763 | memblock.memory.max = INIT_MEMBLOCK_REGIONS; | ||
764 | memblock.reserved.regions = memblock_reserved_init_regions; | ||
765 | memblock.reserved.max = INIT_MEMBLOCK_REGIONS; | ||
766 | |||
767 | /* Write a marker in the unused last array entry */ | ||
768 | memblock.memory.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE; | ||
769 | memblock.reserved.regions[INIT_MEMBLOCK_REGIONS].base = (phys_addr_t)RED_INACTIVE; | ||
770 | |||
771 | /* Create a dummy zero size MEMBLOCK which will get coalesced away later. | ||
772 | * This simplifies the memblock_add() code below... | ||
773 | */ | ||
774 | memblock.memory.regions[0].base = 0; | ||
775 | memblock.memory.regions[0].size = 0; | ||
776 | memblock.memory.cnt = 1; | ||
777 | |||
778 | /* Ditto. */ | ||
779 | memblock.reserved.regions[0].base = 0; | ||
780 | memblock.reserved.regions[0].size = 0; | ||
781 | memblock.reserved.cnt = 1; | ||
782 | |||
783 | memblock.current_limit = MEMBLOCK_ALLOC_ANYWHERE; | ||
784 | } | ||
785 | |||
786 | static int __init early_memblock(char *p) | ||
787 | { | ||
788 | if (p && strstr(p, "debug")) | ||
789 | memblock_debug = 1; | ||
790 | return 0; | ||
791 | } | ||
792 | early_param("memblock", early_memblock); | ||
793 | |||
794 | #if defined(CONFIG_DEBUG_FS) && !defined(ARCH_DISCARD_MEMBLOCK) | ||
795 | |||
796 | static int memblock_debug_show(struct seq_file *m, void *private) | ||
797 | { | ||
798 | struct memblock_type *type = m->private; | ||
799 | struct memblock_region *reg; | ||
800 | int i; | ||
801 | |||
802 | for (i = 0; i < type->cnt; i++) { | ||
803 | reg = &type->regions[i]; | ||
804 | seq_printf(m, "%4d: ", i); | ||
805 | if (sizeof(phys_addr_t) == 4) | ||
806 | seq_printf(m, "0x%08lx..0x%08lx\n", | ||
807 | (unsigned long)reg->base, | ||
808 | (unsigned long)(reg->base + reg->size - 1)); | ||
809 | else | ||
810 | seq_printf(m, "0x%016llx..0x%016llx\n", | ||
811 | (unsigned long long)reg->base, | ||
812 | (unsigned long long)(reg->base + reg->size - 1)); | ||
521 | 813 | ||
522 | for (i = 0; i < memblock.memory.cnt; i++) { | ||
523 | u64 start = memblock.memory.region[i].base; | ||
524 | u64 end = start + memblock.memory.region[i].size - 1; | ||
525 | |||
526 | if (start > rend) | ||
527 | return -1; | ||
528 | |||
529 | if ((end >= rstart) && (start < rend)) { | ||
530 | /* adjust the request */ | ||
531 | if (rstart < start) | ||
532 | rstart = start; | ||
533 | if (rend > end) | ||
534 | rend = end; | ||
535 | res->base = rstart; | ||
536 | res->size = rend - rstart + 1; | ||
537 | return 0; | ||
538 | } | ||
539 | } | 814 | } |
540 | return -1; | 815 | return 0; |
816 | } | ||
817 | |||
818 | static int memblock_debug_open(struct inode *inode, struct file *file) | ||
819 | { | ||
820 | return single_open(file, memblock_debug_show, inode->i_private); | ||
541 | } | 821 | } |
822 | |||
823 | static const struct file_operations memblock_debug_fops = { | ||
824 | .open = memblock_debug_open, | ||
825 | .read = seq_read, | ||
826 | .llseek = seq_lseek, | ||
827 | .release = single_release, | ||
828 | }; | ||
829 | |||
830 | static int __init memblock_init_debugfs(void) | ||
831 | { | ||
832 | struct dentry *root = debugfs_create_dir("memblock", NULL); | ||
833 | if (!root) | ||
834 | return -ENXIO; | ||
835 | debugfs_create_file("memory", S_IRUGO, root, &memblock.memory, &memblock_debug_fops); | ||
836 | debugfs_create_file("reserved", S_IRUGO, root, &memblock.reserved, &memblock_debug_fops); | ||
837 | |||
838 | return 0; | ||
839 | } | ||
840 | __initcall(memblock_init_debugfs); | ||
841 | |||
842 | #endif /* CONFIG_DEBUG_FS */ | ||
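For orientation, a minimal sketch of how an early boot path might drive the interface reworked above. The memblock_* calls are the ones visible in this file; the caller, the addresses and the sizes are hypothetical, and a real architecture would feed firmware-provided ranges instead:

	#include <linux/memblock.h>

	static void __init hypothetical_setup_memory(void)
	{
		phys_addr_t pgt;

		memblock_init();			/* hook up the static region arrays */

		/* made-up ranges; real callers use firmware data */
		memblock_add(0x00000000, 0x40000000);	/* 1G of RAM */
		memblock_reserve(0x01000000, 0x00400000); /* e.g. the kernel image */

		memblock_analyze();			/* totals memory_size, enables array resizing */
		memblock_set_current_limit(0x20000000);	/* keep early allocations below 512M */

		/*
		 * Allocations are top-down and honour current_limit;
		 * memblock_alloc() panics on failure, whereas
		 * __memblock_alloc_base() returns 0 instead.
		 */
		pgt = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
		/* pgt would now back an early page table, for example */

		memblock_dump_all();			/* verbose only with memblock=debug */
	}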
diff --git a/mm/memory.c b/mm/memory.c
index 0e18b4d649ec..98b58fecedef 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3185,7 +3185,7 @@ static inline int handle_pte_fault(struct mm_struct *mm, | |||
3185 | * with threads. | 3185 | * with threads. |
3186 | */ | 3186 | */ |
3187 | if (flags & FAULT_FLAG_WRITE) | 3187 | if (flags & FAULT_FLAG_WRITE) |
3188 | flush_tlb_page(vma, address); | 3188 | flush_tlb_fix_spurious_fault(vma, address); |
3189 | } | 3189 | } |
3190 | unlock: | 3190 | unlock: |
3191 | pte_unmap_unlock(pte, ptl); | 3191 | pte_unmap_unlock(pte, ptl); |
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index dd186c1a5d53..d4e940a26945 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -840,7 +840,6 @@ repeat: | |||
840 | ret = 0; | 840 | ret = 0; |
841 | if (drain) { | 841 | if (drain) { |
842 | lru_add_drain_all(); | 842 | lru_add_drain_all(); |
843 | flush_scheduled_work(); | ||
844 | cond_resched(); | 843 | cond_resched(); |
845 | drain_all_pages(); | 844 | drain_all_pages(); |
846 | } | 845 | } |
@@ -862,7 +861,6 @@ repeat: | |||
862 | } | 861 | } |
863 | /* drain all zone's lru pagevec, this is asyncronous... */ | 862 | /* drain all zone's lru pagevec, this is asyncronous... */ |
864 | lru_add_drain_all(); | 863 | lru_add_drain_all(); |
865 | flush_scheduled_work(); | ||
866 | yield(); | 864 | yield(); |
867 | /* drain pcp pages , this is synchrouns. */ | 865 | /* drain pcp pages , this is synchrouns. */ |
868 | drain_all_pages(); | 866 | drain_all_pages(); |
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f12ad1836abe..2a362c52fdf4 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/pagemap.h> | 21 | #include <linux/pagemap.h> |
22 | #include <linux/jiffies.h> | 22 | #include <linux/jiffies.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/memblock.h> | ||
24 | #include <linux/compiler.h> | 25 | #include <linux/compiler.h> |
25 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
26 | #include <linux/kmemcheck.h> | 27 | #include <linux/kmemcheck.h> |
@@ -3636,6 +3637,41 @@ void __init free_bootmem_with_active_regions(int nid, | |||
3636 | } | 3637 | } |
3637 | } | 3638 | } |
3638 | 3639 | ||
3640 | #ifdef CONFIG_HAVE_MEMBLOCK | ||
3641 | u64 __init find_memory_core_early(int nid, u64 size, u64 align, | ||
3642 | u64 goal, u64 limit) | ||
3643 | { | ||
3644 | int i; | ||
3645 | |||
3646 | /* Need to go over early_node_map to find out good range for node */ | ||
3647 | for_each_active_range_index_in_nid(i, nid) { | ||
3648 | u64 addr; | ||
3649 | u64 ei_start, ei_last; | ||
3650 | u64 final_start, final_end; | ||
3651 | |||
3652 | ei_last = early_node_map[i].end_pfn; | ||
3653 | ei_last <<= PAGE_SHIFT; | ||
3654 | ei_start = early_node_map[i].start_pfn; | ||
3655 | ei_start <<= PAGE_SHIFT; | ||
3656 | |||
3657 | final_start = max(ei_start, goal); | ||
3658 | final_end = min(ei_last, limit); | ||
3659 | |||
3660 | if (final_start >= final_end) | ||
3661 | continue; | ||
3662 | |||
3663 | addr = memblock_find_in_range(final_start, final_end, size, align); | ||
3664 | |||
3665 | if (addr == MEMBLOCK_ERROR) | ||
3666 | continue; | ||
3667 | |||
3668 | return addr; | ||
3669 | } | ||
3670 | |||
3671 | return MEMBLOCK_ERROR; | ||
3672 | } | ||
3673 | #endif | ||
3674 | |||
3639 | int __init add_from_early_node_map(struct range *range, int az, | 3675 | int __init add_from_early_node_map(struct range *range, int az, |
3640 | int nr_range, int nid) | 3676 | int nr_range, int nid) |
3641 | { | 3677 | { |
@@ -3655,46 +3691,26 @@ int __init add_from_early_node_map(struct range *range, int az, | |||
3655 | void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, | 3691 | void * __init __alloc_memory_core_early(int nid, u64 size, u64 align, |
3656 | u64 goal, u64 limit) | 3692 | u64 goal, u64 limit) |
3657 | { | 3693 | { |
3658 | int i; | ||
3659 | void *ptr; | 3694 | void *ptr; |
3695 | u64 addr; | ||
3660 | 3696 | ||
3661 | if (limit > get_max_mapped()) | 3697 | if (limit > memblock.current_limit) |
3662 | limit = get_max_mapped(); | 3698 | limit = memblock.current_limit; |
3663 | |||
3664 | /* need to go over early_node_map to find out good range for node */ | ||
3665 | for_each_active_range_index_in_nid(i, nid) { | ||
3666 | u64 addr; | ||
3667 | u64 ei_start, ei_last; | ||
3668 | |||
3669 | ei_last = early_node_map[i].end_pfn; | ||
3670 | ei_last <<= PAGE_SHIFT; | ||
3671 | ei_start = early_node_map[i].start_pfn; | ||
3672 | ei_start <<= PAGE_SHIFT; | ||
3673 | addr = find_early_area(ei_start, ei_last, | ||
3674 | goal, limit, size, align); | ||
3675 | |||
3676 | if (addr == -1ULL) | ||
3677 | continue; | ||
3678 | 3699 | ||
3679 | #if 0 | 3700 | addr = find_memory_core_early(nid, size, align, goal, limit); |
3680 | printk(KERN_DEBUG "alloc (nid=%d %llx - %llx) (%llx - %llx) %llx %llx => %llx\n", | ||
3681 | nid, | ||
3682 | ei_start, ei_last, goal, limit, size, | ||
3683 | align, addr); | ||
3684 | #endif | ||
3685 | 3701 | ||
3686 | ptr = phys_to_virt(addr); | 3702 | if (addr == MEMBLOCK_ERROR) |
3687 | memset(ptr, 0, size); | 3703 | return NULL; |
3688 | reserve_early_without_check(addr, addr + size, "BOOTMEM"); | ||
3689 | /* | ||
3690 | * The min_count is set to 0 so that bootmem allocated blocks | ||
3691 | * are never reported as leaks. | ||
3692 | */ | ||
3693 | kmemleak_alloc(ptr, size, 0, 0); | ||
3694 | return ptr; | ||
3695 | } | ||
3696 | 3704 | ||
3697 | return NULL; | 3705 | ptr = phys_to_virt(addr); |
3706 | memset(ptr, 0, size); | ||
3707 | memblock_x86_reserve_range(addr, addr + size, "BOOTMEM"); | ||
3708 | /* | ||
3709 | * The min_count is set to 0 so that bootmem allocated blocks | ||
3710 | * are never reported as leaks. | ||
3711 | */ | ||
3712 | kmemleak_alloc(ptr, size, 0, 0); | ||
3713 | return ptr; | ||
3698 | } | 3714 | } |
3699 | #endif | 3715 | #endif |
3700 | 3716 | ||
diff --git a/mm/percpu-km.c b/mm/percpu-km.c
index df680855540a..89633fefc6a2 100644
--- a/mm/percpu-km.c
+++ b/mm/percpu-km.c
@@ -27,7 +27,7 @@ | |||
27 | * chunk size is not aligned. percpu-km code will whine about it. | 27 | * chunk size is not aligned. percpu-km code will whine about it. |
28 | */ | 28 | */ |
29 | 29 | ||
30 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | 30 | #if defined(CONFIG_SMP) && defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) |
31 | #error "contiguous percpu allocation is incompatible with paged first chunk" | 31 | #error "contiguous percpu allocation is incompatible with paged first chunk" |
32 | #endif | 32 | #endif |
33 | 33 | ||
@@ -35,7 +35,11 @@ | |||
35 | 35 | ||
36 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) | 36 | static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size) |
37 | { | 37 | { |
38 | /* noop */ | 38 | unsigned int cpu; |
39 | |||
40 | for_each_possible_cpu(cpu) | ||
41 | memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size); | ||
42 | |||
39 | return 0; | 43 | return 0; |
40 | } | 44 | } |
41 | 45 | ||
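The km allocator hands out regions from chunks whose pages stay allocated across free/alloc cycles, so pcpu_populate_chunk() now clears the region once per possible CPU; that is what keeps the zero-fill guarantee (documented further down in this patch) true when an area is reused. A hypothetical debugging snippet that exercises the guarantee:

	/* Made-up sanity check, not part of the patch. */
	static int __init pcpu_zero_fill_check(void)
	{
		long __percpu *counter = alloc_percpu(long);
		unsigned int cpu;

		if (!counter)
			return -ENOMEM;

		/* every possible CPU's copy must read back as zero */
		for_each_possible_cpu(cpu)
			WARN_ON(*per_cpu_ptr(counter, cpu) != 0);

		free_percpu(counter);
		return 0;
	}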
diff --git a/mm/percpu.c b/mm/percpu.c index c76ef3891e0d..6fc9015534f8 100644 --- a/mm/percpu.c +++ b/mm/percpu.c | |||
@@ -76,6 +76,7 @@ | |||
76 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ | 76 | #define PCPU_SLOT_BASE_SHIFT 5 /* 1-31 shares the same slot */ |
77 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ | 77 | #define PCPU_DFL_MAP_ALLOC 16 /* start a map with 16 ents */ |
78 | 78 | ||
79 | #ifdef CONFIG_SMP | ||
79 | /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ | 80 | /* default addr <-> pcpu_ptr mapping, override in asm/percpu.h if necessary */ |
80 | #ifndef __addr_to_pcpu_ptr | 81 | #ifndef __addr_to_pcpu_ptr |
81 | #define __addr_to_pcpu_ptr(addr) \ | 82 | #define __addr_to_pcpu_ptr(addr) \ |
@@ -89,6 +90,11 @@ | |||
89 | (unsigned long)pcpu_base_addr - \ | 90 | (unsigned long)pcpu_base_addr - \ |
90 | (unsigned long)__per_cpu_start) | 91 | (unsigned long)__per_cpu_start) |
91 | #endif | 92 | #endif |
93 | #else /* CONFIG_SMP */ | ||
94 | /* on UP, it's always identity mapped */ | ||
95 | #define __addr_to_pcpu_ptr(addr) (void __percpu *)(addr) | ||
96 | #define __pcpu_ptr_to_addr(ptr) (void __force *)(ptr) | ||
97 | #endif /* CONFIG_SMP */ | ||
92 | 98 | ||
93 | struct pcpu_chunk { | 99 | struct pcpu_chunk { |
94 | struct list_head list; /* linked to pcpu_slot lists */ | 100 | struct list_head list; /* linked to pcpu_slot lists */ |
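On UP there is exactly one copy of every percpu object, so the two new macros reduce pointer encoding to plain casts: a percpu pointer and the address it stands for are numerically identical. A small illustration with a hypothetical variable:

	/* Illustration only. */
	static DEFINE_PER_CPU(int, demo_counter);

	static void demo_bump(void)
	{
		/*
		 * On UP, per_cpu_ptr() boils down to the identity casts
		 * above, so this touches the one and only instance of
		 * demo_counter.
		 */
		(*per_cpu_ptr(&demo_counter, 0))++;
	}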
@@ -820,8 +826,8 @@ fail_unlock_mutex: | |||
820 | * @size: size of area to allocate in bytes | 826 | * @size: size of area to allocate in bytes |
821 | * @align: alignment of area (max PAGE_SIZE) | 827 | * @align: alignment of area (max PAGE_SIZE) |
822 | * | 828 | * |
823 | * Allocate percpu area of @size bytes aligned at @align. Might | 829 | * Allocate zero-filled percpu area of @size bytes aligned at @align. |
824 | * sleep. Might trigger writeouts. | 830 | * Might sleep. Might trigger writeouts. |
825 | * | 831 | * |
826 | * CONTEXT: | 832 | * CONTEXT: |
827 | * Does GFP_KERNEL allocation. | 833 | * Does GFP_KERNEL allocation. |
@@ -840,9 +846,10 @@ EXPORT_SYMBOL_GPL(__alloc_percpu); | |||
840 | * @size: size of area to allocate in bytes | 846 | * @size: size of area to allocate in bytes |
841 | * @align: alignment of area (max PAGE_SIZE) | 847 | * @align: alignment of area (max PAGE_SIZE) |
842 | * | 848 | * |
843 | * Allocate percpu area of @size bytes aligned at @align from reserved | 849 | * Allocate zero-filled percpu area of @size bytes aligned at @align |
844 | * percpu area if arch has set it up; otherwise, allocation is served | 850 | * from reserved percpu area if arch has set it up; otherwise, |
845 | * from the same dynamic area. Might sleep. Might trigger writeouts. | 851 | * allocation is served from the same dynamic area. Might sleep. |
852 | * Might trigger writeouts. | ||
846 | * | 853 | * |
847 | * CONTEXT: | 854 | * CONTEXT: |
848 | * Does GFP_KERNEL allocation. | 855 | * Does GFP_KERNEL allocation. |
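Since the allocator now clears every area before handing it out, callers can rely on fresh percpu memory starting at zero; the remaining constraint is that the allocation may sleep, so it must come from process context. A hedged usage sketch, where the struct and functions are made up:

	struct demo_stats {
		unsigned long rx;
		unsigned long tx;
	};

	static struct demo_stats __percpu *demo_alloc_stats(void)
	{
		/* GFP_KERNEL allocation: may sleep, zero-filled on return */
		return alloc_percpu(struct demo_stats);
	}

	static void demo_count_rx(struct demo_stats __percpu *stats)
	{
		struct demo_stats *s = per_cpu_ptr(stats, get_cpu());

		s->rx++;		/* preemption disabled by get_cpu() */
		put_cpu();
	}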
@@ -949,6 +956,7 @@ EXPORT_SYMBOL_GPL(free_percpu); | |||
949 | */ | 956 | */ |
950 | bool is_kernel_percpu_address(unsigned long addr) | 957 | bool is_kernel_percpu_address(unsigned long addr) |
951 | { | 958 | { |
959 | #ifdef CONFIG_SMP | ||
952 | const size_t static_size = __per_cpu_end - __per_cpu_start; | 960 | const size_t static_size = __per_cpu_end - __per_cpu_start; |
953 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); | 961 | void __percpu *base = __addr_to_pcpu_ptr(pcpu_base_addr); |
954 | unsigned int cpu; | 962 | unsigned int cpu; |
@@ -959,6 +967,8 @@ bool is_kernel_percpu_address(unsigned long addr) | |||
959 | if ((void *)addr >= start && (void *)addr < start + static_size) | 967 | if ((void *)addr >= start && (void *)addr < start + static_size) |
960 | return true; | 968 | return true; |
961 | } | 969 | } |
970 | #endif | ||
971 | /* on UP, can't distinguish from other static vars, always false */ | ||
962 | return false; | 972 | return false; |
963 | } | 973 | } |
964 | 974 | ||
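On SMP the check walks every possible CPU's unit and tests whether the address falls inside that copy of the static percpu area; on UP static percpu variables are ordinary statics, so the function now simply answers false. A hypothetical caller-side check:

	/* Hypothetical illustration of what the helper answers. */
	static DEFINE_PER_CPU(int, demo_flag);

	static void demo_percpu_address_check(void)
	{
	#ifdef CONFIG_SMP
		unsigned int cpu;

		/* every CPU's copy of demo_flag lies inside the static area */
		for_each_possible_cpu(cpu)
			WARN_ON(!is_kernel_percpu_address(
					(unsigned long)per_cpu_ptr(&demo_flag, cpu)));
	#endif
		/* on UP the helper deliberately returns false for any address */
	}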
@@ -1067,161 +1077,6 @@ void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai) | |||
1067 | } | 1077 | } |
1068 | 1078 | ||
1069 | /** | 1079 | /** |
1070 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs | ||
1071 | * @reserved_size: the size of reserved percpu area in bytes | ||
1072 | * @dyn_size: minimum free size for dynamic allocation in bytes | ||
1073 | * @atom_size: allocation atom size | ||
1074 | * @cpu_distance_fn: callback to determine distance between cpus, optional | ||
1075 | * | ||
1076 | * This function determines grouping of units, their mappings to cpus | ||
1077 | * and other parameters considering needed percpu size, allocation | ||
1078 | * atom size and distances between CPUs. | ||
1079 | * | ||
1080 | * Groups are always multiples of atom size and CPUs which are of | ||
1081 | * LOCAL_DISTANCE both ways are grouped together and share space for | ||
1082 | * units in the same group. The returned configuration is guaranteed | ||
1083 | * to have CPUs on different nodes on different groups and >=75% usage | ||
1084 | * of allocated virtual address space. | ||
1085 | * | ||
1086 | * RETURNS: | ||
1087 | * On success, pointer to the new allocation_info is returned. On | ||
1088 | * failure, ERR_PTR value is returned. | ||
1089 | */ | ||
1090 | static struct pcpu_alloc_info * __init pcpu_build_alloc_info( | ||
1091 | size_t reserved_size, size_t dyn_size, | ||
1092 | size_t atom_size, | ||
1093 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) | ||
1094 | { | ||
1095 | static int group_map[NR_CPUS] __initdata; | ||
1096 | static int group_cnt[NR_CPUS] __initdata; | ||
1097 | const size_t static_size = __per_cpu_end - __per_cpu_start; | ||
1098 | int nr_groups = 1, nr_units = 0; | ||
1099 | size_t size_sum, min_unit_size, alloc_size; | ||
1100 | int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ | ||
1101 | int last_allocs, group, unit; | ||
1102 | unsigned int cpu, tcpu; | ||
1103 | struct pcpu_alloc_info *ai; | ||
1104 | unsigned int *cpu_map; | ||
1105 | |||
1106 | /* this function may be called multiple times */ | ||
1107 | memset(group_map, 0, sizeof(group_map)); | ||
1108 | memset(group_cnt, 0, sizeof(group_cnt)); | ||
1109 | |||
1110 | /* calculate size_sum and ensure dyn_size is enough for early alloc */ | ||
1111 | size_sum = PFN_ALIGN(static_size + reserved_size + | ||
1112 | max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); | ||
1113 | dyn_size = size_sum - static_size - reserved_size; | ||
1114 | |||
1115 | /* | ||
1116 | * Determine min_unit_size, alloc_size and max_upa such that | ||
1117 | * alloc_size is multiple of atom_size and is the smallest | ||
1118 | * which can accommodate 4k aligned segments which are equal to | ||
1119 | * or larger than min_unit_size. | ||
1120 | */ | ||
1121 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | ||
1122 | |||
1123 | alloc_size = roundup(min_unit_size, atom_size); | ||
1124 | upa = alloc_size / min_unit_size; | ||
1125 | while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1126 | upa--; | ||
1127 | max_upa = upa; | ||
1128 | |||
1129 | /* group cpus according to their proximity */ | ||
1130 | for_each_possible_cpu(cpu) { | ||
1131 | group = 0; | ||
1132 | next_group: | ||
1133 | for_each_possible_cpu(tcpu) { | ||
1134 | if (cpu == tcpu) | ||
1135 | break; | ||
1136 | if (group_map[tcpu] == group && cpu_distance_fn && | ||
1137 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || | ||
1138 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { | ||
1139 | group++; | ||
1140 | nr_groups = max(nr_groups, group + 1); | ||
1141 | goto next_group; | ||
1142 | } | ||
1143 | } | ||
1144 | group_map[cpu] = group; | ||
1145 | group_cnt[group]++; | ||
1146 | } | ||
1147 | |||
1148 | /* | ||
1149 | * Expand unit size until address space usage goes over 75% | ||
1150 | * and then as much as possible without using more address | ||
1151 | * space. | ||
1152 | */ | ||
1153 | last_allocs = INT_MAX; | ||
1154 | for (upa = max_upa; upa; upa--) { | ||
1155 | int allocs = 0, wasted = 0; | ||
1156 | |||
1157 | if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1158 | continue; | ||
1159 | |||
1160 | for (group = 0; group < nr_groups; group++) { | ||
1161 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); | ||
1162 | allocs += this_allocs; | ||
1163 | wasted += this_allocs * upa - group_cnt[group]; | ||
1164 | } | ||
1165 | |||
1166 | /* | ||
1167 | * Don't accept if wastage is over 1/3. The | ||
1168 | * greater-than comparison ensures upa==1 always | ||
1169 | * passes the following check. | ||
1170 | */ | ||
1171 | if (wasted > num_possible_cpus() / 3) | ||
1172 | continue; | ||
1173 | |||
1174 | /* and then don't consume more memory */ | ||
1175 | if (allocs > last_allocs) | ||
1176 | break; | ||
1177 | last_allocs = allocs; | ||
1178 | best_upa = upa; | ||
1179 | } | ||
1180 | upa = best_upa; | ||
1181 | |||
1182 | /* allocate and fill alloc_info */ | ||
1183 | for (group = 0; group < nr_groups; group++) | ||
1184 | nr_units += roundup(group_cnt[group], upa); | ||
1185 | |||
1186 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); | ||
1187 | if (!ai) | ||
1188 | return ERR_PTR(-ENOMEM); | ||
1189 | cpu_map = ai->groups[0].cpu_map; | ||
1190 | |||
1191 | for (group = 0; group < nr_groups; group++) { | ||
1192 | ai->groups[group].cpu_map = cpu_map; | ||
1193 | cpu_map += roundup(group_cnt[group], upa); | ||
1194 | } | ||
1195 | |||
1196 | ai->static_size = static_size; | ||
1197 | ai->reserved_size = reserved_size; | ||
1198 | ai->dyn_size = dyn_size; | ||
1199 | ai->unit_size = alloc_size / upa; | ||
1200 | ai->atom_size = atom_size; | ||
1201 | ai->alloc_size = alloc_size; | ||
1202 | |||
1203 | for (group = 0, unit = 0; group_cnt[group]; group++) { | ||
1204 | struct pcpu_group_info *gi = &ai->groups[group]; | ||
1205 | |||
1206 | /* | ||
1207 | * Initialize base_offset as if all groups are located | ||
1208 | * back-to-back. The caller should update this to | ||
1209 | * reflect actual allocation. | ||
1210 | */ | ||
1211 | gi->base_offset = unit * ai->unit_size; | ||
1212 | |||
1213 | for_each_possible_cpu(cpu) | ||
1214 | if (group_map[cpu] == group) | ||
1215 | gi->cpu_map[gi->nr_units++] = cpu; | ||
1216 | gi->nr_units = roundup(gi->nr_units, upa); | ||
1217 | unit += gi->nr_units; | ||
1218 | } | ||
1219 | BUG_ON(unit != nr_units); | ||
1220 | |||
1221 | return ai; | ||
1222 | } | ||
1223 | |||
1224 | /** | ||
1225 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info | 1080 | * pcpu_dump_alloc_info - print out information about pcpu_alloc_info |
1226 | * @lvl: loglevel | 1081 | * @lvl: loglevel |
1227 | * @ai: allocation info to dump | 1082 | * @ai: allocation info to dump |
@@ -1363,7 +1218,9 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
1363 | 1218 | ||
1364 | /* sanity checks */ | 1219 | /* sanity checks */ |
1365 | PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); | 1220 | PCPU_SETUP_BUG_ON(ai->nr_groups <= 0); |
1221 | #ifdef CONFIG_SMP | ||
1366 | PCPU_SETUP_BUG_ON(!ai->static_size); | 1222 | PCPU_SETUP_BUG_ON(!ai->static_size); |
1223 | #endif | ||
1367 | PCPU_SETUP_BUG_ON(!base_addr); | 1224 | PCPU_SETUP_BUG_ON(!base_addr); |
1368 | PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); | 1225 | PCPU_SETUP_BUG_ON(ai->unit_size < size_sum); |
1369 | PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); | 1226 | PCPU_SETUP_BUG_ON(ai->unit_size & ~PAGE_MASK); |
@@ -1488,6 +1345,8 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai, | |||
1488 | return 0; | 1345 | return 0; |
1489 | } | 1346 | } |
1490 | 1347 | ||
1348 | #ifdef CONFIG_SMP | ||
1349 | |||
1491 | const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { | 1350 | const char *pcpu_fc_names[PCPU_FC_NR] __initdata = { |
1492 | [PCPU_FC_AUTO] = "auto", | 1351 | [PCPU_FC_AUTO] = "auto", |
1493 | [PCPU_FC_EMBED] = "embed", | 1352 | [PCPU_FC_EMBED] = "embed", |
@@ -1515,8 +1374,180 @@ static int __init percpu_alloc_setup(char *str) | |||
1515 | } | 1374 | } |
1516 | early_param("percpu_alloc", percpu_alloc_setup); | 1375 | early_param("percpu_alloc", percpu_alloc_setup); |
1517 | 1376 | ||
1377 | /* | ||
1378 | * pcpu_embed_first_chunk() is used by the generic percpu setup. | ||
1379 | * Build it if needed by the arch config or the generic setup is going | ||
1380 | * to be used. | ||
1381 | */ | ||
1518 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ | 1382 | #if defined(CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK) || \ |
1519 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) | 1383 | !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) |
1384 | #define BUILD_EMBED_FIRST_CHUNK | ||
1385 | #endif | ||
1386 | |||
1387 | /* build pcpu_page_first_chunk() iff needed by the arch config */ | ||
1388 | #if defined(CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK) | ||
1389 | #define BUILD_PAGE_FIRST_CHUNK | ||
1390 | #endif | ||
1391 | |||
1392 | /* pcpu_build_alloc_info() is used by both embed and page first chunk */ | ||
1393 | #if defined(BUILD_EMBED_FIRST_CHUNK) || defined(BUILD_PAGE_FIRST_CHUNK) | ||
1394 | /** | ||
1395 | * pcpu_build_alloc_info - build alloc_info considering distances between CPUs | ||
1396 | * @reserved_size: the size of reserved percpu area in bytes | ||
1397 | * @dyn_size: minimum free size for dynamic allocation in bytes | ||
1398 | * @atom_size: allocation atom size | ||
1399 | * @cpu_distance_fn: callback to determine distance between cpus, optional | ||
1400 | * | ||
1401 | * This function determines grouping of units, their mappings to cpus | ||
1402 | * and other parameters considering needed percpu size, allocation | ||
1403 | * atom size and distances between CPUs. | ||
1404 | * | ||
1405 | * Groups are always multiples of atom size and CPUs which are of | ||
1406 | * LOCAL_DISTANCE both ways are grouped together and share space for | ||
1407 | * units in the same group. The returned configuration is guaranteed | ||
1408 | * to have CPUs on different nodes on different groups and >=75% usage | ||
1409 | * of allocated virtual address space. | ||
1410 | * | ||
1411 | * RETURNS: | ||
1412 | * On success, pointer to the new allocation_info is returned. On | ||
1413 | * failure, ERR_PTR value is returned. | ||
1414 | */ | ||
1415 | static struct pcpu_alloc_info * __init pcpu_build_alloc_info( | ||
1416 | size_t reserved_size, size_t dyn_size, | ||
1417 | size_t atom_size, | ||
1418 | pcpu_fc_cpu_distance_fn_t cpu_distance_fn) | ||
1419 | { | ||
1420 | static int group_map[NR_CPUS] __initdata; | ||
1421 | static int group_cnt[NR_CPUS] __initdata; | ||
1422 | const size_t static_size = __per_cpu_end - __per_cpu_start; | ||
1423 | int nr_groups = 1, nr_units = 0; | ||
1424 | size_t size_sum, min_unit_size, alloc_size; | ||
1425 | int upa, max_upa, uninitialized_var(best_upa); /* units_per_alloc */ | ||
1426 | int last_allocs, group, unit; | ||
1427 | unsigned int cpu, tcpu; | ||
1428 | struct pcpu_alloc_info *ai; | ||
1429 | unsigned int *cpu_map; | ||
1430 | |||
1431 | /* this function may be called multiple times */ | ||
1432 | memset(group_map, 0, sizeof(group_map)); | ||
1433 | memset(group_cnt, 0, sizeof(group_cnt)); | ||
1434 | |||
1435 | /* calculate size_sum and ensure dyn_size is enough for early alloc */ | ||
1436 | size_sum = PFN_ALIGN(static_size + reserved_size + | ||
1437 | max_t(size_t, dyn_size, PERCPU_DYNAMIC_EARLY_SIZE)); | ||
1438 | dyn_size = size_sum - static_size - reserved_size; | ||
1439 | |||
1440 | /* | ||
1441 | * Determine min_unit_size, alloc_size and max_upa such that | ||
1442 | * alloc_size is multiple of atom_size and is the smallest | ||
1443 | * which can accommodate 4k aligned segments which are equal to | ||
1444 | * or larger than min_unit_size. | ||
1445 | */ | ||
1446 | min_unit_size = max_t(size_t, size_sum, PCPU_MIN_UNIT_SIZE); | ||
1447 | |||
1448 | alloc_size = roundup(min_unit_size, atom_size); | ||
1449 | upa = alloc_size / min_unit_size; | ||
1450 | while (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1451 | upa--; | ||
1452 | max_upa = upa; | ||
1453 | |||
1454 | /* group cpus according to their proximity */ | ||
1455 | for_each_possible_cpu(cpu) { | ||
1456 | group = 0; | ||
1457 | next_group: | ||
1458 | for_each_possible_cpu(tcpu) { | ||
1459 | if (cpu == tcpu) | ||
1460 | break; | ||
1461 | if (group_map[tcpu] == group && cpu_distance_fn && | ||
1462 | (cpu_distance_fn(cpu, tcpu) > LOCAL_DISTANCE || | ||
1463 | cpu_distance_fn(tcpu, cpu) > LOCAL_DISTANCE)) { | ||
1464 | group++; | ||
1465 | nr_groups = max(nr_groups, group + 1); | ||
1466 | goto next_group; | ||
1467 | } | ||
1468 | } | ||
1469 | group_map[cpu] = group; | ||
1470 | group_cnt[group]++; | ||
1471 | } | ||
1472 | |||
1473 | /* | ||
1474 | * Expand unit size until address space usage goes over 75% | ||
1475 | * and then as much as possible without using more address | ||
1476 | * space. | ||
1477 | */ | ||
1478 | last_allocs = INT_MAX; | ||
1479 | for (upa = max_upa; upa; upa--) { | ||
1480 | int allocs = 0, wasted = 0; | ||
1481 | |||
1482 | if (alloc_size % upa || ((alloc_size / upa) & ~PAGE_MASK)) | ||
1483 | continue; | ||
1484 | |||
1485 | for (group = 0; group < nr_groups; group++) { | ||
1486 | int this_allocs = DIV_ROUND_UP(group_cnt[group], upa); | ||
1487 | allocs += this_allocs; | ||
1488 | wasted += this_allocs * upa - group_cnt[group]; | ||
1489 | } | ||
1490 | |||
1491 | /* | ||
1492 | * Don't accept if wastage is over 1/3. The | ||
1493 | * greater-than comparison ensures upa==1 always | ||
1494 | * passes the following check. | ||
1495 | */ | ||
1496 | if (wasted > num_possible_cpus() / 3) | ||
1497 | continue; | ||
1498 | |||
1499 | /* and then don't consume more memory */ | ||
1500 | if (allocs > last_allocs) | ||
1501 | break; | ||
1502 | last_allocs = allocs; | ||
1503 | best_upa = upa; | ||
1504 | } | ||
1505 | upa = best_upa; | ||
1506 | |||
1507 | /* allocate and fill alloc_info */ | ||
1508 | for (group = 0; group < nr_groups; group++) | ||
1509 | nr_units += roundup(group_cnt[group], upa); | ||
1510 | |||
1511 | ai = pcpu_alloc_alloc_info(nr_groups, nr_units); | ||
1512 | if (!ai) | ||
1513 | return ERR_PTR(-ENOMEM); | ||
1514 | cpu_map = ai->groups[0].cpu_map; | ||
1515 | |||
1516 | for (group = 0; group < nr_groups; group++) { | ||
1517 | ai->groups[group].cpu_map = cpu_map; | ||
1518 | cpu_map += roundup(group_cnt[group], upa); | ||
1519 | } | ||
1520 | |||
1521 | ai->static_size = static_size; | ||
1522 | ai->reserved_size = reserved_size; | ||
1523 | ai->dyn_size = dyn_size; | ||
1524 | ai->unit_size = alloc_size / upa; | ||
1525 | ai->atom_size = atom_size; | ||
1526 | ai->alloc_size = alloc_size; | ||
1527 | |||
1528 | for (group = 0, unit = 0; group_cnt[group]; group++) { | ||
1529 | struct pcpu_group_info *gi = &ai->groups[group]; | ||
1530 | |||
1531 | /* | ||
1532 | * Initialize base_offset as if all groups are located | ||
1533 | * back-to-back. The caller should update this to | ||
1534 | * reflect actual allocation. | ||
1535 | */ | ||
1536 | gi->base_offset = unit * ai->unit_size; | ||
1537 | |||
1538 | for_each_possible_cpu(cpu) | ||
1539 | if (group_map[cpu] == group) | ||
1540 | gi->cpu_map[gi->nr_units++] = cpu; | ||
1541 | gi->nr_units = roundup(gi->nr_units, upa); | ||
1542 | unit += gi->nr_units; | ||
1543 | } | ||
1544 | BUG_ON(unit != nr_units); | ||
1545 | |||
1546 | return ai; | ||
1547 | } | ||
1548 | #endif /* BUILD_EMBED_FIRST_CHUNK || BUILD_PAGE_FIRST_CHUNK */ | ||
1549 | |||
1550 | #if defined(BUILD_EMBED_FIRST_CHUNK) | ||
1520 | /** | 1551 | /** |
1521 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem | 1552 | * pcpu_embed_first_chunk - embed the first percpu chunk into bootmem |
1522 | * @reserved_size: the size of reserved percpu area in bytes | 1553 | * @reserved_size: the size of reserved percpu area in bytes |
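To make the relocated pcpu_build_alloc_info() sizing concrete, here is a worked example with made-up numbers (illustrative only, not values taken from this patch): suppose static_size + reserved_size + dyn_size rounds up to size_sum = 88 KB and the arch passes atom_size = 2 MB. Then min_unit_size = max(88 KB, PCPU_MIN_UNIT_SIZE) = 88 KB and alloc_size = roundup(88 KB, 2 MB) = 2 MB, so the starting units-per-alloc is 2 MB / 88 KB = 23; walking upa down to the first value that divides 2 MB into page-aligned units gives max_upa = 16 and a 128 KB unit. The loop that follows settles on a smaller upa only if packing 16 units per 2 MB allocation would strand more than num_possible_cpus()/3 units after rounding each NUMA group up, and it stops lowering upa as soon as doing so would require more 2 MB allocations.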
@@ -1645,10 +1676,9 @@ out_free: | |||
1645 | free_bootmem(__pa(areas), areas_size); | 1676 | free_bootmem(__pa(areas), areas_size); |
1646 | return rc; | 1677 | return rc; |
1647 | } | 1678 | } |
1648 | #endif /* CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK || | 1679 | #endif /* BUILD_EMBED_FIRST_CHUNK */ |
1649 | !CONFIG_HAVE_SETUP_PER_CPU_AREA */ | ||
1650 | 1680 | ||
1651 | #ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK | 1681 | #ifdef BUILD_PAGE_FIRST_CHUNK |
1652 | /** | 1682 | /** |
1653 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages | 1683 | * pcpu_page_first_chunk - map the first chunk using PAGE_SIZE pages |
1654 | * @reserved_size: the size of reserved percpu area in bytes | 1684 | * @reserved_size: the size of reserved percpu area in bytes |
@@ -1756,10 +1786,11 @@ out_free_ar: | |||
1756 | pcpu_free_alloc_info(ai); | 1786 | pcpu_free_alloc_info(ai); |
1757 | return rc; | 1787 | return rc; |
1758 | } | 1788 | } |
1759 | #endif /* CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK */ | 1789 | #endif /* BUILD_PAGE_FIRST_CHUNK */ |
1760 | 1790 | ||
1791 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | ||
1761 | /* | 1792 | /* |
1762 | * Generic percpu area setup. | 1793 | * Generic SMP percpu area setup. |
1763 | * | 1794 | * |
1764 | * The embedding helper is used because its behavior closely resembles | 1795 | * The embedding helper is used because its behavior closely resembles |
1765 | * the original non-dynamic generic percpu area setup. This is | 1796 | * the original non-dynamic generic percpu area setup. This is |
@@ -1770,7 +1801,6 @@ out_free_ar: | |||
1770 | * on the physical linear memory mapping which uses large page | 1801 | * on the physical linear memory mapping which uses large page |
1771 | * mappings on applicable archs. | 1802 | * mappings on applicable archs. |
1772 | */ | 1803 | */ |
1773 | #ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA | ||
1774 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; | 1804 | unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; |
1775 | EXPORT_SYMBOL(__per_cpu_offset); | 1805 | EXPORT_SYMBOL(__per_cpu_offset); |
1776 | 1806 | ||
@@ -1799,13 +1829,48 @@ void __init setup_per_cpu_areas(void) | |||
1799 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, | 1829 | PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, NULL, |
1800 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); | 1830 | pcpu_dfl_fc_alloc, pcpu_dfl_fc_free); |
1801 | if (rc < 0) | 1831 | if (rc < 0) |
1802 | panic("Failed to initialized percpu areas."); | 1832 | panic("Failed to initialize percpu areas."); |
1803 | 1833 | ||
1804 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; | 1834 | delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; |
1805 | for_each_possible_cpu(cpu) | 1835 | for_each_possible_cpu(cpu) |
1806 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; | 1836 | __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; |
1807 | } | 1837 | } |
1808 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ | 1838 | #endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */ |
1839 | |||
1840 | #else /* CONFIG_SMP */ | ||
1841 | |||
1842 | /* | ||
1843 | * UP percpu area setup. | ||
1844 | * | ||
1845 | * UP always uses km-based percpu allocator with identity mapping. | ||
1846 | * Static percpu variables are indistinguishable from the usual static | ||
1847 | * variables and don't require any special preparation. | ||
1848 | */ | ||
1849 | void __init setup_per_cpu_areas(void) | ||
1850 | { | ||
1851 | const size_t unit_size = | ||
1852 | roundup_pow_of_two(max_t(size_t, PCPU_MIN_UNIT_SIZE, | ||
1853 | PERCPU_DYNAMIC_RESERVE)); | ||
1854 | struct pcpu_alloc_info *ai; | ||
1855 | void *fc; | ||
1856 | |||
1857 | ai = pcpu_alloc_alloc_info(1, 1); | ||
1858 | fc = __alloc_bootmem(unit_size, PAGE_SIZE, __pa(MAX_DMA_ADDRESS)); | ||
1859 | if (!ai || !fc) | ||
1860 | panic("Failed to allocate memory for percpu areas."); | ||
1861 | |||
1862 | ai->dyn_size = unit_size; | ||
1863 | ai->unit_size = unit_size; | ||
1864 | ai->atom_size = unit_size; | ||
1865 | ai->alloc_size = unit_size; | ||
1866 | ai->groups[0].nr_units = 1; | ||
1867 | ai->groups[0].cpu_map[0] = 0; | ||
1868 | |||
1869 | if (pcpu_setup_first_chunk(ai, fc) < 0) | ||
1870 | panic("Failed to initialize percpu areas."); | ||
1871 | } | ||
1872 | |||
1873 | #endif /* CONFIG_SMP */ | ||
1809 | 1874 | ||
1810 | /* | 1875 | /* |
1811 | * First and reserved chunks are initialized with temporary allocation | 1876 | * First and reserved chunks are initialized with temporary allocation |
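For the new UP path, the unit size works out to a single small bootmem block. Assuming the then-usual constants PCPU_MIN_UNIT_SIZE = 32 KB and PERCPU_DYNAMIC_RESERVE = 20 KB (both are assumptions about the surrounding headers, not part of this hunk), unit_size = roundup_pow_of_two(max(32 KB, 20 KB)) = 32 KB, so the whole first chunk is one 32 KB allocation and, since static percpu variables need no space here, all of it (ai->dyn_size == ai->unit_size) is available to dynamic percpu allocations.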
diff --git a/mm/percpu_up.c b/mm/percpu_up.c deleted file mode 100644 index db884fae5721..000000000000 --- a/mm/percpu_up.c +++ /dev/null | |||
@@ -1,30 +0,0 @@ | |||
1 | /* | ||
2 | * mm/percpu_up.c - dummy percpu memory allocator implementation for UP | ||
3 | */ | ||
4 | |||
5 | #include <linux/module.h> | ||
6 | #include <linux/percpu.h> | ||
7 | #include <linux/slab.h> | ||
8 | |||
9 | void __percpu *__alloc_percpu(size_t size, size_t align) | ||
10 | { | ||
11 | /* | ||
12 | * Can't easily make larger alignment work with kmalloc. WARN | ||
13 | * on it. Larger alignment should only be used for module | ||
14 | * percpu sections on SMP for which this path isn't used. | ||
15 | */ | ||
16 | WARN_ON_ONCE(align > SMP_CACHE_BYTES); | ||
17 | return (void __percpu __force *)kzalloc(size, GFP_KERNEL); | ||
18 | } | ||
19 | EXPORT_SYMBOL_GPL(__alloc_percpu); | ||
20 | |||
21 | void free_percpu(void __percpu *p) | ||
22 | { | ||
23 | kfree(this_cpu_ptr(p)); | ||
24 | } | ||
25 | EXPORT_SYMBOL_GPL(free_percpu); | ||
26 | |||
27 | phys_addr_t per_cpu_ptr_to_phys(void *addr) | ||
28 | { | ||
29 | return __pa(addr); | ||
30 | } | ||
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c index aa33fd67fa41..29d6cbffb283 100644 --- a/mm/sparse-vmemmap.c +++ b/mm/sparse-vmemmap.c | |||
@@ -220,18 +220,7 @@ void __init sparse_mem_maps_populate_node(struct page **map_map, | |||
220 | 220 | ||
221 | if (vmemmap_buf_start) { | 221 | if (vmemmap_buf_start) { |
222 | /* need to free left buf */ | 222 | /* need to free left buf */ |
223 | #ifdef CONFIG_NO_BOOTMEM | ||
224 | free_early(__pa(vmemmap_buf_start), __pa(vmemmap_buf_end)); | ||
225 | if (vmemmap_buf_start < vmemmap_buf) { | ||
226 | char name[15]; | ||
227 | |||
228 | snprintf(name, sizeof(name), "MEMMAP %d", nodeid); | ||
229 | reserve_early_without_check(__pa(vmemmap_buf_start), | ||
230 | __pa(vmemmap_buf), name); | ||
231 | } | ||
232 | #else | ||
233 | free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf); | 223 | free_bootmem(__pa(vmemmap_buf), vmemmap_buf_end - vmemmap_buf); |
234 | #endif | ||
235 | vmemmap_buf = NULL; | 224 | vmemmap_buf = NULL; |
236 | vmemmap_buf_end = NULL; | 225 | vmemmap_buf_end = NULL; |
237 | } | 226 | } |
diff --git a/mm/swapfile.c b/mm/swapfile.c index 7c703ff2f36f..9fc7bac7db0c 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c | |||
@@ -139,7 +139,7 @@ static int discard_swap(struct swap_info_struct *si) | |||
139 | nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); | 139 | nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); |
140 | if (nr_blocks) { | 140 | if (nr_blocks) { |
141 | err = blkdev_issue_discard(si->bdev, start_block, | 141 | err = blkdev_issue_discard(si->bdev, start_block, |
142 | nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); | 142 | nr_blocks, GFP_KERNEL, 0); |
143 | if (err) | 143 | if (err) |
144 | return err; | 144 | return err; |
145 | cond_resched(); | 145 | cond_resched(); |
@@ -150,7 +150,7 @@ static int discard_swap(struct swap_info_struct *si) | |||
150 | nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); | 150 | nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); |
151 | 151 | ||
152 | err = blkdev_issue_discard(si->bdev, start_block, | 152 | err = blkdev_issue_discard(si->bdev, start_block, |
153 | nr_blocks, GFP_KERNEL, BLKDEV_IFL_WAIT); | 153 | nr_blocks, GFP_KERNEL, 0); |
154 | if (err) | 154 | if (err) |
155 | break; | 155 | break; |
156 | 156 | ||
@@ -189,7 +189,7 @@ static void discard_swap_cluster(struct swap_info_struct *si, | |||
189 | start_block <<= PAGE_SHIFT - 9; | 189 | start_block <<= PAGE_SHIFT - 9; |
190 | nr_blocks <<= PAGE_SHIFT - 9; | 190 | nr_blocks <<= PAGE_SHIFT - 9; |
191 | if (blkdev_issue_discard(si->bdev, start_block, | 191 | if (blkdev_issue_discard(si->bdev, start_block, |
192 | nr_blocks, GFP_NOIO, BLKDEV_IFL_WAIT)) | 192 | nr_blocks, GFP_NOIO, 0)) |
193 | break; | 193 | break; |
194 | } | 194 | } |
195 | 195 | ||
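The swapfile hunks only track an API change: blkdev_issue_discard() lost its BLKDEV_IFL_WAIT flag and now waits for completion by default, so the former flag argument becomes a plain 0. A minimal sketch of the updated call, with made-up extent values and the same page-to-sector conversion used above:

	/* Hypothetical helper showing the post-change call signature. */
	static int demo_discard_extent(struct block_device *bdev,
				       pgoff_t start_page, pgoff_t nr_pages)
	{
		/* sectors are 512 bytes, hence the PAGE_SHIFT - 9 shifts */
		sector_t start_block = (sector_t)start_page << (PAGE_SHIFT - 9);
		sector_t nr_blocks = (sector_t)nr_pages << (PAGE_SHIFT - 9);

		/* last argument is a flags word; 0 means an ordinary, waited discard */
		return blkdev_issue_discard(bdev, start_block, nr_blocks,
					    GFP_KERNEL, 0);
	}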
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index 6b8889da69a6..9f909622a25e 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -517,6 +517,15 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0); | |||
517 | static void purge_fragmented_blocks_allcpus(void); | 517 | static void purge_fragmented_blocks_allcpus(void); |
518 | 518 | ||
519 | /* | 519 | /* |
520 | * called before a call to iounmap() if the caller wants vm_area_struct's | ||
521 | * immediately freed. | ||
522 | */ | ||
523 | void set_iounmap_nonlazy(void) | ||
524 | { | ||
525 | atomic_set(&vmap_lazy_nr, lazy_max_pages()+1); | ||
526 | } | ||
527 | |||
528 | /* | ||
520 | * Purges all lazily-freed vmap areas. | 529 | * Purges all lazily-freed vmap areas. |
521 | * | 530 | * |
522 | * If sync is 0 then don't purge if there is already a purge in progress. | 531 | * If sync is 0 then don't purge if there is already a purge in progress. |
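Normally unmapped vmap areas are only purged lazily, in batches, once vmap_lazy_nr exceeds lazy_max_pages(); the new set_iounmap_nonlazy() pushes the counter past that threshold so the very next iounmap() triggers an immediate purge. A hedged usage sketch, with an invented caller and addresses:

	/* Hypothetical caller that cannot leave stale lazy mappings behind. */
	static void demo_copy_from_io(void *dst, phys_addr_t phys, size_t len)
	{
		void __iomem *vaddr = ioremap(phys, len);

		if (!vaddr)
			return;

		memcpy_fromio(dst, vaddr, len);

		set_iounmap_nonlazy();	/* make the next iounmap() purge right away */
		iounmap(vaddr);
	}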
@@ -2056,6 +2065,7 @@ void free_vm_area(struct vm_struct *area) | |||
2056 | } | 2065 | } |
2057 | EXPORT_SYMBOL_GPL(free_vm_area); | 2066 | EXPORT_SYMBOL_GPL(free_vm_area); |
2058 | 2067 | ||
2068 | #ifdef CONFIG_SMP | ||
2059 | static struct vmap_area *node_to_va(struct rb_node *n) | 2069 | static struct vmap_area *node_to_va(struct rb_node *n) |
2060 | { | 2070 | { |
2061 | return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; | 2071 | return n ? rb_entry(n, struct vmap_area, rb_node) : NULL; |
@@ -2336,6 +2346,7 @@ void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms) | |||
2336 | free_vm_area(vms[i]); | 2346 | free_vm_area(vms[i]); |
2337 | kfree(vms); | 2347 | kfree(vms); |
2338 | } | 2348 | } |
2349 | #endif /* CONFIG_SMP */ | ||
2339 | 2350 | ||
2340 | #ifdef CONFIG_PROC_FS | 2351 | #ifdef CONFIG_PROC_FS |
2341 | static void *s_start(struct seq_file *m, loff_t *pos) | 2352 | static void *s_start(struct seq_file *m, loff_t *pos) |