author     Tejun Heo <tj@kernel.org>    2009-07-21 08:18:35 -0400
committer  Tejun Heo <tj@kernel.org>    2009-10-02 00:29:29 -0400
commit     23fb064bb96f001ecb8682129f7ee1bc1ca691bc (patch)
tree       ae9173b25aa69cda1b974c630334ffb61cee7ebe
parent     52594762a39dfb6338c9d0906ca21dd9ae9453be (diff)
percpu: kill legacy percpu allocator
With ia64 converted, there's no arch left which still uses legacy percpu
allocator.  Kill it.

Signed-off-by: Tejun Heo <tj@kernel.org>
Delightedly-acked-by: Rusty Russell <rusty@rustcorp.com.au>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Christoph Lameter <cl@linux-foundation.org>
-rw-r--r--  include/linux/percpu.h   |  24
-rw-r--r--  kernel/module.c          | 150
-rw-r--r--  mm/Makefile              |   4
-rw-r--r--  mm/allocpercpu.c         | 177
-rw-r--r--  mm/percpu.c              |   2
5 files changed, 0 insertions(+), 357 deletions(-)
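For orientation: with the legacy allocator gone, every dynamic per-cpu allocation goes through the unified allocator's interface that remains declared in include/linux/percpu.h below (alloc_percpu(), per_cpu_ptr(), free_percpu()). A minimal, hypothetical usage sketch follows; the counter and function names are made up and are not part of this patch.

#include <linux/percpu.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

/* Hypothetical per-cpu hit counter in some client code. */
static unsigned long *hits;

static int example_init(void)
{
	int cpu;
	unsigned long total = 0;

	hits = alloc_percpu(unsigned long);	/* one zeroed counter per possible CPU */
	if (!hits)
		return -ENOMEM;

	for_each_possible_cpu(cpu)		/* visit each CPU's instance */
		total += *per_cpu_ptr(hits, cpu);

	free_percpu(hits);
	return 0;
}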
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 878836ca999c..5baf5b8788fb 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -34,8 +34,6 @@
 
 #ifdef CONFIG_SMP
 
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
 /* minimum unit size, also is the maximum supported allocation size */
 #define PCPU_MIN_UNIT_SIZE	PFN_ALIGN(64 << 10)
 
@@ -130,28 +128,6 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
 #define per_cpu_ptr(ptr, cpu)	SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
 
 extern void *__alloc_reserved_percpu(size_t size, size_t align);
-
-#else /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-struct percpu_data {
-	void *ptrs[1];
-};
-
-/* pointer disguising messes up the kmemleak objects tracking */
-#ifndef CONFIG_DEBUG_KMEMLEAK
-#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)
-#else
-#define __percpu_disguise(pdata) (struct percpu_data *)(pdata)
-#endif
-
-#define per_cpu_ptr(ptr, cpu) \
-({ \
-	struct percpu_data *__p = __percpu_disguise(ptr); \
-	(__typeof__(ptr))__p->ptrs[(cpu)]; \
-})
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
 extern void *__alloc_percpu(size_t size, size_t align);
 extern void free_percpu(void *__pdata);
 
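The block removed above is the legacy addressing scheme: a dynamic per-cpu object was a disguised pointer to a table of per-CPU copies, and per_cpu_ptr() indexed that table. The definition that survives instead shifts one canonical address by the target CPU's area offset (SHIFT_PERCPU_PTR with per_cpu_offset()). The standalone userspace sketch below illustrates only that offset-based idea; the names, sizes, and layout are made up and bear no relation to the real allocator.

#include <stdio.h>
#include <stdlib.h>

#define NCPUS     4
#define AREA_SIZE 4096			/* size of each fake per-CPU area */

static char *pcpu_base;			/* NCPUS contiguous fake areas */
static long  cpu_offset[NCPUS];		/* byte offset of each CPU's area */

/* Rough analogue of per_cpu_ptr(): add the target CPU's offset to the
 * canonical (CPU 0) address of the object. */
static void *pcpu_ptr(void *canonical, int cpu)
{
	return (char *)canonical + cpu_offset[cpu];
}

int main(void)
{
	int cpu;
	int *counter;

	pcpu_base = calloc(NCPUS, AREA_SIZE);
	if (!pcpu_base)
		return 1;
	for (cpu = 0; cpu < NCPUS; cpu++)
		cpu_offset[cpu] = (long)cpu * AREA_SIZE;

	/* Pretend an int lives at the start of the canonical area; the same
	 * offset arithmetic reaches every CPU's copy of it. */
	counter = (int *)pcpu_base;
	for (cpu = 0; cpu < NCPUS; cpu++)
		*(int *)pcpu_ptr(counter, cpu) = cpu * 10;
	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d: %d\n", cpu, *(int *)pcpu_ptr(counter, cpu));

	free(pcpu_base);
	return 0;
}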
diff --git a/kernel/module.c b/kernel/module.c
index 8b7d8805819d..64787cddeb5e 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -370,8 +370,6 @@ EXPORT_SYMBOL_GPL(find_module);
 
 #ifdef CONFIG_SMP
 
-#ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
-
 static void *percpu_modalloc(unsigned long size, unsigned long align,
 			     const char *name)
 {
@@ -395,154 +393,6 @@ static void percpu_modfree(void *freeme)
 	free_percpu(freeme);
 }
 
-#else /* ... CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
-/* Number of blocks used and allocated. */
-static unsigned int pcpu_num_used, pcpu_num_allocated;
-/* Size of each block. -ve means used. */
-static int *pcpu_size;
-
-static int split_block(unsigned int i, unsigned short size)
-{
-	/* Reallocation required? */
-	if (pcpu_num_used + 1 > pcpu_num_allocated) {
-		int *new;
-
-		new = krealloc(pcpu_size, sizeof(new[0])*pcpu_num_allocated*2,
-			       GFP_KERNEL);
-		if (!new)
-			return 0;
-
-		pcpu_num_allocated *= 2;
-		pcpu_size = new;
-	}
-
-	/* Insert a new subblock */
-	memmove(&pcpu_size[i+1], &pcpu_size[i],
-		sizeof(pcpu_size[0]) * (pcpu_num_used - i));
-	pcpu_num_used++;
-
-	pcpu_size[i+1] -= size;
-	pcpu_size[i] = size;
-	return 1;
-}
-
-static inline unsigned int block_size(int val)
-{
-	if (val < 0)
-		return -val;
-	return val;
-}
-
-static void *percpu_modalloc(unsigned long size, unsigned long align,
-			     const char *name)
-{
-	unsigned long extra;
-	unsigned int i;
-	void *ptr;
-	int cpu;
-
-	if (align > PAGE_SIZE) {
-		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
-		       name, align, PAGE_SIZE);
-		align = PAGE_SIZE;
-	}
-
-	ptr = __per_cpu_start;
-	for (i = 0; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-		/* Extra for alignment requirement. */
-		extra = ALIGN((unsigned long)ptr, align) - (unsigned long)ptr;
-		BUG_ON(i == 0 && extra != 0);
-
-		if (pcpu_size[i] < 0 || pcpu_size[i] < extra + size)
-			continue;
-
-		/* Transfer extra to previous block. */
-		if (pcpu_size[i-1] < 0)
-			pcpu_size[i-1] -= extra;
-		else
-			pcpu_size[i-1] += extra;
-		pcpu_size[i] -= extra;
-		ptr += extra;
-
-		/* Split block if warranted */
-		if (pcpu_size[i] - size > sizeof(unsigned long))
-			if (!split_block(i, size))
-				return NULL;
-
-		/* add the per-cpu scanning areas */
-		for_each_possible_cpu(cpu)
-			kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
-				       GFP_KERNEL);
-
-		/* Mark allocated */
-		pcpu_size[i] = -pcpu_size[i];
-		return ptr;
-	}
-
-	printk(KERN_WARNING "Could not allocate %lu bytes percpu data\n",
-	       size);
-	return NULL;
-}
-
-static void percpu_modfree(void *freeme)
-{
-	unsigned int i;
-	void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
-	int cpu;
-
-	/* First entry is core kernel percpu data. */
-	for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
-		if (ptr == freeme) {
-			pcpu_size[i] = -pcpu_size[i];
-			goto free;
-		}
-	}
-	BUG();
-
- free:
-	/* remove the per-cpu scanning areas */
-	for_each_possible_cpu(cpu)
-		kmemleak_free(freeme + per_cpu_offset(cpu));
-
-	/* Merge with previous? */
-	if (pcpu_size[i-1] >= 0) {
-		pcpu_size[i-1] += pcpu_size[i];
-		pcpu_num_used--;
-		memmove(&pcpu_size[i], &pcpu_size[i+1],
-			(pcpu_num_used - i) * sizeof(pcpu_size[0]));
-		i--;
-	}
-	/* Merge with next? */
-	if (i+1 < pcpu_num_used && pcpu_size[i+1] >= 0) {
-		pcpu_size[i] += pcpu_size[i+1];
-		pcpu_num_used--;
-		memmove(&pcpu_size[i+1], &pcpu_size[i+2],
-			(pcpu_num_used - (i+1)) * sizeof(pcpu_size[0]));
-	}
-}
-
-static int percpu_modinit(void)
-{
-	pcpu_num_used = 2;
-	pcpu_num_allocated = 2;
-	pcpu_size = kmalloc(sizeof(pcpu_size[0]) * pcpu_num_allocated,
-			    GFP_KERNEL);
-	/* Static in-kernel percpu data (used). */
-	pcpu_size[0] = -(__per_cpu_end-__per_cpu_start);
-	/* Free room. */
-	pcpu_size[1] = PERCPU_ENOUGH_ROOM + pcpu_size[0];
-	if (pcpu_size[1] < 0) {
-		printk(KERN_ERR "No per-cpu room for modules.\n");
-		pcpu_num_used = 1;
-	}
-
-	return 0;
-}
-__initcall(percpu_modinit);
-
-#endif /* CONFIG_HAVE_LEGACY_PER_CPU_AREA */
-
 static unsigned int find_pcpusec(Elf_Ehdr *hdr,
 				 Elf_Shdr *sechdrs,
 				 const char *secstrings)
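For reference, the roughly 150 lines deleted above were a tiny first-fit allocator carving module per-cpu space out of the static per-cpu section: pcpu_size[] held block sizes with negative values marking allocated blocks, blocks were split on allocation and merged with free neighbours on free. The standalone sketch below (plain C, not kernel code; alignment handling and array growth are omitted, and all names are invented) illustrates just that bookkeeping.

#include <stdio.h>
#include <string.h>

/* Array of block sizes; a negative value marks a block as allocated. */
static int blk_size[16];		/* fixed capacity to keep the sketch short */
static unsigned int blk_used;		/* number of blocks currently tracked */

/* First fit: find a free block big enough, split off the remainder,
 * and flip the sign to mark the block used.  Returns the block index,
 * or -1 if nothing fits. */
static int sketch_alloc(unsigned int size)
{
	unsigned int i;

	for (i = 0; i < blk_used; i++) {
		if (blk_size[i] < 0 || (unsigned int)blk_size[i] < size)
			continue;
		if ((unsigned int)blk_size[i] > size && blk_used < 16) {
			/* Split: shift the tail up and keep the remainder free. */
			memmove(&blk_size[i + 1], &blk_size[i],
				(blk_used - i) * sizeof(blk_size[0]));
			blk_used++;
			blk_size[i + 1] -= size;
			blk_size[i] = size;
		}
		blk_size[i] = -blk_size[i];	/* mark allocated */
		return (int)i;
	}
	return -1;
}

/* Free: flip the sign back and merge with free neighbours. */
static void sketch_free(int i)
{
	blk_size[i] = -blk_size[i];
	if (i + 1 < (int)blk_used && blk_size[i + 1] >= 0) {
		blk_size[i] += blk_size[i + 1];
		blk_used--;
		memmove(&blk_size[i + 1], &blk_size[i + 2],
			(blk_used - (i + 1)) * sizeof(blk_size[0]));
	}
	if (i > 0 && blk_size[i - 1] >= 0) {
		blk_size[i - 1] += blk_size[i];
		blk_used--;
		memmove(&blk_size[i], &blk_size[i + 1],
			(blk_used - i) * sizeof(blk_size[0]));
	}
}

int main(void)
{
	int a, b;

	blk_size[0] = 64;		/* one free block covering the whole area */
	blk_used = 1;

	a = sketch_alloc(16);
	b = sketch_alloc(8);
	printf("allocated blocks %d and %d\n", a, b);
	sketch_free(a);
	sketch_free(b);
	printf("blocks left: %u, first size: %d\n", blk_used, blk_size[0]);
	return 0;
}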
diff --git a/mm/Makefile b/mm/Makefile
index ebf849042ed3..82131d0f8d85 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -34,11 +34,7 @@ obj-$(CONFIG_FAILSLAB) += failslab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-ifndef CONFIG_HAVE_LEGACY_PER_CPU_AREA
 obj-$(CONFIG_SMP) += percpu.o
-else
-obj-$(CONFIG_SMP) += allocpercpu.o
-endif
 obj-$(CONFIG_QUICKLIST) += quicklist.o
 obj-$(CONFIG_CGROUP_MEM_RES_CTLR) += memcontrol.o page_cgroup.o
 obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
deleted file mode 100644
index df34ceae0c67..000000000000
--- a/mm/allocpercpu.c
+++ /dev/null
@@ -1,177 +0,0 @@
-/*
- * linux/mm/allocpercpu.c
- *
- * Separated from slab.c August 11, 2006 Christoph Lameter
- */
-#include <linux/mm.h>
-#include <linux/module.h>
-#include <linux/bootmem.h>
-#include <asm/sections.h>
-
-#ifndef cache_line_size
-#define cache_line_size()	L1_CACHE_BYTES
-#endif
-
-/**
- * percpu_depopulate - depopulate per-cpu data for given cpu
- * @__pdata: per-cpu data to depopulate
- * @cpu: depopulate per-cpu data for this cpu
- *
- * Depopulating per-cpu data for a cpu going offline would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- */
-static void percpu_depopulate(void *__pdata, int cpu)
-{
-	struct percpu_data *pdata = __percpu_disguise(__pdata);
-
-	kfree(pdata->ptrs[cpu]);
-	pdata->ptrs[cpu] = NULL;
-}
-
-/**
- * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
- * @__pdata: per-cpu data to depopulate
- * @mask: depopulate per-cpu data for cpu's selected through mask bits
- */
-static void __percpu_depopulate_mask(void *__pdata, const cpumask_t *mask)
-{
-	int cpu;
-	for_each_cpu_mask_nr(cpu, *mask)
-		percpu_depopulate(__pdata, cpu);
-}
-
-#define percpu_depopulate_mask(__pdata, mask) \
-	__percpu_depopulate_mask((__pdata), &(mask))
-
-/**
- * percpu_populate - populate per-cpu data for given cpu
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @cpu: populate per-data for this cpu
- *
- * Populating per-cpu data for a cpu coming online would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- * Per-cpu object is populated with zeroed buffer.
- */
-static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
-{
-	struct percpu_data *pdata = __percpu_disguise(__pdata);
-	int node = cpu_to_node(cpu);
-
-	/*
-	 * We should make sure each CPU gets private memory.
-	 */
-	size = roundup(size, cache_line_size());
-
-	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node))
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
-	else
-		pdata->ptrs[cpu] = kzalloc(size, gfp);
-	return pdata->ptrs[cpu];
-}
-
-/**
- * percpu_populate_mask - populate per-cpu data for more cpu's
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Per-cpu objects are populated with zeroed buffers.
- */
-static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
-				  cpumask_t *mask)
-{
-	cpumask_t populated;
-	int cpu;
-
-	cpus_clear(populated);
-	for_each_cpu_mask_nr(cpu, *mask)
-		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
-			__percpu_depopulate_mask(__pdata, &populated);
-			return -ENOMEM;
-		} else
-			cpu_set(cpu, populated);
-	return 0;
-}
-
-#define percpu_populate_mask(__pdata, size, gfp, mask) \
-	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
-
-/**
- * alloc_percpu - initial setup of per-cpu data
- * @size: size of per-cpu object
- * @align: alignment
- *
- * Allocate dynamic percpu area. Percpu objects are populated with
- * zeroed buffers.
- */
-void *__alloc_percpu(size_t size, size_t align)
-{
-	/*
-	 * We allocate whole cache lines to avoid false sharing
-	 */
-	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
-	void *pdata = kzalloc(sz, GFP_KERNEL);
-	void *__pdata = __percpu_disguise(pdata);
-
-	/*
-	 * Can't easily make larger alignment work with kmalloc. WARN
-	 * on it. Larger alignment should only be used for module
-	 * percpu sections on SMP for which this path isn't used.
-	 */
-	WARN_ON_ONCE(align > SMP_CACHE_BYTES);
-
-	if (unlikely(!pdata))
-		return NULL;
-	if (likely(!__percpu_populate_mask(__pdata, size, GFP_KERNEL,
-					   &cpu_possible_map)))
-		return __pdata;
-	kfree(pdata);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(__alloc_percpu);
-
-/**
- * free_percpu - final cleanup of per-cpu data
- * @__pdata: object to clean up
- *
- * We simply clean up any per-cpu object left. No need for the client to
- * track and specify through a bis mask which per-cpu objects are to free.
- */
-void free_percpu(void *__pdata)
-{
-	if (unlikely(!__pdata))
-		return;
-	__percpu_depopulate_mask(__pdata, cpu_possible_mask);
-	kfree(__percpu_disguise(__pdata));
-}
-EXPORT_SYMBOL_GPL(free_percpu);
-
-/*
- * Generic percpu area setup.
- */
-#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
-
-EXPORT_SYMBOL(__per_cpu_offset);
-
-void __init setup_per_cpu_areas(void)
-{
-	unsigned long size, i;
-	char *ptr;
-	unsigned long nr_possible_cpus = num_possible_cpus();
-
-	/* Copy section for each CPU (we discard the original) */
-	size = ALIGN(PERCPU_ENOUGH_ROOM, PAGE_SIZE);
-	ptr = alloc_bootmem_pages(size * nr_possible_cpus);
-
-	for_each_possible_cpu(i) {
-		__per_cpu_offset[i] = ptr - __per_cpu_start;
-		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
-		ptr += size;
-	}
-}
-#endif /* CONFIG_HAVE_SETUP_PER_CPU_AREA */
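For reference, the deleted file implemented dynamic per-cpu allocation as a kzalloc'd pointer table plus one separately allocated, zeroed, cache-line-rounded buffer per possible CPU, with rollback if any CPU's buffer could not be populated. A standalone userspace sketch of that structure follows (made-up names; NUMA placement, cache-line rounding, cpumask handling and the pointer disguising are all left out).

#include <stdio.h>
#include <stdlib.h>

#define NCPUS 4

/* One pointer table per object, one zeroed buffer per CPU behind it. */
struct pcpu_obj {
	void *ptrs[NCPUS];
};

static struct pcpu_obj *sketch_alloc_percpu(size_t size)
{
	struct pcpu_obj *obj = calloc(1, sizeof(*obj));
	int cpu;

	if (!obj)
		return NULL;
	for (cpu = 0; cpu < NCPUS; cpu++) {
		obj->ptrs[cpu] = calloc(1, size);	/* zeroed, like kzalloc */
		if (!obj->ptrs[cpu])
			goto undo;
	}
	return obj;

undo:
	while (cpu-- > 0)		/* roll back CPUs populated so far */
		free(obj->ptrs[cpu]);
	free(obj);
	return NULL;
}

static void sketch_free_percpu(struct pcpu_obj *obj)
{
	int cpu;

	if (!obj)
		return;
	for (cpu = 0; cpu < NCPUS; cpu++)
		free(obj->ptrs[cpu]);
	free(obj);
}

int main(void)
{
	struct pcpu_obj *counters = sketch_alloc_percpu(sizeof(long));
	int cpu;

	if (!counters)
		return 1;
	for (cpu = 0; cpu < NCPUS; cpu++)
		*(long *)counters->ptrs[cpu] = cpu;
	for (cpu = 0; cpu < NCPUS; cpu++)
		printf("cpu%d: %ld\n", cpu, *(long *)counters->ptrs[cpu]);
	sketch_free_percpu(counters);
	return 0;
}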
diff --git a/mm/percpu.c b/mm/percpu.c
index 4a048abad043..e4e08b87b77e 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -46,8 +46,6 @@
  *
  * To use this allocator, arch code should do the followings.
  *
- * - drop CONFIG_HAVE_LEGACY_PER_CPU_AREA
- *
  * - define __addr_to_pcpu_ptr() and __pcpu_ptr_to_addr() to translate
  *   regular address to percpu pointer and back if they need to be
  *   different from the default