author    Christoph Lameter <clameter@sgi.com>    2006-09-26 02:31:50 -0400
committer Linus Torvalds <torvalds@g5.osdl.org>  2006-09-26 11:48:51 -0400
commit    d00bcc98d7ec2c87391c9d9e1cca519ef64d33ef (patch)
tree      08b7d0fafba03d7b1d4d1d861897f78658aba173
parent    39bbcb8f88154c4ac9853baf3f1134af4c987517 (diff)
[PATCH] Extract the allocpercpu functions from the slab allocator
The allocpercpu functions __alloc_percpu() and free_percpu() make heavy use of the slab allocator, but they are conceptually independent of it, so move them out of mm/slab.c into their own file, mm/allocpercpu.c. This also simplifies SLOB (at this point slob may be broken in mm; this should fix it).

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Cc: Matt Mackall <mpm@selenic.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
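[Editor's note: not part of the patch. For orientation, this is roughly how callers use the dynamic per-cpu interface these functions back, via the alloc_percpu()/per_cpu_ptr()/free_percpu() wrappers in <linux/percpu.h>. The struct and function names (my_counters, my_stats_*) are invented for illustration.]

#include <linux/percpu.h>
#include <linux/smp.h>

struct my_counters {
	unsigned long events;
};

static struct my_counters *my_stats;

static int __init my_stats_init(void)
{
	/* allocates one zeroed copy per possible cpu */
	my_stats = alloc_percpu(struct my_counters);
	if (!my_stats)
		return -ENOMEM;
	return 0;
}

static void my_stats_bump(void)
{
	int cpu = get_cpu();	/* keep us on this cpu while touching its copy */

	/*
	 * All dereferences must go through per_cpu_ptr(); the pointer
	 * returned by alloc_percpu() is disguised precisely to catch
	 * raw dereferences.
	 */
	per_cpu_ptr(my_stats, cpu)->events++;
	put_cpu();
}

static void my_stats_exit(void)
{
	free_percpu(my_stats);
}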
-rw-r--r--  mm/Makefile       |   2
-rw-r--r--  mm/allocpercpu.c  | 129
-rw-r--r--  mm/slab.c         | 124
-rw-r--r--  mm/slob.c         |  45
4 files changed, 130 insertions(+), 170 deletions(-)
diff --git a/mm/Makefile b/mm/Makefile
index 9dd824c11eeb..60c56c0b5e10 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -23,4 +23,4 @@ obj-$(CONFIG_SLAB) += slab.o
 obj-$(CONFIG_MEMORY_HOTPLUG) += memory_hotplug.o
 obj-$(CONFIG_FS_XIP) += filemap_xip.o
 obj-$(CONFIG_MIGRATION) += migrate.o
-
+obj-$(CONFIG_SMP) += allocpercpu.o
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
new file mode 100644
index 000000000000..eaa9abeea536
--- /dev/null
+++ b/mm/allocpercpu.c
@@ -0,0 +1,129 @@
+/*
+ * linux/mm/allocpercpu.c
+ *
+ * Separated from slab.c August 11, 2006 Christoph Lameter <clameter@sgi.com>
+ */
+#include <linux/mm.h>
+#include <linux/module.h>
+
+/**
+ * percpu_depopulate - depopulate per-cpu data for given cpu
+ * @__pdata: per-cpu data to depopulate
+ * @cpu: depopulate per-cpu data for this cpu
+ *
+ * Depopulating per-cpu data for a cpu going offline would be a typical
+ * use case. You need to register a cpu hotplug handler for that purpose.
+ */
+void percpu_depopulate(void *__pdata, int cpu)
+{
+	struct percpu_data *pdata = __percpu_disguise(__pdata);
+	if (pdata->ptrs[cpu]) {
+		kfree(pdata->ptrs[cpu]);
+		pdata->ptrs[cpu] = NULL;
+	}
+}
+EXPORT_SYMBOL_GPL(percpu_depopulate);
+
+/**
+ * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
+ * @__pdata: per-cpu data to depopulate
+ * @mask: depopulate per-cpu data for cpu's selected through mask bits
+ */
+void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
+{
+	int cpu;
+	for_each_cpu_mask(cpu, *mask)
+		percpu_depopulate(__pdata, cpu);
+}
+EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
+
+/**
+ * percpu_populate - populate per-cpu data for given cpu
+ * @__pdata: per-cpu data to populate further
+ * @size: size of per-cpu object
+ * @gfp: may sleep or not etc.
+ * @cpu: populate per-cpu data for this cpu
+ *
+ * Populating per-cpu data for a cpu coming online would be a typical
+ * use case. You need to register a cpu hotplug handler for that purpose.
+ * Per-cpu object is populated with zeroed buffer.
+ */
+void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
+{
+	struct percpu_data *pdata = __percpu_disguise(__pdata);
+	int node = cpu_to_node(cpu);
+
+	BUG_ON(pdata->ptrs[cpu]);
+	if (node_online(node)) {
+		/* FIXME: kzalloc_node(size, gfp, node) */
+		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
+		if (pdata->ptrs[cpu])
+			memset(pdata->ptrs[cpu], 0, size);
+	} else
+		pdata->ptrs[cpu] = kzalloc(size, gfp);
+	return pdata->ptrs[cpu];
+}
+EXPORT_SYMBOL_GPL(percpu_populate);
+
+/**
+ * percpu_populate_mask - populate per-cpu data for more cpu's
+ * @__pdata: per-cpu data to populate further
+ * @size: size of per-cpu object
+ * @gfp: may sleep or not etc.
+ * @mask: populate per-cpu data for cpu's selected through mask bits
+ *
+ * Per-cpu objects are populated with zeroed buffers.
+ */
+int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
+			   cpumask_t *mask)
+{
+	cpumask_t populated = CPU_MASK_NONE;
+	int cpu;
+
+	for_each_cpu_mask(cpu, *mask)
+		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
+			__percpu_depopulate_mask(__pdata, &populated);
+			return -ENOMEM;
+		} else
+			cpu_set(cpu, populated);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__percpu_populate_mask);
+
+/**
+ * percpu_alloc_mask - initial setup of per-cpu data
+ * @size: size of per-cpu object
+ * @gfp: may sleep or not etc.
+ * @mask: populate per-cpu data for cpu's selected through mask bits
+ *
+ * Populating per-cpu data for all online cpu's would be a typical use case,
+ * which is simplified by the percpu_alloc() wrapper.
+ * Per-cpu objects are populated with zeroed buffers.
+ */
+void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
+{
+	void *pdata = kzalloc(sizeof(struct percpu_data), gfp);
+	void *__pdata = __percpu_disguise(pdata);
+
+	if (unlikely(!pdata))
+		return NULL;
+	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
+		return __pdata;
+	kfree(pdata);
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
+
+/**
+ * percpu_free - final cleanup of per-cpu data
+ * @__pdata: object to clean up
+ *
+ * We simply clean up any per-cpu object left. No need for the client to
+ * track and specify through a bit mask which per-cpu objects are to free.
+ */
+void percpu_free(void *__pdata)
+{
+	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
+	kfree(__percpu_disguise(__pdata));
+}
+EXPORT_SYMBOL_GPL(percpu_free);
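[Editor's note: not part of the patch. The kerneldoc above says the typical caller of percpu_populate()/percpu_depopulate() is a cpu hotplug handler; a notifier along these lines is what that would look like on a kernel of this vintage. my_pdata, MY_OBJ_SIZE and my_cpu_callback are invented names, and registration via register_cpu_notifier() is only sketched.]

#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/percpu.h>

static void *my_pdata;		/* obtained earlier from percpu_alloc()/__percpu_alloc_mask() */
#define MY_OBJ_SIZE	sizeof(unsigned long)

static int my_cpu_callback(struct notifier_block *nb,
			   unsigned long action, void *hcpu)
{
	int cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
		/* give the incoming cpu its zeroed per-cpu object */
		if (!percpu_populate(my_pdata, MY_OBJ_SIZE, GFP_KERNEL, cpu))
			return NOTIFY_BAD;
		break;
	case CPU_UP_CANCELED:
	case CPU_DEAD:
		/* free the per-cpu object of a cpu that is going away */
		percpu_depopulate(my_pdata, cpu);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block my_cpu_notifier = {
	.notifier_call = my_cpu_callback,
};
/* register_cpu_notifier(&my_cpu_notifier) would be called at init time. */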
diff --git a/mm/slab.c b/mm/slab.c
index 619337a5cb2b..13b5050f84cc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3440,130 +3440,6 @@ void *__kmalloc_track_caller(size_t size, gfp_t flags, void *caller)
 EXPORT_SYMBOL(__kmalloc_track_caller);
 #endif
 
-#ifdef CONFIG_SMP
-/**
- * percpu_depopulate - depopulate per-cpu data for given cpu
- * @__pdata: per-cpu data to depopulate
- * @cpu: depopulate per-cpu data for this cpu
- *
- * Depopulating per-cpu data for a cpu going offline would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- */
-void percpu_depopulate(void *__pdata, int cpu)
-{
-	struct percpu_data *pdata = __percpu_disguise(__pdata);
-	if (pdata->ptrs[cpu]) {
-		kfree(pdata->ptrs[cpu]);
-		pdata->ptrs[cpu] = NULL;
-	}
-}
-EXPORT_SYMBOL_GPL(percpu_depopulate);
-
-/**
- * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
- * @__pdata: per-cpu data to depopulate
- * @mask: depopulate per-cpu data for cpu's selected through mask bits
- */
-void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
-{
-	int cpu;
-	for_each_cpu_mask(cpu, *mask)
-		percpu_depopulate(__pdata, cpu);
-}
-EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
-
-/**
- * percpu_populate - populate per-cpu data for given cpu
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @cpu: populate per-cpu data for this cpu
- *
- * Populating per-cpu data for a cpu coming online would be a typical
- * use case. You need to register a cpu hotplug handler for that purpose.
- * Per-cpu object is populated with zeroed buffer.
- */
-void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
-{
-	struct percpu_data *pdata = __percpu_disguise(__pdata);
-	int node = cpu_to_node(cpu);
-
-	BUG_ON(pdata->ptrs[cpu]);
-	if (node_online(node)) {
-		/* FIXME: kzalloc_node(size, gfp, node) */
-		pdata->ptrs[cpu] = kmalloc_node(size, gfp, node);
-		if (pdata->ptrs[cpu])
-			memset(pdata->ptrs[cpu], 0, size);
-	} else
-		pdata->ptrs[cpu] = kzalloc(size, gfp);
-	return pdata->ptrs[cpu];
-}
-EXPORT_SYMBOL_GPL(percpu_populate);
-
-/**
- * percpu_populate_mask - populate per-cpu data for more cpu's
- * @__pdata: per-cpu data to populate further
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Per-cpu objects are populated with zeroed buffers.
- */
-int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
-			   cpumask_t *mask)
-{
-	cpumask_t populated = CPU_MASK_NONE;
-	int cpu;
-
-	for_each_cpu_mask(cpu, *mask)
-		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
-			__percpu_depopulate_mask(__pdata, &populated);
-			return -ENOMEM;
-		} else
-			cpu_set(cpu, populated);
-	return 0;
-}
-EXPORT_SYMBOL_GPL(__percpu_populate_mask);
-
-/**
- * percpu_alloc_mask - initial setup of per-cpu data
- * @size: size of per-cpu object
- * @gfp: may sleep or not etc.
- * @mask: populate per-cpu data for cpu's selected through mask bits
- *
- * Populating per-cpu data for all online cpu's would be a typical use case,
- * which is simplified by the percpu_alloc() wrapper.
- * Per-cpu objects are populated with zeroed buffers.
- */
-void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
-{
-	void *pdata = kzalloc(sizeof(struct percpu_data), gfp);
-	void *__pdata = __percpu_disguise(pdata);
-
-	if (unlikely(!pdata))
-		return NULL;
-	if (likely(!__percpu_populate_mask(__pdata, size, gfp, mask)))
-		return __pdata;
-	kfree(pdata);
-	return NULL;
-}
-EXPORT_SYMBOL_GPL(__percpu_alloc_mask);
-
-/**
- * percpu_free - final cleanup of per-cpu data
- * @__pdata: object to clean up
- *
- * We simply clean up any per-cpu object left. No need for the client to
- * track and specify through a bit mask which per-cpu objects are to free.
- */
-void percpu_free(void *__pdata)
-{
-	__percpu_depopulate_mask(__pdata, &cpu_possible_map);
-	kfree(__percpu_disguise(__pdata));
-}
-EXPORT_SYMBOL_GPL(percpu_free);
-#endif /* CONFIG_SMP */
-
 /**
  * kmem_cache_free - Deallocate an object
  * @cachep: The cache the allocation was from.
diff --git a/mm/slob.c b/mm/slob.c
index 7b52b20b9607..4c28a421b270 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -343,48 +343,3 @@ void kmem_cache_init(void)
 atomic_t slab_reclaim_pages = ATOMIC_INIT(0);
 EXPORT_SYMBOL(slab_reclaim_pages);
 
-#ifdef CONFIG_SMP
-
-void *__alloc_percpu(size_t size)
-{
-	int i;
-	struct percpu_data *pdata = kmalloc(sizeof (*pdata), GFP_KERNEL);
-
-	if (!pdata)
-		return NULL;
-
-	for_each_possible_cpu(i) {
-		pdata->ptrs[i] = kmalloc(size, GFP_KERNEL);
-		if (!pdata->ptrs[i])
-			goto unwind_oom;
-		memset(pdata->ptrs[i], 0, size);
-	}
-
-	/* Catch derefs w/o wrappers */
-	return (void *) (~(unsigned long) pdata);
-
-unwind_oom:
-	while (--i >= 0) {
-		if (!cpu_possible(i))
-			continue;
-		kfree(pdata->ptrs[i]);
-	}
-	kfree(pdata);
-	return NULL;
-}
-EXPORT_SYMBOL(__alloc_percpu);
-
-void
-free_percpu(const void *objp)
-{
-	int i;
-	struct percpu_data *p = (struct percpu_data *) (~(unsigned long) objp);
-
-	for_each_possible_cpu(i)
-		kfree(p->ptrs[i]);
-
-	kfree(p);
-}
-EXPORT_SYMBOL(free_percpu);
-
-#endif
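[Editor's note: not part of the patch. The removed SLOB code open-codes the same pointer-disguising trick that the new mm/allocpercpu.c reaches through __percpu_disguise(): the per-cpu descriptor is returned with all address bits complemented, so any dereference that bypasses per_cpu_ptr() faults immediately. The sketch below shows the idea; it is modeled on what <linux/percpu.h> carries around this point, and should be treated as illustrative rather than authoritative.]

/* Roughly what <linux/percpu.h> provides for the SMP case (illustrative). */
struct percpu_data {
	void *ptrs[NR_CPUS];
};

/* Hand out the descriptor with its bits flipped to catch raw dereferences. */
#define __percpu_disguise(pdata) (struct percpu_data *)~(unsigned long)(pdata)

/* per_cpu_ptr() undoes the disguise before indexing by cpu. */
#define per_cpu_ptr(ptr, cpu)					\
({								\
	struct percpu_data *__p = __percpu_disguise(ptr);	\
	(__typeof__(ptr))__p->ptrs[(cpu)];			\
})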