Diffstat (limited to 'mm/allocpercpu.c')
 mm/allocpercpu.c | 24 +++++++++++++-----------
 1 file changed, 13 insertions(+), 11 deletions(-)
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 05f2b4009ccc..4297bc41bfd2 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -18,27 +18,28 @@
  * Depopulating per-cpu data for a cpu going offline would be a typical
  * use case. You need to register a cpu hotplug handler for that purpose.
  */
-void percpu_depopulate(void *__pdata, int cpu)
+static void percpu_depopulate(void *__pdata, int cpu)
 {
 	struct percpu_data *pdata = __percpu_disguise(__pdata);
 
 	kfree(pdata->ptrs[cpu]);
 	pdata->ptrs[cpu] = NULL;
 }
-EXPORT_SYMBOL_GPL(percpu_depopulate);
 
 /**
  * percpu_depopulate_mask - depopulate per-cpu data for some cpu's
  * @__pdata: per-cpu data to depopulate
  * @mask: depopulate per-cpu data for cpu's selected through mask bits
  */
-void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
+static void __percpu_depopulate_mask(void *__pdata, cpumask_t *mask)
 {
 	int cpu;
-	for_each_cpu_mask(cpu, *mask)
+	for_each_cpu_mask_nr(cpu, *mask)
 		percpu_depopulate(__pdata, cpu);
 }
-EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
+
+#define percpu_depopulate_mask(__pdata, mask) \
+	__percpu_depopulate_mask((__pdata), &(mask))
 
 /**
  * percpu_populate - populate per-cpu data for given cpu
@@ -51,7 +52,7 @@ EXPORT_SYMBOL_GPL(__percpu_depopulate_mask);
  * use case. You need to register a cpu hotplug handler for that purpose.
  * Per-cpu object is populated with zeroed buffer.
  */
-void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
+static void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
 {
 	struct percpu_data *pdata = __percpu_disguise(__pdata);
 	int node = cpu_to_node(cpu);
@@ -68,7 +69,6 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
 	pdata->ptrs[cpu] = kzalloc(size, gfp);
 	return pdata->ptrs[cpu];
 }
-EXPORT_SYMBOL_GPL(percpu_populate);
 
 /**
  * percpu_populate_mask - populate per-cpu data for more cpu's
@@ -79,14 +79,14 @@ EXPORT_SYMBOL_GPL(percpu_populate);
  *
  * Per-cpu objects are populated with zeroed buffers.
  */
-int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
+static int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
 			   cpumask_t *mask)
 {
 	cpumask_t populated;
 	int cpu;
 
 	cpus_clear(populated);
-	for_each_cpu_mask(cpu, *mask)
+	for_each_cpu_mask_nr(cpu, *mask)
 		if (unlikely(!percpu_populate(__pdata, size, gfp, cpu))) {
 			__percpu_depopulate_mask(__pdata, &populated);
 			return -ENOMEM;
@@ -94,7 +94,9 @@ int __percpu_populate_mask(void *__pdata, size_t size, gfp_t gfp,
 			cpu_set(cpu, populated);
 	return 0;
 }
-EXPORT_SYMBOL_GPL(__percpu_populate_mask);
+
+#define percpu_populate_mask(__pdata, size, gfp, mask) \
+	__percpu_populate_mask((__pdata), (size), (gfp), &(mask))
 
 /**
  * percpu_alloc_mask - initial setup of per-cpu data
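Caller-side view (sketch only): with percpu_populate()/percpu_depopulate() and their mask variants made static, code outside mm/allocpercpu.c is expected to keep using the general per-cpu allocator entry points. The example below is a minimal illustration of that remaining interface; alloc_percpu(), free_percpu(), per_cpu_ptr() and for_each_possible_cpu() are existing kernel interfaces from <linux/percpu.h>, while "struct counters" and the example_* functions are hypothetical names used only for this sketch.

/*
 * Sketch, not part of the patch: shows the per-cpu allocator interface
 * that remains visible after the populate/depopulate helpers become
 * static. The counters structure and example_* functions are made up.
 */
#include <linux/percpu.h>
#include <linux/errno.h>

struct counters {
	unsigned long events;
};

static struct counters *example_counters;

static int example_init(void)
{
	/* One zeroed 'struct counters' is allocated per possible CPU. */
	example_counters = alloc_percpu(struct counters);
	if (!example_counters)
		return -ENOMEM;
	return 0;
}

static unsigned long example_total(void)
{
	unsigned long sum = 0;
	int cpu;

	/* Each CPU's instance is reached through per_cpu_ptr(). */
	for_each_possible_cpu(cpu)
		sum += per_cpu_ptr(example_counters, cpu)->events;
	return sum;
}

static void example_exit(void)
{
	free_percpu(example_counters);
	example_counters = NULL;
}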
