Diffstat (limited to 'mm/allocpercpu.c')
-rw-r--r--	mm/allocpercpu.c	15
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/mm/allocpercpu.c b/mm/allocpercpu.c
index 7e58322b7134..b0012e27fea8 100644
--- a/mm/allocpercpu.c
+++ b/mm/allocpercpu.c
@@ -6,6 +6,10 @@
 #include <linux/mm.h>
 #include <linux/module.h>
 
+#ifndef cache_line_size
+#define cache_line_size()	L1_CACHE_BYTES
+#endif
+
 /**
  * percpu_depopulate - depopulate per-cpu data for given cpu
  * @__pdata: per-cpu data to depopulate
@@ -52,6 +56,11 @@ void *percpu_populate(void *__pdata, size_t size, gfp_t gfp, int cpu)
 	struct percpu_data *pdata = __percpu_disguise(__pdata);
 	int node = cpu_to_node(cpu);
 
+	/*
+	 * We should make sure each CPU gets private memory.
+	 */
+	size = roundup(size, cache_line_size());
+
 	BUG_ON(pdata->ptrs[cpu]);
 	if (node_online(node))
 		pdata->ptrs[cpu] = kmalloc_node(size, gfp|__GFP_ZERO, node);
@@ -98,7 +107,11 @@ EXPORT_SYMBOL_GPL(__percpu_populate_mask);
  */
 void *__percpu_alloc_mask(size_t size, gfp_t gfp, cpumask_t *mask)
 {
-	void *pdata = kzalloc(nr_cpu_ids * sizeof(void *), gfp);
+	/*
+	 * We allocate whole cache lines to avoid false sharing
+	 */
+	size_t sz = roundup(nr_cpu_ids * sizeof(void *), cache_line_size());
+	void *pdata = kzalloc(sz, gfp);
 	void *__pdata = __percpu_disguise(pdata);
 
 	if (unlikely(!pdata))
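
For readers unfamiliar with the false-sharing concern mentioned in the patch comments, the following is a minimal user-space sketch of the same rounding idea. The 64-byte line size, the ROUNDUP macro, and the 40-byte object size are assumptions chosen for illustration; they are not part of the patch, which uses the kernel's own cache_line_size() and roundup().

```c
#include <stdio.h>
#include <stddef.h>

/* Stand-ins for the kernel helpers; 64 bytes is an assumed line size. */
#define CACHE_LINE_SIZE 64
#define ROUNDUP(x, y)   ((((x) + (y) - 1) / (y)) * (y))

int main(void)
{
	size_t obj_size = 40;	/* hypothetical per-cpu object size */
	size_t padded   = ROUNDUP(obj_size, CACHE_LINE_SIZE);

	/*
	 * Rounding the requested size up to a whole number of cache lines
	 * means the allocation covers only complete lines, so no unrelated
	 * object can land on the tail of its last line.  One CPU writing
	 * its own per-cpu data then never invalidates a line that another
	 * CPU is also using (no false sharing).
	 */
	printf("raw size: %zu bytes, padded size: %zu bytes (%zu line(s))\n",
	       obj_size, padded, padded / CACHE_LINE_SIZE);
	return 0;
}
```

With the assumed numbers this prints a padded size of 64 bytes, i.e. one full cache line per per-cpu object instead of two objects sharing a line.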