author     Tejun Heo <tj@kernel.org>    2014-09-07 20:51:29 -0400
committer  Tejun Heo <tj@kernel.org>    2014-09-07 20:51:29 -0400
commit     908c7f1949cb7cc6e92ba8f18f2998e87e265b8e
tree       af885c65c6fe794cab7b7ad37bd811531a2a2ac5 /lib
parent     ebd8fef304f99da84d4a52ad056f6137ac9652d4
percpu_counter: add @gfp to percpu_counter_init()
The percpu allocator now supports an allocation mask. Add @gfp to
percpu_counter_init() so that !GFP_KERNEL allocation masks can be used
with percpu_counters too.
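For existing callers the conversion is purely mechanical: each
percpu_counter_init() call gains an explicit GFP_KERNEL argument. A
minimal before/after sketch (the my_stats container and its init
function are hypothetical, not from this patch):

```c
#include <linux/percpu_counter.h>
#include <linux/gfp.h>

struct my_stats {			/* hypothetical example container */
	struct percpu_counter nr_events;
};

static int my_stats_init(struct my_stats *s)
{
	/* Before this patch: percpu_counter_init(&s->nr_events, 0); */
	/* After: the caller states its allocation context explicitly. */
	return percpu_counter_init(&s->nr_events, 0, GFP_KERNEL);
}
```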
We could have left percpu_counter_init() alone and added
percpu_counter_init_gfp(); however, the number of users isn't that
high, and introducing _gfp variants to all percpu data structures would
be quite ugly, so let's just do the conversion. percpu_counter is the
percpu data structure with the most users; the others are a lot easier
to convert.
This patch doesn't make any functional difference.
Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: Jan Kara <jack@suse.cz>
Acked-by: "David S. Miller" <davem@davemloft.net>
Cc: x86@kernel.org
Cc: Jens Axboe <axboe@kernel.dk>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Diffstat (limited to 'lib')
-rw-r--r--  lib/flex_proportions.c  4 ++--
-rw-r--r--  lib/percpu_counter.c    4 ++--
-rw-r--r--  lib/proportions.c       6 +++---
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/lib/flex_proportions.c b/lib/flex_proportions.c
index ebf3bac460b0..b9d026bfcf38 100644
--- a/lib/flex_proportions.c
+++ b/lib/flex_proportions.c
@@ -40,7 +40,7 @@ int fprop_global_init(struct fprop_global *p)
 
 	p->period = 0;
 	/* Use 1 to avoid dealing with periods with 0 events... */
-	err = percpu_counter_init(&p->events, 1);
+	err = percpu_counter_init(&p->events, 1, GFP_KERNEL);
 	if (err)
 		return err;
 	seqcount_init(&p->sequence);
@@ -172,7 +172,7 @@ int fprop_local_init_percpu(struct fprop_local_percpu *pl)
 {
 	int err;
 
-	err = percpu_counter_init(&pl->events, 0);
+	err = percpu_counter_init(&pl->events, 0, GFP_KERNEL);
 	if (err)
 		return err;
 	pl->period = 0;
diff --git a/lib/percpu_counter.c b/lib/percpu_counter.c
index 3fde78275cd1..48144cdae819 100644
--- a/lib/percpu_counter.c
+++ b/lib/percpu_counter.c
@@ -112,7 +112,7 @@ s64 __percpu_counter_sum(struct percpu_counter *fbc)
 }
 EXPORT_SYMBOL(__percpu_counter_sum);
 
-int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
+int __percpu_counter_init(struct percpu_counter *fbc, s64 amount, gfp_t gfp,
 			  struct lock_class_key *key)
 {
 	unsigned long flags __maybe_unused;
@@ -120,7 +120,7 @@ int __percpu_counter_init(struct percpu_counter *fbc, s64 amount,
 	raw_spin_lock_init(&fbc->lock);
 	lockdep_set_class(&fbc->lock, key);
 	fbc->count = amount;
-	fbc->counters = alloc_percpu(s32);
+	fbc->counters = alloc_percpu_gfp(s32, gfp);
 	if (!fbc->counters)
 		return -ENOMEM;
 
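Threading @gfp through to alloc_percpu_gfp() is what lets a counter be
set up in contexts where GFP_KERNEL would be unsafe, e.g. while holding
a spinlock or on an I/O path. A hedged sketch of what the new parameter
enables (the function below is illustrative and not part of this
patch):

```c
#include <linux/percpu_counter.h>
#include <linux/gfp.h>

/*
 * Hypothetical: initialize a counter from a context that must not
 * sleep. GFP_NOWAIT makes the underlying percpu allocation fail
 * instead of blocking, so the caller must be prepared for -ENOMEM.
 */
static int init_counter_nowait(struct percpu_counter *fbc)
{
	return percpu_counter_init(fbc, 0, GFP_NOWAIT);
}
```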
diff --git a/lib/proportions.c b/lib/proportions.c
index 05df84801b56..ca95f8d54384 100644
--- a/lib/proportions.c
+++ b/lib/proportions.c
@@ -83,11 +83,11 @@ int prop_descriptor_init(struct prop_descriptor *pd, int shift)
 	pd->index = 0;
 	pd->pg[0].shift = shift;
 	mutex_init(&pd->mutex);
-	err = percpu_counter_init(&pd->pg[0].events, 0);
+	err = percpu_counter_init(&pd->pg[0].events, 0, GFP_KERNEL);
 	if (err)
 		goto out;
 
-	err = percpu_counter_init(&pd->pg[1].events, 0);
+	err = percpu_counter_init(&pd->pg[1].events, 0, GFP_KERNEL);
 	if (err)
 		percpu_counter_destroy(&pd->pg[0].events);
 
@@ -193,7 +193,7 @@ int prop_local_init_percpu(struct prop_local_percpu *pl)
 	raw_spin_lock_init(&pl->lock);
 	pl->shift = 0;
 	pl->period = 0;
-	return percpu_counter_init(&pl->events, 0);
+	return percpu_counter_init(&pl->events, 0, GFP_KERNEL);
 }
 
 void prop_local_destroy_percpu(struct prop_local_percpu *pl)