author		Omar Sandoval <osandov@fb.com>	2016-09-17 04:28:23 -0400
committer	Jens Axboe <axboe@fb.com>	2016-09-17 10:39:10 -0400
commit		40aabb67464d5aad9ca3d2a5fedee56e2ff45aa0 (patch)
tree		19592825f8eb48363f5f9279c0d32003e7ed6532 /lib/sbitmap.c
parent		48e28166a7b608e19a6aea3acadd81cdfe660f6b (diff)
sbitmap: push per-cpu last_tag into sbitmap_queue
Allocating your own per-cpu allocation hint separately makes for an
awkward API. Instead, allocate the per-cpu hint as part of the struct
sbitmap_queue. There's no point for a struct sbitmap_queue without the
cache, but you can still use a bare struct sbitmap.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
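To make the API point concrete, here is a minimal caller-side sketch (not part of this patch; my_tags_init/my_tags_exit are hypothetical names, and the teardown assumes sbitmap_queue_free() was updated in the header to free the embedded hint). Setup and teardown of the per-cpu hint now happen inside the sbitmap_queue helpers instead of in every caller:

#include <linux/sbitmap.h>

static struct sbitmap_queue tags;

/* Hypothetical caller: one init call now covers the bitmap, the wait
 * queues, and the embedded per-cpu allocation hint (shift == -1 asks
 * for the default word granularity). */
static int my_tags_init(unsigned int depth, int node)
{
	return sbitmap_queue_init_node(&tags, depth, -1, GFP_KERNEL, node);
}

static void my_tags_exit(void)
{
	/* Presumably frees the per-cpu hint along with the rest of
	 * the queue, so the caller no longer calls free_percpu(). */
	sbitmap_queue_free(&tags);
}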
Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--	lib/sbitmap.c	35
1 file changed, 34 insertions(+), 1 deletion(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 4d8e97e470ee..1651ad9d5530 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -205,11 +205,18 @@ int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
 	if (ret)
 		return ret;

+	sbq->alloc_hint = alloc_percpu_gfp(unsigned int, flags);
+	if (!sbq->alloc_hint) {
+		sbitmap_free(&sbq->sb);
+		return -ENOMEM;
+	}
+
 	sbq->wake_batch = sbq_calc_wake_batch(depth);
 	atomic_set(&sbq->wake_index, 0);

 	sbq->ws = kzalloc_node(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags, node);
 	if (!sbq->ws) {
+		free_percpu(sbq->alloc_hint);
 		sbitmap_free(&sbq->sb);
 		return -ENOMEM;
 	}
@@ -229,6 +236,29 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

+int __sbitmap_queue_get(struct sbitmap_queue *sbq, bool round_robin)
+{
+	unsigned int hint;
+	int nr;
+
+	hint = this_cpu_read(*sbq->alloc_hint);
+	nr = sbitmap_get(&sbq->sb, hint, round_robin);
+
+	if (nr == -1) {
+		/* If the map is full, a hint won't do us much good. */
+		this_cpu_write(*sbq->alloc_hint, 0);
+	} else if (nr == hint || unlikely(round_robin)) {
+		/* Only update the hint if we used it. */
+		hint = nr + 1;
+		if (hint >= sbq->sb.depth - 1)
+			hint = 0;
+		this_cpu_write(*sbq->alloc_hint, hint);
+	}
+
+	return nr;
+}
+EXPORT_SYMBOL_GPL(__sbitmap_queue_get);
+
 static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
 {
 	int i, wake_index;
@@ -273,10 +303,13 @@ static void sbq_wake_up(struct sbitmap_queue *sbq)
 	}
 }

-void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr)
+void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr,
+			 bool round_robin, unsigned int cpu)
 {
 	sbitmap_clear_bit(&sbq->sb, nr);
 	sbq_wake_up(sbq);
+	if (likely(!round_robin))
+		*per_cpu_ptr(sbq->alloc_hint, cpu) = nr;
 }
 EXPORT_SYMBOL_GPL(sbitmap_queue_clear);
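For reference, a hedged sketch of how the two entry points added above pair up in a caller (illustrative only; the wrapper names and the -EBUSY convention are assumptions, not from this patch):

/* Allocate a free bit near this CPU's cached hint. */
static int my_get_tag(struct sbitmap_queue *sbq, bool round_robin)
{
	int tag = __sbitmap_queue_get(sbq, round_robin);

	return tag < 0 ? -EBUSY : tag;	/* -1 means the map was full */
}

/* Release a bit; unless round-robin, the freed bit becomes @cpu's next
 * allocation hint, so each CPU tends to reuse the same tags. */
static void my_put_tag(struct sbitmap_queue *sbq, unsigned int tag,
		       bool round_robin, unsigned int cpu)
{
	sbitmap_queue_clear(sbq, tag, round_robin, cpu);
}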