author     Omar Sandoval <osandov@fb.com>    2016-09-17 10:38:44 -0400
committer  Jens Axboe <axboe@fb.com>         2016-09-17 10:38:44 -0400
commit     88459642cba452630326b9cab1c651e09577d4e4 (patch)
tree       cd7ecd917b294a92ff827b0e7dab526f0069547f /lib/sbitmap.c
parent     703fd1c0f177219e3a84e6c095c31dc566514d81 (diff)
blk-mq: abstract tag allocation out into sbitmap library
This is a generally useful data structure, so make it available to
anyone else who might want to use it. It's also a nice cleanup
separating the allocation logic from the rest of the tag handling logic.
The code is behind a new Kconfig option, CONFIG_SBITMAP, which is only
selected by CONFIG_BLOCK for now.
This should be a complete no-op functionality-wise.
Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
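
For orientation, a minimal sketch of the plain (non-queue) side of the new
API, as a hypothetical consumer might use it (illustrative names, not part
of the patch; error handling elided):

#include <linux/sbitmap.h>

static struct sbitmap sb_tags;

static int sb_tags_init(unsigned int nr_tags)
{
	/* shift == -1 lets sbitmap_init_node() pick a word size. */
	return sbitmap_init_node(&sb_tags, nr_tags, -1, GFP_KERNEL,
				 NUMA_NO_NODE);
}

static int sb_tag_get(unsigned int hint)
{
	return sbitmap_get(&sb_tags, hint, false);	/* -1 when exhausted */
}

static void sb_tag_put(unsigned int tag)
{
	/* sbitmap_clear_bit() is assumed from <linux/sbitmap.h>. */
	sbitmap_clear_bit(&sb_tags, tag);
}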
Diffstat (limited to 'lib/sbitmap.c')
 -rw-r--r--  lib/sbitmap.c  301
 1 file changed, 301 insertions, 0 deletions
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
new file mode 100644
index 000000000000..dfc084ac6937
--- /dev/null
+++ b/lib/sbitmap.c
@@ -0,0 +1,301 @@
/*
 * Copyright (C) 2016 Facebook
 * Copyright (C) 2013-2014 Jens Axboe
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <https://www.gnu.org/licenses/>.
 */

#include <linux/sbitmap.h>

int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
		      gfp_t flags, int node)
{
	unsigned int bits_per_word;
	unsigned int i;

	if (shift < 0) {
		shift = ilog2(BITS_PER_LONG);
		/*
		 * If the bitmap is small, shrink the number of bits per word so
		 * we spread over a few cachelines, at least. If less than 4
		 * bits, just forget about it, it's not going to work optimally
		 * anyway.
		 */
		if (depth >= 4) {
			while ((4U << shift) > depth)
				shift--;
		}
	}
	bits_per_word = 1U << shift;
	if (bits_per_word > BITS_PER_LONG)
		return -EINVAL;

	sb->shift = shift;
	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	if (depth == 0) {
		sb->map = NULL;
		return 0;
	}

	sb->map = kzalloc_node(sb->map_nr * sizeof(*sb->map), flags, node);
	if (!sb->map)
		return -ENOMEM;

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_init_node);
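
/*
 * Editorial note (not in the original commit): a worked example of the
 * shift heuristic above, assuming BITS_PER_LONG == 64. For depth == 128,
 * shift starts at ilog2(64) == 6; 4 << 6 == 256 > 128 shrinks it to 5,
 * and 4 << 5 == 128 ends the loop. That gives 32 bits per word and
 * map_nr == 4, spreading the 128 bits over four words instead of two.
 */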

void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
{
	unsigned int bits_per_word = 1U << sb->shift;
	unsigned int i;

	sb->depth = depth;
	sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);

	for (i = 0; i < sb->map_nr; i++) {
		sb->map[i].depth = min(depth, bits_per_word);
		depth -= sb->map[i].depth;
	}
}
EXPORT_SYMBOL_GPL(sbitmap_resize);
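
/*
 * Editorial note (not in the original commit): sbitmap_resize() does not
 * reallocate sb->map, so the new depth must fit within the map_nr words
 * allocated by sbitmap_init_node(); callers are expected to size the
 * initial allocation for the largest depth they will ever use.
 */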

static int __sbitmap_get_word(struct sbitmap_word *word, unsigned int hint,
			      bool wrap)
{
	unsigned int orig_hint = hint;
	int nr;

	while (1) {
		nr = find_next_zero_bit(&word->word, word->depth, hint);
		if (unlikely(nr >= word->depth)) {
			/*
			 * We started with an offset, and we didn't reset the
			 * offset to 0 in a failure case, so start from 0 to
			 * exhaust the map.
			 */
			if (orig_hint && hint && wrap) {
				hint = orig_hint = 0;
				continue;
			}
			return -1;
		}

		if (!test_and_set_bit(nr, &word->word))
			break;

		hint = nr + 1;
		if (hint >= word->depth - 1)
			hint = 0;
	}

	return nr;
}
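
/*
 * Editorial trace (not in the original commit): with depth == 8, hint == 5,
 * and bits 5..7 already set, find_next_zero_bit() returns 8 >= depth. Since
 * the search began at a nonzero offset and wrap is set, hint and orig_hint
 * reset to 0 and bits 0..4 are scanned before the function gives up with -1.
 */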

int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin)
{
	unsigned int i, index;
	int nr = -1;

	index = SB_NR_TO_INDEX(sb, alloc_hint);

	for (i = 0; i < sb->map_nr; i++) {
		nr = __sbitmap_get_word(&sb->map[index],
					SB_NR_TO_BIT(sb, alloc_hint),
					!round_robin);
		if (nr != -1) {
			nr += index << sb->shift;
			break;
		}

		/* Jump to next index. */
		index++;
		alloc_hint = index << sb->shift;

		if (index >= sb->map_nr) {
			index = 0;
			alloc_hint = 0;
		}
	}

	return nr;
}
EXPORT_SYMBOL_GPL(sbitmap_get);
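
/*
 * Editorial note (not in the original commit): sbitmap_get() visits each of
 * the map_nr words at most once, starting with the word holding alloc_hint
 * and wrapping around to word zero. Callers wanting cache locality would
 * typically feed the last allocated bit back in as the next alloc_hint.
 */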

bool sbitmap_any_bit_set(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		if (sb->map[i].word)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_set);

bool sbitmap_any_bit_clear(const struct sbitmap *sb)
{
	unsigned int i;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];
		unsigned long ret;

		ret = find_first_zero_bit(&word->word, word->depth);
		if (ret < word->depth)
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(sbitmap_any_bit_clear);

unsigned int sbitmap_weight(const struct sbitmap *sb)
{
	unsigned int i, weight = 0;

	for (i = 0; i < sb->map_nr; i++) {
		const struct sbitmap_word *word = &sb->map[i];

		weight += bitmap_weight(&word->word, word->depth);
	}
	return weight;
}
EXPORT_SYMBOL_GPL(sbitmap_weight);

static unsigned int sbq_calc_wake_batch(unsigned int depth)
{
	unsigned int wake_batch;

	/*
	 * For each batch, we wake up one queue. We need to make sure that our
	 * batch size is small enough that the full depth of the bitmap is
	 * enough to wake up all of the queues.
	 */
	wake_batch = SBQ_WAKE_BATCH;
	if (wake_batch > depth / SBQ_WAIT_QUEUES)
		wake_batch = max(1U, depth / SBQ_WAIT_QUEUES);

	return wake_batch;
}
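
/*
 * Editorial arithmetic (not in the original commit), assuming the
 * SBQ_WAKE_BATCH == 8 and SBQ_WAIT_QUEUES == 8 defaults from
 * <linux/sbitmap.h>: depth == 128 gives 128 / 8 == 16 >= 8, so wake_batch
 * stays 8; depth == 32 gives 32 / 8 == 4, so wake_batch drops to 4;
 * depth == 4 gives 4 / 8 == 0, and the max() clamps wake_batch to 1.
 */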

int sbitmap_queue_init_node(struct sbitmap_queue *sbq, unsigned int depth,
			    int shift, gfp_t flags, int node)
{
	int ret;
	int i;

	ret = sbitmap_init_node(&sbq->sb, depth, shift, flags, node);
	if (ret)
		return ret;

	sbq->wake_batch = sbq_calc_wake_batch(depth);
	atomic_set(&sbq->wake_index, 0);

	sbq->ws = kzalloc(SBQ_WAIT_QUEUES * sizeof(*sbq->ws), flags);
	if (!sbq->ws) {
		sbitmap_free(&sbq->sb);
		return -ENOMEM;
	}

	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		init_waitqueue_head(&sbq->ws[i].wait);
		atomic_set(&sbq->ws[i].wait_cnt, sbq->wake_batch);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(sbitmap_queue_init_node);

void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth)
{
	sbq->wake_batch = sbq_calc_wake_batch(depth);
	sbitmap_resize(&sbq->sb, depth);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_resize);

static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait)) {
			int o = atomic_read(&sbq->wake_index);

			if (wake_index != o)
				atomic_cmpxchg(&sbq->wake_index, o, wake_index);
			return ws;
		}

		wake_index = sbq_index_inc(wake_index);
	}

	return NULL;
}
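
/*
 * Editorial note (not in the original commit): the atomic_cmpxchg() above is
 * opportunistic. If wake_index moved while we were scanning, the update is
 * simply dropped; wake_index is only a fairness hint for rotating wakeups
 * across the SBQ_WAIT_QUEUES wait queues, so losing the race is harmless.
 */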

static void sbq_wake_up(struct sbitmap_queue *sbq)
{
	struct sbq_wait_state *ws;
	int wait_cnt;

	/* Ensure that the wait list checks occur after clear_bit(). */
	smp_mb();

	ws = sbq_wake_ptr(sbq);
	if (!ws)
		return;

	wait_cnt = atomic_dec_return(&ws->wait_cnt);
	if (unlikely(wait_cnt < 0))
		wait_cnt = atomic_inc_return(&ws->wait_cnt);
	if (wait_cnt == 0) {
		atomic_add(sbq->wake_batch, &ws->wait_cnt);
		sbq_index_atomic_inc(&sbq->wake_index);
		wake_up(&ws->wait);
	}
}
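
/*
 * Editorial note (not in the original commit): each cleared bit decrements
 * ws->wait_cnt, and only the caller that drives it to zero recharges the
 * counter by wake_batch, advances wake_index, and issues the wake_up(), so
 * waiters are woken in batches rather than once per freed bit. The
 * atomic_inc_return() path repairs the counter if it races below zero.
 */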

void sbitmap_queue_clear(struct sbitmap_queue *sbq, unsigned int nr)
{
	sbitmap_clear_bit(&sbq->sb, nr);
	sbq_wake_up(sbq);
}
EXPORT_SYMBOL_GPL(sbitmap_queue_clear);

void sbitmap_queue_wake_all(struct sbitmap_queue *sbq)
{
	int i, wake_index;

	/*
	 * Make sure all changes prior to this are visible from other CPUs.
	 */
	smp_mb();
	wake_index = atomic_read(&sbq->wake_index);
	for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
		struct sbq_wait_state *ws = &sbq->ws[wake_index];

		if (waitqueue_active(&ws->wait))
			wake_up(&ws->wait);

		wake_index = sbq_index_inc(wake_index);
	}
}
EXPORT_SYMBOL_GPL(sbitmap_queue_wake_all);
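
Taken together, sbitmap_queue provides the blocking allocate/free pattern
that blk-mq's tag code builds on. A minimal sketch of that pattern against
this commit's API (hypothetical caller, not part of the patch; sbq_wait_ptr()
is assumed to come from <linux/sbitmap.h>, and error handling is elided):

#include <linux/sbitmap.h>
#include <linux/sched.h>
#include <linux/wait.h>

static struct sbitmap_queue tag_queue;	/* sbitmap_queue_init_node() elsewhere */
static atomic_t tag_wait_index;		/* rotates through the wait queues */

/* Sleep until a tag comes free, in the style of blk-mq's bt_get(). */
static int tag_alloc_blocking(unsigned int hint)
{
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	int tag;

	tag = sbitmap_get(&tag_queue.sb, hint, false);
	if (tag != -1)
		return tag;

	ws = sbq_wait_ptr(&tag_queue, &tag_wait_index);
	for (;;) {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
		tag = sbitmap_get(&tag_queue.sb, hint, false);
		if (tag != -1)
			break;
		io_schedule();	/* sbitmap_queue_clear() wakes in batches */
	}
	finish_wait(&ws->wait, &wait);
	return tag;
}

/* Freeing a tag feeds the wake_batch accounting in sbq_wake_up(). */
static void tag_free(unsigned int tag)
{
	sbitmap_queue_clear(&tag_queue, tag);
}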