aboutsummaryrefslogtreecommitdiffstats
path: root/lib/sbitmap.c
diff options
context:
space:
mode:
authorJens Axboe <axboe@kernel.dk>2018-12-11 20:39:41 -0500
committerJens Axboe <axboe@kernel.dk>2018-12-11 20:39:41 -0500
commitb2dbff1bb893d5dfdf501231ff5505ca10cdede3 (patch)
tree52f74b5b06c2ba8fbfb6f3cd12e479c869572afa /lib/sbitmap.c
parent2c4d5356e64d7d538f24c23045478330fae4a065 (diff)
sbitmap: flush deferred clears for resize and shallow gets
We're missing a deferred clear off the shallow get, which can cause a hang. Additionally, when we resize the sbitmap, we should also flush deferred clears for good measure. Ensure we have full coverage on batch clears, even for paths where we would not be doing deferred clear. This makes it less error prone for future additions. Reported-by: Bart Van Assche <bvanassche@acm.org> Tested-by: Ming Lei <ming.lei@redhat.com> Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'lib/sbitmap.c')
-rw-r--r--lib/sbitmap.c94
1 file changed, 51 insertions(+), 43 deletions(-)
diff --git a/lib/sbitmap.c b/lib/sbitmap.c
index 2261136ae067..5b3e56d68dab 100644
--- a/lib/sbitmap.c
+++ b/lib/sbitmap.c
@@ -20,6 +20,47 @@
20#include <linux/sbitmap.h> 20#include <linux/sbitmap.h>
21#include <linux/seq_file.h> 21#include <linux/seq_file.h>
22 22
23/*
24 * See if we have deferred clears that we can batch move
25 */
26static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
27{
28 unsigned long mask, val;
29 unsigned long __maybe_unused flags;
30 bool ret = false;
31
32 /* Silence bogus lockdep warning */
33#if defined(CONFIG_LOCKDEP)
34 local_irq_save(flags);
35#endif
36 spin_lock(&sb->map[index].swap_lock);
37
38 if (!sb->map[index].cleared)
39 goto out_unlock;
40
41 /*
42 * First get a stable cleared mask, setting the old mask to 0.
43 */
44 do {
45 mask = sb->map[index].cleared;
46 } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
47
48 /*
49 * Now clear the masked bits in our free word
50 */
51 do {
52 val = sb->map[index].word;
53 } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
54
55 ret = true;
56out_unlock:
57 spin_unlock(&sb->map[index].swap_lock);
58#if defined(CONFIG_LOCKDEP)
59 local_irq_restore(flags);
60#endif
61 return ret;
62}
63
23int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift, 64int sbitmap_init_node(struct sbitmap *sb, unsigned int depth, int shift,
24 gfp_t flags, int node) 65 gfp_t flags, int node)
25{ 66{
@@ -70,6 +111,9 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth)
70 unsigned int bits_per_word = 1U << sb->shift; 111 unsigned int bits_per_word = 1U << sb->shift;
71 unsigned int i; 112 unsigned int i;
72 113
114 for (i = 0; i < sb->map_nr; i++)
115 sbitmap_deferred_clear(sb, i);
116
73 sb->depth = depth; 117 sb->depth = depth;
74 sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word); 118 sb->map_nr = DIV_ROUND_UP(sb->depth, bits_per_word);
75 119
@@ -112,47 +156,6 @@ static int __sbitmap_get_word(unsigned long *word, unsigned long depth,
112 return nr; 156 return nr;
113} 157}
114 158
115/*
116 * See if we have deferred clears that we can batch move
117 */
118static inline bool sbitmap_deferred_clear(struct sbitmap *sb, int index)
119{
120 unsigned long mask, val;
121 unsigned long __maybe_unused flags;
122 bool ret = false;
123
124 /* Silence bogus lockdep warning */
125#if defined(CONFIG_LOCKDEP)
126 local_irq_save(flags);
127#endif
128 spin_lock(&sb->map[index].swap_lock);
129
130 if (!sb->map[index].cleared)
131 goto out_unlock;
132
133 /*
134 * First get a stable cleared mask, setting the old mask to 0.
135 */
136 do {
137 mask = sb->map[index].cleared;
138 } while (cmpxchg(&sb->map[index].cleared, mask, 0) != mask);
139
140 /*
141 * Now clear the masked bits in our free word
142 */
143 do {
144 val = sb->map[index].word;
145 } while (cmpxchg(&sb->map[index].word, val, val & ~mask) != val);
146
147 ret = true;
148out_unlock:
149 spin_unlock(&sb->map[index].swap_lock);
150#if defined(CONFIG_LOCKDEP)
151 local_irq_restore(flags);
152#endif
153 return ret;
154}
155
156static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index, 159static int sbitmap_find_bit_in_index(struct sbitmap *sb, int index,
157 unsigned int alloc_hint, bool round_robin) 160 unsigned int alloc_hint, bool round_robin)
158{ 161{
@@ -215,6 +218,7 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
215 index = SB_NR_TO_INDEX(sb, alloc_hint); 218 index = SB_NR_TO_INDEX(sb, alloc_hint);
216 219
217 for (i = 0; i < sb->map_nr; i++) { 220 for (i = 0; i < sb->map_nr; i++) {
221again:
218 nr = __sbitmap_get_word(&sb->map[index].word, 222 nr = __sbitmap_get_word(&sb->map[index].word,
219 min(sb->map[index].depth, shallow_depth), 223 min(sb->map[index].depth, shallow_depth),
220 SB_NR_TO_BIT(sb, alloc_hint), true); 224 SB_NR_TO_BIT(sb, alloc_hint), true);
@@ -223,6 +227,9 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
223 break; 227 break;
224 } 228 }
225 229
230 if (sbitmap_deferred_clear(sb, index))
231 goto again;
232
226 /* Jump to next index. */ 233 /* Jump to next index. */
227 index++; 234 index++;
228 alloc_hint = index << sb->shift; 235 alloc_hint = index << sb->shift;
@@ -242,7 +249,7 @@ bool sbitmap_any_bit_set(const struct sbitmap *sb)
242 unsigned int i; 249 unsigned int i;
243 250
244 for (i = 0; i < sb->map_nr; i++) { 251 for (i = 0; i < sb->map_nr; i++) {
245 if (sb->map[i].word) 252 if (sb->map[i].word & ~sb->map[i].cleared)
246 return true; 253 return true;
247 } 254 }
248 return false; 255 return false;
@@ -255,9 +262,10 @@ bool sbitmap_any_bit_clear(const struct sbitmap *sb)
255 262
256 for (i = 0; i < sb->map_nr; i++) { 263 for (i = 0; i < sb->map_nr; i++) {
257 const struct sbitmap_word *word = &sb->map[i]; 264 const struct sbitmap_word *word = &sb->map[i];
265 unsigned long mask = word->word & ~word->cleared;
258 unsigned long ret; 266 unsigned long ret;
259 267
260 ret = find_first_zero_bit(&word->word, word->depth); 268 ret = find_first_zero_bit(&mask, word->depth);
261 if (ret < word->depth) 269 if (ret < word->depth)
262 return true; 270 return true;
263 } 271 }