Diffstat (limited to 'drivers/md/dm-thin.c')

 drivers/md/dm-thin.c | 124
 1 file changed, 73 insertions(+), 51 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index c3087575fef0..da2f0217df66 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -124,7 +124,7 @@ struct cell {
 	struct hlist_node list;
 	struct bio_prison *prison;
 	struct cell_key key;
-	unsigned count;
+	struct bio *holder;
 	struct bio_list bios;
 };
 
@@ -220,55 +220,60 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
  * This may block if a new cell needs allocating. You must ensure that
  * cells will be unlocked even if the calling thread is blocked.
  *
- * Returns the number of entries in the cell prior to the new addition
- * or < 0 on failure.
+ * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
  */
 static int bio_detain(struct bio_prison *prison, struct cell_key *key,
 		      struct bio *inmate, struct cell **ref)
 {
-	int r;
+	int r = 1;
 	unsigned long flags;
 	uint32_t hash = hash_key(prison, key);
-	struct cell *uninitialized_var(cell), *cell2 = NULL;
+	struct cell *cell, *cell2;
 
 	BUG_ON(hash > prison->nr_buckets);
 
 	spin_lock_irqsave(&prison->lock, flags);
+
 	cell = __search_bucket(prison->cells + hash, key);
+	if (cell) {
+		bio_list_add(&cell->bios, inmate);
+		goto out;
+	}
 
-	if (!cell) {
-		/*
-		 * Allocate a new cell
-		 */
-		spin_unlock_irqrestore(&prison->lock, flags);
-		cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
-		spin_lock_irqsave(&prison->lock, flags);
+	/*
+	 * Allocate a new cell
+	 */
+	spin_unlock_irqrestore(&prison->lock, flags);
+	cell2 = mempool_alloc(prison->cell_pool, GFP_NOIO);
+	spin_lock_irqsave(&prison->lock, flags);
 
-		/*
-		 * We've been unlocked, so we have to double check that
-		 * nobody else has inserted this cell in the meantime.
-		 */
-		cell = __search_bucket(prison->cells + hash, key);
+	/*
+	 * We've been unlocked, so we have to double check that
+	 * nobody else has inserted this cell in the meantime.
+	 */
+	cell = __search_bucket(prison->cells + hash, key);
+	if (cell) {
+		mempool_free(cell2, prison->cell_pool);
+		bio_list_add(&cell->bios, inmate);
+		goto out;
+	}
+
+	/*
+	 * Use new cell.
+	 */
+	cell = cell2;
 
-	if (!cell) {
-		cell = cell2;
-		cell2 = NULL;
+	cell->prison = prison;
+	memcpy(&cell->key, key, sizeof(cell->key));
+	cell->holder = inmate;
+	bio_list_init(&cell->bios);
+	hlist_add_head(&cell->list, prison->cells + hash);
 
-		cell->prison = prison;
-		memcpy(&cell->key, key, sizeof(cell->key));
-		cell->count = 0;
-		bio_list_init(&cell->bios);
-		hlist_add_head(&cell->list, prison->cells + hash);
-	}
-	}
+	r = 0;
 
-	r = cell->count++;
-	bio_list_add(&cell->bios, inmate);
+out:
 	spin_unlock_irqrestore(&prison->lock, flags);
 
-	if (cell2)
-		mempool_free(cell2, prison->cell_pool);
-
 	*ref = cell;
 
 	return r;
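The reworked bio_detain() keeps the existing trick of dropping the prison spinlock around the GFP_NOIO mempool allocation and re-probing the hash bucket once the lock is re-taken, but it now records the first bio to arrive as the cell's holder instead of bumping a count. As a rough userspace analogue of that double-checked insert (a sketch only: a pthread mutex stands in for the spinlock, malloc for the mempool, a single unhashed bucket for the hash table, and names such as detain() and search_bucket() are placeholders that do not exist in dm-thin.c):

#include <pthread.h>
#include <stdlib.h>

/* A detained item; stands in for a struct bio. */
struct inmate {
	struct inmate *next;
};

/* Simplified cell: one bucket, no hashing, no mempool. */
struct cell {
	int key;
	struct inmate *holder;	/* first arrival, kept out of the queue */
	struct inmate *bios;	/* later arrivals, queued behind the holder */
	struct cell *next;
};

static pthread_mutex_t prison_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cell *bucket;

static struct cell *search_bucket(int key)
{
	struct cell *c;

	for (c = bucket; c; c = c->next)
		if (c->key == key)
			return c;
	return NULL;
}

/* Returns 1 if the cell was already held, 0 if @inmate became the holder. */
static int detain(int key, struct inmate *inmate, struct cell **ref)
{
	int r = 1;
	struct cell *cell, *cell2;

	pthread_mutex_lock(&prison_lock);

	cell = search_bucket(key);
	if (cell) {
		inmate->next = cell->bios;	/* queue behind the holder */
		cell->bios = inmate;
		goto out;
	}

	/* Drop the lock around the allocation, which may block. */
	pthread_mutex_unlock(&prison_lock);
	cell2 = malloc(sizeof(*cell2));	/* mempool_alloc(GFP_NOIO) blocks rather
					 * than fails, so the sketch skips the
					 * NULL check a real program would need */
	pthread_mutex_lock(&prison_lock);

	/* We were unlocked: someone may have created the cell meanwhile. */
	cell = search_bucket(key);
	if (cell) {
		free(cell2);
		inmate->next = cell->bios;
		cell->bios = inmate;
		goto out;
	}

	/* Use the new cell; the first bio becomes the holder. */
	cell = cell2;
	cell->key = key;
	cell->holder = inmate;
	cell->bios = NULL;
	cell->next = bucket;
	bucket = cell;

	r = 0;
out:
	pthread_mutex_unlock(&prison_lock);
	*ref = cell;
	return r;
}

The return convention matches the new comment in the patch: 0 means the caller became the holder and owns the work for this key, 1 means it was queued behind an existing holder.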
@@ -283,8 +288,8 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
 
 	hlist_del(&cell->list);
 
-	if (inmates)
-		bio_list_merge(inmates, &cell->bios);
+	bio_list_add(inmates, cell->holder);
+	bio_list_merge(inmates, &cell->bios);
 
 	mempool_free(cell, prison->cell_pool);
 }
@@ -305,22 +310,44 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
  * bio may be in the cell. This function releases the cell, and also does
  * a sanity check.
  */
+static void __cell_release_singleton(struct cell *cell, struct bio *bio)
+{
+	hlist_del(&cell->list);
+	BUG_ON(cell->holder != bio);
+	BUG_ON(!bio_list_empty(&cell->bios));
+}
+
 static void cell_release_singleton(struct cell *cell, struct bio *bio)
 {
-	struct bio_prison *prison = cell->prison;
-	struct bio_list bios;
-	struct bio *b;
 	unsigned long flags;
-
-	bio_list_init(&bios);
+	struct bio_prison *prison = cell->prison;
 
 	spin_lock_irqsave(&prison->lock, flags);
-	__cell_release(cell, &bios);
+	__cell_release_singleton(cell, bio);
 	spin_unlock_irqrestore(&prison->lock, flags);
+}
+
+/*
+ * Sometimes we don't want the holder, just the additional bios.
+ */
+static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+{
+	struct bio_prison *prison = cell->prison;
 
-	b = bio_list_pop(&bios);
-	BUG_ON(b != bio);
-	BUG_ON(!bio_list_empty(&bios));
+	hlist_del(&cell->list);
+	bio_list_merge(inmates, &cell->bios);
+
+	mempool_free(cell, prison->cell_pool);
+}
+
+static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
+{
+	unsigned long flags;
+	struct bio_prison *prison = cell->prison;
+
+	spin_lock_irqsave(&prison->lock, flags);
+	__cell_release_no_holder(cell, inmates);
+	spin_unlock_irqrestore(&prison->lock, flags);
 }
 
 static void cell_error(struct cell *cell)
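After this hunk the prison offers three release flavours: __cell_release() hands back the holder plus everything queued behind it, __cell_release_singleton() sanity-checks that only the holder was ever detained, and __cell_release_no_holder() returns just the queued bios. Continuing the userspace sketch above (again an illustration of the semantics, not the kernel code), the no-holder variant looks roughly like this:

/*
 * Unlink the cell and hand back only the bios queued behind the
 * holder; the caller is assumed to have dealt with the holder itself.
 * Rough analogue of cell_release_no_holder() in the patch.
 */
static struct inmate *release_no_holder(struct cell *cell)
{
	struct inmate *queued;
	struct cell **p;

	pthread_mutex_lock(&prison_lock);

	for (p = &bucket; *p; p = &(*p)->next)	/* unlink from the bucket */
		if (*p == cell) {
			*p = cell->next;
			break;
		}

	queued = cell->bios;	/* the holder is deliberately excluded */
	free(cell);		/* stand-in for mempool_free()         */

	pthread_mutex_unlock(&prison_lock);
	return queued;
}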
@@ -800,21 +827,16 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
  * Same as cell_defer above, except it omits one particular detainee,
  * a write bio that covers the block and has already been processed.
  */
-static void cell_defer_except(struct thin_c *tc, struct cell *cell,
-			      struct bio *exception)
+static void cell_defer_except(struct thin_c *tc, struct cell *cell)
 {
 	struct bio_list bios;
-	struct bio *bio;
 	struct pool *pool = tc->pool;
 	unsigned long flags;
 
 	bio_list_init(&bios);
-	cell_release(cell, &bios);
 
 	spin_lock_irqsave(&pool->lock, flags);
-	while ((bio = bio_list_pop(&bios)))
-		if (bio != exception)
-			bio_list_add(&pool->deferred_bios, bio);
+	cell_release_no_holder(cell, &pool->deferred_bios);
 	spin_unlock_irqrestore(&pool->lock, flags);
 
 	wake_worker(pool);
@@ -854,7 +876,7 @@ static void process_prepared_mapping(struct new_mapping *m)
 	 * the bios in the cell.
 	 */
 	if (bio) {
-		cell_defer_except(tc, m->cell, bio);
+		cell_defer_except(tc, m->cell);
 		bio_endio(bio, 0);
 	} else
 		cell_defer(tc, m->cell, m->data_block);
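Because the write bio that process_prepared_mapping() has already completed is, by construction, the cell's holder, cell_defer_except() no longer needs an explicit exception argument: releasing everything but the holder gives exactly the old "everything except the exception" behaviour, as the final hunk's call site shows. A short hypothetical driver for the sketch above exercises the same pattern (not code from the patch):

#include <stdio.h>

int main(void)
{
	struct inmate first, second, third, *p;
	struct cell *cell;

	/* The first arrival becomes the holder ... */
	printf("first already held? %d\n", detain(42, &first, &cell));   /* 0 */

	/* ... later arrivals are queued behind it. */
	printf("second already held? %d\n", detain(42, &second, &cell)); /* 1 */
	printf("third already held? %d\n", detain(42, &third, &cell));   /* 1 */

	/* The holder has been "processed"; requeue only the others. */
	for (p = release_no_holder(cell); p; p = p->next)
		printf("requeue %p\n", (void *)p);

	return 0;
}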