Diffstat (limited to 'drivers/md')
-rw-r--r--   drivers/md/md.c   56
-rw-r--r--   drivers/md/md.h    4
2 files changed, 60 insertions, 0 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index fb11170c717e..6e853c61d87e 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -445,6 +445,61 @@ void md_flush_request(mddev_t *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
+/* Support for plugging.
+ * This mirrors the plugging support in request_queue, but does not
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on.  This links to the private plug_handle structure in the
+ * personality data where we keep a count of the number of outstanding
+ * plugs so other code can see if a plug is active.
+ */
+struct md_plug_cb {
+	struct blk_plug_cb cb;
+	mddev_t *mddev;
+};
+
+static void plugger_unplug(struct blk_plug_cb *cb)
+{
+	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+		md_wakeup_thread(mdcb->mddev->thread);
+	kfree(mdcb);
+}
+
+/* Check that an unplug wakeup will come shortly.
+ * If not, wake up the md thread immediately.
+ */
+int mddev_check_plugged(mddev_t *mddev)
+{
+	struct blk_plug *plug = current->plug;
+	struct md_plug_cb *mdcb;
+
+	if (!plug)
+		return 0;
+
+	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+		if (mdcb->cb.callback == plugger_unplug &&
+		    mdcb->mddev == mddev) {
+			/* Already on the list, move to top */
+			if (mdcb != list_first_entry(&plug->cb_list,
+						     struct md_plug_cb,
+						     cb.list))
+				list_move(&mdcb->cb.list, &plug->cb_list);
+			return 1;
+		}
+	}
+	/* Not currently on the callback list */
+	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+	if (!mdcb)
+		return 0;
+
+	mdcb->mddev = mddev;
+	mdcb->cb.callback = plugger_unplug;
+	atomic_inc(&mddev->plug_cnt);
+	list_add(&mdcb->cb.list, &plug->cb_list);
+	return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
+
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
@@ -494,6 +549,7 @@ void mddev_init(mddev_t *mddev)
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
+	atomic_set(&mddev->plug_cnt, 0);
 	spin_lock_init(&mddev->write_lock);
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
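
The expected caller pattern is not part of this patch; the following is only a hedged sketch of it. A personality's request path could call mddev_check_plugged() when it queues work for the md thread and skip the immediate wakeup while a plug is active, relying on plugger_unplug() to deliver the wakeup once the submitter unplugs. All names below other than mddev_check_plugged() and md_wakeup_thread() (example_conf, example_queue_write(), pending_bio_list, device_lock) are made up for illustration.

#include <linux/bio.h>
#include <linux/spinlock.h>
#include "md.h"

/* Hypothetical per-personality state, for illustration only. */
struct example_conf {
	spinlock_t	device_lock;
	struct bio_list	pending_bio_list;	/* writes waiting for the md thread */
};

/* Sketch of a request path that defers the wakeup while plugged. */
static void example_queue_write(mddev_t *mddev, struct bio *bio)
{
	struct example_conf *conf = mddev->private;
	int plugged = mddev_check_plugged(mddev);

	spin_lock_irq(&conf->device_lock);
	bio_list_add(&conf->pending_bio_list, bio);
	spin_unlock_irq(&conf->device_lock);

	/* If a plug is active, plugger_unplug() will wake the md thread
	 * when the submitter unplugs; otherwise wake it now. */
	if (!plugged)
		md_wakeup_thread(mddev->thread);
}
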
diff --git a/drivers/md/md.h b/drivers/md/md.h
index fad90228672f..0b1fd3f1d85b 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -179,6 +179,9 @@ struct mddev_s
 	int				delta_disks, new_level, new_layout;
 	int				new_chunk_sectors;
 
+	atomic_t			plug_cnt;	/* If device is expecting
+							 * more bios soon.
+							 */
 	struct mdk_thread_s		*thread;	/* management thread */
 	struct mdk_thread_s		*sync_thread;	/* doing resync or reconstruct */
 	sector_t			curr_resync;	/* last block scheduled */
@@ -508,4 +511,5 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
 				   mddev_t *mddev);
 extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
 				   mddev_t *mddev);
+extern int mddev_check_plugged(mddev_t *mddev);
 #endif /* _MD_MD_H */
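
For completeness, a hedged sketch of the consumer side: the new plug_cnt counter lets the md thread (or other code, as the md.c comment notes) see whether a plug is still active, i.e. whether more bios are expected soon, and defer flushing queued writes until plugger_unplug() drops the count and wakes it again. example_daemon_work(), example_conf and flush_pending() are hypothetical names, not kernel API; this fragment is not from this commit.

/* Sketch only: defer flushing while any submitter still holds a plug. */
static void example_daemon_work(mddev_t *mddev)
{
	struct example_conf *conf = mddev->private;

	if (atomic_read(&mddev->plug_cnt) == 0)
		flush_pending(conf);	/* hypothetical helper that issues
					 * the bios queued in the sketch above */
	/* otherwise do nothing: plugger_unplug() will wake this thread */
}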