Diffstat (limited to 'drivers/md/md.c')
 drivers/md/md.c | 59 +++++------------------------------------------------------
 1 file changed, 5 insertions(+), 54 deletions(-)
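
The rewritten md_unplug() in the diff below is shaped to match the block layer's generic plug-callback hook instead of md's private md_plug_cb wrapper. For reference, a minimal sketch of the assumed generic interface (modelled on include/linux/blkdev.h from the same series; these declarations are an assumption, not part of this diff):

/* Assumed block-layer plug-callback interface; shown for reference
 * only, not part of this patch. */
struct blk_plug_cb;
typedef void (*blk_plug_cb_fn)(struct blk_plug_cb *, bool);

struct blk_plug_cb {
	struct list_head list;		/* linked on the task's blk_plug */
	blk_plug_cb_fn callback;	/* md_unplug() for md devices */
	void *data;			/* here: the owning struct mddev */
};

/* Finds an existing callback for 'data' on the current task's plug, or
 * allocates one of 'size' bytes; returns NULL if no plug is active or
 * the allocation fails. */
struct blk_plug_cb *blk_check_plugged(blk_plug_cb_fn unplug,
				      void *data, int size);
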
diff --git a/drivers/md/md.c b/drivers/md/md.c
index f6c46109b07..fcd098794d3 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 }
 EXPORT_SYMBOL(md_flush_request);
 
-/* Support for plugging.
- * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue or request structures.
- * We allocate an md_plug_cb for each md device and each thread it gets
- * plugged on. This links tot the private plug_handle structure in the
- * personality data where we keep a count of the number of outstanding
- * plugs so other code can see if a plug is active.
- */
-struct md_plug_cb {
-	struct blk_plug_cb cb;
-	struct mddev *mddev;
-};
-
-static void plugger_unplug(struct blk_plug_cb *cb)
+void md_unplug(struct blk_plug_cb *cb, bool from_schedule)
 {
-	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
-	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
-		md_wakeup_thread(mdcb->mddev->thread);
-	kfree(mdcb);
-}
-
-/* Check that an unplug wakeup will come shortly.
- * If not, wakeup the md thread immediately
- */
-int mddev_check_plugged(struct mddev *mddev)
-{
-	struct blk_plug *plug = current->plug;
-	struct md_plug_cb *mdcb;
-
-	if (!plug)
-		return 0;
-
-	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
-		if (mdcb->cb.callback == plugger_unplug &&
-		    mdcb->mddev == mddev) {
-			/* Already on the list, move to top */
-			if (mdcb != list_first_entry(&plug->cb_list,
-						     struct md_plug_cb,
-						     cb.list))
-				list_move(&mdcb->cb.list, &plug->cb_list);
-			return 1;
-		}
-	}
-	/* Not currently on the callback list */
-	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
-	if (!mdcb)
-		return 0;
-
-	mdcb->mddev = mddev;
-	mdcb->cb.callback = plugger_unplug;
-	atomic_inc(&mddev->plug_cnt);
-	list_add(&mdcb->cb.list, &plug->cb_list);
-	return 1;
+	struct mddev *mddev = cb->data;
+	md_wakeup_thread(mddev->thread);
+	kfree(cb);
 }
-EXPORT_SYMBOL_GPL(mddev_check_plugged);
+EXPORT_SYMBOL(md_unplug);
 
 static inline struct mddev *mddev_get(struct mddev *mddev)
 {
@@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev)
 	atomic_set(&mddev->active, 1);
 	atomic_set(&mddev->openers, 0);
 	atomic_set(&mddev->active_io, 0);
-	atomic_set(&mddev->plug_cnt, 0);
 	spin_lock_init(&mddev->write_lock);
 	atomic_set(&mddev->flush_pending, 0);
 	init_waitqueue_head(&mddev->sb_wait);
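
With the private md_plug_cb bookkeeping gone, the old mddev_check_plugged() entry point can survive as a trivial wrapper around the generic helper. A minimal sketch, assuming blk_check_plugged() as declared above (this wrapper is illustrative, not part of the diff):

static inline int mddev_check_plugged(struct mddev *mddev)
{
	/* Non-zero means md_unplug() is armed on the current task's
	 * plug and a wakeup will arrive when the plug is flushed;
	 * zero means no plug is active (or allocation failed) and the
	 * caller should wake the md thread itself. */
	return !!blk_check_plugged(md_unplug, mddev,
				   sizeof(struct blk_plug_cb));
}

Compared with the removed code, the plug_cnt atomic disappears entirely: whether a plug is active is now answered by the presence of the callback on the plug list, so md_unplug() no longer needs a counter and simply wakes the md thread and frees the callback.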