author    Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:52:07 -0500
committer Jens Axboe <jaxboe@fusionio.com>  2011-03-10 02:52:07 -0500
commit    7eaceaccab5f40bbfda044629a6298616aeaed50 (patch)
tree      33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /drivers/md/raid10.c
parent    73c101011926c5832e6e141682180c4debe2cf45 (diff)
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
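[Editor's note] For orientation, this is the shape of the explicit on-stack plugging the tree has been converted to (introduced by the parent commit, 73c101011926). A minimal illustrative sketch, not code from this patch; submit_my_bios() is a hypothetical stand-in for the caller's actual bio submission:

	#include <linux/blkdev.h>

	static void submit_batched_io(void)
	{
		struct blk_plug plug;

		blk_start_plug(&plug);	/* begin batching on this task's stack */

		submit_my_bios();	/* hypothetical: generic_make_request()
					 * calls queue up in the plug instead of
					 * hitting the device queue one at a time */

		blk_finish_plug(&plug);	/* explicit unplug: dispatch the batch */
	}

blk_flush_plug(current), used by the new md_kick_device() helper in the diff below, flushes the calling task's pending plug early, for example before blocking in a barrier wait.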
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--  drivers/md/raid10.c | 87
1 file changed, 19 insertions(+), 68 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3b607b28741b..e79f1c5bf71b 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -57,23 +57,16 @@
  */
 #define NR_RAID10_BIOS 256
 
-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);
 
 static void * r10bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	conf_t *conf = data;
-	r10bio_t *r10_bio;
 	int size = offsetof(struct r10bio_s, devs[conf->copies]);
 
 	/* allocate a r10bio with room for raid_disks entries in the bios array */
-	r10_bio = kzalloc(size, gfp_flags);
-	if (!r10_bio && conf->mddev)
-		unplug_slaves(conf->mddev);
-
-	return r10_bio;
+	return kzalloc(size, gfp_flags);
 }
 
 static void r10bio_pool_free(void *r10_bio, void *data)
@@ -106,10 +99,8 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 	int nalloc;
 
 	r10_bio = r10bio_pool_alloc(gfp_flags, conf);
-	if (!r10_bio) {
-		unplug_slaves(conf->mddev);
+	if (!r10_bio)
 		return NULL;
-	}
 
 	if (test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
 		nalloc = conf->copies; /* resync */
@@ -597,37 +588,6 @@ rb_out:
 	return disk;
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-	conf_t *conf = mddev->private;
-	int i;
-
-	rcu_read_lock();
-	for (i=0; i < conf->raid_disks; i++) {
-		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-			struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-			atomic_inc(&rdev->nr_pending);
-			rcu_read_unlock();
-
-			blk_unplug(r_queue);
-
-			rdev_dec_pending(rdev, mddev);
-			rcu_read_lock();
-		}
-	}
-	rcu_read_unlock();
-}
-
-static void raid10_unplug(struct request_queue *q)
-{
-	mddev_t *mddev = q->queuedata;
-
-	unplug_slaves(q->queuedata);
-	md_wakeup_thread(mddev->thread);
-}
-
 static int raid10_congested(void *data, int bits)
 {
 	mddev_t *mddev = data;
@@ -649,20 +609,16 @@ static int raid10_congested(void *data, int bits)
 	return ret;
 }
 
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
 	/* Any writes that have been queued but are awaiting
 	 * bitmap updates get flushed here.
-	 * We return 1 if any requests were actually submitted.
 	 */
-	int rv = 0;
-
 	spin_lock_irq(&conf->device_lock);
 
 	if (conf->pending_bio_list.head) {
 		struct bio *bio;
 		bio = bio_list_get(&conf->pending_bio_list);
-		blk_remove_plug(conf->mddev->queue);
 		spin_unlock_irq(&conf->device_lock);
 		/* flush any pending bitmap writes to disk
 		 * before proceeding w/ I/O */
@@ -674,11 +630,16 @@ static int flush_pending_writes(conf_t *conf)
 			generic_make_request(bio);
 			bio = next;
 		}
-		rv = 1;
 	} else
 		spin_unlock_irq(&conf->device_lock);
-	return rv;
 }
+
+static void md_kick_device(mddev_t *mddev)
+{
+	blk_flush_plug(current);
+	md_wakeup_thread(mddev->thread);
+}
+
 /* Barriers....
  * Sometimes we need to suspend IO while we do something else,
  * either some resync/recovery, or reconfigure the array.
@@ -708,8 +669,7 @@ static void raise_barrier(conf_t *conf, int force)
 
 	/* Wait until no block IO is waiting (unless 'force') */
 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-			    conf->resync_lock,
-			    raid10_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	/* block any new IO from starting */
 	conf->barrier++;
@@ -717,8 +677,7 @@ static void raise_barrier(conf_t *conf, int force)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock,
-			    raid10_unplug(conf->mddev->queue));
+			    conf->resync_lock, md_kick_device(conf->mddev));
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -739,7 +698,7 @@ static void wait_barrier(conf_t *conf)
 		conf->nr_waiting++;
 		wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
 				    conf->resync_lock,
-				    raid10_unplug(conf->mddev->queue));
+				    md_kick_device(conf->mddev));
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -776,7 +735,7 @@ static void freeze_array(conf_t *conf)
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    ({ flush_pending_writes(conf);
-			       raid10_unplug(conf->mddev->queue); }));
+			       md_kick_device(conf->mddev); }));
 	spin_unlock_irq(&conf->resync_lock);
 }
 
@@ -971,7 +930,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 		atomic_inc(&r10_bio->remaining);
 		spin_lock_irqsave(&conf->device_lock, flags);
 		bio_list_add(&conf->pending_bio_list, mbio);
-		blk_plug_device(mddev->queue);
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 	}
 
@@ -988,7 +946,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
 
-	if (do_sync)
+	if (do_sync || !mddev->bitmap)
 		md_wakeup_thread(mddev->thread);
 
 	return 0;
@@ -1681,7 +1639,6 @@ static void raid10d(mddev_t *mddev)
 	unsigned long flags;
 	conf_t *conf = mddev->private;
 	struct list_head *head = &conf->retry_list;
-	int unplug=0;
 	mdk_rdev_t *rdev;
 
 	md_check_recovery(mddev);
@@ -1689,7 +1646,7 @@ static void raid10d(mddev_t *mddev)
 	for (;;) {
 		char b[BDEVNAME_SIZE];
 
-		unplug += flush_pending_writes(conf);
+		flush_pending_writes(conf);
 
 		spin_lock_irqsave(&conf->device_lock, flags);
 		if (list_empty(head)) {
@@ -1703,13 +1660,11 @@ static void raid10d(mddev_t *mddev)
 
 		mddev = r10_bio->mddev;
 		conf = mddev->private;
-		if (test_bit(R10BIO_IsSync, &r10_bio->state)) {
+		if (test_bit(R10BIO_IsSync, &r10_bio->state))
 			sync_request_write(mddev, r10_bio);
-			unplug = 1;
-		} else if (test_bit(R10BIO_IsRecover, &r10_bio->state)) {
+		else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
 			recovery_request_write(mddev, r10_bio);
-			unplug = 1;
-		} else {
+		else {
 			int mirror;
 			/* we got a read error. Maybe the drive is bad. Maybe just
 			 * the block and we can fix it.
@@ -1756,14 +1711,11 @@ static void raid10d(mddev_t *mddev)
 				bio->bi_rw = READ | do_sync;
 				bio->bi_private = r10_bio;
 				bio->bi_end_io = raid10_end_read_request;
-				unplug = 1;
 				generic_make_request(bio);
 			}
 		}
 		cond_resched();
 	}
-	if (unplug)
-		unplug_slaves(mddev);
 }
 
 
@@ -2376,7 +2328,6 @@ static int run(mddev_t *mddev)
 	md_set_array_sectors(mddev, size);
 	mddev->resync_max_sectors = size;
 
-	mddev->queue->unplug_fn = raid10_unplug;
 	mddev->queue->backing_dev_info.congested_fn = raid10_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
 
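[Editor's note] On how md_kick_device() is consumed above: it is passed as the cmd argument of md's private wait_event_lock_irq(wq, condition, lock, cmd) macro, which in this era drops the lock and runs cmd before each sleep. Assuming that behavior, the converted wait_barrier() loop works roughly like this sketch:

	spin_lock_irq(&conf->resync_lock);
	wait_event_lock_irq(conf->wait_barrier,
			    !conf->barrier,		  /* wake condition */
			    conf->resync_lock,		  /* released while sleeping */
			    md_kick_device(conf->mddev)); /* before each sleep: flush
							   * this task's on-stack plug
							   * and wake the md thread so
							   * pending I/O can progress */
	spin_unlock_irq(&conf->resync_lock);

This preserves the old raid10_unplug() semantics (kick queued I/O, wake the md thread) without referencing any per-queue plug.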