author     Jens Axboe <jaxboe@fusionio.com>    2011-03-10 02:52:07 -0500
committer  Jens Axboe <jaxboe@fusionio.com>    2011-03-10 02:52:07 -0500
commit     7eaceaccab5f40bbfda044629a6298616aeaed50
tree       33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /drivers/md/raid1.c
parent     73c101011926c5832e6e141682180c4debe2cf45
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging,
and delay users have been converted to use the new API for that.
So let's kill off the old plugging along with aops->sync_page().
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
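For readers coming to this commit later: the "explicit on-stack plugging" the message refers to is the blk_plug API introduced by this same series. Instead of each request_queue carrying plug state, the submitter batches requests on a plug that lives on its own stack, and the block layer flushes that plug when the batch ends or the task blocks. A minimal caller-side sketch (illustrative only; the helper and its arguments are invented for the example, not part of this patch):

```c
#include <linux/blkdev.h>

/*
 * Illustrative sketch of the new on-stack plugging pattern this
 * series converts callers to (helper invented for the example).
 * Bios submitted between blk_start_plug() and blk_finish_plug()
 * are held on the task's stack-local plug and dispatched as a
 * batch; the scheduler also flushes the plug if the task sleeps
 * in between.
 */
static void submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* begin batching on our stack */
        for (i = 0; i < nr; i++)
                generic_make_request(bios[i]);
        blk_finish_plug(&plug);         /* dispatch the whole batch */
}
```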
Diffstat (limited to 'drivers/md/raid1.c')

-rw-r--r--  drivers/md/raid1.c | 83
1 file changed, 17 insertions(+), 66 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a23ffa397ba9..b67d822d57ae 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -52,23 +52,16 @@
 #define NR_RAID1_BIOS 256
 
 
-static void unplug_slaves(mddev_t *mddev);
-
 static void allow_barrier(conf_t *conf);
 static void lower_barrier(conf_t *conf);
 
 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data)
 {
         struct pool_info *pi = data;
-        r1bio_t *r1_bio;
         int size = offsetof(r1bio_t, bios[pi->raid_disks]);
 
         /* allocate a r1bio with room for raid_disks entries in the bios array */
-        r1_bio = kzalloc(size, gfp_flags);
-        if (!r1_bio && pi->mddev)
-                unplug_slaves(pi->mddev);
-
-        return r1_bio;
+        return kzalloc(size, gfp_flags);
 }
 
 static void r1bio_pool_free(void *r1_bio, void *data)
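Why was an allocator unplugging queues in the first place? r1bio_pool_alloc() doubles as a mempool callback: under memory pressure the mempool sleeps and retries, and in the old model someone had to kick the member queues so in-flight r1bios could complete and refill the pool. With on-stack plugging, a sleeping task's own plug is flushed for it, so the workaround can go. For context, the callback is wired up roughly like this in raid1's setup path (a sketch paraphrased from the surrounding code of this era, not part of this diff):

```c
/*
 * Sketch of how raid1 registers these callbacks with a mempool
 * (paraphrased from the surrounding setup code; not part of this
 * diff). mempool_alloc() retries on failure, sleeping until some
 * r1bio completes and is freed back into the pool.
 */
conf->r1bio_pool = mempool_create(NR_RAID1_BIOS,
                                  r1bio_pool_alloc,
                                  r1bio_pool_free,
                                  conf->poolinfo);
```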
@@ -91,10 +84,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
         int i, j;
 
         r1_bio = r1bio_pool_alloc(gfp_flags, pi);
-        if (!r1_bio) {
-                unplug_slaves(pi->mddev);
+        if (!r1_bio)
                 return NULL;
-        }
 
         /*
          * Allocate bios : 1 for reading, n-1 for writing
@@ -520,37 +511,6 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
         return new_disk;
 }
 
-static void unplug_slaves(mddev_t *mddev)
-{
-        conf_t *conf = mddev->private;
-        int i;
-
-        rcu_read_lock();
-        for (i=0; i<mddev->raid_disks; i++) {
-                mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
-                if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
-                        struct request_queue *r_queue = bdev_get_queue(rdev->bdev);
-
-                        atomic_inc(&rdev->nr_pending);
-                        rcu_read_unlock();
-
-                        blk_unplug(r_queue);
-
-                        rdev_dec_pending(rdev, mddev);
-                        rcu_read_lock();
-                }
-        }
-        rcu_read_unlock();
-}
-
-static void raid1_unplug(struct request_queue *q)
-{
-        mddev_t *mddev = q->queuedata;
-
-        unplug_slaves(mddev);
-        md_wakeup_thread(mddev->thread);
-}
-
 static int raid1_congested(void *data, int bits)
 {
         mddev_t *mddev = data;
@@ -580,20 +540,16 @@ static int raid1_congested(void *data, int bits)
 
 
 
-static int flush_pending_writes(conf_t *conf)
+static void flush_pending_writes(conf_t *conf)
 {
         /* Any writes that have been queued but are awaiting
          * bitmap updates get flushed here.
-         * We return 1 if any requests were actually submitted.
          */
-        int rv = 0;
-
         spin_lock_irq(&conf->device_lock);
 
         if (conf->pending_bio_list.head) {
                 struct bio *bio;
                 bio = bio_list_get(&conf->pending_bio_list);
-                blk_remove_plug(conf->mddev->queue);
                 spin_unlock_irq(&conf->device_lock);
                 /* flush any pending bitmap writes to
                  * disk before proceeding w/ I/O */
@@ -605,10 +561,14 @@ static int flush_pending_writes(conf_t *conf)
                         generic_make_request(bio);
                         bio = next;
                 }
-                rv = 1;
         } else
                 spin_unlock_irq(&conf->device_lock);
-        return rv;
+}
+
+static void md_kick_device(mddev_t *mddev)
+{
+        blk_flush_plug(current);
+        md_wakeup_thread(mddev->thread);
 }
 
 /* Barriers....
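The new md_kick_device() helper is run as the cmd argument of md's wait_event_lock_irq() macro (still private to drivers/md/md.h at this point), which executes it in the window where the spinlock has been dropped and the task is about to sleep. A simplified sketch of that macro, not verbatim, to show where the helper fires:

```c
/*
 * Simplified sketch of md's wait_event_lock_irq(wq, condition, lock,
 * cmd) from drivers/md/md.h of this era (not verbatim). cmd runs with
 * the lock dropped, right before the task sleeps -- exactly where
 * md_kick_device() flushes the caller's plug and wakes the md thread
 * so the awaited condition can make progress.
 */
#define wait_event_lock_irq(wq, condition, lock, cmd)                  \
do {                                                                   \
        DEFINE_WAIT(__wait);                                           \
                                                                       \
        for (;;) {                                                     \
                prepare_to_wait(&wq, &__wait, TASK_UNINTERRUPTIBLE);   \
                if (condition)                                         \
                        break;                                         \
                spin_unlock_irq(&lock);                                \
                cmd;                                                   \
                schedule();                                            \
                spin_lock_irq(&lock);                                  \
        }                                                              \
        finish_wait(&wq, &__wait);                                     \
} while (0)
```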
@@ -640,8 +600,7 @@ static void raise_barrier(conf_t *conf)
 
         /* Wait until no block IO is waiting */
         wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-                            conf->resync_lock,
-                            raid1_unplug(conf->mddev->queue));
+                            conf->resync_lock, md_kick_device(conf->mddev));
 
         /* block any new IO from starting */
         conf->barrier++;
@@ -649,8 +608,7 @@ static void raise_barrier(conf_t *conf)
         /* Now wait for all pending IO to complete */
         wait_event_lock_irq(conf->wait_barrier,
                             !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-                            conf->resync_lock,
-                            raid1_unplug(conf->mddev->queue));
+                            conf->resync_lock, md_kick_device(conf->mddev));
 
         spin_unlock_irq(&conf->resync_lock);
 }
@@ -672,7 +630,7 @@ static void wait_barrier(conf_t *conf)
                 conf->nr_waiting++;
                 wait_event_lock_irq(conf->wait_barrier, !conf->barrier,
                                     conf->resync_lock,
-                                    raid1_unplug(conf->mddev->queue));
+                                    md_kick_device(conf->mddev));
                 conf->nr_waiting--;
         }
         conf->nr_pending++;
@@ -709,7 +667,7 @@ static void freeze_array(conf_t *conf)
                             conf->nr_pending == conf->nr_queued+1,
                             conf->resync_lock,
                             ({ flush_pending_writes(conf);
-                               raid1_unplug(conf->mddev->queue); }));
+                               md_kick_device(conf->mddev); }));
         spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(conf_t *conf)
@@ -959,7 +917,6 @@ static int make_request(mddev_t *mddev, struct bio * bio)
                 atomic_inc(&r1_bio->remaining);
                 spin_lock_irqsave(&conf->device_lock, flags);
                 bio_list_add(&conf->pending_bio_list, mbio);
-                blk_plug_device(mddev->queue);
                 spin_unlock_irqrestore(&conf->device_lock, flags);
         }
         r1_bio_write_done(r1_bio, bio->bi_vcnt, behind_pages, behind_pages != NULL);
@@ -968,7 +925,7 @@ static int make_request(mddev_t *mddev, struct bio * bio)
         /* In case raid1d snuck in to freeze_array */
         wake_up(&conf->wait_barrier);
 
-        if (do_sync)
+        if (do_sync || !bitmap)
                 md_wakeup_thread(mddev->thread);
 
         return 0;
@@ -1558,7 +1515,6 @@ static void raid1d(mddev_t *mddev)
         unsigned long flags;
         conf_t *conf = mddev->private;
         struct list_head *head = &conf->retry_list;
-        int unplug=0;
         mdk_rdev_t *rdev;
 
         md_check_recovery(mddev);
@@ -1566,7 +1522,7 @@ static void raid1d(mddev_t *mddev)
         for (;;) {
                 char b[BDEVNAME_SIZE];
 
-                unplug += flush_pending_writes(conf);
+                flush_pending_writes(conf);
 
                 spin_lock_irqsave(&conf->device_lock, flags);
                 if (list_empty(head)) {
@@ -1580,10 +1536,9 @@ static void raid1d(mddev_t *mddev)
 
                 mddev = r1_bio->mddev;
                 conf = mddev->private;
-                if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
+                if (test_bit(R1BIO_IsSync, &r1_bio->state))
                         sync_request_write(mddev, r1_bio);
-                        unplug = 1;
-                } else {
+                else {
                         int disk;
 
                         /* we got a read error. Maybe the drive is bad. Maybe just
@@ -1633,14 +1588,11 @@ static void raid1d(mddev_t *mddev)
                                 bio->bi_end_io = raid1_end_read_request;
                                 bio->bi_rw = READ | do_sync;
                                 bio->bi_private = r1_bio;
-                                unplug = 1;
                                 generic_make_request(bio);
                         }
                 }
                 cond_resched();
         }
-        if (unplug)
-                unplug_slaves(mddev);
 }
 
 
@@ -2064,7 +2016,6 @@ static int run(mddev_t *mddev)
 
         md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));
 
-        mddev->queue->unplug_fn = raid1_unplug;
         mddev->queue->backing_dev_info.congested_fn = raid1_congested;
         mddev->queue->backing_dev_info.congested_data = mddev;
         md_integrity_register(mddev);
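A closing note on why md_kick_device() flushes the current task's plug before sleeping: with on-stack plugging, a waiter may itself be holding batched, unissued bios on its own plug, and a condition that depends on that I/O completing could never come true while it sleeps. The series also flushes plugs implicitly when a task schedules; the explicit helper additionally wakes the md thread, which the implicit flush would not do. A hypothetical caller-side view (helper name invented for illustration):

```c
/*
 * Hypothetical illustration (queue_raid1_writes() is invented).
 * Never sleep on a condition that may depend on I/O still held on
 * your own on-stack plug; flush it first, as md_kick_device() does.
 */
struct blk_plug plug;

blk_start_plug(&plug);
queue_raid1_writes(mddev);      /* bios batched on our on-stack plug */
wait_barrier(conf);             /* may sleep in wait_event_lock_irq();
                                 * md_kick_device() runs as its cmd,
                                 * flushing *our* plug so the batched
                                 * bios are issued, then waking raid1d
                                 * to drain pending_bio_list */
blk_finish_plug(&plug);
```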