author		Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 16:39:11 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-17 16:39:11 -0500
commit		9228ff90387e276ad67b10c0eb525c9d6a57d5e9 (patch)
tree		e7c87b68daba7cf7ca4c342c6b52165bd78fbe16 /drivers/md
parent		9360b53661a2c7754517b2925580055bacc8ec38 (diff)
parent		d2ec180c23a5a1bfe34d8638b0342a47c00cf70f (diff)
Merge branch 'for-3.8/drivers' of git://git.kernel.dk/linux-block
Pull block driver update from Jens Axboe:
"Now that the core bits are in, here are the driver bits for 3.8. The
branch contains:
- A huge pile of drbd bits that were dumped from the 3.7 merge
window. Following that, it was made perfectly clear both that
there will be no more over-the-wall pulls and how the situation
on individual pulls can be improved.
- A few cleanups from Akinobu Mita for drbd and cciss.
- Queue improvement for loop from Lukas. This grew into adding a
generic interface for waiting on/checking an event under a specific
lock, allowing that helper to be pulled out of md; loop and drbd
now use it as well (sketched after this quote).
- A few fixes for the xen back/front block drivers from Roger Pau Monne.
- Partition improvements from Stephen Warren, allowing partition UUIDs
to be used as identifiers (example after the shortlog)."
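
The generic helper mentioned in the loop bullet lands in include/linux/wait.h (shortlog entry "wait: add wait_event_lock_irq() interface" below). Its contract, as the md callers in this diff use it: sleep until the condition holds, evaluate the condition with the given spinlock held, and drop the lock with IRQs re-enabled around each schedule(). A minimal caller sketch, modelled on the md_flush_request() hunk further down:

	spin_lock_irq(&mddev->write_lock);
	/* Sleep until no flush is in flight; write_lock is held while
	 * the condition is checked and dropped while we sleep. */
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->write_lock);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->write_lock);

A wait_event_lock_irq_cmd(wq, condition, lock, cmd) variant additionally runs cmd on every pass, after the lock has been dropped and before sleeping; the freeze_array() hunks below depend on it.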
* 'for-3.8/drivers' of git://git.kernel.dk/linux-block: (609 commits)
drbd: update Kconfig to match current dependencies
drbd: Fix drbdsetup wait-connect, wait-sync etc... commands
drbd: close race between drbd_set_role and drbd_connect
drbd: respect no-md-barriers setting also when changed online via disk-options
drbd: Remove obsolete check
drbd: fixup after wait_event_lock_irq() addition to generic code
loop: Limit the number of requests in the bio list
wait: add wait_event_lock_irq() interface
xen-blkfront: free allocated page
xen-blkback: move free persistent grants code
block: partition: msdos: provide UUIDs for partitions
init: reduce PARTUUID min length to 1 from 36
block: store partition_meta_info.uuid as a string
cciss: use check_signature()
cciss: cleanup bitops usage
drbd: use copy_highpage
drbd: if the replication link breaks during handshake, keep retrying
drbd: check return of kmalloc in receive_uuids
drbd: Broadcast sync progress no more often than once per second
drbd: don't try to clear bits once the disk has failed
...
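
The three partition commits above are what the PARTUUID bullet refers to: msdos-labelled disks now expose per-partition UUIDs, partition_meta_info.uuid becomes a plain string, and the command-line parser accepts identifiers shorter than the 36-character GPT form. For an msdos disk the UUID is derived from the 32-bit NT disk signature plus the partition number, so a root device can be named like this (hypothetical signature 0002dd75, first partition):

	root=PARTUUID=0002dd75-01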
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/md.c     |  2
-rw-r--r--	drivers/md/md.h     | 26
-rw-r--r--	drivers/md/raid1.c  | 15
-rw-r--r--	drivers/md/raid10.c | 15
-rw-r--r--	drivers/md/raid5.c  | 12
5 files changed, 20 insertions(+), 50 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index bd8bf0953fe..4843b004c55 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -452,7 +452,7 @@ void md_flush_request(struct mddev *mddev, struct bio *bio)
 	spin_lock_irq(&mddev->write_lock);
 	wait_event_lock_irq(mddev->sb_wait,
 			    !mddev->flush_bio,
-			    mddev->write_lock, /*nothing*/);
+			    mddev->write_lock);
 	mddev->flush_bio = bio;
 	spin_unlock_irq(&mddev->write_lock);
 
diff --git a/drivers/md/md.h b/drivers/md/md.h
index af443ab868d..1e2fc3d9c74 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -551,32 +551,6 @@ struct md_thread {
 
 #define THREAD_WAKEUP 0
 
-#define __wait_event_lock_irq(wq, condition, lock, cmd)		\
-do {									\
-	wait_queue_t __wait;						\
-	init_waitqueue_entry(&__wait, current);				\
-									\
-	add_wait_queue(&wq, &__wait);					\
-	for (;;) {							\
-		set_current_state(TASK_UNINTERRUPTIBLE);		\
-		if (condition)						\
-			break;						\
-		spin_unlock_irq(&lock);					\
-		cmd;							\
-		schedule();						\
-		spin_lock_irq(&lock);					\
-	}								\
-	current->state = TASK_RUNNING;					\
-	remove_wait_queue(&wq, &__wait);				\
-} while (0)
-
-#define wait_event_lock_irq(wq, condition, lock, cmd)			\
-do {									\
-	if (condition)							\
-		break;							\
-	__wait_event_lock_irq(wq, condition, lock, cmd);		\
-} while (0)
-
 static inline void safe_put_page(struct page *p)
 {
 	if (p) put_page(p);
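
The 26 lines removed here are md's private copy of the wait loop; the equivalent now lives in include/linux/wait.h via "wait: add wait_event_lock_irq() interface", split so the common case takes no dummy cmd argument. Roughly this shape (a sketch, not the verbatim generic code):

	#define wait_event_lock_irq_cmd(wq, condition, lock, cmd)	\
	do {								\
		if (condition)						\
			break;						\
		__wait_event_lock_irq(wq, condition, lock, cmd);	\
	} while (0)

	/* Common case: no command to run while the lock is dropped. */
	#define wait_event_lock_irq(wq, condition, lock)		\
		wait_event_lock_irq_cmd(wq, condition, lock, )

Hence every caller in the hunks below either drops the trailing empty argument or switches to the _cmd variant.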
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a0f73092176..d5bddfc4010 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -822,7 +822,7 @@ static void raise_barrier(struct r1conf *conf)
 
 	/* Wait until no block IO is waiting */
 	wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting,
-			    conf->resync_lock, );
+			    conf->resync_lock);
 
 	/* block any new IO from starting */
 	conf->barrier++;
@@ -830,7 +830,7 @@ static void raise_barrier(struct r1conf *conf)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock, );
+			    conf->resync_lock);
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -864,8 +864,7 @@ static void wait_barrier(struct r1conf *conf)
 				    (conf->nr_pending &&
 				     current->bio_list &&
 				     !bio_list_empty(current->bio_list)),
-				    conf->resync_lock,
-			);
+				    conf->resync_lock);
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -898,10 +897,10 @@ static void freeze_array(struct r1conf *conf)
 	spin_lock_irq(&conf->resync_lock);
 	conf->barrier++;
 	conf->nr_waiting++;
-	wait_event_lock_irq(conf->wait_barrier,
+	wait_event_lock_irq_cmd(conf->wait_barrier,
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    flush_pending_writes(conf));
 	spin_unlock_irq(&conf->resync_lock);
 }
 static void unfreeze_array(struct r1conf *conf)
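
freeze_array() is the one raid1 site that genuinely needs the command hook: conf->nr_pending can only fall to conf->nr_queued+1 once the queued writes are issued, so flush_pending_writes(conf) must run on every pass through the wait loop, after resync_lock has been dropped and before the task sleeps; without it the freeze would deadlock waiting for writes that nothing can flush. The raid10 freeze_array() below follows the same pattern.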
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c9acbd71713..64d48249c03 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -952,7 +952,7 @@ static void raise_barrier(struct r10conf *conf, int force)
 
 	/* Wait until no block IO is waiting (unless 'force') */
 	wait_event_lock_irq(conf->wait_barrier, force || !conf->nr_waiting,
-			    conf->resync_lock, );
+			    conf->resync_lock);
 
 	/* block any new IO from starting */
 	conf->barrier++;
@@ -960,7 +960,7 @@ static void raise_barrier(struct r10conf *conf, int force)
 	/* Now wait for all pending IO to complete */
 	wait_event_lock_irq(conf->wait_barrier,
 			    !conf->nr_pending && conf->barrier < RESYNC_DEPTH,
-			    conf->resync_lock, );
+			    conf->resync_lock);
 
 	spin_unlock_irq(&conf->resync_lock);
 }
@@ -993,8 +993,7 @@ static void wait_barrier(struct r10conf *conf)
 				    (conf->nr_pending &&
 				     current->bio_list &&
 				     !bio_list_empty(current->bio_list)),
-				    conf->resync_lock,
-			);
+				    conf->resync_lock);
 		conf->nr_waiting--;
 	}
 	conf->nr_pending++;
@@ -1027,10 +1026,10 @@ static void freeze_array(struct r10conf *conf)
 	spin_lock_irq(&conf->resync_lock);
 	conf->barrier++;
 	conf->nr_waiting++;
-	wait_event_lock_irq(conf->wait_barrier,
+	wait_event_lock_irq_cmd(conf->wait_barrier,
 			    conf->nr_pending == conf->nr_queued+1,
 			    conf->resync_lock,
 			    flush_pending_writes(conf));
 
 	spin_unlock_irq(&conf->resync_lock);
 }
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 3380372c039..8d8555bf3e1 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -466,7 +466,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 	do {
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    conf->quiesce == 0 || noquiesce,
-				    conf->device_lock, /* nothing */);
+				    conf->device_lock);
 		sh = __find_stripe(conf, sector, conf->generation - previous);
 		if (!sh) {
 			if (!conf->inactive_blocked)
@@ -480,8 +480,7 @@ get_active_stripe(struct r5conf *conf, sector_t sector,
 				    (atomic_read(&conf->active_stripes)
 				     < (conf->max_nr_stripes *3/4)
 				     || !conf->inactive_blocked),
-				    conf->device_lock,
-				    );
+				    conf->device_lock);
 			conf->inactive_blocked = 0;
 		} else
 			init_stripe(sh, sector, previous);
@@ -1646,8 +1645,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    !list_empty(&conf->inactive_list),
-				    conf->device_lock,
-				    );
+				    conf->device_lock);
 		osh = get_free_stripe(conf);
 		spin_unlock_irq(&conf->device_lock);
 		atomic_set(&nsh->count, 1);
@@ -4003,7 +4001,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    conf->quiesce == 0,
-				    conf->device_lock, /* nothing */);
+				    conf->device_lock);
 		atomic_inc(&conf->active_aligned_reads);
 		spin_unlock_irq(&conf->device_lock);
 
@@ -6095,7 +6093,7 @@ static void raid5_quiesce(struct mddev *mddev, int state)
 		wait_event_lock_irq(conf->wait_for_stripe,
 				    atomic_read(&conf->active_stripes) == 0 &&
 				    atomic_read(&conf->active_aligned_reads) == 0,
-				    conf->device_lock, /* nothing */);
+				    conf->device_lock);
 		conf->quiesce = 1;
 		spin_unlock_irq(&conf->device_lock);
 		/* allow reshape to continue */