diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-08-01 12:06:47 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-08-01 12:06:47 -0400 |
commit | eff0d13f3823f35d70228cd151d2a2c89288ff32 (patch) | |
tree | 55bff4dbcc43c4b0f38509ac4de585c0c457980c /drivers/md | |
parent | 8cf1a3fce0b95050b63d451c9d561da0da2aa4d6 (diff) | |
parent | 10af8138eb6d09d6a768e43ef1aa9b2f16b8c7be (diff) |
Merge branch 'for-3.6/drivers' of git://git.kernel.dk/linux-block
Pull block driver changes from Jens Axboe:
- Making the plugging support for drivers a bit more sane from Neil.
This supersedes the plugging change from Shaohua as well.
- The usual round of drbd updates.
- Using a tail add instead of a head add in the request completion for
nbd, making us find the most completed request more quickly.
- A few floppy changes, getting rid of a duplicated flag and also
running the floppy init async (since it takes forever in boot terms)
from Andi.
* 'for-3.6/drivers' of git://git.kernel.dk/linux-block:
floppy: remove duplicated flag FD_RAW_NEED_DISK
blk: pass from_schedule to non-request unplug functions.
block: stack unplug
blk: centralize non-request unplug handling.
md: remove plug_cnt feature of plugging.
block/nbd: micro-optimization in nbd request completion
drbd: announce FLUSH/FUA capability to upper layers
drbd: fix max_bio_size to be unsigned
drbd: flush drbd work queue before invalidate/invalidate remote
drbd: fix potential access after free
drbd: call local-io-error handler early
drbd: do not reset rs_pending_cnt too early
drbd: reset congestion information before reporting it in /proc/drbd
drbd: report congestion if we are waiting for some userland callback
drbd: differentiate between normal and forced detach
drbd: cleanup, remove two unused global flags
floppy: Run floppy initialization asynchronous
Diffstat (limited to 'drivers/md')
-rw-r--r-- | drivers/md/md.c | 59 | ||||
-rw-r--r-- | drivers/md/md.h | 11 | ||||
-rw-r--r-- | drivers/md/raid1.c | 3 | ||||
-rw-r--r-- | drivers/md/raid10.c | 3 | ||||
-rw-r--r-- | drivers/md/raid5.c | 5 |
5 files changed, 16 insertions, 65 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c index f6c46109b071..fcd098794d37 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) | |||
498 | } | 498 | } |
499 | EXPORT_SYMBOL(md_flush_request); | 499 | EXPORT_SYMBOL(md_flush_request); |
500 | 500 | ||
501 | /* Support for plugging. | 501 | void md_unplug(struct blk_plug_cb *cb, bool from_schedule) |
502 | * This mirrors the plugging support in request_queue, but does not | ||
503 | * require having a whole queue or request structures. | ||
504 | * We allocate an md_plug_cb for each md device and each thread it gets | ||
505 | * plugged on. This links tot the private plug_handle structure in the | ||
506 | * personality data where we keep a count of the number of outstanding | ||
507 | * plugs so other code can see if a plug is active. | ||
508 | */ | ||
509 | struct md_plug_cb { | ||
510 | struct blk_plug_cb cb; | ||
511 | struct mddev *mddev; | ||
512 | }; | ||
513 | |||
514 | static void plugger_unplug(struct blk_plug_cb *cb) | ||
515 | { | 502 | { |
516 | struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); | 503 | struct mddev *mddev = cb->data; |
517 | if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) | 504 | md_wakeup_thread(mddev->thread); |
518 | md_wakeup_thread(mdcb->mddev->thread); | 505 | kfree(cb); |
519 | kfree(mdcb); | ||
520 | } | ||
521 | |||
522 | /* Check that an unplug wakeup will come shortly. | ||
523 | * If not, wakeup the md thread immediately | ||
524 | */ | ||
525 | int mddev_check_plugged(struct mddev *mddev) | ||
526 | { | ||
527 | struct blk_plug *plug = current->plug; | ||
528 | struct md_plug_cb *mdcb; | ||
529 | |||
530 | if (!plug) | ||
531 | return 0; | ||
532 | |||
533 | list_for_each_entry(mdcb, &plug->cb_list, cb.list) { | ||
534 | if (mdcb->cb.callback == plugger_unplug && | ||
535 | mdcb->mddev == mddev) { | ||
536 | /* Already on the list, move to top */ | ||
537 | if (mdcb != list_first_entry(&plug->cb_list, | ||
538 | struct md_plug_cb, | ||
539 | cb.list)) | ||
540 | list_move(&mdcb->cb.list, &plug->cb_list); | ||
541 | return 1; | ||
542 | } | ||
543 | } | ||
544 | /* Not currently on the callback list */ | ||
545 | mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); | ||
546 | if (!mdcb) | ||
547 | return 0; | ||
548 | |||
549 | mdcb->mddev = mddev; | ||
550 | mdcb->cb.callback = plugger_unplug; | ||
551 | atomic_inc(&mddev->plug_cnt); | ||
552 | list_add(&mdcb->cb.list, &plug->cb_list); | ||
553 | return 1; | ||
554 | } | 506 | } |
555 | EXPORT_SYMBOL_GPL(mddev_check_plugged); | 507 | EXPORT_SYMBOL(md_unplug); |
556 | 508 | ||
557 | static inline struct mddev *mddev_get(struct mddev *mddev) | 509 | static inline struct mddev *mddev_get(struct mddev *mddev) |
558 | { | 510 | { |
@@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev) | |||
602 | atomic_set(&mddev->active, 1); | 554 | atomic_set(&mddev->active, 1); |
603 | atomic_set(&mddev->openers, 0); | 555 | atomic_set(&mddev->openers, 0); |
604 | atomic_set(&mddev->active_io, 0); | 556 | atomic_set(&mddev->active_io, 0); |
605 | atomic_set(&mddev->plug_cnt, 0); | ||
606 | spin_lock_init(&mddev->write_lock); | 557 | spin_lock_init(&mddev->write_lock); |
607 | atomic_set(&mddev->flush_pending, 0); | 558 | atomic_set(&mddev->flush_pending, 0); |
608 | init_waitqueue_head(&mddev->sb_wait); | 559 | init_waitqueue_head(&mddev->sb_wait); |
diff --git a/drivers/md/md.h b/drivers/md/md.h index 7b4a3c318cae..f385b038589d 100644 --- a/drivers/md/md.h +++ b/drivers/md/md.h | |||
@@ -266,9 +266,6 @@ struct mddev { | |||
266 | int new_chunk_sectors; | 266 | int new_chunk_sectors; |
267 | int reshape_backwards; | 267 | int reshape_backwards; |
268 | 268 | ||
269 | atomic_t plug_cnt; /* If device is expecting | ||
270 | * more bios soon. | ||
271 | */ | ||
272 | struct md_thread *thread; /* management thread */ | 269 | struct md_thread *thread; /* management thread */ |
273 | struct md_thread *sync_thread; /* doing resync or reconstruct */ | 270 | struct md_thread *sync_thread; /* doing resync or reconstruct */ |
274 | sector_t curr_resync; /* last block scheduled */ | 271 | sector_t curr_resync; /* last block scheduled */ |
@@ -630,6 +627,12 @@ extern struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, | |||
630 | struct mddev *mddev); | 627 | struct mddev *mddev); |
631 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, | 628 | extern struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, |
632 | struct mddev *mddev); | 629 | struct mddev *mddev); |
633 | extern int mddev_check_plugged(struct mddev *mddev); | ||
634 | extern void md_trim_bio(struct bio *bio, int offset, int size); | 630 | extern void md_trim_bio(struct bio *bio, int offset, int size); |
631 | |||
632 | extern void md_unplug(struct blk_plug_cb *cb, bool from_schedule); | ||
633 | static inline int mddev_check_plugged(struct mddev *mddev) | ||
634 | { | ||
635 | return !!blk_check_plugged(md_unplug, mddev, | ||
636 | sizeof(struct blk_plug_cb)); | ||
637 | } | ||
635 | #endif /* _MD_MD_H */ | 638 | #endif /* _MD_MD_H */ |
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 197f62681db5..9f7f8bee8442 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -2247,8 +2247,7 @@ static void raid1d(struct mddev *mddev) | |||
2247 | blk_start_plug(&plug); | 2247 | blk_start_plug(&plug); |
2248 | for (;;) { | 2248 | for (;;) { |
2249 | 2249 | ||
2250 | if (atomic_read(&mddev->plug_cnt) == 0) | 2250 | flush_pending_writes(conf); |
2251 | flush_pending_writes(conf); | ||
2252 | 2251 | ||
2253 | spin_lock_irqsave(&conf->device_lock, flags); | 2252 | spin_lock_irqsave(&conf->device_lock, flags); |
2254 | if (list_empty(head)) { | 2253 | if (list_empty(head)) { |
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index e2549deab7c3..de5ed6fd8806 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -2680,8 +2680,7 @@ static void raid10d(struct mddev *mddev) | |||
2680 | blk_start_plug(&plug); | 2680 | blk_start_plug(&plug); |
2681 | for (;;) { | 2681 | for (;;) { |
2682 | 2682 | ||
2683 | if (atomic_read(&mddev->plug_cnt) == 0) | 2683 | flush_pending_writes(conf); |
2684 | flush_pending_writes(conf); | ||
2685 | 2684 | ||
2686 | spin_lock_irqsave(&conf->device_lock, flags); | 2685 | spin_lock_irqsave(&conf->device_lock, flags); |
2687 | if (list_empty(head)) { | 2686 | if (list_empty(head)) { |
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 259f519814ca..87a2d0bdedd1 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -4562,7 +4562,7 @@ static void raid5d(struct mddev *mddev) | |||
4562 | while (1) { | 4562 | while (1) { |
4563 | struct bio *bio; | 4563 | struct bio *bio; |
4564 | 4564 | ||
4565 | if (atomic_read(&mddev->plug_cnt) == 0 && | 4565 | if ( |
4566 | !list_empty(&conf->bitmap_list)) { | 4566 | !list_empty(&conf->bitmap_list)) { |
4567 | /* Now is a good time to flush some bitmap updates */ | 4567 | /* Now is a good time to flush some bitmap updates */ |
4568 | conf->seq_flush++; | 4568 | conf->seq_flush++; |
@@ -4572,8 +4572,7 @@ static void raid5d(struct mddev *mddev) | |||
4572 | conf->seq_write = conf->seq_flush; | 4572 | conf->seq_write = conf->seq_flush; |
4573 | activate_bit_delay(conf); | 4573 | activate_bit_delay(conf); |
4574 | } | 4574 | } |
4575 | if (atomic_read(&mddev->plug_cnt) == 0) | 4575 | raid5_activate_delayed(conf); |
4576 | raid5_activate_delayed(conf); | ||
4577 | 4576 | ||
4578 | while ((bio = remove_bio_from_retry(conf))) { | 4577 | while ((bio = remove_bio_from_retry(conf))) { |
4579 | int ok; | 4578 | int ok; |