diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-08-01 12:06:47 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-08-01 12:06:47 -0400 |
commit | eff0d13f3823f35d70228cd151d2a2c89288ff32 (patch) | |
tree | 55bff4dbcc43c4b0f38509ac4de585c0c457980c /drivers/md/md.c | |
parent | 8cf1a3fce0b95050b63d451c9d561da0da2aa4d6 (diff) | |
parent | 10af8138eb6d09d6a768e43ef1aa9b2f16b8c7be (diff) |
Merge branch 'for-3.6/drivers' of git://git.kernel.dk/linux-block
Pull block driver changes from Jens Axboe:
- Making the plugging support for drivers a bit more sane from Neil.
This supersedes the plugging change from Shaohua as well.
- The usual round of drbd updates.
- Using a tail add instead of a head add in the request completion for
nbd, making us find the most completed request more quickly.
- A few floppy changes, getting rid of a duplicated flag and also
running the floppy init async (since it takes forever in boot terms)
from Andi.
* 'for-3.6/drivers' of git://git.kernel.dk/linux-block:
floppy: remove duplicated flag FD_RAW_NEED_DISK
blk: pass from_schedule to non-request unplug functions.
block: stack unplug
blk: centralize non-request unplug handling.
md: remove plug_cnt feature of plugging.
block/nbd: micro-optimization in nbd request completion
drbd: announce FLUSH/FUA capability to upper layers
drbd: fix max_bio_size to be unsigned
drbd: flush drbd work queue before invalidate/invalidate remote
drbd: fix potential access after free
drbd: call local-io-error handler early
drbd: do not reset rs_pending_cnt too early
drbd: reset congestion information before reporting it in /proc/drbd
drbd: report congestion if we are waiting for some userland callback
drbd: differentiate between normal and forced detach
drbd: cleanup, remove two unused global flags
floppy: Run floppy initialization asynchronous
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r-- | drivers/md/md.c | 59 |
1 files changed, 5 insertions, 54 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c index f6c46109b071..fcd098794d37 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -498,61 +498,13 @@ void md_flush_request(struct mddev *mddev, struct bio *bio) | |||
498 | } | 498 | } |
499 | EXPORT_SYMBOL(md_flush_request); | 499 | EXPORT_SYMBOL(md_flush_request); |
500 | 500 | ||
501 | /* Support for plugging. | 501 | void md_unplug(struct blk_plug_cb *cb, bool from_schedule) |
502 | * This mirrors the plugging support in request_queue, but does not | ||
503 | * require having a whole queue or request structures. | ||
504 | * We allocate an md_plug_cb for each md device and each thread it gets | ||
505 | * plugged on. This links tot the private plug_handle structure in the | ||
506 | * personality data where we keep a count of the number of outstanding | ||
507 | * plugs so other code can see if a plug is active. | ||
508 | */ | ||
509 | struct md_plug_cb { | ||
510 | struct blk_plug_cb cb; | ||
511 | struct mddev *mddev; | ||
512 | }; | ||
513 | |||
514 | static void plugger_unplug(struct blk_plug_cb *cb) | ||
515 | { | 502 | { |
516 | struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); | 503 | struct mddev *mddev = cb->data; |
517 | if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) | 504 | md_wakeup_thread(mddev->thread); |
518 | md_wakeup_thread(mdcb->mddev->thread); | 505 | kfree(cb); |
519 | kfree(mdcb); | ||
520 | } | ||
521 | |||
522 | /* Check that an unplug wakeup will come shortly. | ||
523 | * If not, wakeup the md thread immediately | ||
524 | */ | ||
525 | int mddev_check_plugged(struct mddev *mddev) | ||
526 | { | ||
527 | struct blk_plug *plug = current->plug; | ||
528 | struct md_plug_cb *mdcb; | ||
529 | |||
530 | if (!plug) | ||
531 | return 0; | ||
532 | |||
533 | list_for_each_entry(mdcb, &plug->cb_list, cb.list) { | ||
534 | if (mdcb->cb.callback == plugger_unplug && | ||
535 | mdcb->mddev == mddev) { | ||
536 | /* Already on the list, move to top */ | ||
537 | if (mdcb != list_first_entry(&plug->cb_list, | ||
538 | struct md_plug_cb, | ||
539 | cb.list)) | ||
540 | list_move(&mdcb->cb.list, &plug->cb_list); | ||
541 | return 1; | ||
542 | } | ||
543 | } | ||
544 | /* Not currently on the callback list */ | ||
545 | mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); | ||
546 | if (!mdcb) | ||
547 | return 0; | ||
548 | |||
549 | mdcb->mddev = mddev; | ||
550 | mdcb->cb.callback = plugger_unplug; | ||
551 | atomic_inc(&mddev->plug_cnt); | ||
552 | list_add(&mdcb->cb.list, &plug->cb_list); | ||
553 | return 1; | ||
554 | } | 506 | } |
555 | EXPORT_SYMBOL_GPL(mddev_check_plugged); | 507 | EXPORT_SYMBOL(md_unplug); |
556 | 508 | ||
557 | static inline struct mddev *mddev_get(struct mddev *mddev) | 509 | static inline struct mddev *mddev_get(struct mddev *mddev) |
558 | { | 510 | { |
@@ -602,7 +554,6 @@ void mddev_init(struct mddev *mddev) | |||
602 | atomic_set(&mddev->active, 1); | 554 | atomic_set(&mddev->active, 1); |
603 | atomic_set(&mddev->openers, 0); | 555 | atomic_set(&mddev->openers, 0); |
604 | atomic_set(&mddev->active_io, 0); | 556 | atomic_set(&mddev->active_io, 0); |
605 | atomic_set(&mddev->plug_cnt, 0); | ||
606 | spin_lock_init(&mddev->write_lock); | 557 | spin_lock_init(&mddev->write_lock); |
607 | atomic_set(&mddev->flush_pending, 0); | 558 | atomic_set(&mddev->flush_pending, 0); |
608 | init_waitqueue_head(&mddev->sb_wait); | 559 | init_waitqueue_head(&mddev->sb_wait); |