Diffstat (limited to 'drivers/md/md.c')

 -rw-r--r--  drivers/md/md.c | 88
 1 file changed, 47 insertions(+), 41 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b12b3776c0c0..7d6f7f18a920 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -447,48 +447,59 @@ EXPORT_SYMBOL(md_flush_request);
 
 /* Support for plugging.
  * This mirrors the plugging support in request_queue, but does not
- * require having a whole queue
+ * require having a whole queue or request structures.
+ * We allocate an md_plug_cb for each md device and each thread it gets
+ * plugged on.  This links to the private plug_handle structure in the
+ * personality data where we keep a count of the number of outstanding
+ * plugs so other code can see if a plug is active.
  */
-static void plugger_work(struct work_struct *work)
-{
-        struct plug_handle *plug =
-                container_of(work, struct plug_handle, unplug_work);
-        plug->unplug_fn(plug);
-}
-static void plugger_timeout(unsigned long data)
-{
-        struct plug_handle *plug = (void *)data;
-        kblockd_schedule_work(NULL, &plug->unplug_work);
-}
-void plugger_init(struct plug_handle *plug,
-                  void (*unplug_fn)(struct plug_handle *))
-{
-        plug->unplug_flag = 0;
-        plug->unplug_fn = unplug_fn;
-        init_timer(&plug->unplug_timer);
-        plug->unplug_timer.function = plugger_timeout;
-        plug->unplug_timer.data = (unsigned long)plug;
-        INIT_WORK(&plug->unplug_work, plugger_work);
-}
-EXPORT_SYMBOL_GPL(plugger_init);
+struct md_plug_cb {
+        struct blk_plug_cb cb;
+        mddev_t *mddev;
+};
 
-void plugger_set_plug(struct plug_handle *plug)
+static void plugger_unplug(struct blk_plug_cb *cb)
 {
-        if (!test_and_set_bit(PLUGGED_FLAG, &plug->unplug_flag))
-                mod_timer(&plug->unplug_timer, jiffies + msecs_to_jiffies(3)+1);
+        struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
+        if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
+                md_wakeup_thread(mdcb->mddev->thread);
+        kfree(mdcb);
 }
-EXPORT_SYMBOL_GPL(plugger_set_plug);
 
-int plugger_remove_plug(struct plug_handle *plug)
+/* Check that an unplug wakeup will come shortly.
+ * If not, wakeup the md thread immediately
+ */
+int mddev_check_plugged(mddev_t *mddev)
 {
-        if (test_and_clear_bit(PLUGGED_FLAG, &plug->unplug_flag)) {
-                del_timer(&plug->unplug_timer);
-                return 1;
-        } else
-                return 0;
-}
-EXPORT_SYMBOL_GPL(plugger_remove_plug);
+        struct blk_plug *plug = current->plug;
+        struct md_plug_cb *mdcb;
+
+        if (!plug)
+                return 0;
 
+        list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
+                if (mdcb->cb.callback == plugger_unplug &&
+                    mdcb->mddev == mddev) {
+                        /* Already on the list, move to top */
+                        if (mdcb != list_first_entry(&plug->cb_list,
+                                                     struct md_plug_cb,
+                                                     cb.list))
+                                list_move(&mdcb->cb.list, &plug->cb_list);
+                        return 1;
+                }
+        }
+        /* Not currently on the callback list */
+        mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
+        if (!mdcb)
+                return 0;
+
+        mdcb->mddev = mddev;
+        mdcb->cb.callback = plugger_unplug;
+        atomic_inc(&mddev->plug_cnt);
+        list_add(&mdcb->cb.list, &plug->cb_list);
+        return 1;
+}
+EXPORT_SYMBOL_GPL(mddev_check_plugged);
 
 static inline mddev_t *mddev_get(mddev_t *mddev)
 {
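
For orientation, the new helper is intended to be called from a personality's request-submission path: when the submitting task holds a block-layer plug, mddev_check_plugged() registers (or refreshes) an unplug callback and returns 1, so the caller can skip waking the md thread for every bio. The sketch below is illustrative only and is not part of this patch; example_make_request() and the queuing step are hypothetical stand-ins for the per-personality code, which is updated in companion patches.

/*
 * Illustrative sketch, not part of this patch: how a personality's
 * make_request path might use mddev_check_plugged().  The function
 * name and the queuing step are hypothetical.
 */
static int example_make_request(mddev_t *mddev, struct bio *bio)
{
        int plugged = mddev_check_plugged(mddev);

        /* ... add the bio to the personality's pending lists ... */

        if (!plugged)
                /* No plug is active, so no unplug callback will fire;
                 * wake the md thread directly to process the bio. */
                md_wakeup_thread(mddev->thread);
        return 0;
}
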
@@ -538,6 +549,7 @@ void mddev_init(mddev_t *mddev)
         atomic_set(&mddev->active, 1);
         atomic_set(&mddev->openers, 0);
         atomic_set(&mddev->active_io, 0);
+        atomic_set(&mddev->plug_cnt, 0);
         spin_lock_init(&mddev->write_lock);
         atomic_set(&mddev->flush_pending, 0);
         init_waitqueue_head(&mddev->sb_wait);
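
The counter initialised above is what plugger_unplug() decrements; while it is non-zero, at least one plugged task still owes the array an unplug callback. A personality's daemon, run from the md thread, can therefore postpone deferrable work until the count drops to zero, at which point plugger_unplug() wakes the thread again. The sketch below is illustrative only; example_daemon() is a hypothetical stand-in for a personality's per-array worker.

/*
 * Illustrative sketch, not part of this patch: a personality daemon
 * consulting mddev->plug_cnt.  The function name is hypothetical.
 */
static void example_daemon(mddev_t *mddev)
{
        if (atomic_read(&mddev->plug_cnt) == 0) {
                /* No outstanding plugs: flush any deferred work now,
                 * e.g. release delayed requests or unplug the bitmap. */
        }
        /* Otherwise leave the work queued; plugger_unplug() will
         * wake this thread once the last plug is released. */
}
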
@@ -3158,6 +3170,7 @@ level_store(mddev_t *mddev, const char *buf, size_t len)
         mddev->layout = mddev->new_layout;
         mddev->chunk_sectors = mddev->new_chunk_sectors;
         mddev->delta_disks = 0;
+        mddev->degraded = 0;
         if (mddev->pers->sync_request == NULL) {
                 /* this is now an array without redundancy, so
                  * it must always be in_sync
@@ -4723,7 +4736,6 @@ static void md_clean(mddev_t *mddev)
         mddev->bitmap_info.chunksize = 0;
         mddev->bitmap_info.daemon_sleep = 0;
         mddev->bitmap_info.max_write_behind = 0;
-        mddev->plug = NULL;
 }
 
 static void __md_stop_writes(mddev_t *mddev)
@@ -6688,12 +6700,6 @@ int md_allow_write(mddev_t *mddev)
 }
 EXPORT_SYMBOL_GPL(md_allow_write);
 
-void md_unplug(mddev_t *mddev)
-{
-        if (mddev->plug)
-                mddev->plug->unplug_fn(mddev->plug);
-}
-
 #define SYNC_MARKS      10
 #define SYNC_MARK_STEP  (3*HZ)
 void md_do_sync(mddev_t *mddev)
