diff options
| author | Linus Torvalds <torvalds@linux-foundation.org> | 2011-06-24 11:42:35 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2011-06-24 11:42:35 -0400 |
| commit | 5220cc9382e11ca955ce946ee6a5bac577bb14ff (patch) | |
| tree | 7949f52a5ca0c7fa74ec8e49ba89a00d0e4114a9 | |
| parent | 726ce0656b99ac6436b590d83613fe8447b4769e (diff) | |
| parent | 155d109b5f52ffd749219b27702462dcd9cf4f8d (diff) | |
Merge branch 'for-linus' of git://git.kernel.dk/linux-block
* 'for-linus' of git://git.kernel.dk/linux-block:
block: add REQ_SECURE to REQ_COMMON_MASK
block: use the passed in @bdev when claiming if partno is zero
block: Add __attribute__((format(printf...) and fix fallout
block: make disk_block_events() properly wait for work cancellation
block: remove non-syncing __disk_block_events() and fold it into disk_block_events()
block: don't use non-syncing event blocking in disk_check_events()
cfq-iosched: fix locking around ioc->ioc_data assignment
| -rw-r--r-- | block/blk-throttle.c | 4 | ||||
| -rw-r--r-- | block/cfq-iosched.c | 16 | ||||
| -rw-r--r-- | block/genhd.c | 79 | ||||
| -rw-r--r-- | fs/block_dev.c | 14 | ||||
| -rw-r--r-- | include/linux/blk_types.h | 2 | ||||
| -rw-r--r-- | include/linux/blktrace_api.h | 3 |
6 files changed, 73 insertions, 45 deletions
diff --git a/block/blk-throttle.c b/block/blk-throttle.c index a62be8d0dc1b..3689f833afdc 100644 --- a/block/blk-throttle.c +++ b/block/blk-throttle.c | |||
| @@ -927,7 +927,7 @@ static int throtl_dispatch(struct request_queue *q) | |||
| 927 | 927 | ||
| 928 | bio_list_init(&bio_list_on_stack); | 928 | bio_list_init(&bio_list_on_stack); |
| 929 | 929 | ||
| 930 | throtl_log(td, "dispatch nr_queued=%lu read=%u write=%u", | 930 | throtl_log(td, "dispatch nr_queued=%d read=%u write=%u", |
| 931 | total_nr_queued(td), td->nr_queued[READ], | 931 | total_nr_queued(td), td->nr_queued[READ], |
| 932 | td->nr_queued[WRITE]); | 932 | td->nr_queued[WRITE]); |
| 933 | 933 | ||
| @@ -1204,7 +1204,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop) | |||
| 1204 | } | 1204 | } |
| 1205 | 1205 | ||
| 1206 | queue_bio: | 1206 | queue_bio: |
| 1207 | throtl_log_tg(td, tg, "[%c] bio. bdisp=%u sz=%u bps=%llu" | 1207 | throtl_log_tg(td, tg, "[%c] bio. bdisp=%llu sz=%u bps=%llu" |
| 1208 | " iodisp=%u iops=%u queued=%d/%d", | 1208 | " iodisp=%u iops=%u queued=%d/%d", |
| 1209 | rw == READ ? 'R' : 'W', | 1209 | rw == READ ? 'R' : 'W', |
| 1210 | tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], | 1210 | tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], |
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c index 3c7b537bf908..f3799432676d 100644 --- a/block/cfq-iosched.c +++ b/block/cfq-iosched.c | |||
| @@ -988,9 +988,10 @@ static void cfq_group_served(struct cfq_data *cfqd, struct cfq_group *cfqg, | |||
| 988 | 988 | ||
| 989 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, | 989 | cfq_log_cfqg(cfqd, cfqg, "served: vt=%llu min_vt=%llu", cfqg->vdisktime, |
| 990 | st->min_vdisktime); | 990 | st->min_vdisktime); |
| 991 | cfq_log_cfqq(cfqq->cfqd, cfqq, "sl_used=%u disp=%u charge=%u iops=%u" | 991 | cfq_log_cfqq(cfqq->cfqd, cfqq, |
| 992 | " sect=%u", used_sl, cfqq->slice_dispatch, charge, | 992 | "sl_used=%u disp=%u charge=%u iops=%u sect=%lu", |
| 993 | iops_mode(cfqd), cfqq->nr_sectors); | 993 | used_sl, cfqq->slice_dispatch, charge, |
| 994 | iops_mode(cfqd), cfqq->nr_sectors); | ||
| 994 | cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl, | 995 | cfq_blkiocg_update_timeslice_used(&cfqg->blkg, used_sl, |
| 995 | unaccounted_sl); | 996 | unaccounted_sl); |
| 996 | cfq_blkiocg_set_start_empty_time(&cfqg->blkg); | 997 | cfq_blkiocg_set_start_empty_time(&cfqg->blkg); |
| @@ -2023,8 +2024,8 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd) | |||
| 2023 | */ | 2024 | */ |
| 2024 | if (sample_valid(cic->ttime_samples) && | 2025 | if (sample_valid(cic->ttime_samples) && |
| 2025 | (cfqq->slice_end - jiffies < cic->ttime_mean)) { | 2026 | (cfqq->slice_end - jiffies < cic->ttime_mean)) { |
| 2026 | cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d", | 2027 | cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%lu", |
| 2027 | cic->ttime_mean); | 2028 | cic->ttime_mean); |
| 2028 | return; | 2029 | return; |
| 2029 | } | 2030 | } |
| 2030 | 2031 | ||
| @@ -2772,8 +2773,11 @@ static void __cfq_exit_single_io_context(struct cfq_data *cfqd, | |||
| 2772 | smp_wmb(); | 2773 | smp_wmb(); |
| 2773 | cic->key = cfqd_dead_key(cfqd); | 2774 | cic->key = cfqd_dead_key(cfqd); |
| 2774 | 2775 | ||
| 2775 | if (ioc->ioc_data == cic) | 2776 | if (rcu_dereference(ioc->ioc_data) == cic) { |
| 2777 | spin_lock(&ioc->lock); | ||
| 2776 | rcu_assign_pointer(ioc->ioc_data, NULL); | 2778 | rcu_assign_pointer(ioc->ioc_data, NULL); |
| 2779 | spin_unlock(&ioc->lock); | ||
| 2780 | } | ||
| 2777 | 2781 | ||
| 2778 | if (cic->cfqq[BLK_RW_ASYNC]) { | 2782 | if (cic->cfqq[BLK_RW_ASYNC]) { |
| 2779 | cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); | 2783 | cfq_exit_cfqq(cfqd, cic->cfqq[BLK_RW_ASYNC]); |
diff --git a/block/genhd.c b/block/genhd.c index 95822ae25cfe..3608289c8ecd 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
| @@ -1371,6 +1371,7 @@ struct disk_events { | |||
| 1371 | struct gendisk *disk; /* the associated disk */ | 1371 | struct gendisk *disk; /* the associated disk */ |
| 1372 | spinlock_t lock; | 1372 | spinlock_t lock; |
| 1373 | 1373 | ||
| 1374 | struct mutex block_mutex; /* protects blocking */ | ||
| 1374 | int block; /* event blocking depth */ | 1375 | int block; /* event blocking depth */ |
| 1375 | unsigned int pending; /* events already sent out */ | 1376 | unsigned int pending; /* events already sent out */ |
| 1376 | unsigned int clearing; /* events being cleared */ | 1377 | unsigned int clearing; /* events being cleared */ |
| @@ -1414,22 +1415,44 @@ static unsigned long disk_events_poll_jiffies(struct gendisk *disk) | |||
| 1414 | return msecs_to_jiffies(intv_msecs); | 1415 | return msecs_to_jiffies(intv_msecs); |
| 1415 | } | 1416 | } |
| 1416 | 1417 | ||
| 1417 | static void __disk_block_events(struct gendisk *disk, bool sync) | 1418 | /** |
| 1419 | * disk_block_events - block and flush disk event checking | ||
| 1420 | * @disk: disk to block events for | ||
| 1421 | * | ||
| 1422 | * On return from this function, it is guaranteed that event checking | ||
| 1423 | * isn't in progress and won't happen until unblocked by | ||
| 1424 | * disk_unblock_events(). Events blocking is counted and the actual | ||
| 1425 | * unblocking happens after the matching number of unblocks are done. | ||
| 1426 | * | ||
| 1427 | * Note that this intentionally does not block event checking from | ||
| 1428 | * disk_clear_events(). | ||
| 1429 | * | ||
| 1430 | * CONTEXT: | ||
| 1431 | * Might sleep. | ||
| 1432 | */ | ||
| 1433 | void disk_block_events(struct gendisk *disk) | ||
| 1418 | { | 1434 | { |
| 1419 | struct disk_events *ev = disk->ev; | 1435 | struct disk_events *ev = disk->ev; |
| 1420 | unsigned long flags; | 1436 | unsigned long flags; |
| 1421 | bool cancel; | 1437 | bool cancel; |
| 1422 | 1438 | ||
| 1439 | if (!ev) | ||
| 1440 | return; | ||
| 1441 | |||
| 1442 | /* | ||
| 1443 | * Outer mutex ensures that the first blocker completes canceling | ||
| 1444 | * the event work before further blockers are allowed to finish. | ||
| 1445 | */ | ||
| 1446 | mutex_lock(&ev->block_mutex); | ||
| 1447 | |||
| 1423 | spin_lock_irqsave(&ev->lock, flags); | 1448 | spin_lock_irqsave(&ev->lock, flags); |
| 1424 | cancel = !ev->block++; | 1449 | cancel = !ev->block++; |
| 1425 | spin_unlock_irqrestore(&ev->lock, flags); | 1450 | spin_unlock_irqrestore(&ev->lock, flags); |
| 1426 | 1451 | ||
| 1427 | if (cancel) { | 1452 | if (cancel) |
| 1428 | if (sync) | 1453 | cancel_delayed_work_sync(&disk->ev->dwork); |
| 1429 | cancel_delayed_work_sync(&disk->ev->dwork); | 1454 | |
| 1430 | else | 1455 | mutex_unlock(&ev->block_mutex); |
| 1431 | cancel_delayed_work(&disk->ev->dwork); | ||
| 1432 | } | ||
| 1433 | } | 1456 | } |
| 1434 | 1457 | ||
| 1435 | static void __disk_unblock_events(struct gendisk *disk, bool check_now) | 1458 | static void __disk_unblock_events(struct gendisk *disk, bool check_now) |
| @@ -1461,27 +1484,6 @@ out_unlock: | |||
| 1461 | } | 1484 | } |
| 1462 | 1485 | ||
| 1463 | /** | 1486 | /** |
| 1464 | * disk_block_events - block and flush disk event checking | ||
| 1465 | * @disk: disk to block events for | ||
| 1466 | * | ||
| 1467 | * On return from this function, it is guaranteed that event checking | ||
| 1468 | * isn't in progress and won't happen until unblocked by | ||
| 1469 | * disk_unblock_events(). Events blocking is counted and the actual | ||
| 1470 | * unblocking happens after the matching number of unblocks are done. | ||
| 1471 | * | ||
| 1472 | * Note that this intentionally does not block event checking from | ||
| 1473 | * disk_clear_events(). | ||
| 1474 | * | ||
| 1475 | * CONTEXT: | ||
| 1476 | * Might sleep. | ||
| 1477 | */ | ||
| 1478 | void disk_block_events(struct gendisk *disk) | ||
| 1479 | { | ||
| 1480 | if (disk->ev) | ||
| 1481 | __disk_block_events(disk, true); | ||
| 1482 | } | ||
| 1483 | |||
| 1484 | /** | ||
| 1485 | * disk_unblock_events - unblock disk event checking | 1487 | * disk_unblock_events - unblock disk event checking |
| 1486 | * @disk: disk to unblock events for | 1488 | * @disk: disk to unblock events for |
| 1487 | * | 1489 | * |
| @@ -1508,10 +1510,18 @@ void disk_unblock_events(struct gendisk *disk) | |||
| 1508 | */ | 1510 | */ |
| 1509 | void disk_check_events(struct gendisk *disk) | 1511 | void disk_check_events(struct gendisk *disk) |
| 1510 | { | 1512 | { |
| 1511 | if (disk->ev) { | 1513 | struct disk_events *ev = disk->ev; |
| 1512 | __disk_block_events(disk, false); | 1514 | unsigned long flags; |
| 1513 | __disk_unblock_events(disk, true); | 1515 | |
| 1516 | if (!ev) | ||
| 1517 | return; | ||
| 1518 | |||
| 1519 | spin_lock_irqsave(&ev->lock, flags); | ||
| 1520 | if (!ev->block) { | ||
| 1521 | cancel_delayed_work(&ev->dwork); | ||
| 1522 | queue_delayed_work(system_nrt_wq, &ev->dwork, 0); | ||
| 1514 | } | 1523 | } |
| 1524 | spin_unlock_irqrestore(&ev->lock, flags); | ||
| 1515 | } | 1525 | } |
| 1516 | EXPORT_SYMBOL_GPL(disk_check_events); | 1526 | EXPORT_SYMBOL_GPL(disk_check_events); |
| 1517 | 1527 | ||
| @@ -1546,7 +1556,7 @@ unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask) | |||
| 1546 | spin_unlock_irq(&ev->lock); | 1556 | spin_unlock_irq(&ev->lock); |
| 1547 | 1557 | ||
| 1548 | /* unconditionally schedule event check and wait for it to finish */ | 1558 | /* unconditionally schedule event check and wait for it to finish */ |
| 1549 | __disk_block_events(disk, true); | 1559 | disk_block_events(disk); |
| 1550 | queue_delayed_work(system_nrt_wq, &ev->dwork, 0); | 1560 | queue_delayed_work(system_nrt_wq, &ev->dwork, 0); |
| 1551 | flush_delayed_work(&ev->dwork); | 1561 | flush_delayed_work(&ev->dwork); |
| 1552 | __disk_unblock_events(disk, false); | 1562 | __disk_unblock_events(disk, false); |
| @@ -1664,7 +1674,7 @@ static ssize_t disk_events_poll_msecs_store(struct device *dev, | |||
| 1664 | if (intv < 0 && intv != -1) | 1674 | if (intv < 0 && intv != -1) |
| 1665 | return -EINVAL; | 1675 | return -EINVAL; |
| 1666 | 1676 | ||
| 1667 | __disk_block_events(disk, true); | 1677 | disk_block_events(disk); |
| 1668 | disk->ev->poll_msecs = intv; | 1678 | disk->ev->poll_msecs = intv; |
| 1669 | __disk_unblock_events(disk, true); | 1679 | __disk_unblock_events(disk, true); |
| 1670 | 1680 | ||
| @@ -1750,6 +1760,7 @@ static void disk_add_events(struct gendisk *disk) | |||
| 1750 | INIT_LIST_HEAD(&ev->node); | 1760 | INIT_LIST_HEAD(&ev->node); |
| 1751 | ev->disk = disk; | 1761 | ev->disk = disk; |
| 1752 | spin_lock_init(&ev->lock); | 1762 | spin_lock_init(&ev->lock); |
| 1763 | mutex_init(&ev->block_mutex); | ||
| 1753 | ev->block = 1; | 1764 | ev->block = 1; |
| 1754 | ev->poll_msecs = -1; | 1765 | ev->poll_msecs = -1; |
| 1755 | INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn); | 1766 | INIT_DELAYED_WORK(&ev->dwork, disk_events_workfn); |
| @@ -1770,7 +1781,7 @@ static void disk_del_events(struct gendisk *disk) | |||
| 1770 | if (!disk->ev) | 1781 | if (!disk->ev) |
| 1771 | return; | 1782 | return; |
| 1772 | 1783 | ||
| 1773 | __disk_block_events(disk, true); | 1784 | disk_block_events(disk); |
| 1774 | 1785 | ||
| 1775 | mutex_lock(&disk_events_mutex); | 1786 | mutex_lock(&disk_events_mutex); |
| 1776 | list_del_init(&disk->ev->node); | 1787 | list_del_init(&disk->ev->node); |
diff --git a/fs/block_dev.c b/fs/block_dev.c index 1a2421f908f0..610e8e0b04b8 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c | |||
| @@ -762,7 +762,19 @@ static struct block_device *bd_start_claiming(struct block_device *bdev, | |||
| 762 | if (!disk) | 762 | if (!disk) |
| 763 | return ERR_PTR(-ENXIO); | 763 | return ERR_PTR(-ENXIO); |
| 764 | 764 | ||
| 765 | whole = bdget_disk(disk, 0); | 765 | /* |
| 766 | * Normally, @bdev should equal what's returned from bdget_disk() | ||
| 767 | * if partno is 0; however, some drivers (floppy) use multiple | ||
| 768 | * bdev's for the same physical device and @bdev may be one of the | ||
| 769 | * aliases. Keep @bdev if partno is 0. This means claimer | ||
| 770 | * tracking is broken for those devices but it has always been that | ||
| 771 | * way. | ||
| 772 | */ | ||
| 773 | if (partno) | ||
| 774 | whole = bdget_disk(disk, 0); | ||
| 775 | else | ||
| 776 | whole = bdgrab(bdev); | ||
| 777 | |||
| 766 | module_put(disk->fops->owner); | 778 | module_put(disk->fops->owner); |
| 767 | put_disk(disk); | 779 | put_disk(disk); |
| 768 | if (!whole) | 780 | if (!whole) |
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 2a7cea53ca0d..6395692b2e7a 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h | |||
| @@ -167,7 +167,7 @@ enum rq_flag_bits { | |||
| 167 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) | 167 | (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER) |
| 168 | #define REQ_COMMON_MASK \ | 168 | #define REQ_COMMON_MASK \ |
| 169 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ | 169 | (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \ |
| 170 | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) | 170 | REQ_NOIDLE | REQ_FLUSH | REQ_FUA | REQ_SECURE) |
| 171 | #define REQ_CLONE_MASK REQ_COMMON_MASK | 171 | #define REQ_CLONE_MASK REQ_COMMON_MASK |
| 172 | 172 | ||
| 173 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) | 173 | #define REQ_RAHEAD (1 << __REQ_RAHEAD) |
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h index b22fb0d3db0f..8c7c2de7631a 100644 --- a/include/linux/blktrace_api.h +++ b/include/linux/blktrace_api.h | |||
| @@ -169,7 +169,8 @@ extern void blk_trace_shutdown(struct request_queue *); | |||
| 169 | extern int do_blk_trace_setup(struct request_queue *q, char *name, | 169 | extern int do_blk_trace_setup(struct request_queue *q, char *name, |
| 170 | dev_t dev, struct block_device *bdev, | 170 | dev_t dev, struct block_device *bdev, |
| 171 | struct blk_user_trace_setup *buts); | 171 | struct blk_user_trace_setup *buts); |
| 172 | extern void __trace_note_message(struct blk_trace *, const char *fmt, ...); | 172 | extern __attribute__((format(printf, 2, 3))) |
| 173 | void __trace_note_message(struct blk_trace *, const char *fmt, ...); | ||
| 173 | 174 | ||
| 174 | /** | 175 | /** |
| 175 | * blk_add_trace_msg - Add a (simple) message to the blktrace stream | 176 | * blk_add_trace_msg - Add a (simple) message to the blktrace stream |
