diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-22 20:00:32 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2010-10-22 20:00:32 -0400 |
commit | e9dd2b6837e26fe202708cce5ea4bb4ee3e3482e (patch) | |
tree | f42fd892495bfc4cbb740d06b016d267c9c42d00 /block/blk-core.c | |
parent | 4f3a29dadaf999a273f1e7fe2476595d0283eef3 (diff) | |
parent | b4627321e18582dcbdeb45d77df29d3177107c65 (diff) |
Merge branch 'for-2.6.37/core' of git://git.kernel.dk/linux-2.6-block
* 'for-2.6.37/core' of git://git.kernel.dk/linux-2.6-block: (39 commits)
cfq-iosched: Fix a gcc 4.5 warning and put some comments
block: Turn bvec_k{un,}map_irq() into static inline functions
block: fix accounting bug on cross partition merges
block: Make the integrity mapped property a bio flag
block: Fix double free in blk_integrity_unregister
block: Ensure physical block size is unsigned int
blkio-throttle: Fix possible multiplication overflow in iops calculations
blkio-throttle: limit max iops value to UINT_MAX
blkio-throttle: There is no need to convert jiffies to milli seconds
blkio-throttle: Fix link failure on i386
blkio: Recalculate the throttled bio dispatch time upon throttle limit change
blkio: Add root group to td->tg_list
blkio: deletion of a cgroup was causing oops
blkio: Do not export throttle files if CONFIG_BLK_DEV_THROTTLING=n
block: set the bounce_pfn to the actual DMA limit rather than to max memory
block: revert bad fix for memory hotplug causing bounces
Fix compile error in blk-exec.c for !CONFIG_DETECT_HUNG_TASK
block: set the bounce_pfn to the actual DMA limit rather than to max memory
block: Prevent hang_check firing during long I/O
cfq: improve fsync performance for small files
...
Fix up trivial conflicts due to __rcu sparse annotation in include/linux/genhd.h
Diffstat (limited to 'block/blk-core.c')
-rw-r--r-- | block/blk-core.c | 53 |
1 file changed, 43 insertions, 10 deletions
diff --git a/block/blk-core.c b/block/blk-core.c index 32a1c123dfb3..500eb859886e 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -64,13 +64,15 @@ static void drive_stat_acct(struct request *rq, int new_io) | |||
64 | return; | 64 | return; |
65 | 65 | ||
66 | cpu = part_stat_lock(); | 66 | cpu = part_stat_lock(); |
67 | part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); | ||
68 | 67 | ||
69 | if (!new_io) | 68 | if (!new_io) { |
69 | part = rq->part; | ||
70 | part_stat_inc(cpu, part, merges[rw]); | 70 | part_stat_inc(cpu, part, merges[rw]); |
71 | else { | 71 | } else { |
72 | part = disk_map_sector_rcu(rq->rq_disk, blk_rq_pos(rq)); | ||
72 | part_round_stats(cpu, part); | 73 | part_round_stats(cpu, part); |
73 | part_inc_in_flight(part, rw); | 74 | part_inc_in_flight(part, rw); |
75 | rq->part = part; | ||
74 | } | 76 | } |
75 | 77 | ||
76 | part_stat_unlock(); | 78 | part_stat_unlock(); |
@@ -128,6 +130,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq) | |||
128 | rq->ref_count = 1; | 130 | rq->ref_count = 1; |
129 | rq->start_time = jiffies; | 131 | rq->start_time = jiffies; |
130 | set_start_time_ns(rq); | 132 | set_start_time_ns(rq); |
133 | rq->part = NULL; | ||
131 | } | 134 | } |
132 | EXPORT_SYMBOL(blk_rq_init); | 135 | EXPORT_SYMBOL(blk_rq_init); |
133 | 136 | ||
@@ -382,6 +385,7 @@ void blk_sync_queue(struct request_queue *q) | |||
382 | del_timer_sync(&q->unplug_timer); | 385 | del_timer_sync(&q->unplug_timer); |
383 | del_timer_sync(&q->timeout); | 386 | del_timer_sync(&q->timeout); |
384 | cancel_work_sync(&q->unplug_work); | 387 | cancel_work_sync(&q->unplug_work); |
388 | throtl_shutdown_timer_wq(q); | ||
385 | } | 389 | } |
386 | EXPORT_SYMBOL(blk_sync_queue); | 390 | EXPORT_SYMBOL(blk_sync_queue); |
387 | 391 | ||
@@ -459,6 +463,8 @@ void blk_cleanup_queue(struct request_queue *q) | |||
459 | if (q->elevator) | 463 | if (q->elevator) |
460 | elevator_exit(q->elevator); | 464 | elevator_exit(q->elevator); |
461 | 465 | ||
466 | blk_throtl_exit(q); | ||
467 | |||
462 | blk_put_queue(q); | 468 | blk_put_queue(q); |
463 | } | 469 | } |
464 | EXPORT_SYMBOL(blk_cleanup_queue); | 470 | EXPORT_SYMBOL(blk_cleanup_queue); |
@@ -515,6 +521,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id) | |||
515 | return NULL; | 521 | return NULL; |
516 | } | 522 | } |
517 | 523 | ||
524 | if (blk_throtl_init(q)) { | ||
525 | kmem_cache_free(blk_requestq_cachep, q); | ||
526 | return NULL; | ||
527 | } | ||
528 | |||
518 | setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, | 529 | setup_timer(&q->backing_dev_info.laptop_mode_wb_timer, |
519 | laptop_mode_timer_fn, (unsigned long) q); | 530 | laptop_mode_timer_fn, (unsigned long) q); |
520 | init_timer(&q->unplug_timer); | 531 | init_timer(&q->unplug_timer); |
@@ -796,11 +807,16 @@ static struct request *get_request(struct request_queue *q, int rw_flags, | |||
796 | rl->starved[is_sync] = 0; | 807 | rl->starved[is_sync] = 0; |
797 | 808 | ||
798 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); | 809 | priv = !test_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags); |
799 | if (priv) | 810 | if (priv) { |
800 | rl->elvpriv++; | 811 | rl->elvpriv++; |
801 | 812 | ||
802 | if (blk_queue_io_stat(q)) | 813 | /* |
803 | rw_flags |= REQ_IO_STAT; | 814 | * Don't do stats for non-priv requests |
815 | */ | ||
816 | if (blk_queue_io_stat(q)) | ||
817 | rw_flags |= REQ_IO_STAT; | ||
818 | } | ||
819 | |||
804 | spin_unlock_irq(q->queue_lock); | 820 | spin_unlock_irq(q->queue_lock); |
805 | 821 | ||
806 | rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); | 822 | rq = blk_alloc_request(q, rw_flags, priv, gfp_mask); |
@@ -1522,6 +1538,15 @@ static inline void __generic_make_request(struct bio *bio) | |||
1522 | goto end_io; | 1538 | goto end_io; |
1523 | } | 1539 | } |
1524 | 1540 | ||
1541 | blk_throtl_bio(q, &bio); | ||
1542 | |||
1543 | /* | ||
1544 | * If bio = NULL, bio has been throttled and will be submitted | ||
1545 | * later. | ||
1546 | */ | ||
1547 | if (!bio) | ||
1548 | break; | ||
1549 | |||
1525 | trace_block_bio_queue(q, bio); | 1550 | trace_block_bio_queue(q, bio); |
1526 | 1551 | ||
1527 | ret = q->make_request_fn(q, bio); | 1552 | ret = q->make_request_fn(q, bio); |
@@ -1612,11 +1637,12 @@ void submit_bio(int rw, struct bio *bio) | |||
1612 | 1637 | ||
1613 | if (unlikely(block_dump)) { | 1638 | if (unlikely(block_dump)) { |
1614 | char b[BDEVNAME_SIZE]; | 1639 | char b[BDEVNAME_SIZE]; |
1615 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s\n", | 1640 | printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", |
1616 | current->comm, task_pid_nr(current), | 1641 | current->comm, task_pid_nr(current), |
1617 | (rw & WRITE) ? "WRITE" : "READ", | 1642 | (rw & WRITE) ? "WRITE" : "READ", |
1618 | (unsigned long long)bio->bi_sector, | 1643 | (unsigned long long)bio->bi_sector, |
1619 | bdevname(bio->bi_bdev, b)); | 1644 | bdevname(bio->bi_bdev, b), |
1645 | count); | ||
1620 | } | 1646 | } |
1621 | } | 1647 | } |
1622 | 1648 | ||
@@ -1759,7 +1785,7 @@ static void blk_account_io_completion(struct request *req, unsigned int bytes) | |||
1759 | int cpu; | 1785 | int cpu; |
1760 | 1786 | ||
1761 | cpu = part_stat_lock(); | 1787 | cpu = part_stat_lock(); |
1762 | part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); | 1788 | part = req->part; |
1763 | part_stat_add(cpu, part, sectors[rw], bytes >> 9); | 1789 | part_stat_add(cpu, part, sectors[rw], bytes >> 9); |
1764 | part_stat_unlock(); | 1790 | part_stat_unlock(); |
1765 | } | 1791 | } |
@@ -1779,7 +1805,7 @@ static void blk_account_io_done(struct request *req) | |||
1779 | int cpu; | 1805 | int cpu; |
1780 | 1806 | ||
1781 | cpu = part_stat_lock(); | 1807 | cpu = part_stat_lock(); |
1782 | part = disk_map_sector_rcu(req->rq_disk, blk_rq_pos(req)); | 1808 | part = req->part; |
1783 | 1809 | ||
1784 | part_stat_inc(cpu, part, ios[rw]); | 1810 | part_stat_inc(cpu, part, ios[rw]); |
1785 | part_stat_add(cpu, part, ticks[rw], duration); | 1811 | part_stat_add(cpu, part, ticks[rw], duration); |
@@ -2579,6 +2605,13 @@ int kblockd_schedule_work(struct request_queue *q, struct work_struct *work) | |||
2579 | } | 2605 | } |
2580 | EXPORT_SYMBOL(kblockd_schedule_work); | 2606 | EXPORT_SYMBOL(kblockd_schedule_work); |
2581 | 2607 | ||
2608 | int kblockd_schedule_delayed_work(struct request_queue *q, | ||
2609 | struct delayed_work *dwork, unsigned long delay) | ||
2610 | { | ||
2611 | return queue_delayed_work(kblockd_workqueue, dwork, delay); | ||
2612 | } | ||
2613 | EXPORT_SYMBOL(kblockd_schedule_delayed_work); | ||
2614 | |||
2582 | int __init blk_dev_init(void) | 2615 | int __init blk_dev_init(void) |
2583 | { | 2616 | { |
2584 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * | 2617 | BUILD_BUG_ON(__REQ_NR_BITS > 8 * |