Diffstat (limited to 'block')
-rw-r--r--  block/blk-cgroup.c    |  4
-rw-r--r--  block/blk-core.c      | 11
-rw-r--r--  block/blk-ioc.c       |  5
-rw-r--r--  block/blk-settings.c  | 39
-rw-r--r--  block/cfq-iosched.c   | 57
-rw-r--r--  block/genhd.c         |  2
6 files changed, 63 insertions, 55 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 1fa2654db0a6..e7dbbaf5fb3e 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -147,16 +147,16 @@ blkiocg_weight_write(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 		return -EINVAL;
 
 	blkcg = cgroup_to_blkio_cgroup(cgroup);
+	spin_lock(&blkio_list_lock);
 	spin_lock_irq(&blkcg->lock);
 	blkcg->weight = (unsigned int)val;
 	hlist_for_each_entry(blkg, n, &blkcg->blkg_list, blkcg_node) {
-		spin_lock(&blkio_list_lock);
 		list_for_each_entry(blkiop, &blkio_list, list)
 			blkiop->ops.blkio_update_group_weight_fn(blkg,
 							blkcg->weight);
-		spin_unlock(&blkio_list_lock);
 	}
 	spin_unlock_irq(&blkcg->lock);
+	spin_unlock(&blkio_list_lock);
 	return 0;
 }
 
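Note: the blk-cgroup.c hunk above hoists blkio_list_lock outside blkcg->lock, so the policy list is locked once per weight update instead of once per group in the iteration, and every path now nests the two locks in the same order. A minimal user-space sketch of that ordering discipline (illustrative names, not kernel code):

#include <pthread.h>

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER; /* outer */
static pthread_mutex_t obj_lock  = PTHREAD_MUTEX_INITIALIZER; /* inner */
static int weight;

/* Every path that needs both locks takes list_lock first, then obj_lock;
 * keeping a single consistent order is the standard way to rule out
 * ABBA deadlocks between concurrent updaters. */
void update_weight(int val)
{
	pthread_mutex_lock(&list_lock);
	pthread_mutex_lock(&obj_lock);
	weight = val;
	pthread_mutex_unlock(&obj_lock);
	pthread_mutex_unlock(&list_lock);
}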
diff --git a/block/blk-core.c b/block/blk-core.c
index 718897e6d37f..d1a9a0a64f95 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1147,7 +1147,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 	 */
 static inline bool queue_should_plug(struct request_queue *q)
 {
-	return !(blk_queue_nonrot(q) && blk_queue_queuing(q));
+	return !(blk_queue_nonrot(q) && blk_queue_tagged(q));
 }
 
 static int __make_request(struct request_queue *q, struct bio *bio)
@@ -1859,15 +1859,8 @@ void blk_dequeue_request(struct request *rq)
 	 * and to it is freed is accounted as io that is in progress at
 	 * the driver side.
 	 */
-	if (blk_account_rq(rq)) {
+	if (blk_account_rq(rq))
 		q->in_flight[rq_is_sync(rq)]++;
-		/*
-		 * Mark this device as supporting hardware queuing, if
-		 * we have more IOs in flight than 4.
-		 */
-		if (!blk_queue_queuing(q) && queue_in_flight(q) > 4)
-			set_bit(QUEUE_FLAG_CQ, &q->queue_flags);
-	}
 }
 
 /**
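Note: the two blk-core.c hunks retire the dynamic hardware-queuing heuristic. Previously QUEUE_FLAG_CQ was set once more than 4 requests were observed in flight and queue_should_plug() keyed off it; the plug decision now keys off the explicit tagged-queuing flag instead. A user-space sketch of the resulting plug policy (stand-in struct, not the kernel's request_queue):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for blk_queue_nonrot()/blk_queue_tagged() on a real queue. */
struct queue { bool nonrot; bool tagged; };

/* Plug (batch submissions) unless the device is non-rotational and does
 * its own command queuing; such devices gain nothing from delayed dispatch. */
static bool queue_should_plug(const struct queue *q)
{
	return !(q->nonrot && q->tagged);
}

int main(void)
{
	struct queue ssd_ncq = { .nonrot = true,  .tagged = true  };
	struct queue disk    = { .nonrot = false, .tagged = false };

	printf("ssd+ncq: plug=%d\n", queue_should_plug(&ssd_ncq)); /* 0 */
	printf("disk:    plug=%d\n", queue_should_plug(&disk));    /* 1 */
	return 0;
}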
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index cbdabb0dd6d7..98e6bf61b0ac 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -39,8 +39,6 @@ int put_io_context(struct io_context *ioc)
 
 	if (atomic_long_dec_and_test(&ioc->refcount)) {
 		rcu_read_lock();
-		if (ioc->aic && ioc->aic->dtor)
-			ioc->aic->dtor(ioc->aic);
 		cfq_dtor(ioc);
 		rcu_read_unlock();
 
@@ -76,8 +74,6 @@ void exit_io_context(struct task_struct *task)
 	task_unlock(task);
 
 	if (atomic_dec_and_test(&ioc->nr_tasks)) {
-		if (ioc->aic && ioc->aic->exit)
-			ioc->aic->exit(ioc->aic);
 		cfq_exit(ioc);
 
 	}
@@ -97,7 +93,6 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 	ret->ioprio = 0;
 	ret->last_waited = jiffies; /* doesn't matter... */
 	ret->nr_batch_requests = 0; /* because this is 0 */
-	ret->aic = NULL;
 	INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
 	INIT_HLIST_HEAD(&ret->cic_list);
 	ret->ioc_data = NULL;
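Note: the blk-ioc.c hunks remove the last references to ioc->aic, the per-task context of the removed anticipatory I/O scheduler; io_context teardown now runs only the CFQ destructor. A user-space sketch of the put-side pattern these functions use, where exactly one caller sees the final reference and runs teardown (C11 atomics standing in for the kernel's atomic_long_dec_and_test()):

#include <stdatomic.h>
#include <stdbool.h>

struct io_ctx {
	atomic_long refcount;
};

/* Returns true for exactly one caller: the one that dropped the last
 * reference and must therefore destroy the context (in the kernel path
 * above: cfq_dtor() under rcu_read_lock()). */
bool put_io_ctx(struct io_ctx *ioc)
{
	return atomic_fetch_sub(&ioc->refcount, 1) == 1;
}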
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d52d4adc440b..5eeb9e0d256e 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -528,7 +528,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		     sector_t offset)
 {
 	sector_t alignment;
-	unsigned int top, bottom;
+	unsigned int top, bottom, ret = 0;
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -546,6 +546,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
 
+	t->misaligned |= b->misaligned;
+
 	alignment = queue_limit_alignment_offset(b, offset);
 
 	/* Bottom device has different alignment. Check that it is
@@ -558,8 +560,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		bottom = max(b->physical_block_size, b->io_min) + alignment;
 
 		/* Verify that top and bottom intervals line up */
-		if (max(top, bottom) & (min(top, bottom) - 1))
+		if (max(top, bottom) & (min(top, bottom) - 1)) {
 			t->misaligned = 1;
+			ret = -1;
+		}
 	}
 
 	t->logical_block_size = max(t->logical_block_size,
@@ -578,18 +582,21 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
 		t->physical_block_size = t->logical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Minimum I/O a multiple of the physical block size? */
 	if (t->io_min & (t->physical_block_size - 1)) {
 		t->io_min = t->physical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Optimal I/O a multiple of the physical block size? */
 	if (t->io_opt & (t->physical_block_size - 1)) {
 		t->io_opt = 0;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Find lowest common alignment_offset */
@@ -597,8 +604,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		& (max(t->physical_block_size, t->io_min) - 1);
 
 	/* Verify that new alignment_offset is on a logical block boundary */
-	if (t->alignment_offset & (t->logical_block_size - 1))
+	if (t->alignment_offset & (t->logical_block_size - 1)) {
 		t->misaligned = 1;
+		ret = -1;
+	}
 
 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
@@ -626,11 +635,33 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			(t->discard_granularity - 1);
 	}
 
-	return t->misaligned ? -1 : 0;
+	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
 
 /**
+ * bdev_stack_limits - adjust queue limits for stacked drivers
+ * @t:	the stacking driver limits (top device)
+ * @bdev:  the component block_device (bottom)
+ * @start:  first data sector within component device
+ *
+ * Description:
+ *    Merges queue limits for a top device and a block_device.  Returns
+ *    0 if alignment didn't change.  Returns -1 if adding the bottom
+ *    device caused misalignment.
+ */
+int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+		      sector_t start)
+{
+	struct request_queue *bq = bdev_get_queue(bdev);
+
+	start += get_start_sect(bdev);
+
+	return blk_stack_limits(t, &bq->limits, start << 9);
+}
+EXPORT_SYMBOL(bdev_stack_limits);
+
+/**
  * disk_stack_limits - adjust queue limits for stacked drivers
  * @disk:	MD/DM gendisk (top)
  * @bdev:	the underlying block device (bottom)
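Note: blk_stack_limits() now accumulates an explicit ret instead of deriving its return value from t->misaligned. Since t->misaligned is also inherited from the bottom device (the new t->misaligned |= b->misaligned), a device that was already misaligned would otherwise make every subsequent stacking call report failure; ret flags only misalignment introduced by this call. The new bdev_stack_limits() wraps it for block_device components, folding in the partition offset via get_start_sect() and converting sectors to the byte offset blk_stack_limits() expects (start << 9). A hypothetical caller in a stacking driver (illustrative, not from the patch):

/* Merge one component device's limits into a table being built.
 * 'start' is the first data sector the driver uses within bdev;
 * bdev_stack_limits() adds the partition offset itself. */
static void stack_component(struct queue_limits *t,
			    struct block_device *bdev, sector_t start)
{
	if (bdev_stack_limits(t, bdev, start) < 0)
		printk(KERN_WARNING
		       "stacking: component device is misaligned\n");
}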
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 918c7fd9aeb1..023f4e69a337 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -42,16 +42,13 @@ static const int cfq_hist_divisor = 4;
  */
 #define CFQ_MIN_TT		(2)
 
-/*
- * Allow merged cfqqs to perform this amount of seeky I/O before
- * deciding to break the queues up again.
- */
-#define CFQQ_COOP_TOUT		(HZ)
-
 #define CFQ_SLICE_SCALE		(5)
 #define CFQ_HW_QUEUE_MIN	(5)
 #define CFQ_SERVICE_SHIFT       12
 
+#define CFQQ_SEEK_THR		8 * 1024
+#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
+
 #define RQ_CIC(rq)		\
 	((struct cfq_io_context *) (rq)->elevator_private)
 #define RQ_CFQQ(rq)	(struct cfq_queue *) ((rq)->elevator_private2)
@@ -137,7 +134,6 @@ struct cfq_queue {
 	u64 seek_total;
 	sector_t seek_mean;
 	sector_t last_request_pos;
-	unsigned long seeky_start;
 
 	pid_t pid;
 
@@ -314,6 +310,7 @@ enum cfqq_state_flags {
 	CFQ_CFQQ_FLAG_slice_new,	/* no requests dispatched in slice */
 	CFQ_CFQQ_FLAG_sync,		/* synchronous queue */
 	CFQ_CFQQ_FLAG_coop,		/* cfqq is shared */
+	CFQ_CFQQ_FLAG_split_coop,	/* shared cfqq will be splitted */
 	CFQ_CFQQ_FLAG_deep,		/* sync cfqq experienced large depth */
 	CFQ_CFQQ_FLAG_wait_busy,	/* Waiting for next request */
 };
@@ -342,6 +339,7 @@ CFQ_CFQQ_FNS(prio_changed);
 CFQ_CFQQ_FNS(slice_new);
 CFQ_CFQQ_FNS(sync);
 CFQ_CFQQ_FNS(coop);
+CFQ_CFQQ_FNS(split_coop);
 CFQ_CFQQ_FNS(deep);
 CFQ_CFQQ_FNS(wait_busy);
 #undef CFQ_CFQQ_FNS
@@ -1566,6 +1564,15 @@ __cfq_slice_expired(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	cfq_clear_cfqq_wait_busy(cfqq);
 
 	/*
+	 * If this cfqq is shared between multiple processes, check to
+	 * make sure that those processes are still issuing I/Os within
+	 * the mean seek distance.  If not, it may be time to break the
+	 * queues apart again.
+	 */
+	if (cfq_cfqq_coop(cfqq) && CFQQ_SEEKY(cfqq))
+		cfq_mark_cfqq_split_coop(cfqq);
+
+	/*
 	 * store what was left of this slice, if the queue idled/timed out
 	 */
 	if (timed_out && !cfq_cfqq_slice_new(cfqq)) {
@@ -1663,9 +1670,6 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 		return cfqd->last_position - blk_rq_pos(rq);
 }
 
-#define CFQQ_SEEK_THR		8 * 1024
-#define CFQQ_SEEKY(cfqq)	((cfqq)->seek_mean > CFQQ_SEEK_THR)
-
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 			       struct request *rq, bool for_preempt)
 {
@@ -1803,7 +1807,7 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Otherwise, we do only if they are the last ones
 	 * in their service tree.
 	 */
-	return service_tree->count == 1;
+	return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -3000,19 +3004,6 @@ cfq_update_io_seektime(struct cfq_data *cfqd, struct cfq_queue *cfqq,
 	total = cfqq->seek_total + (cfqq->seek_samples/2);
 	do_div(total, cfqq->seek_samples);
 	cfqq->seek_mean = (sector_t)total;
-
-	/*
-	 * If this cfqq is shared between multiple processes, check to
-	 * make sure that those processes are still issuing I/Os within
-	 * the mean seek distance.  If not, it may be time to break the
-	 * queues apart again.
-	 */
-	if (cfq_cfqq_coop(cfqq)) {
-		if (CFQQ_SEEKY(cfqq) && !cfqq->seeky_start)
-			cfqq->seeky_start = jiffies;
-		else if (!CFQQ_SEEKY(cfqq))
-			cfqq->seeky_start = 0;
-	}
 }
 
 /*
@@ -3077,6 +3068,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 		return true;
 
 	/*
+	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+		return false;
+
+	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
 	 */
@@ -3447,14 +3444,6 @@ cfq_merge_cfqqs(struct cfq_data *cfqd, struct cfq_io_context *cic,
 	return cic_to_cfqq(cic, 1);
 }
 
-static int should_split_cfqq(struct cfq_queue *cfqq)
-{
-	if (cfqq->seeky_start &&
-	    time_after(jiffies, cfqq->seeky_start + CFQQ_COOP_TOUT))
-		return 1;
-	return 0;
-}
-
 /*
  * Returns NULL if a new cfqq should be allocated, or the old cfqq if this
  * was the last process referring to said cfqq.
@@ -3463,9 +3452,9 @@ static struct cfq_queue *
 split_cfqq(struct cfq_io_context *cic, struct cfq_queue *cfqq)
 {
 	if (cfqq_process_refs(cfqq) == 1) {
-		cfqq->seeky_start = 0;
 		cfqq->pid = current->pid;
 		cfq_clear_cfqq_coop(cfqq);
+		cfq_clear_cfqq_split_coop(cfqq);
 		return cfqq;
 	}
 
@@ -3504,7 +3493,7 @@ new_queue:
 	/*
 	 * If the queue was seeky for too long, break it apart.
 	 */
-	if (cfq_cfqq_coop(cfqq) && should_split_cfqq(cfqq)) {
+	if (cfq_cfqq_coop(cfqq) && cfq_cfqq_split_coop(cfqq)) {
 		cfq_log_cfqq(cfqd, cfqq, "breaking apart cfqq");
 		cfqq = split_cfqq(cic, cfqq);
 		if (!cfqq)
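Note: the cfq-iosched.c changes replace the seeky_start timestamp and CFQQ_COOP_TOUT timeout with a split_coop flag: a shared (coop) queue that has turned seeky is marked at slice expiry and actually split on the next cfqq lookup, rather than sampling jiffies on every request. The series also requires a sync queue for the last-in-tree idling check and stops non-RT requests preempting an RT timeslice. A user-space sketch of the seekiness test behind CFQQ_SEEKY(), using the same rounded-division smoothing as the seek_mean update above:

#include <stdint.h>
#include <stdio.h>

#define CFQQ_SEEK_THR (8 * 1024)	/* sectors, as in the patch */

/* Mean seek distance with rounding, mirroring:
 *   total = seek_total + seek_samples/2; do_div(total, seek_samples); */
static uint64_t seek_mean(uint64_t seek_total, uint32_t seek_samples)
{
	return (seek_total + seek_samples / 2) / seek_samples;
}

int main(void)
{
	uint64_t mean = seek_mean(100ULL << 20, 10000); /* ~10486 sectors */

	/* A shared queue whose mean exceeds the threshold would now be
	 * flagged split_coop at slice expiry and broken apart on the
	 * next cfqq lookup. */
	printf("mean=%llu seeky=%d\n", (unsigned long long)mean,
	       mean > CFQQ_SEEK_THR);
	return 0;
}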
diff --git a/block/genhd.c b/block/genhd.c
index b11a4ad7d571..d13ba76a169c 100644
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -867,7 +867,7 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
 {
 	struct gendisk *disk = dev_to_disk(dev);
 
-	return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue));
+	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
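Note: the genhd.c change switches the discard_alignment sysfs attribute from "%u" to "%d". queue_discard_alignment() returns a signed int, and in this series a misaligned device is reported as a negative value (-1), which "%u" would have rendered as 4294967295. A minimal illustration of the difference, with a stub carrying the assumed helper semantics:

#include <stdio.h>

/* Stub with the assumed semantics: -1 signals a misaligned device. */
static int queue_discard_alignment(int misaligned, int alignment)
{
	return misaligned ? -1 : alignment;
}

int main(void)
{
	int v = queue_discard_alignment(1, 0);

	printf("%%u prints %u\n", (unsigned int)v); /* 4294967295 (wrong)  */
	printf("%%d prints %d\n", v);               /* -1 (as intended)    */
	return 0;
}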