path: root/block
Diffstat (limited to 'block')
-rw-r--r--   block/Kconfig          3
-rw-r--r--   block/blk-cgroup.c     8
-rw-r--r--   block/blk-settings.c  11
-rw-r--r--   block/blk-sysfs.c     25
-rw-r--r--   block/blk-timeout.c   12
-rw-r--r--   block/cfq-iosched.c   43
-rw-r--r--   block/elevator.c       2
7 files changed, 71 insertions, 33 deletions
diff --git a/block/Kconfig b/block/Kconfig
index 62a5921321cd..f9e89f4d94bb 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -78,8 +78,9 @@ config BLK_DEV_INTEGRITY
 	  Protection. If in doubt, say N.
 
 config BLK_CGROUP
-	tristate
+	tristate "Block cgroup support"
 	depends on CGROUPS
+	depends on CFQ_GROUP_IOSCHED
 	default n
 	---help---
 	  Generic block IO controller cgroup interface. This is the common
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5fe03def34b2..2cc682b860ea 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -286,16 +286,16 @@ done:
 static struct cgroup_subsys_state *
 blkiocg_create(struct cgroup_subsys *subsys, struct cgroup *cgroup)
 {
-	struct blkio_cgroup *blkcg, *parent_blkcg;
+	struct blkio_cgroup *blkcg;
+	struct cgroup *parent = cgroup->parent;
 
-	if (!cgroup->parent) {
+	if (!parent) {
 		blkcg = &blkio_root_cgroup;
 		goto done;
 	}
 
 	/* Currently we do not support hierarchy deeper than two level (0,1) */
-	parent_blkcg = cgroup_to_blkio_cgroup(cgroup->parent);
-	if (css_depth(&parent_blkcg->css) > 0)
+	if (parent != cgroup->top_cgroup)
 		return ERR_PTR(-EINVAL);
 
 	blkcg = kzalloc(sizeof(*blkcg), GFP_KERNEL);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index d9a9db5f0a2b..f5ed5a1187ba 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 #include <linux/gcd.h>
+#include <linux/lcm.h>
 #include <linux/jiffies.h>
 #include <linux/gfp.h>
 
@@ -462,16 +463,6 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
-static unsigned int lcm(unsigned int a, unsigned int b)
-{
-	if (a && b)
-		return (a * b) / gcd(a, b);
-	else if (b)
-		return b;
-
-	return a;
-}
-
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t:	the stacking driver limits (top device)
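Note: the local lcm() helper removed here is replaced by the shared <linux/lcm.h> include added in the first hunk, so the queue-limit stacking code in this file keeps the same behaviour. As a rough sketch, the shared helper is assumed to mirror the removed code (the actual lib/ implementation may differ):

#include <linux/gcd.h>
#include <linux/lcm.h>

/* least common multiple, as previously open-coded in blk-settings.c */
unsigned long lcm(unsigned long a, unsigned long b)
{
	if (a && b)
		return (a * b) / gcd(a, b);
	else if (b)
		return b;

	return a;
}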
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index c2b821fa324a..306759bbdf1b 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -107,6 +107,19 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_sectors_kb, (page));
 }
 
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_max_segments(q), (page));
+}
+
+static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
+{
+	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+		return queue_var_show(queue_max_segment_size(q), (page));
+
+	return queue_var_show(PAGE_CACHE_SIZE, (page));
+}
+
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(queue_logical_block_size(q), page);
@@ -281,6 +294,16 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
 	.show = queue_max_hw_sectors_show,
 };
 
+static struct queue_sysfs_entry queue_max_segments_entry = {
+	.attr = {.name = "max_segments", .mode = S_IRUGO },
+	.show = queue_max_segments_show,
+};
+
+static struct queue_sysfs_entry queue_max_segment_size_entry = {
+	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
+	.show = queue_max_segment_size_show,
+};
+
 static struct queue_sysfs_entry queue_iosched_entry = {
 	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
 	.show = elv_iosched_show,
@@ -356,6 +379,8 @@ static struct attribute *default_attrs[] = {
 	&queue_ra_entry.attr,
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
+	&queue_max_segments_entry.attr,
+	&queue_max_segment_size_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
 	&queue_logical_block_size_entry.attr,
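Note: the two new queue_sysfs_entry attributes are read-only (S_IRUGO) and appear as files in the request queue's sysfs directory. A hypothetical userspace check, assuming the usual /sys/block/<dev>/queue/ location and using "sda" purely as an example device:

#include <stdio.h>

static void show_queue_attr(const char *attr)
{
	char path[128], buf[64];
	FILE *f;

	snprintf(path, sizeof(path), "/sys/block/sda/queue/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("%s: %s", attr, buf);
	fclose(f);
}

int main(void)
{
	show_queue_attr("max_segments");
	show_queue_attr("max_segment_size");
	return 0;
}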
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ba7e0aca878..4f0c06c7a338 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data)
 	struct request_queue *q = (struct request_queue *) data;
 	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
+	int next_set = 0;
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
@@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data)
 			if (blk_mark_rq_complete(rq))
 				continue;
 			blk_rq_timed_out(rq);
-		} else if (!next || time_after(next, rq->deadline))
+		} else if (!next_set || time_after(next, rq->deadline)) {
 			next = rq->deadline;
+			next_set = 1;
+		}
 	}
 
-	/*
-	 * next can never be 0 here with the list non-empty, since we always
-	 * bump ->deadline to 1 so we can detect if the timer was ever added
-	 * or not. See comment in blk_add_timer()
-	 */
-	if (next)
+	if (next_set)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);
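Note: the old code used next == 0 as the "no deadline seen" sentinel (relying on deadlines being bumped to at least 1); the rewrite tracks an explicit next_set flag instead, presumably so the timer is re-armed correctly even when a deadline value of 0 can legitimately occur (e.g. across a jiffies wrap). A minimal sketch of the pattern, where earliest() is an illustrative helper and not kernel code:

#include <linux/jiffies.h>

/* Track the earliest deadline with an explicit "seen one" flag
 * instead of treating 0 as "unset". */
static unsigned long earliest(const unsigned long *deadline, int n,
			      int *found)
{
	unsigned long next = 0;
	int i, next_set = 0;

	for (i = 0; i < n; i++) {
		if (!next_set || time_after(next, deadline[i])) {
			next = deadline[i];
			next_set = 1;
		}
	}

	*found = next_set;
	return next;
}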
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index fc98a48554fd..5f127cfb2e92 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -48,6 +48,7 @@ static const int cfq_hist_divisor = 4;
 #define CFQ_SERVICE_SHIFT	12
 
 #define CFQQ_SEEK_THR		(sector_t)(8 * 100)
+#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
 #define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
 #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
 
@@ -948,6 +949,11 @@ cfq_find_alloc_cfqg(struct cfq_data *cfqd, struct cgroup *cgroup, int create)
 	unsigned int major, minor;
 
 	cfqg = cfqg_of_blkg(blkiocg_lookup_group(blkcg, key));
+	if (cfqg && !cfqg->blkg.dev && bdi->dev && dev_name(bdi->dev)) {
+		sscanf(dev_name(bdi->dev), "%u:%u", &major, &minor);
+		cfqg->blkg.dev = MKDEV(major, minor);
+		goto done;
+	}
 	if (cfqg || !create)
 		goto done;
 
@@ -1518,7 +1524,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 				    struct cfq_queue *cfqq)
 {
 	if (cfqq) {
-		cfq_log_cfqq(cfqd, cfqq, "set_active");
+		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
+				cfqd->serving_prio, cfqd->serving_type);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -1661,9 +1668,9 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 }
 
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			       struct request *rq, bool for_preempt)
+			       struct request *rq)
 {
-	return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
+	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
 }
 
 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
@@ -1690,7 +1697,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 	 * will contain the closest sector.
 	 */
 	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 		return __cfqq;
 
 	if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1701,7 +1708,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 		return NULL;
 
 	__cfqq = rb_entry(node, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 		return __cfqq;
 
 	return NULL;
@@ -1722,6 +1729,8 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 {
 	struct cfq_queue *cfqq;
 
+	if (cfq_class_idle(cur_cfqq))
+		return NULL;
 	if (!cfq_cfqq_sync(cur_cfqq))
 		return NULL;
 	if (CFQQ_SEEKY(cur_cfqq))
@@ -1788,7 +1797,11 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Otherwise, we do only if they are the last ones
 	 * in their service tree.
 	 */
-	return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
+	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
+		return 1;
+	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
+			service_tree->count);
+	return 0;
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -1833,8 +1846,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 * time slice.
 	 */
 	if (sample_valid(cic->ttime_samples) &&
-	    (cfqq->slice_end - jiffies < cic->ttime_mean))
+	    (cfqq->slice_end - jiffies < cic->ttime_mean)) {
+		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
+				cic->ttime_mean);
 		return;
+	}
 
 	cfq_mark_cfqq_wait_request(cfqq);
 
@@ -2042,6 +2058,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		slice = max(slice, 2 * cfqd->cfq_slice_idle);
 
 	slice = max_t(unsigned, slice, CFQ_MIN_TT);
+	cfq_log(cfqd, "workload slice:%d", slice);
 	cfqd->workload_expires = jiffies + slice;
 	cfqd->noidle_tree_requires_idle = false;
 }
@@ -2189,10 +2206,13 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 	struct cfq_queue *cfqq;
 	int dispatched = 0;
 
-	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL)
+	/* Expire the timeslice of the current active queue first */
+	cfq_slice_expired(cfqd, 0);
+	while ((cfqq = cfq_get_next_queue_forced(cfqd)) != NULL) {
+		__cfq_set_active_queue(cfqd, cfqq);
 		dispatched += __cfq_forced_dispatch_cfqq(cfqq);
+	}
 
-	cfq_slice_expired(cfqd, 0);
 	BUG_ON(cfqd->busy_queues);
 
 	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
@@ -3104,7 +3124,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
-	if (cfq_rq_close(cfqd, cfqq, rq, true))
+	if (cfq_rq_close(cfqd, cfqq, rq))
 		return true;
 
 	return false;
@@ -3308,6 +3328,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 		if (cfq_should_wait_busy(cfqd, cfqq)) {
 			cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
 			cfq_mark_cfqq_wait_busy(cfqq);
+			cfq_log_cfqq(cfqd, cfqq, "will busy wait");
 		}
 
 		/*
@@ -3673,8 +3694,10 @@ static void *cfq_init_queue(struct request_queue *q)
 	 * to make sure that cfq_put_cfqg() does not try to kfree root group
 	 */
 	atomic_set(&cfqg->ref, 1);
+	rcu_read_lock();
 	blkiocg_add_blkio_group(&blkio_root_cgroup, &cfqg->blkg, (void *)cfqd,
 					0);
+	rcu_read_unlock();
 #endif
 	/*
 	 * Not strictly needed (since RB_ROOT just clears the node and we
diff --git a/block/elevator.c b/block/elevator.c
index df75676f6671..76e3702d5381 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -154,7 +154,7 @@ static struct elevator_type *elevator_get(const char *name)
 
 		spin_unlock(&elv_list_lock);
 
-		sprintf(elv, "%s-iosched", name);
+		snprintf(elv, sizeof(elv), "%s-iosched", name);
 
 		request_module("%s", elv);
 		spin_lock(&elv_list_lock);
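Note: switching from sprintf() to snprintf() bounds the write to the on-stack elv[] buffer, so an overly long scheduler name is truncated rather than overflowing the stack before request_module() is called. A standalone userspace illustration (the buffer size here is invented for the example, not taken from elevator.c):

#include <stdio.h>

int main(void)
{
	char elv[16];

	/* formats at most sizeof(elv) - 1 characters plus the NUL */
	snprintf(elv, sizeof(elv), "%s-iosched", "averylongschedulername");
	printf("%s\n", elv);	/* truncated, no overflow */
	return 0;
}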