commit     ed6b6dc7c142bfb5840d201a9b4a465c9e56ce33
tree       538a3ec7ba62f5e668be7f0bd681857c1bd73da2
parent     2eaa9cfdf33b8d7fb7aff27792192e0019ae8fc6
parent     a506aedc51093544ff0f9610af6066d18cb6abbe
author     Jens Axboe <jens.axboe@oracle.com>  2010-04-02 02:43:33 -0400
committer  Jens Axboe <jens.axboe@oracle.com>  2010-04-02 02:43:33 -0400
Merge branch 'for-linus' into for-2.6.35
Diffstat (limited to 'block')
 -rw-r--r--  block/Kconfig         3
 -rw-r--r--  block/blk-settings.c  11
 -rw-r--r--  block/blk-sysfs.c     25
 -rw-r--r--  block/cfq-iosched.c   29
 -rw-r--r--  block/elevator.c      2
 5 files changed, 50 insertions(+), 20 deletions(-)
diff --git a/block/Kconfig b/block/Kconfig
index 62a5921321cd..f9e89f4d94bb 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -78,8 +78,9 @@ config BLK_DEV_INTEGRITY
 	  Protection. If in doubt, say N.
 
 config BLK_CGROUP
-	tristate
+	tristate "Block cgroup support"
 	depends on CGROUPS
+	depends on CFQ_GROUP_IOSCHED
 	default n
 	---help---
 	  Generic block IO controller cgroup interface. This is the common
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 31e7a9375c13..4c4700dca56a 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -8,6 +8,7 @@
 #include <linux/blkdev.h>
 #include <linux/bootmem.h>	/* for max_pfn/max_low_pfn */
 #include <linux/gcd.h>
+#include <linux/lcm.h>
 #include <linux/jiffies.h>
 
 #include "blk.h"
@@ -461,16 +462,6 @@ void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
 }
 EXPORT_SYMBOL(blk_queue_stack_limits);
 
-static unsigned int lcm(unsigned int a, unsigned int b)
-{
-	if (a && b)
-		return (a * b) / gcd(a, b);
-	else if (b)
-		return b;
-
-	return a;
-}
-
 /**
  * blk_stack_limits - adjust queue_limits for stacked devices
  * @t:	the stacking driver limits (top device)
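
[Editor's note] The open-coded helper removed here is the classic gcd-based lcm; the kernel now picks it up from <linux/lcm.h> instead of keeping a private copy in blk-settings.c. A minimal userspace sketch of the same computation; blk_stack_limits() is assumed to use lcm() to combine limits such as io_opt when stacking devices (the call sites are not part of this diff), and the demo values are made up:

/*
 * Sketch of the lcm() helper that now lives in <linux/lcm.h>.
 */
#include <stdio.h>

static unsigned int gcd(unsigned int a, unsigned int b)
{
	while (b) {
		unsigned int r = a % b;
		a = b;
		b = r;
	}
	return a;
}

static unsigned int lcm(unsigned int a, unsigned int b)
{
	if (a && b)
		return (a / gcd(a, b)) * b;	/* divide first to postpone overflow */
	return a | b;				/* if either is zero, return the other */
}

int main(void)
{
	/* e.g. stacking a 64 KiB-optimal device on a 48 KiB-optimal one */
	printf("lcm(65536, 49152) = %u\n", lcm(65536, 49152));
	return 0;
}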
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 2ae2cb3f362f..4426739fb757 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -106,6 +106,19 @@ static ssize_t queue_max_sectors_show(struct request_queue *q, char *page)
 	return queue_var_show(max_sectors_kb, (page));
 }
 
+static ssize_t queue_max_segments_show(struct request_queue *q, char *page)
+{
+	return queue_var_show(queue_max_segments(q), (page));
+}
+
+static ssize_t queue_max_segment_size_show(struct request_queue *q, char *page)
+{
+	if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags))
+		return queue_var_show(queue_max_segment_size(q), (page));
+
+	return queue_var_show(PAGE_CACHE_SIZE, (page));
+}
+
 static ssize_t queue_logical_block_size_show(struct request_queue *q, char *page)
 {
 	return queue_var_show(queue_logical_block_size(q), page);
@@ -280,6 +293,16 @@ static struct queue_sysfs_entry queue_max_hw_sectors_entry = {
 	.show = queue_max_hw_sectors_show,
 };
 
+static struct queue_sysfs_entry queue_max_segments_entry = {
+	.attr = {.name = "max_segments", .mode = S_IRUGO },
+	.show = queue_max_segments_show,
+};
+
+static struct queue_sysfs_entry queue_max_segment_size_entry = {
+	.attr = {.name = "max_segment_size", .mode = S_IRUGO },
+	.show = queue_max_segment_size_show,
+};
+
 static struct queue_sysfs_entry queue_iosched_entry = {
 	.attr = {.name = "scheduler", .mode = S_IRUGO | S_IWUSR },
 	.show = elv_iosched_show,
@@ -355,6 +378,8 @@ static struct attribute *default_attrs[] = {
 	&queue_ra_entry.attr,
 	&queue_max_hw_sectors_entry.attr,
 	&queue_max_sectors_entry.attr,
+	&queue_max_segments_entry.attr,
+	&queue_max_segment_size_entry.attr,
 	&queue_iosched_entry.attr,
 	&queue_hw_sector_size_entry.attr,
 	&queue_logical_block_size_entry.attr,
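
[Editor's note] The two new attributes surface as read-only files (S_IRUGO) under /sys/block/<dev>/queue/. A small userspace sketch to read them back; the device name "sda" is a placeholder:

/*
 * Read back the max_segments / max_segment_size attributes added above.
 */
#include <stdio.h>

static void show(const char *path)
{
	char buf[64];
	FILE *f = fopen(path, "r");

	if (f) {
		if (fgets(buf, sizeof(buf), f))
			printf("%s: %s", path, buf);	/* value ends in '\n' */
		fclose(f);
	}
}

int main(void)
{
	show("/sys/block/sda/queue/max_segments");
	show("/sys/block/sda/queue/max_segment_size");
	return 0;
}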
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index dee9d9378fee..2c7a0f4f3cd7 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -47,6 +47,7 @@ static const int cfq_hist_divisor = 4;
 #define CFQ_SERVICE_SHIFT	12
 
 #define CFQQ_SEEK_THR		(sector_t)(8 * 100)
+#define CFQQ_CLOSE_THR		(sector_t)(8 * 1024)
 #define CFQQ_SECT_THR_NONROT	(sector_t)(2 * 32)
 #define CFQQ_SEEKY(cfqq)	(hweight32(cfqq->seek_history) > 32/8)
52 53
@@ -1517,7 +1518,8 @@ static void __cfq_set_active_queue(struct cfq_data *cfqd,
 				      struct cfq_queue *cfqq)
 {
 	if (cfqq) {
-		cfq_log_cfqq(cfqd, cfqq, "set_active");
+		cfq_log_cfqq(cfqd, cfqq, "set_active wl_prio:%d wl_type:%d",
+				cfqd->serving_prio, cfqd->serving_type);
 		cfqq->slice_start = 0;
 		cfqq->dispatch_start = jiffies;
 		cfqq->allocated_slice = 0;
@@ -1660,9 +1662,9 @@ static inline sector_t cfq_dist_from_last(struct cfq_data *cfqd,
 }
 
 static inline int cfq_rq_close(struct cfq_data *cfqd, struct cfq_queue *cfqq,
-			       struct request *rq, bool for_preempt)
+			       struct request *rq)
 {
-	return cfq_dist_from_last(cfqd, rq) <= CFQQ_SEEK_THR;
+	return cfq_dist_from_last(cfqd, rq) <= CFQQ_CLOSE_THR;
 }
 
 static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
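
[Editor's note] This hunk decouples two thresholds: CFQQ_SEEK_THR keeps classifying seeky queues, while the new, much larger CFQQ_CLOSE_THR (8 * 1024 sectors, i.e. 4 MiB) decides whether a request is "close", so the for_preempt special case disappears and all callers use one test. A standalone sketch of the resulting check, assuming cfq_dist_from_last() returns the absolute sector distance from the last dispatched position:

/*
 * Sketch of the close-request test. Distances are in 512-byte sectors.
 */
#include <stdint.h>
#include <stdbool.h>

typedef uint64_t sector_t;

#define CFQQ_CLOSE_THR	((sector_t)(8 * 1024))	/* 8192 sectors = 4 MiB */

static sector_t dist_from_last(sector_t last_pos, sector_t rq_pos)
{
	return rq_pos >= last_pos ? rq_pos - last_pos : last_pos - rq_pos;
}

static bool rq_close(sector_t last_pos, sector_t rq_pos)
{
	return dist_from_last(last_pos, rq_pos) <= CFQQ_CLOSE_THR;
}

int main(void)
{
	/* 4000 sectors away: close; 10000 sectors away: not close */
	return rq_close(100000, 104000) && !rq_close(100000, 110000) ? 0 : 1;
}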
@@ -1689,7 +1691,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 	 * will contain the closest sector.
 	 */
 	__cfqq = rb_entry(parent, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 		return __cfqq;
 
 	if (blk_rq_pos(__cfqq->next_rq) < sector)
@@ -1700,7 +1702,7 @@ static struct cfq_queue *cfqq_close(struct cfq_data *cfqd,
 		return NULL;
 
 	__cfqq = rb_entry(node, struct cfq_queue, p_node);
-	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq, false))
+	if (cfq_rq_close(cfqd, cur_cfqq, __cfqq->next_rq))
 		return __cfqq;
 
 	return NULL;
@@ -1721,6 +1723,8 @@ static struct cfq_queue *cfq_close_cooperator(struct cfq_data *cfqd,
 {
 	struct cfq_queue *cfqq;
 
+	if (cfq_class_idle(cur_cfqq))
+		return NULL;
 	if (!cfq_cfqq_sync(cur_cfqq))
 		return NULL;
 	if (CFQQ_SEEKY(cur_cfqq))
@@ -1787,7 +1791,11 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
 	 * Otherwise, we do only if they are the last ones
 	 * in their service tree.
 	 */
-	return service_tree->count == 1 && cfq_cfqq_sync(cfqq);
+	if (service_tree->count == 1 && cfq_cfqq_sync(cfqq))
+		return 1;
+	cfq_log_cfqq(cfqd, cfqq, "Not idling. st->count:%d",
+			service_tree->count);
+	return 0;
 }
 
 static void cfq_arm_slice_timer(struct cfq_data *cfqd)
@@ -1832,8 +1840,11 @@ static void cfq_arm_slice_timer(struct cfq_data *cfqd)
 	 * time slice.
 	 */
 	if (sample_valid(cic->ttime_samples) &&
-	    (cfqq->slice_end - jiffies < cic->ttime_mean))
+	    (cfqq->slice_end - jiffies < cic->ttime_mean)) {
+		cfq_log_cfqq(cfqd, cfqq, "Not idling. think_time:%d",
+				cic->ttime_mean);
 		return;
+	}
 
 	cfq_mark_cfqq_wait_request(cfqq);
 
@@ -2041,6 +2052,7 @@ static void choose_service_tree(struct cfq_data *cfqd, struct cfq_group *cfqg)
 		slice = max(slice, 2 * cfqd->cfq_slice_idle);
 
 	slice = max_t(unsigned, slice, CFQ_MIN_TT);
+	cfq_log(cfqd, "workload slice:%d", slice);
 	cfqd->workload_expires = jiffies + slice;
 	cfqd->noidle_tree_requires_idle = false;
 }
@@ -3103,7 +3115,7 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	 * if this request is as-good as one we would expect from the
 	 * current cfqq, let it preempt
 	 */
-	if (cfq_rq_close(cfqd, cfqq, rq, true))
+	if (cfq_rq_close(cfqd, cfqq, rq))
 		return true;
 
 	return false;
@@ -3307,6 +3319,7 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	if (cfq_should_wait_busy(cfqd, cfqq)) {
 		cfqq->slice_end = jiffies + cfqd->cfq_slice_idle;
 		cfq_mark_cfqq_wait_busy(cfqq);
+		cfq_log_cfqq(cfqd, cfqq, "will busy wait");
 	}
 
 	/*
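
[Editor's note] Most of the cfq-iosched.c changes above are instrumentation: cfq_log()/cfq_log_cfqq() emit blktrace messages at the points where the scheduler declines to idle or commits to a workload slice, so a trace explains why a decision was taken. The pattern, reduced to a compilable sketch with illustrative names (in the kernel these macros wrap blk_add_trace_msg()):

/*
 * Log the reason at every early-out of a decision function.
 */
#include <stdio.h>
#include <stdbool.h>

struct service_tree { int count; };

static bool should_idle(const struct service_tree *st, bool sync)
{
	if (st->count == 1 && sync)
		return true;
	fprintf(stderr, "Not idling. st->count:%d\n", st->count);
	return false;
}

int main(void)
{
	struct service_tree st = { .count = 3 };

	should_idle(&st, true);		/* emits "Not idling. st->count:3" */
	return 0;
}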
diff --git a/block/elevator.c b/block/elevator.c
index df75676f6671..76e3702d5381 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -154,7 +154,7 @@ static struct elevator_type *elevator_get(const char *name)
 
 	spin_unlock(&elv_list_lock);
 
-	sprintf(elv, "%s-iosched", name);
+	snprintf(elv, sizeof(elv), "%s-iosched", name);
 
 	request_module("%s", elv);
 	spin_lock(&elv_list_lock);
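
[Editor's note] The elevator.c one-liner is a hardening fix: name originates from userspace (the queue/scheduler sysfs file and the elevator= boot parameter), so sprintf() into a fixed on-stack buffer could overrun it on an oversized name, whereas snprintf() truncates. A userspace sketch of the bounded formatting; the ELV_NAME_MAX-based sizing mirrors the kernel buffer only by assumption:

/*
 * Bounded formatting of the module name handed to request_module().
 */
#include <stdio.h>

#define ELV_NAME_MAX	16

int main(void)
{
	char elv[ELV_NAME_MAX + sizeof("-iosched")];
	const char *name = "cfq";	/* imagine an overlong string here */

	/* writes at most sizeof(elv) - 1 chars plus the terminating NUL */
	snprintf(elv, sizeof(elv), "%s-iosched", name);
	printf("request_module(\"%s\")\n", elv);
	return 0;
}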