Diffstat (limited to 'block')
 block/blk-core.c     | 27
 block/blk-settings.c | 43
 block/cfq-iosched.c  |  7
 3 files changed, 47 insertions(+), 30 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index d17d71c71d4f..b06cf5c2a829 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -26,7 +26,6 @@
 #include <linux/swap.h>
 #include <linux/writeback.h>
 #include <linux/task_io_accounting_ops.h>
-#include <linux/blktrace_api.h>
 #include <linux/fault-inject.h>
 
 #define CREATE_TRACE_POINTS
@@ -498,6 +497,11 @@ struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id)
 
 	q->backing_dev_info.unplug_io_fn = blk_backing_dev_unplug;
 	q->backing_dev_info.unplug_io_data = q;
+	q->backing_dev_info.ra_pages =
+			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
+	q->backing_dev_info.state = 0;
+	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
+
 	err = bdi_init(&q->backing_dev_info);
 	if (err) {
 		kmem_cache_free(blk_requestq_cachep, q);
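
As a sanity check on the readahead default added above: VM_MAX_READAHEAD is
expressed in kbytes and PAGE_CACHE_SIZE in bytes, so the expression converts
kbytes to bytes and then to pages. With the common values of 128 and 4096
(both are configuration-dependent assumptions), the arithmetic can be
reproduced in plain C:

#include <stdio.h>

/* Assumed values -- both depend on the kernel configuration. */
#define VM_MAX_READAHEAD 128    /* kbytes */
#define PAGE_CACHE_SIZE  4096   /* bytes */

int main(void)
{
	/* Same expression as the patch: kbytes -> bytes -> pages. */
	unsigned long ra_pages = (VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;

	printf("ra_pages = %lu pages (%lu KiB)\n",
	       ra_pages, ra_pages * (PAGE_CACHE_SIZE / 1024));
	return 0;
}

With those values a queue defaults to 32 pages, i.e. 128 KiB of readahead.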
@@ -884,9 +888,10 @@ EXPORT_SYMBOL(blk_get_request);
 
 /**
  * blk_make_request - given a bio, allocate a corresponding struct request.
- *
+ * @q: target request queue
  * @bio: The bio describing the memory mappings that will be submitted for IO.
  *       It may be a chained-bio properly constructed by block/bio layer.
+ * @gfp_mask: gfp flags to be used for memory allocation
  *
  * blk_make_request is the parallel of generic_make_request for BLOCK_PC
  * type commands. Where the struct request needs to be farther initialized by
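
The two added lines bring the kernel-doc in sync with the function's actual
parameter list (@q and @gfp_mask). A minimal caller might look like the
sketch below; my_queue and my_bio are hypothetical driver state, and the
ERR_PTR convention matches what blk_make_request() returns on allocation
failure:

	struct request *rq;

	/* Wrap an existing (possibly chained) bio in a BLOCK_PC request;
	 * GFP_KERNEL means the allocation may sleep. */
	rq = blk_make_request(my_queue, my_bio, GFP_KERNEL);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	/* The caller still sets up rq->cmd, rq->cmd_len, timeout etc.
	 * before handing the request to the low-level driver. */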
@@ -1872,14 +1877,14 @@ EXPORT_SYMBOL(blk_fetch_request);
 
 /**
  * blk_update_request - Special helper function for request stacking drivers
- * @rq: the request being processed
+ * @req: the request being processed
  * @error: %0 for success, < %0 for error
- * @nr_bytes: number of bytes to complete @rq
+ * @nr_bytes: number of bytes to complete @req
  *
  * Description:
- *     Ends I/O on a number of bytes attached to @rq, but doesn't complete
- *     the request structure even if @rq doesn't have leftover.
- *     If @rq has leftover, sets it up for the next range of segments.
+ *     Ends I/O on a number of bytes attached to @req, but doesn't complete
+ *     the request structure even if @req doesn't have leftover.
+ *     If @req has leftover, sets it up for the next range of segments.
  *
  * This special helper function is only for request stacking drivers
  * (e.g. request-based dm) so that they can handle partial completion.
@@ -2145,7 +2150,7 @@ EXPORT_SYMBOL_GPL(blk_end_request);
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @err: %0 for success, < %0 for error
+ * @error: %0 for success, < %0 for error
  *
  * Description:
  *     Completely finish @rq.
@@ -2166,7 +2171,7 @@ EXPORT_SYMBOL_GPL(blk_end_request_all);
 /**
  * blk_end_request_cur - Helper function to finish the current request chunk.
  * @rq: the request to finish the current chunk for
- * @err: %0 for success, < %0 for error
+ * @error: %0 for success, < %0 for error
  *
  * Description:
  *     Complete the current consecutively mapped chunk from @rq.
@@ -2203,7 +2208,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request);
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @err: %0 for success, < %0 for error
+ * @error: %0 for success, < %0 for error
  *
  * Description:
  *     Completely finish @rq. Must be called with queue lock held.
@@ -2224,7 +2229,7 @@ EXPORT_SYMBOL_GPL(__blk_end_request_all);
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
  * @rq: the request to finish the current chunk for
- * @err: %0 for success, < %0 for error
+ * @error: %0 for success, < %0 for error
  *
  * Description:
  *     Complete the current consecutively mapped chunk from @rq. Must
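
The five hunks above all normalize the second kernel-doc parameter to
@error, matching the real argument name. For orientation, the helper family
splits along two axes -- whole request vs. current chunk, and queue lock held
vs. not. A hedged sketch (my_complete and my_complete_locked are invented
names, not part of this patch):

	static void my_complete(struct request *rq, int error)
	{
		/* Queue lock NOT held: finish the entire request. */
		blk_end_request_all(rq, error);
	}

	static void my_complete_locked(struct request *rq, int error)
	{
		/* Queue lock held: use the __-prefixed variant; the
		 * *_cur() helpers instead complete only the current
		 * consecutively mapped chunk. */
		__blk_end_request_all(rq, error);
	}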
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 1c4df9bf6813..7541ea4bf9fe 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -96,6 +96,31 @@ void blk_queue_lld_busy(struct request_queue *q, lld_busy_fn *fn)
 EXPORT_SYMBOL_GPL(blk_queue_lld_busy);
 
 /**
+ * blk_set_default_limits - reset limits to default values
+ * @limits: the queue_limits structure to reset
+ *
+ * Description:
+ *   Returns a queue_limit struct to its default state. Can be used by
+ *   stacking drivers like DM that stage table swaps and reuse an
+ *   existing device queue.
+ */
+void blk_set_default_limits(struct queue_limits *lim)
+{
+	lim->max_phys_segments = MAX_PHYS_SEGMENTS;
+	lim->max_hw_segments = MAX_HW_SEGMENTS;
+	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
+	lim->max_segment_size = MAX_SEGMENT_SIZE;
+	lim->max_sectors = lim->max_hw_sectors = SAFE_MAX_SECTORS;
+	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
+	lim->bounce_pfn = BLK_BOUNCE_ANY;
+	lim->alignment_offset = 0;
+	lim->io_opt = 0;
+	lim->misaligned = 0;
+	lim->no_cluster = 0;
+}
+EXPORT_SYMBOL(blk_set_default_limits);
+
+/**
  * blk_queue_make_request - define an alternate make_request function for a device
  * @q: the request queue for the device to be affected
  * @mfn: the alternate make_request function
@@ -123,18 +148,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	 * set defaults
 	 */
 	q->nr_requests = BLKDEV_MAX_RQ;
-	blk_queue_max_phys_segments(q, MAX_PHYS_SEGMENTS);
-	blk_queue_max_hw_segments(q, MAX_HW_SEGMENTS);
-	blk_queue_segment_boundary(q, BLK_SEG_BOUNDARY_MASK);
-	blk_queue_max_segment_size(q, MAX_SEGMENT_SIZE);
 
 	q->make_request_fn = mfn;
-	q->backing_dev_info.ra_pages =
-			(VM_MAX_READAHEAD * 1024) / PAGE_CACHE_SIZE;
-	q->backing_dev_info.state = 0;
-	q->backing_dev_info.capabilities = BDI_CAP_MAP_COPY;
-	blk_queue_max_sectors(q, SAFE_MAX_SECTORS);
-	blk_queue_logical_block_size(q, 512);
 	blk_queue_dma_alignment(q, 511);
 	blk_queue_congestion_threshold(q);
 	q->nr_batching = BLK_BATCH_REQ;
@@ -147,6 +162,8 @@ void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
 	q->unplug_timer.function = blk_unplug_timeout;
 	q->unplug_timer.data = (unsigned long)q;
 
+	blk_set_default_limits(&q->limits);
+
 	/*
 	 * by default assume old behaviour and bounce for any highmem page
 	 */
@@ -343,7 +360,7 @@ EXPORT_SYMBOL(blk_queue_physical_block_size);
 /**
  * blk_queue_alignment_offset - set physical block alignment offset
  * @q: the request queue for the device
- * @alignment: alignment offset in bytes
+ * @offset: alignment offset in bytes
  *
  * Description:
  *   Some devices are naturally misaligned to compensate for things like
@@ -362,7 +379,7 @@ EXPORT_SYMBOL(blk_queue_alignment_offset);
 /**
  * blk_queue_io_min - set minimum request size for the queue
  * @q: the request queue for the device
- * @io_min: smallest I/O size in bytes
+ * @min: smallest I/O size in bytes
  *
  * Description:
  *   Some devices have an internal block size bigger than the reported
@@ -385,7 +402,7 @@ EXPORT_SYMBOL(blk_queue_io_min);
 /**
  * blk_queue_io_opt - set optimal request size for the queue
  * @q: the request queue for the device
- * @io_opt: optimal request size in bytes
+ * @opt: optimal request size in bytes
  *
  * Description:
  *   Drivers can call this function to set the preferred I/O request
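
blk_set_default_limits() above pairs with the limit stacking work from the
same kernel series: a stacking driver rebuilding a table resets its
queue_limits to the defaults first and then folds in each underlying device.
A rough sketch -- my_table and my_dev are invented for illustration, while
blk_stack_limits() and bdev_get_queue() are the real helpers:

	struct queue_limits lim;
	struct my_dev *dd;

	blk_set_default_limits(&lim);

	/* Narrow the defaults by each component device's limits;
	 * offset 0 is assumed for simplicity. */
	list_for_each_entry(dd, &my_table->devices, list)
		blk_stack_limits(&lim, &bdev_get_queue(dd->bdev)->limits, 0);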
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index ef2f72d42434..833ec18eaa63 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -122,7 +122,6 @@ struct cfq_data {
 	struct cfq_queue *async_idle_cfqq;
 
 	sector_t last_position;
-	unsigned long last_end_request;
 
 	/*
 	 * tunables, see top of file
@@ -1253,7 +1252,7 @@ static int cfq_forced_dispatch(struct cfq_data *cfqd)
 
 	BUG_ON(cfqd->busy_queues);
 
-	cfq_log(cfqd, "forced_dispatch=%d\n", dispatched);
+	cfq_log(cfqd, "forced_dispatch=%d", dispatched);
 	return dispatched;
 }
 
@@ -2164,9 +2163,6 @@ static void cfq_completed_request(struct request_queue *q, struct request *rq)
 	if (cfq_cfqq_sync(cfqq))
 		cfqd->sync_flight--;
 
-	if (!cfq_class_idle(cfqq))
-		cfqd->last_end_request = now;
-
 	if (sync)
 		RQ_CIC(rq)->last_end_request = now;
 
@@ -2479,7 +2475,6 @@ static void *cfq_init_queue(struct request_queue *q)
 
 	INIT_WORK(&cfqd->unplug_work, cfq_kick_queue);
 
-	cfqd->last_end_request = jiffies;
 	cfqd->cfq_quantum = cfq_quantum;
 	cfqd->cfq_fifo_expire[0] = cfq_fifo_expire[0];
 	cfqd->cfq_fifo_expire[1] = cfq_fifo_expire[1];