author	Jens Axboe <axboe@kernel.dk>	2018-10-24 05:39:36 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-11-07 15:42:32 -0500
commit	7ca01926463a15f5d2681458643b2453930b873a (patch)
tree	06ea203ffd839dfb7dfe0be9a10287679b898d36
parent	2cdf2caecda6cb16c24c6bdd2484d4cec99cfbb3 (diff)
block: remove legacy rq tagging
It's now unused, kill it.

Reviewed-by: Hannes Reinecke <hare@suse.com>
Tested-by: Ming Lei <ming.lei@redhat.com>
Reviewed-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
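For orientation only, and not part of this patch: with the legacy helpers gone,
per-request tags come from the blk-mq tag set a driver registers, and blk-mq
assigns rq->tag before ->queue_rq() is called. The sketch below is a minimal,
hypothetical illustration of that replacement path; the example_* names and the
queue depth of 64 are made up for the example.

#include <linux/blk-mq.h>
#include <linux/blkdev.h>

/* Hypothetical ->queue_rq(): the tag is already assigned by blk-mq. */
static blk_status_t example_queue_rq(struct blk_mq_hw_ctx *hctx,
				     const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;

	blk_mq_start_request(rq);
	/* rq->tag is valid here; there is no blk_queue_start_tag() step. */
	blk_mq_end_request(rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops example_mq_ops = {
	.queue_rq	= example_queue_rq,
};

static struct blk_mq_tag_set example_tag_set;

/* Hypothetical setup: the tag set takes over what blk_queue_init_tags() did. */
static int example_setup_tags(void)
{
	example_tag_set.ops		= &example_mq_ops;
	example_tag_set.nr_hw_queues	= 1;
	example_tag_set.queue_depth	= 64;	/* illustrative depth */
	example_tag_set.numa_node	= NUMA_NO_NODE;
	example_tag_set.flags		= BLK_MQ_F_SHOULD_MERGE;

	return blk_mq_alloc_tag_set(&example_tag_set);
}

The patch also simplifies blk_mq_unique_tag(), which remains available for
drivers that want a single value combining the hardware queue number and the tag.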
-rw-r--r--	Documentation/block/biodoc.txt	88
-rw-r--r--	block/Makefile	2
-rw-r--r--	block/blk-core.c	6
-rw-r--r--	block/blk-mq-debugfs.c	2
-rw-r--r--	block/blk-mq-tag.c	6
-rw-r--r--	block/blk-sysfs.c	3
-rw-r--r--	block/blk-tag.c	378
-rw-r--r--	include/linux/blkdev.h	35
8 files changed, 3 insertions, 517 deletions
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 207eca58efaa..ac18b488cb5e 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -65,7 +65,6 @@ Description of Contents:
 3.2.3 I/O completion
 3.2.4 Implications for drivers that do not interpret bios (don't handle
       multiple segments)
-3.2.5 Request command tagging
 3.3 I/O submission
 4. The I/O scheduler
 5. Scalability related changes
@@ -708,93 +707,6 @@ is crossed on completion of a transfer. (The end*request* functions should
 be used if only if the request has come down from block/bio path, not for
 direct access requests which only specify rq->buffer without a valid rq->bio)
 
-3.2.5 Generic request command tagging
-
-3.2.5.1 Tag helpers
-
-Block now offers some simple generic functionality to help support command
-queueing (typically known as tagged command queueing), ie manage more than
-one outstanding command on a queue at any given time.
-
-	blk_queue_init_tags(struct request_queue *q, int depth)
-
-	Initialize internal command tagging structures for a maximum
-	depth of 'depth'.
-
-	blk_queue_free_tags((struct request_queue *q)
-
-	Teardown tag info associated with the queue. This will be done
-	automatically by block if blk_queue_cleanup() is called on a queue
-	that is using tagging.
-
-The above are initialization and exit management, the main helpers during
-normal operations are:
-
-	blk_queue_start_tag(struct request_queue *q, struct request *rq)
-
-	Start tagged operation for this request. A free tag number between
-	0 and 'depth' is assigned to the request (rq->tag holds this number),
-	and 'rq' is added to the internal tag management. If the maximum depth
-	for this queue is already achieved (or if the tag wasn't started for
-	some other reason), 1 is returned. Otherwise 0 is returned.
-
-	blk_queue_end_tag(struct request_queue *q, struct request *rq)
-
-	End tagged operation on this request. 'rq' is removed from the internal
-	book keeping structures.
-
-To minimize struct request and queue overhead, the tag helpers utilize some
-of the same request members that are used for normal request queue management.
-This means that a request cannot both be an active tag and be on the queue
-list at the same time. blk_queue_start_tag() will remove the request, but
-the driver must remember to call blk_queue_end_tag() before signalling
-completion of the request to the block layer. This means ending tag
-operations before calling end_that_request_last()! For an example of a user
-of these helpers, see the IDE tagged command queueing support.
-
-3.2.5.2 Tag info
-
-Some block functions exist to query current tag status or to go from a
-tag number to the associated request. These are, in no particular order:
-
-	blk_queue_tagged(q)
-
-	Returns 1 if the queue 'q' is using tagging, 0 if not.
-
-	blk_queue_tag_request(q, tag)
-
-	Returns a pointer to the request associated with tag 'tag'.
-
-	blk_queue_tag_depth(q)
-
-	Return current queue depth.
-
-	blk_queue_tag_queue(q)
-
-	Returns 1 if the queue can accept a new queued command, 0 if we are
-	at the maximum depth already.
-
-	blk_queue_rq_tagged(rq)
-
-	Returns 1 if the request 'rq' is tagged.
-
-3.2.5.2 Internal structure
-
-Internally, block manages tags in the blk_queue_tag structure:
-
-	struct blk_queue_tag {
-		struct request **tag_index;	/* array or pointers to rq */
-		unsigned long *tag_map;		/* bitmap of free tags */
-		struct list_head busy_list;	/* fifo list of busy tags */
-		int busy;			/* queue depth */
-		int max_depth;			/* max queue depth */
-	};
-
-Most of the above is simple and straight forward, however busy_list may need
-a bit of explaining. Normally we don't care too much about request ordering,
-but in the event of any barrier requests in the tag queue we need to ensure
-that requests are restarted in the order they were queue.
-
 3.3 I/O Submission
 
 The routine submit_bio() is used to submit a single io. Higher level i/o
diff --git a/block/Makefile b/block/Makefile
index 27eac600474f..213674c8faaa 100644
--- a/block/Makefile
+++ b/block/Makefile
@@ -3,7 +3,7 @@
 # Makefile for the kernel block layer
 #
 
-obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
+obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
 			blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
diff --git a/block/blk-core.c b/block/blk-core.c
index 03ef8f0e7dc5..daaed4dfa719 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1658,9 +1658,6 @@ void blk_requeue_request(struct request_queue *q, struct request *rq)
 	trace_block_rq_requeue(q, rq);
 	rq_qos_requeue(q, rq);
 
-	if (rq->rq_flags & RQF_QUEUED)
-		blk_queue_end_tag(q, rq);
-
 	BUG_ON(blk_queued_rq(rq));
 
 	elv_requeue_request(q, rq);
@@ -3174,9 +3171,6 @@ void blk_finish_request(struct request *req, blk_status_t error)
 	if (req->rq_flags & RQF_STATS)
 		blk_stat_add(req, now);
 
-	if (req->rq_flags & RQF_QUEUED)
-		blk_queue_end_tag(q, req);
-
 	BUG_ON(blk_queued_rq(req));
 
 	if (unlikely(laptop_mode) && !blk_rq_is_passthrough(req))
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index 10b284a1f18d..9ed43a7c70b5 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -112,7 +112,6 @@ static int queue_pm_only_show(void *data, struct seq_file *m)
 
 #define QUEUE_FLAG_NAME(name) [QUEUE_FLAG_##name] = #name
 static const char *const blk_queue_flag_name[] = {
-	QUEUE_FLAG_NAME(QUEUED),
 	QUEUE_FLAG_NAME(STOPPED),
 	QUEUE_FLAG_NAME(DYING),
 	QUEUE_FLAG_NAME(BYPASS),
@@ -318,7 +317,6 @@ static const char *const cmd_flag_name[] = {
 static const char *const rqf_name[] = {
 	RQF_NAME(SORTED),
 	RQF_NAME(STARTED),
-	RQF_NAME(QUEUED),
 	RQF_NAME(SOFTBARRIER),
 	RQF_NAME(FLUSH_SEQ),
 	RQF_NAME(MIXED_MERGE),
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index cfda95b85d34..4254e74c1446 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -530,10 +530,8 @@ u32 blk_mq_unique_tag(struct request *rq)
 	struct blk_mq_hw_ctx *hctx;
 	int hwq = 0;
 
-	if (q->mq_ops) {
-		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
-		hwq = hctx->queue_num;
-	}
+	hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
+	hwq = hctx->queue_num;
 
 	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
 		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 844a454a7b3a..1b82ccfde3fe 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -849,9 +849,6 @@ static void __blk_release_queue(struct work_struct *work)
 
 	blk_exit_rl(q, &q->root_rl);
 
-	if (q->queue_tags)
-		__blk_queue_free_tags(q);
-
 	blk_queue_free_zone_bitmaps(q);
 
 	if (!q->mq_ops) {
diff --git a/block/blk-tag.c b/block/blk-tag.c
deleted file mode 100644
index fbc153aef166..000000000000
--- a/block/blk-tag.c
+++ /dev/null
@@ -1,378 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Functions related to tagged command queuing
- */
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/bio.h>
-#include <linux/blkdev.h>
-#include <linux/slab.h>
-
-#include "blk.h"
-
-/**
- * blk_queue_find_tag - find a request by its tag and queue
- * @q:	 The request queue for the device
- * @tag: The tag of the request
- *
- * Notes:
- *    Should be used when a device returns a tag and you want to match
- *    it with a request.
- *
- *    no locks need be held.
- **/
-struct request *blk_queue_find_tag(struct request_queue *q, int tag)
-{
-	return blk_map_queue_find_tag(q->queue_tags, tag);
-}
-EXPORT_SYMBOL(blk_queue_find_tag);
-
-/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:	the tag map to free
- *
- * Drop the reference count on @bqt and frees it when the last reference
- * is dropped.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-	if (atomic_dec_and_test(&bqt->refcnt)) {
-		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
-							bqt->max_depth);
-
-		kfree(bqt->tag_index);
-		bqt->tag_index = NULL;
-
-		kfree(bqt->tag_map);
-		bqt->tag_map = NULL;
-
-		kfree(bqt);
-	}
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
- * __blk_queue_free_tags - release tag maintenance info
- * @q:  the request queue for the device
- *
- * Notes:
- *    blk_cleanup_queue() will take care of calling this function, if tagging
- *    has been used. So there's no need to call this directly.
- **/
-void __blk_queue_free_tags(struct request_queue *q)
-{
-	struct blk_queue_tag *bqt = q->queue_tags;
-
-	if (!bqt)
-		return;
-
-	blk_free_tags(bqt);
-
-	q->queue_tags = NULL;
-	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
-}
-
-/**
- * blk_queue_free_tags - release tag maintenance info
- * @q:  the request queue for the device
- *
- * Notes:
- *    This is used to disable tagged queuing to a device, yet leave
- *    queue in function.
- **/
-void blk_queue_free_tags(struct request_queue *q)
-{
-	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
-}
-EXPORT_SYMBOL(blk_queue_free_tags);
-
-static int
-init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
-{
-	struct request **tag_index;
-	unsigned long *tag_map;
-	int nr_ulongs;
-
-	if (q && depth > q->nr_requests * 2) {
-		depth = q->nr_requests * 2;
-		printk(KERN_ERR "%s: adjusted depth to %d\n",
-		       __func__, depth);
-	}
-
-	tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
-	if (!tag_index)
-		goto fail;
-
-	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
-	tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
-	if (!tag_map)
-		goto fail;
-
-	tags->real_max_depth = depth;
-	tags->max_depth = depth;
-	tags->tag_index = tag_index;
-	tags->tag_map = tag_map;
-
-	return 0;
-fail:
-	kfree(tag_index);
-	return -ENOMEM;
-}
-
-static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
-						   int depth, int alloc_policy)
-{
-	struct blk_queue_tag *tags;
-
-	tags = kmalloc(sizeof(struct blk_queue_tag), GFP_ATOMIC);
-	if (!tags)
-		goto fail;
-
-	if (init_tag_map(q, tags, depth))
-		goto fail;
-
-	atomic_set(&tags->refcnt, 1);
-	tags->alloc_policy = alloc_policy;
-	tags->next_tag = 0;
-	return tags;
-fail:
-	kfree(tags);
-	return NULL;
-}
-
-/**
- * blk_init_tags - initialize the tag info for an external tag map
- * @depth:	the maximum queue depth supported
- * @alloc_policy: tag allocation policy
- **/
-struct blk_queue_tag *blk_init_tags(int depth, int alloc_policy)
-{
-	return __blk_queue_init_tags(NULL, depth, alloc_policy);
-}
-EXPORT_SYMBOL(blk_init_tags);
-
-/**
- * blk_queue_init_tags - initialize the queue tag info
- * @q:  the request queue for the device
- * @depth:  the maximum queue depth supported
- * @tags: the tag to use
- * @alloc_policy: tag allocation policy
- *
- * Queue lock must be held here if the function is called to resize an
- * existing map.
- **/
-int blk_queue_init_tags(struct request_queue *q, int depth,
-			struct blk_queue_tag *tags, int alloc_policy)
-{
-	int rc;
-
-	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);
-
-	if (!tags && !q->queue_tags) {
-		tags = __blk_queue_init_tags(q, depth, alloc_policy);
-
-		if (!tags)
-			return -ENOMEM;
-
-	} else if (q->queue_tags) {
-		rc = blk_queue_resize_tags(q, depth);
-		if (rc)
-			return rc;
-		queue_flag_set(QUEUE_FLAG_QUEUED, q);
-		return 0;
-	} else
-		atomic_inc(&tags->refcnt);
-
-	/*
-	 * assign it, all done
-	 */
-	q->queue_tags = tags;
-	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
-	return 0;
-}
-EXPORT_SYMBOL(blk_queue_init_tags);
-
-/**
- * blk_queue_resize_tags - change the queueing depth
- * @q:  the request queue for the device
- * @new_depth: the new max command queueing depth
- *
- * Notes:
- *    Must be called with the queue lock held.
- **/
-int blk_queue_resize_tags(struct request_queue *q, int new_depth)
-{
-	struct blk_queue_tag *bqt = q->queue_tags;
-	struct request **tag_index;
-	unsigned long *tag_map;
-	int max_depth, nr_ulongs;
-
-	if (!bqt)
-		return -ENXIO;
-
-	/*
-	 * if we already have large enough real_max_depth. just
-	 * adjust max_depth. *NOTE* as requests with tag value
-	 * between new_depth and real_max_depth can be in-flight, tag
-	 * map can not be shrunk blindly here.
-	 */
-	if (new_depth <= bqt->real_max_depth) {
-		bqt->max_depth = new_depth;
-		return 0;
-	}
-
-	/*
-	 * Currently cannot replace a shared tag map with a new
-	 * one, so error out if this is the case
-	 */
-	if (atomic_read(&bqt->refcnt) != 1)
-		return -EBUSY;
-
-	/*
-	 * save the old state info, so we can copy it back
-	 */
-	tag_index = bqt->tag_index;
-	tag_map = bqt->tag_map;
-	max_depth = bqt->real_max_depth;
-
-	if (init_tag_map(q, bqt, new_depth))
-		return -ENOMEM;
-
-	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
-	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
-	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));
-
-	kfree(tag_index);
-	kfree(tag_map);
-	return 0;
-}
-EXPORT_SYMBOL(blk_queue_resize_tags);
-
-/**
- * blk_queue_end_tag - end tag operations for a request
- * @q:  the request queue for the device
- * @rq: the request that has completed
- *
- * Description:
- *    Typically called when end_that_request_first() returns %0, meaning
- *    all transfers have been done for a request. It's important to call
- *    this function before end_that_request_last(), as that will put the
- *    request back on the free list thus corrupting the internal tag list.
- **/
-void blk_queue_end_tag(struct request_queue *q, struct request *rq)
-{
-	struct blk_queue_tag *bqt = q->queue_tags;
-	unsigned tag = rq->tag; /* negative tags invalid */
-
-	lockdep_assert_held(q->queue_lock);
-
-	BUG_ON(tag >= bqt->real_max_depth);
-
-	list_del_init(&rq->queuelist);
-	rq->rq_flags &= ~RQF_QUEUED;
-	rq->tag = -1;
-	rq->internal_tag = -1;
-
-	if (unlikely(bqt->tag_index[tag] == NULL))
-		printk(KERN_ERR "%s: tag %d is missing\n",
-		       __func__, tag);
-
-	bqt->tag_index[tag] = NULL;
-
-	if (unlikely(!test_bit(tag, bqt->tag_map))) {
-		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
-		       __func__, tag);
-		return;
-	}
-	/*
-	 * The tag_map bit acts as a lock for tag_index[bit], so we need
-	 * unlock memory barrier semantics.
-	 */
-	clear_bit_unlock(tag, bqt->tag_map);
-}
-
-/**
- * blk_queue_start_tag - find a free tag and assign it
- * @q:  the request queue for the device
- * @rq:  the block request that needs tagging
- *
- * Description:
- *    This can either be used as a stand-alone helper, or possibly be
- *    assigned as the queue &prep_rq_fn (in which case &struct request
- *    automagically gets a tag assigned). Note that this function
- *    assumes that any type of request can be queued! if this is not
- *    true for your device, you must check the request type before
- *    calling this function. The request will also be removed from
- *    the request queue, so it's the drivers responsibility to readd
- *    it if it should need to be restarted for some reason.
- **/
-int blk_queue_start_tag(struct request_queue *q, struct request *rq)
-{
-	struct blk_queue_tag *bqt = q->queue_tags;
-	unsigned max_depth;
-	int tag;
-
-	lockdep_assert_held(q->queue_lock);
-
-	if (unlikely((rq->rq_flags & RQF_QUEUED))) {
-		printk(KERN_ERR
-		       "%s: request %p for device [%s] already tagged %d",
-		       __func__, rq,
-		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
-		BUG();
-	}
-
-	/*
-	 * Protect against shared tag maps, as we may not have exclusive
-	 * access to the tag map.
-	 *
-	 * We reserve a few tags just for sync IO, since we don't want
-	 * to starve sync IO on behalf of flooding async IO.
-	 */
-	max_depth = bqt->max_depth;
-	if (!rq_is_sync(rq) && max_depth > 1) {
-		switch (max_depth) {
-		case 2:
-			max_depth = 1;
-			break;
-		case 3:
-			max_depth = 2;
-			break;
-		default:
-			max_depth -= 2;
-		}
-		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
-			return 1;
-	}
-
-	do {
-		if (bqt->alloc_policy == BLK_TAG_ALLOC_FIFO) {
-			tag = find_first_zero_bit(bqt->tag_map, max_depth);
-			if (tag >= max_depth)
-				return 1;
-		} else {
-			int start = bqt->next_tag;
-			int size = min_t(int, bqt->max_depth, max_depth + start);
-			tag = find_next_zero_bit(bqt->tag_map, size, start);
-			if (tag >= size && start + size > bqt->max_depth) {
-				size = start + size - bqt->max_depth;
-				tag = find_first_zero_bit(bqt->tag_map, size);
-			}
-			if (tag >= size)
-				return 1;
-		}
-
-	} while (test_and_set_bit_lock(tag, bqt->tag_map));
-	/*
-	 * We need lock ordering semantics given by test_and_set_bit_lock.
-	 * See blk_queue_end_tag for details.
-	 */
-
-	bqt->next_tag = (tag + 1) % bqt->max_depth;
-	rq->rq_flags |= RQF_QUEUED;
-	rq->tag = tag;
-	bqt->tag_index[tag] = rq;
-	blk_start_request(rq);
-	return 0;
-}
-EXPORT_SYMBOL(blk_queue_start_tag);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 6baea6563364..8afe3331777e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -85,8 +85,6 @@ typedef __u32 __bitwise req_flags_t;
 #define RQF_SORTED		((__force req_flags_t)(1 << 0))
 /* drive already may have started this one */
 #define RQF_STARTED		((__force req_flags_t)(1 << 1))
-/* uses tagged queueing */
-#define RQF_QUEUED		((__force req_flags_t)(1 << 2))
 /* may not be passed by ioscheduler */
 #define RQF_SOFTBARRIER		((__force req_flags_t)(1 << 3))
 /* request for flush sequence */
@@ -336,15 +334,6 @@ enum blk_queue_state {
 	Queue_up,
 };
 
-struct blk_queue_tag {
-	struct request **tag_index;	/* map of busy tags */
-	unsigned long *tag_map;		/* bit map of free/busy tags */
-	int max_depth;			/* what we will send to device */
-	int real_max_depth;		/* what the array can hold */
-	atomic_t refcnt;		/* map can be shared */
-	int alloc_policy;		/* tag allocation policy */
-	int next_tag;			/* next tag */
-};
 #define BLK_TAG_ALLOC_FIFO 0	/* allocate starting from 0 */
 #define BLK_TAG_ALLOC_RR 1	/* allocate starting from last allocated tag */
 
@@ -568,8 +557,6 @@ struct request_queue {
 	unsigned int		dma_pad_mask;
 	unsigned int		dma_alignment;
 
-	struct blk_queue_tag	*queue_tags;
-
 	unsigned int		nr_sorted;
 	unsigned int		in_flight[2];
 
@@ -680,7 +667,6 @@ struct request_queue {
 	u64			write_hints[BLK_MAX_WRITE_HINTS];
 };
 
-#define QUEUE_FLAG_QUEUED	0	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	1	/* queue is stopped */
 #define QUEUE_FLAG_DYING	2	/* queue being torn down */
 #define QUEUE_FLAG_BYPASS	3	/* act as dumb FIFO queue */
@@ -724,7 +710,6 @@ void blk_queue_flag_clear(unsigned int flag, struct request_queue *q);
 bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
 bool blk_queue_flag_test_and_clear(unsigned int flag, struct request_queue *q);
 
-#define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dying(q)	test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
@@ -1359,26 +1344,6 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 			 !list_empty(&plug->cb_list));
 }
 
-/*
- * tag stuff
- */
-extern int blk_queue_start_tag(struct request_queue *, struct request *);
-extern struct request *blk_queue_find_tag(struct request_queue *, int);
-extern void blk_queue_end_tag(struct request_queue *, struct request *);
-extern int blk_queue_init_tags(struct request_queue *, int, struct blk_queue_tag *, int);
-extern void blk_queue_free_tags(struct request_queue *);
-extern int blk_queue_resize_tags(struct request_queue *, int);
-extern struct blk_queue_tag *blk_init_tags(int, int);
-extern void blk_free_tags(struct blk_queue_tag *);
-
-static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
-						     int tag)
-{
-	if (unlikely(bqt == NULL || tag >= bqt->real_max_depth))
-		return NULL;
-	return bqt->tag_index[tag];
-}
-
 extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		sector_t nr_sects, gfp_t gfp_mask, struct page *page);