author    Linus Torvalds <torvalds@linux-foundation.org>  2014-06-02 12:29:34 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-06-02 12:29:34 -0400
commit    681a2895486243a82547d8c9f53043eb54b53da0 (patch)
tree      464273280aed6db55a99cc0d8614d4393f94fc48 /include
parent    6c52486dedbb30a1313da64945dcd686b4579c51 (diff)
parent    ed851860b4552fc8963ecf71eab9f6f7a5c19d74 (diff)
Merge branch 'for-3.16/core' of git://git.kernel.dk/linux-block into next
Pull block core updates from Jens Axboe:
 "It's a big(ish) round this time, lots of development effort has gone
  into blk-mq in the last 3 months.  Generally we're heading to where
  3.16 will be a feature complete and performant blk-mq.  scsi-mq is
  progressing nicely and will hopefully be in 3.17.  An nvme port is in
  progress, and the Micron pci-e flash driver, mtip32xx, is converted
  and will be sent in with the driver pull request for 3.16.

  This pull request contains:

   - Lots of prep and support patches for scsi-mq have been integrated.
     All from Christoph.

   - API and code cleanups for blk-mq from Christoph.

   - Lots of good corner case and error handling cleanup fixes for
     blk-mq from Ming Lei.

   - A slew of blk-mq updates from me:

     * Provide strict mappings so that the driver can rely on the CPU
       to queue mapping.  This enables optimizations in the driver.

     * Provide bitmap tagging instead of percpu_ida, which never really
       worked well for blk-mq.  percpu_ida relies on the fact that we
       have a lot more tags available than we really need; it fails
       miserably for cases where we exhaust (or are close to
       exhausting) the tag space.

     * Provide sane support for shared tag maps, as utilized by
       scsi-mq.

     * Various fixes for IO timeouts.

     * API cleanups, and lots of perf tweaks and optimizations.

   - Remove 'buffer' from struct request.  This is ancient code, from
     when requests were always virtually mapped.  Kill it, to reclaim
     some space in struct request.  From me.

   - Remove 'magic' from blk_plug.  Since we store these on the stack
     and since we've never caught any actual bugs with this, let's just
     get rid of it.  From me.

   - Only call part_in_flight() once for IO completion, as it includes
     two atomic reads.  Hopefully we'll get a better implementation
     soon, as the part IO stats are now one of the more expensive parts
     of doing IO on blk-mq.  From me.

   - File migration of block code from {mm,fs}/ to block/.  This
     includes bio.c, bio-integrity.c, bounce.c, and ioprio.c.  From me,
     from a discussion on lkml.

  That should describe the meat of the pull request.  Also has various
  little fixes and cleanups from Dave Jones, Shaohua Li, Duan Jiong,
  Fengguang Wu, Fabian Frederick, Randy Dunlap, Robert Elliott, and Sam
  Bradshaw"

* 'for-3.16/core' of git://git.kernel.dk/linux-block: (100 commits)
  blk-mq: push IPI or local end_io decision to __blk_mq_complete_request()
  blk-mq: remember to start timeout handler for direct queue
  block: ensure that the timer is always added
  blk-mq: blk_mq_unregister_hctx() can be static
  blk-mq: make the sysfs mq/ layout reflect current mappings
  blk-mq: blk_mq_tag_to_rq should handle flush request
  block: remove dead code in scsi_ioctl:blk_verify_command
  blk-mq: request initialization optimizations
  block: add queue flag for disabling SG merging
  block: remove 'magic' from struct blk_plug
  blk-mq: remove alloc_hctx and free_hctx methods
  blk-mq: add file comments and update copyright notices
  blk-mq: remove blk_mq_alloc_request_pinned
  blk-mq: do not use blk_mq_alloc_request_pinned in blk_mq_map_request
  blk-mq: remove blk_mq_wait_for_tags
  blk-mq: initialize request in __blk_mq_alloc_request
  blk-mq: merge blk_mq_alloc_reserved_request into blk_mq_alloc_request
  blk-mq: add helper to insert requests from irq context
  blk-mq: remove stale comment for blk_mq_complete_request()
  blk-mq: allow non-softirq completions
  ...
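The headline change for drivers is the switch from struct blk_mq_reg to the shareable struct blk_mq_tag_set (see the include/linux/blk-mq.h diff below). A minimal registration sketch against the new interface follows; the blk_mq_* calls and struct fields are the ones declared in this series, while every my_* name is a hypothetical stand-in for driver code:

/*
 * Sketch only: registering a queue with the new tag_set API from this
 * pull.  All my_* identifiers are hypothetical driver code.
 */
static struct blk_mq_ops my_mq_ops = {
	.queue_rq	= my_queue_rq,		/* hypothetical handler */
	.map_queue	= blk_mq_map_queue,	/* default CPU->queue map */
	.init_request	= my_init_request,	/* new per-request hooks */
	.exit_request	= my_exit_request,
};

static int my_probe(struct my_dev *dev)
{
	struct blk_mq_tag_set *set = &dev->tag_set;
	struct request_queue *q;
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = &my_mq_ops;
	set->nr_hw_queues = 1;
	set->queue_depth = 64;
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct my_cmd);	/* per-request extra data */
	set->flags = BLK_MQ_F_SHOULD_MERGE;
	set->driver_data = dev;			/* handed to init_request */

	ret = blk_mq_alloc_tag_set(set);	/* tags now live in the set, */
	if (ret)				/* so they can be shared     */
		return ret;

	q = blk_mq_init_queue(set);		/* replaces blk_mq_reg + data */
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(set);
		return PTR_ERR(q);
	}
	dev->queue = q;
	return 0;
}

Multiple request queues initialized from the same set share its tags; the BLK_MQ_F_TAG_SHARED flag and the per-hctx nr_active counter in the diff below exist to keep that sharing fair.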
Diffstat (limited to 'include')
-rw-r--r--	include/linux/bio.h		  2
-rw-r--r--	include/linux/blk-mq.h		101
-rw-r--r--	include/linux/blk_types.h	  2
-rw-r--r--	include/linux/blkdev.h		 27
4 files changed, 82 insertions(+), 50 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index bba550826921..5a645769f020 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -333,7 +333,7 @@ static inline struct bio *bio_next_split(struct bio *bio, int sectors,
 
 extern struct bio_set *bioset_create(unsigned int, unsigned int);
 extern void bioset_free(struct bio_set *);
-extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);
+extern mempool_t *biovec_create_pool(int pool_entries);
 
 extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
 extern void bio_put(struct bio *);
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 0120451545d8..c15128833100 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -8,7 +8,13 @@ struct blk_mq_tags;
 struct blk_mq_cpu_notifier {
 	struct list_head list;
 	void *data;
-	void (*notify)(void *data, unsigned long action, unsigned int cpu);
+	int (*notify)(void *data, unsigned long action, unsigned int cpu);
+};
+
+struct blk_mq_ctxmap {
+	unsigned int map_size;
+	unsigned int bits_per_word;
+	struct blk_align_bitmap *map;
 };
 
 struct blk_mq_hw_ctx {
@@ -18,7 +24,11 @@ struct blk_mq_hw_ctx {
 	} ____cacheline_aligned_in_smp;
 
 	unsigned long		state;		/* BLK_MQ_S_* flags */
-	struct delayed_work	delayed_work;
+	struct delayed_work	run_work;
+	struct delayed_work	delay_work;
+	cpumask_var_t		cpumask;
+	int			next_cpu;
+	int			next_cpu_batch;
 
 	unsigned long		flags;		/* BLK_MQ_F_* flags */
 
@@ -27,13 +37,13 @@ struct blk_mq_hw_ctx {
 
 	void			*driver_data;
 
+	struct blk_mq_ctxmap	ctx_map;
+
 	unsigned int		nr_ctx;
 	struct blk_mq_ctx	**ctxs;
-	unsigned int		nr_ctx_map;
-	unsigned long		*ctx_map;
 
-	struct request		**rqs;
-	struct list_head	page_list;
+	unsigned int		wait_index;
+
 	struct blk_mq_tags	*tags;
 
 	unsigned long		queued;
@@ -41,31 +51,40 @@ struct blk_mq_hw_ctx {
 #define BLK_MQ_MAX_DISPATCH_ORDER	10
 	unsigned long		dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
 
-	unsigned int		queue_depth;
 	unsigned int		numa_node;
 	unsigned int		cmd_size;	/* per-request extra data */
 
+	atomic_t		nr_active;
+
 	struct blk_mq_cpu_notifier	cpu_notifier;
 	struct kobject		kobj;
 };
 
-struct blk_mq_reg {
+struct blk_mq_tag_set {
 	struct blk_mq_ops	*ops;
 	unsigned int		nr_hw_queues;
-	unsigned int		queue_depth;
+	unsigned int		queue_depth;	/* max hw supported */
 	unsigned int		reserved_tags;
 	unsigned int		cmd_size;	/* per-request extra data */
 	int			numa_node;
 	unsigned int		timeout;
 	unsigned int		flags;		/* BLK_MQ_F_* */
+	void			*driver_data;
+
+	struct blk_mq_tags	**tags;
+
+	struct mutex		tag_list_lock;
+	struct list_head	tag_list;
 };
 
 typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, struct request *);
 typedef struct blk_mq_hw_ctx *(map_queue_fn)(struct request_queue *, const int);
-typedef struct blk_mq_hw_ctx *(alloc_hctx_fn)(struct blk_mq_reg *,unsigned int);
-typedef void (free_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
 typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
 typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
+typedef int (init_request_fn)(void *, struct request *, unsigned int,
+		unsigned int, unsigned int);
+typedef void (exit_request_fn)(void *, struct request *, unsigned int,
+		unsigned int);
 
 struct blk_mq_ops {
 	/*
@@ -86,18 +105,20 @@ struct blk_mq_ops {
 	softirq_done_fn		*complete;
 
 	/*
-	 * Override for hctx allocations (should probably go)
-	 */
-	alloc_hctx_fn		*alloc_hctx;
-	free_hctx_fn		*free_hctx;
-
-	/*
 	 * Called when the block layer side of a hardware queue has been
 	 * set up, allowing the driver to allocate/init matching structures.
 	 * Ditto for exit/teardown.
 	 */
 	init_hctx_fn		*init_hctx;
 	exit_hctx_fn		*exit_hctx;
+
+	/*
+	 * Called for every command allocated by the block layer to allow
+	 * the driver to set up driver specific data.
+	 * Ditto for exit/teardown.
+	 */
+	init_request_fn		*init_request;
+	exit_request_fn		*exit_request;
 };
 
 enum {
@@ -107,18 +128,24 @@ enum {
 
 	BLK_MQ_F_SHOULD_MERGE	= 1 << 0,
 	BLK_MQ_F_SHOULD_SORT	= 1 << 1,
-	BLK_MQ_F_SHOULD_IPI	= 1 << 2,
+	BLK_MQ_F_TAG_SHARED	= 1 << 2,
+	BLK_MQ_F_SG_MERGE	= 1 << 3,
+	BLK_MQ_F_SYSFS_UP	= 1 << 4,
 
 	BLK_MQ_S_STOPPED	= 0,
+	BLK_MQ_S_TAG_ACTIVE	= 1,
 
 	BLK_MQ_MAX_DEPTH	= 2048,
+
+	BLK_MQ_CPU_WORK_BATCH	= 8,
 };
 
-struct request_queue *blk_mq_init_queue(struct blk_mq_reg *, void *);
+struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
 int blk_mq_register_disk(struct gendisk *);
 void blk_mq_unregister_disk(struct gendisk *);
-int blk_mq_init_commands(struct request_queue *, int (*init)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
-void blk_mq_free_commands(struct request_queue *, void (*free)(void *data, struct blk_mq_hw_ctx *, struct request *, unsigned int), void *data);
+
+int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set);
+void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
 
 void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
 
@@ -126,28 +153,28 @@ void blk_mq_insert_request(struct request *, bool, bool, bool);
 void blk_mq_run_queues(struct request_queue *q, bool async);
 void blk_mq_free_request(struct request *rq);
 bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
-struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp);
-struct request *blk_mq_alloc_reserved_request(struct request_queue *q, int rw, gfp_t gfp);
-struct request *blk_mq_rq_from_tag(struct request_queue *q, unsigned int tag);
+struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
+		gfp_t gfp, bool reserved);
+struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx, unsigned int tag);
 
 struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *, const int ctx_index);
-struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_reg *, unsigned int);
-void blk_mq_free_single_hw_queue(struct blk_mq_hw_ctx *, unsigned int);
+struct blk_mq_hw_ctx *blk_mq_alloc_single_hw_queue(struct blk_mq_tag_set *, unsigned int, int);
 
-bool blk_mq_end_io_partial(struct request *rq, int error,
-		unsigned int nr_bytes);
-static inline void blk_mq_end_io(struct request *rq, int error)
-{
-	bool done = !blk_mq_end_io_partial(rq, error, blk_rq_bytes(rq));
-	BUG_ON(!done);
-}
+void blk_mq_end_io(struct request *rq, int error);
+void __blk_mq_end_io(struct request *rq, int error);
 
+void blk_mq_requeue_request(struct request *rq);
+void blk_mq_add_to_requeue_list(struct request *rq, bool at_head);
+void blk_mq_kick_requeue_list(struct request_queue *q);
 void blk_mq_complete_request(struct request *rq);
 
 void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx);
 void blk_mq_stop_hw_queues(struct request_queue *q);
-void blk_mq_start_stopped_hw_queues(struct request_queue *q);
+void blk_mq_start_hw_queues(struct request_queue *q);
+void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
+void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
+void blk_mq_tag_busy_iter(struct blk_mq_tags *tags, void (*fn)(void *data, unsigned long *), void *data);
 
 /*
  * Driver command data is immediately after the request. So subtract request
@@ -162,12 +189,6 @@ static inline void *blk_mq_rq_to_pdu(struct request *rq)
 	return (void *) rq + sizeof(*rq);
 }
 
-static inline struct request *blk_mq_tag_to_rq(struct blk_mq_hw_ctx *hctx,
-		unsigned int tag)
-{
-	return hctx->rqs[tag];
-}
-
 #define queue_for_each_hw_ctx(q, hctx, i)				\
 	for ((i) = 0; (i) < (q)->nr_hw_queues &&			\
 	     ({ hctx = (q)->queue_hw_ctx[i]; 1; }); (i)++)
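The init_request/exit_request pair above replaces the old blk_mq_init_commands()/blk_mq_free_commands() iteration: the core now calls back into the driver once per allocated request. A sketch of matching driver callbacks, following the init_request_fn/exit_request_fn typedefs in this diff (my_* names are hypothetical):

/*
 * Arguments per the typedefs above: driver data (set->driver_data),
 * the request, hardware queue index, request index, NUMA node.
 */
static int my_init_request(void *data, struct request *rq,
			   unsigned int hctx_idx, unsigned int rq_idx,
			   unsigned int numa_node)
{
	struct my_dev *dev = data;
	struct my_cmd *cmd = blk_mq_rq_to_pdu(rq);	/* cmd_size area */

	cmd->dev = dev;			/* hypothetical per-command setup */
	return 0;			/* non-zero fails queue setup */
}

static void my_exit_request(void *data, struct request *rq,
			    unsigned int hctx_idx, unsigned int rq_idx)
{
	/* undo anything my_init_request set up; nothing to do here */
}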
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index aa0eaa2d0bd8..d8e4cea23a25 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -190,6 +190,7 @@ enum rq_flag_bits {
 	__REQ_PM,		/* runtime pm request */
 	__REQ_END,		/* last of chain of requests */
 	__REQ_HASHED,		/* on IO scheduler merge hash */
+	__REQ_MQ_INFLIGHT,	/* track inflight for MQ */
 	__REQ_NR_BITS,		/* stops here */
 };
 
@@ -243,5 +244,6 @@ enum rq_flag_bits {
 #define REQ_PM			(1ULL << __REQ_PM)
 #define REQ_END			(1ULL << __REQ_END)
 #define REQ_HASHED		(1ULL << __REQ_HASHED)
+#define REQ_MQ_INFLIGHT		(1ULL << __REQ_MQ_INFLIGHT)
 
 #endif /* __LINUX_BLK_TYPES_H */
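The new REQ_MQ_INFLIGHT flag pairs with the atomic_t nr_active counter added to struct blk_mq_hw_ctx above: with shared tag maps, blk-mq marks requests that count against a hardware queue and drops the count when the request is freed. Roughly, as a sketch of the accounting rather than the exact core code:

	/* On request free: release this request's share of the count. */
	if (rq->cmd_flags & REQ_MQ_INFLIGHT)
		atomic_dec(&hctx->nr_active);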
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0d84981ee03f..695b9fd41efe 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -90,15 +90,15 @@ enum rq_cmd_type_bits {
 #define BLK_MAX_CDB	16
 
 /*
- * try to put the fields that are referenced together in the same cacheline.
- * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
- * as well!
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
  */
 struct request {
 	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
-		struct work_struct mq_flush_work;
 		unsigned long fifo_time;
 	};
 
@@ -178,7 +178,6 @@ struct request {
 	unsigned short ioprio;
 
 	void *special;		/* opaque pointer available for LLD use */
-	char *buffer;		/* kaddr of the current segment if available */
 
 	int tag;
 	int errors;
@@ -463,6 +462,10 @@ struct request_queue {
 	struct request		*flush_rq;
 	spinlock_t		mq_flush_lock;
 
+	struct list_head	requeue_list;
+	spinlock_t		requeue_lock;
+	struct work_struct	requeue_work;
+
 	struct mutex		sysfs_lock;
 
 	int			bypass_depth;
@@ -481,6 +484,9 @@ struct request_queue {
 	wait_queue_head_t	mq_freeze_wq;
 	struct percpu_counter	mq_usage_counter;
 	struct list_head	all_q_node;
+
+	struct blk_mq_tag_set	*tag_set;
+	struct list_head	tag_set_list;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -504,6 +510,7 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -937,6 +944,7 @@ extern struct request *blk_fetch_request(struct request_queue *q);
  */
 extern bool blk_update_request(struct request *rq, int error,
 			       unsigned int nr_bytes);
+extern void blk_finish_request(struct request *rq, int error);
 extern bool blk_end_request(struct request *rq, int error,
 			    unsigned int nr_bytes);
 extern void blk_end_request_all(struct request *rq, int error);
@@ -1053,7 +1061,6 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
  * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	unsigned long magic; /* detect uninitialized use-cases */
 	struct list_head list; /* requests */
 	struct list_head mq_list; /* blk-mq requests */
 	struct list_head cb_list; /* md requires an unplug callback */
@@ -1102,7 +1109,8 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 /*
  * tag stuff
  */
-#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
+#define blk_rq_tagged(rq) \
+	((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
 extern int blk_queue_start_tag(struct request_queue *, struct request *);
 extern struct request *blk_queue_find_tag(struct request_queue *, int);
 extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1370,8 +1378,9 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
-int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
+int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
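The requeue_list/requeue_lock/requeue_work trio added to struct request_queue backs the blk_mq_requeue_request()/blk_mq_kick_requeue_list() helpers declared in the blk-mq.h diff above, so completions running in atomic context can hand requests back to the queue. A sketch of a driver completion path using them, with my_* as hypothetical driver code:

static void my_complete(struct request *rq)
{
	if (my_dev_busy(rq)) {			/* hypothetical retry check */
		blk_mq_requeue_request(rq);	/* park on q->requeue_list */
		blk_mq_kick_requeue_list(rq->q);/* schedule requeue_work */
		return;
	}
	blk_mq_end_io(rq, my_rq_status(rq));	/* hypothetical status */
}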