path: root/include/linux/blkdev.h
author		Linus Torvalds <torvalds@linux-foundation.org>	2012-01-15 15:24:45 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-01-15 15:24:45 -0500
commit		b3c9dd182ed3bdcdaf0e42625a35924b0497afdc (patch)
tree		ad48ad4d923fee147c736318d0fad35b3755f4f5 /include/linux/blkdev.h
parent		83c2f912b43c3a7babbb6cb7ae2a5276c1ed2a3e (diff)
parent		5d381efb3d1f1ef10535a31ca0dd9b22fe1e1922 (diff)
Merge branch 'for-3.3/core' of git://git.kernel.dk/linux-block
* 'for-3.3/core' of git://git.kernel.dk/linux-block: (37 commits)
  Revert "block: recursive merge requests"
  block: Stop using macro stubs for the bio data integrity calls
  blockdev: convert some macros to static inlines
  fs: remove unneeded plug in mpage_readpages()
  block: Add BLKROTATIONAL ioctl
  block: Introduce blk_set_stacking_limits function
  block: remove WARN_ON_ONCE() in exit_io_context()
  block: an exiting task should be allowed to create io_context
  block: ioc_cgroup_changed() needs to be exported
  block: recursive merge requests
  block, cfq: fix empty queue crash caused by request merge
  block, cfq: move icq creation and rq->elv.icq association to block core
  block, cfq: restructure io_cq creation path for io_context interface cleanup
  block, cfq: move io_cq exit/release to blk-ioc.c
  block, cfq: move icq cache management to block core
  block, cfq: move io_cq lookup to blk-ioc.c
  block, cfq: move cfqd->icq_list to request_queue and add request->elv.icq
  block, cfq: reorganize cfq_io_context into generic and cfq specific parts
  block: remove elevator_queue->ops
  block: reorder elevator switch sequence
  ...

Fix up conflicts in:

 - block/blk-cgroup.c
	Switch from can_attach_task to can_attach
 - block/cfq-iosched.c
	conflict with now removed cic index changes (we now use q->id instead)
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	101
1 files changed, 84 insertions, 17 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0ed1eb062313..6c6a1f008065 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -111,10 +111,14 @@ struct request {
 	 * Three pointers are available for the IO schedulers, if they need
 	 * more they have to dynamically allocate it. Flush requests are
 	 * never put on the IO scheduler. So let the flush fields share
-	 * space with the three elevator_private pointers.
+	 * space with the elevator data.
 	 */
 	union {
-		void *elevator_private[3];
+		struct {
+			struct io_cq		*icq;
+			void			*priv[2];
+		} elv;
+
 		struct {
 			unsigned int		seq;
 			struct list_head	list;
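With this layout an I/O scheduler reaches its per-request state through rq->elv instead of the old elevator_private[] slots. A minimal sketch of how an elevator might populate the new fields when a request is set up; the function and parameter names below are illustrative assumptions, not part of this patch:

	/* Illustrative only: hang scheduler data off the new rq->elv fields. */
	static void example_elv_set_request(struct request *rq, struct io_cq *icq,
					    void *sched_data)
	{
		rq->elv.icq = icq;		/* io_cq associated by block core */
		rq->elv.priv[0] = sched_data;	/* scheduler-private per-request data */
		rq->elv.priv[1] = NULL;
	}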
@@ -311,6 +315,12 @@ struct request_queue {
 	unsigned long		queue_flags;
 
 	/*
+	 * ida allocated id for this queue. Used to index queues from
+	 * ioctx.
+	 */
+	int			id;
+
+	/*
 	 * queue needs bounce pages for pages above this limit
 	 */
 	gfp_t			bounce_gfp;
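The new q->id is described as ida-allocated, so a plausible allocation/release pairing on the blk-core side would look roughly like the sketch below; blk_queue_ida and the error label are assumptions, not taken from this header change:

	static DEFINE_IDA(blk_queue_ida);

	/* at queue allocation time */
	q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
	if (q->id < 0)
		goto fail;

	/* at queue release time */
	ida_simple_remove(&blk_queue_ida, q->id);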
@@ -351,6 +361,8 @@ struct request_queue {
 	struct timer_list	timeout;
 	struct list_head	timeout_list;
 
+	struct list_head	icq_list;
+
 	struct queue_limits	limits;
 
 	/*
@@ -387,6 +399,9 @@ struct request_queue {
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
+#ifdef CONFIG_LOCKDEP
+	int			ioc_release_depth;
+#endif
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -481,6 +496,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
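blk_queue_dead() pairs with QUEUE_FLAG_DEAD; a hedged sketch of the kind of early-out a submission or management path might add before touching a queue that could be tearing down (the enclosing function is hypothetical):

	static int example_submit(struct request_queue *q)
	{
		if (blk_queue_dead(q))
			return -ENODEV;	/* queue is going away, don't touch it */
		/* ... proceed with normal submission ... */
		return 0;
	}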
@@ -660,7 +676,6 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern struct request *blk_make_request(struct request_queue *, struct bio *,
 					gfp_t);
-extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
@@ -829,6 +844,7 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
+extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
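blk_set_stacking_limits() gives stacking drivers a permissive starting point before each member device is folded in with blk_stack_limits(). A rough sketch under the assumption of a dm/md-style driver; "member" is a hypothetical underlying block device:

	struct queue_limits lim;

	blk_set_stacking_limits(&lim);	/* permissive defaults for a stacked queue */
	blk_stack_limits(&lim, &bdev_get_queue(member)->limits, 0);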
@@ -859,7 +875,7 @@ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatte
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
 
-int blk_get_queue(struct request_queue *);
+bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
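With blk_get_queue() now returning bool and marked __must_check, callers can no longer silently ignore a failed reference grab. A small sketch of the expected calling pattern; the error value is an illustrative choice:

	if (!blk_get_queue(q))		/* queue already dying, no reference taken */
		return -ENXIO;
	/* ... use q ... */
	blk_put_queue(q);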
@@ -1282,19 +1298,70 @@ queue_max_integrity_segments(struct request_queue *q)
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
-#define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a, b)		(0)
-#define blk_rq_map_integrity_sg(a, b, c)	(0)
-#define bdev_get_integrity(a)			(0)
-#define blk_get_integrity(a)			(0)
-#define blk_integrity_compare(a, b)		(0)
-#define blk_integrity_register(a, b)		(0)
-#define blk_integrity_unregister(a)		do { } while (0)
-#define blk_queue_max_integrity_segments(a, b)	do { } while (0)
-#define queue_max_integrity_segments(a)		(0)
-#define blk_integrity_merge_rq(a, b, c)		(0)
-#define blk_integrity_merge_bio(a, b, c)	(0)
-#define blk_integrity_is_initialized(a)		(0)
+struct bio;
+struct block_device;
+struct gendisk;
+struct blk_integrity;
+
+static inline int blk_integrity_rq(struct request *rq)
+{
+	return 0;
+}
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+					    struct bio *b)
+{
+	return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+					  struct bio *b,
+					  struct scatterlist *s)
+{
+	return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+	return 0;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+	return NULL;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+	return 0;
+}
+static inline int blk_integrity_register(struct gendisk *d,
+					 struct blk_integrity *b)
+{
+	return 0;
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+}
+static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
+{
+	return 0;
+}
+static inline int blk_integrity_merge_rq(struct request_queue *rq,
+					 struct request *r1,
+					 struct request *r2)
+{
+	return 0;
+}
+static inline int blk_integrity_merge_bio(struct request_queue *rq,
+					  struct request *r,
+					  struct bio *b)
+{
+	return 0;
+}
+static inline bool blk_integrity_is_initialized(struct gendisk *g)
+{
+	return 0;
+}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 