author		Olof Johansson <olof@lixom.net>	2014-07-19 17:59:07 -0400
committer	Olof Johansson <olof@lixom.net>	2014-07-19 17:59:07 -0400
commit		4e9816d012dbc28dc89559261c6ffbf8ffc440dd (patch)
tree		dee9f8b31f3d6d2fb141541da88e1cc1329b017e /include/linux/blkdev.h
parent		da98f44f27d81d7fe9a41f69af4fe08c18d13b56 (diff)
parent		1795cd9b3a91d4b5473c97f491d63892442212ab (diff)
Merge tag 'v3.16-rc5' into next/fixes-non-critical
Linux 3.16-rc5
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	75
1 file changed, 64 insertions(+), 11 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0d84981ee03f..8699bcf5f099 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -90,15 +90,15 @@ enum rq_cmd_type_bits {
 #define BLK_MAX_CDB	16
 
 /*
- * try to put the fields that are referenced together in the same cacheline.
- * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
- * as well!
+ * Try to put the fields that are referenced together in the same cacheline.
+ *
+ * If you modify this structure, make sure to update blk_rq_init() and
+ * especially blk_mq_rq_ctx_init() to take care of the added fields.
  */
 struct request {
 	struct list_head queuelist;
 	union {
 		struct call_single_data csd;
-		struct work_struct mq_flush_work;
 		unsigned long fifo_time;
 	};
 
@@ -178,7 +178,6 @@ struct request {
 	unsigned short ioprio;
 
 	void *special;		/* opaque pointer available for LLD use */
-	char *buffer;		/* kaddr of the current segment if available */
 
 	int tag;
 	int errors;
@@ -281,6 +280,7 @@ struct queue_limits {
 	unsigned long		seg_boundary_mask;
 
 	unsigned int		max_hw_sectors;
+	unsigned int		chunk_sectors;
 	unsigned int		max_sectors;
 	unsigned int		max_segment_size;
 	unsigned int		physical_block_size;
@@ -336,7 +336,7 @@ struct request_queue {
 	unsigned int		*mq_map;
 
 	/* sw queues */
-	struct blk_mq_ctx	*queue_ctx;
+	struct blk_mq_ctx __percpu	*queue_ctx;
 	unsigned int		nr_queues;
 
 	/* hw dispatch queues */
@@ -463,6 +463,10 @@ struct request_queue {
 	struct request		*flush_rq;
 	spinlock_t		mq_flush_lock;
 
+	struct list_head	requeue_list;
+	spinlock_t		requeue_lock;
+	struct work_struct	requeue_work;
+
 	struct mutex		sysfs_lock;
 
 	int			bypass_depth;
@@ -481,6 +485,9 @@ struct request_queue {
 	wait_queue_head_t	mq_freeze_wq;
 	struct percpu_counter	mq_usage_counter;
 	struct list_head	all_q_node;
+
+	struct blk_mq_tag_set	*tag_set;
+	struct list_head	tag_set_list;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -504,6 +511,8 @@ struct request_queue {
 #define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 #define QUEUE_FLAG_DEAD        19	/* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20	/* queue is initialized */
+#define QUEUE_FLAG_NO_SG_MERGE 21	/* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_SG_GAPS     22	/* queue doesn't support SG gaps */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
@@ -613,6 +622,15 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define rq_data_dir(rq)		(((rq)->cmd_flags & 1) != 0)
 
+/*
+ * Driver can handle struct request, if it either has an old style
+ * request_fn defined, or is blk-mq based.
+ */
+static inline bool queue_is_rq_based(struct request_queue *q)
+{
+	return q->request_fn || q->mq_ops;
+}
+
 static inline unsigned int blk_queue_cluster(struct request_queue *q)
 {
 	return q->limits.cluster;
@@ -779,6 +797,7 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern struct request *blk_make_request(struct request_queue *, struct bio *,
 					gfp_t);
+extern void blk_rq_set_block_pc(struct request *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
@@ -894,6 +913,20 @@ static inline unsigned int blk_queue_get_max_sectors(struct request_queue *q,
 	return q->limits.max_sectors;
 }
 
+/*
+ * Return maximum size of a request at given offset. Only valid for
+ * file system requests.
+ */
+static inline unsigned int blk_max_size_offset(struct request_queue *q,
+					       sector_t offset)
+{
+	if (!q->limits.chunk_sectors)
+		return q->limits.max_sectors;
+
+	return q->limits.chunk_sectors -
+			(offset & (q->limits.chunk_sectors - 1));
+}
+
 static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 {
 	struct request_queue *q = rq->q;
@@ -901,7 +934,11 @@ static inline unsigned int blk_rq_get_max_sectors(struct request *rq)
 	if (unlikely(rq->cmd_type == REQ_TYPE_BLOCK_PC))
 		return q->limits.max_hw_sectors;
 
-	return blk_queue_get_max_sectors(q, rq->cmd_flags);
+	if (!q->limits.chunk_sectors)
+		return blk_queue_get_max_sectors(q, rq->cmd_flags);
+
+	return min(blk_max_size_offset(q, blk_rq_pos(rq)),
+			blk_queue_get_max_sectors(q, rq->cmd_flags));
 }
 
 static inline unsigned int blk_rq_count_bios(struct request *rq)
@@ -937,6 +974,7 @@ extern struct request *blk_fetch_request(struct request_queue *q);
  */
 extern bool blk_update_request(struct request *rq, int error,
 			       unsigned int nr_bytes);
+extern void blk_finish_request(struct request *rq, int error);
 extern bool blk_end_request(struct request *rq, int error,
 				unsigned int nr_bytes);
 extern void blk_end_request_all(struct request *rq, int error);
@@ -966,6 +1004,7 @@ extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
 extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
+extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
@@ -1053,7 +1092,6 @@ static inline void blk_post_runtime_resume(struct request_queue *q, int err) {}
  * schedule() where blk_schedule_flush_plug() is called.
  */
 struct blk_plug {
-	unsigned long magic; /* detect uninitialized use-cases */
 	struct list_head list; /* requests */
 	struct list_head mq_list; /* blk-mq requests */
 	struct list_head cb_list; /* md requires an unplug callback */
@@ -1102,7 +1140,8 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 /*
  * tag stuff
  */
-#define blk_rq_tagged(rq)		((rq)->cmd_flags & REQ_QUEUED)
+#define blk_rq_tagged(rq) \
+	((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
 extern int blk_queue_start_tag(struct request_queue *, struct request *);
 extern struct request *blk_queue_find_tag(struct request_queue *, int);
 extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1370,8 +1409,9 @@ static inline void put_dev_sector(Sector p)
 }
 
 struct work_struct;
-int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
+int kblockd_schedule_work(struct work_struct *work);
+int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
+int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1570,6 +1610,7 @@ static inline bool blk_integrity_is_initialized(struct gendisk *g)
 struct block_device_operations {
 	int (*open) (struct block_device *, fmode_t);
 	void (*release) (struct gendisk *, fmode_t);
+	int (*rw_page)(struct block_device *, sector_t, struct page *, int rw);
 	int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*direct_access) (struct block_device *, sector_t,
@@ -1588,7 +1629,13 @@ struct block_device_operations {
 
 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
 				 unsigned long);
+extern int bdev_read_page(struct block_device *, sector_t, struct page *);
+extern int bdev_write_page(struct block_device *, sector_t, struct page *,
+						struct writeback_control *);
 #else /* CONFIG_BLOCK */
+
+struct block_device;
+
 /*
  * stubs for when the block layer is configured out
  */
@@ -1624,6 +1671,12 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 	return false;
 }
 
+static inline int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
+				     sector_t *error_sector)
+{
+	return 0;
+}
+
 #endif /* CONFIG_BLOCK */
 
 #endif
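
Note on the new chunk_sectors limit: blk_max_size_offset() above relies on chunk_sectors being a power of two, so `offset & (chunk_sectors - 1)` is the offset within the current chunk, and subtracting it from chunk_sectors gives the number of sectors left before the next chunk boundary; blk_rq_get_max_sectors() then takes the min of that and the usual max_sectors limit so a file system request never straddles a chunk. Below is a minimal stand-alone sketch of that arithmetic only; it is user-space C, and the helper name and the 256-sector chunk value are made up for illustration, not part of this header.

/*
 * Stand-alone sketch of the blk_max_size_offset() arithmetic.
 * Assumes chunk_sectors is a power of two, as the mask trick requires;
 * the values used in main() are hypothetical.
 */
#include <stdio.h>

typedef unsigned long long sector_t;

static unsigned int max_size_at(unsigned int chunk_sectors,
				unsigned int max_sectors, sector_t offset)
{
	if (!chunk_sectors)	/* no chunking: only max_sectors applies */
		return max_sectors;

	/* sectors remaining before the next chunk boundary */
	return chunk_sectors - (offset & (sector_t)(chunk_sectors - 1));
}

int main(void)
{
	/* e.g. a device advertising 256-sector (128 KiB) chunks */
	unsigned int chunk = 256, max = 1024;

	printf("%u\n", max_size_at(chunk, max, 0));	/* 256: full chunk left  */
	printf("%u\n", max_size_at(chunk, max, 200));	/* 56: near the boundary */
	printf("%u\n", max_size_at(chunk, max, 256));	/* 256: new chunk starts */
	return 0;
}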