Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 84 ++++++++++++++++++++++++----------------------------------
 1 file changed, 37 insertions(+), 47 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 8699bcf5f099..aac0f9ea952a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -21,6 +21,7 @@
 #include <linux/bsg.h>
 #include <linux/smp.h>
 #include <linux/rcupdate.h>
+#include <linux/percpu-refcount.h>
 
 #include <asm/scatterlist.h>
 
@@ -35,6 +36,7 @@ struct request;
 struct sg_io_hdr;
 struct bsg_job;
 struct blkcg_gq;
+struct blk_flush_queue;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -454,14 +456,7 @@ struct request_queue {
 	 */
 	unsigned int		flush_flags;
 	unsigned int		flush_not_queueable:1;
-	unsigned int		flush_queue_delayed:1;
-	unsigned int		flush_pending_idx:1;
-	unsigned int		flush_running_idx:1;
-	unsigned long		flush_pending_since;
-	struct list_head	flush_queue[2];
-	struct list_head	flush_data_in_flight;
-	struct request		*flush_rq;
-	spinlock_t		mq_flush_lock;
+	struct blk_flush_queue	*fq;
 
 	struct list_head	requeue_list;
 	spinlock_t		requeue_lock;
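The eight flush-related fields removed above are consolidated behind the single struct blk_flush_queue *fq pointer. A rough sketch of what that container would hold, assuming the removed fields simply moved into the block layer's private header (block/blk.h); the in-tree layout may differ:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Sketch only: mirrors the fields deleted from struct request_queue above. */
struct blk_flush_queue {
	unsigned int		flush_queue_delayed:1;
	unsigned int		flush_pending_idx:1;	/* which flush_queue[] is pending */
	unsigned int		flush_running_idx:1;	/* which flush_queue[] is running */
	unsigned long		flush_pending_since;
	struct list_head	flush_queue[2];		/* double-buffered pending lists */
	struct list_head	flush_data_in_flight;
	struct request		*flush_rq;		/* request used to issue the flush */
	spinlock_t		mq_flush_lock;		/* protects the state above */
};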
@@ -470,6 +465,7 @@ struct request_queue {
 	struct mutex		sysfs_lock;
 
 	int			bypass_depth;
+	int			mq_freeze_depth;
 
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
@@ -483,7 +479,7 @@ struct request_queue {
 #endif
 	struct rcu_head		rcu_head;
 	wait_queue_head_t	mq_freeze_wq;
-	struct percpu_counter	mq_usage_counter;
+	struct percpu_ref	mq_usage_counter;
 	struct list_head	all_q_node;
 
 	struct blk_mq_tag_set	*tag_set;
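With mq_usage_counter now a percpu_ref (and the new mq_freeze_depth counter above), queue freezing can lean on the percpu_ref kill/drain machinery instead of summing a per-CPU counter. An illustrative sketch of that pattern; the example_* helpers are hypothetical, only the percpu_ref calls are existing API:

#include <linux/blkdev.h>
#include <linux/percpu-refcount.h>

/* Hypothetical helper: I/O submission paths take a reference unless the
 * freezer has already killed the ref.
 */
static int example_queue_enter(struct request_queue *q)
{
	if (percpu_ref_tryget_live(&q->mq_usage_counter))
		return 0;
	return -EBUSY;		/* queue is being frozen or torn down */
}

/* Hypothetical helper: drop the reference when the I/O path is done. */
static void example_queue_exit(struct request_queue *q)
{
	percpu_ref_put(&q->mq_usage_counter);
}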
@@ -863,7 +859,7 @@ extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
-	return bdev->bd_disk->queue;
+	return bdev->bd_disk->queue;	/* this is never NULL */
 }
 
 /*
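The added comment documents that bdev_get_queue() never returns NULL, so callers can chain through it directly. A minimal, hypothetical caller relying on that guarantee:

#include <linux/blkdev.h>

static inline unsigned int example_block_size(struct block_device *bdev)
{
	/* No NULL check needed on the returned queue. */
	return queue_logical_block_size(bdev_get_queue(bdev));
}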
@@ -1140,8 +1136,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
 /*
  * tag stuff
  */
-#define blk_rq_tagged(rq) \
-	((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
+#define blk_rq_tagged(rq)	((rq)->cmd_flags & REQ_QUEUED)
 extern int blk_queue_start_tag(struct request_queue *, struct request *);
 extern struct request *blk_queue_find_tag(struct request_queue *, int);
 extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1283,10 +1278,9 @@ static inline int queue_alignment_offset(struct request_queue *q)
 static inline int queue_limit_alignment_offset(struct queue_limits *lim, sector_t sector)
 {
 	unsigned int granularity = max(lim->physical_block_size, lim->io_min);
-	unsigned int alignment = (sector << 9) & (granularity - 1);
+	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
 
-	return (granularity + lim->alignment_offset - alignment)
-		& (granularity - 1);
+	return (granularity + lim->alignment_offset - alignment) % granularity;
 }
 
 static inline int bdev_alignment_offset(struct block_device *bdev)
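The rewritten helper drops the power-of-two assumption on the granularity: the remainder is taken with sector_div() and %, rather than masking with granularity - 1. A worked example with illustrative values (12 KiB io_min, no extra alignment_offset), mirroring the arithmetic of the new helper:

#include <linux/kernel.h>	/* sector_div() */
#include <linux/types.h>	/* sector_t     */

/* Illustrative only; called with, say, sector = 100. */
static inline int example_alignment_offset(sector_t sector)
{
	unsigned int granularity = 12288;	/* max(4096, 12288); not a power of two */
	unsigned int alignment_offset = 0;
	unsigned int alignment = sector_div(sector, granularity >> 9) << 9;
						/* 100 % 24 sectors = 4 sectors -> 2048 bytes */

	/* (12288 + 0 - 2048) % 12288 = 10240 */
	return (granularity + alignment_offset - alignment) % granularity;
}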
@@ -1462,32 +1456,31 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#define INTEGRITY_FLAG_READ	2	/* verify data integrity on read */
-#define INTEGRITY_FLAG_WRITE	4	/* generate data integrity on write */
+enum blk_integrity_flags {
+	BLK_INTEGRITY_VERIFY		= 1 << 0,
+	BLK_INTEGRITY_GENERATE		= 1 << 1,
+	BLK_INTEGRITY_DEVICE_CAPABLE	= 1 << 2,
+	BLK_INTEGRITY_IP_CHECKSUM	= 1 << 3,
+};
 
-struct blk_integrity_exchg {
+struct blk_integrity_iter {
 	void			*prot_buf;
 	void			*data_buf;
-	sector_t		sector;
+	sector_t		seed;
 	unsigned int		data_size;
-	unsigned short		sector_size;
+	unsigned short		interval;
 	const char		*disk_name;
 };
 
-typedef void (integrity_gen_fn) (struct blk_integrity_exchg *);
-typedef int (integrity_vrfy_fn) (struct blk_integrity_exchg *);
-typedef void (integrity_set_tag_fn) (void *, void *, unsigned int);
-typedef void (integrity_get_tag_fn) (void *, void *, unsigned int);
+typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
 
 struct blk_integrity {
-	integrity_gen_fn	*generate_fn;
-	integrity_vrfy_fn	*verify_fn;
-	integrity_set_tag_fn	*set_tag_fn;
-	integrity_get_tag_fn	*get_tag_fn;
+	integrity_processing_fn	*generate_fn;
+	integrity_processing_fn	*verify_fn;
 
 	unsigned short		flags;
 	unsigned short		tuple_size;
-	unsigned short		sector_size;
+	unsigned short		interval;
 	unsigned short		tag_size;
 
 	const char		*name;
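Under the reworked interface a protection-capable driver fills in a single integrity_processing_fn for both directions and advertises capabilities through the new flag bits. A hypothetical profile (not one from the tree) showing the shape:

#include <linux/blkdev.h>

/* Hypothetical callback: would walk iter->data_buf / iter->prot_buf in
 * iter->interval chunks, seeding reference tags from iter->seed, and
 * return 0 on success.
 */
static int example_process(struct blk_integrity_iter *iter)
{
	return 0;
}

static struct blk_integrity example_integrity = {
	.name		= "EXAMPLE-PROFILE",
	.generate_fn	= example_process,
	.verify_fn	= example_process,
	.tuple_size	= 8,		/* bytes of PI per interval */
	.interval	= 512,		/* protection interval in bytes */
	.flags		= BLK_INTEGRITY_VERIFY | BLK_INTEGRITY_GENERATE,
};

Such a profile would then be handed to blk_integrity_register() for the gendisk in question.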
@@ -1502,10 +1495,10 @@ extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
 				   struct scatterlist *);
 extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
-extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
-				  struct request *);
-extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
-				   struct bio *);
+extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
+				   struct request *);
+extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
+				    struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1518,12 +1511,9 @@ static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 	return disk->integrity;
 }
 
-static inline int blk_integrity_rq(struct request *rq)
+static inline bool blk_integrity_rq(struct request *rq)
 {
-	if (rq->bio == NULL)
-		return 0;
-
-	return bio_integrity(rq->bio);
+	return rq->cmd_flags & REQ_INTEGRITY;
 }
 
 static inline void blk_queue_max_integrity_segments(struct request_queue *q,
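blk_integrity_rq() is now a pure REQ_INTEGRITY flag test on the request. An illustrative driver-side use together with blk_rq_map_integrity_sg() declared above; example_prep_prot_sg() is hypothetical:

#include <linux/blkdev.h>
#include <linux/scatterlist.h>

static int example_prep_prot_sg(struct request_queue *q, struct request *rq,
				struct scatterlist *prot_sg)
{
	/* Only build a protection scatterlist when integrity metadata was
	 * actually attached to the request's bios.
	 */
	if (!blk_integrity_rq(rq))
		return 0;

	return blk_rq_map_integrity_sg(q, rq->bio, prot_sg);
}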
@@ -1562,7 +1552,7 @@ static inline int blk_rq_map_integrity_sg(struct request_queue *q,
 }
 static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
 {
-	return 0;
+	return NULL;
 }
 static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
@@ -1588,17 +1578,17 @@ static inline unsigned short queue_max_integrity_segments(struct request_queue *
 {
 	return 0;
 }
-static inline int blk_integrity_merge_rq(struct request_queue *rq,
-					 struct request *r1,
-					 struct request *r2)
+static inline bool blk_integrity_merge_rq(struct request_queue *rq,
+					  struct request *r1,
+					  struct request *r2)
 {
-	return 0;
+	return true;
 }
-static inline int blk_integrity_merge_bio(struct request_queue *rq,
-					  struct request *r,
-					  struct bio *b)
+static inline bool blk_integrity_merge_bio(struct request_queue *rq,
+					   struct request *r,
+					   struct bio *b)
 {
-	return 0;
+	return true;
 }
 static inline bool blk_integrity_is_initialized(struct gendisk *g)
 {