path: root/include/linux/blkdev.h
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	98
1 file changed, 74 insertions(+), 24 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 38a5ff772a37..d045ca8487af 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -35,6 +35,7 @@ struct sg_io_hdr;
 struct bsg_job;
 struct blkcg_gq;
 struct blk_flush_queue;
+struct pr_ops;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
@@ -369,6 +370,10 @@ struct request_queue {
 	 */
 	struct kobject mq_kobj;
 
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+	struct blk_integrity integrity;
+#endif	/* CONFIG_BLK_DEV_INTEGRITY */
+
 #ifdef CONFIG_PM
 	struct device *dev;
 	int rpm_status;
@@ -450,12 +455,14 @@ struct request_queue {
 #endif
 	struct rcu_head rcu_head;
 	wait_queue_head_t mq_freeze_wq;
-	struct percpu_ref mq_usage_counter;
+	struct percpu_ref q_usage_counter;
 	struct list_head all_q_node;
 
 	struct blk_mq_tag_set *tag_set;
 	struct list_head tag_set_list;
 	struct bio_set *bio_split;
+
+	bool mq_sysfs_init_done;
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -1368,6 +1375,26 @@ static inline bool bvec_gap_to_prev(struct request_queue *q,
 		((bprv->bv_offset + bprv->bv_len) & queue_virt_boundary(q));
 }
 
+static inline bool bio_will_gap(struct request_queue *q, struct bio *prev,
+			 struct bio *next)
+{
+	if (!bio_has_data(prev))
+		return false;
+
+	return bvec_gap_to_prev(q, &prev->bi_io_vec[prev->bi_vcnt - 1],
+				next->bi_io_vec[0].bv_offset);
+}
+
+static inline bool req_gap_back_merge(struct request *req, struct bio *bio)
+{
+	return bio_will_gap(req->q, req->biotail, bio);
+}
+
+static inline bool req_gap_front_merge(struct request *req, struct bio *bio)
+{
+	return bio_will_gap(req->q, bio, req->bio);
+}
+
 struct work_struct;
 int kblockd_schedule_work(struct work_struct *work);
 int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
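Editor's note: the new req_gap_back_merge()/req_gap_front_merge() helpers reduce to the mask test visible in bvec_gap_to_prev() above: two adjacent segments may only be merged if they line up on the queue's virtual boundary. A minimal standalone sketch of that arithmetic follows; the struct, function names, and the 4 KiB mask are illustrative assumptions, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for a bio_vec; fields mirror bv_offset/bv_len. */
struct seg {
	unsigned int offset;	/* byte offset of the segment in its page */
	unsigned int len;	/* segment length in bytes */
};

/* Simplified boundary check: true means the two segments cannot be merged
 * because a gap would straddle the virtual boundary.  (Assumption: only the
 * clause shown in the hunk above is modelled here.) */
static bool gap_to_prev(const struct seg *prev, unsigned int next_offset,
			unsigned long virt_boundary_mask)
{
	return (next_offset & virt_boundary_mask) ||
	       ((prev->offset + prev->len) & virt_boundary_mask);
}

int main(void)
{
	const unsigned long mask = 0xfff;		/* assumed 4 KiB boundary */
	struct seg full_page = { .offset = 0, .len = 4096 };
	struct seg short_seg = { .offset = 0, .len = 512 };

	printf("full page then aligned next:  gap=%d\n",
	       gap_to_prev(&full_page, 0, mask));	/* prints gap=0 */
	printf("short seg then aligned next:  gap=%d\n",
	       gap_to_prev(&short_seg, 0, mask));	/* prints gap=1 */
	return 0;
}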
@@ -1440,22 +1467,13 @@ struct blk_integrity_iter {
 
 typedef int (integrity_processing_fn) (struct blk_integrity_iter *);
 
-struct blk_integrity {
+struct blk_integrity_profile {
 	integrity_processing_fn *generate_fn;
 	integrity_processing_fn *verify_fn;
-
-	unsigned short flags;
-	unsigned short tuple_size;
-	unsigned short interval;
-	unsigned short tag_size;
-
-	const char *name;
-
-	struct kobject kobj;
+	const char *name;
 };
 
-extern bool blk_integrity_is_initialized(struct gendisk *);
-extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
+extern void blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
 extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
@@ -1466,15 +1484,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *,
 extern bool blk_integrity_merge_bio(struct request_queue *, struct request *,
 				    struct bio *);
 
-static inline
-struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
 {
-	return bdev->bd_disk->integrity;
+	struct blk_integrity *bi = &disk->queue->integrity;
+
+	if (!bi->profile)
+		return NULL;
+
+	return bi;
 }
 
-static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+static inline
+struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
 {
-	return disk->integrity;
+	return blk_get_integrity(bdev->bd_disk);
 }
 
 static inline bool blk_integrity_rq(struct request *rq)
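Editor's note: with this change the integrity state moves from the gendisk into the request_queue, and blk_get_integrity() returns NULL until a profile has been registered, which is what replaces the removed blk_integrity_is_initialized(). A hedged, kernel-context sketch of how a driver might use the reworked API is below; the "example_" names are hypothetical, and only identifiers visible in this diff (generate_fn, verify_fn, name, blk_integrity::profile, blk_integrity_register(), blk_get_integrity()) are relied on.

#include <linux/blkdev.h>

/* No-op callbacks, just to satisfy the integrity_processing_fn typedef. */
static int example_generate_fn(struct blk_integrity_iter *iter)
{
	return 0;	/* sketch: pretend generation succeeded */
}

static int example_verify_fn(struct blk_integrity_iter *iter)
{
	return 0;	/* sketch: pretend verification succeeded */
}

static const struct blk_integrity_profile example_profile = {
	.name		= "EXAMPLE-PROFILE",	/* hypothetical */
	.generate_fn	= example_generate_fn,
	.verify_fn	= example_verify_fn,
};

static void example_enable_integrity(struct gendisk *disk)
{
	/* Assumption: blk_integrity carries a profile pointer, as referenced
	 * by blk_get_integrity() above; a stack template is passed in. */
	struct blk_integrity bi = {
		.profile = &example_profile,
	};

	blk_integrity_register(disk, &bi);	/* now returns void */
}

static bool example_integrity_enabled(struct gendisk *disk)
{
	/* NULL now means "no integrity profile registered". */
	return blk_get_integrity(disk) != NULL;
}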
@@ -1494,6 +1517,26 @@ queue_max_integrity_segments(struct request_queue *q)
 	return q->limits.max_integrity_segments;
 }
 
+static inline bool integrity_req_gap_back_merge(struct request *req,
+						struct bio *next)
+{
+	struct bio_integrity_payload *bip = bio_integrity(req->bio);
+	struct bio_integrity_payload *bip_next = bio_integrity(next);
+
+	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+				bip_next->bip_vec[0].bv_offset);
+}
+
+static inline bool integrity_req_gap_front_merge(struct request *req,
+						 struct bio *bio)
+{
+	struct bio_integrity_payload *bip = bio_integrity(bio);
+	struct bio_integrity_payload *bip_next = bio_integrity(req->bio);
+
+	return bvec_gap_to_prev(req->q, &bip->bip_vec[bip->bip_vcnt - 1],
+				bip_next->bip_vec[0].bv_offset);
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 struct bio;
@@ -1528,10 +1571,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
 {
 	return 0;
 }
-static inline int blk_integrity_register(struct gendisk *d,
+static inline void blk_integrity_register(struct gendisk *d,
 					 struct blk_integrity *b)
 {
-	return 0;
 }
 static inline void blk_integrity_unregister(struct gendisk *d)
 {
@@ -1556,9 +1598,16 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq,
 {
 	return true;
 }
-static inline bool blk_integrity_is_initialized(struct gendisk *g)
+
+static inline bool integrity_req_gap_back_merge(struct request *req,
+						struct bio *next)
+{
+	return false;
+}
+static inline bool integrity_req_gap_front_merge(struct request *req,
+						 struct bio *bio)
 {
-	return 0;
+	return false;
 }
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -1581,6 +1630,7 @@ struct block_device_operations {
 	/* this callback is with swap_lock and sometimes page table lock held */
 	void (*swap_slot_free_notify) (struct block_device *, unsigned long);
 	struct module *owner;
+	const struct pr_ops *pr_ops;
 };
 
 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
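Editor's note: the new pr_ops member lets a driver expose persistent-reservation callbacks through its block_device_operations. A hedged sketch of the wiring follows; struct pr_ops is only forward-declared in this header and defined elsewhere, and example_pr_ops is a hypothetical, driver-provided instance.

#include <linux/blkdev.h>
#include <linux/module.h>

/* Hypothetical driver-defined persistent reservation operations,
 * assumed to be defined in another translation unit. */
extern const struct pr_ops example_pr_ops;

static const struct block_device_operations example_fops = {
	.owner	= THIS_MODULE,
	.pr_ops	= &example_pr_ops,	/* new const struct pr_ops * member */
};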