diff options
author | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-11-23 03:04:05 -0500 |
---|---|---|
committer | Daniel Vetter <daniel.vetter@ffwll.ch> | 2015-11-23 03:04:05 -0500 |
commit | 92907cbbef8625bb3998d1eb385fc88f23c97a3f (patch) | |
tree | 15626ff9287e37c3cb81c7286d6db5a7fd77c854 /include/linux/blkdev.h | |
parent | 15fbfccfe92c62ae8d1ecc647c44157ed01ac02e (diff) | |
parent | 1ec218373b8ebda821aec00bb156a9c94fad9cd4 (diff) |
Merge tag 'v4.4-rc2' into drm-intel-next-queued
Linux 4.4-rc2
Backmerge to get at
commit 1b0e3a049efe471c399674fd954500ce97438d30
Author: Imre Deak <imre.deak@intel.com>
Date: Thu Nov 5 23:04:11 2015 +0200
drm/i915/skl: disable display side power well support for now
so that we can properly re-enable skl power wells in -next.
Conflicts are just adjacent lines changed, except for intel_fbdev.c
where we need to interleave the changes. Nothing nefarious.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r-- | include/linux/blkdev.h | 61 |
1 files changed, 33 insertions, 28 deletions
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index 99da9ebc7377..c0d2b7927c1f 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h | |||
@@ -35,6 +35,7 @@ struct sg_io_hdr; | |||
35 | struct bsg_job; | 35 | struct bsg_job; |
36 | struct blkcg_gq; | 36 | struct blkcg_gq; |
37 | struct blk_flush_queue; | 37 | struct blk_flush_queue; |
38 | struct pr_ops; | ||
38 | 39 | ||
39 | #define BLKDEV_MIN_RQ 4 | 40 | #define BLKDEV_MIN_RQ 4 |
40 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ | 41 | #define BLKDEV_MAX_RQ 128 /* Default maximum */ |
@@ -208,7 +209,7 @@ static inline unsigned short req_get_ioprio(struct request *req) | |||
208 | struct blk_queue_ctx; | 209 | struct blk_queue_ctx; |
209 | 210 | ||
210 | typedef void (request_fn_proc) (struct request_queue *q); | 211 | typedef void (request_fn_proc) (struct request_queue *q); |
211 | typedef void (make_request_fn) (struct request_queue *q, struct bio *bio); | 212 | typedef blk_qc_t (make_request_fn) (struct request_queue *q, struct bio *bio); |
212 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); | 213 | typedef int (prep_rq_fn) (struct request_queue *, struct request *); |
213 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); | 214 | typedef void (unprep_rq_fn) (struct request_queue *, struct request *); |
214 | 215 | ||
@@ -369,6 +370,10 @@ struct request_queue { | |||
369 | */ | 370 | */ |
370 | struct kobject mq_kobj; | 371 | struct kobject mq_kobj; |
371 | 372 | ||
373 | #ifdef CONFIG_BLK_DEV_INTEGRITY | ||
374 | struct blk_integrity integrity; | ||
375 | #endif /* CONFIG_BLK_DEV_INTEGRITY */ | ||
376 | |||
372 | #ifdef CONFIG_PM | 377 | #ifdef CONFIG_PM |
373 | struct device *dev; | 378 | struct device *dev; |
374 | int rpm_status; | 379 | int rpm_status; |
@@ -450,12 +455,14 @@ struct request_queue { | |||
450 | #endif | 455 | #endif |
451 | struct rcu_head rcu_head; | 456 | struct rcu_head rcu_head; |
452 | wait_queue_head_t mq_freeze_wq; | 457 | wait_queue_head_t mq_freeze_wq; |
453 | struct percpu_ref mq_usage_counter; | 458 | struct percpu_ref q_usage_counter; |
454 | struct list_head all_q_node; | 459 | struct list_head all_q_node; |
455 | 460 | ||
456 | struct blk_mq_tag_set *tag_set; | 461 | struct blk_mq_tag_set *tag_set; |
457 | struct list_head tag_set_list; | 462 | struct list_head tag_set_list; |
458 | struct bio_set *bio_split; | 463 | struct bio_set *bio_split; |
464 | |||
465 | bool mq_sysfs_init_done; | ||
459 | }; | 466 | }; |
460 | 467 | ||
461 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ | 468 | #define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ |
@@ -480,6 +487,7 @@ struct request_queue { | |||
480 | #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ | 487 | #define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ |
481 | #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ | 488 | #define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ |
482 | #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ | 489 | #define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ |
490 | #define QUEUE_FLAG_POLL 22 /* IO polling enabled if set */ | ||
483 | 491 | ||
484 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ | 492 | #define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ |
485 | (1 << QUEUE_FLAG_STACKABLE) | \ | 493 | (1 << QUEUE_FLAG_STACKABLE) | \ |
@@ -754,7 +762,7 @@ static inline void rq_flush_dcache_pages(struct request *rq) | |||
754 | 762 | ||
755 | extern int blk_register_queue(struct gendisk *disk); | 763 | extern int blk_register_queue(struct gendisk *disk); |
756 | extern void blk_unregister_queue(struct gendisk *disk); | 764 | extern void blk_unregister_queue(struct gendisk *disk); |
757 | extern void generic_make_request(struct bio *bio); | 765 | extern blk_qc_t generic_make_request(struct bio *bio); |
758 | extern void blk_rq_init(struct request_queue *q, struct request *rq); | 766 | extern void blk_rq_init(struct request_queue *q, struct request *rq); |
759 | extern void blk_put_request(struct request *); | 767 | extern void blk_put_request(struct request *); |
760 | extern void __blk_put_request(struct request_queue *, struct request *); | 768 | extern void __blk_put_request(struct request_queue *, struct request *); |
@@ -786,6 +794,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t, | |||
786 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, | 794 | extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t, |
787 | struct scsi_ioctl_command __user *); | 795 | struct scsi_ioctl_command __user *); |
788 | 796 | ||
797 | extern int blk_queue_enter(struct request_queue *q, gfp_t gfp); | ||
798 | extern void blk_queue_exit(struct request_queue *q); | ||
789 | extern void blk_start_queue(struct request_queue *q); | 799 | extern void blk_start_queue(struct request_queue *q); |
790 | extern void blk_stop_queue(struct request_queue *q); | 800 | extern void blk_stop_queue(struct request_queue *q); |
791 | extern void blk_sync_queue(struct request_queue *q); | 801 | extern void blk_sync_queue(struct request_queue *q); |
@@ -807,6 +817,8 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *, | |||
807 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, | 817 | extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *, |
808 | struct request *, int, rq_end_io_fn *); | 818 | struct request *, int, rq_end_io_fn *); |
809 | 819 | ||
820 | bool blk_poll(struct request_queue *q, blk_qc_t cookie); | ||
821 | |||
810 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) | 822 | static inline struct request_queue *bdev_get_queue(struct block_device *bdev) |
811 | { | 823 | { |
812 | return bdev->bd_disk->queue; /* this is never NULL */ | 824 | return bdev->bd_disk->queue; /* this is never NULL */ |
@@ -1460,22 +1472,13 @@ struct blk_integrity_iter { | |||
1460 | 1472 | ||
1461 | typedef int (integrity_processing_fn) (struct blk_integrity_iter *); | 1473 | typedef int (integrity_processing_fn) (struct blk_integrity_iter *); |
1462 | 1474 | ||
1463 | struct blk_integrity { | 1475 | struct blk_integrity_profile { |
1464 | integrity_processing_fn *generate_fn; | 1476 | integrity_processing_fn *generate_fn; |
1465 | integrity_processing_fn *verify_fn; | 1477 | integrity_processing_fn *verify_fn; |
1466 | 1478 | const char *name; | |
1467 | unsigned short flags; | ||
1468 | unsigned short tuple_size; | ||
1469 | unsigned short interval; | ||
1470 | unsigned short tag_size; | ||
1471 | |||
1472 | const char *name; | ||
1473 | |||
1474 | struct kobject kobj; | ||
1475 | }; | 1479 | }; |
1476 | 1480 | ||
1477 | extern bool blk_integrity_is_initialized(struct gendisk *); | 1481 | extern void blk_integrity_register(struct gendisk *, struct blk_integrity *); |
1478 | extern int blk_integrity_register(struct gendisk *, struct blk_integrity *); | ||
1479 | extern void blk_integrity_unregister(struct gendisk *); | 1482 | extern void blk_integrity_unregister(struct gendisk *); |
1480 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); | 1483 | extern int blk_integrity_compare(struct gendisk *, struct gendisk *); |
1481 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, | 1484 | extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *, |
@@ -1486,15 +1489,20 @@ extern bool blk_integrity_merge_rq(struct request_queue *, struct request *, | |||
1486 | extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, | 1489 | extern bool blk_integrity_merge_bio(struct request_queue *, struct request *, |
1487 | struct bio *); | 1490 | struct bio *); |
1488 | 1491 | ||
1489 | static inline | 1492 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) |
1490 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | ||
1491 | { | 1493 | { |
1492 | return bdev->bd_disk->integrity; | 1494 | struct blk_integrity *bi = &disk->queue->integrity; |
1495 | |||
1496 | if (!bi->profile) | ||
1497 | return NULL; | ||
1498 | |||
1499 | return bi; | ||
1493 | } | 1500 | } |
1494 | 1501 | ||
1495 | static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk) | 1502 | static inline |
1503 | struct blk_integrity *bdev_get_integrity(struct block_device *bdev) | ||
1496 | { | 1504 | { |
1497 | return disk->integrity; | 1505 | return blk_get_integrity(bdev->bd_disk); |
1498 | } | 1506 | } |
1499 | 1507 | ||
1500 | static inline bool blk_integrity_rq(struct request *rq) | 1508 | static inline bool blk_integrity_rq(struct request *rq) |
@@ -1568,10 +1576,9 @@ static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b) | |||
1568 | { | 1576 | { |
1569 | return 0; | 1577 | return 0; |
1570 | } | 1578 | } |
1571 | static inline int blk_integrity_register(struct gendisk *d, | 1579 | static inline void blk_integrity_register(struct gendisk *d, |
1572 | struct blk_integrity *b) | 1580 | struct blk_integrity *b) |
1573 | { | 1581 | { |
1574 | return 0; | ||
1575 | } | 1582 | } |
1576 | static inline void blk_integrity_unregister(struct gendisk *d) | 1583 | static inline void blk_integrity_unregister(struct gendisk *d) |
1577 | { | 1584 | { |
@@ -1596,10 +1603,7 @@ static inline bool blk_integrity_merge_bio(struct request_queue *rq, | |||
1596 | { | 1603 | { |
1597 | return true; | 1604 | return true; |
1598 | } | 1605 | } |
1599 | static inline bool blk_integrity_is_initialized(struct gendisk *g) | 1606 | |
1600 | { | ||
1601 | return 0; | ||
1602 | } | ||
1603 | static inline bool integrity_req_gap_back_merge(struct request *req, | 1607 | static inline bool integrity_req_gap_back_merge(struct request *req, |
1604 | struct bio *next) | 1608 | struct bio *next) |
1605 | { | 1609 | { |
@@ -1631,6 +1635,7 @@ struct block_device_operations { | |||
1631 | /* this callback is with swap_lock and sometimes page table lock held */ | 1635 | /* this callback is with swap_lock and sometimes page table lock held */ |
1632 | void (*swap_slot_free_notify) (struct block_device *, unsigned long); | 1636 | void (*swap_slot_free_notify) (struct block_device *, unsigned long); |
1633 | struct module *owner; | 1637 | struct module *owner; |
1638 | const struct pr_ops *pr_ops; | ||
1634 | }; | 1639 | }; |
1635 | 1640 | ||
1636 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, | 1641 | extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int, |