path: root/include/linux/blkdev.h
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--    include/linux/blkdev.h    323
1 file changed, 197 insertions(+), 126 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 2c54906f678f..1a23722e8878 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -108,13 +108,20 @@ struct request {
 
        /*
         * Three pointers are available for the IO schedulers, if they need
-        * more they have to dynamically allocate it.
+        * more they have to dynamically allocate it.  Flush requests are
+        * never put on the IO scheduler. So let the flush fields share
+        * space with the three elevator_private pointers.
         */
-       void *elevator_private;
-       void *elevator_private2;
-       void *elevator_private3;
+       union {
+               void *elevator_private[3];
+               struct {
+                       unsigned int seq;
+                       struct list_head list;
+               } flush;
+       };
 
        struct gendisk *rq_disk;
+       struct hd_struct *part;
        unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
        unsigned long long start_time_ns;
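
With the flush fields overlaid on the elevator pointers, an I/O scheduler that used to store per-request data in rq->elevator_private / elevator_private2 now indexes the array instead. A minimal sketch, assuming a hypothetical scheduler that keeps one private pointer per request (the my_sched_* names are illustrative, not from this header):

/* illustrative accessors for the new elevator_private[] layout */
static void my_sched_set_private(struct request *rq, void *data)
{
        rq->elevator_private[0] = data;         /* was rq->elevator_private */
}

static void *my_sched_get_private(struct request *rq)
{
        return rq->elevator_private[0];
}
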
@@ -124,6 +131,9 @@ struct request {
          * physical address coalescing is performed.
          */
         unsigned short nr_phys_segments;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+        unsigned short nr_integrity_segments;
+#endif
 
         unsigned short ioprio;
 
@@ -186,7 +196,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -243,11 +252,12 @@ struct queue_limits {
 
         unsigned short logical_block_size;
         unsigned short max_segments;
+        unsigned short max_integrity_segments;
 
         unsigned char misaligned;
         unsigned char discard_misaligned;
-        unsigned char no_cluster;
-        signed char discard_zeroes_data;
+        unsigned char cluster;
+        unsigned char discard_zeroes_data;
 };
 
 struct request_queue
@@ -268,7 +278,6 @@ struct request_queue
         make_request_fn *make_request_fn;
         prep_rq_fn *prep_rq_fn;
         unprep_rq_fn *unprep_rq_fn;
-        unplug_fn *unplug_fn;
         merge_bvec_fn *merge_bvec_fn;
         softirq_done_fn *softirq_done_fn;
         rq_timed_out_fn *rq_timed_out_fn;
@@ -282,12 +291,9 @@ struct request_queue
         struct request *boundary_rq;
 
         /*
-         * Auto-unplugging state
+         * Delayed queue handling
          */
-        struct timer_list unplug_timer;
-        int unplug_thresh;              /* After this many requests */
-        unsigned long unplug_delay;     /* After this many jiffies */
-        struct work_struct unplug_work;
+        struct delayed_work delay_work;
 
         struct backing_dev_info backing_dev_info;
 
@@ -355,44 +361,50 @@ struct request_queue
         struct blk_trace *blk_trace;
 #endif
         /*
-         * reserved for flush operations
+         * for flush operations
          */
-        unsigned int ordered, next_ordered, ordseq;
-        int orderr, ordcolor;
-        struct request pre_flush_rq, bar_rq, post_flush_rq;
-        struct request *orig_bar_rq;
+        unsigned int flush_flags;
+        unsigned int flush_not_queueable:1;
+        unsigned int flush_queue_delayed:1;
+        unsigned int flush_pending_idx:1;
+        unsigned int flush_running_idx:1;
+        unsigned long flush_pending_since;
+        struct list_head flush_queue[2];
+        struct list_head flush_data_in_flight;
+        struct request flush_rq;
 
         struct mutex sysfs_lock;
 
 #if defined(CONFIG_BLK_DEV_BSG)
         struct bsg_class_device bsg_dev;
 #endif
+
+#ifdef CONFIG_BLK_DEV_THROTTLING
+        /* Throttle data */
+        struct throtl_data *td;
+#endif
 };
 
-#define QUEUE_FLAG_CLUSTER      0       /* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED       1       /* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED      2       /* queue is stopped */
 #define QUEUE_FLAG_SYNCFULL     3       /* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL    4       /* write queue has been filled */
 #define QUEUE_FLAG_DEAD         5       /* queue being torn down */
-#define QUEUE_FLAG_REENTER      6       /* Re-entrancy avoidance */
-#define QUEUE_FLAG_PLUGGED      7       /* queue is plugged */
-#define QUEUE_FLAG_ELVSWITCH    8       /* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI         9       /* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES    10       /* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP   11       /* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO     12       /* fake timeout */
-#define QUEUE_FLAG_STACKABLE   13       /* supports request stacking */
-#define QUEUE_FLAG_NONROT      14       /* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH    6       /* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI         7       /* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES     8       /* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP    9       /* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO     10       /* fake timeout */
+#define QUEUE_FLAG_STACKABLE   11       /* supports request stacking */
+#define QUEUE_FLAG_NONROT      12       /* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT     15       /* do IO stats */
-#define QUEUE_FLAG_DISCARD     16       /* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES   17       /* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM  18       /* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD  19       /* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT     13       /* do IO stats */
+#define QUEUE_FLAG_DISCARD     14       /* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES   15       /* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM  16       /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD  17       /* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT      ((1 << QUEUE_FLAG_IO_STAT) |            \
-                                 (1 << QUEUE_FLAG_CLUSTER) |            \
                                  (1 << QUEUE_FLAG_STACKABLE)    |       \
                                  (1 << QUEUE_FLAG_SAME_COMP)    |       \
                                  (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -462,57 +474,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
         __clear_bit(flag, &q->queue_flags);
 }
 
-enum {
-        /*
-         * Hardbarrier is supported with one of the following methods.
-         *
-         * NONE         : hardbarrier unsupported
-         * DRAIN        : ordering by draining is enough
-         * DRAIN_FLUSH  : ordering by draining w/ pre and post flushes
-         * DRAIN_FUA    : ordering by draining w/ pre flush and FUA write
-         * TAG          : ordering by tag is enough
-         * TAG_FLUSH    : ordering by tag w/ pre and post flushes
-         * TAG_FUA      : ordering by tag w/ pre flush and FUA write
-         */
-        QUEUE_ORDERED_BY_DRAIN          = 0x01,
-        QUEUE_ORDERED_BY_TAG            = 0x02,
-        QUEUE_ORDERED_DO_PREFLUSH       = 0x10,
-        QUEUE_ORDERED_DO_BAR            = 0x20,
-        QUEUE_ORDERED_DO_POSTFLUSH      = 0x40,
-        QUEUE_ORDERED_DO_FUA            = 0x80,
-
-        QUEUE_ORDERED_NONE              = 0x00,
-
-        QUEUE_ORDERED_DRAIN             = QUEUE_ORDERED_BY_DRAIN |
-                                          QUEUE_ORDERED_DO_BAR,
-        QUEUE_ORDERED_DRAIN_FLUSH       = QUEUE_ORDERED_DRAIN |
-                                          QUEUE_ORDERED_DO_PREFLUSH |
-                                          QUEUE_ORDERED_DO_POSTFLUSH,
-        QUEUE_ORDERED_DRAIN_FUA         = QUEUE_ORDERED_DRAIN |
-                                          QUEUE_ORDERED_DO_PREFLUSH |
-                                          QUEUE_ORDERED_DO_FUA,
-
-        QUEUE_ORDERED_TAG               = QUEUE_ORDERED_BY_TAG |
-                                          QUEUE_ORDERED_DO_BAR,
-        QUEUE_ORDERED_TAG_FLUSH         = QUEUE_ORDERED_TAG |
-                                          QUEUE_ORDERED_DO_PREFLUSH |
-                                          QUEUE_ORDERED_DO_POSTFLUSH,
-        QUEUE_ORDERED_TAG_FUA           = QUEUE_ORDERED_TAG |
-                                          QUEUE_ORDERED_DO_PREFLUSH |
-                                          QUEUE_ORDERED_DO_FUA,
-
-        /*
-         * Ordered operation sequence
-         */
-        QUEUE_ORDSEQ_STARTED    = 0x01, /* flushing in progress */
-        QUEUE_ORDSEQ_DRAIN      = 0x02, /* waiting for the queue to be drained */
-        QUEUE_ORDSEQ_PREFLUSH   = 0x04, /* pre-flushing in progress */
-        QUEUE_ORDSEQ_BAR        = 0x08, /* original barrier req in progress */
-        QUEUE_ORDSEQ_POSTFLUSH  = 0x10, /* post-flushing in progress */
-        QUEUE_ORDSEQ_DONE       = 0x20,
-};
-
-#define blk_queue_plugged(q)    test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)     test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)    test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)   test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -521,7 +482,6 @@ enum {
 #define blk_queue_nonrot(q)     test_bit(QUEUE_FLAG_NONROT, &(q)->queue_flags)
 #define blk_queue_io_stat(q)    test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
 #define blk_queue_add_random(q) test_bit(QUEUE_FLAG_ADD_RANDOM, &(q)->queue_flags)
-#define blk_queue_flushing(q)   ((q)->ordseq)
 #define blk_queue_stackable(q)  \
         test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
 #define blk_queue_discard(q)    test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
@@ -550,6 +510,11 @@ enum {
 
 #define rq_data_dir(rq)         ((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+        return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */
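
Segment clustering is now a per-queue limit rather than the removed QUEUE_FLAG_CLUSTER bit, so callers read it through the new helper. A hedged before/after sketch (the can_merge_segments() wrapper is illustrative):

static bool can_merge_segments(struct request_queue *q)
{
        /* old style, flag removed in this diff:
         *      if (test_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags)) ...
         * new style, reading the limit directly:
         */
        return blk_queue_cluster(q) != 0;
}
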
@@ -592,7 +557,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS        \
-        (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER)
+        (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq)        \
         (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
          (((rq)->cmd_flags & REQ_DISCARD) || \
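
REQ_HARDBARRIER is gone; cache-integrity requests are now marked REQ_FLUSH/REQ_FUA and are simply excluded from merging. A minimal sketch of a submitter issuing such a write, assuming the WRITE_FLUSH_FUA helper from the same-era <linux/fs.h> and a caller-prepared bio:

/* illustrative: issue a pre-flush + FUA write for a commit block */
static void write_commit_block(struct block_device *bdev, struct bio *bio)
{
        bio->bi_bdev = bdev;
        submit_bio(WRITE_FLUSH_FUA, bio);       /* never merged, per RQ_NOMERGE_FLAGS */
}
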
@@ -683,7 +648,6 @@ static inline void rq_flush_dcache_pages(struct request *rq)
 
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
-extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
@@ -704,9 +668,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
                                      struct request *rq);
-extern void blk_plug_device(struct request_queue *);
-extern void blk_plug_device_unlocked(struct request_queue *);
-extern int blk_remove_plug(struct request_queue *);
+extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                           unsigned int, void __user *);
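
Drivers that used blk_plug_device() to back off when the hardware was busy now ask the block layer to re-run the queue after a delay instead. A minimal sketch of a request_fn using blk_delay_queue(); the my_hw_* helpers and the 3 ms value are hypothetical:

static void my_request_fn(struct request_queue *q)
{
        struct request *rq;

        while ((rq = blk_fetch_request(q)) != NULL) {
                if (!my_hw_slots_available()) {
                        blk_requeue_request(q, rq);
                        blk_delay_queue(q, 3);  /* re-run the queue in ~3 ms */
                        break;
                }
                my_hw_issue(rq);
        }
}
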
@@ -736,8 +698,9 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *);
+extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
                            struct rq_map_data *, void __user *, unsigned long,
                            gfp_t);
@@ -750,7 +713,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
                           struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
                                   struct request *, int, rq_end_io_fn *);
-extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
@@ -845,13 +807,14 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
 extern void blk_queue_max_discard_sectors(struct request_queue *q,
                 unsigned int max_discard_sectors);
 extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
-extern void blk_queue_physical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
 extern void blk_queue_alignment_offset(struct request_queue *q,
                                        unsigned int alignment);
 extern void blk_limits_io_min(struct queue_limits *limits, unsigned int min);
@@ -881,16 +844,12 @@ extern void blk_queue_update_dma_alignment(struct request_queue *, int);
 extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
+extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
+extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
-extern int blk_queue_ordered(struct request_queue *, unsigned);
-extern bool blk_do_ordered(struct request_queue *, struct request **);
-extern unsigned blk_ordered_cur_seq(struct request_queue *);
-extern unsigned blk_ordered_req_seq(struct request *);
-extern bool blk_ordered_complete_seq(struct request_queue *, unsigned, int);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
 int blk_get_queue(struct request_queue *);
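
Instead of selecting a QUEUE_ORDERED_* mode with blk_queue_ordered(), a driver now just declares which cache-control features the device offers. A hedged sketch for a disk with a volatile write cache and FUA support (my_disk_setup() is illustrative):

static void my_disk_setup(struct request_queue *q)
{
        /* device has a write-back cache and honours FUA writes */
        blk_queue_flush(q, REQ_FLUSH | REQ_FUA);

        /* a cache-only device would pass just REQ_FLUSH;
         * a write-through device passes 0 */
}
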
@@ -898,6 +857,44 @@ struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
+struct blk_plug {
+        unsigned long magic;
+        struct list_head list;
+        struct list_head cb_list;
+        unsigned int should_sort;
+};
+struct blk_plug_cb {
+        struct list_head list;
+        void (*callback)(struct blk_plug_cb *);
+};
+
+extern void blk_start_plug(struct blk_plug *);
+extern void blk_finish_plug(struct blk_plug *);
+extern void blk_flush_plug_list(struct blk_plug *, bool);
+
+static inline void blk_flush_plug(struct task_struct *tsk)
+{
+        struct blk_plug *plug = tsk->plug;
+
+        if (plug)
+                blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+        struct blk_plug *plug = tsk->plug;
+
+        if (plug)
+                blk_flush_plug_list(plug, true);
+}
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+        struct blk_plug *plug = tsk->plug;
+
+        return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
+}
+
 /*
  * tag stuff
  */
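
The on-stack plug replaces the old per-queue plugging: a submitter batches bios between blk_start_plug() and blk_finish_plug(), and the plug list is flushed on finish or when the task schedules. A minimal usage sketch; submit_bio() and the bio preparation come from outside this header, and submit_batch() is illustrative:

void submit_batch(struct bio **bios, int nr)
{
        struct blk_plug plug;
        int i;

        blk_start_plug(&plug);          /* requests collect on the plug list */
        for (i = 0; i < nr; i++)
                submit_bio(WRITE, bios[i]);
        blk_finish_plug(&plug);         /* flush the batch to the device */
}
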
@@ -919,27 +916,28 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
                 return NULL;
         return bqt->tag_index[tag];
 }
-enum{
-        BLKDEV_WAIT,    /* wait for completion */
-        BLKDEV_BARRIER, /* issue request with barrier */
-        BLKDEV_SECURE,  /* secure discard */
-};
-#define BLKDEV_IFL_WAIT         (1 << BLKDEV_WAIT)
-#define BLKDEV_IFL_BARRIER      (1 << BLKDEV_BARRIER)
-#define BLKDEV_IFL_SECURE       (1 << BLKDEV_SECURE)
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
-                        unsigned long);
+
+#define BLKDEV_DISCARD_SECURE  0x01    /* secure discard */
+
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
 extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
                 sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
 extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
-                sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
-static inline int sb_issue_discard(struct super_block *sb,
-                                   sector_t block, sector_t nr_blocks)
+                sector_t nr_sects, gfp_t gfp_mask);
+static inline int sb_issue_discard(struct super_block *sb, sector_t block,
+                sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
 {
-        block <<= (sb->s_blocksize_bits - 9);
-        nr_blocks <<= (sb->s_blocksize_bits - 9);
-        return blkdev_issue_discard(sb->s_bdev, block, nr_blocks, GFP_NOFS,
-                                   BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER);
+        return blkdev_issue_discard(sb->s_bdev, block << (sb->s_blocksize_bits - 9),
+                                    nr_blocks << (sb->s_blocksize_bits - 9),
+                                    gfp_mask, flags);
+}
+static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
+                sector_t nr_blocks, gfp_t gfp_mask)
+{
+        return blkdev_issue_zeroout(sb->s_bdev,
+                                    block << (sb->s_blocksize_bits - 9),
+                                    nr_blocks << (sb->s_blocksize_bits - 9),
+                                    gfp_mask);
 }
 
 extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
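
Callers now pass the gfp mask and flags explicitly instead of the removed BLKDEV_IFL_* bits, and blkdev_issue_flush() has lost its flags argument. A sketch of a filesystem path using the updated signatures; fs_trim_and_flush() is illustrative and error handling is trimmed:

static int fs_trim_and_flush(struct super_block *sb, sector_t block,
                             sector_t nr_blocks)
{
        int ret;

        /* was: sb_issue_discard(sb, block, nr_blocks) with implicit
         * GFP_NOFS and BLKDEV_IFL_WAIT | BLKDEV_IFL_BARRIER */
        ret = sb_issue_discard(sb, block, nr_blocks, GFP_NOFS, 0);
        if (ret)
                return ret;

        /* was: blkdev_issue_flush(bdev, gfp, NULL, BLKDEV_IFL_WAIT) */
        return blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
}
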
@@ -1004,7 +1002,7 @@ static inline unsigned int queue_physical_block_size(struct request_queue *q)
         return q->limits.physical_block_size;
 }
 
-static inline int bdev_physical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_physical_block_size(struct block_device *bdev)
 {
         return queue_physical_block_size(bdev_get_queue(bdev));
 }
@@ -1071,13 +1069,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
 {
         unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
 
+        if (!lim->max_discard_sectors)
+                return 0;
+
         return (lim->discard_granularity + lim->discard_alignment - alignment)
                 & (lim->discard_granularity - 1);
 }
 
 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
 {
-        if (q->limits.discard_zeroes_data == 1)
+        if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
                 return 1;
 
         return 0;
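
With the extra max_discard_sectors check, discard_zeroes_data only reports 1 on queues that can actually discard, so a single test is enough before substituting discard for an explicit zeroout. A hedged sketch (zero_range() is illustrative):

static int zero_range(struct block_device *bdev, sector_t sector,
                      sector_t nr_sects)
{
        struct request_queue *q = bdev_get_queue(bdev);

        if (blk_queue_discard(q) && queue_discard_zeroes_data(q))
                return blkdev_issue_discard(bdev, sector, nr_sects,
                                            GFP_KERNEL, 0);

        return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL);
}
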
@@ -1093,11 +1094,11 @@ static inline int queue_dma_alignment(struct request_queue *q)
         return q ? q->dma_alignment : 511;
 }
 
-static inline int blk_rq_aligned(struct request_queue *q, void *addr,
+static inline int blk_rq_aligned(struct request_queue *q, unsigned long addr,
                                  unsigned int len)
 {
         unsigned int alignment = queue_dma_alignment(q) | q->dma_pad_mask;
-        return !((unsigned long)addr & alignment) && !(len & alignment);
+        return !(addr & alignment) && !(len & alignment);
 }
 
 /* assumes size > 256 */
@@ -1116,6 +1117,11 @@ static inline unsigned int block_size(struct block_device *bdev)
         return bdev->bd_block_size;
 }
 
+static inline bool queue_flush_queueable(struct request_queue *q)
+{
+        return !q->flush_not_queueable;
+}
+
 typedef struct {struct page *v;} Sector;
 
 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
@@ -1170,6 +1176,20 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 }
 #endif
 
+#ifdef CONFIG_BLK_DEV_THROTTLING
+extern int blk_throtl_init(struct request_queue *q);
+extern void blk_throtl_exit(struct request_queue *q);
+extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
+#else /* CONFIG_BLK_DEV_THROTTLING */
+static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
+{
+        return 0;
+}
+
+static inline int blk_throtl_init(struct request_queue *q) { return 0; }
+static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
+#endif /* CONFIG_BLK_DEV_THROTTLING */
+
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
         MODULE_ALIAS("block-major-" __stringify(major) "-" __stringify(minor))
 #define MODULE_ALIAS_BLOCKDEV_MAJOR(major) \
@@ -1210,11 +1230,17 @@ struct blk_integrity {
         struct kobject kobj;
 };
 
+extern bool blk_integrity_is_initialized(struct gendisk *);
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
-extern int blk_rq_map_integrity_sg(struct request *, struct scatterlist *);
-extern int blk_rq_count_integrity_sg(struct request *);
+extern int blk_rq_map_integrity_sg(struct request_queue *, struct bio *,
+                                   struct scatterlist *);
+extern int blk_rq_count_integrity_sg(struct request_queue *, struct bio *);
+extern int blk_integrity_merge_rq(struct request_queue *, struct request *,
+                                  struct request *);
+extern int blk_integrity_merge_bio(struct request_queue *, struct request *,
+                                   struct bio *);
 
 static inline
 struct blk_integrity *bdev_get_integrity(struct block_device *bdev)
@@ -1235,16 +1261,33 @@ static inline int blk_integrity_rq(struct request *rq)
         return bio_integrity(rq->bio);
 }
 
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+                                                    unsigned int segs)
+{
+        q->limits.max_integrity_segments = segs;
+}
+
+static inline unsigned short
+queue_max_integrity_segments(struct request_queue *q)
+{
+        return q->limits.max_integrity_segments;
+}
+
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
 #define blk_integrity_rq(rq) (0)
-#define blk_rq_count_integrity_sg(a) (0)
-#define blk_rq_map_integrity_sg(a, b) (0)
+#define blk_rq_count_integrity_sg(a, b) (0)
+#define blk_rq_map_integrity_sg(a, b, c) (0)
 #define bdev_get_integrity(a) (0)
 #define blk_get_integrity(a) (0)
 #define blk_integrity_compare(a, b) (0)
 #define blk_integrity_register(a, b) (0)
-#define blk_integrity_unregister(a) do { } while (0);
+#define blk_integrity_unregister(a) do { } while (0)
+#define blk_queue_max_integrity_segments(a, b) do { } while (0)
+#define queue_max_integrity_segments(a) (0)
+#define blk_integrity_merge_rq(a, b, c) (0)
+#define blk_integrity_merge_bio(a, b, c) (0)
+#define blk_integrity_is_initialized(a) (0)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
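
Controllers with a cap on protection-information scatter/gather entries can now advertise it through the queue limits, and the mapping helpers take the queue and bio directly. A sketch of a driver using the new calls; MY_MAX_PI_SEGS and the my_* functions are hypothetical:

#define MY_MAX_PI_SEGS  32      /* hypothetical hardware limit */

static void my_init_queue(struct request_queue *q)
{
        blk_queue_max_integrity_segments(q, MY_MAX_PI_SEGS);
}

static int my_map_protection(struct request_queue *q, struct request *rq,
                             struct scatterlist *pi_sgl)
{
        /* note the new (queue, bio, sglist) signature */
        return blk_rq_map_integrity_sg(q, rq->bio, pi_sgl);
}
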
@@ -1255,6 +1298,9 @@ struct block_device_operations {
         int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
         int (*direct_access) (struct block_device *, sector_t,
                                                 void **, unsigned long *);
+        unsigned int (*check_events) (struct gendisk *disk,
+                                      unsigned int clearing);
+        /* ->media_changed() is DEPRECATED, use ->check_events() instead */
         int (*media_changed) (struct gendisk *);
         void (*unlock_native_capacity) (struct gendisk *);
         int (*revalidate_disk) (struct gendisk *);
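
New drivers report media events through ->check_events() instead of the deprecated ->media_changed() poll. A minimal sketch, assuming DISK_EVENT_MEDIA_CHANGE from <linux/genhd.h> and a hypothetical my_media_changed() helper:

static unsigned int my_check_events(struct gendisk *disk,
                                    unsigned int clearing)
{
        return my_media_changed(disk) ? DISK_EVENT_MEDIA_CHANGE : 0;
}

static const struct block_device_operations my_fops = {
        .owner          = THIS_MODULE,
        .check_events   = my_check_events,      /* replaces .media_changed */
};
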
@@ -1277,6 +1323,31 @@ static inline long nr_blockdev_pages(void)
         return 0;
 }
 
+struct blk_plug {
+};
+
+static inline void blk_start_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_finish_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_flush_plug(struct task_struct *task)
+{
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+        return false;
+}
+
 #endif /* CONFIG_BLOCK */
 
 #endif