Diffstat (limited to 'include/linux/blkdev.h')
 -rw-r--r--  include/linux/blkdev.h | 106
 1 file changed, 71 insertions(+), 35 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d18ff34670a..16a902f099ac 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -108,11 +108,17 @@ struct request {
 
 	/*
 	 * Three pointers are available for the IO schedulers, if they need
-	 * more they have to dynamically allocate it.
+	 * more they have to dynamically allocate it.  Flush requests are
+	 * never put on the IO scheduler. So let the flush fields share
+	 * space with the three elevator_private pointers.
 	 */
-	void *elevator_private;
-	void *elevator_private2;
-	void *elevator_private3;
+	union {
+		void *elevator_private[3];
+		struct {
+			unsigned int		seq;
+			struct list_head	list;
+		} flush;
+	};
 
 	struct gendisk *rq_disk;
 	struct hd_struct *part;
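The union above works because a request is owned by either the I/O scheduler or the flush machinery, never both at once. A standalone userspace sketch of the same space-sharing trick (the names mirror the patch, but none of this is kernel code):

/* Userspace sketch: a request is either queued in the elevator or
 * executing a flush sequence, so the two field sets can alias. */
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

struct request_sketch {
	union {
		void *elevator_private[3];	/* view while owned by the elevator */
		struct {
			unsigned int seq;	/* flush sequence state */
			struct list_head list;	/* linkage on a flush queue */
		} flush;			/* view while executing a flush */
	};
};

int main(void)
{
	/* the union costs no space beyond the larger of the two views */
	printf("union: %zu bytes, three pointers: %zu bytes\n",
	       sizeof(struct request_sketch), 3 * sizeof(void *));
	return 0;
}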
@@ -190,7 +196,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -273,7 +278,6 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unprep_rq_fn		*unprep_rq_fn;
-	unplug_fn		*unplug_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
@@ -287,12 +291,9 @@ struct request_queue
 	struct request		*boundary_rq;
 
 	/*
-	 * Auto-unplugging state
+	 * Delayed queue handling
 	 */
-	struct timer_list	unplug_timer;
-	int			unplug_thresh;	/* After this many requests */
-	unsigned long		unplug_delay;	/* After this many jiffies */
-	struct work_struct	unplug_work;
+	struct delayed_work	delay_work;
 
 	struct backing_dev_info	backing_dev_info;
 
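The four-field unplug state collapses into one struct delayed_work because a delayed work item is exactly a timer that queues a work item when it fires. A minimal kernel-style sketch of that consolidation using only the generic workqueue API (my_delay_fn and my_delay_work are hypothetical names, not from this patch):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void my_delay_fn(struct work_struct *work)
{
	/* the real handler would run the request queue here */
}

static DECLARE_DELAYED_WORK(my_delay_work, my_delay_fn);

static void my_delay_queue(unsigned long msecs)
{
	/* one call arms both the timer and the deferred work */
	schedule_delayed_work(&my_delay_work, msecs_to_jiffies(msecs));
}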
@@ -363,11 +364,12 @@ struct request_queue
 	 * for flush operations
 	 */
 	unsigned int		flush_flags;
-	unsigned int		flush_seq;
-	int			flush_err;
+	unsigned int		flush_pending_idx:1;
+	unsigned int		flush_running_idx:1;
+	unsigned long		flush_pending_since;
+	struct list_head	flush_queue[2];
+	struct list_head	flush_data_in_flight;
 	struct request		flush_rq;
-	struct request		*orig_flush_rq;
-	struct list_head	pending_flushes;
 
 	struct mutex		sysfs_lock;
 
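The flush state becomes a pair of double-buffered queues: flushes arriving while one batch is in flight accumulate on the "pending" list, and issuing a flush hands that list over by toggling a one-bit index. A standalone userspace sketch of the toggle (an illustration of the idea, not the kernel logic verbatim):

/* Userspace sketch of the pending/running index handover. */
#include <stdio.h>

struct flushq {
	unsigned int pending_idx:1;
	unsigned int running_idx:1;
	int queued[2];			/* stand-in for list_head flush_queue[2] */
};

static void issue_flush(struct flushq *fq)
{
	fq->running_idx = fq->pending_idx;	/* current batch starts running */
	fq->pending_idx ^= 1;			/* new arrivals use the other list */
}

int main(void)
{
	struct flushq fq = { 0 };
	fq.queued[fq.pending_idx] += 3;		/* three flushes arrive */
	issue_flush(&fq);
	printf("running list %u, pending list %u\n",
	       (unsigned int)fq.running_idx, (unsigned int)fq.pending_idx);
	return 0;
}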
@@ -387,14 +389,13 @@ struct request_queue
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
-#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
-#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP   11	/* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
-#define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH	7	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		8	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES     9	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP   10	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO     11	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE   12	/* supports request stacking */
+#define QUEUE_FLAG_NONROT      13	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
@@ -472,7 +473,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	__clear_bit(flag, &q->queue_flags);
 }
 
-#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -667,9 +667,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
-extern void blk_plug_device(struct request_queue *);
-extern void blk_plug_device_unlocked(struct request_queue *);
-extern int blk_remove_plug(struct request_queue *);
+extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			  unsigned int, void __user *);
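blk_delay_queue() is the driver-side replacement for plugging: a driver that is temporarily out of hardware resources no longer plugs the queue, it asks the block layer to re-run dispatch after a delay. A kernel-style sketch of a request_fn using it (my_hw_slot_available and my_hw_submit are hypothetical helpers, and the 3 msec value is arbitrary):

#include <linux/blkdev.h>

static bool my_hw_slot_available(void);		/* hypothetical */
static void my_hw_submit(struct request *rq);	/* hypothetical */

static void my_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_peek_request(q)) != NULL) {
		if (!my_hw_slot_available()) {
			/* out of resources: re-run the queue in 3 msecs
			 * instead of plugging and waiting for an unplug */
			blk_delay_queue(q, 3);
			return;
		}
		blk_start_request(rq);
		my_hw_submit(rq);
	}
}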
@@ -699,7 +697,7 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *);
+extern void __blk_run_queue(struct request_queue *q, bool force_kblockd);
 extern void blk_run_queue(struct request_queue *);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
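The new bool on __blk_run_queue() lets the caller choose between invoking the request_fn inline and punting to the kblockd workqueue. A sketch of a completion path that passes true to avoid recursing on an already-deep stack (my_io_done is hypothetical; the queue lock must be held around the call, as with the old signature):

#include <linux/blkdev.h>

static void my_io_done(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	/* true = run via kblockd; we may be deep in an IRQ stack here */
	__blk_run_queue(q, true);
	spin_unlock_irqrestore(q->queue_lock, flags);
}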
@@ -713,7 +711,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
-extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
@@ -850,7 +847,6 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
 int blk_get_queue(struct request_queue *);
@@ -858,6 +854,31 @@ struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
+struct blk_plug {
+	unsigned long magic;		/* detect uninitialized use */
+	struct list_head list;		/* plugged requests */
+	unsigned int should_sort;	/* sort list before flushing? */
+};
+
+extern void blk_start_plug(struct blk_plug *);
+extern void blk_finish_plug(struct blk_plug *);
+extern void __blk_flush_plug(struct task_struct *, struct blk_plug *);
+
+static inline void blk_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	if (unlikely(plug))
+		__blk_flush_plug(tsk, plug);
+}
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	return plug && !list_empty(&plug->list);
+}
+
 /*
  * tag stuff
  */
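The intended caller pattern for the on-stack plug: I/O submitted between blk_start_plug() and blk_finish_plug() is held on the per-task list and dispatched as one batch, either at blk_finish_plug() or when the task schedules out. A kernel-style sketch (my_read_many and its pre-built bios are hypothetical):

#include <linux/blkdev.h>
#include <linux/bio.h>

static void my_read_many(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);			/* sets current->plug = &plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);	/* held on plug.list, not dispatched yet */
	blk_finish_plug(&plug);			/* sort and flush the whole batch */
}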
@@ -1088,7 +1109,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1136,8 +1156,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
-extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
-extern void throtl_shutdown_timer_wq(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 {
@@ -1146,8 +1164,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
-static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
-static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -1281,6 +1297,26 @@ static inline long nr_blockdev_pages(void)
 	return 0;
 }
 
+struct blk_plug {
+};
+
+static inline void blk_start_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_finish_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_flush_plug(struct task_struct *task)
+{
+}
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+	return false;
+}
+
 #endif /* CONFIG_BLOCK */
 
 #endif
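The empty !CONFIG_BLOCK stubs exist so that generic code, notably the scheduler, can flush a sleeping task's plugged I/O unconditionally and still compile when the block layer is configured out. A sketch of that caller (my_prepare_to_sleep is a hypothetical stand-in for the scheduler hook this series adds):

#include <linux/blkdev.h>
#include <linux/sched.h>

static void my_prepare_to_sleep(struct task_struct *prev)
{
	/*
	 * A task going to sleep with plugged I/O still queued must submit
	 * it first, or it can deadlock waiting on its own unissued requests.
	 */
	if (blk_needs_flush_plug(prev))
		blk_flush_plug(prev);	/* a no-op without CONFIG_BLOCK */
}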