author	Jens Axboe <jaxboe@fusionio.com>	2011-03-10 02:52:07 -0500
committer	Jens Axboe <jaxboe@fusionio.com>	2011-03-10 02:52:07 -0500
commit	7eaceaccab5f40bbfda044629a6298616aeaed50 (patch)
tree	33954d12f63e25a47eb6d86ef3d3d0a5e62bf752 /include/linux/blkdev.h
parent	73c101011926c5832e6e141682180c4debe2cf45 (diff)
block: remove per-queue plugging
Code has been converted over to the new explicit on-stack plugging, and delay users have been converted to use the new API for that. So let's kill off the old plugging along with aops->sync_page().

Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
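For context, a rough sketch of the on-stack plugging this header now assumes (the blk_plug API introduced by the parent commit). The submit_batch() helper and its arguments are hypothetical, shown only to contrast with the removed blk_plug_device()/blk_unplug() per-queue calls:

	#include <linux/blkdev.h>
	#include <linux/bio.h>

	/*
	 * Hypothetical caller: instead of plugging the request queue and
	 * relying on the unplug timer/threshold, a submitter batches I/O
	 * inside an on-stack struct blk_plug.  The block layer flushes the
	 * batch at blk_finish_plug() or when the task schedules.
	 */
	static void submit_batch(struct bio **bios, int nr)
	{
		struct blk_plug plug;
		int i;

		blk_start_plug(&plug);		/* begin batching on this task's stack */
		for (i = 0; i < nr; i++)
			submit_bio(READ, bios[i]);	/* queued, not yet dispatched */
		blk_finish_plug(&plug);		/* flush the batched requests */
	}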
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	31
1 file changed, 7 insertions(+), 24 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 5873037eeb91..64ab2a1bb167 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -196,7 +196,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -279,7 +278,6 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unprep_rq_fn		*unprep_rq_fn;
-	unplug_fn		*unplug_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
@@ -293,14 +291,6 @@ struct request_queue
 	struct request		*boundary_rq;
 
 	/*
-	 * Auto-unplugging state
-	 */
-	struct timer_list	unplug_timer;
-	int			unplug_thresh;	/* After this many requests */
-	unsigned long		unplug_delay;	/* After this many jiffies */
-	struct work_struct	unplug_work;
-
-	/*
 	 * Delayed queue handling
 	 */
 	struct delayed_work	delay_work;
@@ -399,14 +389,13 @@ struct request_queue
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
 #define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
-#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
-#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES    10	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP   11	/* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO     12	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE   13	/* supports request stacking */
-#define QUEUE_FLAG_NONROT      14	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH	7	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		8	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES     9	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP   10	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO     11	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE   12	/* supports request stacking */
+#define QUEUE_FLAG_NONROT      13	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT        QUEUE_FLAG_NONROT /* paravirt device */
 #define QUEUE_FLAG_IO_STAT     15	/* do IO stats */
 #define QUEUE_FLAG_DISCARD     16	/* supports DISCARD */
@@ -484,7 +473,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	__clear_bit(flag, &q->queue_flags);
 }
 
-#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -679,9 +667,6 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
-extern void blk_plug_device(struct request_queue *);
-extern void blk_plug_device_unlocked(struct request_queue *);
-extern int blk_remove_plug(struct request_queue *);
 extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
@@ -726,7 +711,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
-extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
@@ -863,7 +847,6 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
 int blk_get_queue(struct request_queue *);