author		Stefan Richter <stefanr@s5r6.in-berlin.de>	2011-05-10 14:52:07 -0400
committer	Stefan Richter <stefanr@s5r6.in-berlin.de>	2011-05-10 16:50:41 -0400
commit		020abf03cd659388f94cb328e1e1df0656e0d7ff (patch)
tree		40d05011708ad1b4a05928d167eb120420581aa6 /include/linux/blkdev.h
parent		0ff8fbc61727c926883eec381fbd3d32d1fab504 (diff)
parent		693d92a1bbc9e42681c42ed190bd42b636ca876f (diff)
Merge tag 'v2.6.39-rc7'
in order to pull in changes in drivers/media/dvb/firewire/ and sound/firewire/.
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	153
1 file changed, 108 insertions(+), 45 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index aae86fd10c4f..2ad95fa1d130 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -108,13 +108,20 @@ struct request {
 
 	/*
 	 * Three pointers are available for the IO schedulers, if they need
-	 * more they have to dynamically allocate it.
+	 * more they have to dynamically allocate it. Flush requests are
+	 * never put on the IO scheduler. So let the flush fields share
+	 * space with the three elevator_private pointers.
 	 */
-	void *elevator_private;
-	void *elevator_private2;
-	void *elevator_private3;
+	union {
+		void *elevator_private[3];
+		struct {
+			unsigned int		seq;
+			struct list_head	list;
+		} flush;
+	};
 
 	struct gendisk *rq_disk;
+	struct hd_struct *part;
 	unsigned long start_time;
 #ifdef CONFIG_BLK_CGROUP
 	unsigned long long start_time_ns;
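The comment in this hunk records the reasoning: a flush request is never handed to the IO scheduler, so its sequencing state can reuse the storage of the three elevator_private pointers. A minimal standalone sketch of the same space-sharing idea (illustrative struct, not the kernel's struct request; compiles in userspace):

#include <stdio.h>

/* Illustrative stand-in for the kernel's struct list_head. */
struct list_head { struct list_head *next, *prev; };

struct request_demo {
	union {
		void *elevator_private[3];	/* used while owned by the IO scheduler */
		struct {
			unsigned int seq;	/* flush sequence state */
			struct list_head list;	/* linkage on the flush queues */
		} flush;			/* used only by flush requests */
	};
};

int main(void)
{
	/* The union costs no extra space beyond its largest member. */
	printf("sizeof(struct request_demo) = %zu\n", sizeof(struct request_demo));
	return 0;
}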
@@ -189,7 +196,6 @@ typedef void (request_fn_proc) (struct request_queue *q);
 typedef int (make_request_fn) (struct request_queue *q, struct bio *bio);
 typedef int (prep_rq_fn) (struct request_queue *, struct request *);
 typedef void (unprep_rq_fn) (struct request_queue *, struct request *);
-typedef void (unplug_fn) (struct request_queue *);
 
 struct bio_vec;
 struct bvec_merge_data {
@@ -250,7 +256,7 @@ struct queue_limits {
 
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
-	unsigned char		no_cluster;
+	unsigned char		cluster;
 	signed char		discard_zeroes_data;
 };
 
@@ -272,7 +278,6 @@ struct request_queue
 	make_request_fn		*make_request_fn;
 	prep_rq_fn		*prep_rq_fn;
 	unprep_rq_fn		*unprep_rq_fn;
-	unplug_fn		*unplug_fn;
 	merge_bvec_fn		*merge_bvec_fn;
 	softirq_done_fn		*softirq_done_fn;
 	rq_timed_out_fn		*rq_timed_out_fn;
@@ -286,12 +291,9 @@ struct request_queue
 	struct request		*boundary_rq;
 
 	/*
-	 * Auto-unplugging state
+	 * Delayed queue handling
 	 */
-	struct timer_list	unplug_timer;
-	int			unplug_thresh;	/* After this many requests */
-	unsigned long		unplug_delay;	/* After this many jiffies */
-	struct work_struct	unplug_work;
+	struct delayed_work	delay_work;
 
 	struct backing_dev_info	backing_dev_info;
 
@@ -362,11 +364,12 @@ struct request_queue
 	 * for flush operations
 	 */
 	unsigned int		flush_flags;
-	unsigned int		flush_seq;
-	int			flush_err;
+	unsigned int		flush_pending_idx:1;
+	unsigned int		flush_running_idx:1;
+	unsigned long		flush_pending_since;
+	struct list_head	flush_queue[2];
+	struct list_head	flush_data_in_flight;
 	struct request		flush_rq;
-	struct request		*orig_flush_rq;
-	struct list_head	pending_flushes;
 
 	struct mutex		sysfs_lock;
 
@@ -380,30 +383,26 @@ struct request_queue
 #endif
 };
 
-#define QUEUE_FLAG_CLUSTER	0	/* cluster several segments into 1 */
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
 #define QUEUE_FLAG_STOPPED	2	/* queue is stopped */
 #define	QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
-#define QUEUE_FLAG_PLUGGED	7	/* queue is plugged */
-#define QUEUE_FLAG_ELVSWITCH	8	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI		9	/* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES	10	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP	11	/* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO	12	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE	13	/* supports request stacking */
-#define QUEUE_FLAG_NONROT	14	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP	9	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
-#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM	18	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
+#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
-				 (1 << QUEUE_FLAG_CLUSTER) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
 				 (1 << QUEUE_FLAG_SAME_COMP)	|	\
				 (1 << QUEUE_FLAG_ADD_RANDOM))
@@ -473,7 +472,6 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 	__clear_bit(flag, &q->queue_flags);
 }
 
-#define blk_queue_plugged(q)	test_bit(QUEUE_FLAG_PLUGGED, &(q)->queue_flags)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -510,6 +508,11 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define rq_data_dir(rq)		((rq)->cmd_flags & 1)
 
+static inline unsigned int blk_queue_cluster(struct request_queue *q)
+{
+	return q->limits.cluster;
+}
+
 /*
  * We regard a request as sync, if either a read or a sync write
  */
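With the per-queue no_cluster byte and QUEUE_FLAG_CLUSTER bit replaced by a positive queue_limits.cluster field, the setting can be stacked with the rest of the queue limits, and callers test it through the new blk_queue_cluster() helper. A hedged sketch of such a caller (the function and its remaining checks are illustrative only):

#include <linux/blkdev.h>

/* Hedged sketch: segment-merging logic consulting the new helper instead of
 * testing the old QUEUE_FLAG_CLUSTER bit. */
static int demo_segments_mergeable(struct request_queue *q,
				   struct bio_vec *prev, struct bio_vec *next)
{
	if (!blk_queue_cluster(q))
		return 0;		/* clustering disabled in queue_limits */

	/* ...physical contiguity, segment size and boundary checks follow... */
	return 1;
}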
@@ -643,7 +646,6 @@ static inline void rq_flush_dcache_pages(struct request *rq)
 
 extern int blk_register_queue(struct gendisk *disk);
 extern void blk_unregister_queue(struct gendisk *disk);
-extern void register_disk(struct gendisk *dev);
 extern void generic_make_request(struct bio *bio);
 extern void blk_rq_init(struct request_queue *q, struct request *rq);
 extern void blk_put_request(struct request *);
@@ -664,9 +666,7 @@ extern int blk_rq_prep_clone(struct request *rq, struct request *rq_src,
 extern void blk_rq_unprep_clone(struct request *rq);
 extern int blk_insert_cloned_request(struct request_queue *q,
 				     struct request *rq);
-extern void blk_plug_device(struct request_queue *);
-extern void blk_plug_device_unlocked(struct request_queue *);
-extern int blk_remove_plug(struct request_queue *);
+extern void blk_delay_queue(struct request_queue *, unsigned long);
 extern void blk_recount_segments(struct request_queue *, struct bio *);
 extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			  unsigned int, void __user *);
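blk_plug_device()/blk_remove_plug() are gone; a driver that used to plug the queue while the hardware was busy can instead ask for a delayed re-run with blk_delay_queue(). A hedged sketch (placeholder driver state and names; the delay argument is treated as milliseconds, which matches the blk-core implementation at this point):

#include <linux/blkdev.h>

static bool demo_hw_busy;	/* illustrative placeholder state */

static void demo_request_fn(struct request_queue *q)
{
	struct request *rq;

	if (demo_hw_busy) {
		/* Instead of plugging, have the block layer re-run us later. */
		blk_delay_queue(q, 3);
		return;
	}

	while ((rq = blk_fetch_request(q)) != NULL) {
		/* ...issue rq to the hardware... */
		__blk_end_request_all(rq, 0);
	}
}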
@@ -696,8 +696,9 @@ extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
-extern void __blk_run_queue(struct request_queue *);
+extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
 			   struct rq_map_data *, void __user *, unsigned long,
 			   gfp_t);
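blk_run_queue_async() defers the queue run to kblockd (via the new delay_work), which is useful from completion or other atomic contexts where dispatching inline is undesirable. A hedged sketch (the surrounding function is illustrative only):

#include <linux/blkdev.h>

/* Hedged sketch: kick the queue once a scarce resource is freed, without
 * running the dispatch loop in this context. */
static void demo_resource_released(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_run_queue_async(q);		/* the actual run happens via kblockd */
	spin_unlock_irqrestore(q->queue_lock, flags);
}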
@@ -710,7 +711,6 @@ extern int blk_execute_rq(struct request_queue *, struct gendisk *,
 			  struct request *, int);
 extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
 				  struct request *, int, rq_end_io_fn *);
-extern void blk_unplug(struct request_queue *q);
 
 static inline struct request_queue *bdev_get_queue(struct block_device *bdev)
 {
@@ -805,6 +805,7 @@ extern struct request_queue *blk_init_allocated_queue(struct request_queue *,
 extern void blk_cleanup_queue(struct request_queue *);
 extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
 extern void blk_queue_bounce_limit(struct request_queue *, u64);
+extern void blk_limits_max_hw_sectors(struct queue_limits *, unsigned int);
 extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
 extern void blk_queue_max_segments(struct request_queue *, unsigned short);
 extern void blk_queue_max_segment_size(struct request_queue *, unsigned int);
@@ -846,7 +847,6 @@ extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bd
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
 extern void blk_dump_rq_flags(struct request *, char *);
-extern void generic_unplug_device(struct request_queue *);
 extern long nr_blockdev_pages(void);
 
 int blk_get_queue(struct request_queue *);
@@ -854,6 +854,44 @@ struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
+struct blk_plug {
+	unsigned long magic;
+	struct list_head list;
+	struct list_head cb_list;
+	unsigned int should_sort;
+};
+struct blk_plug_cb {
+	struct list_head list;
+	void (*callback)(struct blk_plug_cb *);
+};
+
+extern void blk_start_plug(struct blk_plug *);
+extern void blk_finish_plug(struct blk_plug *);
+extern void blk_flush_plug_list(struct blk_plug *, bool);
+
+static inline void blk_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	if (plug)
+		blk_flush_plug_list(plug, false);
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	if (plug)
+		blk_flush_plug_list(plug, true);
+}
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+	struct blk_plug *plug = tsk->plug;
+
+	return plug && (!list_empty(&plug->list) || !list_empty(&plug->cb_list));
+}
+
 /*
  * tag stuff
  */
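Plugging is now per-task and on-stack: a submitter wraps a batch of submissions in blk_start_plug()/blk_finish_plug(), and the requests collect on current->plug until the plug is finished (or the task goes to sleep). A hedged usage sketch (the batching function and its arguments are illustrative):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void demo_submit_batch(struct bio **bios, int nr)
{
	struct blk_plug plug;
	int i;

	blk_start_plug(&plug);		/* subsequent requests collect on current->plug */
	for (i = 0; i < nr; i++)
		submit_bio(READ, bios[i]);
	blk_finish_plug(&plug);		/* flush the plugged requests to the driver(s) */
}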
@@ -1084,7 +1122,6 @@ static inline void put_dev_sector(Sector p)
 
 struct work_struct;
 int kblockd_schedule_work(struct request_queue *q, struct work_struct *work);
-int kblockd_schedule_delayed_work(struct request_queue *q, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
 /*
@@ -1132,8 +1169,6 @@ static inline uint64_t rq_io_start_time_ns(struct request *req)
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
-extern void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay);
-extern void throtl_shutdown_timer_wq(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
 static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 {
@@ -1142,8 +1177,6 @@ static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
 
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline int blk_throtl_exit(struct request_queue *q) { return 0; }
-static inline void throtl_schedule_delayed_work(struct request_queue *q, unsigned long delay) {}
-static inline void throtl_shutdown_timer_wq(struct request_queue *q) {}
 #endif /* CONFIG_BLK_DEV_THROTTLING */
 
 #define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -1186,6 +1219,7 @@ struct blk_integrity {
 	struct kobject		kobj;
 };
 
+extern bool blk_integrity_is_initialized(struct gendisk *);
 extern int blk_integrity_register(struct gendisk *, struct blk_integrity *);
 extern void blk_integrity_unregister(struct gendisk *);
 extern int blk_integrity_compare(struct gendisk *, struct gendisk *);
@@ -1242,6 +1276,7 @@ queue_max_integrity_segments(struct request_queue *q)
 #define queue_max_integrity_segments(a)	(0)
 #define blk_integrity_merge_rq(a, b, c)	(0)
 #define blk_integrity_merge_bio(a, b, c)	(0)
+#define blk_integrity_is_initialized(a)	(0)
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
@@ -1252,6 +1287,9 @@ struct block_device_operations {
 	int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
 	int (*direct_access) (struct block_device *, sector_t,
 						void **, unsigned long *);
+	unsigned int (*check_events) (struct gendisk *disk,
+				      unsigned int clearing);
+	/* ->media_changed() is DEPRECATED, use ->check_events() instead */
 	int (*media_changed) (struct gendisk *);
 	void (*unlock_native_capacity) (struct gendisk *);
 	int (*revalidate_disk) (struct gendisk *);
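->check_events() supersedes ->media_changed(): the driver returns a mask of pending events and clears those named in the clearing mask. A hedged sketch of a driver wiring it up (placeholder state and names; the event flag comes from linux/genhd.h):

#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/module.h>

static bool demo_media_changed_pending;	/* illustrative placeholder state */

static unsigned int demo_check_events(struct gendisk *disk,
				      unsigned int clearing)
{
	unsigned int events = 0;

	if (demo_media_changed_pending) {
		events |= DISK_EVENT_MEDIA_CHANGE;
		if (clearing & DISK_EVENT_MEDIA_CHANGE)
			demo_media_changed_pending = false;
	}
	return events;
}

static const struct block_device_operations demo_fops = {
	.owner		= THIS_MODULE,
	.check_events	= demo_check_events,
};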
@@ -1274,6 +1312,31 @@ static inline long nr_blockdev_pages(void)
 	return 0;
 }
 
+struct blk_plug {
+};
+
+static inline void blk_start_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_finish_plug(struct blk_plug *plug)
+{
+}
+
+static inline void blk_flush_plug(struct task_struct *task)
+{
+}
+
+static inline void blk_schedule_flush_plug(struct task_struct *task)
+{
+}
+
+
+static inline bool blk_needs_flush_plug(struct task_struct *tsk)
+{
+	return false;
+}
+
 #endif /* CONFIG_BLOCK */
 
 #endif