| author | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2011-07-27 03:54:47 -0400 |
|---|---|---|
| committer | Dmitry Torokhov <dmitry.torokhov@gmail.com> | 2011-07-27 03:54:47 -0400 |
| commit | aa7eb8e78d8ecd6cd0475d86ea8385ff9cb47ece | |
| tree | 3f9e98fadd5124fb05e8f6f9b06aa23698d4f215 | /include/linux/blkdev.h |
| parent | cca8edfd2ec2a34d9f50f593bc753bb11e1bc1f5 | |
| parent | 3c6b50141ef9f0a8844bf1357b80c0cdf518bf05 | |
Merge branch 'next' into for-linus
Diffstat (limited to 'include/linux/blkdev.h')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | include/linux/blkdev.h | 45 |

1 file changed, 28 insertions(+), 17 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index cbbfd98ad4a3..1a23722e8878 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -257,7 +257,7 @@ struct queue_limits {
 	unsigned char		misaligned;
 	unsigned char		discard_misaligned;
 	unsigned char		cluster;
-	signed char		discard_zeroes_data;
+	unsigned char		discard_zeroes_data;
 };
 
 struct request_queue
@@ -364,6 +364,8 @@ struct request_queue
 	 * for flush operations
 	 */
 	unsigned int		flush_flags;
+	unsigned int		flush_not_queueable:1;
+	unsigned int		flush_queue_delayed:1;
 	unsigned int		flush_pending_idx:1;
 	unsigned int		flush_running_idx:1;
 	unsigned long		flush_pending_since;
@@ -388,20 +390,19 @@
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_REENTER	6	/* Re-entrancy avoidance */
-#define QUEUE_FLAG_ELVSWITCH	7	/* don't use elevator, just do FIFO */
-#define QUEUE_FLAG_BIDI		8	/* queue supports bidi requests */
-#define QUEUE_FLAG_NOMERGES	9	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP	10	/* force complete on same CPU */
-#define QUEUE_FLAG_FAIL_IO	11	/* fake timeout */
-#define QUEUE_FLAG_STACKABLE	12	/* supports request stacking */
-#define QUEUE_FLAG_NONROT	13	/* non-rotational device (SSD) */
+#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
+#define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
+#define QUEUE_FLAG_SAME_COMP	9	/* force complete on same CPU */
+#define QUEUE_FLAG_FAIL_IO	10	/* fake timeout */
+#define QUEUE_FLAG_STACKABLE	11	/* supports request stacking */
+#define QUEUE_FLAG_NONROT	12	/* non-rotational device (SSD) */
 #define QUEUE_FLAG_VIRT		QUEUE_FLAG_NONROT /* paravirt device */
-#define QUEUE_FLAG_IO_STAT	15	/* do IO stats */
-#define QUEUE_FLAG_DISCARD	16	/* supports DISCARD */
-#define QUEUE_FLAG_NOXMERGES	17	/* No extended merges */
-#define QUEUE_FLAG_ADD_RANDOM	18	/* Contributes to random pool */
-#define QUEUE_FLAG_SECDISCARD	19	/* supports SECDISCARD */
+#define QUEUE_FLAG_IO_STAT	13	/* do IO stats */
+#define QUEUE_FLAG_DISCARD	14	/* supports DISCARD */
+#define QUEUE_FLAG_NOXMERGES	15	/* No extended merges */
+#define QUEUE_FLAG_ADD_RANDOM	16	/* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD	17	/* supports SECDISCARD */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
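Dropping QUEUE_FLAG_REENTER shifts the numeric value of every later flag, so anything that hard-codes bit numbers instead of the symbolic names silently starts testing the wrong flag. A minimal driver-side sketch of the intended usage, assuming the queue_flag_set_unlocked()/queue_flag_clear_unlocked() helpers defined elsewhere in this header; the function name is hypothetical:

```c
#include <linux/blkdev.h>

/*
 * Hypothetical driver init: mark a queue as SSD-backed. Only the
 * symbolic QUEUE_FLAG_* names are used, so the renumbering in this
 * hunk is transparent to callers.
 */
static void example_mark_ssd(struct request_queue *q)
{
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);	  /* no seek penalty */
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, q); /* don't feed the entropy pool */
}
```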
@@ -699,6 +700,7 @@ extern void blk_sync_queue(struct request_queue *q);
 extern void __blk_stop_queue(struct request_queue *q);
 extern void __blk_run_queue(struct request_queue *q);
 extern void blk_run_queue(struct request_queue *);
+extern void blk_run_queue_async(struct request_queue *q);
 extern int blk_rq_map_user(struct request_queue *, struct request *,
			   struct rq_map_data *, void __user *, unsigned long,
			   gfp_t);
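blk_run_queue_async() is the asynchronous counterpart to blk_run_queue(): instead of invoking the request_fn synchronously, it asks kblockd to run the queue later. A rough sketch of a caller, assuming a completion/requeue path where running the dispatch function inline would be undesirable; the function name is hypothetical and the locking mirrors typical queue-lock usage rather than a hard requirement of this API:

```c
#include <linux/blkdev.h>

/*
 * Hypothetical completion hook: a request was just requeued under the
 * queue lock; defer the actual dispatch to workqueue context instead
 * of re-entering the request_fn from here.
 */
static void example_requeue_done(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_run_queue_async(q);		/* queue will be run from kblockd */
	spin_unlock_irqrestore(q->queue_lock, flags);
}
```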
@@ -843,6 +845,7 @@ extern void blk_queue_softirq_done(struct request_queue *, softirq_done_fn *);
 extern void blk_queue_rq_timed_out(struct request_queue *, rq_timed_out_fn *);
 extern void blk_queue_rq_timeout(struct request_queue *, unsigned int);
 extern void blk_queue_flush(struct request_queue *q, unsigned int flush);
+extern void blk_queue_flush_queueable(struct request_queue *q, bool queueable);
 extern struct backing_dev_info *blk_get_backing_dev_info(struct block_device *bdev);
 
 extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatterlist *);
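blk_queue_flush_queueable() lets a driver tell the block layer whether a cache-flush request may sit on the queue alongside other requests. A minimal sketch of a driver whose hardware cannot have a flush outstanding together with queued commands; the setup function is hypothetical, the two calls are the API declared in this diff and earlier in this header:

```c
#include <linux/blkdev.h>

/*
 * Hypothetical probe-time queue setup: the device has a volatile
 * write cache, but its FLUSH command cannot be queued with other
 * commands, so the block layer must issue flushes unqueued.
 */
static void example_setup_queue(struct request_queue *q)
{
	blk_queue_flush(q, REQ_FLUSH);		/* advertise the write cache */
	blk_queue_flush_queueable(q, false);	/* flushes are not queueable */
}
```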
@@ -1066,13 +1069,16 @@ static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector
 {
 	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
 
+	if (!lim->max_discard_sectors)
+		return 0;
+
 	return (lim->discard_granularity + lim->discard_alignment - alignment)
 		& (lim->discard_granularity - 1);
 }
 
 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
 {
-	if (q->limits.discard_zeroes_data == 1)
+	if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
 		return 1;
 
 	return 0;
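Both discard helpers now treat max_discard_sectors == 0 as "no discard support", so queue_discard_zeroes_data() can no longer report 1 for a queue that cannot discard at all. A small sketch of a caller that wants "discard guarantees zeroes" semantics, assuming the blk_queue_discard() test macro defined elsewhere in this header; the wrapper function is hypothetical:

```c
#include <linux/blkdev.h>

/*
 * Hypothetical helper: may we rely on a discard zeroing the range?
 * queue_discard_zeroes_data() now already folds in "discard is
 * actually supported", but testing the DISCARD flag first keeps the
 * intent explicit.
 */
static bool example_discard_zeroes(struct request_queue *q)
{
	return blk_queue_discard(q) && queue_discard_zeroes_data(q);
}
```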
@@ -1111,6 +1117,11 @@ static inline unsigned int block_size(struct block_device *bdev)
 	return bdev->bd_block_size;
 }
 
+static inline bool queue_flush_queueable(struct request_queue *q)
+{
+	return !q->flush_not_queueable;
+}
+
 typedef struct {struct page *v;} Sector;
 
 unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
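queue_flush_queueable() is the read side of the flush_not_queueable bit set by blk_queue_flush_queueable(). An illustrative (not the actual blk-flush.c) consumer, showing the kind of decision the flush machinery makes with it; the function and the queue_busy parameter are hypothetical:

```c
#include <linux/blkdev.h>

/*
 * Illustrative only: if flushes cannot be queued alongside other
 * requests and the queue still has work in flight, hold the flush
 * back until the queue drains.
 */
static bool example_should_delay_flush(struct request_queue *q, bool queue_busy)
{
	return !queue_flush_queueable(q) && queue_busy;
}
```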
@@ -1271,8 +1282,8 @@ queue_max_integrity_segments(struct request_queue *q)
 #define blk_get_integrity(a)			(0)
 #define blk_integrity_compare(a, b)		(0)
 #define blk_integrity_register(a, b)		(0)
-#define blk_integrity_unregister(a)		do { } while (0);
-#define blk_queue_max_integrity_segments(a, b)	do { } while (0);
+#define blk_integrity_unregister(a)		do { } while (0)
+#define blk_queue_max_integrity_segments(a, b)	do { } while (0)
 #define queue_max_integrity_segments(a)		(0)
 #define blk_integrity_merge_rq(a, b, c)		(0)
 #define blk_integrity_merge_bio(a, b, c)	(0)
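The dropped semicolon in these stub macros is more than cosmetic: with `do { } while (0);` the macro plus the caller's own semicolon expands to two statements, which breaks a braceless if/else. A self-contained illustration of the failure mode the fix removes (the macro names here are placeholders, not the kernel's):

```c
#define BROKEN_NOP(x)	do { } while (0);	/* old style: macro supplies ';' */
#define FIXED_NOP(x)	do { } while (0)	/* new style: caller supplies ';' */

void example(int flag)
{
	if (flag)
		FIXED_NOP(flag);	/* one statement: the else below still binds */
	else
		return;

	/*
	 * Writing BROKEN_NOP(flag); in the if-branch would not compile:
	 * the macro's own ';' ends the if statement, leaving the "else"
	 * with no "if" to attach to.
	 */
}
```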
