Diffstat (limited to 'include/linux/blkdev.h')
 include/linux/blkdev.h | 27 ++++++++++++++++++---------
 1 file changed, 18 insertions(+), 9 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1a23722e8878..0e67c45b3bc9 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -73,7 +73,7 @@ enum rq_cmd_type_bits {
 
 /*
  * try to put the fields that are referenced together in the same cacheline.
- * if you modify this structure, be sure to check block/blk-core.c:rq_init()
+ * if you modify this structure, be sure to check block/blk-core.c:blk_rq_init()
  * as well!
  */
 struct request {
@@ -260,8 +260,7 @@ struct queue_limits {
 	unsigned char		discard_zeroes_data;
 };
 
-struct request_queue
-{
+struct request_queue {
 	/*
 	 * Together with queue_head for cacheline sharing
 	 */
@@ -304,14 +303,14 @@ struct request_queue
 	void			*queuedata;
 
 	/*
-	 * queue needs bounce pages for pages above this limit
+	 * various queue flags, see QUEUE_* below
 	 */
-	gfp_t			bounce_gfp;
+	unsigned long		queue_flags;
 
 	/*
-	 * various queue flags, see QUEUE_* below
+	 * queue needs bounce pages for pages above this limit
 	 */
-	unsigned long		queue_flags;
+	gfp_t			bounce_gfp;
 
 	/*
 	 * protects queue structures from reentrancy. ->__queue_lock should
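This hunk swaps queue_flags and bounce_gfp, moving the flag word toward the top of struct request_queue. queue_flags is the hotter of the two: it is tested on essentially every submission through the QUEUE_FLAG_* helpers defined later in this header, such as blk_queue_nomerges(). A minimal sketch of that access pattern (the caller is illustrative, not part of this patch):

	#define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)

	if (blk_queue_nomerges(q))
		return ELEVATOR_NO_MERGE;	/* merge attempts disabled for this queue */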
@@ -334,8 +333,8 @@ struct request_queue
 	unsigned int		nr_congestion_off;
 	unsigned int		nr_batching;
 
-	void			*dma_drain_buffer;
 	unsigned int		dma_drain_size;
+	void			*dma_drain_buffer;
 	unsigned int		dma_pad_mask;
 	unsigned int		dma_alignment;
 
@@ -393,7 +392,7 @@ struct request_queue
 #define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES     8	/* disable merge attempts */
-#define QUEUE_FLAG_SAME_COMP	9	/* force complete on same CPU */
+#define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
 #define QUEUE_FLAG_FAIL_IO     10	/* fake timeout */
 #define QUEUE_FLAG_STACKABLE   11	/* supports request stacking */
 #define QUEUE_FLAG_NONROT      12	/* non-rotational device (SSD) */
@@ -403,6 +402,7 @@ struct request_queue
 #define QUEUE_FLAG_NOXMERGES   15	/* No extended merges */
 #define QUEUE_FLAG_ADD_RANDOM  16	/* Contributes to random pool */
 #define QUEUE_FLAG_SECDISCARD  17	/* supports SECDISCARD */
+#define QUEUE_FLAG_SAME_FORCE  18	/* force complete on same CPU */
 
 #define QUEUE_FLAG_DEFAULT	((1 << QUEUE_FLAG_IO_STAT) |		\
 				 (1 << QUEUE_FLAG_STACKABLE)	|	\
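Together with the SAME_COMP hunk above, this splits completion affinity into two levels: QUEUE_FLAG_SAME_COMP now only asks for completion on a CPU in the submitter's cache-shared group, while the new QUEUE_FLAG_SAME_FORCE demands the exact submitting CPU. A rough sketch of how a completion path can act on the pair; blk_cpu_to_group() and the surrounding logic are modelled on block/blk-softirq.c, not quoted from this patch:

	int cpu = smp_processor_id();	/* CPU handling the completion */
	int ccpu = rq->cpu;		/* CPU that submitted the request */

	if (!test_bit(QUEUE_FLAG_SAME_FORCE, &q->queue_flags)) {
		/*
		 * SAME_COMP alone: any CPU sharing the submitter's
		 * cache is acceptable, so compare groups, not ids.
		 */
		ccpu = blk_cpu_to_group(ccpu);
		cpu = blk_cpu_to_group(cpu);
	}

	if (ccpu == cpu) {
		/* complete locally */
	} else {
		/* raise the block softirq on the submitter's CPU via IPI */
	}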
@@ -857,12 +857,21 @@ struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
 
+/*
+ * Note: Code in between changing the blk_plug list/cb_list or element of such
+ * lists is preemptable, but such code can't do sleep (or be very careful),
+ * otherwise data is corrupted. For details, please check schedule() where
+ * blk_schedule_flush_plug() is called.
+ */
 struct blk_plug {
 	unsigned long magic;
 	struct list_head list;
 	struct list_head cb_list;
 	unsigned int should_sort;
+	unsigned int count;
 };
+#define BLK_MAX_REQUEST_COUNT 16
+
 struct blk_plug_cb {
 	struct list_head list;
 	void (*callback)(struct blk_plug_cb *);
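The new count field and the BLK_MAX_REQUEST_COUNT cap bound how many requests a plug may accumulate before it is forced out, so a long submission burst cannot delay earlier requests indefinitely. A minimal sketch of the submit-side bookkeeping, loosely following __make_request() in block/blk-core.c (locking and merge handling elided):

	struct blk_plug *plug = current->plug;

	if (plug) {
		/* drain first if the plug already holds a full batch */
		if (plug->count >= BLK_MAX_REQUEST_COUNT)
			blk_flush_plug_list(plug, false);
		list_add_tail(&rq->queuelist, &plug->list);
		plug->count++;
	} else {
		/* no plug active: dispatch straight to the device queue */
	}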