about | summary | refs | log | tree | commit | diff | stats
path: root/include/linux/blkdev.h
diff options
context:
space:
mode:
authorDmitry Torokhov <dmitry.torokhov@gmail.com>2013-03-17 22:40:50 -0400
committerDmitry Torokhov <dmitry.torokhov@gmail.com>2013-03-17 22:40:50 -0400
commit688d794c4c3f8b08c814381ee2edd3ede5856056 (patch)
treeef680add71e2a9588d07d8b594edbc1b5cd127d7 /include/linux/blkdev.h
parent16142655269aaf580488e074eabfdcf0fb4e3687 (diff)
parenta937536b868b8369b98967929045f1df54234323 (diff)
Merge tag 'v3.9-rc3' into next
Merge with mainline to bring in module_platform_driver_probe() and devm_ioremap_resource().
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--include/linux/blkdev.h31
1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1756001210d2..78feda9bbae2 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -19,6 +19,7 @@
19#include <linux/gfp.h> 19#include <linux/gfp.h>
20#include <linux/bsg.h> 20#include <linux/bsg.h>
21#include <linux/smp.h> 21#include <linux/smp.h>
22#include <linux/rcupdate.h>
22 23
23#include <asm/scatterlist.h> 24#include <asm/scatterlist.h>
24 25
@@ -378,6 +379,12 @@ struct request_queue {
378 379
379 unsigned int nr_sorted; 380 unsigned int nr_sorted;
380 unsigned int in_flight[2]; 381 unsigned int in_flight[2];
382 /*
383 * Number of active block driver functions for which blk_drain_queue()
384 * must wait. Must be incremented around functions that unlock the
385 * queue_lock internally, e.g. scsi_request_fn().
386 */
387 unsigned int request_fn_active;
381 388
382 unsigned int rq_timeout; 389 unsigned int rq_timeout;
383 struct timer_list timeout; 390 struct timer_list timeout;
@@ -431,13 +438,14 @@ struct request_queue {
431 /* Throttle data */ 438 /* Throttle data */
432 struct throtl_data *td; 439 struct throtl_data *td;
433#endif 440#endif
441 struct rcu_head rcu_head;
434}; 442};
435 443
436#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */ 444#define QUEUE_FLAG_QUEUED 1 /* uses generic tag queueing */
437#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */ 445#define QUEUE_FLAG_STOPPED 2 /* queue is stopped */
438#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */ 446#define QUEUE_FLAG_SYNCFULL 3 /* read queue has been filled */
439#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */ 447#define QUEUE_FLAG_ASYNCFULL 4 /* write queue has been filled */
440#define QUEUE_FLAG_DEAD 5 /* queue being torn down */ 448#define QUEUE_FLAG_DYING 5 /* queue being torn down */
441#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */ 449#define QUEUE_FLAG_BYPASS 6 /* act as dumb FIFO queue */
442#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */ 450#define QUEUE_FLAG_BIDI 7 /* queue supports bidi requests */
443#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */ 451#define QUEUE_FLAG_NOMERGES 8 /* disable merge attempts */
@@ -452,6 +460,7 @@ struct request_queue {
452#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */ 460#define QUEUE_FLAG_ADD_RANDOM 16 /* Contributes to random pool */
453#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */ 461#define QUEUE_FLAG_SECDISCARD 17 /* supports SECDISCARD */
454#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */ 462#define QUEUE_FLAG_SAME_FORCE 18 /* force complete on same CPU */
463#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
455 464
456#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 465#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
457 (1 << QUEUE_FLAG_STACKABLE) | \ 466 (1 << QUEUE_FLAG_STACKABLE) | \
@@ -521,6 +530,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
521 530
522#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags) 531#define blk_queue_tagged(q) test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
523#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags) 532#define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
533#define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
524#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags) 534#define blk_queue_dead(q) test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
525#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags) 535#define blk_queue_bypass(q) test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
526#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags) 536#define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
@@ -966,7 +976,6 @@ struct blk_plug {
966 unsigned long magic; /* detect uninitialized use-cases */ 976 unsigned long magic; /* detect uninitialized use-cases */
967 struct list_head list; /* requests */ 977 struct list_head list; /* requests */
968 struct list_head cb_list; /* md requires an unplug callback */ 978 struct list_head cb_list; /* md requires an unplug callback */
969 unsigned int should_sort; /* list to be sorted before flushing? */
970}; 979};
971#define BLK_MAX_REQUEST_COUNT 16 980#define BLK_MAX_REQUEST_COUNT 16
972 981
@@ -1180,13 +1189,25 @@ static inline int queue_discard_alignment(struct request_queue *q)
1180 1189
1181static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector) 1190static inline int queue_limit_discard_alignment(struct queue_limits *lim, sector_t sector)
1182{ 1191{
1183 unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1); 1192 unsigned int alignment, granularity, offset;
1184 1193
1185 if (!lim->max_discard_sectors) 1194 if (!lim->max_discard_sectors)
1186 return 0; 1195 return 0;
1187 1196
1188 return (lim->discard_granularity + lim->discard_alignment - alignment) 1197 /* Why are these in bytes, not sectors? */
1189 & (lim->discard_granularity - 1); 1198 alignment = lim->discard_alignment >> 9;
1199 granularity = lim->discard_granularity >> 9;
1200 if (!granularity)
1201 return 0;
1202
1203 /* Offset of the partition start in 'granularity' sectors */
1204 offset = sector_div(sector, granularity);
1205
1206 /* And why do we do this modulus *again* in blkdev_issue_discard()? */
1207 offset = (granularity + alignment - offset) % granularity;
1208
1209 /* Turn it back into bytes, gaah */
1210 return offset << 9;
1190} 1211}
1191 1212
1192static inline int bdev_discard_alignment(struct block_device *bdev) 1213static inline int bdev_discard_alignment(struct block_device *bdev)