path: root/include/linux/blkdev.h
author		Linus Torvalds <torvalds@linux-foundation.org>	2012-05-30 11:52:42 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-05-30 11:52:42 -0400
commit		0d167518e045cc8bb63f0a8a0a85ad4fa4e0044f (patch)
tree		101a9b5d425d79f663e4f25f1e90b7a8cc6604f1 /include/linux/blkdev.h
parent		2f83766d4b18774c856329a8fca4c9338dfeda39 (diff)
parent		ff26eaadf4d914e397872b99885d45756104e9ae (diff)
Merge branch 'for-3.5/core' of git://git.kernel.dk/linux-block
Merge block/IO core bits from Jens Axboe:
 "This is a bit bigger on the core side than usual, but that is purely
  because we decided to hold off on parts of Tejun's submission on 3.4
  to give it a bit more time to simmer.  As a consequence, it's seen a
  long cycle in for-next.

  It contains:

   - Bug fix from Dan, wrong locking type.

   - Relax splice gifting restriction from Eric.

   - A ton of updates from Tejun, primarily for blkcg.  This improves
     the code a lot, making the API nicer and cleaner, and also includes
     fixes for how we handle and tie policies and re-activate on
     switches.  The changes also include generic bug fixes.

   - A simple fix from Vivek, along with a fix for doing proper delayed
     allocation of the blkcg stats."

 Fix up annoying conflict just due to different merge resolution in
 Documentation/feature-removal-schedule.txt

 * 'for-3.5/core' of git://git.kernel.dk/linux-block: (92 commits)
   blkcg: tg_stats_alloc_lock is an irq lock
   vmsplice: relax alignement requirements for SPLICE_F_GIFT
   blkcg: use radix tree to index blkgs from blkcg
   blkcg: fix blkcg->css ref leak in __blkg_lookup_create()
   block: fix elvpriv allocation failure handling
   block: collapse blk_alloc_request() into get_request()
   blkcg: collapse blkcg_policy_ops into blkcg_policy
   blkcg: embed struct blkg_policy_data in policy specific data
   blkcg: mass rename of blkcg API
   blkcg: style cleanups for blk-cgroup.h
   blkcg: remove blkio_group->path[]
   blkcg: blkg_rwstat_read() was missing inline
   blkcg: shoot down blkgs if all policies are deactivated
   blkcg: drop stuff unused after per-queue policy activation update
   blkcg: implement per-queue policy activation
   blkcg: add request_queue->root_blkg
   blkcg: make request_queue bypassing on allocation
   blkcg: make sure blkg_lookup() returns %NULL if @q is bypassing
   blkcg: make blkg_conf_prep() take @pol and return with queue lock held
   blkcg: remove static policy ID enums
   ...
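The headline blkcg change above is per-queue policy activation, which hinges on the blkcg_pols bitmap and BLKCG_MAX_POLS limit added to struct request_queue in the diff below: each registered policy occupies one bit per queue, and a policy only operates on queues whose bit is set. A minimal sketch of that test follows, assuming a policy descriptor that carries a small integer slot id; the struct and helper names here are illustrative, not necessarily the exact in-tree ones.

#include <linux/bitops.h>
#include <linux/blkdev.h>

/*
 * Illustrative policy descriptor: the blkcg rework keys each policy by a
 * small integer id below BLKCG_MAX_POLS.  This is a sketch, not the exact
 * struct blkcg_policy from blk-cgroup.h.
 */
struct blkcg_policy_sketch {
	int	plid;	/* assumed slot index, 0 <= plid < BLKCG_MAX_POLS */
};

/* True iff @pol has been activated on @q (CONFIG_BLK_CGROUP builds only). */
static bool policy_enabled_sketch(struct request_queue *q,
				  const struct blkcg_policy_sketch *pol)
{
	return pol && test_bit(pol->plid, q->blkcg_pols);
}

Activating or deactivating a policy then amounts to setting or clearing that bit for a single queue rather than flipping a global switch, which is the behaviour the "blkcg: implement per-queue policy activation" entry in the shortlog refers to.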
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--	include/linux/blkdev.h	20
1 file changed, 19 insertions(+), 1 deletion(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 4d4ac24a263e..ba43f408baa3 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -32,10 +32,17 @@ struct blk_trace;
 struct request;
 struct sg_io_hdr;
 struct bsg_job;
+struct blkcg_gq;
 
 #define BLKDEV_MIN_RQ	4
 #define BLKDEV_MAX_RQ	128	/* Default maximum */
 
+/*
+ * Maximum number of blkcg policies allowed to be registered concurrently.
+ * Defined here to simplify include dependency.
+ */
+#define BLKCG_MAX_POLS	2
+
 struct request;
 typedef void (rq_end_io_fn)(struct request *, int);
 
@@ -363,6 +370,11 @@ struct request_queue {
 	struct list_head	timeout_list;
 
 	struct list_head	icq_list;
+#ifdef CONFIG_BLK_CGROUP
+	DECLARE_BITMAP		(blkcg_pols, BLKCG_MAX_POLS);
+	struct blkcg_gq		*root_blkg;
+	struct list_head	blkg_list;
+#endif
 
 	struct queue_limits	limits;
 
@@ -390,12 +402,17 @@ struct request_queue {
 
 	struct mutex		sysfs_lock;
 
+	int			bypass_depth;
+
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
 	int			bsg_job_size;
 	struct bsg_class_device bsg_dev;
 #endif
 
+#ifdef CONFIG_BLK_CGROUP
+	struct list_head	all_q_node;
+#endif
 #ifdef CONFIG_BLK_DEV_THROTTLING
 	/* Throttle data */
 	struct throtl_data *td;
@@ -407,7 +424,7 @@ struct request_queue {
 #define QUEUE_FLAG_SYNCFULL	3	/* read queue has been filled */
 #define QUEUE_FLAG_ASYNCFULL	4	/* write queue has been filled */
 #define QUEUE_FLAG_DEAD		5	/* queue being torn down */
-#define QUEUE_FLAG_ELVSWITCH	6	/* don't use elevator, just do FIFO */
+#define QUEUE_FLAG_BYPASS	6	/* act as dumb FIFO queue */
 #define QUEUE_FLAG_BIDI		7	/* queue supports bidi requests */
 #define QUEUE_FLAG_NOMERGES	8	/* disable merge attempts */
 #define QUEUE_FLAG_SAME_COMP	9	/* complete on same CPU-group */
@@ -491,6 +508,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
 #define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
+#define blk_queue_bypass(q)	test_bit(QUEUE_FLAG_BYPASS, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
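The bypass_depth counter and QUEUE_FLAG_BYPASS flag introduced above act as a nesting counter with a cached flag: the queue stays in bypass mode, and the new blk_queue_bypass(q) macro reports true, for as long as at least one bypass reference is held (the shortlog notes that blkg_lookup() returns NULL while a queue is bypassing and that queues now start out bypassing at allocation). Below is a rough sketch of how such a reference could be taken and dropped under the queue lock; the function names and bodies are made up for illustration and may not match the in-tree helpers.

#include <linux/blkdev.h>
#include <linux/spinlock.h>

/* Enter bypass mode; nests, so only the first caller sets the flag. */
static void bypass_start_sketch(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	if (!q->bypass_depth++)
		queue_flag_set(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);
	/* real callers would also drain in-flight requests at this point */
}

/* Leave bypass mode once the last reference is dropped. */
static void bypass_end_sketch(struct request_queue *q)
{
	spin_lock_irq(q->queue_lock);
	WARN_ON_ONCE(q->bypass_depth <= 0);
	if (!--q->bypass_depth)
		queue_flag_clear(QUEUE_FLAG_BYPASS, q);
	spin_unlock_irq(q->queue_lock);
}

Request allocation and blkg lookup paths can then gate on blk_queue_bypass(q) in the same way the other queue-flag test macros in the last hunk are used.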