Diffstat (limited to 'include')
-rw-r--r--  include/linux/bio.h        |  66
-rw-r--r--  include/linux/blkdev.h     | 101
-rw-r--r--  include/linux/elevator.h   |  41
-rw-r--r--  include/linux/fs.h         |   1
-rw-r--r--  include/linux/iocontext.h  | 136
5 files changed, 252 insertions(+), 93 deletions(-)
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 847994aef0e9..129a9c097958 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -515,24 +515,64 @@ extern void bio_integrity_init(void);
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
-#define bio_integrity(a)		(0)
-#define bioset_integrity_create(a, b)	(0)
-#define bio_integrity_prep(a)		(0)
-#define bio_integrity_enabled(a)	(0)
+static inline int bio_integrity(struct bio *bio)
+{
+	return 0;
+}
+
+static inline int bio_integrity_enabled(struct bio *bio)
+{
+	return 0;
+}
+
+static inline int bioset_integrity_create(struct bio_set *bs, int pool_size)
+{
+	return 0;
+}
+
+static inline void bioset_integrity_free (struct bio_set *bs)
+{
+	return;
+}
+
+static inline int bio_integrity_prep(struct bio *bio)
+{
+	return 0;
+}
+
+static inline void bio_integrity_free(struct bio *bio, struct bio_set *bs)
+{
+	return;
+}
+
 static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
 				      gfp_t gfp_mask, struct bio_set *bs)
 {
 	return 0;
 }
-#define bioset_integrity_free(a)	do { } while (0)
-#define bio_integrity_free(a, b)	do { } while (0)
-#define bio_integrity_endio(a, b)	do { } while (0)
-#define bio_integrity_advance(a, b)	do { } while (0)
-#define bio_integrity_trim(a, b, c)	do { } while (0)
-#define bio_integrity_split(a, b, c)	do { } while (0)
-#define bio_integrity_set_tag(a, b, c)	do { } while (0)
-#define bio_integrity_get_tag(a, b, c)	do { } while (0)
-#define bio_integrity_init(a)		do { } while (0)
+
+static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
+				       int sectors)
+{
+	return;
+}
+
+static inline void bio_integrity_advance(struct bio *bio,
+					 unsigned int bytes_done)
+{
+	return;
+}
+
+static inline void bio_integrity_trim(struct bio *bio, unsigned int offset,
+				      unsigned int sectors)
+{
+	return;
+}
+
+static inline void bio_integrity_init(void)
+{
+	return;
+}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
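The stub conversion above trades the old object-like macros for static inline functions so that !CONFIG_BLK_DEV_INTEGRITY builds still type-check their arguments. A small illustration of what the macros let slip through; the caller below is hypothetical, not part of this patch:

/* Hypothetical caller, for illustration only. */
static int submit_with_integrity_check(struct bio *bio)
{
	/*
	 * The old "#define bio_integrity(a) (0)" never evaluated its
	 * argument, so a caller could pass the wrong type here and only
	 * find out once integrity support was configured in.  The
	 * static inline checks that bio is a struct bio * in both
	 * configurations, and unused-variable warnings go away too.
	 */
	if (bio_integrity(bio))
		return -EOPNOTSUPP;	/* integrity path elided */
	return 0;
}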
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0ed1eb062313..6c6a1f008065 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -111,10 +111,14 @@ struct request {
 	 * Three pointers are available for the IO schedulers, if they need
 	 * more they have to dynamically allocate it.  Flush requests are
 	 * never put on the IO scheduler. So let the flush fields share
-	 * space with the three elevator_private pointers.
+	 * space with the elevator data.
 	 */
 	union {
-		void *elevator_private[3];
+		struct {
+			struct io_cq		*icq;
+			void			*priv[2];
+		} elv;
+
 		struct {
 			unsigned int		seq;
 			struct list_head	list;
@@ -311,6 +315,12 @@ struct request_queue {
 	unsigned long		queue_flags;
 
 	/*
+	 * ida allocated id for this queue.  Used to index queues from
+	 * ioctx.
+	 */
+	int			id;
+
+	/*
 	 * queue needs bounce pages for pages above this limit
 	 */
 	gfp_t			bounce_gfp;
@@ -351,6 +361,8 @@ struct request_queue {
 	struct timer_list	timeout;
 	struct list_head	timeout_list;
 
+	struct list_head	icq_list;
+
 	struct queue_limits	limits;
 
 	/*
@@ -387,6 +399,9 @@ struct request_queue {
 	/* Throttle data */
 	struct throtl_data *td;
 #endif
+#ifdef CONFIG_LOCKDEP
+	int			ioc_release_depth;
+#endif
 };
 
 #define QUEUE_FLAG_QUEUED	1	/* uses generic tag queueing */
@@ -481,6 +496,7 @@ static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
 
 #define blk_queue_tagged(q)	test_bit(QUEUE_FLAG_QUEUED, &(q)->queue_flags)
 #define blk_queue_stopped(q)	test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
+#define blk_queue_dead(q)	test_bit(QUEUE_FLAG_DEAD, &(q)->queue_flags)
 #define blk_queue_nomerges(q)	test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
 #define blk_queue_noxmerges(q)	\
 	test_bit(QUEUE_FLAG_NOXMERGES, &(q)->queue_flags)
@@ -660,7 +676,6 @@ extern void __blk_put_request(struct request_queue *, struct request *);
 extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
 extern struct request *blk_make_request(struct request_queue *, struct bio *,
 					gfp_t);
-extern void blk_insert_request(struct request_queue *, struct request *, int, void *);
 extern void blk_requeue_request(struct request_queue *, struct request *);
 extern void blk_add_request_payload(struct request *rq, struct page *page,
 		unsigned int len);
@@ -829,6 +844,7 @@ extern void blk_queue_io_min(struct request_queue *q, unsigned int min);
 extern void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt);
 extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
+extern void blk_set_stacking_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
 extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
@@ -859,7 +875,7 @@ extern int blk_rq_map_sg(struct request_queue *, struct request *, struct scatte
 extern void blk_dump_rq_flags(struct request *, char *);
 extern long nr_blockdev_pages(void);
 
-int blk_get_queue(struct request_queue *);
+bool __must_check blk_get_queue(struct request_queue *);
 struct request_queue *blk_alloc_queue(gfp_t);
 struct request_queue *blk_alloc_queue_node(gfp_t, int);
 extern void blk_put_queue(struct request_queue *);
@@ -1282,19 +1298,70 @@ queue_max_integrity_segments(struct request_queue *q)
 
 #else /* CONFIG_BLK_DEV_INTEGRITY */
 
-#define blk_integrity_rq(rq)			(0)
-#define blk_rq_count_integrity_sg(a, b)		(0)
-#define blk_rq_map_integrity_sg(a, b, c)	(0)
-#define bdev_get_integrity(a)			(0)
-#define blk_get_integrity(a)			(0)
-#define blk_integrity_compare(a, b)		(0)
-#define blk_integrity_register(a, b)		(0)
-#define blk_integrity_unregister(a)		do { } while (0)
-#define blk_queue_max_integrity_segments(a, b)	do { } while (0)
-#define queue_max_integrity_segments(a)		(0)
-#define blk_integrity_merge_rq(a, b, c)		(0)
-#define blk_integrity_merge_bio(a, b, c)	(0)
-#define blk_integrity_is_initialized(a)		(0)
+struct bio;
+struct block_device;
+struct gendisk;
+struct blk_integrity;
+
+static inline int blk_integrity_rq(struct request *rq)
+{
+	return 0;
+}
+static inline int blk_rq_count_integrity_sg(struct request_queue *q,
+					    struct bio *b)
+{
+	return 0;
+}
+static inline int blk_rq_map_integrity_sg(struct request_queue *q,
+					  struct bio *b,
+					  struct scatterlist *s)
+{
+	return 0;
+}
+static inline struct blk_integrity *bdev_get_integrity(struct block_device *b)
+{
+	return 0;
+}
+static inline struct blk_integrity *blk_get_integrity(struct gendisk *disk)
+{
+	return NULL;
+}
+static inline int blk_integrity_compare(struct gendisk *a, struct gendisk *b)
+{
+	return 0;
+}
+static inline int blk_integrity_register(struct gendisk *d,
+					 struct blk_integrity *b)
+{
+	return 0;
+}
+static inline void blk_integrity_unregister(struct gendisk *d)
+{
+}
+static inline void blk_queue_max_integrity_segments(struct request_queue *q,
+						    unsigned int segs)
+{
+}
+static inline unsigned short queue_max_integrity_segments(struct request_queue *q)
+{
+	return 0;
+}
+static inline int blk_integrity_merge_rq(struct request_queue *rq,
+					 struct request *r1,
+					 struct request *r2)
+{
+	return 0;
+}
+static inline int blk_integrity_merge_bio(struct request_queue *rq,
+					  struct request *r,
+					  struct bio *b)
+{
+	return 0;
+}
+static inline bool blk_integrity_is_initialized(struct gendisk *g)
+{
+	return 0;
+}
 
 #endif /* CONFIG_BLK_DEV_INTEGRITY */
 
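With the union in struct request above, the old rq->elevator_private[3] becomes a named struct: block core associates rq->elv.icq before the elevator's set_req_fn runs, leaving the two priv slots for the scheduler. A sketch of how an elevator might use the new layout; the "foo" names are illustrative, not from this patch:

struct foo_icq {
	struct io_cq icq;	/* must come first; see iocontext.h */
	void *sched_data;
};

static int foo_set_request(struct request_queue *q, struct request *rq,
			   gfp_t gfp_mask)
{
	/*
	 * rq->elv.icq is filled in by block core before this hook runs
	 * and holds a reference on the io_context until the request
	 * completes; priv[] replaces the old elevator_private slots.
	 */
	struct foo_icq *fic = container_of(rq->elv.icq, struct foo_icq, icq);

	rq->elv.priv[0] = fic->sched_data;
	rq->elv.priv[1] = NULL;
	return 0;
}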
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 1d0f7a2ff73b..c24f3d7fbf1e 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -5,6 +5,8 @@
 
 #ifdef CONFIG_BLOCK
 
+struct io_cq;
+
 typedef int (elevator_merge_fn) (struct request_queue *, struct request **,
 				 struct bio *);
 
@@ -24,6 +26,8 @@ typedef struct request *(elevator_request_list_fn) (struct request_queue *, stru
 typedef void (elevator_completed_req_fn) (struct request_queue *, struct request *);
 typedef int (elevator_may_queue_fn) (struct request_queue *, int);
 
+typedef void (elevator_init_icq_fn) (struct io_cq *);
+typedef void (elevator_exit_icq_fn) (struct io_cq *);
 typedef int (elevator_set_req_fn) (struct request_queue *, struct request *, gfp_t);
 typedef void (elevator_put_req_fn) (struct request *);
 typedef void (elevator_activate_req_fn) (struct request_queue *, struct request *);
@@ -56,6 +60,9 @@ struct elevator_ops
 	elevator_request_list_fn *elevator_former_req_fn;
 	elevator_request_list_fn *elevator_latter_req_fn;
 
+	elevator_init_icq_fn *elevator_init_icq_fn;	/* see iocontext.h */
+	elevator_exit_icq_fn *elevator_exit_icq_fn;	/* ditto */
+
 	elevator_set_req_fn *elevator_set_req_fn;
 	elevator_put_req_fn *elevator_put_req_fn;
 
@@ -63,7 +70,6 @@ struct elevator_ops
 
 	elevator_init_fn *elevator_init_fn;
 	elevator_exit_fn *elevator_exit_fn;
-	void (*trim)(struct io_context *);
 };
 
 #define ELV_NAME_MAX	(16)
@@ -79,11 +85,20 @@ struct elv_fs_entry {
  */
 struct elevator_type
 {
-	struct list_head list;
+	/* managed by elevator core */
+	struct kmem_cache *icq_cache;
+
+	/* fields provided by elevator implementation */
 	struct elevator_ops ops;
+	size_t icq_size;	/* see iocontext.h */
+	size_t icq_align;	/* ditto */
 	struct elv_fs_entry *elevator_attrs;
 	char elevator_name[ELV_NAME_MAX];
 	struct module *elevator_owner;
+
+	/* managed by elevator core */
+	char icq_cache_name[ELV_NAME_MAX + 5];	/* elvname + "_io_cq" */
+	struct list_head list;
 };
 
 /*
@@ -91,10 +106,9 @@ struct elevator_type
  */
 struct elevator_queue
 {
-	struct elevator_ops *ops;
+	struct elevator_type *type;
 	void *elevator_data;
 	struct kobject kobj;
-	struct elevator_type *elevator_type;
 	struct mutex sysfs_lock;
 	struct hlist_head *hash;
 	unsigned int registered:1;
@@ -129,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
 /*
  * io scheduler registration
  */
-extern void elv_register(struct elevator_type *);
+extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
 /*
@@ -197,22 +211,5 @@ enum {
 		INIT_LIST_HEAD(&(rq)->csd.list);	\
 	} while (0)
 
-/*
- * io context count accounting
- */
-#define elv_ioc_count_mod(name, __val) this_cpu_add(name, __val)
-#define elv_ioc_count_inc(name) this_cpu_inc(name)
-#define elv_ioc_count_dec(name) this_cpu_dec(name)
-
-#define elv_ioc_count_read(name)				\
-({								\
-	unsigned long __val = 0;				\
-	int __cpu;						\
-	smp_wmb();						\
-	for_each_possible_cpu(__cpu)				\
-		__val += per_cpu(name, __cpu);			\
-	__val;							\
-})
-
 #endif /* CONFIG_BLOCK */
 #endif
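elv_register() returning int means registration can now fail, presumably when the per-elevator slab cache named by icq_cache_name cannot be created. A sketch of the module-init idiom an io scheduler would use after this change; "foo" is a placeholder:

static struct elevator_type iosched_foo = {
	.ops		= { /* ... */ },
	.icq_size	= sizeof(struct foo_icq),	/* foo_icq embeds struct io_cq */
	.icq_align	= __alignof__(struct foo_icq),
	.elevator_name	= "foo",
	.elevator_owner	= THIS_MODULE,
};

static int __init foo_init(void)
{
	/* Propagate failure instead of ignoring the old void return. */
	return elv_register(&iosched_foo);
}

static void __exit foo_exit(void)
{
	elv_unregister(&iosched_foo);
}

module_init(foo_init);
module_exit(foo_exit);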
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4bc8169fb5a1..0244082d42c5 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -319,6 +319,7 @@ struct inodes_stat_t {
 #define BLKPBSZGET _IO(0x12,123)
 #define BLKDISCARDZEROES _IO(0x12,124)
 #define BLKSECDISCARD _IO(0x12,125)
+#define BLKROTATIONAL _IO(0x12,126)
 
 #define BMAP_IOCTL 1		/* obsolete - kept for compatibility */
 #define FIBMAP	   _IO(0x00,1)	/* bmap access */
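BLKROTATIONAL exposes the queue's rotational flag (the same bit as /sys/block/<dev>/queue/rotational) through the block ioctl interface. A userspace sketch, assuming the kernel-side handler (not shown in this diff) copies the flag out as an unsigned short:

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>

int main(int argc, char **argv)
{
	unsigned short rotational = 0;
	int fd = open(argc > 1 ? argv[1] : "/dev/sda", O_RDONLY);

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKROTATIONAL, &rotational) == 0)
		printf("rotational: %hu\n", rotational);	/* 0 suggests SSD */
	close(fd);
	return 0;
}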
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 5037a0ad2312..7e1371c4bccf 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -3,32 +3,92 @@
 
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>
 
-struct cfq_queue;
-struct cfq_ttime {
-	unsigned long last_end_request;
-
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
+enum {
+	ICQ_IOPRIO_CHANGED,
+	ICQ_CGROUP_CHANGED,
 };
 
-struct cfq_io_context {
-	void *key;
-
-	struct cfq_queue *cfqq[2];
-
-	struct io_context *ioc;
-
-	struct cfq_ttime ttime;
-
-	struct list_head queue_list;
-	struct hlist_node cic_list;
-
-	void (*dtor)(struct io_context *); /* destructor */
-	void (*exit)(struct io_context *); /* called on task exit */
+/*
+ * An io_cq (icq) is association between an io_context (ioc) and a
+ * request_queue (q).  This is used by elevators which need to track
+ * information per ioc - q pair.
+ *
+ * Elevator can request use of icq by setting elevator_type->icq_size and
+ * ->icq_align.  Both size and align must be larger than that of struct
+ * io_cq and elevator can use the tail area for private information.  The
+ * recommended way to do this is defining a struct which contains io_cq as
+ * the first member followed by private members and using its size and
+ * align.  For example,
+ *
+ *	struct snail_io_cq {
+ *		struct io_cq	icq;
+ *		int		poke_snail;
+ *		int		feed_snail;
+ *	};
+ *
+ *	struct elevator_type snail_elv_type {
+ *		.ops =		{ ... },
+ *		.icq_size =	sizeof(struct snail_io_cq),
+ *		.icq_align =	__alignof__(struct snail_io_cq),
+ *		...
+ *	};
+ *
+ * If icq_size is set, block core will manage icq's.  All requests will
+ * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn()
+ * is called and be holding a reference to the associated io_context.
+ *
+ * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
+ * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
+ * are called with both the associated io_context and queue locks held.
+ *
+ * Elevator is allowed to lookup icq using ioc_lookup_icq() while holding
+ * queue lock but the returned icq is valid only until the queue lock is
+ * released.  Elevators can not and should not try to create or destroy
+ * icq's.
+ *
+ * As icq's are linked from both ioc and q, the locking rules are a bit
+ * complex.
+ *
+ * - ioc lock nests inside q lock.
+ *
+ * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
+ *   q->icq_list and icq->q_node by q lock.
+ *
+ * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
+ *   itself is protected by q lock.  However, both the indexes and icq
+ *   itself are also RCU managed and lookup can be performed holding only
+ *   the q lock.
+ *
+ * - icq's are not reference counted.  They are destroyed when either the
+ *   ioc or q goes away.  Each request with icq set holds an extra
+ *   reference to ioc to ensure it stays until the request is completed.
+ *
+ * - Linking and unlinking icq's are performed while holding both ioc and q
+ *   locks.  Due to the lock ordering, q exit is simple but ioc exit
+ *   requires reverse-order double lock dance.
+ */
+struct io_cq {
+	struct request_queue	*q;
+	struct io_context	*ioc;
 
-	struct rcu_head rcu_head;
+	/*
+	 * q_node and ioc_node link io_cq through icq_list of q and ioc
+	 * respectively.  Both fields are unused once ioc_exit_icq() is
+	 * called and shared with __rcu_icq_cache and __rcu_head which are
+	 * used for RCU free of io_cq.
+	 */
+	union {
+		struct list_head	q_node;
+		struct kmem_cache	*__rcu_icq_cache;
+	};
+	union {
+		struct hlist_node	ioc_node;
+		struct rcu_head		__rcu_head;
+	};
+
+	unsigned long		changed;
 };
 
 /*
@@ -43,11 +103,6 @@ struct io_context {
 	spinlock_t lock;
 
 	unsigned short ioprio;
-	unsigned short ioprio_changed;
-
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-	unsigned short cgroup_changed;
-#endif
 
 	/*
 	 * For request batching
@@ -55,9 +110,11 @@ struct io_context {
 	int nr_batch_requests;     /* Number of requests left in the batch */
 	unsigned long last_waited; /* Time last woken after wait for request */
 
-	struct radix_tree_root radix_root;
-	struct hlist_head cic_list;
-	void __rcu *ioc_data;
+	struct radix_tree_root	icq_tree;
+	struct io_cq __rcu	*icq_hint;
+	struct hlist_head	icq_list;
+
+	struct work_struct release_work;
 };
 
 static inline struct io_context *ioc_task_link(struct io_context *ioc)
@@ -76,20 +133,17 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
-int put_io_context(struct io_context *ioc);
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
 void exit_io_context(struct task_struct *task);
-struct io_context *get_io_context(gfp_t gfp_flags, int node);
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
+struct io_context *get_task_io_context(struct task_struct *task,
+				       gfp_t gfp_flags, int node);
+void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
+void ioc_cgroup_changed(struct io_context *ioc);
 #else
-static inline void exit_io_context(struct task_struct *task)
-{
-}
-
 struct io_context;
-static inline int put_io_context(struct io_context *ioc)
-{
-	return 1;
-}
+static inline void put_io_context(struct io_context *ioc,
+				  struct request_queue *locked_q) { }
+static inline void exit_io_context(struct task_struct *task) { }
 #endif
 
 #endif
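put_io_context() gaining a locked_q argument follows from the locking rules documented in the io_cq comment above: icq teardown wants the queue lock, and the ioc side must otherwise do the reverse-order double-lock dance (or punt to release_work). A hedged sketch of the two call patterns; the function names are illustrative, not from this patch:

/* Elevator completion path: the request's queue lock is held here. */
static void foo_put_request(struct request *rq)
{
	struct io_cq *icq = rq->elv.icq;

	if (icq)
		put_io_context(icq->ioc, icq->q);	/* q is the locked queue */
}

/* Task-exit style paths that hold no queue lock pass NULL instead. */
static void foo_drop_ioc(struct io_context *ioc)
{
	put_io_context(ioc, NULL);
}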