Diffstat (limited to 'include/linux/iocontext.h')
-rw-r--r--   include/linux/iocontext.h   136
1 file changed, 95 insertions, 41 deletions
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 5037a0ad2312..7e1371c4bccf 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -3,32 +3,92 @@
 
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
+#include <linux/workqueue.h>
 
-struct cfq_queue;
-struct cfq_ttime {
-	unsigned long last_end_request;
-
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
+enum {
+	ICQ_IOPRIO_CHANGED,
+	ICQ_CGROUP_CHANGED,
 };
 
-struct cfq_io_context {
-	void *key;
-
-	struct cfq_queue *cfqq[2];
-
-	struct io_context *ioc;
-
-	struct cfq_ttime ttime;
-
-	struct list_head queue_list;
-	struct hlist_node cic_list;
-
-	void (*dtor)(struct io_context *); /* destructor */
-	void (*exit)(struct io_context *); /* called on task exit */
+/*
+ * An io_cq (icq) is association between an io_context (ioc) and a
+ * request_queue (q).  This is used by elevators which need to track
+ * information per ioc - q pair.
+ *
+ * Elevator can request use of icq by setting elevator_type->icq_size and
+ * ->icq_align.  Both size and align must be larger than that of struct
+ * io_cq and elevator can use the tail area for private information.  The
+ * recommended way to do this is defining a struct which contains io_cq as
+ * the first member followed by private members and using its size and
+ * align.  For example,
+ *
+ *	struct snail_io_cq {
+ *		struct io_cq	icq;
+ *		int		poke_snail;
+ *		int		feed_snail;
+ *	};
+ *
+ *	struct elevator_type snail_elv_type {
+ *		.ops =		{ ... },
+ *		.icq_size =	sizeof(struct snail_io_cq),
+ *		.icq_align =	__alignof__(struct snail_io_cq),
+ *		...
+ *	};
+ *
+ * If icq_size is set, block core will manage icq's.  All requests will
+ * have its ->elv.icq field set before elevator_ops->elevator_set_req_fn()
+ * is called and be holding a reference to the associated io_context.
+ *
+ * Whenever a new icq is created, elevator_ops->elevator_init_icq_fn() is
+ * called and, on destruction, ->elevator_exit_icq_fn().  Both functions
+ * are called with both the associated io_context and queue locks held.
+ *
+ * Elevator is allowed to lookup icq using ioc_lookup_icq() while holding
+ * queue lock but the returned icq is valid only until the queue lock is
+ * released.  Elevators can not and should not try to create or destroy
+ * icq's.
+ *
+ * As icq's are linked from both ioc and q, the locking rules are a bit
+ * complex.
+ *
+ * - ioc lock nests inside q lock.
+ *
+ * - ioc->icq_list and icq->ioc_node are protected by ioc lock.
+ *   q->icq_list and icq->q_node by q lock.
+ *
+ * - ioc->icq_tree and ioc->icq_hint are protected by ioc lock, while icq
+ *   itself is protected by q lock.  However, both the indexes and icq
+ *   itself are also RCU managed and lookup can be performed holding only
+ *   the q lock.
+ *
+ * - icq's are not reference counted.  They are destroyed when either the
+ *   ioc or q goes away.  Each request with icq set holds an extra
+ *   reference to ioc to ensure it stays until the request is completed.
+ *
+ * - Linking and unlinking icq's are performed while holding both ioc and q
+ *   locks.  Due to the lock ordering, q exit is simple but ioc exit
+ *   requires reverse-order double lock dance.
+ */
+struct io_cq {
+	struct request_queue	*q;
+	struct io_context	*ioc;
 
-	struct rcu_head rcu_head;
+	/*
+	 * q_node and ioc_node link io_cq through icq_list of q and ioc
+	 * respectively.  Both fields are unused once ioc_exit_icq() is
+	 * called and shared with __rcu_icq_cache and __rcu_head which are
+	 * used for RCU free of io_cq.
+	 */
+	union {
+		struct list_head	q_node;
+		struct kmem_cache	*__rcu_icq_cache;
+	};
+	union {
+		struct hlist_node	ioc_node;
+		struct rcu_head		__rcu_head;
+	};
+
+	unsigned long		changed;
 };
 
 /*
@@ -43,11 +103,6 @@ struct io_context {
 	spinlock_t lock;
 
 	unsigned short ioprio;
-	unsigned short ioprio_changed;
-
-#if defined(CONFIG_BLK_CGROUP) || defined(CONFIG_BLK_CGROUP_MODULE)
-	unsigned short cgroup_changed;
-#endif
 
 	/*
 	 * For request batching
@@ -55,9 +110,11 @@ struct io_context {
 	int nr_batch_requests;     /* Number of requests left in the batch */
 	unsigned long last_waited; /* Time last woken after wait for request */
 
-	struct radix_tree_root radix_root;
-	struct hlist_head cic_list;
-	void __rcu *ioc_data;
+	struct radix_tree_root	icq_tree;
+	struct io_cq __rcu	*icq_hint;
+	struct hlist_head	icq_list;
+
+	struct work_struct release_work;
 };
 
 static inline struct io_context *ioc_task_link(struct io_context *ioc)
@@ -76,20 +133,17 @@ static inline struct io_context *ioc_task_link(struct io_context *ioc)
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
-int put_io_context(struct io_context *ioc);
+void put_io_context(struct io_context *ioc, struct request_queue *locked_q);
 void exit_io_context(struct task_struct *task);
-struct io_context *get_io_context(gfp_t gfp_flags, int node);
-struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
+struct io_context *get_task_io_context(struct task_struct *task,
+				       gfp_t gfp_flags, int node);
+void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
+void ioc_cgroup_changed(struct io_context *ioc);
 #else
-static inline void exit_io_context(struct task_struct *task)
-{
-}
-
 struct io_context;
-static inline int put_io_context(struct io_context *ioc)
-{
-	return 1;
-}
+static inline void put_io_context(struct io_context *ioc,
+				  struct request_queue *locked_q) { }
+static inline void exit_io_context(struct task_struct *task) { }
 #endif
 
 #endif
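
For orientation, below is a minimal sketch of how an elevator might hook into the icq machinery added above, mirroring the "snail" example in the header comment.  The names my_io_cq, my_elv_type, my_icq_lookup and "my-iosched" are made up for illustration and are not part of the patch; only struct io_cq, the icq_size/icq_align members of struct elevator_type, and ioc_lookup_icq() (a block-layer internal, declared in block/blk.h at this point in the series, not in this header) come from the patched kernel.

	/* elevator-private per-(ioc, q) data; io_cq must be the first member */
	struct my_io_cq {
		struct io_cq	icq;
		unsigned long	seek_samples;	/* private tail area */
	};

	static struct elevator_type my_elv_type = {
		.ops		= { /* elevator_*_fn callbacks */ },
		.icq_size	= sizeof(struct my_io_cq),	/* tell block core how much to allocate */
		.icq_align	= __alignof__(struct my_io_cq),
		.elevator_name	= "my-iosched",
	};

	/* caller must hold q->queue_lock; result is valid only while it is held */
	static struct my_io_cq *my_icq_lookup(struct request_queue *q,
					      struct io_context *ioc)
	{
		struct io_cq *icq = ioc_lookup_icq(ioc, q);

		return icq ? container_of(icq, struct my_io_cq, icq) : NULL;
	}

With icq_size set, block core allocates, links and frees the icq's itself; the elevator only observes them through lookups like the one above and through the rq->elv.icq pointer mentioned in the comment.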