aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/iocontext.h
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2012-05-30 11:52:42 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-05-30 11:52:42 -0400
commit0d167518e045cc8bb63f0a8a0a85ad4fa4e0044f (patch)
tree101a9b5d425d79f663e4f25f1e90b7a8cc6604f1 /include/linux/iocontext.h
parent2f83766d4b18774c856329a8fca4c9338dfeda39 (diff)
parentff26eaadf4d914e397872b99885d45756104e9ae (diff)
Merge branch 'for-3.5/core' of git://git.kernel.dk/linux-block
Merge block/IO core bits from Jens Axboe: "This is a bit bigger on the core side than usual, but that is purely because we decided to hold off on parts of Tejun's submission on 3.4 to give it a bit more time to simmer. As a consequence, it's seen a long cycle in for-next. It contains: - Bug fix from Dan, wrong locking type. - Relax splice gifting restriction from Eric. - A ton of updates from Tejun, primarily for blkcg. This improves the code a lot, making the API nicer and cleaner, and also includes fixes for how we handle and tie policies and re-activate on switches. The changes also include generic bug fixes. - A simple fix from Vivek, along with a fix for doing proper delayed allocation of the blkcg stats." Fix up annoying conflict just due to different merge resolution in Documentation/feature-removal-schedule.txt * 'for-3.5/core' of git://git.kernel.dk/linux-block: (92 commits) blkcg: tg_stats_alloc_lock is an irq lock vmsplice: relax alignement requirements for SPLICE_F_GIFT blkcg: use radix tree to index blkgs from blkcg blkcg: fix blkcg->css ref leak in __blkg_lookup_create() block: fix elvpriv allocation failure handling block: collapse blk_alloc_request() into get_request() blkcg: collapse blkcg_policy_ops into blkcg_policy blkcg: embed struct blkg_policy_data in policy specific data blkcg: mass rename of blkcg API blkcg: style cleanups for blk-cgroup.h blkcg: remove blkio_group->path[] blkcg: blkg_rwstat_read() was missing inline blkcg: shoot down blkgs if all policies are deactivated blkcg: drop stuff unused after per-queue policy activation update blkcg: implement per-queue policy activation blkcg: add request_queue->root_blkg blkcg: make request_queue bypassing on allocation blkcg: make sure blkg_lookup() returns %NULL if @q is bypassing blkcg: make blkg_conf_prep() take @pol and return with queue lock held blkcg: remove static policy ID enums ...
Diffstat (limited to 'include/linux/iocontext.h')
-rw-r--r--include/linux/iocontext.h39
1 files changed, 22 insertions, 17 deletions
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index 1a3018063034..df38db2ef45b 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -6,11 +6,7 @@
 #include <linux/workqueue.h>
 
 enum {
-	ICQ_IOPRIO_CHANGED	= 1 << 0,
-	ICQ_CGROUP_CHANGED	= 1 << 1,
 	ICQ_EXITED		= 1 << 2,
-
-	ICQ_CHANGED_MASK	= ICQ_IOPRIO_CHANGED | ICQ_CGROUP_CHANGED,
 };
 
 /*
@@ -100,6 +96,7 @@ struct io_cq {
  */
 struct io_context {
 	atomic_long_t refcount;
+	atomic_t active_ref;
 	atomic_t nr_tasks;
 
 	/* all the fields below are protected by this lock */
@@ -120,29 +117,37 @@ struct io_context {
 	struct work_struct release_work;
 };
 
-static inline struct io_context *ioc_task_link(struct io_context *ioc)
+/**
+ * get_io_context_active - get active reference on ioc
+ * @ioc: ioc of interest
+ *
+ * Only iocs with active reference can issue new IOs. This function
+ * acquires an active reference on @ioc. The caller must already have an
+ * active reference on @ioc.
+ */
+static inline void get_io_context_active(struct io_context *ioc)
 {
-	/*
-	 * if ref count is zero, don't allow sharing (ioc is going away, it's
-	 * a race).
-	 */
-	if (ioc && atomic_long_inc_not_zero(&ioc->refcount)) {
-		atomic_inc(&ioc->nr_tasks);
-		return ioc;
-	}
+	WARN_ON_ONCE(atomic_long_read(&ioc->refcount) <= 0);
+	WARN_ON_ONCE(atomic_read(&ioc->active_ref) <= 0);
+	atomic_long_inc(&ioc->refcount);
+	atomic_inc(&ioc->active_ref);
+}
+
+static inline void ioc_task_link(struct io_context *ioc)
+{
+	get_io_context_active(ioc);
 
-	return NULL;
+	WARN_ON_ONCE(atomic_read(&ioc->nr_tasks) <= 0);
+	atomic_inc(&ioc->nr_tasks);
 }
 
 struct task_struct;
 #ifdef CONFIG_BLOCK
 void put_io_context(struct io_context *ioc);
+void put_io_context_active(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_task_io_context(struct task_struct *task,
 				       gfp_t gfp_flags, int node);
-void ioc_ioprio_changed(struct io_context *ioc, int ioprio);
-void ioc_cgroup_changed(struct io_context *ioc);
-unsigned int icq_get_changed(struct io_cq *icq);
 #else
 struct io_context;
 static inline void put_io_context(struct io_context *ioc) { }