-rw-r--r--  block/blk-cgroup.c  75
1 files changed, 52 insertions, 23 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 7ef747b7f056..201275467d8b 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -126,7 +126,7 @@ err_free:
 }
 
 static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
-				      struct request_queue *q)
+				      struct request_queue *q, bool update_hint)
 {
 	struct blkcg_gq *blkg;
 
@@ -135,14 +135,19 @@ static struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
 		return blkg;
 
 	/*
-	 * Hint didn't match. Look up from the radix tree. Note that we
-	 * may not be holding queue_lock and thus are not sure whether
-	 * @blkg from blkg_tree has already been removed or not, so we
-	 * can't update hint to the lookup result. Leave it to the caller.
+	 * Hint didn't match. Look up from the radix tree. Note that the
+	 * hint can only be updated under queue_lock as otherwise @blkg
+	 * could have already been removed from blkg_tree. The caller is
+	 * responsible for grabbing queue_lock if @update_hint.
 	 */
 	blkg = radix_tree_lookup(&blkcg->blkg_tree, q->id);
-	if (blkg && blkg->q == q)
+	if (blkg && blkg->q == q) {
+		if (update_hint) {
+			lockdep_assert_held(q->queue_lock);
+			rcu_assign_pointer(blkcg->blkg_hint, blkg);
+		}
 		return blkg;
+	}
 
 	return NULL;
 }
@@ -162,7 +167,7 @@ struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, struct request_queue *q)
 
 	if (unlikely(blk_queue_bypass(q)))
 		return NULL;
-	return __blkg_lookup(blkcg, q);
+	return __blkg_lookup(blkcg, q, false);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup);
 
@@ -170,9 +175,9 @@ EXPORT_SYMBOL_GPL(blkg_lookup);
  * If @new_blkg is %NULL, this function tries to allocate a new one as
  * necessary using %GFP_ATOMIC. @new_blkg is always consumed on return.
  */
-static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
-					     struct request_queue *q,
-					     struct blkcg_gq *new_blkg)
+static struct blkcg_gq *blkg_create(struct blkcg *blkcg,
+				    struct request_queue *q,
+				    struct blkcg_gq *new_blkg)
 {
 	struct blkcg_gq *blkg;
 	int ret;
@@ -180,13 +185,6 @@ static struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
 	WARN_ON_ONCE(!rcu_read_lock_held());
 	lockdep_assert_held(q->queue_lock);
 
-	/* lookup and update hint on success, see __blkg_lookup() for details */
-	blkg = __blkg_lookup(blkcg, q);
-	if (blkg) {
-		rcu_assign_pointer(blkcg->blkg_hint, blkg);
-		goto out_free;
-	}
-
 	/* blkg holds a reference to blkcg */
 	if (!css_tryget(&blkcg->css)) {
 		blkg = ERR_PTR(-EINVAL);
@@ -223,16 +221,39 @@ out_free:
 	return blkg;
 }
 
+/**
+ * blkg_lookup_create - lookup blkg, try to create one if not there
+ * @blkcg: blkcg of interest
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for the @blkcg - @q pair. If it doesn't exist, try to
+ * create one. This function should be called under RCU read lock and
+ * @q->queue_lock.
+ *
+ * Returns pointer to the looked up or created blkg on success, ERR_PTR()
+ * value on error. If @q is dead, returns ERR_PTR(-EINVAL). If @q is not
+ * dead and bypassing, returns ERR_PTR(-EBUSY).
+ */
 struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
 				    struct request_queue *q)
 {
+	struct blkcg_gq *blkg;
+
+	WARN_ON_ONCE(!rcu_read_lock_held());
+	lockdep_assert_held(q->queue_lock);
+
 	/*
 	 * This could be the first entry point of blkcg implementation and
 	 * we shouldn't allow anything to go through for a bypassing queue.
 	 */
	if (unlikely(blk_queue_bypass(q)))
		return ERR_PTR(blk_queue_dying(q) ? -EINVAL : -EBUSY);
-	return __blkg_lookup_create(blkcg, q, NULL);
+
+	blkg = __blkg_lookup(blkcg, q, true);
+	if (blkg)
+		return blkg;
+
+	return blkg_create(blkcg, q, NULL);
 }
 EXPORT_SYMBOL_GPL(blkg_lookup_create);
 
@@ -777,7 +798,7 @@ int blkcg_activate_policy(struct request_queue *q,
 			  const struct blkcg_policy *pol)
 {
 	LIST_HEAD(pds);
-	struct blkcg_gq *blkg;
+	struct blkcg_gq *blkg, *new_blkg;
 	struct blkg_policy_data *pd, *n;
 	int cnt = 0, ret;
 	bool preloaded;
@@ -786,19 +807,27 @@ int blkcg_activate_policy(struct request_queue *q,
 		return 0;
 
 	/* preallocations for root blkg */
-	blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
-	if (!blkg)
+	new_blkg = blkg_alloc(&blkcg_root, q, GFP_KERNEL);
+	if (!new_blkg)
 		return -ENOMEM;
 
 	preloaded = !radix_tree_preload(GFP_KERNEL);
 
 	blk_queue_bypass_start(q);
 
-	/* make sure the root blkg exists and count the existing blkgs */
+	/*
+	 * Make sure the root blkg exists and count the existing blkgs. As
+	 * @q is bypassing at this point, blkg_lookup_create() can't be
+	 * used. Open code it.
+	 */
 	spin_lock_irq(q->queue_lock);
 
 	rcu_read_lock();
-	blkg = __blkg_lookup_create(&blkcg_root, q, blkg);
+	blkg = __blkg_lookup(&blkcg_root, q, false);
+	if (blkg)
+		blkg_free(new_blkg);
+	else
+		blkg = blkg_create(&blkcg_root, q, new_blkg);
 	rcu_read_unlock();
 
 	if (preloaded)
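
For context, the kernel-doc comment added to blkg_lookup_create() above requires callers to hold both the RCU read lock and @q->queue_lock, and to check for ERR_PTR() returns. A minimal, hypothetical caller following that contract might look like the sketch below; the function name and the way the result is used are illustrative only and are not part of this commit.

/*
 * Illustrative sketch (not part of this commit): follow the locking
 * contract documented on blkg_lookup_create() -- RCU read lock plus
 * q->queue_lock held -- and handle ERR_PTR() returns. The blkg is only
 * used while the locks are held, which keeps it from going away.
 */
static void example_use_blkg(struct blkcg *blkcg, struct request_queue *q)
{
	struct blkcg_gq *blkg;

	rcu_read_lock();
	spin_lock_irq(q->queue_lock);

	blkg = blkg_lookup_create(blkcg, q);
	if (!IS_ERR(blkg)) {
		/* use @blkg here, under queue_lock and the RCU read lock */
	}

	spin_unlock_irq(q->queue_lock);
	rcu_read_unlock();
}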