 block/blk-cgroup.c        |  8
 block/blk-cgroup.h        |  4
 block/blk-throttle.c      |  3
 include/linux/cgroup.h    | 17
 kernel/cgroup.c           | 29
 kernel/cgroup_freezer.c   | 29
 kernel/cpuset.c           | 42
 mm/memcontrol.c           |  9
 security/device_cgroup.c  |  2
 9 files changed, 69 insertions(+), 74 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 54ad00292edf..e90c7c164c83 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -615,12 +615,10 @@ u64 blkg_stat_recursive_sum(struct blkg_policy_data *pd, int off)
 	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 	struct blkcg_gq *pos_blkg;
 	struct cgroup_subsys_state *pos_css;
-	u64 sum;
+	u64 sum = 0;
 
 	lockdep_assert_held(pd->blkg->q->queue_lock);
 
-	sum = blkg_stat_read((void *)pd + off);
-
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
@@ -650,13 +648,11 @@ struct blkg_rwstat blkg_rwstat_recursive_sum(struct blkg_policy_data *pd,
 	struct blkcg_policy *pol = blkcg_policy[pd->plid];
 	struct blkcg_gq *pos_blkg;
 	struct cgroup_subsys_state *pos_css;
-	struct blkg_rwstat sum;
+	struct blkg_rwstat sum = { };
 	int i;
 
 	lockdep_assert_held(pd->blkg->q->queue_lock);
 
-	sum = blkg_rwstat_read((void *)pd + off);
-
 	rcu_read_lock();
 	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
 		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index 855538630300..ae6969a7ffd4 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -291,6 +291,7 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
  * read locked.  If called under either blkcg or queue lock, the iteration
  * is guaranteed to include all and only online blkgs.  The caller may
  * update @pos_css by calling css_rightmost_descendant() to skip subtree.
+ * @p_blkg is included in the iteration and the first node to be visited.
  */
 #define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
 	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
@@ -304,7 +305,8 @@ struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg, struct request_queue *q,
  * @p_blkg: target blkg to walk descendants of
  *
  * Similar to blkg_for_each_descendant_pre() but performs post-order
- * traversal instead.  Synchronization rules are the same.
+ * traversal instead.  Synchronization rules are the same.  @p_blkg is
+ * included in the iteration and the last node to be visited.
  */
 #define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
 	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
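
Note: with @p_blkg now the first node visited, a recursive stat sum no longer
needs a separate read of the root's own counter before the walk, which is what
the blk-cgroup.c hunks above simplify.  A minimal sketch of the resulting
pattern, modeled on blkg_stat_recursive_sum() (@pd, @pol and @off as in that
function; treat this as an illustration, not the verbatim helper):

	u64 sum = 0;

	rcu_read_lock();
	blkg_for_each_descendant_pre(pos_blkg, pos_css, pd_to_blkg(pd)) {
		struct blkg_policy_data *pos_pd = blkg_to_pd(pos_blkg, pol);

		/* @pd's own blkg is visited first, then its descendants */
		if (pos_blkg->online)
			sum += blkg_stat_read((void *)pos_pd + off);
	}
	rcu_read_unlock();
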
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 8cefa7f8590e..8331aba9426f 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1379,7 +1379,6 @@ static int tg_set_conf(struct cgroup_subsys_state *css, struct cftype *cft,
 	 * restrictions in the whole hierarchy and allows them to bypass
 	 * blk-throttle.
 	 */
-	tg_update_has_rules(tg);
 	blkg_for_each_descendant_pre(blkg, pos_css, ctx.blkg)
 		tg_update_has_rules(blkg_to_tg(blkg));
 
@@ -1639,8 +1638,6 @@ void blk_throtl_drain(struct request_queue *q)
 	blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg)
 		tg_drain_bios(&blkg_to_tg(blkg)->service_queue);
 
-	tg_drain_bios(&td_root_tg(td)->service_queue);
-
 	/* finally, transfer bios from top-level tg's into the td */
 	tg_drain_bios(&td->service_queue);
 
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c40e508d54e9..8ec5b0f38292 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -798,7 +798,8 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos);
  * @pos: the css * to use as the loop cursor
  * @root: css whose descendants to walk
  *
- * Walk @root's descendants.  Must be called under rcu_read_lock().  A
+ * Walk @root's descendants.  @root is included in the iteration and the
+ * first node to be visited.  Must be called under rcu_read_lock().  A
  * descendant css which hasn't finished ->css_online() or already has
  * finished ->css_offline() may show up during traversal and it's each
  * subsystem's responsibility to verify that each @pos is alive.
@@ -820,13 +821,12 @@ css_rightmost_descendant(struct cgroup_subsys_state *pos);
  *
  * my_update_state(@css)
  * {
- *	Lock @css;
- *	Update @css's state;
- *	Unlock @css;
- *
  *	css_for_each_descendant_pre(@pos, @css) {
  *		Lock @pos;
- *		Verify @pos is alive and inherit state from @pos's parent;
+ *		if (@pos == @css)
+ *			Update @css's state;
+ *		else
+ *			Verify @pos is alive and inherit state from its parent;
  *		Unlock @pos;
  *	}
  * }
@@ -864,8 +864,9 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
  * @css: css whose descendants to walk
  *
  * Similar to css_for_each_descendant_pre() but performs post-order
- * traversal instead.  Note that the walk visibility guarantee described in
- * pre-order walk doesn't apply the same to post-order walks.
+ * traversal instead.  @root is included in the iteration and the last
+ * node to be visited.  Note that the walk visibility guarantee described
+ * in pre-order walk doesn't apply the same to post-order walks.
  */
 #define css_for_each_descendant_post(pos, css)				\
 	for ((pos) = css_next_descendant_post(NULL, (css)); (pos);	\
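
Note: under the new semantics a subsystem propagating state down the hierarchy
handles the origin css inside the loop itself, as the updated my_update_state()
pseudocode above shows.  A sketch in C (my_state_of(), ->lock and ->state are
hypothetical names, not part of this patch):

	struct cgroup_subsys_state *pos;

	rcu_read_lock();
	css_for_each_descendant_pre(pos, css) {
		struct my_state *s = my_state_of(pos);

		spin_lock(&s->lock);
		if (pos == css)
			s->state = new_state;	/* origin: apply the update */
		else	/* descendant: inherit from its parent */
			s->state = my_state_of(css_parent(pos))->state;
		spin_unlock(&s->lock);
	}
	rcu_read_unlock();
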
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c02a288a4e3d..52f0498db946 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2868,17 +2868,6 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
 
 	mutex_unlock(&cgroup_mutex);
 
-	/* @root always needs to be updated */
-	inode = root->dentry->d_inode;
-	mutex_lock(&inode->i_mutex);
-	mutex_lock(&cgroup_mutex);
-	ret = cgroup_addrm_files(root, cfts, is_add);
-	mutex_unlock(&cgroup_mutex);
-	mutex_unlock(&inode->i_mutex);
-
-	if (ret)
-		goto out_deact;
-
 	/* add/rm files for all cgroups created before */
 	rcu_read_lock();
 	css_for_each_descendant_pre(css, cgroup_css(root, ss->subsys_id)) {
@@ -2907,7 +2896,6 @@ static int cgroup_cfts_commit(struct cftype *cfts, bool is_add)
 	}
 	rcu_read_unlock();
 	dput(prev);
-out_deact:
 	deactivate_super(sb);
 	return ret;
 }
@@ -3099,7 +3087,8 @@ EXPORT_SYMBOL_GPL(css_next_child);
  * @root: css whose descendants to walk
  *
  * To be used by css_for_each_descendant_pre().  Find the next descendant
- * to visit for pre-order traversal of @root's descendants.
+ * to visit for pre-order traversal of @root's descendants.  @root is
+ * included in the iteration and the first node to be visited.
  *
  * While this function requires RCU read locking, it doesn't require the
  * whole traversal to be contained in a single RCU critical section.  This
@@ -3114,9 +3103,9 @@ css_next_descendant_pre(struct cgroup_subsys_state *pos,
 
 	WARN_ON_ONCE(!rcu_read_lock_held());
 
-	/* if first iteration, pretend we just visited @root */
+	/* if first iteration, visit @root */
 	if (!pos)
-		pos = root;
+		return root;
 
 	/* visit the first child if exists */
 	next = css_next_child(NULL, pos);
@@ -3186,7 +3175,8 @@ css_leftmost_descendant(struct cgroup_subsys_state *pos)
  * @root: css whose descendants to walk
  *
  * To be used by css_for_each_descendant_post().  Find the next descendant
- * to visit for post-order traversal of @root's descendants.
+ * to visit for post-order traversal of @root's descendants.  @root is
+ * included in the iteration and the last node to be visited.
  *
  * While this function requires RCU read locking, it doesn't require the
  * whole traversal to be contained in a single RCU critical section.  This
@@ -3207,14 +3197,17 @@ css_next_descendant_post(struct cgroup_subsys_state *pos,
 		return next != root ? next : NULL;
 	}
 
+	/* if we visited @root, we're done */
+	if (pos == root)
+		return NULL;
+
 	/* if there's an unvisited sibling, visit its leftmost descendant */
 	next = css_next_child(pos, css_parent(pos));
 	if (next)
 		return css_leftmost_descendant(next);
 
 	/* no sibling left, visit parent */
-	next = css_parent(pos);
-	return next != root ? next : NULL;
+	return css_parent(pos);
 }
 EXPORT_SYMBOL_GPL(css_next_descendant_post);
 
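
Note: the net effect of the two iterator changes is that for a hierarchy
@root -> { A -> { B }, C }, css_next_descendant_pre() now yields root, A, B, C
while css_next_descendant_post() yields B, A, C, root.  Callers that want the
old origin-excluding behavior simply compare against @root, as the cpuset and
device_cgroup conversions below do; a sketch (assuming the RCU read-side
critical section the iterators require):

	rcu_read_lock();
	css_for_each_descendant_pre(pos, root) {
		if (pos == root)
			continue;	/* skip the origin css */
		/* ... process each descendant ... */
	}
	rcu_read_unlock();
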
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 224da9aa27f5..f0ff64d0ebaa 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -311,7 +311,6 @@ static int freezer_read(struct cgroup_subsys_state *css, struct cftype *cft,
 	/* update states bottom-up */
 	css_for_each_descendant_post(pos, css)
 		update_if_frozen(pos);
-	update_if_frozen(css);
 
 	rcu_read_unlock();
 
@@ -391,11 +390,6 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
 {
 	struct cgroup_subsys_state *pos;
 
-	/* update @freezer */
-	spin_lock_irq(&freezer->lock);
-	freezer_apply_state(freezer, freeze, CGROUP_FREEZING_SELF);
-	spin_unlock_irq(&freezer->lock);
-
 	/*
 	 * Update all its descendants in pre-order traversal.  Each
 	 * descendant will try to inherit its parent's FREEZING state as
@@ -406,14 +400,23 @@ static void freezer_change_state(struct freezer *freezer, bool freeze)
 		struct freezer *pos_f = css_freezer(pos);
 		struct freezer *parent = parent_freezer(pos_f);
 
-		/*
-		 * Our update to @parent->state is already visible which is
-		 * all we need.  No need to lock @parent.  For more info on
-		 * synchronization, see freezer_post_create().
-		 */
 		spin_lock_irq(&pos_f->lock);
-		freezer_apply_state(pos_f, parent->state & CGROUP_FREEZING,
-				    CGROUP_FREEZING_PARENT);
+
+		if (pos_f == freezer) {
+			freezer_apply_state(pos_f, freeze,
+					    CGROUP_FREEZING_SELF);
+		} else {
+			/*
+			 * Our update to @parent->state is already visible
+			 * which is all we need.  No need to lock @parent.
+			 * For more info on synchronization, see
+			 * freezer_post_create().
+			 */
+			freezer_apply_state(pos_f,
+					    parent->state & CGROUP_FREEZING,
+					    CGROUP_FREEZING_PARENT);
+		}
+
 		spin_unlock_irq(&pos_f->lock);
 	}
 	rcu_read_unlock();
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index bf69717325b4..72a0383f382f 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -222,7 +222,8 @@ static struct cpuset top_cpuset = {
  *
  * Walk @des_cs through the online descendants of @root_cs.  Must be used
  * with RCU read locked.  The caller may modify @pos_css by calling
- * css_rightmost_descendant() to skip subtree.
+ * css_rightmost_descendant() to skip subtree.  @root_cs is included in the
+ * iteration and the first node to be visited.
  */
 #define cpuset_for_each_descendant_pre(des_cs, pos_css, root_cs)	\
 	css_for_each_descendant_pre((pos_css), &(root_cs)->css)		\
@@ -506,6 +507,9 @@ static void update_domain_attr_tree(struct sched_domain_attr *dattr,
 
 	rcu_read_lock();
 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
+		if (cp == root_cs)
+			continue;
+
 		/* skip the whole subtree if @cp doesn't have any CPU */
 		if (cpumask_empty(cp->cpus_allowed)) {
 			pos_css = css_rightmost_descendant(pos_css);
@@ -613,6 +617,8 @@ static int generate_sched_domains(cpumask_var_t **domains,
 
 	rcu_read_lock();
 	cpuset_for_each_descendant_pre(cp, pos_css, &top_cpuset) {
+		if (cp == &top_cpuset)
+			continue;
 		/*
 		 * Continue traversing beyond @cp iff @cp has some CPUs and
 		 * isn't load balancing.  The former is obvious.  The
@@ -875,15 +881,17 @@ static void update_tasks_cpumask_hier(struct cpuset *root_cs,
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
 
-	if (update_root)
-		update_tasks_cpumask(root_cs, heap);
-
 	rcu_read_lock();
 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-		/* skip the whole subtree if @cp have some CPU */
-		if (!cpumask_empty(cp->cpus_allowed)) {
-			pos_css = css_rightmost_descendant(pos_css);
-			continue;
+		if (cp == root_cs) {
+			if (!update_root)
+				continue;
+		} else {
+			/* skip the whole subtree if @cp have some CPU */
+			if (!cpumask_empty(cp->cpus_allowed)) {
+				pos_css = css_rightmost_descendant(pos_css);
+				continue;
+			}
 		}
 		if (!css_tryget(&cp->css))
 			continue;
@@ -1130,15 +1138,17 @@ static void update_tasks_nodemask_hier(struct cpuset *root_cs,
 	struct cpuset *cp;
 	struct cgroup_subsys_state *pos_css;
 
-	if (update_root)
-		update_tasks_nodemask(root_cs, heap);
-
 	rcu_read_lock();
 	cpuset_for_each_descendant_pre(cp, pos_css, root_cs) {
-		/* skip the whole subtree if @cp have some CPU */
-		if (!nodes_empty(cp->mems_allowed)) {
-			pos_css = css_rightmost_descendant(pos_css);
-			continue;
+		if (cp == root_cs) {
+			if (!update_root)
+				continue;
+		} else {
+			/* skip the whole subtree if @cp have some CPU */
+			if (!nodes_empty(cp->mems_allowed)) {
+				pos_css = css_rightmost_descendant(pos_css);
+				continue;
+			}
 		}
 		if (!css_tryget(&cp->css))
 			continue;
@@ -2237,7 +2247,7 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
 
 	rcu_read_lock();
 	cpuset_for_each_descendant_pre(cs, pos_css, &top_cpuset) {
-		if (!css_tryget(&cs->css))
+		if (cs == &top_cpuset || !css_tryget(&cs->css))
 			continue;
 		rcu_read_unlock();
 
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 2885e3e85047..b89d4cbc0c08 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1079,14 +1079,7 @@ static struct mem_cgroup *__mem_cgroup_iter_next(struct mem_cgroup *root,
 {
 	struct cgroup_subsys_state *prev_css, *next_css;
 
-	/*
-	 * Root is not visited by cgroup iterators so it needs an
-	 * explicit visit.
-	 */
-	if (!last_visited)
-		return root;
-
-	prev_css = (last_visited == root) ? NULL : &last_visited->css;
+	prev_css = last_visited ? &last_visited->css : NULL;
 skip_node:
 	next_css = css_next_descendant_pre(prev_css, &root->css);
 
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 9bf230aa28b0..c123628d3f84 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -456,7 +456,7 @@ static int propagate_exception(struct dev_cgroup *devcg_root,
 		 * methods), and online ones are safe to access outside RCU
 		 * read lock without bumping refcnt.
 		 */
-		if (!is_devcg_online(devcg))
+		if (pos == &devcg_root->css || !is_devcg_online(devcg))
 			continue;
 
 		rcu_read_unlock();