Diffstat (limited to 'kernel/rcu/tree.c')
-rw-r--r--	kernel/rcu/tree.c | 80
1 file changed, 47 insertions(+), 33 deletions(-)
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 67fe75725486..f79a1c646846 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3309,16 +3309,6 @@ static bool rcu_exp_gp_seq_done(struct rcu_state *rsp, unsigned long s)
 	return rcu_seq_done(&rsp->expedited_sequence, s);
 }
 
-static int synchronize_sched_expedited_cpu_stop(void *data)
-{
-	struct rcu_state *rsp = data;
-
-	/* We are here: If we are last, do the wakeup. */
-	if (atomic_dec_and_test(&rsp->expedited_need_qs))
-		wake_up(&rsp->expedited_wq);
-	return 0;
-}
-
 /* Common code for synchronize_sched_expedited() work-done checking. */
 static bool sync_sched_exp_wd(struct rcu_state *rsp, struct rcu_node *rnp,
 			      atomic_long_t *stat, unsigned long s)
@@ -3335,6 +3325,48 @@ static bool sync_sched_exp_wd(struct rcu_state *rsp, struct rcu_node *rnp,
 	return false;
 }
 
+/*
+ * Funnel-lock acquisition for expedited grace periods.  Returns a
+ * pointer to the root rcu_node structure, or NULL if some other
+ * task did the expedited grace period for us.
+ */
+static struct rcu_node *exp_funnel_lock(struct rcu_state *rsp, unsigned long s)
+{
+	struct rcu_node *rnp0;
+	struct rcu_node *rnp1 = NULL;
+
+	/*
+	 * Each pass through the following loop works its way
+	 * up the rcu_node tree, returning if others have done the
+	 * work or otherwise falls through holding the root rnp's
+	 * ->exp_funnel_mutex.  The mapping from CPU to rcu_node structure
+	 * can be inexact, as it is just promoting locality and is not
+	 * strictly needed for correctness.
+	 */
+	rnp0 = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
+	for (; rnp0 != NULL; rnp0 = rnp0->parent) {
+		if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone1, s))
+			return NULL;
+		mutex_lock(&rnp0->exp_funnel_mutex);
+		if (rnp1)
+			mutex_unlock(&rnp1->exp_funnel_mutex);
+		rnp1 = rnp0;
+	}
+	if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone2, s))
+		return NULL;
+	return rnp1;
+}
+
+static int synchronize_sched_expedited_cpu_stop(void *data)
+{
+	struct rcu_state *rsp = data;
+
+	/* We are here: If we are last, do the wakeup. */
+	if (atomic_dec_and_test(&rsp->expedited_need_qs))
+		wake_up(&rsp->expedited_wq);
+	return 0;
+}
+
 /**
  * synchronize_sched_expedited - Brute-force RCU-sched grace period
  *
@@ -3355,8 +3387,7 @@ void synchronize_sched_expedited(void)
 {
 	int cpu;
 	long s;
-	struct rcu_node *rnp0;
-	struct rcu_node *rnp1 = NULL;
+	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_sched_state;
 
 	/* Take a snapshot of the sequence number. */
@@ -3370,26 +3401,9 @@ void synchronize_sched_expedited(void)
 	}
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
-	/*
-	 * Each pass through the following loop works its way
-	 * up the rcu_node tree, returning if others have done the
-	 * work or otherwise falls through holding the root rnp's
-	 * ->exp_funnel_mutex.  The mapping from CPU to rcu_node structure
-	 * can be inexact, as it is just promoting locality and is not
-	 * strictly needed for correctness.
-	 */
-	rnp0 = per_cpu_ptr(rsp->rda, raw_smp_processor_id())->mynode;
-	for (; rnp0 != NULL; rnp0 = rnp0->parent) {
-		if (sync_sched_exp_wd(rsp, rnp1, &rsp->expedited_workdone1, s))
-			return;
-		mutex_lock(&rnp0->exp_funnel_mutex);
-		if (rnp1)
-			mutex_unlock(&rnp1->exp_funnel_mutex);
-		rnp1 = rnp0;
-	}
-	rnp0 = rnp1;  /* rcu_get_root(rsp), AKA root rcu_node structure. */
-	if (sync_sched_exp_wd(rsp, rnp0, &rsp->expedited_workdone2, s))
-		return;
+	rnp = exp_funnel_lock(rsp, s);
+	if (rnp == NULL)
+		return;  /* Someone else did our work for us. */
 
 	rcu_exp_gp_seq_start(rsp);
 
@@ -3415,7 +3429,7 @@ void synchronize_sched_expedited(void)
 			   !atomic_read(&rsp->expedited_need_qs));
 
 	rcu_exp_gp_seq_end(rsp);
-	mutex_unlock(&rnp0->exp_funnel_mutex);
+	mutex_unlock(&rnp->exp_funnel_mutex);
 	smp_mb(); /* ensure subsequent action seen after grace period. */
 
 	put_online_cpus();
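
For readers unfamiliar with the funnel-locking pattern that the new exp_funnel_lock() implements, the sketch below restates the idea in plain user-space C: a task starts at its leaf node and walks toward the root, taking each level's mutex before releasing the one below it, and bails out as soon as it sees that the work identified by its sequence snapshot has already been done.  The names here (struct node, seq_done(), funnel_lock()) and the pthread mutexes are illustrative assumptions only, not kernel APIs; the real code does its done-check inside sync_sched_exp_wd(), which also counts contention in expedited_workdone1/2.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

/* One node in a small tree; the root has parent == NULL. */
struct node {
	struct node *parent;
	pthread_mutex_t funnel_mutex;
};

/* "Has the work identified by snapshot s already finished?" */
static bool seq_done(unsigned long completed, unsigned long s)
{
	return (long)(completed - s) >= 0;	/* wrap-safe comparison */
}

/*
 * Walk from a (non-NULL) leaf toward the root, holding at most one
 * funnel_mutex at a time.  Returns the root with its mutex held, or
 * NULL (no mutex held) if the work was already done by someone else.
 * In real code *completed would be read with an acquire/atomic load;
 * a plain read keeps the sketch short.
 */
static struct node *funnel_lock(struct node *leaf,
				const unsigned long *completed,
				unsigned long s)
{
	struct node *np;
	struct node *held = NULL;

	for (np = leaf; np != NULL; np = np->parent) {
		if (seq_done(*completed, s)) {
			if (held)
				pthread_mutex_unlock(&held->funnel_mutex);
			return NULL;		/* someone else finished it */
		}
		pthread_mutex_lock(&np->funnel_mutex);
		if (held)
			pthread_mutex_unlock(&held->funnel_mutex);
		held = np;
	}
	/* One last check now that the root mutex is held. */
	if (seq_done(*completed, s)) {
		pthread_mutex_unlock(&held->funnel_mutex);
		return NULL;
	}
	return held;				/* root, mutex held */
}

Holding only one level's mutex at a time keeps contention local: concurrent requesters queue up on different leaves, and whichever task reaches the root performs the grace period on behalf of all of them, which is exactly why callers of exp_funnel_lock() can simply return when it hands back NULL.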