diff options
-rw-r--r-- | kernel/rcutree.c | 25 |
1 file changed, 18 insertions(+), 7 deletions(-)
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index b0d2cc3ea15a..7b5be56d95ae 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -246,7 +246,10 @@ module_param(jiffies_till_next_fqs, ulong, 0644);
246 | 246 | ||
247 | static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, | 247 | static void rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, |
248 | struct rcu_data *rdp); | 248 | struct rcu_data *rdp); |
249 | static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)); | 249 | static void force_qs_rnp(struct rcu_state *rsp, |
250 | int (*f)(struct rcu_data *rsp, bool *isidle, | ||
251 | unsigned long *maxj), | ||
252 | bool *isidle, unsigned long *maxj); | ||
250 | static void force_quiescent_state(struct rcu_state *rsp); | 253 | static void force_quiescent_state(struct rcu_state *rsp); |
251 | static int rcu_pending(int cpu); | 254 | static int rcu_pending(int cpu); |
252 | 255 | ||
@@ -727,7 +730,8 @@ static int rcu_is_cpu_rrupt_from_idle(void) | |||
727 | * credit them with an implicit quiescent state. Return 1 if this CPU | 730 | * credit them with an implicit quiescent state. Return 1 if this CPU |
728 | * is in dynticks idle mode, which is an extended quiescent state. | 731 | * is in dynticks idle mode, which is an extended quiescent state. |
729 | */ | 732 | */ |
730 | static int dyntick_save_progress_counter(struct rcu_data *rdp) | 733 | static int dyntick_save_progress_counter(struct rcu_data *rdp, |
734 | bool *isidle, unsigned long *maxj) | ||
731 | { | 735 | { |
732 | rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks); | 736 | rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks); |
733 | return (rdp->dynticks_snap & 0x1) == 0; | 737 | return (rdp->dynticks_snap & 0x1) == 0; |
@@ -739,7 +743,8 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp) | |||
739 | * idle state since the last call to dyntick_save_progress_counter() | 743 | * idle state since the last call to dyntick_save_progress_counter() |
740 | * for this same CPU, or by virtue of having been offline. | 744 | * for this same CPU, or by virtue of having been offline. |
741 | */ | 745 | */ |
742 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp) | 746 | static int rcu_implicit_dynticks_qs(struct rcu_data *rdp, |
747 | bool *isidle, unsigned long *maxj) | ||
743 | { | 748 | { |
744 | unsigned int curr; | 749 | unsigned int curr; |
745 | unsigned int snap; | 750 | unsigned int snap; |
@@ -1361,16 +1366,19 @@ static int rcu_gp_init(struct rcu_state *rsp) | |||
1361 | int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) | 1366 | int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in) |
1362 | { | 1367 | { |
1363 | int fqs_state = fqs_state_in; | 1368 | int fqs_state = fqs_state_in; |
1369 | bool isidle = false; | ||
1370 | unsigned long maxj; | ||
1364 | struct rcu_node *rnp = rcu_get_root(rsp); | 1371 | struct rcu_node *rnp = rcu_get_root(rsp); |
1365 | 1372 | ||
1366 | rsp->n_force_qs++; | 1373 | rsp->n_force_qs++; |
1367 | if (fqs_state == RCU_SAVE_DYNTICK) { | 1374 | if (fqs_state == RCU_SAVE_DYNTICK) { |
1368 | /* Collect dyntick-idle snapshots. */ | 1375 | /* Collect dyntick-idle snapshots. */ |
1369 | force_qs_rnp(rsp, dyntick_save_progress_counter); | 1376 | force_qs_rnp(rsp, dyntick_save_progress_counter, |
1377 | &isidle, &maxj); | ||
1370 | fqs_state = RCU_FORCE_QS; | 1378 | fqs_state = RCU_FORCE_QS; |
1371 | } else { | 1379 | } else { |
1372 | /* Handle dyntick-idle and offline CPUs. */ | 1380 | /* Handle dyntick-idle and offline CPUs. */ |
1373 | force_qs_rnp(rsp, rcu_implicit_dynticks_qs); | 1381 | force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj); |
1374 | } | 1382 | } |
1375 | /* Clear flag to prevent immediate re-entry. */ | 1383 | /* Clear flag to prevent immediate re-entry. */ |
1376 | if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { | 1384 | if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) { |
@@ -2069,7 +2077,10 @@ void rcu_check_callbacks(int cpu, int user) | |||
2069 | * | 2077 | * |
2070 | * The caller must have suppressed start of new grace periods. | 2078 | * The caller must have suppressed start of new grace periods. |
2071 | */ | 2079 | */ |
2072 | static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | 2080 | static void force_qs_rnp(struct rcu_state *rsp, |
2081 | int (*f)(struct rcu_data *rsp, bool *isidle, | ||
2082 | unsigned long *maxj), | ||
2083 | bool *isidle, unsigned long *maxj) | ||
2073 | { | 2084 | { |
2074 | unsigned long bit; | 2085 | unsigned long bit; |
2075 | int cpu; | 2086 | int cpu; |
@@ -2093,7 +2104,7 @@ static void force_qs_rnp(struct rcu_state *rsp, int (*f)(struct rcu_data *)) | |||
2093 | bit = 1; | 2104 | bit = 1; |
2094 | for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { | 2105 | for (; cpu <= rnp->grphi; cpu++, bit <<= 1) { |
2095 | if ((rnp->qsmask & bit) != 0 && | 2106 | if ((rnp->qsmask & bit) != 0 && |
2096 | f(per_cpu_ptr(rsp->rda, cpu))) | 2107 | f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj)) |
2097 | mask |= bit; | 2108 | mask |= bit; |
2098 | } | 2109 | } |
2099 | if (mask != 0) { | 2110 | if (mask != 0) { |