Diffstat (limited to 'kernel/rcutree.c')
-rw-r--r--  kernel/rcutree.c  208
1 files changed, 160 insertions, 48 deletions
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index effd47a54b36..5ffadcc3bb26 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -68,9 +68,9 @@ static struct lock_class_key rcu_fqs_class[RCU_NUM_LVLS];
 	.level = { &sname##_state.node[0] }, \
 	.call = cr, \
 	.fqs_state = RCU_GP_IDLE, \
-	.gpnum = -300, \
-	.completed = -300, \
-	.onofflock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.onofflock), \
+	.gpnum = 0UL - 300UL, \
+	.completed = 0UL - 300UL, \
+	.orphan_lock = __RAW_SPIN_LOCK_UNLOCKED(&sname##_state.orphan_lock), \
 	.orphan_nxttail = &sname##_state.orphan_nxtlist, \
 	.orphan_donetail = &sname##_state.orphan_donelist, \
 	.barrier_mutex = __MUTEX_INITIALIZER(sname##_state.barrier_mutex), \
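
Note on the initializer change above: ->gpnum and ->completed are unsigned long counters that the rest of this file compares with wraparound-safe macros, so the explicit 0UL - 300UL form keeps the arithmetic unsigned from the start. A minimal userspace sketch of that comparison style, assuming a macro equivalent to the kernel's ULONG_CMP_GE() (everything else below is illustrative only):

#include <limits.h>
#include <stdio.h>

/* Wraparound-safe "a >= b" for free-running unsigned counters. */
#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

int main(void)
{
	unsigned long gpnum = 0UL - 300UL;	/* starts just below the wrap point */
	unsigned long snap = gpnum;		/* grace period we are waiting on */

	gpnum += 500;				/* counter wraps past zero */
	printf("new grace period seen: %d\n", ULONG_CMP_GE(gpnum, snap + 1)); /* prints 1 */
	return 0;
}
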
@@ -212,13 +212,13 @@ DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
 #endif
 };
 
-static int blimit = 10;		/* Maximum callbacks per rcu_do_batch. */
-static int qhimark = 10000;	/* If this many pending, ignore blimit. */
-static int qlowmark = 100;	/* Once only this many pending, use blimit. */
+static long blimit = 10;	/* Maximum callbacks per rcu_do_batch. */
+static long qhimark = 10000;	/* If this many pending, ignore blimit. */
+static long qlowmark = 100;	/* Once only this many pending, use blimit. */
 
-module_param(blimit, int, 0444);
-module_param(qhimark, int, 0444);
-module_param(qlowmark, int, 0444);
+module_param(blimit, long, 0444);
+module_param(qhimark, long, 0444);
+module_param(qlowmark, long, 0444);
 
 int rcu_cpu_stall_suppress __read_mostly; /* 1 = suppress stall warnings. */
 int rcu_cpu_stall_timeout __read_mostly = CONFIG_RCU_CPU_STALL_TIMEOUT;
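
The int-to-long conversion above keeps the module_param() declarations valid, since "long" is an accepted parameter type alongside "int", and the 0444 permissions continue to expose the values read-only through sysfs. A hedged out-of-tree sketch of the same declaration pattern (the demo_* names are hypothetical and not part of this patch):

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static long demo_limit = 10;		/* read-only via sysfs once loaded (0444) */
module_param(demo_limit, long, 0444);

static int __init demo_init(void)
{
	pr_info("demo_limit = %ld\n", demo_limit);
	return 0;
}
module_init(demo_init);

static void __exit demo_exit(void)
{
}
module_exit(demo_exit);

MODULE_LICENSE("GPL");
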
@@ -873,6 +873,29 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
 	rsp->jiffies_stall = jiffies + jiffies_till_stall_check();
 }
 
+/*
+ * Dump stacks of all tasks running on stalled CPUs.  This is a fallback
+ * for architectures that do not implement trigger_all_cpu_backtrace().
+ * The NMI-triggered stack traces are more accurate because they are
+ * printed by the target CPU.
+ */
+static void rcu_dump_cpu_stacks(struct rcu_state *rsp)
+{
+	int cpu;
+	unsigned long flags;
+	struct rcu_node *rnp;
+
+	rcu_for_each_leaf_node(rsp, rnp) {
+		raw_spin_lock_irqsave(&rnp->lock, flags);
+		if (rnp->qsmask != 0) {
+			for (cpu = 0; cpu <= rnp->grphi - rnp->grplo; cpu++)
+				if (rnp->qsmask & (1UL << cpu))
+					dump_cpu_task(rnp->grplo + cpu);
+		}
+		raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	}
+}
+
 static void print_other_cpu_stall(struct rcu_state *rsp)
 {
 	int cpu;
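
The rcu_dump_cpu_stacks() function added above walks each leaf rcu_node and dumps only those CPUs whose bit is still set in ->qsmask, i.e. the CPUs that have not yet reported a quiescent state for the current grace period. A userspace sketch of that bit walk (all values and names below are made up for illustration):

#include <stdio.h>

static void dump_cpu_task(int cpu)
{
	printf("would dump the stack of CPU %d\n", cpu);
}

int main(void)
{
	unsigned long qsmask = 0x15;	/* bits 0, 2, 4: CPUs still owing a QS */
	int grplo = 8, grphi = 15;	/* CPU range covered by this leaf node */
	int cpu;

	for (cpu = 0; cpu <= grphi - grplo; cpu++)
		if (qsmask & (1UL << cpu))
			dump_cpu_task(grplo + cpu);	/* CPUs 8, 10, 12 */
	return 0;
}
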
@@ -880,6 +903,7 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	unsigned long flags;
 	int ndetected = 0;
 	struct rcu_node *rnp = rcu_get_root(rsp);
+	long totqlen = 0;
 
 	/* Only let one CPU complain about others per time interval. */
 
@@ -924,12 +948,15 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 	raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
 	print_cpu_stall_info_end();
-	printk(KERN_CONT "(detected by %d, t=%ld jiffies)\n",
-	       smp_processor_id(), (long)(jiffies - rsp->gp_start));
+	for_each_possible_cpu(cpu)
+		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
+	pr_cont("(detected by %d, t=%ld jiffies, g=%lu, c=%lu, q=%lu)\n",
+	       smp_processor_id(), (long)(jiffies - rsp->gp_start),
+	       rsp->gpnum, rsp->completed, totqlen);
 	if (ndetected == 0)
 		printk(KERN_ERR "INFO: Stall ended before state dump start\n");
 	else if (!trigger_all_cpu_backtrace())
-		dump_stack();
+		rcu_dump_cpu_stacks(rsp);
 
 	/* Complain about tasks blocking the grace period. */
 
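
The new q= field in the stall message is just the sum of every possible CPU's ->qlen. A tiny userspace sketch of that reduction, with an array standing in for the per-CPU data (all names hypothetical):

#include <stdio.h>

#define NR_CPUS_DEMO 4

static long qlen[NR_CPUS_DEMO] = { 12, 0, 3071, 5 };	/* per-CPU callback counts */

int main(void)
{
	long totqlen = 0;
	int cpu;

	for (cpu = 0; cpu < NR_CPUS_DEMO; cpu++)	/* for_each_possible_cpu() stand-in */
		totqlen += qlen[cpu];

	printf("q=%ld\n", totqlen);	/* 3088, the q= value in the stall message */
	return 0;
}
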
@@ -940,8 +967,10 @@ static void print_other_cpu_stall(struct rcu_state *rsp)
 
 static void print_cpu_stall(struct rcu_state *rsp)
 {
+	int cpu;
 	unsigned long flags;
 	struct rcu_node *rnp = rcu_get_root(rsp);
+	long totqlen = 0;
 
 	/*
 	 * OK, time to rat on ourselves...
@@ -952,7 +981,10 @@ static void print_cpu_stall(struct rcu_state *rsp)
 	print_cpu_stall_info_begin();
 	print_cpu_stall_info(rsp, smp_processor_id());
 	print_cpu_stall_info_end();
-	printk(KERN_CONT " (t=%lu jiffies)\n", jiffies - rsp->gp_start);
+	for_each_possible_cpu(cpu)
+		totqlen += per_cpu_ptr(rsp->rda, cpu)->qlen;
+	pr_cont(" (t=%lu jiffies g=%lu c=%lu q=%lu)\n",
+		jiffies - rsp->gp_start, rsp->gpnum, rsp->completed, totqlen);
 	if (!trigger_all_cpu_backtrace())
 		dump_stack();
 
@@ -1404,15 +1436,37 @@ rcu_start_gp(struct rcu_state *rsp, unsigned long flags)
 	    !cpu_needs_another_gp(rsp, rdp)) {
 		/*
 		 * Either we have not yet spawned the grace-period
-		 * task or this CPU does not need another grace period.
+		 * task, this CPU does not need another grace period,
+		 * or a grace period is already in progress.
 		 * Either way, don't start a new grace period.
 		 */
 		raw_spin_unlock_irqrestore(&rnp->lock, flags);
 		return;
 	}
 
+	/*
+	 * Because there is no grace period in progress right now,
+	 * any callbacks we have up to this point will be satisfied
+	 * by the next grace period.  So promote all callbacks to be
+	 * handled after the end of the next grace period.  If the
+	 * CPU is not yet aware of the end of the previous grace period,
+	 * we need to allow for the callback advancement that will
+	 * occur when it does become aware.  Deadlock prevents us from
+	 * making it aware at this point: We cannot acquire a leaf
+	 * rcu_node ->lock while holding the root rcu_node ->lock.
+	 */
+	rdp->nxttail[RCU_NEXT_READY_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+	if (rdp->completed == rsp->completed)
+		rdp->nxttail[RCU_WAIT_TAIL] = rdp->nxttail[RCU_NEXT_TAIL];
+
 	rsp->gp_flags = RCU_GP_FLAG_INIT;
-	raw_spin_unlock_irqrestore(&rnp->lock, flags);
+	raw_spin_unlock(&rnp->lock); /* Interrupts remain disabled. */
+
+	/* Ensure that CPU is aware of completion of last grace period. */
+	rcu_process_gp_end(rsp, rdp);
+	local_irq_restore(flags);
+
+	/* Wake up rcu_gp_kthread() to start the grace period. */
 	wake_up(&rsp->gp_wq);
 }
 
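
The two nxttail[] assignments above exploit the segmented shape of the per-CPU callback list: a single linked list with tail pointers marking the DONE, WAIT, NEXT_READY and NEXT segments. A userspace sketch of that promotion, assuming the usual segment layout (the struct and setup below are illustrative only; in the patch the RCU_WAIT_TAIL promotion is conditional on the CPU having seen the end of the previous grace period):

#include <stdio.h>

struct cb {
	struct cb *next;
};

enum { RCU_DONE_TAIL, RCU_WAIT_TAIL, RCU_NEXT_READY_TAIL, RCU_NEXT_TAIL, RCU_NEXT_SIZE };

int main(void)
{
	struct cb c3 = { NULL }, c2 = { &c3 }, c1 = { &c2 };
	struct cb *nxtlist = &c1;		/* all queued callbacks, oldest first */
	struct cb **nxttail[RCU_NEXT_SIZE];

	/* c1 is waiting on the current GP; c2 and c3 arrived afterwards. */
	nxttail[RCU_DONE_TAIL] = &nxtlist;
	nxttail[RCU_WAIT_TAIL] = &c1.next;
	nxttail[RCU_NEXT_READY_TAIL] = &c1.next;
	nxttail[RCU_NEXT_TAIL] = &c3.next;

	/*
	 * The promotion done in rcu_start_gp(): with no grace period in
	 * progress, every callback queued so far can be handled after the
	 * end of the next grace period.
	 */
	nxttail[RCU_NEXT_READY_TAIL] = nxttail[RCU_NEXT_TAIL];
	nxttail[RCU_WAIT_TAIL] = nxttail[RCU_NEXT_TAIL];

	printf("NEXT segment empty: %d\n",
	       nxttail[RCU_NEXT_READY_TAIL] == nxttail[RCU_NEXT_TAIL]);
	return 0;
}
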
@@ -1573,7 +1627,7 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
 /*
  * Send the specified CPU's RCU callbacks to the orphanage.  The
  * specified CPU must be offline, and the caller must hold the
- * ->onofflock.
+ * ->orphan_lock.
  */
 static void
 rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
@@ -1581,8 +1635,8 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 {
 	/*
 	 * Orphan the callbacks.  First adjust the counts.  This is safe
-	 * because ->onofflock excludes _rcu_barrier()'s adoption of
-	 * the callbacks, thus no memory barrier is required.
+	 * because _rcu_barrier() excludes CPU-hotplug operations, so it
+	 * cannot be running now.  Thus no memory barrier is required.
 	 */
 	if (rdp->nxtlist != NULL) {
 		rsp->qlen_lazy += rdp->qlen_lazy;
@@ -1623,7 +1677,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
 
 /*
  * Adopt the RCU callbacks from the specified rcu_state structure's
- * orphanage.  The caller must hold the ->onofflock.
+ * orphanage.  The caller must hold the ->orphan_lock.
  */
 static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
 {
@@ -1702,7 +1756,7 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 
 	/* Exclude any attempts to start a new grace period. */
 	mutex_lock(&rsp->onoff_mutex);
-	raw_spin_lock_irqsave(&rsp->onofflock, flags);
+	raw_spin_lock_irqsave(&rsp->orphan_lock, flags);
 
 	/* Orphan the dead CPU's callbacks, and adopt them if appropriate. */
 	rcu_send_cbs_to_orphanage(cpu, rsp, rnp, rdp);
@@ -1729,10 +1783,10 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
 	/*
 	 * We still hold the leaf rcu_node structure lock here, and
 	 * irqs are still disabled.  The reason for this subterfuge is
-	 * because invoking rcu_report_unblock_qs_rnp() with ->onofflock
+	 * because invoking rcu_report_unblock_qs_rnp() with ->orphan_lock
 	 * held leads to deadlock.
 	 */
-	raw_spin_unlock(&rsp->onofflock); /* irqs remain disabled. */
+	raw_spin_unlock(&rsp->orphan_lock); /* irqs remain disabled. */
 	rnp = rdp->mynode;
 	if (need_report & RCU_OFL_TASKS_NORM_GP)
 		rcu_report_unblock_qs_rnp(rnp, flags);
@@ -1769,7 +1823,8 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 {
 	unsigned long flags;
 	struct rcu_head *next, *list, **tail;
-	int bl, count, count_lazy, i;
+	long bl, count, count_lazy;
+	int i;
 
 	/* If no callbacks are ready, just return.*/
 	if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
@@ -2205,10 +2260,28 @@ static inline int rcu_blocking_is_gp(void)
  * rcu_read_lock_sched().
  *
  * This means that all preempt_disable code sequences, including NMI and
- * hardware-interrupt handlers, in progress on entry will have completed
- * before this primitive returns.  However, this does not guarantee that
- * softirq handlers will have completed, since in some kernels, these
- * handlers can run in process context, and can block.
+ * non-threaded hardware-interrupt handlers, in progress on entry will
+ * have completed before this primitive returns.  However, this does not
+ * guarantee that softirq handlers will have completed, since in some
+ * kernels, these handlers can run in process context, and can block.
+ *
+ * Note that this guarantee implies further memory-ordering guarantees.
+ * On systems with more than one CPU, when synchronize_sched() returns,
+ * each CPU is guaranteed to have executed a full memory barrier since the
+ * end of its last RCU-sched read-side critical section whose beginning
+ * preceded the call to synchronize_sched().  In addition, each CPU having
+ * an RCU read-side critical section that extends beyond the return from
+ * synchronize_sched() is guaranteed to have executed a full memory barrier
+ * after the beginning of synchronize_sched() and before the beginning of
+ * that RCU read-side critical section.  Note that these guarantees include
+ * CPUs that are offline, idle, or executing in user mode, as well as CPUs
+ * that are executing in the kernel.
+ *
+ * Furthermore, if CPU A invoked synchronize_sched(), which returned
+ * to its caller on CPU B, then both CPU A and CPU B are guaranteed
+ * to have executed a full memory barrier during the execution of
+ * synchronize_sched() -- even if CPU A and CPU B are the same CPU (but
+ * again only if the system has more than one CPU).
  *
  * This primitive provides the guarantees made by the (now removed)
  * synchronize_kernel() API.  In contrast, synchronize_rcu() only
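
The memory-ordering guarantees documented above are what allow the classic remove/wait/free pattern to omit explicit barriers around the free. A minimal sketch of that usage under the assumption of a single updater (the demo_* names are illustrative and not from this file):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo {
	int val;
};

static struct demo __rcu *demo_ptr;

/* Reader side: disabling preemption delimits the critical section. */
static int demo_read(void)
{
	int val;

	rcu_read_lock_sched();
	val = rcu_dereference_sched(demo_ptr)->val;	/* assumes demo_ptr is non-NULL */
	rcu_read_unlock_sched();
	return val;
}

/* Updater side: publish the new version, wait, then free the old one. */
static void demo_update(struct demo *newp)
{
	struct demo *oldp;

	oldp = rcu_dereference_protected(demo_ptr, 1);	/* caller is the sole updater */
	rcu_assign_pointer(demo_ptr, newp);
	synchronize_sched();		/* all pre-existing readers have finished, */
	kfree(oldp);			/* with the full barriers described above  */
}
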
@@ -2239,6 +2312,9 @@ EXPORT_SYMBOL_GPL(synchronize_sched);
  * read-side critical sections have completed.  RCU read-side critical
  * sections are delimited by rcu_read_lock_bh() and rcu_read_unlock_bh(),
  * and may be nested.
+ *
+ * See the description of synchronize_sched() for more detailed information
+ * on memory ordering guarantees.
  */
 void synchronize_rcu_bh(void)
 {
@@ -2255,9 +2331,6 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
 
-static atomic_t sync_sched_expedited_started = ATOMIC_INIT(0);
-static atomic_t sync_sched_expedited_done = ATOMIC_INIT(0);
-
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
 	/*
@@ -2314,10 +2387,32 @@ static int synchronize_sched_expedited_cpu_stop(void *data)
  */
 void synchronize_sched_expedited(void)
 {
-	int firstsnap, s, snap, trycount = 0;
+	long firstsnap, s, snap;
+	int trycount = 0;
+	struct rcu_state *rsp = &rcu_sched_state;
 
-	/* Note that atomic_inc_return() implies full memory barrier. */
-	firstsnap = snap = atomic_inc_return(&sync_sched_expedited_started);
+	/*
+	 * If we are in danger of counter wrap, just do synchronize_sched().
+	 * By allowing sync_sched_expedited_started to advance no more than
+	 * ULONG_MAX/8 ahead of sync_sched_expedited_done, we are ensuring
+	 * that more than 3.5 billion CPUs would be required to force a
+	 * counter wrap on a 32-bit system.  Quite a few more CPUs would of
+	 * course be required on a 64-bit system.
+	 */
+	if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
+			 (ulong)atomic_long_read(&rsp->expedited_done) +
+			 ULONG_MAX / 8)) {
+		synchronize_sched();
+		atomic_long_inc(&rsp->expedited_wrap);
+		return;
+	}
+
+	/*
+	 * Take a ticket.  Note that atomic_inc_return() implies a
+	 * full memory barrier.
+	 */
+	snap = atomic_long_inc_return(&rsp->expedited_start);
+	firstsnap = snap;
 	get_online_cpus();
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
 
@@ -2329,48 +2424,65 @@ void synchronize_sched_expedited(void)
 			     synchronize_sched_expedited_cpu_stop,
 			     NULL) == -EAGAIN) {
 		put_online_cpus();
+		atomic_long_inc(&rsp->expedited_tryfail);
+
+		/* Check to see if someone else did our work for us. */
+		s = atomic_long_read(&rsp->expedited_done);
+		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
+			/* ensure test happens before caller kfree */
+			smp_mb__before_atomic_inc(); /* ^^^ */
+			atomic_long_inc(&rsp->expedited_workdone1);
+			return;
+		}
 
 		/* No joy, try again later.  Or just synchronize_sched(). */
 		if (trycount++ < 10) {
 			udelay(trycount * num_online_cpus());
 		} else {
 			wait_rcu_gp(call_rcu_sched);
+			atomic_long_inc(&rsp->expedited_normal);
 			return;
 		}
 
-		/* Check to see if someone else did our work for us. */
-		s = atomic_read(&sync_sched_expedited_done);
-		if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
+		/* Recheck to see if someone else did our work for us. */
+		s = atomic_long_read(&rsp->expedited_done);
+		if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
+			/* ensure test happens before caller kfree */
+			smp_mb__before_atomic_inc(); /* ^^^ */
+			atomic_long_inc(&rsp->expedited_workdone2);
 			return;
 		}
 
 		/*
 		 * Refetching sync_sched_expedited_started allows later
-		 * callers to piggyback on our grace period.  We subtract
-		 * 1 to get the same token that the last incrementer got.
-		 * We retry after they started, so our grace period works
-		 * for them, and they started after our first try, so their
-		 * grace period works for us.
+		 * callers to piggyback on our grace period.  We retry
+		 * after they started, so our grace period works for them,
+		 * and they started after our first try, so their grace
+		 * period works for us.
 		 */
 		get_online_cpus();
-		snap = atomic_read(&sync_sched_expedited_started);
+		snap = atomic_long_read(&rsp->expedited_start);
 		smp_mb(); /* ensure read is before try_stop_cpus(). */
 	}
+	atomic_long_inc(&rsp->expedited_stoppedcpus);
 
 	/*
 	 * Everyone up to our most recent fetch is covered by our grace
 	 * period.  Update the counter, but only if our work is still
 	 * relevant -- which it won't be if someone who started later
-	 * than we did beat us to the punch.
+	 * than we did already did their update.
 	 */
 	do {
-		s = atomic_read(&sync_sched_expedited_done);
-		if (UINT_CMP_GE((unsigned)s, (unsigned)snap)) {
-			smp_mb(); /* ensure test happens before caller kfree */
+		atomic_long_inc(&rsp->expedited_done_tries);
+		s = atomic_long_read(&rsp->expedited_done);
+		if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
+			/* ensure test happens before caller kfree */
+			smp_mb__before_atomic_inc(); /* ^^^ */
+			atomic_long_inc(&rsp->expedited_done_lost);
 			break;
 		}
 	} while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
+	atomic_long_inc(&rsp->expedited_done_exit);
 
 	put_online_cpus();
 }
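
Taken together, expedited_start and expedited_done behave like a ticket counter: each caller takes a ticket, and whoever completes a grace period advances the done counter to its own snapshot so that concurrent callers can detect that their work has already been covered. A simplified userspace sketch of that piggybacking idea using C11 atomics (all names hypothetical; the real code additionally bounds the start/done gap to ULONG_MAX/8 and retries try_stop_cpus()):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

static atomic_ulong expedited_start;
static atomic_ulong expedited_done;

static void expensive_expedited_work(void)	/* try_stop_cpus() stand-in */
{
}

static void demo_expedited(void)
{
	/* Take a ticket; everything queued before this point is covered. */
	unsigned long snap = atomic_fetch_add(&expedited_start, 1) + 1;
	unsigned long s;

	/* Has a later caller already completed a grace period covering us? */
	if (ULONG_CMP_GE(atomic_load(&expedited_done), snap)) {
		puts("piggybacked on someone else's expedited grace period");
		return;
	}

	expensive_expedited_work();

	/* Advance done to our snapshot unless someone already went further. */
	s = atomic_load(&expedited_done);
	while (!ULONG_CMP_GE(s, snap) &&
	       !atomic_compare_exchange_weak(&expedited_done, &s, snap))
		;	/* s is reloaded by each failed exchange */
}

int main(void)
{
	demo_expedited();
	demo_expedited();
	return 0;
}
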