author		Peter Zijlstra <peterz@infradead.org>	2019-11-08 05:11:52 -0500
committer	Peter Zijlstra <peterz@infradead.org>	2019-11-08 16:34:14 -0500
commit		6e2df0581f569038719cf2bc2b3baa3fcc83cab4 (patch)
tree		91a337f916b868f9a73864949698dd27762d9a8e /kernel/sched
parent		e3b8b6a0d12cccf772113d6b5c1875192186fbd4 (diff)
sched: Fix pick_next_task() vs 'change' pattern race
Commit 67692435c411 ("sched: Rework pick_next_task() slow-path") inadvertently
introduced a race because it changed a previously unexplored dependency
between dropping the rq->lock and sched_class::put_prev_task().

The comments about dropping rq->lock, in for example newidle_balance(), only
mention the task being current and ->on_cpu being set. But when we look at the
'change' pattern (in for example sched_setnuma()):

	queued = task_on_rq_queued(p); /* p->on_rq == TASK_ON_RQ_QUEUED */
	running = task_current(rq, p); /* rq->curr == p */

	if (queued)
		dequeue_task(...);
	if (running)
		put_prev_task(...);

	/* change task properties */

	if (queued)
		enqueue_task(...);
	if (running)
		set_next_task(...);

it becomes obvious that if we do this after put_prev_task() has already been
called on @p, things go sideways. This is exactly what the commit in question
allows to happen when it does:

	prev->sched_class->put_prev_task(rq, prev, rf);
	if (!rq->nr_running)
		newidle_balance(rq, rf);

The newidle_balance() call will drop rq->lock after we've called
put_prev_task() and that allows the above 'change' pattern to interleave and
mess up the state.

Furthermore, it turns out we lost the RT-pull when we put the last DL task.

Fix both problems by extracting the balancing from put_prev_task() and doing
a multi-class balance() pass before put_prev_task().

Fixes: 67692435c411 ("sched: Rework pick_next_task() slow-path")
Reported-by: Quentin Perret <qperret@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Tested-by: Quentin Perret <qperret@google.com>
Tested-by: Valentin Schneider <valentin.schneider@arm.com>
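To make the broken interleaving concrete, here is a small user-space C model (not kernel code: struct task, change_pattern() and the put/set helpers below are simplified stand-ins for the rq->curr bookkeeping, put_prev_task()/set_next_task() and the 'change' pattern quoted above; dequeue/enqueue are elided). With the pre-fix ordering, put_prev_task() runs and then rq->lock is dropped, so a concurrent 'change' pattern can call put_prev_task() a second time on the same task; the assertion stands in for the resulting state corruption:

	#include <assert.h>
	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-in for the per-task scheduler state. */
	struct task {
		bool running;	/* models task_current(rq, p) */
		bool put;	/* put_prev_task() ran, no set_next_task() yet */
	};

	static void put_prev_task(struct task *t)
	{
		assert(!t->put);	/* a second put without set_next_task() corrupts state */
		t->put = true;
	}

	static void set_next_task(struct task *t)
	{
		t->put = false;
	}

	/* The 'change' pattern: it only checks "running", not whether the task
	 * has already been put. */
	static void change_pattern(struct task *t)
	{
		if (t->running)
			put_prev_task(t);
		/* change task properties */
		if (t->running)
			set_next_task(t);
	}

	int main(void)
	{
		struct task prev = { .running = true, .put = false };

		/* Pre-fix pick_next_task() slow path: put the previous task first... */
		put_prev_task(&prev);

		/*
		 * ...then newidle_balance() drops rq->lock, so another CPU can run
		 * the 'change' pattern on @prev while it is still rq->curr:
		 */
		change_pattern(&prev);	/* assertion fires: put_prev_task() called twice */

		printf("not reached with the pre-fix ordering\n");
		return 0;
	}

With the fix (doing the balance pass, and any rq->lock drops it implies, before put_prev_task()), the task is still in its pre-pick state whenever the lock is released, so the 'change' pattern always sees consistent state.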
Diffstat (limited to 'kernel/sched')
-rw-r--r--	kernel/sched/core.c	21
-rw-r--r--	kernel/sched/deadline.c	40
-rw-r--r--	kernel/sched/fair.c	15
-rw-r--r--	kernel/sched/idle.c	9
-rw-r--r--	kernel/sched/rt.c	37
-rw-r--r--	kernel/sched/sched.h	30
-rw-r--r--	kernel/sched/stop_task.c	18
7 files changed, 112 insertions, 58 deletions
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index afd4d8028771..0f2eb3629070 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3929,13 +3929,22 @@ pick_next_task(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	}
 
 restart:
+#ifdef CONFIG_SMP
 	/*
-	 * Ensure that we put DL/RT tasks before the pick loop, such that they
-	 * can PULL higher prio tasks when we lower the RQ 'priority'.
+	 * We must do the balancing pass before put_next_task(), such
+	 * that when we release the rq->lock the task is in the same
+	 * state as before we took rq->lock.
+	 *
+	 * We can terminate the balance pass as soon as we know there is
+	 * a runnable task of @class priority or higher.
 	 */
-	prev->sched_class->put_prev_task(rq, prev, rf);
-	if (!rq->nr_running)
-		newidle_balance(rq, rf);
+	for_class_range(class, prev->sched_class, &idle_sched_class) {
+		if (class->balance(rq, prev, rf))
+			break;
+	}
+#endif
+
+	put_prev_task(rq, prev);
 
 	for_each_class(class) {
 		p = class->pick_next_task(rq, NULL, NULL);
@@ -6201,7 +6210,7 @@ static struct task_struct *__pick_migrate_task(struct rq *rq)
 	for_each_class(class) {
 		next = class->pick_next_task(rq, NULL, NULL);
 		if (next) {
-			next->sched_class->put_prev_task(rq, next, NULL);
+			next->sched_class->put_prev_task(rq, next);
 			return next;
 		}
 	}
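For readers following the new pick_next_task() flow above: the balance pass walks the class list from prev->sched_class down to (but excluding) the idle class and stops as soon as a class reports a runnable task of its own priority or higher, so newidle_balance() is only reached from the fair class when nothing else is runnable. The following is a user-space sketch of just that control flow, under invented names (rq_model, m_balance_*, balance_pass); only the loop structure and the early termination mirror the patch, none of this is kernel API.

	#include <stdbool.h>
	#include <stdio.h>

	/* Invented stand-in for the few rq counters the model needs. */
	struct rq_model { int stop_nr, dl_nr, rt_nr, cfs_nr; };

	typedef bool (*balance_fn)(struct rq_model *rq);

	/* Each model mirrors the contract of the new ->balance() callbacks:
	 * optionally pull work, then report whether a task of this class's
	 * priority or higher is runnable. */
	static bool m_balance_stop(struct rq_model *rq) { return rq->stop_nr > 0; }
	static bool m_balance_dl(struct rq_model *rq)
	{
		/* pull_dl_task() would happen here */
		return rq->stop_nr > 0 || rq->dl_nr > 0;
	}
	static bool m_balance_rt(struct rq_model *rq)
	{
		/* pull_rt_task() would happen here */
		return rq->stop_nr > 0 || rq->dl_nr > 0 || rq->rt_nr > 0;
	}
	static bool m_balance_fair(struct rq_model *rq)
	{
		if (rq->stop_nr || rq->dl_nr || rq->rt_nr || rq->cfs_nr)
			return true;
		printf("    newidle_balance()\n");	/* only when nothing is runnable */
		return false;
	}

	static const struct { const char *name; balance_fn balance; } classes[] = {
		{ "stop", m_balance_stop },
		{ "dl",   m_balance_dl   },
		{ "rt",   m_balance_rt   },
		{ "fair", m_balance_fair },
		/* the idle class ends the range and never gets balanced */
	};

	/* Models for_class_range(class, prev->sched_class, &idle_sched_class). */
	static void balance_pass(struct rq_model *rq, int prev_class)
	{
		for (int i = prev_class; i < (int)(sizeof(classes) / sizeof(classes[0])); i++) {
			printf("  %s->balance()\n", classes[i].name);
			if (classes[i].balance(rq))
				break;	/* runnable task of this priority or higher */
		}
	}

	int main(void)
	{
		struct rq_model busy = { .rt_nr = 1 };
		struct rq_model empty = { 0 };

		printf("prev was RT, another RT task is runnable:\n");
		balance_pass(&busy, 2);		/* stops at rt, fair is never balanced */

		printf("prev was CFS, the rq is empty:\n");
		balance_pass(&empty, 3);	/* fair falls back to newidle_balance() */
		return 0;
	}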
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 2dc48720f189..a8a08030a8f7 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1691,6 +1691,22 @@ static void check_preempt_equal_dl(struct rq *rq, struct task_struct *p)
 	resched_curr(rq);
 }
 
+static int balance_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+{
+	if (!on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet started the picking loop.
+		 */
+		rq_unpin_lock(rq, rf);
+		pull_dl_task(rq);
+		rq_repin_lock(rq, rf);
+	}
+
+	return sched_stop_runnable(rq) || sched_dl_runnable(rq);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -1758,45 +1774,28 @@ static struct task_struct *
 pick_next_task_dl(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct sched_dl_entity *dl_se;
+	struct dl_rq *dl_rq = &rq->dl;
 	struct task_struct *p;
-	struct dl_rq *dl_rq;
 
 	WARN_ON_ONCE(prev || rf);
 
-	dl_rq = &rq->dl;
-
-	if (unlikely(!dl_rq->dl_nr_running))
+	if (!sched_dl_runnable(rq))
 		return NULL;
 
 	dl_se = pick_next_dl_entity(rq, dl_rq);
 	BUG_ON(!dl_se);
-
 	p = dl_task_of(dl_se);
-
 	set_next_task_dl(rq, p);
-
 	return p;
 }
 
-static void put_prev_task_dl(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+static void put_prev_task_dl(struct rq *rq, struct task_struct *p)
 {
 	update_curr_dl(rq);
 
 	update_dl_rq_load_avg(rq_clock_pelt(rq), rq, 1);
 	if (on_dl_rq(&p->dl) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_dl_task(rq, p);
-
-	if (rf && !on_dl_rq(&p->dl) && need_pull_dl_task(rq, p)) {
-		/*
-		 * This is OK, because current is on_cpu, which avoids it being
-		 * picked for load-balance and preemption/IRQs are still
-		 * disabled avoiding further scheduler activity on it and we've
-		 * not yet started the picking loop.
-		 */
-		rq_unpin_lock(rq, rf);
-		pull_dl_task(rq);
-		rq_repin_lock(rq, rf);
-	}
 }
 
 /*
@@ -2442,6 +2441,7 @@ const struct sched_class dl_sched_class = {
 	.set_next_task		= set_next_task_dl,
 
 #ifdef CONFIG_SMP
+	.balance		= balance_dl,
 	.select_task_rq		= select_task_rq_dl,
 	.migrate_task_rq	= migrate_task_rq_dl,
 	.set_cpus_allowed	= set_cpus_allowed_dl,
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 682a754ea3e1..22a2fed29054 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -6570,6 +6570,15 @@ static void task_dead_fair(struct task_struct *p)
 {
 	remove_entity_load_avg(&p->se);
 }
+
+static int
+balance_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+	if (rq->nr_running)
+		return 1;
+
+	return newidle_balance(rq, rf) != 0;
+}
 #endif /* CONFIG_SMP */
 
 static unsigned long wakeup_gran(struct sched_entity *se)
@@ -6746,7 +6755,7 @@ pick_next_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 	int new_tasks;
 
 again:
-	if (!cfs_rq->nr_running)
+	if (!sched_fair_runnable(rq))
 		goto idle;
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
@@ -6884,7 +6893,7 @@ idle:
 /*
  * Account for a descheduled task:
  */
-static void put_prev_task_fair(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_fair(struct rq *rq, struct task_struct *prev)
 {
 	struct sched_entity *se = &prev->se;
 	struct cfs_rq *cfs_rq;
@@ -10414,11 +10423,11 @@ const struct sched_class fair_sched_class = {
 	.check_preempt_curr	= check_preempt_wakeup,
 
 	.pick_next_task		= pick_next_task_fair,
-
 	.put_prev_task		= put_prev_task_fair,
 	.set_next_task		= set_next_task_fair,
 
 #ifdef CONFIG_SMP
+	.balance		= balance_fair,
 	.select_task_rq		= select_task_rq_fair,
 	.migrate_task_rq	= migrate_task_rq_fair,
 
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index 8dad5aa600ea..f65ef1e2f204 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -365,6 +365,12 @@ select_task_rq_idle(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	return task_cpu(p); /* IDLE tasks as never migrated */
 }
+
+static int
+balance_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+	return WARN_ON_ONCE(1);
+}
 #endif
 
 /*
@@ -375,7 +381,7 @@ static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int flags)
 	resched_curr(rq);
 }
 
-static void put_prev_task_idle(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_idle(struct rq *rq, struct task_struct *prev)
 {
 }
 
@@ -460,6 +466,7 @@ const struct sched_class idle_sched_class = {
 	.set_next_task		= set_next_task_idle,
 
 #ifdef CONFIG_SMP
+	.balance		= balance_idle,
 	.select_task_rq		= select_task_rq_idle,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index ebaa4e619684..9b8adc01be3d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1469,6 +1469,22 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
 	resched_curr(rq);
 }
 
+static int balance_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+{
+	if (!on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
+		/*
+		 * This is OK, because current is on_cpu, which avoids it being
+		 * picked for load-balance and preemption/IRQs are still
+		 * disabled avoiding further scheduler activity on it and we've
+		 * not yet started the picking loop.
+		 */
+		rq_unpin_lock(rq, rf);
+		pull_rt_task(rq);
+		rq_repin_lock(rq, rf);
+	}
+
+	return sched_stop_runnable(rq) || sched_dl_runnable(rq) || sched_rt_runnable(rq);
+}
 #endif /* CONFIG_SMP */
 
 /*
@@ -1552,21 +1568,18 @@ static struct task_struct *
 pick_next_task_rt(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
 	struct task_struct *p;
-	struct rt_rq *rt_rq = &rq->rt;
 
 	WARN_ON_ONCE(prev || rf);
 
-	if (!rt_rq->rt_queued)
+	if (!sched_rt_runnable(rq))
 		return NULL;
 
 	p = _pick_next_task_rt(rq);
-
 	set_next_task_rt(rq, p);
-
 	return p;
 }
 
-static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
+static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
 {
 	update_curr_rt(rq);
 
@@ -1578,18 +1591,6 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p, struct rq_flags *rf)
 	 */
 	if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
 		enqueue_pushable_task(rq, p);
-
-	if (rf && !on_rt_rq(&p->rt) && need_pull_rt_task(rq, p)) {
-		/*
-		 * This is OK, because current is on_cpu, which avoids it being
-		 * picked for load-balance and preemption/IRQs are still
-		 * disabled avoiding further scheduler activity on it and we've
-		 * not yet started the picking loop.
-		 */
-		rq_unpin_lock(rq, rf);
-		pull_rt_task(rq);
-		rq_repin_lock(rq, rf);
-	}
 }
 
 #ifdef CONFIG_SMP
@@ -2366,8 +2367,8 @@ const struct sched_class rt_sched_class = {
 	.set_next_task		= set_next_task_rt,
 
 #ifdef CONFIG_SMP
+	.balance		= balance_rt,
 	.select_task_rq		= select_task_rq_rt,
-
 	.set_cpus_allowed	= set_cpus_allowed_common,
 	.rq_online		= rq_online_rt,
 	.rq_offline		= rq_offline_rt,
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 0db2c1b3361e..c8870c5bd7df 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1727,10 +1727,11 @@ struct sched_class {
 	struct task_struct * (*pick_next_task)(struct rq *rq,
 					       struct task_struct *prev,
 					       struct rq_flags *rf);
-	void (*put_prev_task)(struct rq *rq, struct task_struct *p, struct rq_flags *rf);
+	void (*put_prev_task)(struct rq *rq, struct task_struct *p);
 	void (*set_next_task)(struct rq *rq, struct task_struct *p);
 
 #ifdef CONFIG_SMP
+	int (*balance)(struct rq *rq, struct task_struct *prev, struct rq_flags *rf);
 	int (*select_task_rq)(struct task_struct *p, int task_cpu, int sd_flag, int flags);
 	void (*migrate_task_rq)(struct task_struct *p, int new_cpu);
 
@@ -1773,7 +1774,7 @@ struct sched_class {
 static inline void put_prev_task(struct rq *rq, struct task_struct *prev)
 {
 	WARN_ON_ONCE(rq->curr != prev);
-	prev->sched_class->put_prev_task(rq, prev, NULL);
+	prev->sched_class->put_prev_task(rq, prev);
 }
 
 static inline void set_next_task(struct rq *rq, struct task_struct *next)
@@ -1787,8 +1788,12 @@ static inline void set_next_task(struct rq *rq, struct task_struct *next)
 #else
 #define sched_class_highest (&dl_sched_class)
 #endif
+
+#define for_class_range(class, _from, _to) \
+	for (class = (_from); class != (_to); class = class->next)
+
 #define for_each_class(class) \
-	for (class = sched_class_highest; class; class = class->next)
+	for_class_range(class, sched_class_highest, NULL)
 
 extern const struct sched_class stop_sched_class;
 extern const struct sched_class dl_sched_class;
@@ -1796,6 +1801,25 @@ extern const struct sched_class rt_sched_class;
 extern const struct sched_class fair_sched_class;
 extern const struct sched_class idle_sched_class;
 
+static inline bool sched_stop_runnable(struct rq *rq)
+{
+	return rq->stop && task_on_rq_queued(rq->stop);
+}
+
+static inline bool sched_dl_runnable(struct rq *rq)
+{
+	return rq->dl.dl_nr_running > 0;
+}
+
+static inline bool sched_rt_runnable(struct rq *rq)
+{
+	return rq->rt.rt_queued > 0;
+}
+
+static inline bool sched_fair_runnable(struct rq *rq)
+{
+	return rq->cfs.nr_running > 0;
+}
 
 #ifdef CONFIG_SMP
 
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 7e1cee4e65b2..c0640739e05e 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -15,6 +15,12 @@ select_task_rq_stop(struct task_struct *p, int cpu, int sd_flag, int flags)
 {
 	return task_cpu(p); /* stop tasks as never migrate */
 }
+
+static int
+balance_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+{
+	return sched_stop_runnable(rq);
+}
 #endif /* CONFIG_SMP */
 
 static void
@@ -31,16 +37,13 @@ static void set_next_task_stop(struct rq *rq, struct task_struct *stop)
 static struct task_struct *
 pick_next_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
 {
-	struct task_struct *stop = rq->stop;
-
 	WARN_ON_ONCE(prev || rf);
 
-	if (!stop || !task_on_rq_queued(stop))
+	if (!sched_stop_runnable(rq))
 		return NULL;
 
-	set_next_task_stop(rq, stop);
-
-	return stop;
+	set_next_task_stop(rq, rq->stop);
+	return rq->stop;
 }
 
 static void
@@ -60,7 +63,7 @@ static void yield_task_stop(struct rq *rq)
 	BUG(); /* the stop task should never yield, its pointless. */
 }
 
-static void put_prev_task_stop(struct rq *rq, struct task_struct *prev, struct rq_flags *rf)
+static void put_prev_task_stop(struct rq *rq, struct task_struct *prev)
 {
 	struct task_struct *curr = rq->curr;
 	u64 delta_exec;
@@ -129,6 +132,7 @@ const struct sched_class stop_sched_class = {
 	.set_next_task		= set_next_task_stop,
 
 #ifdef CONFIG_SMP
+	.balance		= balance_stop,
 	.select_task_rq		= select_task_rq_stop,
 	.set_cpus_allowed	= set_cpus_allowed_common,
 #endif