author     David S. Miller <davem@davemloft.net>   2014-11-21 22:28:24 -0500
committer  David S. Miller <davem@davemloft.net>   2014-11-21 22:28:24 -0500
commit     1459143386c5d868c87903b8d433a52cffcf3e66
tree       e7878a550aaf6a3af5e84f4258bbcc3bbdd20fef /kernel
parent     53b15ef3c2a6bac8e3d9bb58c5689d731ed9593b
parent     8a84e01e147f44111988f9d8ccd2eaa30215a0f2
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Conflicts:
drivers/net/ieee802154/fakehard.c
A bug fix went into 'net' for ieee802154/fakehard.c, which is removed
in 'net-next'.
A build fix from Stephen Rothwell for openvswitch is folded into the merge:
the logging macros take a new initial 'log' argument, and a new call site was
added in 'net', so when we merge it in here we have to explicitly add the new
'log' argument to that call or the build fails.
Signed-off-by: David S. Miller <davem@davemloft.net>
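
For illustration only, a hypothetical call site of the kind the openvswitch build
fix touches (a sketch, assuming the logging macro changed from OVS_NLERR(fmt, ...)
to OVS_NLERR(log, fmt, ...); the message text and call site below are invented,
not taken from the merge):

/* Call merged in from 'net', written against the old macro signature: */
OVS_NLERR("example error message");

/* Fixed up in the merge to pass the new initial 'log' argument: */
OVS_NLERR(log, "example error message");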
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c            |  8
-rw-r--r--  kernel/power/suspend.c          |  4
-rw-r--r--  kernel/sched/core.c             | 63
-rw-r--r--  kernel/sched/deadline.c         |  2
-rw-r--r--  kernel/sched/fair.c             | 14
-rw-r--r--  kernel/sched/rt.c               |  2
-rw-r--r--  kernel/sched/sched.h            |  2
-rw-r--r--  kernel/time/posix-cpu-timers.c  |  2
8 files changed, 49 insertions(+), 48 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2b02c9fda790..1cd5eef1fcdd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 
 	if (!task) {
 		/*
-		 * Per cpu events are removed via an smp call and
-		 * the removal is always successful.
+		 * Per cpu events are removed via an smp call. The removal can
+		 * fail if the CPU is currently offline, but in that case we
+		 * already called __perf_remove_from_context from
+		 * perf_event_exit_cpu.
 		 */
 		cpu_function_call(event->cpu, __perf_remove_from_context, &re);
 		return;
@@ -8117,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 
 static void __perf_event_exit_context(void *__info)
 {
-	struct remove_event re = { .detach_group = false };
+	struct remove_event re = { .detach_group = true };
 	struct perf_event_context *ctx = __info;
 
 	perf_pmu_rotate_stop(ctx->pmu);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4ca9a33ff620..c347e3ce3a55 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -146,7 +146,7 @@ static int platform_suspend_prepare(suspend_state_t state)
 
 static int platform_suspend_prepare_late(suspend_state_t state)
 {
-	return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ?
+	return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
 		freeze_ops->prepare() : 0;
 }
 
@@ -164,7 +164,7 @@ static void platform_resume_noirq(suspend_state_t state)
 
 static void platform_resume_early(suspend_state_t state)
 {
-	if (state == PM_SUSPEND_FREEZE && freeze_ops->restore)
+	if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
 		freeze_ops->restore();
 }
 
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 240157c13ddc..24beb9bb4c3e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat);
 EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
 
 /*
- * Return any ns on the sched_clock that have not yet been accounted in
- * @p in case that task is currently running.
- *
- * Called with task_rq_lock() held on @rq.
- */
-static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
-{
-	u64 ns = 0;
-
-	/*
-	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
-	 * project cycles that may never be accounted to this
-	 * thread, breaking clock_gettime().
-	 */
-	if (task_current(rq, p) && task_on_rq_queued(p)) {
-		update_rq_clock(rq);
-		ns = rq_clock_task(rq) - p->se.exec_start;
-		if ((s64)ns < 0)
-			ns = 0;
-	}
-
-	return ns;
-}
-
-unsigned long long task_delta_exec(struct task_struct *p)
-{
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns = 0;
-
-	rq = task_rq_lock(p, &flags);
-	ns = do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
-/*
  * Return accounted runtime for the task.
  * In case the task is currently running, return the runtime plus current's
  * pending runtime that have not been accounted yet.
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 {
 	unsigned long flags;
 	struct rq *rq;
-	u64 ns = 0;
+	u64 ns;
 
 #if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
 	/*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 #endif
 
 	rq = task_rq_lock(p, &flags);
-	ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq);
+	/*
+	 * Must be ->curr _and_ ->on_rq. If dequeued, we would
+	 * project cycles that may never be accounted to this
+	 * thread, breaking clock_gettime().
+	 */
+	if (task_current(rq, p) && task_on_rq_queued(p)) {
+		update_rq_clock(rq);
+		p->sched_class->update_curr(rq);
+	}
+	ns = p->se.sum_exec_runtime;
 	task_rq_unlock(rq, p, &flags);
 
 	return ns;
@@ -6368,6 +6339,10 @@ static void sched_init_numa(void)
 		if (!sched_debug())
 			break;
 	}
+
+	if (!level)
+		return;
+
 	/*
 	 * 'level' contains the number of unique distances, excluding the
 	 * identity distance node_distance(i,i).
@@ -7444,8 +7419,12 @@ void sched_move_task(struct task_struct *tsk)
 	if (unlikely(running))
 		put_prev_task(rq, tsk);
 
-	tg = container_of(task_css_check(tsk, cpu_cgrp_id,
-				lockdep_is_held(&tsk->sighand->siglock)),
+	/*
+	 * All callers are synchronized by task_rq_lock(); we do not use RCU
+	 * which is pointless here. Thus, we pass "true" to task_css_check()
+	 * to prevent lockdep warnings.
+	 */
+	tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
 			  struct task_group, css);
 	tg = autogroup_task_group(tsk, tg);
 	tsk->sched_task_group = tg;
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 5285332392d5..28fa9d9e9201 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -1701,4 +1701,6 @@ const struct sched_class dl_sched_class = {
 	.prio_changed = prio_changed_dl,
 	.switched_from = switched_from_dl,
 	.switched_to = switched_to_dl,
+
+	.update_curr = update_curr_dl,
 };
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 34baa60f8a7b..ef2b104b254c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
 	account_cfs_rq_runtime(cfs_rq, delta_exec);
 }
 
+static void update_curr_fair(struct rq *rq)
+{
+	update_curr(cfs_rq_of(&rq->curr->se));
+}
+
 static inline void
 update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
@@ -1180,6 +1185,13 @@ static void task_numa_compare(struct task_numa_env *env,
 	raw_spin_unlock_irq(&dst_rq->lock);
 
 	/*
+	 * Because we have preemption enabled we can get migrated around and
+	 * end try selecting ourselves (current == env->p) as a swap candidate.
+	 */
+	if (cur == env->p)
+		goto unlock;
+
+	/*
 	 * "imp" is the fault differential for the source task between the
 	 * source and destination node. Calculate the total differential for
 	 * the source task and potential destination task. The more negative
@@ -7949,6 +7961,8 @@ const struct sched_class fair_sched_class = {
 
 	.get_rr_interval = get_rr_interval_fair,
 
+	.update_curr = update_curr_fair,
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	.task_move_group = task_move_group_fair,
 #endif
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d024e6ce30ba..20bca398084a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2128,6 +2128,8 @@ const struct sched_class rt_sched_class = {
 
 	.prio_changed = prio_changed_rt,
 	.switched_to = switched_to_rt,
+
+	.update_curr = update_curr_rt,
 };
 
 #ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 24156c8434d1..2df8ef067cc5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1135,6 +1135,8 @@ struct sched_class {
 	unsigned int (*get_rr_interval) (struct rq *rq,
 					 struct task_struct *task);
 
+	void (*update_curr) (struct rq *rq);
+
 #ifdef CONFIG_FAIR_GROUP_SCHED
 	void (*task_move_group) (struct task_struct *p, int on_rq);
 #endif
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 492b986195d5..a16b67859e2a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -553,7 +553,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
 		*sample = cputime_to_expires(cputime.utime);
 		break;
 	case CPUCLOCK_SCHED:
-		*sample = cputime.sum_exec_runtime + task_delta_exec(p);
+		*sample = cputime.sum_exec_runtime;
 		break;
 	}
 	return 0;