Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c            12
-rw-r--r--  kernel/posix-cpu-timers.c   7
-rw-r--r--  kernel/sched.c             13
-rw-r--r--  kernel/sched_debug.c        5
-rw-r--r--  kernel/sched_stats.h       15
5 files changed, 34 insertions, 18 deletions
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3e00526f52e..81fc6791a29 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -587,7 +587,6 @@ static int generate_sched_domains(cpumask_t **domains,
 	int ndoms;		/* number of sched domains in result */
 	int nslot;		/* next empty doms[] cpumask_t slot */
 
-	ndoms = 0;
 	doms = NULL;
 	dattr = NULL;
 	csa = NULL;
@@ -674,10 +673,8 @@ restart:
 	 * Convert <csn, csa> to <ndoms, doms> and populate cpu masks.
 	 */
 	doms = kmalloc(ndoms * sizeof(cpumask_t), GFP_KERNEL);
-	if (!doms) {
-		ndoms = 0;
+	if (!doms)
 		goto done;
-	}
 
 	/*
 	 * The rest of the code, including the scheduler, can deal with
@@ -732,6 +729,13 @@ restart:
 done:
 	kfree(csa);
 
+	/*
+	 * Fallback to the default domain if kmalloc() failed.
+	 * See comments in partition_sched_domains().
+	 */
+	if (doms == NULL)
+		ndoms = 1;
+
 	*domains = doms;
 	*attributes = dattr;
 	return ndoms;
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 153dcb2639c..895337b16a2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1308,9 +1308,10 @@ static inline int task_cputime_expired(const struct task_cputime *sample,
  */
 static inline int fastpath_timer_check(struct task_struct *tsk)
 {
-	struct signal_struct *sig = tsk->signal;
+	struct signal_struct *sig;
 
-	if (unlikely(!sig))
+	/* tsk == current, ensure it is safe to use ->signal/sighand */
+	if (unlikely(tsk->exit_state))
 		return 0;
 
 	if (!task_cputime_zero(&tsk->cputime_expires)) {
@@ -1323,6 +1324,8 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
 		if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
 			return 1;
 	}
+
+	sig = tsk->signal;
 	if (!task_cputime_zero(&sig->cputime_expires)) {
 		struct task_cputime group_sample;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index c94baf2969e..9b1e79371c2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -7789,13 +7789,14 @@ static int dattrs_equal(struct sched_domain_attr *cur, int idx_cur,
  *
  * The passed in 'doms_new' should be kmalloc'd. This routine takes
  * ownership of it and will kfree it when done with it. If the caller
- * failed the kmalloc call, then it can pass in doms_new == NULL,
- * and partition_sched_domains() will fallback to the single partition
- * 'fallback_doms', it also forces the domains to be rebuilt.
+ * failed the kmalloc call, then it can pass in doms_new == NULL &&
+ * ndoms_new == 1, and partition_sched_domains() will fallback to
+ * the single partition 'fallback_doms', it also forces the domains
+ * to be rebuilt.
  *
- * If doms_new==NULL it will be replaced with cpu_online_map.
- * ndoms_new==0 is a special case for destroying existing domains.
- * It will not create the default domain.
+ * If doms_new == NULL it will be replaced with cpu_online_map.
+ * ndoms_new == 0 is a special case for destroying existing domains,
+ * and it will not create the default domain.
  *
  * Call with hotplug lock held
  */
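Note on the <doms_new, ndoms_new> convention documented in the hunk above: the sketch below is a minimal userspace model of that contract, not kernel code. struct domain, fallback_doms, build_domains() and apply_domains() are made-up stand-ins for cpumask_t, the scheduler's fallback_doms, generate_sched_domains() and partition_sched_domains(); the only point it illustrates is the <NULL, 1> fallback when the producer's allocation fails.

#include <stdio.h>
#include <stdlib.h>

struct domain { int cpus; };		/* stand-in for cpumask_t */
static struct domain fallback_doms;	/* stand-in for the single default domain */

/* Models generate_sched_domains(): on allocation failure, report <1, NULL>. */
static int build_domains(struct domain **out, int wanted)
{
	struct domain *doms = malloc(wanted * sizeof(*doms));

	if (!doms) {
		*out = NULL;
		return 1;		/* fall back to the default domain */
	}
	*out = doms;
	return wanted;
}

/* Models partition_sched_domains(): NULL means "use fallback_doms". */
static void apply_domains(int ndoms, struct domain *doms)
{
	if (!doms) {
		ndoms = 1;
		doms = &fallback_doms;
	}
	printf("rebuilding %d sched domain(s)\n", ndoms);
	if (doms != &fallback_doms)
		free(doms);		/* this side owns the allocated array */
}

int main(void)
{
	struct domain *doms;
	int ndoms = build_domains(&doms, 4);

	apply_domains(ndoms, doms);
	return 0;
}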
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 48ecc51e770..26ed8e3d1c1 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -423,10 +423,11 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 #undef __P
 
 	{
+		unsigned int this_cpu = raw_smp_processor_id();
 		u64 t0, t1;
 
-		t0 = sched_clock();
-		t1 = sched_clock();
+		t0 = cpu_clock(this_cpu);
+		t1 = cpu_clock(this_cpu);
 		SEQ_printf(m, "%-35s:%21Ld\n",
 			   "clock-delta", (long long)(t1-t0));
 	}
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index ee71bec1da6..7dbf72a2b02 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -298,9 +298,11 @@ static inline void account_group_user_time(struct task_struct *tsk,
 {
 	struct signal_struct *sig;
 
-	sig = tsk->signal;
-	if (unlikely(!sig))
+	/* tsk == current, ensure it is safe to use ->signal */
+	if (unlikely(tsk->exit_state))
 		return;
+
+	sig = tsk->signal;
 	if (sig->cputime.totals) {
 		struct task_cputime *times;
 
@@ -325,9 +327,11 @@ static inline void account_group_system_time(struct task_struct *tsk,
 {
 	struct signal_struct *sig;
 
-	sig = tsk->signal;
-	if (unlikely(!sig))
+	/* tsk == current, ensure it is safe to use ->signal */
+	if (unlikely(tsk->exit_state))
 		return;
+
+	sig = tsk->signal;
 	if (sig->cputime.totals) {
 		struct task_cputime *times;
 
@@ -353,8 +357,11 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
 	struct signal_struct *sig;
 
 	sig = tsk->signal;
+	/* see __exit_signal()->task_rq_unlock_wait() */
+	barrier();
 	if (unlikely(!sig))
 		return;
+
 	if (sig->cputime.totals) {
 		struct task_cputime *times;
 
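The account_group_exec_runtime() hunk above keeps the NULL check but inserts a barrier() that pairs with __exit_signal()->task_rq_unlock_wait(). As a rough userspace illustration of the load-once-then-check shape of that code (an assumption-laden sketch, not the kernel's actual synchronization): barrier() is taken to be the usual GCC memory-clobber compiler barrier, and shared_stats and add_runtime() are invented names.

#include <stdio.h>

#define barrier() __asm__ __volatile__("" ::: "memory")	/* compiler barrier */

struct stats { unsigned long runtime; };

/* May be cleared by a teardown path; readers must tolerate NULL. */
static struct stats *shared_stats;

static void add_runtime(unsigned long delta)
{
	struct stats *s = shared_stats;	/* load the shared pointer once */

	barrier();			/* keep the compiler from re-reading shared_stats later */
	if (!s)
		return;
	s->runtime += delta;		/* only the local copy is dereferenced */
}

int main(void)
{
	static struct stats st;

	shared_stats = &st;
	add_runtime(1000);
	printf("runtime: %lu\n", st.runtime);
	return 0;
}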
