Diffstat (limited to 'kernel')
 kernel/irq/chip.c         |  2 +-
 kernel/irq/irqdomain.c    |  6 +++++-
 kernel/posix-cpu-timers.c |  5 +++--
 kernel/ptrace.c           | 23 ++++++++++-------------
 kernel/resource.c         |  7 ++++++-
 kernel/sched.c            | 26 +-------------------------
 kernel/sched_rt.c         |  4 ++--
 kernel/taskstats.c        |  1 +
 kernel/tsacct.c           | 15 ++++++++-------
 kernel/workqueue.c        |  7 ++++++-
 10 files changed, 43 insertions(+), 53 deletions(-)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index d5a3009da71a..dc5114b4c16c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -178,7 +178,7 @@ void irq_shutdown(struct irq_desc *desc)
 	desc->depth = 1;
 	if (desc->irq_data.chip->irq_shutdown)
 		desc->irq_data.chip->irq_shutdown(&desc->irq_data);
-	if (desc->irq_data.chip->irq_disable)
+	else if (desc->irq_data.chip->irq_disable)
 		desc->irq_data.chip->irq_disable(&desc->irq_data);
 	else
 		desc->irq_data.chip->irq_mask(&desc->irq_data);
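
The old code called irq_shutdown() and then still fell through into the disable/mask checks, so a chip providing irq_shutdown() could be shut down and then disabled or masked again; the `else if` turns the three callbacks into a strict preference chain where exactly one runs. A minimal standalone sketch of the fixed dispatch (simplified stand-in types, not the kernel's):

    #include <stdio.h>

    struct chip {
        void (*irq_shutdown)(void);
        void (*irq_disable)(void);
        void (*irq_mask)(void);
    };

    static void do_shutdown(struct chip *c)
    {
        if (c->irq_shutdown)
            c->irq_shutdown();        /* most specific callback wins */
        else if (c->irq_disable)      /* fallback: no irq_shutdown */
            c->irq_disable();
        else
            c->irq_mask();            /* last resort: plain masking */
    }

    static void shut(void) { puts("shutdown"); }
    static void mask(void) { puts("mask"); }

    int main(void)
    {
        struct chip full = { shut, NULL, mask };
        struct chip bare = { NULL, NULL, mask };

        do_shutdown(&full);   /* prints "shutdown" only, no extra mask call */
        do_shutdown(&bare);   /* prints "mask" */
        return 0;
    }
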
diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c
index d5828da3fd38..b57a3776de44 100644
--- a/kernel/irq/irqdomain.c
+++ b/kernel/irq/irqdomain.c
@@ -29,7 +29,11 @@ void irq_domain_add(struct irq_domain *domain)
 	 */
 	for (hwirq = 0; hwirq < domain->nr_irq; hwirq++) {
 		d = irq_get_irq_data(irq_domain_to_irq(domain, hwirq));
-		if (d || d->domain) {
+		if (!d) {
+			WARN(1, "error: assigning domain to non existant irq_desc");
+			return;
+		}
+		if (d->domain) {
 			/* things are broken; just report, don't clean up */
 			WARN(1, "error: irq_desc already assigned to a domain");
 			return;
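
The replaced test `if (d || d->domain)` was doubly wrong: it took the error branch for every valid descriptor, and when `d` was NULL the `d->domain` operand dereferenced a null pointer. Splitting it checks for the missing descriptor before any dereference. A standalone sketch of the corrected ordering (simplified types and a hypothetical assign_domain() helper, not the kernel API):

    #include <stdio.h>

    struct irq_data { void *domain; };

    static int assign_domain(struct irq_data *d, void *domain)
    {
        if (!d) {                 /* no descriptor: bail before any deref */
            fprintf(stderr, "no irq_desc\n");
            return -1;
        }
        if (d->domain) {          /* descriptor already owned */
            fprintf(stderr, "already assigned\n");
            return -1;
        }
        d->domain = domain;
        return 0;
    }

    int main(void)
    {
        struct irq_data d = { NULL };
        int dom;

        assign_domain(NULL, &dom);   /* rejected safely, no crash */
        assign_domain(&d, &dom);     /* succeeds */
        assign_domain(&d, &dom);     /* rejected: already assigned */
        return 0;
    }
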
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 58f405b581e7..c8008dd58ef2 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -250,7 +250,7 @@ void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times)
 	do {
 		times->utime = cputime_add(times->utime, t->utime);
 		times->stime = cputime_add(times->stime, t->stime);
-		times->sum_exec_runtime += t->se.sum_exec_runtime;
+		times->sum_exec_runtime += task_sched_runtime(t);
 	} while_each_thread(tsk, t);
 out:
 	rcu_read_unlock();
@@ -312,7 +312,8 @@ static int cpu_clock_sample_group(const clockid_t which_clock,
 		cpu->cpu = cputime.utime;
 		break;
 	case CPUCLOCK_SCHED:
-		cpu->sched = thread_group_sched_runtime(p);
+		thread_group_cputime(p, &cputime);
+		cpu->sched = cputime.sum_exec_runtime;
 		break;
 	}
 	return 0;
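
task_sched_runtime() returns a thread's accounted runtime plus any delta still pending on the CPU, so summing it per thread stops group clocks from undercounting currently running threads; the second hunk then derives CPUCLOCK_SCHED from that same sum, which lets thread_group_sched_runtime() go away entirely (see the kernel/sched.c hunk below). A toy illustration of the difference, with made-up numbers:

    #include <stdio.h>

    struct thread {
        unsigned long long sum_exec_runtime;  /* accounted ns */
        unsigned long long pending;           /* on-CPU, not yet accounted */
    };

    /* stand-in for task_sched_runtime(t) */
    static unsigned long long sched_runtime(const struct thread *t)
    {
        return t->sum_exec_runtime + t->pending;
    }

    int main(void)
    {
        struct thread group[] = { { 1000, 0 }, { 2000, 150 } };
        unsigned long long raw = 0, full = 0;

        for (int i = 0; i < 2; i++) {
            raw  += group[i].sum_exec_runtime;  /* old: misses the 150ns */
            full += sched_runtime(&group[i]);   /* new: complete total */
        }
        printf("raw=%llu full=%llu\n", raw, full);  /* raw=3000 full=3150 */
        return 0;
    }
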
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 9de3ecfd20f9..a70d2a5d8c7b 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -744,20 +744,17 @@ int ptrace_request(struct task_struct *child, long request,
 			break;
 
 		si = child->last_siginfo;
-		if (unlikely(!si || si->si_code >> 8 != PTRACE_EVENT_STOP))
-			break;
-
-		child->jobctl |= JOBCTL_LISTENING;
-
-		/*
-		 * If NOTIFY is set, it means event happened between start
-		 * of this trap and now.  Trigger re-trap immediately.
-		 */
-		if (child->jobctl & JOBCTL_TRAP_NOTIFY)
-			signal_wake_up(child, true);
-
+		if (likely(si && (si->si_code >> 8) == PTRACE_EVENT_STOP)) {
+			child->jobctl |= JOBCTL_LISTENING;
+			/*
+			 * If NOTIFY is set, it means event happened between
+			 * start of this trap and now.  Trigger re-trap.
+			 */
+			if (child->jobctl & JOBCTL_TRAP_NOTIFY)
+				signal_wake_up(child, true);
+			ret = 0;
+		}
 		unlock_task_sighand(child, &flags);
-		ret = 0;
 		break;
 
 	case PTRACE_DETACH:	 /* detach a process that was attached. */
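
Beyond the restructuring, the behavioral fix visible in the hunk is that unlock_task_sighand() now runs even when the siginfo check fails; previously the early `break` returned with the sighand lock still held. A sketch of that control-flow pattern (pthreads standing in for the sighand lock, not the kernel API):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static int listen_op(bool valid)
    {
        int ret = -1;

        pthread_mutex_lock(&lock);
        if (valid) {
            /* ... success-path work, done under the lock ... */
            ret = 0;
        }
        pthread_mutex_unlock(&lock);    /* reached on every path */
        return ret;
    }

    int main(void)
    {
        printf("%d %d\n", listen_op(true), listen_op(false));  /* 0 -1 */
        return 0;
    }
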
diff --git a/kernel/resource.c b/kernel/resource.c
index 3b3cedc52592..c8dc249da5ce 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -419,6 +419,9 @@ static int __find_resource(struct resource *root, struct resource *old,
 		else
 			tmp.end = root->end;
 
+		if (tmp.end < tmp.start)
+			goto next;
+
 		resource_clip(&tmp, constraint->min, constraint->max);
 		arch_remove_reservations(&tmp);
 
@@ -436,8 +439,10 @@ static int __find_resource(struct resource *root, struct resource *old,
 				return 0;
 			}
 		}
-		if (!this)
+
+next:		if (!this || this->end == root->end)
 			break;
+
 		if (this != old)
 			tmp.start = this->end + 1;
 		this = this->sibling;
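
When allocated siblings leave no gap between them, the candidate window computed from their bounds can come out inverted (tmp.end < tmp.start); the new guard skips such windows before resource_clip() and arch_remove_reservations() do arithmetic on them, and the reworked exit condition also stops the walk once a sibling already reaches root->end. A tiny illustration of the inverted-window case (hypothetical gap_between() helper):

    #include <stdio.h>

    struct range { unsigned long start, end; };

    /* window between two allocations: [prev_end + 1, next_start - 1] */
    static struct range gap_between(unsigned long prev_end,
                                    unsigned long next_start)
    {
        struct range r = { prev_end + 1, next_start - 1 };
        return r;
    }

    int main(void)
    {
        struct range ok  = gap_between(0x0fff, 0x3000);  /* real gap */
        struct range bad = gap_between(0x0fff, 0x1000);  /* adjacent */

        printf("ok:  usable=%d\n", ok.end  >= ok.start);   /* 1 */
        printf("bad: usable=%d\n", bad.end >= bad.start);  /* 0: skip it */
        return 0;
    }
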
diff --git a/kernel/sched.c b/kernel/sched.c
index ec5f472bc5b9..b50b0f0c9aa9 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3725,30 +3725,6 @@ unsigned long long task_sched_runtime(struct task_struct *p)
 }
 
 /*
- * Return sum_exec_runtime for the thread group.
- * In case the task is currently running, return the sum plus current's
- * pending runtime that have not been accounted yet.
- *
- * Note that the thread group might have other running tasks as well,
- * so the return value not includes other pending runtime that other
- * running tasks might have.
- */
-unsigned long long thread_group_sched_runtime(struct task_struct *p)
-{
-	struct task_cputime totals;
-	unsigned long flags;
-	struct rq *rq;
-	u64 ns;
-
-	rq = task_rq_lock(p, &flags);
-	thread_group_cputime(p, &totals);
-	ns = totals.sum_exec_runtime + do_task_delta_exec(p, rq);
-	task_rq_unlock(rq, p, &flags);
-
-	return ns;
-}
-
-/*
  * Account user cpu time to a process.
  * @p: the process that the cpu time gets accounted to
  * @cputime: the cpu time spent in user space since the last update
@@ -4372,7 +4348,7 @@ static inline void sched_submit_work(struct task_struct *tsk)
 		blk_schedule_flush_plug(tsk);
 }
 
-asmlinkage void schedule(void)
+asmlinkage void __sched schedule(void)
 {
 	struct task_struct *tsk = current;
 
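
thread_group_sched_runtime() loses its last caller with the posix-cpu-timers change above and is deleted outright. The second hunk tags schedule() with __sched, which places the function in the .sched.text section so stack walkers such as get_wchan() recognize and skip scheduler frames. A sketch of that kind of section tagging with GCC attributes (illustrative; the assumption here is that __sched expands to roughly this attribute, with the real macro living in include/linux/sched.h):

    #include <stdio.h>

    /* assumed expansion, for illustration only */
    #define __sched __attribute__((__section__(".sched.text")))

    static __sched void my_schedule(void)
    {
        puts("scheduling");
    }

    int main(void)
    {
        my_schedule();   /* emitted into .sched.text instead of .text */
        return 0;
    }
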
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 97540f0c9e47..af1177858be3 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1050,7 +1050,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
 	 */
 	if (curr && unlikely(rt_task(curr)) &&
 	    (curr->rt.nr_cpus_allowed < 2 ||
-	     curr->prio < p->prio) &&
+	     curr->prio <= p->prio) &&
 	    (p->rt.nr_cpus_allowed > 1)) {
 		int target = find_lowest_rq(p);
 
@@ -1581,7 +1581,7 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
 	    p->rt.nr_cpus_allowed > 1 &&
 	    rt_task(rq->curr) &&
 	    (rq->curr->rt.nr_cpus_allowed < 2 ||
-	     rq->curr->prio < p->prio))
+	     rq->curr->prio <= p->prio))
 		push_rt_tasks(rq);
 }
 
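
Kernel priority values are inverted (a lower prio number means a higher priority), so switching `<` to `<=` makes the redirect logic also fire when the running RT task and the waking task have equal priority: instead of queuing behind an equal-priority current task, the wakee gets pushed to another CPU it is allowed on. The comparison in shorthand:

    #include <stdio.h>

    /* lower value = higher priority, as in the kernel */
    static int try_push_elsewhere(int curr_prio, int wakee_prio)
    {
        return curr_prio <= wakee_prio;   /* was: curr_prio < wakee_prio */
    }

    int main(void)
    {
        printf("%d\n", try_push_elsewhere(10, 10)); /* 1: equal prio now pushes */
        printf("%d\n", try_push_elsewhere(10, 5));  /* 0: wakee outranks curr */
        return 0;
    }
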
diff --git a/kernel/taskstats.c b/kernel/taskstats.c
index e19ce1454ee1..e66046456f4f 100644
--- a/kernel/taskstats.c
+++ b/kernel/taskstats.c
@@ -655,6 +655,7 @@ static struct genl_ops taskstats_ops = {
 	.cmd	= TASKSTATS_CMD_GET,
 	.doit	= taskstats_user_cmd,
 	.policy	= taskstats_cmd_get_policy,
+	.flags	= GENL_ADMIN_PERM,
 };
 
 static struct genl_ops cgroupstats_ops = {
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index 24dc60d9fa1f..5bbfac85866e 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -78,6 +78,7 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
 
 #define KB 1024
 #define MB (1024*KB)
+#define KB_MASK (~(KB-1))
 /*
  * fill in extended accounting fields
  */
@@ -95,14 +96,14 @@ void xacct_add_tsk(struct taskstats *stats, struct task_struct *p)
 		stats->hiwater_vm = get_mm_hiwater_vm(mm) * PAGE_SIZE / KB;
 		mmput(mm);
 	}
-	stats->read_char = p->ioac.rchar;
-	stats->write_char = p->ioac.wchar;
-	stats->read_syscalls = p->ioac.syscr;
-	stats->write_syscalls = p->ioac.syscw;
+	stats->read_char = p->ioac.rchar & KB_MASK;
+	stats->write_char = p->ioac.wchar & KB_MASK;
+	stats->read_syscalls = p->ioac.syscr & KB_MASK;
+	stats->write_syscalls = p->ioac.syscw & KB_MASK;
 #ifdef CONFIG_TASK_IO_ACCOUNTING
-	stats->read_bytes = p->ioac.read_bytes;
-	stats->write_bytes = p->ioac.write_bytes;
-	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes;
+	stats->read_bytes = p->ioac.read_bytes & KB_MASK;
+	stats->write_bytes = p->ioac.write_bytes & KB_MASK;
+	stats->cancelled_write_bytes = p->ioac.cancelled_write_bytes & KB_MASK;
 #else
 	stats->read_bytes = 0;
 	stats->write_bytes = 0;
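
KB_MASK is ~(KB-1) with KB = 1024, i.e. it clears the low ten bits, so every exported counter is rounded down to the nearest multiple of 1024; userspace now sees only 1KB-granular values rather than exact byte and event counts. The arithmetic in isolation:

    #include <stdio.h>

    #define KB      1024
    #define KB_MASK (~(KB - 1))   /* ...111110000000000 in binary */

    int main(void)
    {
        unsigned long long rchar = 5309;
        /* the int mask sign-extends to 64 bits, keeping the high bits set */
        printf("%llu -> %llu\n", rchar, rchar & KB_MASK);  /* 5309 -> 5120 */
        return 0;
    }
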
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 25fb1b0e53fa..1783aabc6128 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -2412,8 +2412,13 @@ reflush:
 
 	for_each_cwq_cpu(cpu, wq) {
 		struct cpu_workqueue_struct *cwq = get_cwq(cpu, wq);
+		bool drained;
 
-		if (!cwq->nr_active && list_empty(&cwq->delayed_works))
+		spin_lock_irq(&cwq->gcwq->lock);
+		drained = !cwq->nr_active && list_empty(&cwq->delayed_works);
+		spin_unlock_irq(&cwq->gcwq->lock);
+
+		if (drained)
 			continue;
 
 		if (++flush_cnt == 10 ||
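
The drain loop previously sampled cwq->nr_active and the delayed_works list without holding gcwq->lock, so the two reads could interleave with concurrent queueing and produce an inconsistent drained verdict; taking the lock around both reads yields one coherent snapshot. The same read-under-lock pattern in miniature (pthreads standing in for the gcwq spinlock, simplified fields):

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct cwq {
        pthread_mutex_t lock;
        int nr_active;
        int nr_delayed;
    };

    static bool cwq_drained(struct cwq *c)
    {
        bool drained;

        pthread_mutex_lock(&c->lock);
        /* both fields read in one critical section: consistent snapshot */
        drained = !c->nr_active && !c->nr_delayed;
        pthread_mutex_unlock(&c->lock);
        return drained;
    }

    int main(void)
    {
        struct cwq c = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };
        printf("drained=%d\n", cwq_drained(&c));  /* drained=1 */
        return 0;
    }
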
