Diffstat (limited to 'kernel')

 kernel/compat.c        | 21 +++++++++++++++++++++
 kernel/fork.c          |  2 +-
 kernel/hw_breakpoint.c |  3 ++-
 kernel/kfifo.c         |  2 --
 kernel/module.c        |  4 ++++
 kernel/sched.c         |  8 ++++----
 kernel/sched_fair.c    |  8 ++++----
 kernel/smp.c           | 17 ++++++++++++++---
 kernel/workqueue.c     | 27 +++++++++++++++++----------
 9 files changed, 67 insertions(+), 25 deletions(-)
diff --git a/kernel/compat.c b/kernel/compat.c
index e167efce8423..c9e2ec0b34a8 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1126,3 +1126,24 @@ compat_sys_sysinfo(struct compat_sysinfo __user *info)
 
 	return 0;
 }
+
+/*
+ * Allocate user-space memory for the duration of a single system call,
+ * in order to marshall parameters inside a compat thunk.
+ */
+void __user *compat_alloc_user_space(unsigned long len)
+{
+	void __user *ptr;
+
+	/* If len would occupy more than half of the entire compat space... */
+	if (unlikely(len > (((compat_uptr_t)~0) >> 1)))
+		return NULL;
+
+	ptr = arch_compat_alloc_user_space(len);
+
+	if (unlikely(!access_ok(VERIFY_WRITE, ptr, len)))
+		return NULL;
+
+	return ptr;
+}
+EXPORT_SYMBOL_GPL(compat_alloc_user_space);
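The new helper is meant to be called from compat syscall thunks that need a scratch native-layout struct in user space; since it now performs the access_ok() check itself, callers get a NULL return instead of an unchecked pointer. A hypothetical sketch of the usage pattern (sys_frob, frob_args, and the len field are made up for illustration; only compat_alloc_user_space(), get_user(), and put_user() are real):

asmlinkage long compat_sys_frob(struct compat_frob_args __user *args32)
{
	struct frob_args __user *args;	/* native layout, on the user stack */
	compat_ulong_t len;

	args = compat_alloc_user_space(sizeof(*args));
	if (!args)
		return -EFAULT;

	/* Widen each 32-bit field into the native struct. */
	if (get_user(len, &args32->len) ||
	    put_user((unsigned long)len, &args->len))
		return -EFAULT;

	return sys_frob(args);	/* native handler sees a native struct */
}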
diff --git a/kernel/fork.c b/kernel/fork.c
index b7e9d60a675d..c445f8cc408d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -356,10 +356,10 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		if (IS_ERR(pol))
 			goto fail_nomem_policy;
 		vma_set_policy(tmp, pol);
+		tmp->vm_mm = mm;
 		if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
 		tmp->vm_flags &= ~VM_LOCKED;
-		tmp->vm_mm = mm;
 		tmp->vm_next = tmp->vm_prev = NULL;
 		file = tmp->vm_file;
 		if (file) {
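Note on the fork.c hunk: the assignment itself is unchanged, only its position moves. Earlier in dup_mmap() tmp is a byte copy of mpnt, so until this line runs tmp->vm_mm still points at the parent's mm; hoisting the assignment above anon_vma_fork() means the anon_vma code sees the child's mm, which is presumably what the rmap structures it builds need to reference.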
diff --git a/kernel/hw_breakpoint.c b/kernel/hw_breakpoint.c
index d71a987fd2bf..c7c2aed9e2dc 100644
--- a/kernel/hw_breakpoint.c
+++ b/kernel/hw_breakpoint.c
@@ -433,7 +433,8 @@ register_user_hw_breakpoint(struct perf_event_attr *attr,
 			    perf_overflow_handler_t triggered,
 			    struct task_struct *tsk)
 {
-	return perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);
+	return perf_event_create_kernel_counter(attr, -1, task_pid_vnr(tsk),
+						triggered);
 }
 EXPORT_SYMBOL_GPL(register_user_hw_breakpoint);
 
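The bug being fixed: tsk->pid is a global pid number, but perf_event_create_kernel_counter() resolves its pid argument in the caller's pid namespace (presumably via find_task_by_vpid() in this era), so the lookup went wrong when the target task lived inside a namespace; task_pid_vnr(tsk) is the value that round-trips. For context, a minimal sketch of an in-kernel caller of this API, assuming the 2.6.3x breakpoint interface (the handler and the watched address are placeholders):

#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>

/* Sketch: watch 4 bytes at addr for writes in task tsk. */
static struct perf_event *watch_write(struct task_struct *tsk,
				      unsigned long addr,
				      perf_overflow_handler_t handler)
{
	struct perf_event_attr attr;

	hw_breakpoint_init(&attr);	/* fill in breakpoint defaults */
	attr.bp_addr = addr;
	attr.bp_len  = HW_BREAKPOINT_LEN_4;
	attr.bp_type = HW_BREAKPOINT_W;

	return register_user_hw_breakpoint(&attr, handler, tsk);
}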
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 6b5580c57644..01a0700e873f 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -365,8 +365,6 @@ static unsigned int setup_sgl(struct __kfifo *fifo, struct scatterlist *sgl,
 	n = setup_sgl_buf(sgl, fifo->data + off, nents, l);
 	n += setup_sgl_buf(sgl + n, fifo->data, nents - n, len - l);
 
-	if (n)
-		sg_mark_end(sgl + n - 1);
 	return n;
 }
 
diff --git a/kernel/module.c b/kernel/module.c
index d0b5f8db11b4..ccd641991842 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -1537,6 +1537,7 @@ static int __unlink_module(void *_mod)
 {
 	struct module *mod = _mod;
 	list_del(&mod->list);
+	module_bug_cleanup(mod);
 	return 0;
 }
 
@@ -2625,6 +2626,7 @@ static struct module *load_module(void __user *umod,
 	if (err < 0)
 		goto ddebug;
 
+	module_bug_finalize(info.hdr, info.sechdrs, mod);
 	list_add_rcu(&mod->list, &modules);
 	mutex_unlock(&module_mutex);
 
@@ -2650,6 +2652,8 @@ static struct module *load_module(void __user *umod,
 	mutex_lock(&module_mutex);
 	/* Unlink carefully: kallsyms could be walking list. */
 	list_del_rcu(&mod->list);
+	module_bug_cleanup(mod);
+
 ddebug:
 	if (!mod->taints)
 		dynamic_debug_remove(info.debug);
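Taken together, the module.c hunks make BUG-table registration symmetric and covered by module_mutex: module_bug_finalize() now runs immediately before the module is published on the modules list, and module_bug_cleanup() runs on both teardown paths, the normal __unlink_module() and the load_module() error path, so a module that fails to load can no longer leave a stale entry behind.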
diff --git a/kernel/sched.c b/kernel/sched.c
index ed09d4f2a69c..dc85ceb90832 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3513,9 +3513,9 @@ void task_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	rtime = nsecs_to_cputime(p->se.sum_exec_runtime);
 
 	if (total) {
-		u64 temp;
+		u64 temp = rtime;
 
-		temp = (u64)(rtime * utime);
+		temp *= utime;
 		do_div(temp, total);
 		utime = (cputime_t)temp;
 	} else
@@ -3546,9 +3546,9 @@ void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st)
 	rtime = nsecs_to_cputime(cputime.sum_exec_runtime);
 
 	if (total) {
-		u64 temp;
+		u64 temp = rtime;
 
-		temp = (u64)(rtime * cputime.utime);
+		temp *= cputime.utime;
 		do_div(temp, total);
 		utime = (cputime_t)temp;
 	} else
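What the rewrite fixes is where the widening to 64 bits happens: cputime_t is an unsigned long, so on 32-bit kernels the old (u64)(rtime * utime) performs the multiply in 32 bits and wraps before the cast, while initializing a u64 first makes the multiply itself 64-bit. A standalone user-space sketch of the difference (the values are arbitrary, chosen only to force the wrap):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t rtime = 70000, utime = 70000;	/* stand-ins for 32-bit cputime_t */

	uint64_t wrong = (uint64_t)(rtime * utime);	/* 32-bit multiply wraps first */
	uint64_t right = (uint64_t)rtime;
	right *= utime;					/* 64-bit multiply, as in the patch */

	printf("wrong = %llu\n", (unsigned long long)wrong);	/* 605032704 */
	printf("right = %llu\n", (unsigned long long)right);	/* 4900000000 */
	return 0;
}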
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9b5b4f86b742..db3f674ca49d 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -54,13 +54,13 @@ enum sched_tunable_scaling sysctl_sched_tunable_scaling
  * Minimal preemption granularity for CPU-bound tasks:
  * (default: 2 msec * (1 + ilog(ncpus)), units: nanoseconds)
  */
-unsigned int sysctl_sched_min_granularity = 2000000ULL;
-unsigned int normalized_sysctl_sched_min_granularity = 2000000ULL;
+unsigned int sysctl_sched_min_granularity = 750000ULL;
+unsigned int normalized_sysctl_sched_min_granularity = 750000ULL;
 
 /*
  * is kept at sysctl_sched_latency / sysctl_sched_min_granularity
  */
-static unsigned int sched_nr_latency = 3;
+static unsigned int sched_nr_latency = 8;
 
 /*
  * After fork, child runs first. If set to 0 (default) then
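The new defaults still satisfy the invariant stated in the comment: assuming the era's default sysctl_sched_latency of 6 ms (an assumption, it is not shown in this hunk), sched_nr_latency = 6 ms / 0.75 ms = 8, exactly as the old value 3 was 6 ms / 2 ms. The "(default: 2 msec ...)" comment above is left stale by this hunk.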
@@ -3630,7 +3630,7 @@ static inline int nohz_kick_needed(struct rq *rq, int cpu)
 	if (time_before(now, nohz.next_balance))
 		return 0;
 
-	if (!rq->nr_running)
+	if (rq->idle_at_tick)
 		return 0;
 
 	first_pick_cpu = atomic_read(&nohz.first_pick_cpu);
diff --git a/kernel/smp.c b/kernel/smp.c
index 75c970c715d3..ed6aacfcb7ef 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -365,9 +365,10 @@ call:
 EXPORT_SYMBOL_GPL(smp_call_function_any);
 
 /**
- * __smp_call_function_single(): Run a function on another CPU
+ * __smp_call_function_single(): Run a function on a specific CPU
  * @cpu: The CPU to run on.
  * @data: Pre-allocated and setup data structure
+ * @wait: If true, wait until function has completed on specified CPU.
  *
  * Like smp_call_function_single(), but allow caller to pass in a
  * pre-allocated data structure. Useful for embedding @data inside
@@ -376,8 +377,10 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
 void __smp_call_function_single(int cpu, struct call_single_data *data,
 				int wait)
 {
-	csd_lock(data);
+	unsigned int this_cpu;
+	unsigned long flags;
 
+	this_cpu = get_cpu();
 	/*
 	 * Can deadlock when called with interrupts disabled.
 	 * We allow cpu's that are not yet online though, as no one else can
@@ -387,7 +390,15 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
 	WARN_ON_ONCE(cpu_online(smp_processor_id()) && wait && irqs_disabled()
 		     && !oops_in_progress);
 
-	generic_exec_single(cpu, data, wait);
+	if (cpu == this_cpu) {
+		local_irq_save(flags);
+		data->func(data->info);
+		local_irq_restore(flags);
+	} else {
+		csd_lock(data);
+		generic_exec_single(cpu, data, wait);
+	}
+	put_cpu();
 }
 
 /**
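The point of this interface is that the caller embeds the call_single_data in its own object, so the IPI path needs no allocation; with this change such callers also work when the target turns out to be the local CPU, where the function is simply run directly with interrupts disabled instead of going through the CSD lock and IPI machinery. A hypothetical caller sketch (my_req, my_func, do_remote_work, and kick_cpu are illustrative, not from this patch):

#include <linux/smp.h>

struct my_req {
	struct call_single_data csd;	/* embedded, no allocation needed */
	int arg;
};

/* Runs on the target CPU in hardirq context -- or, after this patch,
 * directly on the local CPU with interrupts disabled. */
static void my_func(void *info)
{
	struct my_req *req = info;

	do_remote_work(req->arg);	/* hypothetical worker */
}

static void kick_cpu(int cpu, struct my_req *req)
{
	req->csd.func = my_func;
	req->csd.info = req;
	__smp_call_function_single(cpu, &req->csd, 0);	/* don't wait */
}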
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 727f24e563ae..f77afd939229 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1,19 +1,26 @@
 /*
- * linux/kernel/workqueue.c
+ * kernel/workqueue.c - generic async execution with shared worker pool
  *
- * Generic mechanism for defining kernel helper threads for running
- * arbitrary tasks in process context.
+ * Copyright (C) 2002 Ingo Molnar
  *
- * Started by Ingo Molnar, Copyright (C) 2002
+ *   Derived from the taskqueue/keventd code by:
+ *     David Woodhouse <dwmw2@infradead.org>
+ *     Andrew Morton
+ *     Kai Petzke <wpp@marie.physik.tu-berlin.de>
+ *     Theodore Ts'o <tytso@mit.edu>
  *
- * Derived from the taskqueue/keventd code by:
+ *   Made to use alloc_percpu by Christoph Lameter.
  *
- *   David Woodhouse <dwmw2@infradead.org>
- *   Andrew Morton
- *   Kai Petzke <wpp@marie.physik.tu-berlin.de>
- *   Theodore Ts'o <tytso@mit.edu>
+ * Copyright (C) 2010 SUSE Linux Products GmbH
+ * Copyright (C) 2010 Tejun Heo <tj@kernel.org>
  *
- * Made to use alloc_percpu by Christoph Lameter.
+ * This is the generic async execution mechanism. Work items are
+ * executed in process context. The worker pool is shared and
+ * automatically managed. There is one worker pool for each CPU and
+ * one extra for works which are better served by workers which are
+ * not bound to any specific CPU.
+ *
+ * Please read Documentation/workqueue.txt for details.
 */
 
 #include <linux/module.h>
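The rewritten header describes the cmwq model this file implements: per-CPU shared pools plus one unbound pool. A minimal usage sketch of the public API, assuming this tree already exports system_unbound_wq from the same workqueue series (my_work_fn and example are illustrative):

#include <linux/kernel.h>
#include <linux/workqueue.h>

static void my_work_fn(struct work_struct *work)
{
	pr_info("running in process context on the shared pool\n");
}

static DECLARE_WORK(my_work, my_work_fn);

static void example(void)
{
	/* Per-CPU shared pool (system_wq): */
	schedule_work(&my_work);

	/* Or, for work with no CPU affinity benefit, the extra pool:
	 *	queue_work(system_unbound_wq, &my_work);
	 */
}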
