author		James Morris <jmorris@namei.org>	2011-04-19 07:32:41 -0400
committer	James Morris <jmorris@namei.org>	2011-04-19 07:32:41 -0400
commit		d4ab4e6a23f805abb8fc3cc34525eec3788aeca1 (patch)
tree		eefd82c155bc27469a85667d759cd90facf4a6e3 /kernel
parent		c0fa797ae6cd02ff87c0bfe0d509368a3b45640e (diff)
parent		96fd2d57b8252e16dfacf8941f7a74a6119197f5 (diff)
Merge branch 'master'; commit 'v2.6.39-rc3' into next
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/audit_tree.c	2
-rw-r--r--	kernel/auditsc.c	2
-rw-r--r--	kernel/cgroup.c	2
-rw-r--r--	kernel/cpu.c	2
-rw-r--r--	kernel/debug/debug_core.c	2
-rw-r--r--	kernel/debug/kdb/kdb_main.c	6
-rw-r--r--	kernel/debug/kdb/kdb_support.c	2
-rw-r--r--	kernel/exit.c	2
-rw-r--r--	kernel/futex.c	2
-rw-r--r--	kernel/irq/Kconfig	3
-rw-r--r--	kernel/irq/chip.c	4
-rw-r--r--	kernel/irq/manage.c	1
-rw-r--r--	kernel/irq/migration.c	2
-rw-r--r--	kernel/kexec.c	11
-rw-r--r--	kernel/kthread.c	2
-rw-r--r--	kernel/latencytop.c	2
-rw-r--r--	kernel/lockdep.c	4
-rw-r--r--	kernel/module.c	6
-rw-r--r--	kernel/mutex.c	2
-rw-r--r--	kernel/padata.c	8
-rw-r--r--	kernel/params.c	2
-rw-r--r--	kernel/perf_event.c	21
-rw-r--r--	kernel/pid.c	5
-rw-r--r--	kernel/posix-cpu-timers.c	2
-rw-r--r--	kernel/posix-timers.c	2
-rw-r--r--	kernel/power/Kconfig	6
-rw-r--r--	kernel/power/main.c	2
-rw-r--r--	kernel/sched.c	40
-rw-r--r--	kernel/sched_autogroup.c	2
-rw-r--r--	kernel/sched_fair.c	33
-rw-r--r--	kernel/sched_rt.c	4
-rw-r--r--	kernel/signal.c	155
-rw-r--r--	kernel/softirq.c	2
-rw-r--r--	kernel/time/jiffies.c	2
-rw-r--r--	kernel/time/ntp.c	2
-rw-r--r--	kernel/time/timer_stats.c	2
-rw-r--r--	kernel/trace/blktrace.c	33
-rw-r--r--	kernel/trace/ftrace.c	4
-rw-r--r--	kernel/trace/ring_buffer.c	4
-rw-r--r--	kernel/trace/trace.c	2
-rw-r--r--	kernel/trace/trace_clock.c	2
-rw-r--r--	kernel/trace/trace_entries.h	2
-rw-r--r--	kernel/trace/trace_functions_graph.c	2
-rw-r--r--	kernel/trace/trace_irqsoff.c	2
-rw-r--r--	kernel/trace/trace_kprobe.c	2
-rw-r--r--	kernel/user-return-notifier.c	2
-rw-r--r--	kernel/wait.c	2
-rw-r--r--	kernel/workqueue.c	2
48 files changed, 255 insertions, 153 deletions
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 37b2bea170c8..e99dda04b126 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -607,7 +607,7 @@ void audit_trim_trees(void)
 		spin_lock(&hash_lock);
 		list_for_each_entry(node, &tree->chunks, list) {
 			struct audit_chunk *chunk = find_chunk(node);
-			/* this could be NULL if the watch is dieing else where... */
+			/* this could be NULL if the watch is dying else where... */
 			struct inode *inode = chunk->mark.i.inode;
 			node->index |= 1U<<31;
 			if (iterate_mounts(compare_root, inode, root_mnt))
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index f49a0318c2ed..b33513a08beb 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1011,7 +1011,7 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
 /*
  * to_send and len_sent accounting are very loose estimates.  We aren't
  * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
- * within about 500 bytes (next page boundry)
+ * within about 500 bytes (next page boundary)
  *
  * why snprintf? an int is up to 12 digits long. if we just assumed when
  * logging that a[%d]= was going to be 16 characters long we would be wasting
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e31b220a743d..25c7eb52de1a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -157,7 +157,7 @@ struct css_id {
 };
 
 /*
- * cgroup_event represents events which userspace want to recieve.
+ * cgroup_event represents events which userspace want to receive.
  */
 struct cgroup_event {
 	/*
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c95fc4df0faa..12b7458f23b1 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -126,7 +126,7 @@ static void cpu_hotplug_done(void)
 #else /* #if CONFIG_HOTPLUG_CPU */
 static void cpu_hotplug_begin(void) {}
 static void cpu_hotplug_done(void) {}
-#endif	/* #esle #if CONFIG_HOTPLUG_CPU */
+#endif	/* #else #if CONFIG_HOTPLUG_CPU */
 
 /* Need to know about CPUs going up/down? */
 int __ref register_cpu_notifier(struct notifier_block *nb)
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index cefd4a11f6d9..bad6786dee88 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -538,7 +538,7 @@ return_normal:
 
 	/*
 	 * For single stepping, try to only enter on the processor
-	 * that was single stepping.  To gaurd against a deadlock, the
+	 * that was single stepping.  To guard against a deadlock, the
 	 * kernel will only try for the value of sstep_tries before
 	 * giving up and continuing on.
 	 */
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 6bc6e3bc4f9c..be14779bcef6 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -441,9 +441,9 @@ static int kdb_check_regs(void)
  *	symbol name, and offset to the caller.
  *
  *	The argument may consist of a numeric value (decimal or
- *	hexidecimal), a symbol name, a register name (preceeded by the
+ *	hexidecimal), a symbol name, a register name (preceded by the
  *	percent sign), an environment variable with a numeric value
- *	(preceeded by a dollar sign) or a simple arithmetic expression
+ *	(preceded by a dollar sign) or a simple arithmetic expression
  *	consisting of a symbol name, +/-, and a numeric constant value
  *	(offset).
  * Parameters:
@@ -1335,7 +1335,7 @@ void kdb_print_state(const char *text, int value)
  *	error		The hardware-defined error code
  *	reason2		kdb's current reason code.
  *			Initially error but can change
- *			acording to kdb state.
+ *			according to kdb state.
  *	db_result	Result code from break or debug point.
  *	regs		The exception frame at time of fault/breakpoint.
  *			should always be valid.
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 6b2485dcb050..5532dd37aa86 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -545,7 +545,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
  *	Mask for process state.
  * Notes:
  *	The mask folds data from several sources into a single long value, so
- *	be carefull not to overlap the bits.  TASK_* bits are in the LSB,
+ *	be careful not to overlap the bits.  TASK_* bits are in the LSB,
  *	special cases like UNRUNNABLE are in the MSB.  As of 2.6.10-rc1 there
  *	is no overlap between TASK_* and EXIT_* but that may not always be
  *	true, so EXIT_* bits are shifted left 16 bits before being stored in
diff --git a/kernel/exit.c b/kernel/exit.c
index 6a488ad2dce5..f5d2f63bae0b 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -841,7 +841,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 	/* Let father know we died
 	 *
 	 * Thread signals are configurable, but you aren't going to use
-	 * that to send signals to arbitary processes.
+	 * that to send signals to arbitrary processes.
 	 * That stops right now.
 	 *
 	 * If the parent exec id doesn't match the exec id we saved
diff --git a/kernel/futex.c b/kernel/futex.c
index dfb924ffe65b..fe28dc282eae 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1886,7 +1886,7 @@ retry:
 		restart->futex.val = val;
 		restart->futex.time = abs_time->tv64;
 		restart->futex.bitset = bitset;
-		restart->futex.flags = flags;
+		restart->futex.flags = flags | FLAGS_HAS_TIMEOUT;
 
 		ret = -ERESTART_RESTARTBLOCK;
 
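Context for the fix above: without FLAGS_HAS_TIMEOUT, a FUTEX_WAIT restarted after a signal dropped its timeout entirely, since the restart path only rebuilds the timeout when that flag is recorded. A sketch of the consuming side, paraphrased from this kernel's futex_wait_restart() (not part of the hunk above):

	static long futex_wait_restart(struct restart_block *restart)
	{
		u32 __user *uaddr = restart->futex.uaddr;
		ktime_t t, *tp = NULL;

		/* Only rebuild the timeout if the setup path recorded one. */
		if (restart->futex.flags & FLAGS_HAS_TIMEOUT) {
			t.tv64 = restart->futex.time;
			tp = &t;
		}
		restart->fn = do_no_restart_syscall;

		return (long)futex_wait(uaddr, restart->futex.flags,
					restart->futex.val, tp, restart->futex.bitset);
	}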
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index a69c333f78e4..c574f9a12c48 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -10,9 +10,6 @@ menu "IRQ subsystem"
 config GENERIC_HARDIRQS
 	def_bool y
 
-config GENERIC_HARDIRQS_NO_COMPAT
-	bool
-
 # Options selectable by the architecture code
 
 # Make sparse irq Kconfig switch below available
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 616ec1c6b06f..4af1e2b244cb 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -415,7 +415,7 @@ out:
  *	@desc:	the interrupt description structure for this irq
  *
  *	Interrupt occures on the falling and/or rising edge of a hardware
- *	signal. The occurence is latched into the irq controller hardware
+ *	signal. The occurrence is latched into the irq controller hardware
  *	and must be acked in order to be reenabled. After the ack another
  *	interrupt can happen on the same source even before the first one
  *	is handled by the associated event handler. If this happens it
@@ -514,7 +514,7 @@ void handle_edge_eoi_irq(unsigned int irq, struct irq_desc *desc)
 	} while ((desc->istate & IRQS_PENDING) &&
 		 !irqd_irq_disabled(&desc->irq_data));
 
-out_unlock:
+out_eoi:
 	chip->irq_eoi(&desc->irq_data);
 	raw_spin_unlock(&desc->lock);
 }
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 12a80fdae11c..07c1611f3899 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -1051,6 +1051,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
+	free_cpumask_var(mask);
 
 	return 0;
 
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bc6194698dfd..47420908fba0 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -35,7 +35,7 @@ void irq_move_masked_irq(struct irq_data *idata)
 	 * do the disable, re-program, enable sequence.
 	 * This is *not* particularly important for level triggered
 	 * but in a edge trigger case, we might be setting rte
-	 * when an active trigger is comming in. This could
+	 * when an active trigger is coming in. This could
 	 * cause some ioapics to mal-function.
 	 * Being paranoid i guess!
 	 *
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ec19b92c7ebd..55936f9cb251 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -144,7 +144,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 	/* Initialize the list of destination pages */
 	INIT_LIST_HEAD(&image->dest_pages);
 
-	/* Initialize the list of unuseable pages */
+	/* Initialize the list of unusable pages */
 	INIT_LIST_HEAD(&image->unuseable_pages);
 
 	/* Read in the segments */
@@ -454,7 +454,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 	/* Deal with the destination pages I have inadvertently allocated.
 	 *
 	 * Ideally I would convert multi-page allocations into single
-	 * page allocations, and add everyting to image->dest_pages.
+	 * page allocations, and add everything to image->dest_pages.
 	 *
 	 * For now it is simpler to just free the pages.
 	 */
@@ -602,7 +602,7 @@ static void kimage_free_extra_pages(struct kimage *image)
 	/* Walk through and free any extra destination pages I may have */
 	kimage_free_page_list(&image->dest_pages);
 
-	/* Walk through and free any unuseable pages I have cached */
+	/* Walk through and free any unusable pages I have cached */
 	kimage_free_page_list(&image->unuseable_pages);
 
 }
@@ -1099,7 +1099,8 @@ size_t crash_get_memory_size(void)
 	return size;
 }
 
-static void free_reserved_phys_range(unsigned long begin, unsigned long end)
+void __weak crash_free_reserved_phys_range(unsigned long begin,
+					   unsigned long end)
 {
 	unsigned long addr;
 
@@ -1135,7 +1136,7 @@ int crash_shrink_memory(unsigned long new_size)
 	start = roundup(start, PAGE_SIZE);
 	end = roundup(start + new_size, PAGE_SIZE);
 
-	free_reserved_phys_range(end, crashk_res.end);
+	crash_free_reserved_phys_range(end, crashk_res.end);
 
 	if ((start == end) && (crashk_res.parent != NULL))
 		release_resource(&crashk_res);
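The __weak annotation above is the whole mechanism here: the generic page-freeing loop becomes a default that an architecture can replace simply by providing a strong symbol of the same name, with no registration hook or #ifdef. A hypothetical arch-side override might look like this (illustrative sketch only, not part of this diff):

	/* Hypothetical arch code: a strong definition with the same signature
	 * takes precedence over the generic __weak one at link time. */
	void crash_free_reserved_phys_range(unsigned long begin, unsigned long end)
	{
		/* arch-specific freeing of the reserved crashkernel region */
	}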
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 684ab3f7dd72..3b34d2732bce 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -139,7 +139,7 @@ static void create_kthread(struct kthread_create_info *create)
  * in @node, to get NUMA affinity for kthread stack, or else give -1.
  * When woken, the thread will run @threadfn() with @data as its
  * argument. @threadfn() can either call do_exit() directly if it is a
- * standalone thread for which noone will call kthread_stop(), or
+ * standalone thread for which no one will call kthread_stop(), or
  * return when 'kthread_should_stop()' is true (which means
  * kthread_stop() has been called).  The return value should be zero
  * or a negative error number; it will be passed to kthread_stop().
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index ee74b35e528d..376066e10413 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -153,7 +153,7 @@ static inline void store_stacktrace(struct task_struct *tsk,
 }
 
 /**
- * __account_scheduler_latency - record an occured latency
+ * __account_scheduler_latency - record an occurred latency
  * @tsk - the task struct of the task hitting the latency
  * @usecs - the duration of the latency in microseconds
  * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 0d2058da80f5..53a68956f131 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2309,7 +2309,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 	if (unlikely(curr->hardirqs_enabled)) {
 		/*
 		 * Neither irq nor preemption are disabled here
-		 * so this is racy by nature but loosing one hit
+		 * so this is racy by nature but losing one hit
 		 * in a stat is not a big deal.
 		 */
 		__debug_atomic_inc(redundant_hardirqs_on);
@@ -2620,7 +2620,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 	if (!graph_lock())
 		return 0;
 	/*
-	 * Make sure we didnt race:
+	 * Make sure we didn't race:
 	 */
 	if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
 		graph_unlock();
diff --git a/kernel/module.c b/kernel/module.c
index 1f9f7bc56ca1..d5938a5c19c4 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -809,7 +809,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 		wait_for_zero_refcount(mod);
 
 	mutex_unlock(&module_mutex);
-	/* Final destruction now noone is using it. */
+	/* Final destruction now no one is using it. */
 	if (mod->exit != NULL)
 		mod->exit();
 	blocking_notifier_call_chain(&module_notify_list,
@@ -2777,7 +2777,7 @@ static struct module *load_module(void __user *umod,
 	mod->state = MODULE_STATE_COMING;
 
 	/* Now sew it into the lists so we can get lockdep and oops
-	 * info during argument parsing.  Noone should access us, since
+	 * info during argument parsing.  No one should access us, since
 	 * strong_try_module_get() will fail.
 	 * lockdep/oops can run asynchronous, so use the RCU list insertion
 	 * function to insert in a way safe to concurrent readers.
@@ -2971,7 +2971,7 @@ static const char *get_ksymbol(struct module *mod,
 	else
 		nextval = (unsigned long)mod->module_core+mod->core_text_size;
 
-	/* Scan for closest preceeding symbol, and next symbol. (ELF
+	/* Scan for closest preceding symbol, and next symbol. (ELF
 	   starts real symbols at 1). */
 	for (i = 1; i < mod->num_symtab; i++) {
 		if (mod->symtab[i].st_shndx == SHN_UNDEF)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index a5889fb28ecf..c4195fa98900 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -245,7 +245,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 		}
 		__set_task_state(task, state);
 
-		/* didnt get the lock, go to sleep: */
+		/* didn't get the lock, go to sleep: */
 		spin_unlock_mutex(&lock->wait_lock, flags);
 		preempt_enable_no_resched();
 		schedule();
diff --git a/kernel/padata.c b/kernel/padata.c
index 751019415d23..b91941df5e63 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -262,7 +262,7 @@ static void padata_reorder(struct parallel_data *pd)
 	/*
 	 * This cpu has to do the parallel processing of the next
 	 * object. It's waiting in the cpu's parallelization queue,
-	 * so exit imediately.
+	 * so exit immediately.
 	 */
 	if (PTR_ERR(padata) == -ENODATA) {
 		del_timer(&pd->timer);
@@ -284,7 +284,7 @@ static void padata_reorder(struct parallel_data *pd)
 	/*
 	 * The next object that needs serialization might have arrived to
 	 * the reorder queues in the meantime, we will be called again
-	 * from the timer function if noone else cares for it.
+	 * from the timer function if no one else cares for it.
 	 */
 	if (atomic_read(&pd->reorder_objects)
 			&& !(pinst->flags & PADATA_RESET))
@@ -515,7 +515,7 @@ static void __padata_stop(struct padata_instance *pinst)
 	put_online_cpus();
 }
 
-/* Replace the internal control stucture with a new one. */
+/* Replace the internal control structure with a new one. */
 static void padata_replace(struct padata_instance *pinst,
 			   struct parallel_data *pd_new)
 {
@@ -768,7 +768,7 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 }
 
 /**
- * padata_remove_cpu - remove a cpu from the one or both(serial and paralell)
+ * padata_remove_cpu - remove a cpu from the one or both(serial and parallel)
  *                     padata cpumasks.
  *
  * @pinst: padata instance
diff --git a/kernel/params.c b/kernel/params.c
index 0da1411222b9..7ab388a48a2e 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -95,7 +95,7 @@ static int parse_one(char *param,
 	/* Find parameter */
 	for (i = 0; i < num_params; i++) {
 		if (parameq(param, params[i].name)) {
-			/* Noone handled NULL, so do it here. */
+			/* No one handled NULL, so do it here. */
 			if (!val && params[i].ops->set != param_set_bool)
 				return -EINVAL;
 			DEBUGP("They are equal!  Calling %p\n",
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index c75925c4d1e2..8e81a9860a0d 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -145,8 +145,8 @@ static struct srcu_struct pmus_srcu;
  */
 int sysctl_perf_event_paranoid __read_mostly = 1;
 
-/* Minimum for 128 pages + 1 for the user control page */
-int sysctl_perf_event_mlock __read_mostly = 516; /* 'free' kb per user */
+/* Minimum for 512 kiB + 1 user control page */
+int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
 
 /*
  * max perf event sample rate
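Worked out: with 4 KiB pages the new expression is 512 + 4096/1024 = 516 kiB, so the default is unchanged on x86. On a 64 KiB-page configuration it becomes 512 + 64 = 576 kiB, keeping the full 512 kiB budget on top of the page-size-dependent user control page, which the old hard-coded 516 did not.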
@@ -364,6 +364,7 @@ void perf_cgroup_switch(struct task_struct *task, int mode)
 	}
 
 	if (mode & PERF_CGROUP_SWIN) {
+		WARN_ON_ONCE(cpuctx->cgrp);
 		/* set cgrp before ctxsw in to
 		 * allow event_filter_match() to not
 		 * have to pass task around
@@ -2423,6 +2424,14 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	if (!ctx || !ctx->nr_events)
 		goto out;
 
+	/*
+	 * We must ctxsw out cgroup events to avoid conflict
+	 * when invoking perf_task_event_sched_in() later on
+	 * in this function. Otherwise we end up trying to
+	 * ctxswin cgroup events which are already scheduled
+	 * in.
+	 */
+	perf_cgroup_sched_out(current);
 	task_ctx_sched_out(ctx, EVENT_ALL);
 
 	raw_spin_lock(&ctx->lock);
@@ -2447,6 +2456,9 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 
 	raw_spin_unlock(&ctx->lock);
 
+	/*
+	 * Also calls ctxswin for cgroup events, if any:
+	 */
 	perf_event_context_sched_in(ctx, ctx->task);
 out:
 	local_irq_restore(flags);
@@ -6531,6 +6543,11 @@ SYSCALL_DEFINE5(perf_event_open,
 		goto err_alloc;
 	}
 
+	if (task) {
+		put_task_struct(task);
+		task = NULL;
+	}
+
 	/*
 	 * Look up the group leader (we will attach this event to it):
 	 */
diff --git a/kernel/pid.c b/kernel/pid.c
index 02f221274265..57a8346a270e 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -217,11 +217,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
 	return -1;
 }
 
-int next_pidmap(struct pid_namespace *pid_ns, int last)
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
 {
 	int offset;
 	struct pidmap *map, *end;
 
+	if (last >= PID_MAX_LIMIT)
+		return -1;
+
 	offset = (last + 1) & BITS_PER_PAGE_MASK;
 	map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
 	end = &pid_ns->pidmap[PIDMAP_ENTRIES];
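The two changes above work together: next_pidmap() is fed the f_pos of a /proc readdir, so with last now unsigned a negative value from userspace wraps to a huge number, and the new PID_MAX_LIMIT check rejects it before (last + 1)/BITS_PER_PAGE can index past the end of the pidmap array.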
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 67fea9d25d55..0791b13df7bf 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1347,7 +1347,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
 
 	/*
 	 * Now that all the timers on our list have the firing flag,
-	 * noone will touch their list entries but us. We'll take
+	 * no one will touch their list entries but us. We'll take
 	 * each timer's lock before clearing its firing flag, so no
 	 * timer call will interfere.
 	 */
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 4c0124919f9a..e5498d7405c3 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -313,7 +313,7 @@ static void schedule_next_timer(struct k_itimer *timr)
  * restarted (i.e. we have flagged this in the sys_private entry of the
  * info block).
  *
- * To protect aginst the timer going away while the interrupt is queued,
+ * To protect against the timer going away while the interrupt is queued,
  * we require that the it_requeue_pending flag be set.
  */
 void do_schedule_next_timer(struct siginfo *info)
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 4603f08dc47b..6de9a8fc3417 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -18,9 +18,13 @@ config SUSPEND_FREEZER
 
 	  Turning OFF this setting is NOT recommended! If in doubt, say Y.
 
+config HIBERNATE_CALLBACKS
+	bool
+
 config HIBERNATION
 	bool "Hibernation (aka 'suspend to disk')"
 	depends on SWAP && ARCH_HIBERNATION_POSSIBLE
+	select HIBERNATE_CALLBACKS
 	select LZO_COMPRESS
 	select LZO_DECOMPRESS
 	---help---
@@ -85,7 +89,7 @@ config PM_STD_PARTITION
 
 config PM_SLEEP
 	def_bool y
-	depends on SUSPEND || HIBERNATION || XEN_SAVE_RESTORE
+	depends on SUSPEND || HIBERNATE_CALLBACKS
 
 config PM_SLEEP_SMP
 	def_bool y
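The new invisible HIBERNATE_CALLBACKS symbol separates "wants the hibernation callback machinery" from "builds full hibernation": HIBERNATION selects it here, and platforms such as Xen (whose XEN_SAVE_RESTORE previously had to be named in PM_SLEEP directly) presumably select it from their own Kconfig, which is why PM_SLEEP now depends only on SUSPEND || HIBERNATE_CALLBACKS.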
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 8eaba5f27b10..de9aef8742f4 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -224,7 +224,7 @@ power_attr(state);
  * writing to 'state'.  It first should read from 'wakeup_count' and store
  * the read value.  Then, after carrying out its own preparations for the system
  * transition to a sleep state, it should write the stored value to
- * 'wakeup_count'.  If that fails, at least one wakeup event has occured since
+ * 'wakeup_count'.  If that fails, at least one wakeup event has occurred since
  * 'wakeup_count' was read and 'state' should not be written to.  Otherwise, it
  * is allowed to write to 'state', but the transition will be aborted if there
  * are any wakeup events detected after 'wakeup_count' was written to.
diff --git a/kernel/sched.c b/kernel/sched.c
index f592ce6f8616..312f8b95c2d4 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2309,7 +2309,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
  * Cause a process which is running on another CPU to enter
  * kernel-mode, without any delay. (to get signals handled.)
  *
- * NOTE: this function doesnt have to take the runqueue lock,
+ * NOTE: this function doesn't have to take the runqueue lock,
  * because all it wants to ensure is that the remote task enters
  * the kernel. If the IPI races and the task has been migrated
  * to another CPU then no harm is done and the purpose has been
@@ -4111,20 +4111,20 @@ need_resched:
 					try_to_wake_up_local(to_wakeup);
 			}
 			deactivate_task(rq, prev, DEQUEUE_SLEEP);
+
+			/*
+			 * If we are going to sleep and we have plugged IO queued, make
+			 * sure to submit it to avoid deadlocks.
+			 */
+			if (blk_needs_flush_plug(prev)) {
+				raw_spin_unlock(&rq->lock);
+				blk_schedule_flush_plug(prev);
+				raw_spin_lock(&rq->lock);
+			}
 		}
 		switch_count = &prev->nvcsw;
 	}
 
-	/*
-	 * If we are going to sleep and we have plugged IO queued, make
-	 * sure to submit it to avoid deadlocks.
-	 */
-	if (prev->state != TASK_RUNNING && blk_needs_flush_plug(prev)) {
-		raw_spin_unlock(&rq->lock);
-		blk_flush_plug(prev);
-		raw_spin_lock(&rq->lock);
-	}
-
 	pre_schedule(rq, prev);
 
 	if (unlikely(!rq->nr_running))
@@ -4997,7 +4997,7 @@ recheck:
 	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	/*
-	 * To be able to change p->policy safely, the apropriate
+	 * To be able to change p->policy safely, the appropriate
 	 * runqueue lock must be held.
 	 */
 	rq = __task_rq_lock(p);
@@ -5011,6 +5011,17 @@ recheck:
 		return -EINVAL;
 	}
 
+	/*
+	 * If not changing anything there's no need to proceed further:
+	 */
+	if (unlikely(policy == p->policy && (!rt_policy(policy) ||
+			param->sched_priority == p->rt_priority))) {
+
+		__task_rq_unlock(rq);
+		raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+		return 0;
+	}
+
 #ifdef CONFIG_RT_GROUP_SCHED
 	if (user) {
 		/*
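Note the asymmetry in the new early-out: rt_priority is only compared under rt_policy() because for SCHED_NORMAL/SCHED_BATCH/SCHED_IDLE a valid request must carry sched_priority == 0 (enforced by the validity checks earlier in sched_setscheduler()), so for non-RT policies a matching policy alone already means nothing would change.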
@@ -5705,7 +5716,7 @@ void show_state_filter(unsigned long state_filter)
 	do_each_thread(g, p) {
 		/*
 		 * reset the NMI-timeout, listing all files on a slow
-		 * console might take alot of time:
+		 * console might take a lot of time:
 		 */
 		touch_nmi_watchdog();
 		if (!state_filter || (p->state & state_filter))
@@ -6320,6 +6331,9 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
 		break;
 #endif
 	}
+
+	update_max_interval();
+
 	return NOTIFY_OK;
 }
 
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 5946ac515602..429242f3c484 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -179,7 +179,7 @@ void sched_autogroup_create_attach(struct task_struct *p)
 	struct autogroup *ag = autogroup_create();
 
 	autogroup_move_group(p, ag);
-	/* drop extra refrence added by autogroup_create() */
+	/* drop extra reference added by autogroup_create() */
 	autogroup_kref_put(ag);
 }
 EXPORT_SYMBOL(sched_autogroup_create_attach);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3f7ec9e27ee1..6fa833ab2cb8 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -22,6 +22,7 @@
 
 #include <linux/latencytop.h>
 #include <linux/sched.h>
+#include <linux/cpumask.h>
 
 /*
  * Targeted preemption latency for CPU-bound tasks:
@@ -2103,21 +2104,20 @@ balance_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
 	      enum cpu_idle_type idle, int *all_pinned,
 	      int *this_best_prio, struct cfs_rq *busiest_cfs_rq)
 {
-	int loops = 0, pulled = 0, pinned = 0;
+	int loops = 0, pulled = 0;
 	long rem_load_move = max_load_move;
 	struct task_struct *p, *n;
 
 	if (max_load_move == 0)
 		goto out;
 
-	pinned = 1;
-
 	list_for_each_entry_safe(p, n, &busiest_cfs_rq->tasks, se.group_node) {
 		if (loops++ > sysctl_sched_nr_migrate)
 			break;
 
 		if ((p->se.load.weight >> 1) > rem_load_move ||
-		    !can_migrate_task(p, busiest, this_cpu, sd, idle, &pinned))
+		    !can_migrate_task(p, busiest, this_cpu, sd, idle,
+				      all_pinned))
 			continue;
 
 		pull_task(busiest, p, this_rq, this_cpu);
@@ -2152,9 +2152,6 @@ out:
 	 */
 	schedstat_add(sd, lb_gained[idle], pulled);
 
-	if (all_pinned)
-		*all_pinned = pinned;
-
 	return max_load_move - rem_load_move;
 }
2160 2157
@@ -3061,7 +3058,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
 
 	/*
 	 * if *imbalance is less than the average load per runnable task
-	 * there is no gaurantee that any tasks will be moved so we'll have
+	 * there is no guarantee that any tasks will be moved so we'll have
 	 * a think about bumping its value to force at least one task to be
 	 * moved
 	 */
@@ -3126,6 +3123,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	if (!sds.busiest || sds.busiest_nr_running == 0)
 		goto out_balanced;
 
+	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
+
 	/*
 	 * If the busiest group is imbalanced the below checks don't
 	 * work because they assumes all things are equal, which typically
@@ -3150,7 +3149,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
 	 * Don't pull any tasks if this group is already above the domain
 	 * average load.
 	 */
-	sds.avg_load = (SCHED_LOAD_SCALE * sds.total_load) / sds.total_pwr;
 	if (sds.this_load >= sds.avg_load)
 		goto out_balanced;
 
@@ -3339,6 +3337,7 @@ redo:
 	 * still unbalanced. ld_moved simply stays zero, so it is
 	 * correctly treated as an imbalance.
 	 */
+	all_pinned = 1;
 	local_irq_save(flags);
 	double_rq_lock(this_rq, busiest);
 	ld_moved = move_tasks(this_rq, this_cpu, busiest,
@@ -3819,6 +3818,17 @@ void select_nohz_load_balancer(int stop_tick)
 
 static DEFINE_SPINLOCK(balancing);
 
+static unsigned long __read_mostly max_load_balance_interval = HZ/10;
+
+/*
+ * Scale the max load_balance interval with the number of CPUs in the system.
+ * This trades load-balance latency on larger machines for less cross talk.
+ */
+static void update_max_interval(void)
+{
+	max_load_balance_interval = HZ*num_online_cpus()/10;
+}
+
 /*
  * It checks each scheduling domain to see if it is due to be balanced,
  * and initiates a balancing operation if so.
@@ -3848,10 +3858,7 @@ static void rebalance_domains(int cpu, enum cpu_idle_type idle)
 
 		/* scale ms to jiffies */
 		interval = msecs_to_jiffies(interval);
-		if (unlikely(!interval))
-			interval = 1;
-		if (interval > HZ*NR_CPUS/10)
-			interval = HZ*NR_CPUS/10;
+		interval = clamp(interval, 1UL, max_load_balance_interval);
 
 		need_serialize = sd->flags & SD_SERIALIZE;
 
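To make the new cap concrete, assume HZ=1000: with 8 CPUs online, update_max_interval() (called from the migration notifier in kernel/sched.c earlier in this diff) yields 1000*8/10 = 800 jiffies, and clamp() bounds interval to [1, 800]. The replaced code clamped against HZ*NR_CPUS/10, a compile-time constant (NR_CPUS can be 4096 on distro configs, giving a 409600-jiffy cap), so the limit now tracks the CPUs actually present.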
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index db308cb08b75..e7cebdc65f82 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1378,7 +1378,7 @@ retry:
 	task = pick_next_pushable_task(rq);
 	if (task_cpu(next_task) == rq->cpu && task == next_task) {
 		/*
-		 * If we get here, the task hasnt moved at all, but
+		 * If we get here, the task hasn't moved at all, but
 		 * it has failed to push.  We will not try again,
 		 * since the other cpus will pull from us when they
 		 * are ready.
@@ -1488,7 +1488,7 @@ static int pull_rt_task(struct rq *this_rq)
 			/*
 			 * We continue with the search, just in
 			 * case there's an even higher prio task
-			 * in another runqueue. (low likelyhood
+			 * in another runqueue. (low likelihood
 			 * but possible)
 			 */
 		}
diff --git a/kernel/signal.c b/kernel/signal.c
index 1186cf7fac77..7165af5f1b11 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -226,7 +226,7 @@ static inline void print_dropped_signal(int sig)
 /*
  * allocate a new signal queue record
  * - this may be called without locks if and only if t == current, otherwise an
- *   appopriate lock must be held to stop the target task from exiting
+ *   appropriate lock must be held to stop the target task from exiting
  */
 static struct sigqueue *
 __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimit)
@@ -375,15 +375,15 @@ int unhandled_signal(struct task_struct *tsk, int sig)
 	return !tracehook_consider_fatal_signal(tsk, sig);
 }
 
-
-/* Notify the system that a driver wants to block all signals for this
+/*
+ * Notify the system that a driver wants to block all signals for this
  * process, and wants to be notified if any signals at all were to be
  * sent/acted upon. If the notifier routine returns non-zero, then the
  * signal will be acted upon after all. If the notifier routine returns 0,
  * then then signal will be blocked. Only one block per process is
  * allowed. priv is a pointer to private data that the notifier routine
- * can use to determine if the signal should be blocked or not. */
-
+ * can use to determine if the signal should be blocked or not.
+ */
 void
 block_all_signals(int (*notifier)(void *priv), void *priv, sigset_t *mask)
 {
@@ -434,9 +434,10 @@ still_pending:
 		copy_siginfo(info, &first->info);
 		__sigqueue_free(first);
 	} else {
-		/* Ok, it wasn't in the queue.  This must be
-		   a fast-pathed signal or we must have been
-		   out of queue space.  So zero out the info.
+		/*
+		 * Ok, it wasn't in the queue. This must be
+		 * a fast-pathed signal or we must have been
+		 * out of queue space. So zero out the info.
 		 */
 		info->si_signo = sig;
 		info->si_errno = 0;
@@ -468,7 +469,7 @@ static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,
 }
 
 /*
- * Dequeue a signal and return the element to the caller, which is 
+ * Dequeue a signal and return the element to the caller, which is
  * expected to free it.
  *
  * All callers have to hold the siglock.
@@ -490,7 +491,7 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
 		 * itimers are process shared and we restart periodic
 		 * itimers in the signal delivery path to prevent DoS
 		 * attacks in the high resolution timer case. This is
-		 * compliant with the old way of self restarting
+		 * compliant with the old way of self-restarting
 		 * itimers, as the SIGALRM is a legacy signal and only
 		 * queued once. Changing the restart behaviour to
 		 * restart the timer in the signal dequeue path is
@@ -923,14 +924,15 @@ static int __send_signal(int sig, struct siginfo *info, struct task_struct *t,
 	if (info == SEND_SIG_FORCED)
 		goto out_set;
 
-	/* Real-time signals must be queued if sent by sigqueue, or
-	   some other real-time mechanism.  It is implementation
-	   defined whether kill() does so.  We attempt to do so, on
-	   the principle of least surprise, but since kill is not
-	   allowed to fail with EAGAIN when low on memory we just
-	   make sure at least one signal gets delivered and don't
-	   pass on the info struct. */
-
+	/*
+	 * Real-time signals must be queued if sent by sigqueue, or
+	 * some other real-time mechanism. It is implementation
+	 * defined whether kill() does so. We attempt to do so, on
+	 * the principle of least surprise, but since kill is not
+	 * allowed to fail with EAGAIN when low on memory we just
+	 * make sure at least one signal gets delivered and don't
+	 * pass on the info struct.
+	 */
 	if (sig < SIGRTMIN)
 		override_rlimit = (is_si_special(info) || info->si_code >= 0);
 	else
@@ -1201,8 +1203,7 @@ retry:
 	return error;
 }
 
-int
-kill_proc_info(int sig, struct siginfo *info, pid_t pid)
+int kill_proc_info(int sig, struct siginfo *info, pid_t pid)
 {
 	int error;
 	rcu_read_lock();
@@ -1299,8 +1300,7 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
  * These are for backward compatibility with the rest of the kernel source.
  */
 
-int
-send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
+int send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
 {
 	/*
 	 * Make sure legacy kernel users don't send in bad values
@@ -1368,7 +1368,7 @@ EXPORT_SYMBOL(kill_pid);
  * These functions support sending signals using preallocated sigqueue
  * structures.  This is needed "because realtime applications cannot
  * afford to lose notifications of asynchronous events, like timer
- * expirations or I/O completions".  In the case of Posix Timers
+ * expirations or I/O completions".  In the case of POSIX Timers
  * we allocate the sigqueue structure from the timer_create.  If this
  * allocation fails we are able to report the failure to the application
  * with an EAGAIN error.
@@ -1553,7 +1553,7 @@ static void do_notify_parent_cldstop(struct task_struct *tsk, int why)
 	info.si_signo = SIGCHLD;
 	info.si_errno = 0;
 	/*
-	 * see comment in do_notify_parent() abot the following 3 lines
+	 * see comment in do_notify_parent() about the following 4 lines
 	 */
 	rcu_read_lock();
 	info.si_pid = task_pid_nr_ns(tsk, parent->nsproxy->pid_ns);
@@ -1611,7 +1611,7 @@ static inline int may_ptrace_stop(void)
 }
 
 /*
- * Return nonzero if there is a SIGKILL that should be waking us up.
+ * Return non-zero if there is a SIGKILL that should be waking us up.
  * Called with the siglock held.
  */
 static int sigkill_pending(struct task_struct *tsk)
@@ -1735,7 +1735,7 @@ void ptrace_notify(int exit_code)
 /*
  * This performs the stopping for SIGSTOP and other stop signals.
  * We have to stop all threads in the thread group.
- * Returns nonzero if we've actually stopped and released the siglock.
+ * Returns non-zero if we've actually stopped and released the siglock.
  * Returns zero if we didn't stop and still hold the siglock.
  */
 static int do_signal_stop(int signr)
@@ -1823,10 +1823,12 @@ static int ptrace_signal(int signr, siginfo_t *info,
 
 	current->exit_code = 0;
 
-	/* Update the siginfo structure if the signal has
-	   changed.  If the debugger wanted something
-	   specific in the siginfo structure then it should
-	   have updated *info via PTRACE_SETSIGINFO. */
+	/*
+	 * Update the siginfo structure if the signal has
+	 * changed. If the debugger wanted something
+	 * specific in the siginfo structure then it should
+	 * have updated *info via PTRACE_SETSIGINFO.
+	 */
 	if (signr != info->si_signo) {
 		info->si_signo = signr;
 		info->si_errno = 0;
@@ -1885,7 +1887,7 @@ relock:
 	for (;;) {
 		struct k_sigaction *ka;
 		/*
-		 * Tracing can induce an artifical signal and choose sigaction.
+		 * Tracing can induce an artificial signal and choose sigaction.
 		 * The return value in @signr determines the default action,
 		 * but @info->si_signo is the signal number we will report.
 		 */
@@ -2034,7 +2036,8 @@ void exit_signals(struct task_struct *tsk)
 	if (!signal_pending(tsk))
 		goto out;
 
-	/* It could be that __group_complete_signal() choose us to
+	/*
+	 * It could be that __group_complete_signal() choose us to
 	 * notify about group-wide signal. Another thread should be
 	 * woken now to take the signal since we will not.
 	 */
@@ -2072,6 +2075,9 @@ EXPORT_SYMBOL(unblock_all_signals);
  * System call entry points.
  */
 
+/**
+ *  sys_restart_syscall - restart a system call
+ */
 SYSCALL_DEFINE0(restart_syscall)
 {
 	struct restart_block *restart = &current_thread_info()->restart_block;
@@ -2125,6 +2131,13 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
 	return error;
 }
 
+/**
+ *  sys_rt_sigprocmask - change the list of currently blocked signals
+ *  @how: whether to add, remove, or set signals
+ *  @set: stores pending signals
+ *  @oset: previous value of signal mask if non-null
+ *  @sigsetsize: size of sigset_t type
+ */
 SYSCALL_DEFINE4(rt_sigprocmask, int, how, sigset_t __user *, set,
 		sigset_t __user *, oset, size_t, sigsetsize)
 {
@@ -2183,8 +2196,14 @@ long do_sigpending(void __user *set, unsigned long sigsetsize)
 
 out:
 	return error;
 }
 
+/**
+ *  sys_rt_sigpending - examine a pending signal that has been raised
+ *			while blocked
+ *  @set: stores pending signals
+ *  @sigsetsize: size of sigset_t type or larger
+ */
 SYSCALL_DEFINE2(rt_sigpending, sigset_t __user *, set, size_t, sigsetsize)
 {
 	return do_sigpending(set, sigsetsize);
@@ -2233,9 +2252,9 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
 	err |= __put_user(from->si_trapno, &to->si_trapno);
 #endif
 #ifdef BUS_MCEERR_AO
 	/*
 	 * Other callers might not initialize the si_lsb field,
-	 * so check explicitely for the right codes here.
+	 * so check explicitly for the right codes here.
 	 */
 	if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO)
 		err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
@@ -2264,6 +2283,14 @@ int copy_siginfo_to_user(siginfo_t __user *to, siginfo_t *from)
2264 2283
2265#endif 2284#endif
2266 2285
2286/**
2287 * sys_rt_sigtimedwait - synchronously wait for queued signals specified
2288 * in @uthese
2289 * @uthese: queued signals to wait for
2290 * @uinfo: if non-null, the signal's siginfo is returned here
 2291 * @uts: upper bound on time to wait, or NULL to wait indefinitely
2292 * @sigsetsize: size of sigset_t type
2293 */
2267SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese, 2294SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2268 siginfo_t __user *, uinfo, const struct timespec __user *, uts, 2295 siginfo_t __user *, uinfo, const struct timespec __user *, uts,
2269 size_t, sigsetsize) 2296 size_t, sigsetsize)
@@ -2280,7 +2307,7 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2280 2307
2281 if (copy_from_user(&these, uthese, sizeof(these))) 2308 if (copy_from_user(&these, uthese, sizeof(these)))
2282 return -EFAULT; 2309 return -EFAULT;
2283 2310
2284 /* 2311 /*
2285 * Invert the set of allowed signals to get those we 2312 * Invert the set of allowed signals to get those we
2286 * want to block. 2313 * want to block.
@@ -2305,9 +2332,11 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2305 + (ts.tv_sec || ts.tv_nsec)); 2332 + (ts.tv_sec || ts.tv_nsec));
2306 2333
2307 if (timeout) { 2334 if (timeout) {
2308 /* None ready -- temporarily unblock those we're 2335 /*
2336 * None ready -- temporarily unblock those we're
 2309 * interested while we are sleeping in so that we'll 2337 * interested in while we are sleeping so that we'll
2310 * be awakened when they arrive. */ 2338 * be awakened when they arrive.
2339 */
2311 current->real_blocked = current->blocked; 2340 current->real_blocked = current->blocked;
2312 sigandsets(&current->blocked, &current->blocked, &these); 2341 sigandsets(&current->blocked, &current->blocked, &these);
2313 recalc_sigpending(); 2342 recalc_sigpending();
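
The temporary-unblock dance above is what makes the following user-space pattern work; a minimal sketch (not part of this commit):

    #include <signal.h>
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        sigset_t set;
        siginfo_t info;
        struct timespec ts = { .tv_sec = 5, .tv_nsec = 0 };

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);
        sigprocmask(SIG_BLOCK, &set, NULL); /* waited-for signals must be blocked */

        int sig = sigtimedwait(&set, &info, &ts);
        if (sig == SIGUSR1)
            printf("got SIGUSR1 from pid %d\n", (int)info.si_pid);
        else
            perror("sigtimedwait");     /* EAGAIN on timeout */
        return 0;
    }
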
@@ -2339,6 +2368,11 @@ SYSCALL_DEFINE4(rt_sigtimedwait, const sigset_t __user *, uthese,
2339 return ret; 2368 return ret;
2340} 2369}
2341 2370
2371/**
2372 * sys_kill - send a signal to a process
2373 * @pid: the PID of the process
2374 * @sig: signal to be sent
2375 */
2342SYSCALL_DEFINE2(kill, pid_t, pid, int, sig) 2376SYSCALL_DEFINE2(kill, pid_t, pid, int, sig)
2343{ 2377{
2344 struct siginfo info; 2378 struct siginfo info;
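
A hedged user-space counterpart to sys_kill (illustrative only):

    #include <signal.h>
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t pid = fork();

        if (pid == 0) {         /* child: wait to be signalled */
            pause();
            _exit(0);
        }
        sleep(1);               /* crude: let the child reach pause() */
        kill(pid, SIGTERM);     /* pid > 0 targets exactly one process */
        waitpid(pid, NULL, 0);
        return 0;
    }
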
@@ -2414,7 +2448,11 @@ SYSCALL_DEFINE3(tgkill, pid_t, tgid, pid_t, pid, int, sig)
2414 return do_tkill(tgid, pid, sig); 2448 return do_tkill(tgid, pid, sig);
2415} 2449}
2416 2450
2417/* 2451/**
2452 * sys_tkill - send signal to one specific task
2453 * @pid: the PID of the task
2454 * @sig: signal to be sent
2455 *
2418 * Send a signal to only one task, even if it's a CLONE_THREAD task. 2456 * Send a signal to only one task, even if it's a CLONE_THREAD task.
2419 */ 2457 */
2420SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig) 2458SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
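
Unlike kill(2), tkill/tgkill address a single thread. Older glibc exports no wrapper, so a hedged sketch goes through syscall(2):

    #define _GNU_SOURCE
    #include <signal.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t tgid = getpid();
        pid_t tid  = syscall(SYS_gettid);   /* main thread: tid == tgid */

        signal(SIGUSR1, SIG_IGN);           /* survive our own signal */

        /* tgkill's extra tgid argument guards against the tid having
         * been recycled by an unrelated process */
        if (syscall(SYS_tgkill, tgid, tid, SIGUSR1) == -1)
            perror("tgkill");
        return 0;
    }
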
@@ -2426,6 +2464,12 @@ SYSCALL_DEFINE2(tkill, pid_t, pid, int, sig)
2426 return do_tkill(0, pid, sig); 2464 return do_tkill(0, pid, sig);
2427} 2465}
2428 2466
2467/**
 2468 * sys_rt_sigqueueinfo - send signal information to a process
 2469 * @pid: the PID of the process
2470 * @sig: signal to be sent
2471 * @uinfo: signal info to be sent
2472 */
2429SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig, 2473SYSCALL_DEFINE3(rt_sigqueueinfo, pid_t, pid, int, sig,
2430 siginfo_t __user *, uinfo) 2474 siginfo_t __user *, uinfo)
2431{ 2475{
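
User space normally reaches sys_rt_sigqueueinfo through sigqueue(3), which fills in a legitimate si_code; a hedged sketch:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        union sigval value = { .sival_int = 42 };

        signal(SIGUSR1, SIG_IGN);
        /* sigqueue(3) sends SI_QUEUE siginfo; the kernel rejects
         * attempts to forge kernel-generated si_code values */
        if (sigqueue(getpid(), SIGUSR1, value) == -1)
            perror("sigqueue");
        return 0;
    }
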
@@ -2553,12 +2597,11 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
2553 2597
2554 error = -EINVAL; 2598 error = -EINVAL;
2555 /* 2599 /*
2556 * 2600 * Note - this code used to test ss_flags incorrectly:
2557 * Note - this code used to test ss_flags incorrectly
2558 * old code may have been written using ss_flags==0 2601 * old code may have been written using ss_flags==0
2559 * to mean ss_flags==SS_ONSTACK (as this was the only 2602 * to mean ss_flags==SS_ONSTACK (as this was the only
2560 * way that worked) - this fix preserves that older 2603 * way that worked) - this fix preserves that older
2561 * mechanism 2604 * mechanism.
2562 */ 2605 */
2563 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0) 2606 if (ss_flags != SS_DISABLE && ss_flags != SS_ONSTACK && ss_flags != 0)
2564 goto out; 2607 goto out;
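
The ss_flags==0 compatibility case preserved above corresponds to a common user-space idiom; a minimal sketch (not part of this commit):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        stack_t ss, oss;

        ss.ss_sp = malloc(SIGSTKSZ);
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;        /* historically accepted as "enable" */

        if (sigaltstack(&ss, &oss) == -1) {
            perror("sigaltstack");
            return 1;
        }
        /* handlers installed with SA_ONSTACK now run on ss.ss_sp */
        printf("previous ss_flags: %d\n", oss.ss_flags);
        return 0;
    }
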
@@ -2592,6 +2635,10 @@ out:
2592 2635
2593#ifdef __ARCH_WANT_SYS_SIGPENDING 2636#ifdef __ARCH_WANT_SYS_SIGPENDING
2594 2637
2638/**
2639 * sys_sigpending - examine pending signals
 2640 * @set: where the mask of pending signals is returned
2641 */
2595SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set) 2642SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2596{ 2643{
2597 return do_sigpending(set, sizeof(*set)); 2644 return do_sigpending(set, sizeof(*set));
@@ -2600,8 +2647,15 @@ SYSCALL_DEFINE1(sigpending, old_sigset_t __user *, set)
2600#endif 2647#endif
2601 2648
2602#ifdef __ARCH_WANT_SYS_SIGPROCMASK 2649#ifdef __ARCH_WANT_SYS_SIGPROCMASK
2603/* Some platforms have their own version with special arguments others 2650/**
2604 support only sys_rt_sigprocmask. */ 2651 * sys_sigprocmask - examine and change blocked signals
2652 * @how: whether to add, remove, or set signals
2653 * @set: signals to add or remove (if non-null)
2654 * @oset: previous value of signal mask if non-null
2655 *
2656 * Some platforms have their own version with special arguments;
2657 * others support only sys_rt_sigprocmask.
2658 */
2605 2659
2606SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set, 2660SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, set,
2607 old_sigset_t __user *, oset) 2661 old_sigset_t __user *, oset)
@@ -2654,6 +2708,13 @@ out:
2654#endif /* __ARCH_WANT_SYS_SIGPROCMASK */ 2708#endif /* __ARCH_WANT_SYS_SIGPROCMASK */
2655 2709
2656#ifdef __ARCH_WANT_SYS_RT_SIGACTION 2710#ifdef __ARCH_WANT_SYS_RT_SIGACTION
2711/**
2712 * sys_rt_sigaction - alter an action taken by a process
 2713 * @sig: signal whose action is to be changed
2714 * @act: new sigaction
2715 * @oact: used to save the previous sigaction
2716 * @sigsetsize: size of sigset_t type
2717 */
2657SYSCALL_DEFINE4(rt_sigaction, int, sig, 2718SYSCALL_DEFINE4(rt_sigaction, int, sig,
2658 const struct sigaction __user *, act, 2719 const struct sigaction __user *, act,
2659 struct sigaction __user *, oact, 2720 struct sigaction __user *, oact,
@@ -2740,6 +2801,12 @@ SYSCALL_DEFINE0(pause)
2740#endif 2801#endif
2741 2802
2742#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND 2803#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
2804/**
 2805 * sys_rt_sigsuspend - replace the signal mask with the @unewset
 2806 * value until a signal is received
2807 * @unewset: new signal mask value
2808 * @sigsetsize: size of sigset_t type
2809 */
2743SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize) 2810SYSCALL_DEFINE2(rt_sigsuspend, sigset_t __user *, unewset, size_t, sigsetsize)
2744{ 2811{
2745 sigset_t newset; 2812 sigset_t newset;
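
The atomic mask-swap-and-sleep that sys_rt_sigsuspend provides closes the classic check-then-pause race; a hedged user-space sketch:

    #include <signal.h>
    #include <stdio.h>

    static volatile sig_atomic_t got_usr1;

    static void handler(int sig) { got_usr1 = 1; }

    int main(void)
    {
        sigset_t block, wait_mask;
        struct sigaction sa = { .sa_handler = handler };

        sigaction(SIGUSR1, &sa, NULL);

        sigemptyset(&block);
        sigaddset(&block, SIGUSR1);
        sigprocmask(SIG_BLOCK, &block, &wait_mask); /* old mask allows SIGUSR1 */

        while (!got_usr1)
            sigsuspend(&wait_mask); /* swap mask and sleep atomically */

        printf("woken by SIGUSR1\n");
        return 0;
    }
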
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 735d87095172..174f976c2874 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -567,7 +567,7 @@ static void __tasklet_hrtimer_trampoline(unsigned long data)
567/** 567/**
568 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks 568 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
569 * @ttimer: tasklet_hrtimer which is initialized 569 * @ttimer: tasklet_hrtimer which is initialized
570 * @function: hrtimer callback funtion which gets called from softirq context 570 * @function: hrtimer callback function which gets called from softirq context
571 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME) 571 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
572 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL) 572 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
573 */ 573 */
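
A hedged in-kernel sketch of the combo this kerneldoc describes (my_ttimer and my_timer_fn are hypothetical; assumes the 2.6.39-era tasklet_hrtimer_start() helper):

    #include <linux/interrupt.h>
    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct tasklet_hrtimer my_ttimer;

    /* invoked from softirq context via the tasklet half of the combo */
    static enum hrtimer_restart my_timer_fn(struct hrtimer *timer)
    {
        /* ... softirq-safe work only ... */
        return HRTIMER_NORESTART;
    }

    static void my_arm_timer(void)
    {
        tasklet_hrtimer_init(&my_ttimer, my_timer_fn,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        tasklet_hrtimer_start(&my_ttimer, ktime_set(0, 10 * NSEC_PER_MSEC),
                              HRTIMER_MODE_REL);
    }
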
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index b2fa506667c0..a470154e0408 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -34,7 +34,7 @@
34 * inaccuracies caused by missed or lost timer 34 * inaccuracies caused by missed or lost timer
35 * interrupts and the inability for the timer 35 * interrupts and the inability for the timer
36 * interrupt hardware to accurately tick at the 36 * interrupt hardware to accurately tick at the
37 * requested HZ value. It is also not reccomended 37 * requested HZ value. It is also not recommended
38 * for "tick-less" systems. 38 * for "tick-less" systems.
39 */ 39 */
40#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ)) 40#define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
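
The <<8 in NSEC_PER_JIFFY exists because ACTHZ carries eight bits of fractional HZ. A hedged worked example, using the idealized ACTHZ for HZ=100:

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
        uint64_t acthz = 100ULL << 8;   /* idealized ACTHZ for HZ=100 */
        uint32_t nsec_per_jiffy = (uint32_t)((NSEC_PER_SEC << 8) / acthz);

        /* the two <<8 scalings cancel: 10000000 ns, i.e. one 10 ms tick */
        printf("%u\n", nsec_per_jiffy);
        return 0;
    }
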
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 5f1bb8e2008f..f6117a4c7cb8 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -652,6 +652,8 @@ int do_adjtimex(struct timex *txc)
652 struct timespec delta; 652 struct timespec delta;
653 delta.tv_sec = txc->time.tv_sec; 653 delta.tv_sec = txc->time.tv_sec;
654 delta.tv_nsec = txc->time.tv_usec; 654 delta.tv_nsec = txc->time.tv_usec;
655 if (!capable(CAP_SYS_TIME))
656 return -EPERM;
655 if (!(txc->modes & ADJ_NANO)) 657 if (!(txc->modes & ADJ_NANO))
656 delta.tv_nsec *= 1000; 658 delta.tv_nsec *= 1000;
657 result = timekeeping_inject_offset(&delta); 659 result = timekeeping_inject_offset(&delta);
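
The capability check added above lands in the ADJ_SETOFFSET path; the user-space call it now gates looks roughly like this (a sketch, assuming headers that define ADJ_SETOFFSET, which is new in 2.6.39):

    #include <stdio.h>
    #include <sys/timex.h>

    int main(void)
    {
        struct timex tx = { 0 };

        tx.modes = ADJ_SETOFFSET | ADJ_NANO;    /* time.tv_usec holds ns */
        tx.time.tv_sec = 0;
        tx.time.tv_usec = 500000000;            /* step clock forward 0.5 s */

        if (adjtimex(&tx) == -1)
            perror("adjtimex");                 /* EPERM without CAP_SYS_TIME */
        return 0;
    }
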
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 2f3b585b8d7d..a5d0a3a85dd8 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -236,7 +236,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
236 unsigned int timer_flag) 236 unsigned int timer_flag)
237{ 237{
238 /* 238 /*
239 * It doesnt matter which lock we take: 239 * It doesn't matter which lock we take:
240 */ 240 */
241 raw_spinlock_t *lock; 241 raw_spinlock_t *lock;
242 struct entry *entry, input; 242 struct entry *entry, input;
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 7aa40f8e182d..6957aa298dfa 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -850,29 +850,21 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
850 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL); 850 __blk_add_trace(bt, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL);
851} 851}
852 852
853static void blk_add_trace_unplug_io(void *ignore, struct request_queue *q) 853static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
854 unsigned int depth, bool explicit)
854{ 855{
855 struct blk_trace *bt = q->blk_trace; 856 struct blk_trace *bt = q->blk_trace;
856 857
857 if (bt) { 858 if (bt) {
858 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE]; 859 __be64 rpdu = cpu_to_be64(depth);
859 __be64 rpdu = cpu_to_be64(pdu); 860 u32 what;
860 861
861 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_IO, 0, 862 if (explicit)
862 sizeof(rpdu), &rpdu); 863 what = BLK_TA_UNPLUG_IO;
863 } 864 else
864} 865 what = BLK_TA_UNPLUG_TIMER;
865
866static void blk_add_trace_unplug_timer(void *ignore, struct request_queue *q)
867{
868 struct blk_trace *bt = q->blk_trace;
869
870 if (bt) {
871 unsigned int pdu = q->rq.count[READ] + q->rq.count[WRITE];
872 __be64 rpdu = cpu_to_be64(pdu);
873 866
874 __blk_add_trace(bt, 0, 0, 0, BLK_TA_UNPLUG_TIMER, 0, 867 __blk_add_trace(bt, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu);
875 sizeof(rpdu), &rpdu);
876 } 868 }
877} 869}
878 870
@@ -1015,9 +1007,7 @@ static void blk_register_tracepoints(void)
1015 WARN_ON(ret); 1007 WARN_ON(ret);
1016 ret = register_trace_block_plug(blk_add_trace_plug, NULL); 1008 ret = register_trace_block_plug(blk_add_trace_plug, NULL);
1017 WARN_ON(ret); 1009 WARN_ON(ret);
1018 ret = register_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL); 1010 ret = register_trace_block_unplug(blk_add_trace_unplug, NULL);
1019 WARN_ON(ret);
1020 ret = register_trace_block_unplug_io(blk_add_trace_unplug_io, NULL);
1021 WARN_ON(ret); 1011 WARN_ON(ret);
1022 ret = register_trace_block_split(blk_add_trace_split, NULL); 1012 ret = register_trace_block_split(blk_add_trace_split, NULL);
1023 WARN_ON(ret); 1013 WARN_ON(ret);
@@ -1032,8 +1022,7 @@ static void blk_unregister_tracepoints(void)
1032 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL); 1022 unregister_trace_block_rq_remap(blk_add_trace_rq_remap, NULL);
1033 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL); 1023 unregister_trace_block_bio_remap(blk_add_trace_bio_remap, NULL);
1034 unregister_trace_block_split(blk_add_trace_split, NULL); 1024 unregister_trace_block_split(blk_add_trace_split, NULL);
1035 unregister_trace_block_unplug_io(blk_add_trace_unplug_io, NULL); 1025 unregister_trace_block_unplug(blk_add_trace_unplug, NULL);
1036 unregister_trace_block_unplug_timer(blk_add_trace_unplug_timer, NULL);
1037 unregister_trace_block_plug(blk_add_trace_plug, NULL); 1026 unregister_trace_block_plug(blk_add_trace_plug, NULL);
1038 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL); 1027 unregister_trace_block_sleeprq(blk_add_trace_sleeprq, NULL);
1039 unregister_trace_block_getrq(blk_add_trace_getrq, NULL); 1028 unregister_trace_block_getrq(blk_add_trace_getrq, NULL);
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index c075f4ea6b94..ee24fa1935ac 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1268,7 +1268,7 @@ static int ftrace_update_code(struct module *mod)
1268 p->flags = 0L; 1268 p->flags = 0L;
1269 1269
1270 /* 1270 /*
1271 * Do the initial record convertion from mcount jump 1271 * Do the initial record conversion from mcount jump
1272 * to the NOP instructions. 1272 * to the NOP instructions.
1273 */ 1273 */
1274 if (!ftrace_code_disable(mod, p)) { 1274 if (!ftrace_code_disable(mod, p)) {
@@ -3425,7 +3425,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
3425 atomic_set(&t->tracing_graph_pause, 0); 3425 atomic_set(&t->tracing_graph_pause, 0);
3426 atomic_set(&t->trace_overrun, 0); 3426 atomic_set(&t->trace_overrun, 0);
3427 t->ftrace_timestamp = 0; 3427 t->ftrace_timestamp = 0;
3428 /* make curr_ret_stack visable before we add the ret_stack */ 3428 /* make curr_ret_stack visible before we add the ret_stack */
3429 smp_wmb(); 3429 smp_wmb();
3430 t->ret_stack = ret_stack; 3430 t->ret_stack = ret_stack;
3431} 3431}
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d9c8bcafb120..0ef7b4b2a1f7 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1478,7 +1478,7 @@ static inline unsigned long rb_page_entries(struct buffer_page *bpage)
1478 return local_read(&bpage->entries) & RB_WRITE_MASK; 1478 return local_read(&bpage->entries) & RB_WRITE_MASK;
1479} 1479}
1480 1480
1481/* Size is determined by what has been commited */ 1481/* Size is determined by what has been committed */
1482static inline unsigned rb_page_size(struct buffer_page *bpage) 1482static inline unsigned rb_page_size(struct buffer_page *bpage)
1483{ 1483{
1484 return rb_page_commit(bpage); 1484 return rb_page_commit(bpage);
@@ -2932,7 +2932,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
2932 /* 2932 /*
2933 * cpu_buffer->pages just needs to point to the buffer, it 2933 * cpu_buffer->pages just needs to point to the buffer, it
2934 * has no specific buffer page to point to. Lets move it out 2934 * has no specific buffer page to point to. Lets move it out
2935 * of our way so we don't accidently swap it. 2935 * of our way so we don't accidentally swap it.
2936 */ 2936 */
2937 cpu_buffer->pages = reader->list.prev; 2937 cpu_buffer->pages = reader->list.prev;
2938 2938
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9541c27c1cf2..d38c16a06a6f 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3239,7 +3239,7 @@ waitagain:
3239 trace_seq_init(&iter->seq); 3239 trace_seq_init(&iter->seq);
3240 3240
3241 /* 3241 /*
3242 * If there was nothing to send to user, inspite of consuming trace 3242 * If there was nothing to send to user, in spite of consuming trace
3243 * entries, go back to wait for more entries. 3243 * entries, go back to wait for more entries.
3244 */ 3244 */
3245 if (sret == -EBUSY) 3245 if (sret == -EBUSY)
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 685a67d55db0..6302747a1398 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -46,7 +46,7 @@ u64 notrace trace_clock_local(void)
46} 46}
47 47
48/* 48/*
49 * trace_clock(): 'inbetween' trace clock. Not completely serialized, 49 * trace_clock(): 'between' trace clock. Not completely serialized,
50 * but not completely incorrect when crossing CPUs either. 50 * but not completely incorrect when crossing CPUs either.
51 * 51 *
52 * This is based on cpu_clock(), which will allow at most ~1 jiffy of 52 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 1516cb3ec549..e32744c84d94 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -27,7 +27,7 @@
27 * in the structure. 27 * in the structure.
28 * 28 *
29 * * for structures within structures, the format of the internal 29 * * for structures within structures, the format of the internal
30 * structure is layed out. This allows the internal structure 30 * structure is laid out. This allows the internal structure
31 * to be deciphered for the format file. Although these macros 31 * to be deciphered for the format file. Although these macros
32 * may become out of sync with the internal structure, they 32 * may become out of sync with the internal structure, they
33 * will create a compile error if it happens. Since the 33 * will create a compile error if it happens. Since the
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 76b05980225c..962cdb24ed81 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -905,7 +905,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
905 * 905 *
906 * returns 1 if 906 * returns 1 if
907 * - we are inside irq code 907 * - we are inside irq code
908 * - we just extered irq code 908 * - we just entered irq code
909 * 909 *
910 * returns 0 if 910 * returns 0 if
911 * - funcgraph-interrupts option is set 911 * - funcgraph-interrupts option is set
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 92b6e1e12d98..a4969b47afc1 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -80,7 +80,7 @@ static struct tracer_flags tracer_flags = {
80 * skip the latency if the sequence has changed - some other section 80 * skip the latency if the sequence has changed - some other section
81 * did a maximum and could disturb our measurement with serial console 81 * did a maximum and could disturb our measurement with serial console
82 * printouts, etc. Truly coinciding maximum latencies should be rare 82 * printouts, etc. Truly coinciding maximum latencies should be rare
83 * and what happens together happens separately as well, so this doesnt 83 * and what happens together happens separately as well, so this doesn't
84 * decrease the validity of the maximum found: 84 * decrease the validity of the maximum found:
85 */ 85 */
86static __cacheline_aligned_in_smp unsigned long max_sequence; 86static __cacheline_aligned_in_smp unsigned long max_sequence;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8435b43b1782..35d55a386145 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1839,7 +1839,7 @@ static void unregister_probe_event(struct trace_probe *tp)
1839 kfree(tp->call.print_fmt); 1839 kfree(tp->call.print_fmt);
1840} 1840}
1841 1841
1842/* Make a debugfs interface for controling probe points */ 1842/* Make a debugfs interface for controlling probe points */
1843static __init int init_kprobe_trace(void) 1843static __init int init_kprobe_trace(void)
1844{ 1844{
1845 struct dentry *d_tracer; 1845 struct dentry *d_tracer;
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index eb27fd3430a2..92cb706c7fc8 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -20,7 +20,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register);
20 20
21/* 21/*
22 * Removes a registered user return notifier. Must be called from atomic 22 * Removes a registered user return notifier. Must be called from atomic
23 * context, and from the same cpu registration occured in. 23 * context, and on the same cpu that registration occurred on.
24 */ 24 */
25void user_return_notifier_unregister(struct user_return_notifier *urn) 25void user_return_notifier_unregister(struct user_return_notifier *urn)
26{ 26{
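
A hedged in-kernel sketch of the register/unregister contract the comment describes (my_urn and friends are hypothetical):

    #include <linux/user-return-notifier.h>
    #include <linux/percpu.h>

    static DEFINE_PER_CPU(struct user_return_notifier, my_urn);

    /* fires on this cpu just before the task returns to user space */
    static void my_on_user_return(struct user_return_notifier *urn)
    {
        user_return_notifier_unregister(urn);   /* same cpu, atomic context */
    }

    static void my_arm_on_this_cpu(void)
    {
        struct user_return_notifier *urn = &get_cpu_var(my_urn);

        urn->on_user_return = my_on_user_return;
        user_return_notifier_register(urn);
        put_cpu_var(my_urn);
    }
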
diff --git a/kernel/wait.c b/kernel/wait.c
index b0310eb6cc1e..f45ea8d2a1ce 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -142,7 +142,7 @@ EXPORT_SYMBOL(finish_wait);
142 * woken up through the queue. 142 * woken up through the queue.
143 * 143 *
144 * This prevents waiter starvation where an exclusive waiter 144 * This prevents waiter starvation where an exclusive waiter
145 * aborts and is woken up concurrently and noone wakes up 145 * aborts and is woken up concurrently and no one wakes up
146 * the next waiter. 146 * the next waiter.
147 */ 147 */
148void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait, 148void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 04ef830690ec..8859a41806dd 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1291,7 +1291,7 @@ __acquires(&gcwq->lock)
1291 return true; 1291 return true;
1292 spin_unlock_irq(&gcwq->lock); 1292 spin_unlock_irq(&gcwq->lock);
1293 1293
1294 /* CPU has come up inbetween, retry migration */ 1294 /* CPU has come up in between, retry migration */
1295 cpu_relax(); 1295 cpu_relax();
1296 } 1296 }
1297} 1297}