path: root/kernel
author    Lucas De Marchi <lucas.demarchi@profusion.mobi>  2011-03-30 21:57:33 -0400
committer Lucas De Marchi <lucas.demarchi@profusion.mobi>  2011-03-31 10:26:23 -0400
commit    25985edcedea6396277003854657b5f3cb31a628 (patch)
tree      f026e810210a2ee7290caeb737c23cb6472b7c38 /kernel
parent    6aba74f2791287ec407e0f92487a725a25908067 (diff)
Fix common misspellings
Fixes generated by 'codespell' and manually reviewed.

Signed-off-by: Lucas De Marchi <lucas.demarchi@profusion.mobi>
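As an aside, the scan behind these fixes is conceptually simple: walk the tree, look each word up in a known-misspelling dictionary, and report the hits for manual review. The Python sketch below illustrates that idea only; the dictionary holds a few example entries taken from this patch, not codespell's actual word list, and the script is not the tool that was actually used.

    # Minimal sketch of a codespell-style scan (illustrative only).
    # The TYPOS dictionary is a tiny example subset drawn from this patch.
    import re
    import sys
    from pathlib import Path

    TYPOS = {
        "dieing": "dying",
        "boundry": "boundary",
        "recieve": "receive",
        "preceeded": "preceded",
        "noone": "no one",
        "occured": "occurred",
    }

    WORD_RE = re.compile(r"[A-Za-z]+")

    def scan(root):
        # Scan C sources and headers under 'root' and report suspected typos.
        for path in Path(root).rglob("*.[ch]"):
            try:
                text = path.read_text(errors="ignore")
            except OSError:
                continue
            for lineno, line in enumerate(text.splitlines(), start=1):
                for word in WORD_RE.findall(line):
                    fix = TYPOS.get(word.lower())
                    if fix:
                        print("%s:%d: %s -> %s" % (path, lineno, word, fix))

    if __name__ == "__main__":
        scan(sys.argv[1] if len(sys.argv) > 1 else "kernel")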
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/audit_tree.c  2
-rw-r--r--  kernel/auditsc.c  2
-rw-r--r--  kernel/cgroup.c  2
-rw-r--r--  kernel/cpu.c  2
-rw-r--r--  kernel/debug/debug_core.c  2
-rw-r--r--  kernel/debug/kdb/kdb_main.c  6
-rw-r--r--  kernel/debug/kdb/kdb_support.c  2
-rw-r--r--  kernel/exit.c  2
-rw-r--r--  kernel/irq/chip.c  2
-rw-r--r--  kernel/irq/migration.c  2
-rw-r--r--  kernel/kexec.c  6
-rw-r--r--  kernel/kthread.c  2
-rw-r--r--  kernel/latencytop.c  2
-rw-r--r--  kernel/lockdep.c  4
-rw-r--r--  kernel/module.c  6
-rw-r--r--  kernel/mutex.c  2
-rw-r--r--  kernel/padata.c  8
-rw-r--r--  kernel/params.c  2
-rw-r--r--  kernel/posix-cpu-timers.c  2
-rw-r--r--  kernel/posix-timers.c  2
-rw-r--r--  kernel/power/main.c  2
-rw-r--r--  kernel/sched.c  6
-rw-r--r--  kernel/sched_autogroup.c  2
-rw-r--r--  kernel/sched_fair.c  2
-rw-r--r--  kernel/sched_rt.c  4
-rw-r--r--  kernel/signal.c  2
-rw-r--r--  kernel/softirq.c  2
-rw-r--r--  kernel/time/jiffies.c  2
-rw-r--r--  kernel/time/timer_stats.c  2
-rw-r--r--  kernel/trace/ftrace.c  4
-rw-r--r--  kernel/trace/ring_buffer.c  4
-rw-r--r--  kernel/trace/trace.c  2
-rw-r--r--  kernel/trace/trace_clock.c  2
-rw-r--r--  kernel/trace/trace_entries.h  2
-rw-r--r--  kernel/trace/trace_functions_graph.c  2
-rw-r--r--  kernel/trace/trace_irqsoff.c  2
-rw-r--r--  kernel/trace/trace_kprobe.c  2
-rw-r--r--  kernel/user-return-notifier.c  2
-rw-r--r--  kernel/wait.c  2
-rw-r--r--  kernel/workqueue.c  2
40 files changed, 55 insertions, 55 deletions
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index 37b2bea170c..e99dda04b12 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -607,7 +607,7 @@ void audit_trim_trees(void)
 spin_lock(&hash_lock);
 list_for_each_entry(node, &tree->chunks, list) {
 struct audit_chunk *chunk = find_chunk(node);
-/* this could be NULL if the watch is dieing else where... */
+/* this could be NULL if the watch is dying else where... */
 struct inode *inode = chunk->mark.i.inode;
 node->index |= 1U<<31;
 if (iterate_mounts(compare_root, inode, root_mnt))
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index f49a0318c2e..b33513a08be 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -1011,7 +1011,7 @@ static int audit_log_pid_context(struct audit_context *context, pid_t pid,
 /*
 * to_send and len_sent accounting are very loose estimates. We aren't
 * really worried about a hard cap to MAX_EXECVE_AUDIT_LEN so much as being
-* within about 500 bytes (next page boundry)
+* within about 500 bytes (next page boundary)
 *
 * why snprintf? an int is up to 12 digits long. if we just assumed when
 * logging that a[%d]= was going to be 16 characters long we would be wasting
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e31b220a743..25c7eb52de1 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -157,7 +157,7 @@ struct css_id {
 };

 /*
-* cgroup_event represents events which userspace want to recieve.
+* cgroup_event represents events which userspace want to receive.
 */
 struct cgroup_event {
 /*
diff --git a/kernel/cpu.c b/kernel/cpu.c
index c95fc4df0fa..12b7458f23b 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -126,7 +126,7 @@ static void cpu_hotplug_done(void)
 #else /* #if CONFIG_HOTPLUG_CPU */
 static void cpu_hotplug_begin(void) {}
 static void cpu_hotplug_done(void) {}
-#endif /* #esle #if CONFIG_HOTPLUG_CPU */
+#endif /* #else #if CONFIG_HOTPLUG_CPU */

 /* Need to know about CPUs going up/down? */
 int __ref register_cpu_notifier(struct notifier_block *nb)
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index cefd4a11f6d..bad6786dee8 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -538,7 +538,7 @@ return_normal:

 /*
 * For single stepping, try to only enter on the processor
-* that was single stepping. To gaurd against a deadlock, the
+* that was single stepping. To guard against a deadlock, the
 * kernel will only try for the value of sstep_tries before
 * giving up and continuing on.
 */
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 6bc6e3bc4f9..be14779bcef 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -441,9 +441,9 @@ static int kdb_check_regs(void)
 * symbol name, and offset to the caller.
 *
 * The argument may consist of a numeric value (decimal or
-* hexidecimal), a symbol name, a register name (preceeded by the
+* hexidecimal), a symbol name, a register name (preceded by the
 * percent sign), an environment variable with a numeric value
-* (preceeded by a dollar sign) or a simple arithmetic expression
+* (preceded by a dollar sign) or a simple arithmetic expression
 * consisting of a symbol name, +/-, and a numeric constant value
 * (offset).
 * Parameters:
@@ -1335,7 +1335,7 @@ void kdb_print_state(const char *text, int value)
 * error The hardware-defined error code
 * reason2 kdb's current reason code.
 * Initially error but can change
-* acording to kdb state.
+* according to kdb state.
 * db_result Result code from break or debug point.
 * regs The exception frame at time of fault/breakpoint.
 * should always be valid.
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 6b2485dcb05..5532dd37aa8 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -545,7 +545,7 @@ int kdb_putword(unsigned long addr, unsigned long word, size_t size)
 * Mask for process state.
 * Notes:
 * The mask folds data from several sources into a single long value, so
-* be carefull not to overlap the bits. TASK_* bits are in the LSB,
+* be careful not to overlap the bits. TASK_* bits are in the LSB,
 * special cases like UNRUNNABLE are in the MSB. As of 2.6.10-rc1 there
 * is no overlap between TASK_* and EXIT_* but that may not always be
 * true, so EXIT_* bits are shifted left 16 bits before being stored in
diff --git a/kernel/exit.c b/kernel/exit.c
index 6a488ad2dce..f5d2f63bae0 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -841,7 +841,7 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 /* Let father know we died
 *
 * Thread signals are configurable, but you aren't going to use
-* that to send signals to arbitary processes.
+* that to send signals to arbitrary processes.
 * That stops right now.
 *
 * If the parent exec id doesn't match the exec id we saved
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 1dafc8652bd..4af1e2b244c 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -415,7 +415,7 @@ out:
 * @desc: the interrupt description structure for this irq
 *
 * Interrupt occures on the falling and/or rising edge of a hardware
-* signal. The occurence is latched into the irq controller hardware
+* signal. The occurrence is latched into the irq controller hardware
 * and must be acked in order to be reenabled. After the ack another
 * interrupt can happen on the same source even before the first one
 * is handled by the associated event handler. If this happens it
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index bc6194698df..47420908fba 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -35,7 +35,7 @@ void irq_move_masked_irq(struct irq_data *idata)
 * do the disable, re-program, enable sequence.
 * This is *not* particularly important for level triggered
 * but in a edge trigger case, we might be setting rte
-* when an active trigger is comming in. This could
+* when an active trigger is coming in. This could
 * cause some ioapics to mal-function.
 * Being paranoid i guess!
 *
diff --git a/kernel/kexec.c b/kernel/kexec.c
index ec19b92c7eb..e7e3d9788dc 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -144,7 +144,7 @@ static int do_kimage_alloc(struct kimage **rimage, unsigned long entry,
 /* Initialize the list of destination pages */
 INIT_LIST_HEAD(&image->dest_pages);

-/* Initialize the list of unuseable pages */
+/* Initialize the list of unusable pages */
 INIT_LIST_HEAD(&image->unuseable_pages);

 /* Read in the segments */
@@ -454,7 +454,7 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image,
 /* Deal with the destination pages I have inadvertently allocated.
 *
 * Ideally I would convert multi-page allocations into single
-* page allocations, and add everyting to image->dest_pages.
+* page allocations, and add everything to image->dest_pages.
 *
 * For now it is simpler to just free the pages.
 */
@@ -602,7 +602,7 @@ static void kimage_free_extra_pages(struct kimage *image)
 /* Walk through and free any extra destination pages I may have */
 kimage_free_page_list(&image->dest_pages);

-/* Walk through and free any unuseable pages I have cached */
+/* Walk through and free any unusable pages I have cached */
 kimage_free_page_list(&image->unuseable_pages);

 }
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 684ab3f7dd7..3b34d2732bc 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -139,7 +139,7 @@ static void create_kthread(struct kthread_create_info *create)
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument. @threadfn() can either call do_exit() directly if it is a
-* standalone thread for which noone will call kthread_stop(), or
+* standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called). The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
diff --git a/kernel/latencytop.c b/kernel/latencytop.c
index ee74b35e528..376066e1041 100644
--- a/kernel/latencytop.c
+++ b/kernel/latencytop.c
@@ -153,7 +153,7 @@ static inline void store_stacktrace(struct task_struct *tsk,
 }

 /**
-* __account_scheduler_latency - record an occured latency
+* __account_scheduler_latency - record an occurred latency
 * @tsk - the task struct of the task hitting the latency
 * @usecs - the duration of the latency in microseconds
 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 0d2058da80f..53a68956f13 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -2309,7 +2309,7 @@ void trace_hardirqs_on_caller(unsigned long ip)
 if (unlikely(curr->hardirqs_enabled)) {
 /*
 * Neither irq nor preemption are disabled here
-* so this is racy by nature but loosing one hit
+* so this is racy by nature but losing one hit
 * in a stat is not a big deal.
 */
 __debug_atomic_inc(redundant_hardirqs_on);
@@ -2620,7 +2620,7 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
 if (!graph_lock())
 return 0;
 /*
-* Make sure we didnt race:
+* Make sure we didn't race:
 */
 if (unlikely(hlock_class(this)->usage_mask & new_mask)) {
 graph_unlock();
diff --git a/kernel/module.c b/kernel/module.c
index 1f9f7bc56ca..d5938a5c19c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -809,7 +809,7 @@ SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
 wait_for_zero_refcount(mod);

 mutex_unlock(&module_mutex);
-/* Final destruction now noone is using it. */
+/* Final destruction now no one is using it. */
 if (mod->exit != NULL)
 mod->exit();
 blocking_notifier_call_chain(&module_notify_list,
@@ -2777,7 +2777,7 @@ static struct module *load_module(void __user *umod,
 mod->state = MODULE_STATE_COMING;

 /* Now sew it into the lists so we can get lockdep and oops
-* info during argument parsing. Noone should access us, since
+* info during argument parsing. No one should access us, since
 * strong_try_module_get() will fail.
 * lockdep/oops can run asynchronous, so use the RCU list insertion
 * function to insert in a way safe to concurrent readers.
@@ -2971,7 +2971,7 @@ static const char *get_ksymbol(struct module *mod,
 else
 nextval = (unsigned long)mod->module_core+mod->core_text_size;

-/* Scan for closest preceeding symbol, and next symbol. (ELF
+/* Scan for closest preceding symbol, and next symbol. (ELF
 starts real symbols at 1). */
 for (i = 1; i < mod->num_symtab; i++) {
 if (mod->symtab[i].st_shndx == SHN_UNDEF)
diff --git a/kernel/mutex.c b/kernel/mutex.c
index a5889fb28ec..c4195fa9890 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -245,7 +245,7 @@ __mutex_lock_common(struct mutex *lock, long state, unsigned int subclass,
 }
 __set_task_state(task, state);

-/* didnt get the lock, go to sleep: */
+/* didn't get the lock, go to sleep: */
 spin_unlock_mutex(&lock->wait_lock, flags);
 preempt_enable_no_resched();
 schedule();
diff --git a/kernel/padata.c b/kernel/padata.c
index 751019415d2..b91941df5e6 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -262,7 +262,7 @@ static void padata_reorder(struct parallel_data *pd)
 /*
 * This cpu has to do the parallel processing of the next
 * object. It's waiting in the cpu's parallelization queue,
-* so exit imediately.
+* so exit immediately.
 */
 if (PTR_ERR(padata) == -ENODATA) {
 del_timer(&pd->timer);
@@ -284,7 +284,7 @@ static void padata_reorder(struct parallel_data *pd)
 /*
 * The next object that needs serialization might have arrived to
 * the reorder queues in the meantime, we will be called again
-* from the timer function if noone else cares for it.
+* from the timer function if no one else cares for it.
 */
 if (atomic_read(&pd->reorder_objects)
 && !(pinst->flags & PADATA_RESET))
@@ -515,7 +515,7 @@ static void __padata_stop(struct padata_instance *pinst)
 put_online_cpus();
 }

-/* Replace the internal control stucture with a new one. */
+/* Replace the internal control structure with a new one. */
 static void padata_replace(struct padata_instance *pinst,
 struct parallel_data *pd_new)
 {
@@ -768,7 +768,7 @@ static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
 }

 /**
-* padata_remove_cpu - remove a cpu from the one or both(serial and paralell)
+* padata_remove_cpu - remove a cpu from the one or both(serial and parallel)
 * padata cpumasks.
 *
 * @pinst: padata instance
diff --git a/kernel/params.c b/kernel/params.c
index 0da1411222b..7ab388a48a2 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -95,7 +95,7 @@ static int parse_one(char *param,
 /* Find parameter */
 for (i = 0; i < num_params; i++) {
 if (parameq(param, params[i].name)) {
-/* Noone handled NULL, so do it here. */
+/* No one handled NULL, so do it here. */
 if (!val && params[i].ops->set != param_set_bool)
 return -EINVAL;
 DEBUGP("They are equal! Calling %p\n",
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 67fea9d25d5..0791b13df7b 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1347,7 +1347,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)

 /*
 * Now that all the timers on our list have the firing flag,
-* noone will touch their list entries but us. We'll take
+* no one will touch their list entries but us. We'll take
 * each timer's lock before clearing its firing flag, so no
 * timer call will interfere.
 */
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 4c0124919f9..e5498d7405c 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -313,7 +313,7 @@ static void schedule_next_timer(struct k_itimer *timr)
 * restarted (i.e. we have flagged this in the sys_private entry of the
 * info block).
 *
-* To protect aginst the timer going away while the interrupt is queued,
+* To protect against the timer going away while the interrupt is queued,
 * we require that the it_requeue_pending flag be set.
 */
 void do_schedule_next_timer(struct siginfo *info)
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 8eaba5f27b1..de9aef8742f 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -224,7 +224,7 @@ power_attr(state);
 * writing to 'state'. It first should read from 'wakeup_count' and store
 * the read value. Then, after carrying out its own preparations for the system
 * transition to a sleep state, it should write the stored value to
-* 'wakeup_count'. If that fails, at least one wakeup event has occured since
+* 'wakeup_count'. If that fails, at least one wakeup event has occurred since
 * 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
 * is allowed to write to 'state', but the transition will be aborted if there
 * are any wakeup events detected after 'wakeup_count' was written to.
diff --git a/kernel/sched.c b/kernel/sched.c
index f592ce6f861..865b433fac5 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2309,7 +2309,7 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
 * Cause a process which is running on another CPU to enter
 * kernel-mode, without any delay. (to get signals handled.)
 *
-* NOTE: this function doesnt have to take the runqueue lock,
+* NOTE: this function doesn't have to take the runqueue lock,
 * because all it wants to ensure is that the remote task enters
 * the kernel. If the IPI races and the task has been migrated
 * to another CPU then no harm is done and the purpose has been
@@ -4997,7 +4997,7 @@ recheck:
 */
 raw_spin_lock_irqsave(&p->pi_lock, flags);
 /*
-* To be able to change p->policy safely, the apropriate
+* To be able to change p->policy safely, the appropriate
 * runqueue lock must be held.
 */
 rq = __task_rq_lock(p);
@@ -5705,7 +5705,7 @@ void show_state_filter(unsigned long state_filter)
 do_each_thread(g, p) {
 /*
 * reset the NMI-timeout, listing all files on a slow
-* console might take alot of time:
+* console might take a lot of time:
 */
 touch_nmi_watchdog();
 if (!state_filter || (p->state & state_filter))
diff --git a/kernel/sched_autogroup.c b/kernel/sched_autogroup.c
index 5946ac51560..429242f3c48 100644
--- a/kernel/sched_autogroup.c
+++ b/kernel/sched_autogroup.c
@@ -179,7 +179,7 @@ void sched_autogroup_create_attach(struct task_struct *p)
 struct autogroup *ag = autogroup_create();

 autogroup_move_group(p, ag);
-/* drop extra refrence added by autogroup_create() */
+/* drop extra reference added by autogroup_create() */
 autogroup_kref_put(ag);
 }
 EXPORT_SYMBOL(sched_autogroup_create_attach);
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 3f7ec9e27ee..3cb7f07887a 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3061,7 +3061,7 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,

 /*
 * if *imbalance is less than the average load per runnable task
-* there is no gaurantee that any tasks will be moved so we'll have
+* there is no guarantee that any tasks will be moved so we'll have
 * a think about bumping its value to force at least one task to be
 * moved
 */
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index db308cb08b7..e7cebdc65f8 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1378,7 +1378,7 @@ retry:
 task = pick_next_pushable_task(rq);
 if (task_cpu(next_task) == rq->cpu && task == next_task) {
 /*
-* If we get here, the task hasnt moved at all, but
+* If we get here, the task hasn't moved at all, but
 * it has failed to push. We will not try again,
 * since the other cpus will pull from us when they
 * are ready.
@@ -1488,7 +1488,7 @@ static int pull_rt_task(struct rq *this_rq)
 /*
 * We continue with the search, just in
 * case there's an even higher prio task
-* in another runqueue. (low likelyhood
+* in another runqueue. (low likelihood
 * but possible)
 */
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index 1186cf7fac7..f486d10f3b8 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1885,7 +1885,7 @@ relock:
 for (;;) {
 struct k_sigaction *ka;
 /*
-* Tracing can induce an artifical signal and choose sigaction.
+* Tracing can induce an artificial signal and choose sigaction.
 * The return value in @signr determines the default action,
 * but @info->si_signo is the signal number we will report.
 */
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 735d8709517..174f976c287 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -567,7 +567,7 @@ static void __tasklet_hrtimer_trampoline(unsigned long data)
 /**
 * tasklet_hrtimer_init - Init a tasklet/hrtimer combo for softirq callbacks
 * @ttimer: tasklet_hrtimer which is initialized
-* @function: hrtimer callback funtion which gets called from softirq context
+* @function: hrtimer callback function which gets called from softirq context
 * @which_clock: clock id (CLOCK_MONOTONIC/CLOCK_REALTIME)
 * @mode: hrtimer mode (HRTIMER_MODE_ABS/HRTIMER_MODE_REL)
 */
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index b2fa506667c..a470154e040 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -34,7 +34,7 @@
 * inaccuracies caused by missed or lost timer
 * interrupts and the inability for the timer
 * interrupt hardware to accuratly tick at the
-* requested HZ value. It is also not reccomended
+* requested HZ value. It is also not recommended
 * for "tick-less" systems.
 */
 #define NSEC_PER_JIFFY ((u32)((((u64)NSEC_PER_SEC)<<8)/ACTHZ))
diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
index 2f3b585b8d7..a5d0a3a85dd 100644
--- a/kernel/time/timer_stats.c
+++ b/kernel/time/timer_stats.c
@@ -236,7 +236,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
 unsigned int timer_flag)
 {
 /*
-* It doesnt matter which lock we take:
+* It doesn't matter which lock we take:
 */
 raw_spinlock_t *lock;
 struct entry *entry, input;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index c075f4ea6b9..ee24fa1935a 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1268,7 +1268,7 @@ static int ftrace_update_code(struct module *mod)
 p->flags = 0L;

 /*
-* Do the initial record convertion from mcount jump
+* Do the initial record conversion from mcount jump
 * to the NOP instructions.
 */
 if (!ftrace_code_disable(mod, p)) {
@@ -3425,7 +3425,7 @@ graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
 atomic_set(&t->tracing_graph_pause, 0);
 atomic_set(&t->trace_overrun, 0);
 t->ftrace_timestamp = 0;
-/* make curr_ret_stack visable before we add the ret_stack */
+/* make curr_ret_stack visible before we add the ret_stack */
 smp_wmb();
 t->ret_stack = ret_stack;
 }
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index d9c8bcafb12..0ef7b4b2a1f 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1478,7 +1478,7 @@ static inline unsigned long rb_page_entries(struct buffer_page *bpage)
 return local_read(&bpage->entries) & RB_WRITE_MASK;
 }

-/* Size is determined by what has been commited */
+/* Size is determined by what has been committed */
 static inline unsigned rb_page_size(struct buffer_page *bpage)
 {
 return rb_page_commit(bpage);
@@ -2932,7 +2932,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 /*
 * cpu_buffer->pages just needs to point to the buffer, it
 * has no specific buffer page to point to. Lets move it out
-* of our way so we don't accidently swap it.
+* of our way so we don't accidentally swap it.
 */
 cpu_buffer->pages = reader->list.prev;

diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 9541c27c1cf..d38c16a06a6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3239,7 +3239,7 @@ waitagain:
 trace_seq_init(&iter->seq);

 /*
-* If there was nothing to send to user, inspite of consuming trace
+* If there was nothing to send to user, in spite of consuming trace
 * entries, go back to wait for more entries.
 */
 if (sret == -EBUSY)
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 685a67d55db..6302747a139 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -46,7 +46,7 @@ u64 notrace trace_clock_local(void)
 }

 /*
-* trace_clock(): 'inbetween' trace clock. Not completely serialized,
+* trace_clock(): 'between' trace clock. Not completely serialized,
 * but not completely incorrect when crossing CPUs either.
 *
 * This is based on cpu_clock(), which will allow at most ~1 jiffy of
diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
index 1516cb3ec54..e32744c84d9 100644
--- a/kernel/trace/trace_entries.h
+++ b/kernel/trace/trace_entries.h
@@ -27,7 +27,7 @@
 * in the structure.
 *
 * * for structures within structures, the format of the internal
-* structure is layed out. This allows the internal structure
+* structure is laid out. This allows the internal structure
 * to be deciphered for the format file. Although these macros
 * may become out of sync with the internal structure, they
 * will create a compile error if it happens. Since the
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 76b05980225..962cdb24ed8 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -905,7 +905,7 @@ print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
 *
 * returns 1 if
 * - we are inside irq code
-* - we just extered irq code
+* - we just entered irq code
 *
 * retunns 0 if
 * - funcgraph-interrupts option is set
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index 92b6e1e12d9..a4969b47afc 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -80,7 +80,7 @@ static struct tracer_flags tracer_flags = {
 * skip the latency if the sequence has changed - some other section
 * did a maximum and could disturb our measurement with serial console
 * printouts, etc. Truly coinciding maximum latencies should be rare
-* and what happens together happens separately as well, so this doesnt
+* and what happens together happens separately as well, so this doesn't
 * decrease the validity of the maximum found:
 */
 static __cacheline_aligned_in_smp unsigned long max_sequence;
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index 8435b43b178..35d55a38614 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1839,7 +1839,7 @@ static void unregister_probe_event(struct trace_probe *tp)
 kfree(tp->call.print_fmt);
 }

-/* Make a debugfs interface for controling probe points */
+/* Make a debugfs interface for controlling probe points */
 static __init int init_kprobe_trace(void)
 {
 struct dentry *d_tracer;
diff --git a/kernel/user-return-notifier.c b/kernel/user-return-notifier.c
index eb27fd3430a..92cb706c7fc 100644
--- a/kernel/user-return-notifier.c
+++ b/kernel/user-return-notifier.c
@@ -20,7 +20,7 @@ EXPORT_SYMBOL_GPL(user_return_notifier_register);

 /*
 * Removes a registered user return notifier. Must be called from atomic
-* context, and from the same cpu registration occured in.
+* context, and from the same cpu registration occurred in.
 */
 void user_return_notifier_unregister(struct user_return_notifier *urn)
 {
diff --git a/kernel/wait.c b/kernel/wait.c
index b0310eb6cc1..f45ea8d2a1c 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -142,7 +142,7 @@ EXPORT_SYMBOL(finish_wait);
 * woken up through the queue.
 *
 * This prevents waiter starvation where an exclusive waiter
-* aborts and is woken up concurrently and noone wakes up
+* aborts and is woken up concurrently and no one wakes up
 * the next waiter.
 */
 void abort_exclusive_wait(wait_queue_head_t *q, wait_queue_t *wait,
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 04ef830690e..8859a41806d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1291,7 +1291,7 @@ __acquires(&gcwq->lock)
 return true;
 spin_unlock_irq(&gcwq->lock);

-/* CPU has come up inbetween, retry migration */
+/* CPU has come up in between, retry migration */
 cpu_relax();
 }
 }