Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cgroup.c                      |   9
-rw-r--r--  kernel/debug/debug_core.c            |   2
-rw-r--r--  kernel/debug/kdb/kdb_main.c          |   2
-rw-r--r--  kernel/debug/kdb/kdb_private.h       |   7
-rw-r--r--  kernel/debug/kdb/kdb_support.c       |   4
-rw-r--r--  kernel/exit.c                        |   5
-rw-r--r--  kernel/fork.c                        |  17
-rw-r--r--  kernel/kfifo.c                       |   9
-rw-r--r--  kernel/kmod.c                        |   4
-rw-r--r--  kernel/pm_qos_params.c               |  12
-rw-r--r--  kernel/power/poweroff.c              |   2
-rw-r--r--  kernel/sched.c                       |  10
-rw-r--r--  kernel/sched_fair.c                  |   2
-rw-r--r--  kernel/trace/ring_buffer.c           |   3
-rw-r--r--  kernel/trace/trace.c                 |  11
-rw-r--r--  kernel/trace/trace_events.c          | 207
-rw-r--r--  kernel/trace/trace_functions_graph.c |  10
-rw-r--r--  kernel/trace/trace_stack.c           |   2
-rw-r--r--  kernel/watchdog.c                    |   3
-rw-r--r--  kernel/workqueue.c                   |  62
20 files changed, 273 insertions(+), 110 deletions(-)
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 192f88c5b0f..ed19afd9e3f 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1791,10 +1791,11 @@ out:
 }
 
 /**
- * cgroup_attach_task_current_cg - attach task 'tsk' to current task's cgroup
+ * cgroup_attach_task_all - attach task 'tsk' to all cgroups of task 'from'
+ * @from: attach to all cgroups of a given task
  * @tsk: the task to be attached
  */
-int cgroup_attach_task_current_cg(struct task_struct *tsk)
+int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
 {
        struct cgroupfs_root *root;
        struct cgroup *cur_cg;
@@ -1802,7 +1803,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
        cgroup_lock();
        for_each_active_root(root) {
-               cur_cg = task_cgroup_from_root(current, root);
+               cur_cg = task_cgroup_from_root(from, root);
                retval = cgroup_attach_task(cur_cg, tsk);
                if (retval)
                        break;
@@ -1811,7 +1812,7 @@ int cgroup_attach_task_current_cg(struct task_struct *tsk)
 
        return retval;
 }
-EXPORT_SYMBOL_GPL(cgroup_attach_task_current_cg);
+EXPORT_SYMBOL_GPL(cgroup_attach_task_all);
 
 /*
  * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex
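
Note on the rename: callers that want the old behavior simply pass current as the source task. In this series the old entry point survives as a trivial inline wrapper (a sketch; the actual wrapper lives in include/linux/cgroup.h, not in this file):

    /* Hedged sketch of the compatibility wrapper implied by this rename. */
    static inline int cgroup_attach_task_current_cg(struct task_struct *tsk)
    {
            return cgroup_attach_task_all(current, tsk);
    }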
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index 3c2d4972d23..de407c78178 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -741,7 +741,7 @@ static struct console kgdbcons = {
 };
 
 #ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_dbg(int key, struct tty_struct *tty)
+static void sysrq_handle_dbg(int key)
 {
        if (!dbg_io_ops) {
                printk(KERN_CRIT "ERROR: No KGDB I/O module available\n");
diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
index 28b844118bb..caf057a3de0 100644
--- a/kernel/debug/kdb/kdb_main.c
+++ b/kernel/debug/kdb/kdb_main.c
@@ -1929,7 +1929,7 @@ static int kdb_sr(int argc, const char **argv)
        if (argc != 1)
                return KDB_ARGCOUNT;
        kdb_trap_printk++;
-       __handle_sysrq(*argv[1], NULL, 0);
+       __handle_sysrq(*argv[1], false);
        kdb_trap_printk--;
 
        return 0;
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index c438f545a32..be775f7e81e 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -255,7 +255,14 @@ extern void kdb_ps1(const struct task_struct *p);
 extern void kdb_print_nameval(const char *name, unsigned long val);
 extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
 extern void kdb_meminfo_proc_show(void);
+#ifdef CONFIG_KALLSYMS
 extern const char *kdb_walk_kallsyms(loff_t *pos);
+#else /* ! CONFIG_KALLSYMS */
+static inline const char *kdb_walk_kallsyms(loff_t *pos)
+{
+       return NULL;
+}
+#endif /* ! CONFIG_KALLSYMS */
 extern char *kdb_getstr(char *, size_t, char *);
 
 /* Defines for kdb_symbol_print */
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 45344d5c53d..6b2485dcb05 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -82,8 +82,8 @@ static char *kdb_name_table[100]; /* arbitrary size */
 int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
 {
        int ret = 0;
-       unsigned long symbolsize;
-       unsigned long offset;
+       unsigned long symbolsize = 0;
+       unsigned long offset = 0;
 #define knt1_size 128          /* must be >= kallsyms table size */
        char *knt1 = NULL;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 671ed56e0a4..03120229db2 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1386,8 +1386,7 @@ static int wait_task_stopped(struct wait_opts *wo,
        if (!unlikely(wo->wo_flags & WNOWAIT))
                *p_code = 0;
 
-       /* don't need the RCU readlock here as we're holding a spinlock */
-       uid = __task_cred(p)->uid;
+       uid = task_uid(p);
 unlock_sig:
        spin_unlock_irq(&p->sighand->siglock);
        if (!exit_code)
@@ -1460,7 +1459,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
        }
        if (!unlikely(wo->wo_flags & WNOWAIT))
                p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
-       uid = __task_cred(p)->uid;
+       uid = task_uid(p);
        spin_unlock_irq(&p->sighand->siglock);
 
        pid = task_pid_vnr(p);
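
Both hunks replace an open-coded __task_cred(p)->uid, which is only guaranteed safe inside an RCU read-side critical section, with task_uid(). Holding a spinlock is not an RCU read lock under every RCU configuration, so the old comment's justification was shaky. For reference, task_uid() in <linux/cred.h> of this era takes the RCU read lock itself, roughly (a sketch from memory, not part of this diff):

    #define task_cred_xxx(task, xxx)                        \
    ({                                                      \
            __typeof__(((struct cred *)NULL)->xxx) ___val;  \
            rcu_read_lock();                                \
            ___val = __task_cred((task))->xxx;              \
            rcu_read_unlock();                              \
            ___val;                                         \
    })

    #define task_uid(task)  (task_cred_xxx((task), uid))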
diff --git a/kernel/fork.c b/kernel/fork.c
index 98b450876f9..b7e9d60a675 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -300,7 +300,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-       struct vm_area_struct *mpnt, *tmp, **pprev;
+       struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
        struct rb_node **rb_link, *rb_parent;
        int retval;
        unsigned long charge;
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
        if (retval)
                goto out;
 
+       prev = NULL;
        for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                struct file *file;
 
@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                        goto fail_nomem_anon_vma_fork;
                tmp->vm_flags &= ~VM_LOCKED;
                tmp->vm_mm = mm;
-               tmp->vm_next = NULL;
+               tmp->vm_next = tmp->vm_prev = NULL;
                file = tmp->vm_file;
                if (file) {
                        struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                 */
                *pprev = tmp;
                pprev = &tmp->vm_next;
+               tmp->vm_prev = prev;
+               prev = tmp;
 
                __vma_link_rb(mm, tmp, rb_link, rb_parent);
                rb_link = &tmp->vm_rb.rb_right;
@@ -752,13 +755,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
        struct fs_struct *fs = current->fs;
        if (clone_flags & CLONE_FS) {
                /* tsk->fs is already what we want */
-               write_lock(&fs->lock);
+               spin_lock(&fs->lock);
                if (fs->in_exec) {
-                       write_unlock(&fs->lock);
+                       spin_unlock(&fs->lock);
                        return -EAGAIN;
                }
                fs->users++;
-               write_unlock(&fs->lock);
+               spin_unlock(&fs->lock);
                return 0;
        }
        tsk->fs = copy_fs_struct(fs);
@@ -1676,13 +1679,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
        if (new_fs) {
                fs = current->fs;
-               write_lock(&fs->lock);
+               spin_lock(&fs->lock);
                current->fs = new_fs;
                if (--fs->users)
                        new_fs = NULL;
                else
                        new_fs = fs;
-               write_unlock(&fs->lock);
+               spin_unlock(&fs->lock);
        }
 
        if (new_mm) {
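
The dup_mmap() hunks thread a back-pointer (vm_prev) while the copy loop walks forward, turning the duplicated vma chain into a proper doubly linked list. A minimal userspace model of the same pprev/prev pattern (hypothetical names, error handling elided):

    #include <stdlib.h>

    struct node {
            struct node *next;
            struct node *prev;
            int payload;
    };

    /* Copy a singly linked source list into a doubly linked one. */
    static struct node *dup_list(const struct node *src)
    {
            struct node *head = NULL, *prev = NULL, **pprev = &head;

            for (; src; src = src->next) {
                    struct node *tmp = malloc(sizeof(*tmp));

                    if (!tmp)
                            break;                  /* error path elided */
                    tmp->payload = src->payload;
                    tmp->next = tmp->prev = NULL;   /* like vm_next/vm_prev */
                    *pprev = tmp;                   /* forward link */
                    pprev = &tmp->next;
                    tmp->prev = prev;               /* backward link */
                    prev = tmp;
            }
            return head;
    }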
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 4502604ecad..6b5580c5764 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -503,6 +503,15 @@ unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
 }
 EXPORT_SYMBOL(__kfifo_out_r);
 
+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+       unsigned int n;
+
+       n = __kfifo_peek_n(fifo, recsize);
+       fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
 int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
        unsigned long len, unsigned int *copied, size_t recsize)
 {
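
__kfifo_skip_r() drops the oldest record without copying it out: __kfifo_peek_n() reads the record's stored length header (recsize bytes wide) and out is advanced past header plus payload. This is the backend for kfifo_skip() on record fifos; usage is roughly (a hedged sketch against the kfifo API of this era):

    struct kfifo_rec_ptr_1 fifo;    /* records with a 1-byte length header */

    if (kfifo_alloc(&fifo, 128, GFP_KERNEL))
            return -ENOMEM;
    kfifo_in(&fifo, buf, len);      /* enqueue one record */
    kfifo_skip(&fifo);              /* discard it unread */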
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 6e9b19667a8..9cd0591c96a 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -153,7 +153,9 @@ static int ____call_usermodehelper(void *data)
                goto fail;
        }
 
-       retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
+       retval = kernel_execve(sub_info->path,
+                              (const char *const *)sub_info->argv,
+                              (const char *const *)sub_info->envp);
 
        /* Exec failed? */
 fail:
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 996a4dec5f9..b7e4c362361 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
 /**
  * pm_qos_add_request - inserts new qos request into the list
- * @pm_qos_class: identifies which list of qos request to us
+ * @dep: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
  * @value: defines the qos request
  *
  * This function inserts a new entry in the pm_qos_class list of requested qos
  * performance characteristics. It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters, and returns the pm_qos_request list
- * element as a handle for use in updating and removal. Call needs to save
- * this handle for later use.
+ * for the pm_qos_class of parameters and initializes the pm_qos_request_list
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
  */
+
 void pm_qos_add_request(struct pm_qos_request_list *dep,
                        int pm_qos_class, s32 value)
 {
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 
        pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
        if (pm_qos_class >= 0) {
-               struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req));
+               struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
                if (!req)
                        return -ENOMEM;
 
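
The kzalloc() change is a real bug fix, not a cleanup: the arguments were transposed. With them swapped, the allocation size was the numeric value of GFP_KERNEL (0xd0, i.e. 208 bytes on common configs) and sizeof(*req) was passed as nonsense GFP flags. The slab API expects size first:

    void *kzalloc(size_t size, gfp_t flags);    /* size, then flags */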
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index e8b33700627..d52359374e8 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -24,7 +24,7 @@ static void do_poweroff(struct work_struct *dummy)
 
 static DECLARE_WORK(poweroff_work, do_poweroff);
 
-static void handle_poweroff(int key, struct tty_struct *tty)
+static void handle_poweroff(int key)
 {
        /* run sysrq poweroff on boot cpu */
        schedule_work_on(cpumask_first(cpu_online_mask), &poweroff_work);
diff --git a/kernel/sched.c b/kernel/sched.c
index 41541d79e3c..09b574e7f4d 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3865,8 +3865,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
                /*
                 * Owner changed, break to re-assess state.
                 */
-               if (lock->owner != owner)
+               if (lock->owner != owner) {
+                       /*
+                        * If the lock has switched to a different owner,
+                        * we likely have heavy contention. Return 0 to quit
+                        * optimistic spinning and not contend further:
+                        */
+                       if (lock->owner)
+                               return 0;
                        break;
+               }
 
                /*
                 * Is that owner really running on that cpu?
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 806d1b227a2..ab661ebc489 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -3752,6 +3752,8 @@ static void task_fork_fair(struct task_struct *p)
 
        raw_spin_lock_irqsave(&rq->lock, flags);
 
+       update_rq_clock(rq);
+
        if (unlikely(task_cpu(p) != this_cpu))
                __set_task_cpu(p, this_cpu);
 
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3632ce87674..19cccc3c302 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -3846,6 +3846,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                rpos = reader->read;
                pos += size;
 
+               if (rpos >= commit)
+                       break;
+
                event = rb_reader_event(cpu_buffer);
                size = rb_event_length(event);
        } while (len > size);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ba14a22be4c..9ec59f54115 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3463,6 +3463,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
                   size_t cnt, loff_t *fpos)
 {
        char *buf;
+       size_t written;
 
        if (tracing_disabled)
                return -EINVAL;
@@ -3484,11 +3485,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
        } else
                buf[cnt] = '\0';
 
-       cnt = mark_printk("%s", buf);
+       written = mark_printk("%s", buf);
        kfree(buf);
-       *fpos += cnt;
+       *fpos += written;
 
-       return cnt;
+       /* don't tell userspace we wrote more - it might confuse them */
+       if (written > cnt)
+               written = cnt;
+
+       return written;
 }
 
 static int tracing_clock_show(struct seq_file *m, void *v)
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 09b4fa6e4d3..4c758f14632 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -598,88 +598,165 @@ out:
        return ret;
 }
 
-static void print_event_fields(struct trace_seq *s, struct list_head *head)
+enum {
+       FORMAT_HEADER           = 1,
+       FORMAT_PRINTFMT         = 2,
+};
+
+static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
+       struct ftrace_event_call *call = m->private;
        struct ftrace_event_field *field;
+       struct list_head *head;
 
-       list_for_each_entry_reverse(field, head, link) {
-               /*
-                * Smartly shows the array type(except dynamic array).
-                * Normal:
-                *      field:TYPE VAR
-                * If TYPE := TYPE[LEN], it is shown:
-                *      field:TYPE VAR[LEN]
-                */
-               const char *array_descriptor = strchr(field->type, '[');
+       (*pos)++;
 
-               if (!strncmp(field->type, "__data_loc", 10))
-                       array_descriptor = NULL;
+       switch ((unsigned long)v) {
+       case FORMAT_HEADER:
+               head = &ftrace_common_fields;
 
-               if (!array_descriptor) {
-                       trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
-                                       "\tsize:%u;\tsigned:%d;\n",
-                                       field->type, field->name, field->offset,
-                                       field->size, !!field->is_signed);
-               } else {
-                       trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
-                                       "\tsize:%u;\tsigned:%d;\n",
-                                       (int)(array_descriptor - field->type),
-                                       field->type, field->name,
-                                       array_descriptor, field->offset,
-                                       field->size, !!field->is_signed);
-               }
+               if (unlikely(list_empty(head)))
+                       return NULL;
+
+               field = list_entry(head->prev, struct ftrace_event_field, link);
+               return field;
+
+       case FORMAT_PRINTFMT:
+               /* all done */
+               return NULL;
+       }
+
+       head = trace_get_fields(call);
+
+       /*
+        * To separate common fields from event fields, the
+        * LSB is set on the first event field. Clear it in case.
+        */
+       v = (void *)((unsigned long)v & ~1L);
+
+       field = v;
+       /*
+        * If this is a common field, and at the end of the list, then
+        * continue with main list.
+        */
+       if (field->link.prev == &ftrace_common_fields) {
+               if (unlikely(list_empty(head)))
+                       return NULL;
+               field = list_entry(head->prev, struct ftrace_event_field, link);
+               /* Set the LSB to notify f_show to print an extra newline */
+               field = (struct ftrace_event_field *)
+                       ((unsigned long)field | 1);
+               return field;
        }
+
+       /* If we are done tell f_show to print the format */
+       if (field->link.prev == head)
+               return (void *)FORMAT_PRINTFMT;
+
+       field = list_entry(field->link.prev, struct ftrace_event_field, link);
+
+       return field;
 }
 
-static ssize_t
-event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
-                 loff_t *ppos)
+static void *f_start(struct seq_file *m, loff_t *pos)
 {
-       struct ftrace_event_call *call = filp->private_data;
-       struct list_head *head;
-       struct trace_seq *s;
-       char *buf;
-       int r;
+       loff_t l = 0;
+       void *p;
 
-       if (*ppos)
+       /* Start by showing the header */
+       if (!*pos)
+               return (void *)FORMAT_HEADER;
+
+       p = (void *)FORMAT_HEADER;
+       do {
+               p = f_next(m, p, &l);
+       } while (p && l < *pos);
+
+       return p;
+}
+
+static int f_show(struct seq_file *m, void *v)
+{
+       struct ftrace_event_call *call = m->private;
+       struct ftrace_event_field *field;
+       const char *array_descriptor;
+
+       switch ((unsigned long)v) {
+       case FORMAT_HEADER:
+               seq_printf(m, "name: %s\n", call->name);
+               seq_printf(m, "ID: %d\n", call->event.type);
+               seq_printf(m, "format:\n");
                return 0;
 
-       s = kmalloc(sizeof(*s), GFP_KERNEL);
-       if (!s)
-               return -ENOMEM;
+       case FORMAT_PRINTFMT:
+               seq_printf(m, "\nprint fmt: %s\n",
+                          call->print_fmt);
+               return 0;
+       }
 
-       trace_seq_init(s);
+       /*
+        * To separate common fields from event fields, the
+        * LSB is set on the first event field. Clear it and
+        * print a newline if it is set.
+        */
+       if ((unsigned long)v & 1) {
+               seq_putc(m, '\n');
+               v = (void *)((unsigned long)v & ~1L);
+       }
 
-       trace_seq_printf(s, "name: %s\n", call->name);
-       trace_seq_printf(s, "ID: %d\n", call->event.type);
-       trace_seq_printf(s, "format:\n");
+       field = v;
 
-       /* print common fields */
-       print_event_fields(s, &ftrace_common_fields);
+       /*
+        * Smartly shows the array type(except dynamic array).
+        * Normal:
+        *      field:TYPE VAR
+        * If TYPE := TYPE[LEN], it is shown:
+        *      field:TYPE VAR[LEN]
+        */
+       array_descriptor = strchr(field->type, '[');
 
-       trace_seq_putc(s, '\n');
+       if (!strncmp(field->type, "__data_loc", 10))
+               array_descriptor = NULL;
 
-       /* print event specific fields */
-       head = trace_get_fields(call);
-       print_event_fields(s, head);
+       if (!array_descriptor)
+               seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                          field->type, field->name, field->offset,
+                          field->size, !!field->is_signed);
+       else
+               seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                          (int)(array_descriptor - field->type),
+                          field->type, field->name,
+                          array_descriptor, field->offset,
+                          field->size, !!field->is_signed);
 
-       r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt);
+       return 0;
+}
 
-       if (!r) {
-               /*
-                * ug! The format output is bigger than a PAGE!!
-                */
-               buf = "FORMAT TOO BIG\n";
-               r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                           buf, strlen(buf));
-               goto out;
-       }
+static void f_stop(struct seq_file *m, void *p)
+{
+}
 
-       r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                   s->buffer, s->len);
- out:
-       kfree(s);
-       return r;
+static const struct seq_operations trace_format_seq_ops = {
+       .start          = f_start,
+       .next           = f_next,
+       .stop           = f_stop,
+       .show           = f_show,
+};
+
+static int trace_format_open(struct inode *inode, struct file *file)
+{
+       struct ftrace_event_call *call = inode->i_private;
+       struct seq_file *m;
+       int ret;
+
+       ret = seq_open(file, &trace_format_seq_ops);
+       if (ret < 0)
+               return ret;
+
+       m = file->private_data;
+       m->private = call;
+
+       return 0;
 }
 
 static ssize_t
@@ -877,8 +954,10 @@ static const struct file_operations ftrace_enable_fops = {
 };
 
 static const struct file_operations ftrace_event_format_fops = {
-       .open = tracing_open_generic,
-       .read = event_format_read,
+       .open = trace_format_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = seq_release,
 };
 
 static const struct file_operations ftrace_event_id_fops = {
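
The rewrite replaces a one-shot trace_seq buffer, which could overflow on events whose format exceeded a page (the old "FORMAT TOO BIG" fallback), with a seq_file iterator that emits the header, then one field per show() call, then the print fmt line, so output length is unbounded. The contract that f_start/f_next/f_show/f_stop implement is the standard seq_file one; the core drives the ops roughly like this (a simplified sketch of fs/seq_file.c):

    p = ops->start(m, &pos);            /* position iterator at *pos */
    while (p) {
            if (ops->show(m, p))        /* format one element */
                    break;
            p = ops->next(m, p, &pos);  /* advance, bumping *pos */
    }
    ops->stop(m, p);                    /* per-pass cleanup */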
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6bff2362578..6f233698518 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter,
         * if the output fails.
         */
        data->ent = *curr;
-       data->ret = *next;
+       /*
+        * If the next event is not a return type, then
+        * we only care about what type it is. Otherwise we can
+        * safely copy the entire event.
+        */
+       if (next->ent.type == TRACE_GRAPH_RET)
+               data->ret = *next;
+       else
+               data->ret.ent.type = next->ent.type;
        }
 }
 
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 056468eae7c..a6b7e0e0f3e 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -249,7 +249,7 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 {
        unsigned long addr = stack_dump_trace[i];
 
-       return seq_printf(m, "%pF\n", (void *)addr);
+       return seq_printf(m, "%pS\n", (void *)addr);
 }
 
 static void print_disabled(struct seq_file *m)
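
%pF and %pS differ only on architectures that use function descriptors (ia64, ppc64, parisc64): %pF dereferences the descriptor before the symbol lookup, %pS resolves the address as given. stack_dump_trace[] holds raw text addresses captured from the stack, so %pS is the right specifier:

    seq_printf(m, "%pS\n", (void *)addr);   /* e.g. "schedule+0x4a/0x90" */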
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 613bc1f0461..0d53c8e853b 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -206,6 +206,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
                                          struct perf_sample_data *data,
                                          struct pt_regs *regs)
 {
+       /* Ensure the watchdog never gets throttled */
+       event->hw.interrupts = 0;
+
        if (__get_cpu_var(watchdog_nmi_touch) == true) {
                __get_cpu_var(watchdog_nmi_touch) = false;
                return;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2994a0e3a61..727f24e563a 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -35,6 +35,9 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #include "workqueue_sched.h"
 
 enum {
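
Convention behind the new include pair: exactly one compilation unit defines CREATE_TRACE_POINTS before including the event header, which instantiates the tracepoint bodies (here, the workqueue_execute_start/end points fired from process_one_work() further down). Every other user includes the header plainly:

    /* kernel/workqueue.c, the single instantiating translation unit: */
    #define CREATE_TRACE_POINTS
    #include <trace/events/workqueue.h>

    /* any other file that fires or hooks the same events: */
    #include <trace/events/workqueue.h>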
@@ -87,7 +90,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected. Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -195,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
        cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)    cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask) for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)  alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)  zalloc_cpumask_var((maskp), (gfp))
 #define free_mayday_mask(mask)         free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
@@ -940,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        struct global_cwq *gcwq;
        struct cpu_workqueue_struct *cwq;
        struct list_head *worklist;
+       unsigned int work_flags;
        unsigned long flags;
 
        debug_work_activate(work);
 
+       if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+               return;
+
        /* determine gcwq to use */
        if (!(wq->flags & WQ_UNBOUND)) {
                struct global_cwq *last_gcwq;
@@ -986,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
        BUG_ON(!list_empty(&work->entry));
 
        cwq->nr_in_flight[cwq->work_color]++;
+       work_flags = work_color_to_flags(cwq->work_color);
 
        if (likely(cwq->nr_active < cwq->max_active)) {
                cwq->nr_active++;
                worklist = gcwq_determine_ins_pos(gcwq, cwq);
-       } else
+       } else {
+               work_flags |= WORK_STRUCT_DELAYED;
                worklist = &cwq->delayed_works;
+       }
 
-       insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+       insert_work(cwq, work, worklist, work_flags);
 
        spin_unlock_irqrestore(&gcwq->lock, flags);
 }
@@ -1212,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
        struct global_cwq *gcwq = worker->gcwq;
        struct task_struct *task = worker->task;
@@ -1485,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!need_to_create_worker(gcwq))
                return false;
@@ -1659,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
        struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
        move_linked_works(work, pos, NULL);
+       __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
        cwq->nr_active++;
 }
 
@@ -1666,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1673,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                bool delayed)
 {
        /* ignore uncolored works */
        if (color == WORK_NO_COLOR)
                return;
 
        cwq->nr_in_flight[color]--;
-       cwq->nr_active--;
 
-       if (!list_empty(&cwq->delayed_works)) {
-               /* one down, submit a delayed one */
-               if (cwq->nr_active < cwq->max_active)
-                       cwq_activate_first_delayed(cwq);
+       if (!delayed) {
+               cwq->nr_active--;
+               if (!list_empty(&cwq->delayed_works)) {
+                       /* one down, submit a delayed one */
+                       if (cwq->nr_active < cwq->max_active)
+                               cwq_activate_first_delayed(cwq);
+               }
        }
 
        /* is flush in progress and are we at the flushing tip? */
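
Together with the __queue_work() and cwq_activate_first_delayed() hunks above, this establishes the invariant that nr_active counts only activated works: a work parked over the max_active limit carries WORK_STRUCT_DELAYED, and retiring it (e.g. cancellation via try_to_grab_pending(), below) must not touch nr_active. The life cycle, as a comment sketch:

    /*
     * queue:    nr_in_flight[color]++;
     *           under max_active: nr_active++, insert on the worklist
     *           over max_active:  set WORK_STRUCT_DELAYED, park on delayed_works
     * activate: clear WORK_STRUCT_DELAYED, nr_active++
     * retire:   nr_in_flight[color]--;
     *           if not delayed: nr_active--, maybe activate a parked work
     */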
@@ -1722,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
1722 * spin_lock_irq(gcwq->lock) which is released and regrabbed. 1741 * spin_lock_irq(gcwq->lock) which is released and regrabbed.
1723 */ 1742 */
1724static void process_one_work(struct worker *worker, struct work_struct *work) 1743static void process_one_work(struct worker *worker, struct work_struct *work)
1744__releases(&gcwq->lock)
1745__acquires(&gcwq->lock)
1725{ 1746{
1726 struct cpu_workqueue_struct *cwq = get_work_cwq(work); 1747 struct cpu_workqueue_struct *cwq = get_work_cwq(work);
1727 struct global_cwq *gcwq = cwq->gcwq; 1748 struct global_cwq *gcwq = cwq->gcwq;
@@ -1790,7 +1811,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        work_clear_pending(work);
        lock_map_acquire(&cwq->wq->lockdep_map);
        lock_map_acquire(&lockdep_map);
+       trace_workqueue_execute_start(work);
        f(work);
+       /*
+        * While we must be careful to not use "work" after this, the trace
+        * point will only record its address.
+        */
+       trace_workqueue_execute_end(work);
        lock_map_release(&lockdep_map);
        lock_map_release(&cwq->wq->lockdep_map);
 
@@ -1814,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
        hlist_del_init(&worker->hentry);
        worker->current_work = NULL;
        worker->current_cwq = NULL;
-       cwq_dec_nr_in_flight(cwq, work_color);
+       cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2379,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
                        debug_work_deactivate(work);
                        list_del_init(&work->entry);
                        cwq_dec_nr_in_flight(get_work_cwq(work),
-                               get_work_color(work));
+                               get_work_color(work),
+                               *work_data_bits(work) & WORK_STRUCT_DELAYED);
                        ret = 1;
                }
        }
@@ -2782,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                if (IS_ERR(rescuer->task))
                        goto err;
 
-               wq->rescuer = rescuer;
                rescuer->task->flags |= PF_THREAD_BOUND;
                wake_up_process(rescuer->task);
        }
@@ -2824,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
        unsigned int cpu;
 
+       wq->flags |= WQ_DYING;
        flush_workqueue(wq);
 
        /*
@@ -2848,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
        if (wq->flags & WQ_RESCUER) {
                kthread_stop(wq->rescuer->task);
                free_mayday_mask(wq->mayday_mask);
+               kfree(wq->rescuer);
        }
 
        free_cwqs(wq);
@@ -3230,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
  * multiple times. To be used by cpu_callback.
  */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
        if (!(gcwq->trustee_state == state ||
              gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3536,8 +3567,7 @@ static int __init init_workqueues(void)
                spin_lock_init(&gcwq->lock);
                INIT_LIST_HEAD(&gcwq->worklist);
                gcwq->cpu = cpu;
-               if (cpu == WORK_CPU_UNBOUND)
-                       gcwq->flags |= GCWQ_DISASSOCIATED;
+               gcwq->flags |= GCWQ_DISASSOCIATED;
 
                INIT_LIST_HEAD(&gcwq->idle_list);
                for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3561,6 +3591,8 @@ static int __init init_workqueues(void)
                struct global_cwq *gcwq = get_gcwq(cpu);
                struct worker *worker;
 
+               if (cpu != WORK_CPU_UNBOUND)
+                       gcwq->flags &= ~GCWQ_DISASSOCIATED;
                worker = create_worker(gcwq, true);
                BUG_ON(!worker);
                spin_lock_irq(&gcwq->lock);
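
Finally, the WQ_DYING flag set in destroy_workqueue() pairs with the WARN_ON_ONCE() added to __queue_work(): once teardown begins, new submissions are rejected, which is what lets the subsequent flush actually terminate. In outline (a sketch of the ordering, not the full function):

    void destroy_workqueue(struct workqueue_struct *wq)
    {
            wq->flags |= WQ_DYING;  /* refuse new work from here on */
            flush_workqueue(wq);    /* drain everything already queued */
            /* ... sanity-check cwqs, stop the rescuer, kfree(wq->rescuer) ... */
    }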