Diffstat (limited to 'kernel')
-rw-r--r--  kernel/debug/kdb/kdb_bp.c                 2
-rw-r--r--  kernel/debug/kdb/kdb_private.h            7
-rw-r--r--  kernel/debug/kdb/kdb_support.c            4
-rw-r--r--  kernel/exit.c                             5
-rw-r--r--  kernel/fork.c                            17
-rw-r--r--  kernel/hrtimer.c                          3
-rw-r--r--  kernel/kfifo.c                            9
-rw-r--r--  kernel/kmod.c                             4
-rw-r--r--  kernel/mutex.c                           23
-rw-r--r--  kernel/perf_event.c                      26
-rw-r--r--  kernel/pm_qos_params.c                   12
-rw-r--r--  kernel/sched.c                           10
-rw-r--r--  kernel/sched_fair.c                       5
-rw-r--r--  kernel/sys.c                              2
-rw-r--r--  kernel/sysctl.c                           5
-rw-r--r--  kernel/trace/ftrace.c                    15
-rw-r--r--  kernel/trace/ring_buffer.c                5
-rw-r--r--  kernel/trace/trace.c                     11
-rw-r--r--  kernel/trace/trace_events.c             207
-rw-r--r--  kernel/trace/trace_functions_graph.c     10
-rw-r--r--  kernel/trace/trace_stack.c                2
-rw-r--r--  kernel/watchdog.c                        20
-rw-r--r--  kernel/workqueue.c                       62
23 files changed, 322 insertions, 144 deletions
diff --git a/kernel/debug/kdb/kdb_bp.c b/kernel/debug/kdb/kdb_bp.c
index 75bd9b3ebbb7..20059ef4459a 100644
--- a/kernel/debug/kdb/kdb_bp.c
+++ b/kernel/debug/kdb/kdb_bp.c
@@ -274,7 +274,6 @@ static int kdb_bp(int argc, const char **argv)
         int i, bpno;
         kdb_bp_t *bp, *bp_check;
         int diag;
-        int free;
         char *symname = NULL;
         long offset = 0ul;
         int nextarg;
@@ -305,7 +304,6 @@ static int kdb_bp(int argc, const char **argv)
         /*
          * Find an empty bp structure to allocate
          */
-        free = KDB_MAXBPT;
         for (bpno = 0, bp = kdb_breakpoints; bpno < KDB_MAXBPT; bpno++, bp++) {
                 if (bp->bp_free)
                         break;
diff --git a/kernel/debug/kdb/kdb_private.h b/kernel/debug/kdb/kdb_private.h
index c438f545a321..be775f7e81e0 100644
--- a/kernel/debug/kdb/kdb_private.h
+++ b/kernel/debug/kdb/kdb_private.h
@@ -255,7 +255,14 @@ extern void kdb_ps1(const struct task_struct *p);
 extern void kdb_print_nameval(const char *name, unsigned long val);
 extern void kdb_send_sig_info(struct task_struct *p, struct siginfo *info);
 extern void kdb_meminfo_proc_show(void);
+#ifdef CONFIG_KALLSYMS
 extern const char *kdb_walk_kallsyms(loff_t *pos);
+#else /* ! CONFIG_KALLSYMS */
+static inline const char *kdb_walk_kallsyms(loff_t *pos)
+{
+        return NULL;
+}
+#endif /* ! CONFIG_KALLSYMS */
 extern char *kdb_getstr(char *, size_t, char *);
 
 /* Defines for kdb_symbol_print */
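
A side note on the pattern in this hunk: when CONFIG_KALLSYMS is off, the header now supplies a static inline stub with the same signature, so callers compile unchanged either way. Below is a minimal standalone sketch of that idiom; CONFIG_FOO and foo_lookup are made-up names for illustration, not kernel symbols.

    #include <stdio.h>

    /* Toggle this to model the config option being on or off. */
    /* #define CONFIG_FOO */

    #ifdef CONFIG_FOO
    const char *foo_lookup(long *pos);      /* real implementation elsewhere */
    #else
    /* Stub: same signature, trivially reports "nothing found". */
    static inline const char *foo_lookup(long *pos)
    {
        (void)pos;
        return NULL;
    }
    #endif

    int main(void)
    {
        long pos = 0;
        const char *name = foo_lookup(&pos);
        printf("lookup: %s\n", name ? name : "(none)");
        return 0;
    }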
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 45344d5c53dd..6b2485dcb050 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -82,8 +82,8 @@ static char *kdb_name_table[100];        /* arbitrary size */
 int kdbnearsym(unsigned long addr, kdb_symtab_t *symtab)
 {
         int ret = 0;
-        unsigned long symbolsize;
-        unsigned long offset;
+        unsigned long symbolsize = 0;
+        unsigned long offset = 0;
 #define knt1_size 128                /* must be >= kallsyms table size */
         char *knt1 = NULL;
 
diff --git a/kernel/exit.c b/kernel/exit.c
index 671ed56e0a49..03120229db28 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -1386,8 +1386,7 @@ static int wait_task_stopped(struct wait_opts *wo,
         if (!unlikely(wo->wo_flags & WNOWAIT))
                 *p_code = 0;
 
-        /* don't need the RCU readlock here as we're holding a spinlock */
-        uid = __task_cred(p)->uid;
+        uid = task_uid(p);
 unlock_sig:
         spin_unlock_irq(&p->sighand->siglock);
         if (!exit_code)
@@ -1460,7 +1459,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
         }
         if (!unlikely(wo->wo_flags & WNOWAIT))
                 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
-        uid = __task_cred(p)->uid;
+        uid = task_uid(p);
         spin_unlock_irq(&p->sighand->siglock);
 
         pid = task_pid_vnr(p);
diff --git a/kernel/fork.c b/kernel/fork.c
index 98b450876f93..b7e9d60a675d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -300,7 +300,7 @@ out:
 #ifdef CONFIG_MMU
 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 {
-        struct vm_area_struct *mpnt, *tmp, **pprev;
+        struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
         struct rb_node **rb_link, *rb_parent;
         int retval;
         unsigned long charge;
@@ -328,6 +328,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
         if (retval)
                 goto out;
 
+        prev = NULL;
         for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
                 struct file *file;
 
@@ -359,7 +360,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                         goto fail_nomem_anon_vma_fork;
                 tmp->vm_flags &= ~VM_LOCKED;
                 tmp->vm_mm = mm;
-                tmp->vm_next = NULL;
+                tmp->vm_next = tmp->vm_prev = NULL;
                 file = tmp->vm_file;
                 if (file) {
                         struct inode *inode = file->f_path.dentry->d_inode;
@@ -392,6 +393,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
                  */
                 *pprev = tmp;
                 pprev = &tmp->vm_next;
+                tmp->vm_prev = prev;
+                prev = tmp;
 
                 __vma_link_rb(mm, tmp, rb_link, rb_parent);
                 rb_link = &tmp->vm_rb.rb_right;
@@ -752,13 +755,13 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
         struct fs_struct *fs = current->fs;
         if (clone_flags & CLONE_FS) {
                 /* tsk->fs is already what we want */
-                write_lock(&fs->lock);
+                spin_lock(&fs->lock);
                 if (fs->in_exec) {
-                        write_unlock(&fs->lock);
+                        spin_unlock(&fs->lock);
                         return -EAGAIN;
                 }
                 fs->users++;
-                write_unlock(&fs->lock);
+                spin_unlock(&fs->lock);
                 return 0;
         }
         tsk->fs = copy_fs_struct(fs);
@@ -1676,13 +1679,13 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 
         if (new_fs) {
                 fs = current->fs;
-                write_lock(&fs->lock);
+                spin_lock(&fs->lock);
                 current->fs = new_fs;
                 if (--fs->users)
                         new_fs = NULL;
                 else
                         new_fs = fs;
-                write_unlock(&fs->lock);
+                spin_unlock(&fs->lock);
         }
 
         if (new_mm) {
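
The dup_mmap() hunks above convert the copied VMA chain into a doubly linked list: a prev pointer is threaded through the loop and each new entry's back-link is filled in as the forward list is rebuilt. A rough standalone model of that copy loop follows; the vma struct here is a stand-in with only the fields the pattern needs.

    #include <stdio.h>
    #include <stdlib.h>

    struct vma {
        int start;
        struct vma *next;
        struct vma *prev;
    };

    /* Copy a list, maintaining forward and backward links as we go,
     * mirroring how dup_mmap() threads `prev` through its loop. */
    static struct vma *dup_list(const struct vma *src)
    {
        struct vma *head = NULL, *prev = NULL, **pprev = &head;

        for (; src; src = src->next) {
            struct vma *tmp = malloc(sizeof(*tmp));
            if (!tmp)
                exit(1);
            tmp->start = src->start;
            tmp->next = tmp->prev = NULL;
            *pprev = tmp;
            pprev = &tmp->next;
            tmp->prev = prev;   /* back-link to the node copied just before */
            prev = tmp;
        }
        return head;
    }

    int main(void)
    {
        struct vma c = { 3, NULL, NULL }, b = { 2, &c, NULL }, a = { 1, &b, NULL };
        for (struct vma *v = dup_list(&a); v; v = v->next)
            printf("%d (prev %d)\n", v->start, v->prev ? v->prev->start : 0);
        return 0;
    }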
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index ce669174f355..1decafbb6b1a 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1091,11 +1091,10 @@ EXPORT_SYMBOL_GPL(hrtimer_cancel);
  */
 ktime_t hrtimer_get_remaining(const struct hrtimer *timer)
 {
-        struct hrtimer_clock_base *base;
         unsigned long flags;
         ktime_t rem;
 
-        base = lock_hrtimer_base(timer, &flags);
+        lock_hrtimer_base(timer, &flags);
         rem = hrtimer_expires_remaining(timer);
         unlock_hrtimer_base(timer, &flags);
 
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 4502604ecadf..6b5580c57644 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -503,6 +503,15 @@ unsigned int __kfifo_out_r(struct __kfifo *fifo, void *buf,
 }
 EXPORT_SYMBOL(__kfifo_out_r);
 
+void __kfifo_skip_r(struct __kfifo *fifo, size_t recsize)
+{
+        unsigned int n;
+
+        n = __kfifo_peek_n(fifo, recsize);
+        fifo->out += n + recsize;
+}
+EXPORT_SYMBOL(__kfifo_skip_r);
+
 int __kfifo_from_user_r(struct __kfifo *fifo, const void __user *from,
         unsigned long len, unsigned int *copied, size_t recsize)
 {
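
The new __kfifo_skip_r() drops the next record from a record-mode fifo: it reads the record's length header (via __kfifo_peek_n()) and advances the free-running out index past header plus payload; because the indices are free-running unsigned counters, the addition needs no wrap handling. A loose userspace model of the skip, assuming a 1-byte length prefix:

    #include <stdio.h>
    #include <string.h>

    struct rec_fifo {
        unsigned char buf[256];
        unsigned int in;    /* free-running write index */
        unsigned int out;   /* free-running read index */
    };

    /* Model of __kfifo_peek_n(): read the 1-byte length header of the
     * next record without consuming it. */
    static unsigned int peek_n(const struct rec_fifo *f)
    {
        return f->buf[f->out % sizeof(f->buf)];
    }

    /* Model of __kfifo_skip_r() with recsize == 1: advance `out` past
     * the header and the payload it describes. */
    static void skip_r(struct rec_fifo *f)
    {
        f->out += peek_n(f) + 1;
    }

    int main(void)
    {
        struct rec_fifo f = { .in = 0, .out = 0 };
        const char *msg = "hello";

        f.buf[f.in++ % sizeof(f.buf)] = (unsigned char)strlen(msg); /* header */
        memcpy(&f.buf[f.in % sizeof(f.buf)], msg, strlen(msg));     /* payload */
        f.in += strlen(msg);

        skip_r(&f);
        printf("out advanced to %u (fifo now %s)\n",
               f.out, f.in == f.out ? "empty" : "non-empty");
        return 0;
    }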
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 6e9b19667a8d..9cd0591c96a2 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -153,7 +153,9 @@ static int ____call_usermodehelper(void *data)
                 goto fail;
         }
 
-        retval = kernel_execve(sub_info->path, sub_info->argv, sub_info->envp);
+        retval = kernel_execve(sub_info->path,
+                               (const char *const *)sub_info->argv,
+                               (const char *const *)sub_info->envp);
 
         /* Exec failed? */
 fail:
diff --git a/kernel/mutex.c b/kernel/mutex.c
index 4c0b7b3e6d2e..200407c1502f 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -36,15 +36,6 @@
 # include <asm/mutex.h>
 #endif
 
-/***
- * mutex_init - initialize the mutex
- * @lock: the mutex to be initialized
- * @key: the lock_class_key for the class; used by mutex lock debugging
- *
- * Initialize the mutex to unlocked state.
- *
- * It is not allowed to initialize an already locked mutex.
- */
 void
 __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
 {
@@ -68,7 +59,7 @@ EXPORT_SYMBOL(__mutex_init);
 static __used noinline void __sched
 __mutex_lock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_lock - acquire the mutex
  * @lock: the mutex to be acquired
  *
@@ -105,7 +96,7 @@ EXPORT_SYMBOL(mutex_lock);
 
 static __used noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
 
-/***
+/**
  * mutex_unlock - release the mutex
  * @lock: the mutex to be released
  *
@@ -364,8 +355,8 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count);
 static noinline int __sched
 __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
 
-/***
- * mutex_lock_interruptible - acquire the mutex, interruptable
+/**
+ * mutex_lock_interruptible - acquire the mutex, interruptible
  * @lock: the mutex to be acquired
  *
  * Lock the mutex like mutex_lock(), and return 0 if the mutex has
@@ -456,15 +447,15 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
         return prev == 1;
 }
 
-/***
- * mutex_trylock - try acquire the mutex, without waiting
+/**
+ * mutex_trylock - try to acquire the mutex, without waiting
  * @lock: the mutex to be acquired
  *
  * Try to acquire the mutex atomically. Returns 1 if the mutex
  * has been acquired successfully, and 0 on contention.
  *
  * NOTE: this function follows the spin_trylock() convention, so
- * it is negated to the down_trylock() return values! Be careful
+ * it is negated from the down_trylock() return values! Be careful
  * about this when converting semaphore users to mutexes.
 *
 * This function must not be used in interrupt context. The
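
The corrected kerneldoc is worth restating: mutex_trylock() returns 1 on success and 0 on contention (the spin_trylock() convention), while down_trylock() returns 0 on success. A tiny illustration of the two call patterns, with stand-in functions that only model the return conventions:

    #include <stdio.h>

    /* Stand-ins showing only the return conventions: trylock-style
     * returns 1 on success, down_trylock-style returns 0 on success. */
    static int my_mutex_trylock(void) { return 1; }  /* got the lock */
    static int my_down_trylock(void)  { return 0; }  /* got the semaphore */

    int main(void)
    {
        if (my_mutex_trylock())        /* success when the result is TRUE */
            printf("mutex acquired\n");

        if (my_down_trylock() == 0)    /* success when the result is ZERO */
            printf("semaphore acquired\n");

        return 0;
    }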
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 403d1804b198..657555a5f30f 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -402,11 +402,31 @@ static void perf_group_detach(struct perf_event *event)
         }
 }
 
+static inline int
+event_filter_match(struct perf_event *event)
+{
+        return event->cpu == -1 || event->cpu == smp_processor_id();
+}
+
 static void
 event_sched_out(struct perf_event *event,
                   struct perf_cpu_context *cpuctx,
                   struct perf_event_context *ctx)
 {
+        u64 delta;
+        /*
+         * An event which could not be activated because of
+         * filter mismatch still needs to have its timings
+         * maintained, otherwise bogus information is return
+         * via read() for time_enabled, time_running:
+         */
+        if (event->state == PERF_EVENT_STATE_INACTIVE
+            && !event_filter_match(event)) {
+                delta = ctx->time - event->tstamp_stopped;
+                event->tstamp_running += delta;
+                event->tstamp_stopped = ctx->time;
+        }
+
         if (event->state != PERF_EVENT_STATE_ACTIVE)
                 return;
 
@@ -432,9 +452,7 @@ group_sched_out(struct perf_event *group_event,
                 struct perf_event_context *ctx)
 {
         struct perf_event *event;
-
-        if (group_event->state != PERF_EVENT_STATE_ACTIVE)
-                return;
+        int state = group_event->state;
 
         event_sched_out(group_event, cpuctx, ctx);
 
@@ -444,7 +462,7 @@ group_sched_out(struct perf_event *group_event,
         list_for_each_entry(event, &group_event->sibling_list, group_entry)
                 event_sched_out(event, cpuctx, ctx);
 
-        if (group_event->attr.exclusive)
+        if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                 cpuctx->exclusive = 0;
 }
 
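
The timing fix above keeps the time_enabled/time_running bookkeeping honest for events that stayed INACTIVE because they were filtered to another CPU: the stopped timestamp is pulled forward and the gap credited before the early return. A toy model of just that arithmetic (the field names mirror the hunk; everything else is simplified away):

    #include <stdio.h>

    /* Toy model of the perf bookkeeping fixed above: an event that never
     * ran on this CPU still has its "stopped" timestamp pulled forward,
     * or read() would report a stale timing split. */
    struct event {
        unsigned long long tstamp_stopped;
        unsigned long long tstamp_running;
    };

    static void sched_out_inactive(struct event *e, unsigned long long ctx_time)
    {
        unsigned long long delta = ctx_time - e->tstamp_stopped;

        e->tstamp_running += delta;    /* credit the elapsed context time */
        e->tstamp_stopped = ctx_time;  /* and move the stop marker forward */
    }

    int main(void)
    {
        struct event e = { .tstamp_stopped = 100, .tstamp_running = 0 };

        sched_out_inactive(&e, 150);   /* context time advanced by 50 */
        printf("running=%llu stopped=%llu\n",
               e.tstamp_running, e.tstamp_stopped);
        return 0;
    }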
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c
index 996a4dec5f96..b7e4c362361b 100644
--- a/kernel/pm_qos_params.c
+++ b/kernel/pm_qos_params.c
@@ -212,15 +212,17 @@ EXPORT_SYMBOL_GPL(pm_qos_request_active);
 
 /**
  * pm_qos_add_request - inserts new qos request into the list
- * @pm_qos_class: identifies which list of qos request to us
+ * @dep: pointer to a preallocated handle
+ * @pm_qos_class: identifies which list of qos request to use
  * @value: defines the qos request
  *
  * This function inserts a new entry in the pm_qos_class list of requested qos
  * performance characteristics. It recomputes the aggregate QoS expectations
- * for the pm_qos_class of parameters, and returns the pm_qos_request list
- * element as a handle for use in updating and removal. Call needs to save
- * this handle for later use.
+ * for the pm_qos_class of parameters and initializes the pm_qos_request_list
+ * handle. Caller needs to save this handle for later use in updates and
+ * removal.
  */
+
 void pm_qos_add_request(struct pm_qos_request_list *dep,
                         int pm_qos_class, s32 value)
 {
@@ -348,7 +350,7 @@ static int pm_qos_power_open(struct inode *inode, struct file *filp)
 
         pm_qos_class = find_pm_qos_object_by_minor(iminor(inode));
         if (pm_qos_class >= 0) {
-                struct pm_qos_request_list *req = kzalloc(GFP_KERNEL, sizeof(*req));
+                struct pm_qos_request_list *req = kzalloc(sizeof(*req), GFP_KERNEL);
                 if (!req)
                         return -ENOMEM;
 
diff --git a/kernel/sched.c b/kernel/sched.c
index 41541d79e3c8..09b574e7f4df 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -3865,8 +3865,16 @@ int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
                 /*
                  * Owner changed, break to re-assess state.
                  */
-                if (lock->owner != owner)
+                if (lock->owner != owner) {
+                        /*
+                         * If the lock has switched to a different owner,
+                         * we likely have heavy contention. Return 0 to quit
+                         * optimistic spinning and not contend further:
+                         */
+                        if (lock->owner)
+                                return 0;
                         break;
+                }
 
                 /*
                  * Is that owner really running on that cpu?
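
The new early-exit in mutex_spin_on_owner() distinguishes two reasons the owner field can change: it went NULL (the lock was released, so break out and re-assess) or it now points at another task (the lock was handed off under contention, so stop optimistic spinning entirely). A compact restatement of that decision as a standalone sketch; the function name and return encoding here are illustrative only:

    #include <stdio.h>

    /* Given the owner we were spinning on and the current owner field,
     * return 0 to give up optimistic spinning (and sleep), or 1 to keep
     * going / re-assess the lock state, mirroring the hunk above. */
    static int keep_spinning(const void *spinning_on, const void *current_owner)
    {
        if (current_owner != spinning_on) {
            if (current_owner)  /* handed to someone else: heavy contention */
                return 0;
            return 1;           /* released: worth re-checking the lock */
        }
        return 1;               /* same owner: keep watching it run */
    }

    int main(void)
    {
        int a, b;
        printf("owner switched: %d\n", keep_spinning(&a, &b));   /* 0 */
        printf("lock released:  %d\n", keep_spinning(&a, NULL)); /* 1 */
        return 0;
    }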
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 806d1b227a21..134f7edb30c6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1313,7 +1313,7 @@ static struct sched_group *
 find_idlest_group(struct sched_domain *sd, struct task_struct *p,
                   int this_cpu, int load_idx)
 {
-        struct sched_group *idlest = NULL, *this = NULL, *group = sd->groups;
+        struct sched_group *idlest = NULL, *group = sd->groups;
         unsigned long min_load = ULONG_MAX, this_load = 0;
         int imbalance = 100 + (sd->imbalance_pct-100)/2;
 
@@ -1348,7 +1348,6 @@ find_idlest_group(struct sched_domain *sd, struct task_struct *p,
 
                 if (local_group) {
                         this_load = avg_load;
-                        this = group;
                 } else if (avg_load < min_load) {
                         min_load = avg_load;
                         idlest = group;
@@ -3752,6 +3751,8 @@ static void task_fork_fair(struct task_struct *p)
 
         raw_spin_lock_irqsave(&rq->lock, flags);
 
+        update_rq_clock(rq);
+
         if (unlikely(task_cpu(p) != this_cpu))
                 __set_task_cpu(p, this_cpu);
 
diff --git a/kernel/sys.c b/kernel/sys.c
index e9ad44489828..7f5a0cd296a9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -931,6 +931,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
                 pgid = pid;
         if (pgid < 0)
                 return -EINVAL;
+        rcu_read_lock();
 
         /* From this point forward we keep holding onto the tasklist lock
          * so that our parent does not change from under us. -DaveM
@@ -984,6 +985,7 @@ SYSCALL_DEFINE2(setpgid, pid_t, pid, pid_t, pgid)
 out:
         /* All paths lead to here, thus we are safe. -DaveM */
         write_unlock_irq(&tasklist_lock);
+        rcu_read_unlock();
         return err;
 }
 
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ca38e8e3e907..f88552c6d227 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1713,10 +1713,7 @@ static __init int sysctl_init(void)
 {
         sysctl_set_parent(NULL, root_table);
 #ifdef CONFIG_SYSCTL_SYSCALL_CHECK
-        {
-                int err;
-                err = sysctl_check_table(current->nsproxy, root_table);
-        }
+        sysctl_check_table(current->nsproxy, root_table);
 #endif
         return 0;
 }
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 0d88ce9b9fb8..7cb1f45a1de1 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -381,12 +381,19 @@ static int function_stat_show(struct seq_file *m, void *v)
 {
         struct ftrace_profile *rec = v;
         char str[KSYM_SYMBOL_LEN];
+        int ret = 0;
 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
-        static DEFINE_MUTEX(mutex);
         static struct trace_seq s;
         unsigned long long avg;
         unsigned long long stddev;
 #endif
+        mutex_lock(&ftrace_profile_lock);
+
+        /* we raced with function_profile_reset() */
+        if (unlikely(rec->counter == 0)) {
+                ret = -EBUSY;
+                goto out;
+        }
 
         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
@@ -408,7 +415,6 @@ static int function_stat_show(struct seq_file *m, void *v)
                 do_div(stddev, (rec->counter - 1) * 1000);
         }
 
-        mutex_lock(&mutex);
         trace_seq_init(&s);
         trace_print_graph_duration(rec->time, &s);
         trace_seq_puts(&s, "    ");
@@ -416,11 +422,12 @@ static int function_stat_show(struct seq_file *m, void *v)
         trace_seq_puts(&s, " ");
         trace_print_graph_duration(stddev, &s);
         trace_print_seq(m, &s);
-        mutex_unlock(&mutex);
 #endif
         seq_putc(m, '\n');
+out:
+        mutex_unlock(&ftrace_profile_lock);
 
-        return 0;
+        return ret;
 }
 
 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 3632ce87674f..492197e2f86c 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -2985,13 +2985,11 @@ static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
 
 static void rb_advance_iter(struct ring_buffer_iter *iter)
 {
-        struct ring_buffer *buffer;
         struct ring_buffer_per_cpu *cpu_buffer;
         struct ring_buffer_event *event;
         unsigned length;
 
         cpu_buffer = iter->cpu_buffer;
-        buffer = cpu_buffer->buffer;
 
         /*
          * Check if we are at the end of the buffer.
@@ -3846,6 +3844,9 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
                 rpos = reader->read;
                 pos += size;
 
+                if (rpos >= commit)
+                        break;
+
                 event = rb_reader_event(cpu_buffer);
                 size = rb_event_length(event);
         } while (len > size);
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index ba14a22be4cc..9ec59f541156 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3463,6 +3463,7 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
                                         size_t cnt, loff_t *fpos)
 {
         char *buf;
+        size_t written;
 
         if (tracing_disabled)
                 return -EINVAL;
@@ -3484,11 +3485,15 @@ tracing_mark_write(struct file *filp, const char __user *ubuf,
         } else
                 buf[cnt] = '\0';
 
-        cnt = mark_printk("%s", buf);
+        written = mark_printk("%s", buf);
         kfree(buf);
-        *fpos += cnt;
+        *fpos += written;
 
-        return cnt;
+        /* don't tell userspace we wrote more - it might confuse them */
+        if (written > cnt)
+                written = cnt;
+
+        return written;
 }
 
 static int tracing_clock_show(struct seq_file *m, void *v)
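
The clamp added to tracing_mark_write() reflects a general write() handler rule: never report more bytes consumed than the caller passed in, even if the internal sink (here mark_printk()) emitted more. The pattern in isolation, with a stand-in sink:

    #include <stdio.h>
    #include <string.h>

    /* A sink that, like mark_printk("%s", buf), may report more output
     * than the caller's byte count (here it pretends to append '\n'). */
    static size_t sink_write(const char *buf)
    {
        return strlen(buf) + 1;
    }

    static long write_handler(const char *ubuf, size_t cnt)
    {
        size_t written = sink_write(ubuf);

        /* don't tell userspace we wrote more - it might confuse them */
        if (written > cnt)
            written = cnt;

        return (long)written;
    }

    int main(void)
    {
        const char *msg = "hi";
        printf("reported %ld of %zu bytes\n",
               write_handler(msg, strlen(msg)), strlen(msg));
        return 0;
    }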
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 09b4fa6e4d3b..4c758f146328 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -598,88 +598,165 @@ out:
         return ret;
 }
 
-static void print_event_fields(struct trace_seq *s, struct list_head *head)
+enum {
+        FORMAT_HEADER          = 1,
+        FORMAT_PRINTFMT        = 2,
+};
+
+static void *f_next(struct seq_file *m, void *v, loff_t *pos)
 {
+        struct ftrace_event_call *call = m->private;
         struct ftrace_event_field *field;
+        struct list_head *head;
 
-        list_for_each_entry_reverse(field, head, link) {
-                /*
-                 * Smartly shows the array type(except dynamic array).
-                 * Normal:
-                 *        field:TYPE VAR
-                 * If TYPE := TYPE[LEN], it is shown:
-                 *        field:TYPE VAR[LEN]
-                 */
-                const char *array_descriptor = strchr(field->type, '[');
+        (*pos)++;
 
-                if (!strncmp(field->type, "__data_loc", 10))
-                        array_descriptor = NULL;
+        switch ((unsigned long)v) {
+        case FORMAT_HEADER:
+                head = &ftrace_common_fields;
 
-                if (!array_descriptor) {
-                        trace_seq_printf(s, "\tfield:%s %s;\toffset:%u;"
-                                        "\tsize:%u;\tsigned:%d;\n",
-                                        field->type, field->name, field->offset,
-                                        field->size, !!field->is_signed);
-                } else {
-                        trace_seq_printf(s, "\tfield:%.*s %s%s;\toffset:%u;"
-                                        "\tsize:%u;\tsigned:%d;\n",
-                                        (int)(array_descriptor - field->type),
-                                        field->type, field->name,
-                                        array_descriptor, field->offset,
-                                        field->size, !!field->is_signed);
-                }
+                if (unlikely(list_empty(head)))
+                        return NULL;
+
+                field = list_entry(head->prev, struct ftrace_event_field, link);
+                return field;
+
+        case FORMAT_PRINTFMT:
+                /* all done */
+                return NULL;
+        }
+
+        head = trace_get_fields(call);
+
+        /*
+         * To separate common fields from event fields, the
+         * LSB is set on the first event field. Clear it in case.
+         */
+        v = (void *)((unsigned long)v & ~1L);
+
+        field = v;
+        /*
+         * If this is a common field, and at the end of the list, then
+         * continue with main list.
+         */
+        if (field->link.prev == &ftrace_common_fields) {
+                if (unlikely(list_empty(head)))
+                        return NULL;
+                field = list_entry(head->prev, struct ftrace_event_field, link);
+                /* Set the LSB to notify f_show to print an extra newline */
+                field = (struct ftrace_event_field *)
+                        ((unsigned long)field | 1);
+                return field;
         }
+
+        /* If we are done tell f_show to print the format */
+        if (field->link.prev == head)
+                return (void *)FORMAT_PRINTFMT;
+
+        field = list_entry(field->link.prev, struct ftrace_event_field, link);
+
+        return field;
 }
 
-static ssize_t
-event_format_read(struct file *filp, char __user *ubuf, size_t cnt,
-                  loff_t *ppos)
+static void *f_start(struct seq_file *m, loff_t *pos)
 {
-        struct ftrace_event_call *call = filp->private_data;
-        struct list_head *head;
-        struct trace_seq *s;
-        char *buf;
-        int r;
+        loff_t l = 0;
+        void *p;
 
-        if (*ppos)
+        /* Start by showing the header */
+        if (!*pos)
+                return (void *)FORMAT_HEADER;
+
+        p = (void *)FORMAT_HEADER;
+        do {
+                p = f_next(m, p, &l);
+        } while (p && l < *pos);
+
+        return p;
+}
+
+static int f_show(struct seq_file *m, void *v)
+{
+        struct ftrace_event_call *call = m->private;
+        struct ftrace_event_field *field;
+        const char *array_descriptor;
+
+        switch ((unsigned long)v) {
+        case FORMAT_HEADER:
+                seq_printf(m, "name: %s\n", call->name);
+                seq_printf(m, "ID: %d\n", call->event.type);
+                seq_printf(m, "format:\n");
                 return 0;
 
-        s = kmalloc(sizeof(*s), GFP_KERNEL);
-        if (!s)
-                return -ENOMEM;
+        case FORMAT_PRINTFMT:
+                seq_printf(m, "\nprint fmt: %s\n",
+                           call->print_fmt);
+                return 0;
+        }
 
-        trace_seq_init(s);
+        /*
+         * To separate common fields from event fields, the
+         * LSB is set on the first event field. Clear it and
+         * print a newline if it is set.
+         */
+        if ((unsigned long)v & 1) {
+                seq_putc(m, '\n');
+                v = (void *)((unsigned long)v & ~1L);
+        }
 
-        trace_seq_printf(s, "name: %s\n", call->name);
-        trace_seq_printf(s, "ID: %d\n", call->event.type);
-        trace_seq_printf(s, "format:\n");
+        field = v;
 
-        /* print common fields */
-        print_event_fields(s, &ftrace_common_fields);
+        /*
+         * Smartly shows the array type(except dynamic array).
+         * Normal:
+         *        field:TYPE VAR
+         * If TYPE := TYPE[LEN], it is shown:
+         *        field:TYPE VAR[LEN]
+         */
+        array_descriptor = strchr(field->type, '[');
 
-        trace_seq_putc(s, '\n');
+        if (!strncmp(field->type, "__data_loc", 10))
+                array_descriptor = NULL;
 
-        /* print event specific fields */
-        head = trace_get_fields(call);
-        print_event_fields(s, head);
+        if (!array_descriptor)
+                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                           field->type, field->name, field->offset,
+                           field->size, !!field->is_signed);
+        else
+                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
+                           (int)(array_descriptor - field->type),
+                           field->type, field->name,
+                           array_descriptor, field->offset,
+                           field->size, !!field->is_signed);
 
-        r = trace_seq_printf(s, "\nprint fmt: %s\n", call->print_fmt);
+        return 0;
+}
 
-        if (!r) {
-                /*
-                 * ug! The format output is bigger than a PAGE!!
-                 */
-                buf = "FORMAT TOO BIG\n";
-                r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                            buf, strlen(buf));
-                goto out;
-        }
+static void f_stop(struct seq_file *m, void *p)
+{
+}
 
-        r = simple_read_from_buffer(ubuf, cnt, ppos,
-                                    s->buffer, s->len);
- out:
-        kfree(s);
-        return r;
+static const struct seq_operations trace_format_seq_ops = {
+        .start          = f_start,
+        .next           = f_next,
+        .stop           = f_stop,
+        .show           = f_show,
+};
+
+static int trace_format_open(struct inode *inode, struct file *file)
+{
+        struct ftrace_event_call *call = inode->i_private;
+        struct seq_file *m;
+        int ret;
+
+        ret = seq_open(file, &trace_format_seq_ops);
+        if (ret < 0)
+                return ret;
+
+        m = file->private_data;
+        m->private = call;
+
+        return 0;
 }
 
 static ssize_t
@@ -877,8 +954,10 @@ static const struct file_operations ftrace_enable_fops = {
 };
 
 static const struct file_operations ftrace_event_format_fops = {
-        .open = tracing_open_generic,
-        .read = event_format_read,
+        .open = trace_format_open,
+        .read = seq_read,
+        .llseek = seq_lseek,
+        .release = seq_release,
 };
 
 static const struct file_operations ftrace_event_id_fops = {
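
The conversion above replaces a build-everything-into-one-buffer read handler (whose page-size limit forced the old "FORMAT TOO BIG" fallback) with a seq_file iterator that emits one record per show() call. The start/next/show contract can be modeled in userspace; this sketch walks a small field array the way f_start()/f_next() walk the field lists, using a sentinel token like FORMAT_HEADER. The seq_file plumbing itself is kernel-only; all names below are illustrative.

    #include <stdio.h>

    static const char *fields[] = { "common_type", "common_flags", "my_field" };
    #define NFIELDS (sizeof(fields) / sizeof(fields[0]))
    #define FORMAT_HEADER ((void *)1)  /* sentinel token, as in the hunk above */

    /* start: map *pos to a token; pos 0 is the synthetic header record. */
    static void *f_start(long *pos)
    {
        if (*pos == 0)
            return FORMAT_HEADER;
        return *pos <= (long)NFIELDS ? (void *)&fields[*pos - 1] : NULL;
    }

    /* next: advance the position and re-resolve it. */
    static void *f_next(long *pos)
    {
        (*pos)++;
        return f_start(pos);
    }

    /* show: emit one record per call instead of one big buffer. */
    static void f_show(void *v)
    {
        if (v == FORMAT_HEADER)
            printf("format:\n");
        else
            printf("\tfield:%s;\n", *(const char **)v);
    }

    int main(void)
    {
        long pos = 0;
        for (void *v = f_start(&pos); v; v = f_next(&pos))
            f_show(v);
        return 0;
    }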
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index 6bff23625781..6f233698518e 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -507,7 +507,15 @@ get_return_for_leaf(struct trace_iterator *iter,
                          * if the output fails.
                          */
                         data->ent = *curr;
-                        data->ret = *next;
+                        /*
+                         * If the next event is not a return type, then
+                         * we only care about what type it is. Otherwise we can
+                         * safely copy the entire event.
+                         */
+                        if (next->ent.type == TRACE_GRAPH_RET)
+                                data->ret = *next;
+                        else
+                                data->ret.ent.type = next->ent.type;
                 }
         }
 
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 056468eae7cf..a6b7e0e0f3eb 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -249,7 +249,7 @@ static int trace_lookup_stack(struct seq_file *m, long i)
 {
         unsigned long addr = stack_dump_trace[i];
 
-        return seq_printf(m, "%pF\n", (void *)addr);
+        return seq_printf(m, "%pS\n", (void *)addr);
 }
 
 static void print_disabled(struct seq_file *m)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 613bc1f04610..7f9c3c52ecc1 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -122,7 +122,7 @@ static void __touch_watchdog(void)
 
 void touch_softlockup_watchdog(void)
 {
-        __get_cpu_var(watchdog_touch_ts) = 0;
+        __raw_get_cpu_var(watchdog_touch_ts) = 0;
 }
 EXPORT_SYMBOL(touch_softlockup_watchdog);
 
@@ -142,7 +142,14 @@ void touch_all_softlockup_watchdogs(void)
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 void touch_nmi_watchdog(void)
 {
-        __get_cpu_var(watchdog_nmi_touch) = true;
+        if (watchdog_enabled) {
+                unsigned cpu;
+
+                for_each_present_cpu(cpu) {
+                        if (per_cpu(watchdog_nmi_touch, cpu) != true)
+                                per_cpu(watchdog_nmi_touch, cpu) = true;
+                }
+        }
         touch_softlockup_watchdog();
 }
 EXPORT_SYMBOL(touch_nmi_watchdog);
@@ -206,6 +213,9 @@ void watchdog_overflow_callback(struct perf_event *event, int nmi,
                        struct perf_sample_data *data,
                        struct pt_regs *regs)
 {
+        /* Ensure the watchdog never gets throttled */
+        event->hw.interrupts = 0;
+
         if (__get_cpu_var(watchdog_nmi_touch) == true) {
                 __get_cpu_var(watchdog_nmi_touch) = false;
                 return;
@@ -430,6 +440,9 @@ static int watchdog_enable(int cpu)
                 wake_up_process(p);
         }
 
+        /* if any cpu succeeds, watchdog is considered enabled for the system */
+        watchdog_enabled = 1;
+
         return 0;
 }
 
@@ -452,9 +465,6 @@ static void watchdog_disable(int cpu)
                 per_cpu(softlockup_watchdog, cpu) = NULL;
                 kthread_stop(p);
         }
-
-        /* if any cpu succeeds, watchdog is considered enabled for the system */
-        watchdog_enabled = 1;
 }
 
 static void watchdog_enable_all_cpus(void)
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 2994a0e3a61c..727f24e563ae 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -35,6 +35,9 @@
 #include <linux/lockdep.h>
 #include <linux/idr.h>
 
+#define CREATE_TRACE_POINTS
+#include <trace/events/workqueue.h>
+
 #include "workqueue_sched.h"
 
 enum {
@@ -87,7 +90,8 @@ enum {
 /*
  * Structure fields follow one of the following exclusion rules.
  *
- * I: Set during initialization and read-only afterwards.
+ * I: Modifiable by initialization/destruction paths and read-only for
+ *    everyone else.
  *
  * P: Preemption protected. Disabling preemption is enough and should
  *    only be modified and accessed from the local cpu.
@@ -195,7 +199,7 @@ typedef cpumask_var_t mayday_mask_t;
         cpumask_test_and_set_cpu((cpu), (mask))
 #define mayday_clear_cpu(cpu, mask)        cpumask_clear_cpu((cpu), (mask))
 #define for_each_mayday_cpu(cpu, mask)     for_each_cpu((cpu), (mask))
-#define alloc_mayday_mask(maskp, gfp)      alloc_cpumask_var((maskp), (gfp))
+#define alloc_mayday_mask(maskp, gfp)      zalloc_cpumask_var((maskp), (gfp))
 #define free_mayday_mask(mask)             free_cpumask_var((mask))
 #else
 typedef unsigned long mayday_mask_t;
@@ -940,10 +944,14 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
         struct global_cwq *gcwq;
         struct cpu_workqueue_struct *cwq;
         struct list_head *worklist;
+        unsigned int work_flags;
         unsigned long flags;
 
         debug_work_activate(work);
 
+        if (WARN_ON_ONCE(wq->flags & WQ_DYING))
+                return;
+
         /* determine gcwq to use */
         if (!(wq->flags & WQ_UNBOUND)) {
                 struct global_cwq *last_gcwq;
@@ -986,14 +994,17 @@ static void __queue_work(unsigned int cpu, struct workqueue_struct *wq,
         BUG_ON(!list_empty(&work->entry));
 
         cwq->nr_in_flight[cwq->work_color]++;
+        work_flags = work_color_to_flags(cwq->work_color);
 
         if (likely(cwq->nr_active < cwq->max_active)) {
                 cwq->nr_active++;
                 worklist = gcwq_determine_ins_pos(gcwq, cwq);
-        } else
+        } else {
+                work_flags |= WORK_STRUCT_DELAYED;
                 worklist = &cwq->delayed_works;
+        }
 
-        insert_work(cwq, work, worklist, work_color_to_flags(cwq->work_color));
+        insert_work(cwq, work, worklist, work_flags);
 
         spin_unlock_irqrestore(&gcwq->lock, flags);
 }
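
The work_flags change above tags a work item that had to be parked on delayed_works with WORK_STRUCT_DELAYED, so that cwq_dec_nr_in_flight() (further down in this diff) knows nr_active was never incremented for it. A standalone sketch of just the admission decision, with stand-in types and names; color flags are omitted from this model:

    #include <stdio.h>

    #define WORK_STRUCT_DELAYED 0x1   /* stand-in flag bit */

    struct cwq {
        int nr_active;
        int max_active;
    };

    /* Decide where a new work item goes and which flags it carries,
     * mirroring the __queue_work() hunk above. */
    static unsigned int admit(struct cwq *cwq, const char **worklist)
    {
        unsigned int work_flags = 0;

        if (cwq->nr_active < cwq->max_active) {
            cwq->nr_active++;
            *worklist = "active list";
        } else {
            work_flags |= WORK_STRUCT_DELAYED;
            *worklist = "delayed list";
        }
        return work_flags;
    }

    int main(void)
    {
        struct cwq cwq = { .nr_active = 0, .max_active = 1 };
        const char *where;

        for (int i = 0; i < 2; i++) {
            unsigned int flags = admit(&cwq, &where);
            printf("work %d -> %s (flags %#x)\n", i, where, flags);
        }
        return 0;
    }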
@@ -1212,6 +1223,7 @@ static void worker_leave_idle(struct worker *worker)
  * bound), %false if offline.
  */
 static bool worker_maybe_bind_and_lock(struct worker *worker)
+__acquires(&gcwq->lock)
 {
         struct global_cwq *gcwq = worker->gcwq;
         struct task_struct *task = worker->task;
@@ -1485,6 +1497,8 @@ static void gcwq_mayday_timeout(unsigned long __gcwq)
  * otherwise.
  */
 static bool maybe_create_worker(struct global_cwq *gcwq)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
         if (!need_to_create_worker(gcwq))
                 return false;
@@ -1659,6 +1673,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
         struct list_head *pos = gcwq_determine_ins_pos(cwq->gcwq, cwq);
 
         move_linked_works(work, pos, NULL);
+        __clear_bit(WORK_STRUCT_DELAYED_BIT, work_data_bits(work));
         cwq->nr_active++;
 }
 
@@ -1666,6 +1681,7 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * cwq_dec_nr_in_flight - decrement cwq's nr_in_flight
  * @cwq: cwq of interest
  * @color: color of work which left the queue
+ * @delayed: for a delayed work
  *
  * A work either has completed or is removed from pending queue,
  * decrement nr_in_flight of its cwq and handle workqueue flushing.
@@ -1673,19 +1689,22 @@ static void cwq_activate_first_delayed(struct cpu_workqueue_struct *cwq)
  * CONTEXT:
  * spin_lock_irq(gcwq->lock).
  */
-static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
+static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color,
+                                 bool delayed)
 {
         /* ignore uncolored works */
         if (color == WORK_NO_COLOR)
                 return;
 
         cwq->nr_in_flight[color]--;
-        cwq->nr_active--;
 
-        if (!list_empty(&cwq->delayed_works)) {
-                /* one down, submit a delayed one */
-                if (cwq->nr_active < cwq->max_active)
-                        cwq_activate_first_delayed(cwq);
+        if (!delayed) {
+                cwq->nr_active--;
+                if (!list_empty(&cwq->delayed_works)) {
+                        /* one down, submit a delayed one */
+                        if (cwq->nr_active < cwq->max_active)
+                                cwq_activate_first_delayed(cwq);
+                }
         }
 
         /* is flush in progress and are we at the flushing tip? */
@@ -1722,6 +1741,8 @@ static void cwq_dec_nr_in_flight(struct cpu_workqueue_struct *cwq, int color)
  * spin_lock_irq(gcwq->lock) which is released and regrabbed.
 */
 static void process_one_work(struct worker *worker, struct work_struct *work)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
         struct cpu_workqueue_struct *cwq = get_work_cwq(work);
         struct global_cwq *gcwq = cwq->gcwq;
@@ -1790,7 +1811,13 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
         work_clear_pending(work);
         lock_map_acquire(&cwq->wq->lockdep_map);
         lock_map_acquire(&lockdep_map);
+        trace_workqueue_execute_start(work);
         f(work);
+        /*
+         * While we must be careful to not use "work" after this, the trace
+         * point will only record its address.
+         */
+        trace_workqueue_execute_end(work);
         lock_map_release(&lockdep_map);
         lock_map_release(&cwq->wq->lockdep_map);
 
@@ -1814,7 +1841,7 @@ static void process_one_work(struct worker *worker, struct work_struct *work)
         hlist_del_init(&worker->hentry);
         worker->current_work = NULL;
         worker->current_cwq = NULL;
-        cwq_dec_nr_in_flight(cwq, work_color);
+        cwq_dec_nr_in_flight(cwq, work_color, false);
 }
 
 /**
@@ -2379,7 +2406,8 @@ static int try_to_grab_pending(struct work_struct *work)
                         debug_work_deactivate(work);
                         list_del_init(&work->entry);
                         cwq_dec_nr_in_flight(get_work_cwq(work),
-                                get_work_color(work));
+                                get_work_color(work),
+                                *work_data_bits(work) & WORK_STRUCT_DELAYED);
                         ret = 1;
                 }
         }
@@ -2782,7 +2810,6 @@ struct workqueue_struct *__alloc_workqueue_key(const char *name,
                 if (IS_ERR(rescuer->task))
                         goto err;
 
-                wq->rescuer = rescuer;
                 rescuer->task->flags |= PF_THREAD_BOUND;
                 wake_up_process(rescuer->task);
         }
@@ -2824,6 +2851,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
 {
         unsigned int cpu;
 
+        wq->flags |= WQ_DYING;
         flush_workqueue(wq);
 
         /*
@@ -2848,6 +2876,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
         if (wq->flags & WQ_RESCUER) {
                 kthread_stop(wq->rescuer->task);
                 free_mayday_mask(wq->mayday_mask);
+                kfree(wq->rescuer);
         }
 
         free_cwqs(wq);
@@ -3230,6 +3259,8 @@ static int __cpuinit trustee_thread(void *__gcwq)
  * multiple times. To be used by cpu_callback.
  */
 static void __cpuinit wait_trustee_state(struct global_cwq *gcwq, int state)
+__releases(&gcwq->lock)
+__acquires(&gcwq->lock)
 {
         if (!(gcwq->trustee_state == state ||
               gcwq->trustee_state == TRUSTEE_DONE)) {
@@ -3536,8 +3567,7 @@ static int __init init_workqueues(void)
                 spin_lock_init(&gcwq->lock);
                 INIT_LIST_HEAD(&gcwq->worklist);
                 gcwq->cpu = cpu;
-                if (cpu == WORK_CPU_UNBOUND)
-                        gcwq->flags |= GCWQ_DISASSOCIATED;
+                gcwq->flags |= GCWQ_DISASSOCIATED;
 
                 INIT_LIST_HEAD(&gcwq->idle_list);
                 for (i = 0; i < BUSY_WORKER_HASH_SIZE; i++)
@@ -3561,6 +3591,8 @@ static int __init init_workqueues(void)
                 struct global_cwq *gcwq = get_gcwq(cpu);
                 struct worker *worker;
 
+                if (cpu != WORK_CPU_UNBOUND)
+                        gcwq->flags &= ~GCWQ_DISASSOCIATED;
                 worker = create_worker(gcwq, true);
                 BUG_ON(!worker);
                 spin_lock_irq(&gcwq->lock);