author     Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 20:37:43 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2013-04-30 20:37:43 -0400
commit     5f56886521d6ddd3648777fae44d82382dd8c87f (patch)
tree       aa0db6331cdb01c23f1884439840aadd31bbcca4 /kernel
parent     f1e9a236e5ddab6c349611ee86f54291916f226c (diff)
parent     e2a8b0a779787314eca1061308a8182e6c5bfabd (diff)
Merge branch 'akpm' (incoming from Andrew)
Merge third batch of fixes from Andrew Morton:
 "Most of the rest. I still have two large patchsets against AIO and
  IPC, but they're a bit stuck behind other trees and I'm about to
  vanish for six days.

   - random fixlets
   - inotify
   - more of the MM queue
   - show_stack() cleanups
   - DMI update
   - kthread/workqueue things
   - compat cleanups
   - epoll updates
   - binfmt updates
   - nilfs2
   - hfs
   - hfsplus
   - ptrace
   - kmod
   - coredump
   - kexec
   - rbtree
   - pids
   - pidns
   - pps
   - semaphore tweaks
   - some w1 patches
   - relay updates
   - core Kconfig changes
   - sysrq tweaks"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (109 commits)
  Documentation/sysrq: fix inconsistent help message of sysrq key
  ethernet/emac/sysrq: fix inconsistent help message of sysrq key
  sparc/sysrq: fix inconsistent help message of sysrq key
  powerpc/xmon/sysrq: fix inconsistent help message of sysrq key
  ARM/etm/sysrq: fix inconsistent help message of sysrq key
  power/sysrq: fix inconsistent help message of sysrq key
  kgdb/sysrq: fix inconsistent help message of sysrq key
  lib/decompress.c: fix initconst
  notifier-error-inject: fix module names in Kconfig
  kernel/sys.c: make prctl(PR_SET_MM) generally available
  UAPI: remove empty Kbuild files
  menuconfig: print more info for symbol without prompts
  init/Kconfig: re-order CONFIG_EXPERT options to fix menuconfig display
  kconfig menu: move Virtualization drivers near other virtualization options
  Kconfig: consolidate CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
  relay: use macro PAGE_ALIGN instead of FIX_SIZE
  kernel/relay.c: move FIX_SIZE macro into relay.c
  kernel/relay.c: remove unused function argument actor
  drivers/w1/slaves/w1_ds2760.c: fix the error handling in w1_ds2760_add_slave()
  drivers/w1/slaves/w1_ds2781.c: fix the error handling in w1_ds2781_add_slave()
  ...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/compat.c               65
-rw-r--r--  kernel/debug/debug_core.c      2
-rw-r--r--  kernel/kexec.c                30
-rw-r--r--  kernel/kmod.c                 98
-rw-r--r--  kernel/kthread.c              19
-rw-r--r--  kernel/panic.c                 6
-rw-r--r--  kernel/pid.c                  11
-rw-r--r--  kernel/pid_namespace.c         2
-rw-r--r--  kernel/power/poweroff.c        2
-rw-r--r--  kernel/printk.c               62
-rw-r--r--  kernel/ptrace.c               80
-rw-r--r--  kernel/range.c                 3
-rw-r--r--  kernel/relay.c                14
-rw-r--r--  kernel/sched/core.c            1
-rw-r--r--  kernel/semaphore.c             8
-rw-r--r--  kernel/signal.c                9
-rw-r--r--  kernel/smp.c                  91
-rw-r--r--  kernel/sys.c                 221
-rw-r--r--  kernel/timer.c               143
-rw-r--r--  kernel/workqueue.c            79
-rw-r--r--  kernel/workqueue_internal.h   12
22 files changed, 600 insertions, 364 deletions
diff --git a/kernel/compat.c b/kernel/compat.c
index 19971d8c7299..1e8f1455117a 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1138,71 +1138,6 @@ asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
1138} 1138}
1139#endif 1139#endif
1140 1140
1141struct compat_sysinfo {
1142 s32 uptime;
1143 u32 loads[3];
1144 u32 totalram;
1145 u32 freeram;
1146 u32 sharedram;
1147 u32 bufferram;
1148 u32 totalswap;
1149 u32 freeswap;
1150 u16 procs;
1151 u16 pad;
1152 u32 totalhigh;
1153 u32 freehigh;
1154 u32 mem_unit;
1155 char _f[20-2*sizeof(u32)-sizeof(int)];
1156};
1157
1158asmlinkage long
1159compat_sys_sysinfo(struct compat_sysinfo __user *info)
1160{
1161 struct sysinfo s;
1162
1163 do_sysinfo(&s);
1164
1165 /* Check to see if any memory value is too large for 32-bit and scale
1166 * down if needed
1167 */
1168 if ((s.totalram >> 32) || (s.totalswap >> 32)) {
1169 int bitcount = 0;
1170
1171 while (s.mem_unit < PAGE_SIZE) {
1172 s.mem_unit <<= 1;
1173 bitcount++;
1174 }
1175
1176 s.totalram >>= bitcount;
1177 s.freeram >>= bitcount;
1178 s.sharedram >>= bitcount;
1179 s.bufferram >>= bitcount;
1180 s.totalswap >>= bitcount;
1181 s.freeswap >>= bitcount;
1182 s.totalhigh >>= bitcount;
1183 s.freehigh >>= bitcount;
1184 }
1185
1186 if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
1187 __put_user (s.uptime, &info->uptime) ||
1188 __put_user (s.loads[0], &info->loads[0]) ||
1189 __put_user (s.loads[1], &info->loads[1]) ||
1190 __put_user (s.loads[2], &info->loads[2]) ||
1191 __put_user (s.totalram, &info->totalram) ||
1192 __put_user (s.freeram, &info->freeram) ||
1193 __put_user (s.sharedram, &info->sharedram) ||
1194 __put_user (s.bufferram, &info->bufferram) ||
1195 __put_user (s.totalswap, &info->totalswap) ||
1196 __put_user (s.freeswap, &info->freeswap) ||
1197 __put_user (s.procs, &info->procs) ||
1198 __put_user (s.totalhigh, &info->totalhigh) ||
1199 __put_user (s.freehigh, &info->freehigh) ||
1200 __put_user (s.mem_unit, &info->mem_unit))
1201 return -EFAULT;
1202
1203 return 0;
1204}
1205
1206COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval, 1141COMPAT_SYSCALL_DEFINE2(sched_rr_get_interval,
1207 compat_pid_t, pid, 1142 compat_pid_t, pid,
1208 struct compat_timespec __user *, interval) 1143 struct compat_timespec __user *, interval)
diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
index c26278fd4851..0506d447aed2 100644
--- a/kernel/debug/debug_core.c
+++ b/kernel/debug/debug_core.c
@@ -775,7 +775,7 @@ static void sysrq_handle_dbg(int key)
775 775
776static struct sysrq_key_op sysrq_dbg_op = { 776static struct sysrq_key_op sysrq_dbg_op = {
777 .handler = sysrq_handle_dbg, 777 .handler = sysrq_handle_dbg,
778 .help_msg = "debug(G)", 778 .help_msg = "debug(g)",
779 .action_msg = "DEBUG", 779 .action_msg = "DEBUG",
780}; 780};
781#endif 781#endif
diff --git a/kernel/kexec.c b/kernel/kexec.c
index b574920cbd4b..59f7b55ba745 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -786,7 +786,7 @@ static int kimage_load_normal_segment(struct kimage *image,
786 struct kexec_segment *segment) 786 struct kexec_segment *segment)
787{ 787{
788 unsigned long maddr; 788 unsigned long maddr;
789 unsigned long ubytes, mbytes; 789 size_t ubytes, mbytes;
790 int result; 790 int result;
791 unsigned char __user *buf; 791 unsigned char __user *buf;
792 792
@@ -819,13 +819,9 @@ static int kimage_load_normal_segment(struct kimage *image,
819 /* Start with a clear page */ 819 /* Start with a clear page */
820 clear_page(ptr); 820 clear_page(ptr);
821 ptr += maddr & ~PAGE_MASK; 821 ptr += maddr & ~PAGE_MASK;
822 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 822 mchunk = min_t(size_t, mbytes,
823 if (mchunk > mbytes) 823 PAGE_SIZE - (maddr & ~PAGE_MASK));
824 mchunk = mbytes; 824 uchunk = min(ubytes, mchunk);
825
826 uchunk = mchunk;
827 if (uchunk > ubytes)
828 uchunk = ubytes;
829 825
830 result = copy_from_user(ptr, buf, uchunk); 826 result = copy_from_user(ptr, buf, uchunk);
831 kunmap(page); 827 kunmap(page);
@@ -850,7 +846,7 @@ static int kimage_load_crash_segment(struct kimage *image,
850 * We do things a page at a time for the sake of kmap. 846 * We do things a page at a time for the sake of kmap.
851 */ 847 */
852 unsigned long maddr; 848 unsigned long maddr;
853 unsigned long ubytes, mbytes; 849 size_t ubytes, mbytes;
854 int result; 850 int result;
855 unsigned char __user *buf; 851 unsigned char __user *buf;
856 852
@@ -871,13 +867,10 @@ static int kimage_load_crash_segment(struct kimage *image,
871 } 867 }
872 ptr = kmap(page); 868 ptr = kmap(page);
873 ptr += maddr & ~PAGE_MASK; 869 ptr += maddr & ~PAGE_MASK;
874 mchunk = PAGE_SIZE - (maddr & ~PAGE_MASK); 870 mchunk = min_t(size_t, mbytes,
875 if (mchunk > mbytes) 871 PAGE_SIZE - (maddr & ~PAGE_MASK));
876 mchunk = mbytes; 872 uchunk = min(ubytes, mchunk);
877 873 if (mchunk > uchunk) {
878 uchunk = mchunk;
879 if (uchunk > ubytes) {
880 uchunk = ubytes;
881 /* Zero the trailing part of the page */ 874 /* Zero the trailing part of the page */
882 memset(ptr + uchunk, 0, mchunk - uchunk); 875 memset(ptr + uchunk, 0, mchunk - uchunk);
883 } 876 }
@@ -1540,14 +1533,13 @@ void vmcoreinfo_append_str(const char *fmt, ...)
1540{ 1533{
1541 va_list args; 1534 va_list args;
1542 char buf[0x50]; 1535 char buf[0x50];
1543 int r; 1536 size_t r;
1544 1537
1545 va_start(args, fmt); 1538 va_start(args, fmt);
1546 r = vsnprintf(buf, sizeof(buf), fmt, args); 1539 r = vsnprintf(buf, sizeof(buf), fmt, args);
1547 va_end(args); 1540 va_end(args);
1548 1541
1549 if (r + vmcoreinfo_size > vmcoreinfo_max_size) 1542 r = min(r, vmcoreinfo_max_size - vmcoreinfo_size);
1550 r = vmcoreinfo_max_size - vmcoreinfo_size;
1551 1543
1552 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r); 1544 memcpy(&vmcoreinfo_data[vmcoreinfo_size], buf, r);
1553 1545
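The cleanup in both load paths is the same: the open-coded clamp sequences become min()/min_t() one-liners. A stand-alone sketch of the resulting chunk computation (function name hypothetical; min(), min_t(), PAGE_SIZE and PAGE_MASK come from the usual kernel headers):

#include <linux/kernel.h>
#include <linux/mm.h>

/* Bytes that may be copied into the page containing @maddr this pass. */
static size_t page_chunk(unsigned long maddr, size_t mbytes, size_t ubytes)
{
	/* min_t() because the two operands differ in type. */
	size_t mchunk = min_t(size_t, mbytes,
			      PAGE_SIZE - (maddr & ~PAGE_MASK));

	/* ...and never more than the user buffer still holds. */
	return min(ubytes, mchunk);
}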
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 56dd34976d7b..1296e72e4161 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -77,6 +77,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
77 77
78static int call_modprobe(char *module_name, int wait) 78static int call_modprobe(char *module_name, int wait)
79{ 79{
80 struct subprocess_info *info;
80 static char *envp[] = { 81 static char *envp[] = {
81 "HOME=/", 82 "HOME=/",
82 "TERM=linux", 83 "TERM=linux",
@@ -98,8 +99,15 @@ static int call_modprobe(char *module_name, int wait)
98 argv[3] = module_name; /* check free_modprobe_argv() */ 99 argv[3] = module_name; /* check free_modprobe_argv() */
99 argv[4] = NULL; 100 argv[4] = NULL;
100 101
101 return call_usermodehelper_fns(modprobe_path, argv, envp, 102 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
102 wait | UMH_KILLABLE, NULL, free_modprobe_argv, NULL); 103 NULL, free_modprobe_argv, NULL);
104 if (!info)
105 goto free_module_name;
106
107 return call_usermodehelper_exec(info, wait | UMH_KILLABLE);
108
109free_module_name:
110 kfree(module_name);
103free_argv: 111free_argv:
104 kfree(argv); 112 kfree(argv);
105out: 113out:
@@ -502,14 +510,28 @@ static void helper_unlock(void)
502 * @argv: arg vector for process 510 * @argv: arg vector for process
503 * @envp: environment for process 511 * @envp: environment for process
504 * @gfp_mask: gfp mask for memory allocation 512 * @gfp_mask: gfp mask for memory allocation
513 * @cleanup: a cleanup function
514 * @init: an init function
515 * @data: arbitrary context sensitive data
505 * 516 *
506 * Returns either %NULL on allocation failure, or a subprocess_info 517 * Returns either %NULL on allocation failure, or a subprocess_info
507 * structure. This should be passed to call_usermodehelper_exec to 518 * structure. This should be passed to call_usermodehelper_exec to
508 * exec the process and free the structure. 519 * exec the process and free the structure.
520 *
521 * The init function is used to customize the helper process prior to
522 * exec. A non-zero return code causes the process to error out, exit,
523 * and return the failure to the calling process
524 *
525 * The cleanup function is called just before the subprocess_info is about to
526 * be freed. This can be used for freeing the argv and envp. The
527 * function must be runnable in either a process context or the
528 * context in which call_usermodehelper_exec is called.
509 */ 529 */
510static
511struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, 530struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
512 char **envp, gfp_t gfp_mask) 531 char **envp, gfp_t gfp_mask,
532 int (*init)(struct subprocess_info *info, struct cred *new),
533 void (*cleanup)(struct subprocess_info *info),
534 void *data)
513{ 535{
514 struct subprocess_info *sub_info; 536 struct subprocess_info *sub_info;
515 sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask); 537 sub_info = kzalloc(sizeof(struct subprocess_info), gfp_mask);
@@ -520,50 +542,27 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
520 sub_info->path = path; 542 sub_info->path = path;
521 sub_info->argv = argv; 543 sub_info->argv = argv;
522 sub_info->envp = envp; 544 sub_info->envp = envp;
545
546 sub_info->cleanup = cleanup;
547 sub_info->init = init;
548 sub_info->data = data;
523 out: 549 out:
524 return sub_info; 550 return sub_info;
525} 551}
526 552EXPORT_SYMBOL(call_usermodehelper_setup);
527/**
528 * call_usermodehelper_setfns - set a cleanup/init function
529 * @info: a subprocess_info returned by call_usermodehelper_setup
530 * @cleanup: a cleanup function
531 * @init: an init function
532 * @data: arbitrary context sensitive data
533 *
534 * The init function is used to customize the helper process prior to
535 * exec. A non-zero return code causes the process to error out, exit,
536 * and return the failure to the calling process
537 *
538 * The cleanup function is called just before the subprocess_info is about to
539 * be freed. This can be used for freeing the argv and envp. The
540 * function must be runnable in either a process context or the
541 * context in which call_usermodehelper_exec is called.
542 */
543static
544void call_usermodehelper_setfns(struct subprocess_info *info,
545 int (*init)(struct subprocess_info *info, struct cred *new),
546 void (*cleanup)(struct subprocess_info *info),
547 void *data)
548{
549 info->cleanup = cleanup;
550 info->init = init;
551 info->data = data;
552}
553 553
554/** 554/**
555 * call_usermodehelper_exec - start a usermode application 555 * call_usermodehelper_exec - start a usermode application
556 * @sub_info: information about the subprocess 556 * @sub_info: information about the subprocess
557 * @wait: wait for the application to finish and return status. 557 * @wait: wait for the application to finish and return status.
558 * when -1 don't wait at all, but you get no useful error back when 558 * when UMH_NO_WAIT don't wait at all, but you get no useful error back
559 * the program couldn't be exec'ed. This makes it safe to call 559 * when the program couldn't be exec'ed. This makes it safe to call
560 * from interrupt context. 560 * from interrupt context.
561 * 561 *
562 * Runs a user-space application. The application is started 562 * Runs a user-space application. The application is started
563 * asynchronously if wait is not set, and runs as a child of keventd. 563 * asynchronously if wait is not set, and runs as a child of keventd.
564 * (ie. it runs with full root capabilities). 564 * (ie. it runs with full root capabilities).
565 */ 565 */
566static
567int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) 566int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
568{ 567{
569 DECLARE_COMPLETION_ONSTACK(done); 568 DECLARE_COMPLETION_ONSTACK(done);
@@ -615,31 +614,34 @@ unlock:
615 helper_unlock(); 614 helper_unlock();
616 return retval; 615 return retval;
617} 616}
617EXPORT_SYMBOL(call_usermodehelper_exec);
618 618
619/* 619/**
620 * call_usermodehelper_fns() will not run the caller-provided cleanup function 620 * call_usermodehelper() - prepare and start a usermode application
621 * if a memory allocation failure is experienced. So the caller might need to 621 * @path: path to usermode executable
622 * check the call_usermodehelper_fns() return value: if it is -ENOMEM, perform 622 * @argv: arg vector for process
623 * the necessary cleanup within the caller. 623 * @envp: environment for process
624 * @wait: wait for the application to finish and return status.
625 * when UMH_NO_WAIT don't wait at all, but you get no useful error back
626 * when the program couldn't be exec'ed. This makes it safe to call
627 * from interrupt context.
628 *
629 * This function is the equivalent to use call_usermodehelper_setup() and
630 * call_usermodehelper_exec().
624 */ 631 */
625int call_usermodehelper_fns( 632int call_usermodehelper(char *path, char **argv, char **envp, int wait)
626 char *path, char **argv, char **envp, int wait,
627 int (*init)(struct subprocess_info *info, struct cred *new),
628 void (*cleanup)(struct subprocess_info *), void *data)
629{ 633{
630 struct subprocess_info *info; 634 struct subprocess_info *info;
631 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; 635 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
632 636
633 info = call_usermodehelper_setup(path, argv, envp, gfp_mask); 637 info = call_usermodehelper_setup(path, argv, envp, gfp_mask,
634 638 NULL, NULL, NULL);
635 if (info == NULL) 639 if (info == NULL)
636 return -ENOMEM; 640 return -ENOMEM;
637 641
638 call_usermodehelper_setfns(info, init, cleanup, data);
639
640 return call_usermodehelper_exec(info, wait); 642 return call_usermodehelper_exec(info, wait);
641} 643}
642EXPORT_SYMBOL(call_usermodehelper_fns); 644EXPORT_SYMBOL(call_usermodehelper);
643 645
644static int proc_cap_handler(struct ctl_table *table, int write, 646static int proc_cap_handler(struct ctl_table *table, int write,
645 void __user *buffer, size_t *lenp, loff_t *ppos) 647 void __user *buffer, size_t *lenp, loff_t *ppos)
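With init/cleanup/data folded into call_usermodehelper_setup() and both halves exported, a user of the removed call_usermodehelper_fns() migrates roughly as follows. A hedged sketch: run_helper, my_init, my_cleanup and data are hypothetical, and on setup failure the cleanup callback has not run, so the caller must free argv/envp itself (as the free_module_name label above does):

#include <linux/kmod.h>
#include <linux/gfp.h>

static int run_helper(char *path, char **argv, char **envp,
		      int (*my_init)(struct subprocess_info *, struct cred *),
		      void (*my_cleanup)(struct subprocess_info *),
		      void *data)
{
	struct subprocess_info *info;

	/* Callbacks and context now go straight into setup. */
	info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
					 my_init, my_cleanup, data);
	if (!info)
		return -ENOMEM;	/* my_cleanup was not called */

	/* UMH_WAIT_PROC: block until the helper exits, return its status. */
	return call_usermodehelper_exec(info, UMH_WAIT_PROC);
}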
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 16d8ddd268b1..760e86df8c20 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -17,6 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/freezer.h> 18#include <linux/freezer.h>
19#include <linux/ptrace.h> 19#include <linux/ptrace.h>
20#include <linux/uaccess.h>
20#include <trace/events/sched.h> 21#include <trace/events/sched.h>
21 22
22static DEFINE_SPINLOCK(kthread_create_lock); 23static DEFINE_SPINLOCK(kthread_create_lock);
@@ -135,6 +136,24 @@ void *kthread_data(struct task_struct *task)
135 return to_kthread(task)->data; 136 return to_kthread(task)->data;
136} 137}
137 138
139/**
140 * probe_kthread_data - speculative version of kthread_data()
141 * @task: possible kthread task in question
142 *
143 * @task could be a kthread task. Return the data value specified when it
144 * was created if accessible. If @task isn't a kthread task or its data is
145 * inaccessible for any reason, %NULL is returned. This function requires
146 * that @task itself is safe to dereference.
147 */
148void *probe_kthread_data(struct task_struct *task)
149{
150 struct kthread *kthread = to_kthread(task);
151 void *data = NULL;
152
153 probe_kernel_read(&data, &kthread->data, sizeof(data));
154 return data;
155}
156
138static void __kthread_parkme(struct kthread *self) 157static void __kthread_parkme(struct kthread *self)
139{ 158{
140 __set_current_state(TASK_PARKED); 159 __set_current_state(TASK_PARKED);
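The consumer of this is print_worker_info() in the workqueue patch later in the series, which must inspect a task that may or may not still be a live kthread. A minimal sketch of the pattern (the reporting helper is hypothetical):

#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/sched.h>

static void report_kthread_data(struct task_struct *task)
{
	/*
	 * Never faults: returns NULL if @task is not a kthread or its
	 * ->data cannot be read.
	 */
	void *data = probe_kthread_data(task);

	if (data)
		pr_info("%s: kthread data %p\n", task->comm, data);
}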
diff --git a/kernel/panic.c b/kernel/panic.c
index 7c57cc9eee2c..167ec097ce8b 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -22,7 +22,6 @@
22#include <linux/sysrq.h> 22#include <linux/sysrq.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/nmi.h> 24#include <linux/nmi.h>
25#include <linux/dmi.h>
26 25
27#define PANIC_TIMER_STEP 100 26#define PANIC_TIMER_STEP 100
28#define PANIC_BLINK_SPD 18 27#define PANIC_BLINK_SPD 18
@@ -400,13 +399,8 @@ struct slowpath_args {
400static void warn_slowpath_common(const char *file, int line, void *caller, 399static void warn_slowpath_common(const char *file, int line, void *caller,
401 unsigned taint, struct slowpath_args *args) 400 unsigned taint, struct slowpath_args *args)
402{ 401{
403 const char *board;
404
405 printk(KERN_WARNING "------------[ cut here ]------------\n"); 402 printk(KERN_WARNING "------------[ cut here ]------------\n");
406 printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller); 403 printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
407 board = dmi_get_system_info(DMI_PRODUCT_NAME);
408 if (board)
409 printk(KERN_WARNING "Hardware name: %s\n", board);
410 404
411 if (args) 405 if (args)
412 vprintk(args->fmt, args->args); 406 vprintk(args->fmt, args->args);
diff --git a/kernel/pid.c b/kernel/pid.c
index 047dc6264638..6283d6412aff 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -51,9 +51,6 @@ int pid_max = PID_MAX_DEFAULT;
51int pid_max_min = RESERVED_PIDS + 1; 51int pid_max_min = RESERVED_PIDS + 1;
52int pid_max_max = PID_MAX_LIMIT; 52int pid_max_max = PID_MAX_LIMIT;
53 53
54#define BITS_PER_PAGE (PAGE_SIZE*8)
55#define BITS_PER_PAGE_MASK (BITS_PER_PAGE-1)
56
57static inline int mk_pid(struct pid_namespace *pid_ns, 54static inline int mk_pid(struct pid_namespace *pid_ns,
58 struct pidmap *map, int off) 55 struct pidmap *map, int off)
59{ 56{
@@ -183,15 +180,19 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
183 break; 180 break;
184 } 181 }
185 if (likely(atomic_read(&map->nr_free))) { 182 if (likely(atomic_read(&map->nr_free))) {
186 do { 183 for ( ; ; ) {
187 if (!test_and_set_bit(offset, map->page)) { 184 if (!test_and_set_bit(offset, map->page)) {
188 atomic_dec(&map->nr_free); 185 atomic_dec(&map->nr_free);
189 set_last_pid(pid_ns, last, pid); 186 set_last_pid(pid_ns, last, pid);
190 return pid; 187 return pid;
191 } 188 }
192 offset = find_next_offset(map, offset); 189 offset = find_next_offset(map, offset);
190 if (offset >= BITS_PER_PAGE)
191 break;
193 pid = mk_pid(pid_ns, map, offset); 192 pid = mk_pid(pid_ns, map, offset);
194 } while (offset < BITS_PER_PAGE && pid < pid_max); 193 if (pid >= pid_max)
194 break;
195 }
195 } 196 }
196 if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) { 197 if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
197 ++map; 198 ++map;
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index bea15bdf82b0..69473c4a653f 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -19,8 +19,6 @@
19#include <linux/reboot.h> 19#include <linux/reboot.h>
20#include <linux/export.h> 20#include <linux/export.h>
21 21
22#define BITS_PER_PAGE (PAGE_SIZE*8)
23
24struct pid_cache { 22struct pid_cache {
25 int nr_ids; 23 int nr_ids;
26 char name[16]; 24 char name[16];
diff --git a/kernel/power/poweroff.c b/kernel/power/poweroff.c
index 68197a4e8fc9..7ef6866b521d 100644
--- a/kernel/power/poweroff.c
+++ b/kernel/power/poweroff.c
@@ -32,7 +32,7 @@ static void handle_poweroff(int key)
32 32
33static struct sysrq_key_op sysrq_poweroff_op = { 33static struct sysrq_key_op sysrq_poweroff_op = {
34 .handler = handle_poweroff, 34 .handler = handle_poweroff,
35 .help_msg = "powerOff", 35 .help_msg = "poweroff(o)",
36 .action_msg = "Power Off", 36 .action_msg = "Power Off",
37 .enable_mask = SYSRQ_ENABLE_BOOT, 37 .enable_mask = SYSRQ_ENABLE_BOOT,
38}; 38};
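Together with the debug_core.c hunk above, this is part of the series normalizing every help_msg to the lowercase "name(key)" form, so the SysRq help line prints consistently. A hypothetical key following the new convention:

#include <linux/sysrq.h>
#include <linux/printk.h>
#include <linux/init.h>

static void sysrq_handle_example(int key)
{
	pr_info("example handler invoked\n");
}

static struct sysrq_key_op sysrq_example_op = {
	.handler	= sysrq_handle_example,
	.help_msg	= "example(x)",	/* "name(key)", lowercase */
	.action_msg	= "Example",
};

static int __init example_sysrq_init(void)
{
	return register_sysrq_key('x', &sysrq_example_op);
}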
diff --git a/kernel/printk.c b/kernel/printk.c
index 376914e2869d..96dcfcd9a2d4 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -43,6 +43,7 @@
43#include <linux/rculist.h> 43#include <linux/rculist.h>
44#include <linux/poll.h> 44#include <linux/poll.h>
45#include <linux/irq_work.h> 45#include <linux/irq_work.h>
46#include <linux/utsname.h>
46 47
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48 49
@@ -2849,4 +2850,65 @@ void kmsg_dump_rewind(struct kmsg_dumper *dumper)
2849 raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2850 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2850} 2851}
2851EXPORT_SYMBOL_GPL(kmsg_dump_rewind); 2852EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
2853
2854static char dump_stack_arch_desc_str[128];
2855
2856/**
2857 * dump_stack_set_arch_desc - set arch-specific str to show with task dumps
2858 * @fmt: printf-style format string
2859 * @...: arguments for the format string
2860 *
2861 * The configured string will be printed right after utsname during task
2862 * dumps. Usually used to add arch-specific system identifiers. If an
2863 * arch wants to make use of such an ID string, it should initialize this
2864 * as soon as possible during boot.
2865 */
2866void __init dump_stack_set_arch_desc(const char *fmt, ...)
2867{
2868 va_list args;
2869
2870 va_start(args, fmt);
2871 vsnprintf(dump_stack_arch_desc_str, sizeof(dump_stack_arch_desc_str),
2872 fmt, args);
2873 va_end(args);
2874}
2875
2876/**
2877 * dump_stack_print_info - print generic debug info for dump_stack()
2878 * @log_lvl: log level
2879 *
2880 * Arch-specific dump_stack() implementations can use this function to
2881 * print out the same debug information as the generic dump_stack().
2882 */
2883void dump_stack_print_info(const char *log_lvl)
2884{
2885 printk("%sCPU: %d PID: %d Comm: %.20s %s %s %.*s\n",
2886 log_lvl, raw_smp_processor_id(), current->pid, current->comm,
2887 print_tainted(), init_utsname()->release,
2888 (int)strcspn(init_utsname()->version, " "),
2889 init_utsname()->version);
2890
2891 if (dump_stack_arch_desc_str[0] != '\0')
2892 printk("%sHardware name: %s\n",
2893 log_lvl, dump_stack_arch_desc_str);
2894
2895 print_worker_info(log_lvl, current);
2896}
2897
2898/**
2899 * show_regs_print_info - print generic debug info for show_regs()
2900 * @log_lvl: log level
2901 *
2902 * show_regs() implementations can use this function to print out generic
2903 * debug information.
2904 */
2905void show_regs_print_info(const char *log_lvl)
2906{
2907 dump_stack_print_info(log_lvl);
2908
2909 printk("%stask: %p ti: %p task.ti: %p\n",
2910 log_lvl, current, current_thread_info(),
2911 task_thread_info(current));
2912}
2913
2852#endif 2914#endif
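This pair is what replaces the per-callsite DMI print that the panic.c hunk above removes from warn_slowpath_common(). An architecture seeds the descriptor once during early boot and every subsequent dump carries it; a sketch along the lines of what x86 does with DMI (function name hypothetical, simplified to the product name only):

#include <linux/dmi.h>
#include <linux/printk.h>
#include <linux/init.h>

static void __init set_hardware_desc(void)
{
	const char *board = dmi_get_system_info(DMI_PRODUCT_NAME);

	/* Emitted as "Hardware name: ..." by dump_stack_print_info(). */
	if (board)
		dump_stack_set_arch_desc("%s", board);
}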
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index acbd28424d81..17ae54da0ec2 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -24,6 +24,7 @@
24#include <linux/regset.h> 24#include <linux/regset.h>
25#include <linux/hw_breakpoint.h> 25#include <linux/hw_breakpoint.h>
26#include <linux/cn_proc.h> 26#include <linux/cn_proc.h>
27#include <linux/compat.h>
27 28
28 29
29static int ptrace_trapping_sleep_fn(void *flags) 30static int ptrace_trapping_sleep_fn(void *flags)
@@ -618,6 +619,81 @@ static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
618 return error; 619 return error;
619} 620}
620 621
622static int ptrace_peek_siginfo(struct task_struct *child,
623 unsigned long addr,
624 unsigned long data)
625{
626 struct ptrace_peeksiginfo_args arg;
627 struct sigpending *pending;
628 struct sigqueue *q;
629 int ret, i;
630
631 ret = copy_from_user(&arg, (void __user *) addr,
632 sizeof(struct ptrace_peeksiginfo_args));
633 if (ret)
634 return -EFAULT;
635
636 if (arg.flags & ~PTRACE_PEEKSIGINFO_SHARED)
637 return -EINVAL; /* unknown flags */
638
639 if (arg.nr < 0)
640 return -EINVAL;
641
642 if (arg.flags & PTRACE_PEEKSIGINFO_SHARED)
643 pending = &child->signal->shared_pending;
644 else
645 pending = &child->pending;
646
647 for (i = 0; i < arg.nr; ) {
648 siginfo_t info;
649 s32 off = arg.off + i;
650
651 spin_lock_irq(&child->sighand->siglock);
652 list_for_each_entry(q, &pending->list, list) {
653 if (!off--) {
654 copy_siginfo(&info, &q->info);
655 break;
656 }
657 }
658 spin_unlock_irq(&child->sighand->siglock);
659
660 if (off >= 0) /* beyond the end of the list */
661 break;
662
663#ifdef CONFIG_COMPAT
664 if (unlikely(is_compat_task())) {
665 compat_siginfo_t __user *uinfo = compat_ptr(data);
666
667 ret = copy_siginfo_to_user32(uinfo, &info);
668 ret |= __put_user(info.si_code, &uinfo->si_code);
669 } else
670#endif
671 {
672 siginfo_t __user *uinfo = (siginfo_t __user *) data;
673
674 ret = copy_siginfo_to_user(uinfo, &info);
675 ret |= __put_user(info.si_code, &uinfo->si_code);
676 }
677
678 if (ret) {
679 ret = -EFAULT;
680 break;
681 }
682
683 data += sizeof(siginfo_t);
684 i++;
685
686 if (signal_pending(current))
687 break;
688
689 cond_resched();
690 }
691
692 if (i > 0)
693 return i;
694
695 return ret;
696}
621 697
622#ifdef PTRACE_SINGLESTEP 698#ifdef PTRACE_SINGLESTEP
623#define is_singlestep(request) ((request) == PTRACE_SINGLESTEP) 699#define is_singlestep(request) ((request) == PTRACE_SINGLESTEP)
@@ -748,6 +824,10 @@ int ptrace_request(struct task_struct *child, long request,
748 ret = put_user(child->ptrace_message, datalp); 824 ret = put_user(child->ptrace_message, datalp);
749 break; 825 break;
750 826
827 case PTRACE_PEEKSIGINFO:
828 ret = ptrace_peek_siginfo(child, addr, data);
829 break;
830
751 case PTRACE_GETSIGINFO: 831 case PTRACE_GETSIGINFO:
752 ret = ptrace_getsiginfo(child, &siginfo); 832 ret = ptrace_getsiginfo(child, &siginfo);
753 if (!ret) 833 if (!ret)
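From userspace, the new request takes a struct ptrace_peeksiginfo_args and fills an array of siginfo_t; the return value is the number of entries copied, so the queue can be walked by advancing off. A hedged sketch against an already-attached, stopped tracee; the UAPI struct is reproduced for older headers:

#include <stdio.h>
#include <stdint.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/ptrace.h>

#ifndef PTRACE_PEEKSIGINFO
#define PTRACE_PEEKSIGINFO 0x4209
struct ptrace_peeksiginfo_args {
	uint64_t off;	/* queue position to start from */
	uint32_t flags;	/* 0 or PTRACE_PEEKSIGINFO_SHARED */
	int32_t  nr;	/* maximum number of entries to copy */
};
#endif

static void peek_pending(pid_t pid)
{
	struct ptrace_peeksiginfo_args args = { .off = 0, .flags = 0, .nr = 4 };
	siginfo_t info[4];
	long i, n;

	/* Returns entries copied; 0 means the end of the queue. */
	n = ptrace(PTRACE_PEEKSIGINFO, pid, &args, info);
	for (i = 0; i < n; i++)
		printf("signal %d, si_code %d\n",
		       info[i].si_signo, info[i].si_code);
}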
diff --git a/kernel/range.c b/kernel/range.c
index 9b8ae2d6ed68..071b0ab455cb 100644
--- a/kernel/range.c
+++ b/kernel/range.c
@@ -97,7 +97,8 @@ void subtract_range(struct range *range, int az, u64 start, u64 end)
97 range[i].end = range[j].end; 97 range[i].end = range[j].end;
98 range[i].start = end; 98 range[i].start = end;
99 } else { 99 } else {
100 printk(KERN_ERR "run of slot in ranges\n"); 100 pr_err("%s: run out of slot in ranges\n",
101 __func__);
101 } 102 }
102 range[j].end = start; 103 range[j].end = start;
103 continue; 104 continue;
diff --git a/kernel/relay.c b/kernel/relay.c
index 01ab081ac53a..eef0d113b79e 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -588,7 +588,7 @@ struct rchan *relay_open(const char *base_filename,
588 chan->version = RELAYFS_CHANNEL_VERSION; 588 chan->version = RELAYFS_CHANNEL_VERSION;
589 chan->n_subbufs = n_subbufs; 589 chan->n_subbufs = n_subbufs;
590 chan->subbuf_size = subbuf_size; 590 chan->subbuf_size = subbuf_size;
591 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); 591 chan->alloc_size = PAGE_ALIGN(subbuf_size * n_subbufs);
592 chan->parent = parent; 592 chan->parent = parent;
593 chan->private_data = private_data; 593 chan->private_data = private_data;
594 if (base_filename) { 594 if (base_filename) {
@@ -1099,8 +1099,7 @@ static size_t relay_file_read_end_pos(struct rchan_buf *buf,
1099static int subbuf_read_actor(size_t read_start, 1099static int subbuf_read_actor(size_t read_start,
1100 struct rchan_buf *buf, 1100 struct rchan_buf *buf,
1101 size_t avail, 1101 size_t avail,
1102 read_descriptor_t *desc, 1102 read_descriptor_t *desc)
1103 read_actor_t actor)
1104{ 1103{
1105 void *from; 1104 void *from;
1106 int ret = 0; 1105 int ret = 0;
@@ -1121,15 +1120,13 @@ static int subbuf_read_actor(size_t read_start,
1121typedef int (*subbuf_actor_t) (size_t read_start, 1120typedef int (*subbuf_actor_t) (size_t read_start,
1122 struct rchan_buf *buf, 1121 struct rchan_buf *buf,
1123 size_t avail, 1122 size_t avail,
1124 read_descriptor_t *desc, 1123 read_descriptor_t *desc);
1125 read_actor_t actor);
1126 1124
1127/* 1125/*
1128 * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries 1126 * relay_file_read_subbufs - read count bytes, bridging subbuf boundaries
1129 */ 1127 */
1130static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos, 1128static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
1131 subbuf_actor_t subbuf_actor, 1129 subbuf_actor_t subbuf_actor,
1132 read_actor_t actor,
1133 read_descriptor_t *desc) 1130 read_descriptor_t *desc)
1134{ 1131{
1135 struct rchan_buf *buf = filp->private_data; 1132 struct rchan_buf *buf = filp->private_data;
@@ -1150,7 +1147,7 @@ static ssize_t relay_file_read_subbufs(struct file *filp, loff_t *ppos,
1150 break; 1147 break;
1151 1148
1152 avail = min(desc->count, avail); 1149 avail = min(desc->count, avail);
1153 ret = subbuf_actor(read_start, buf, avail, desc, actor); 1150 ret = subbuf_actor(read_start, buf, avail, desc);
1154 if (desc->error < 0) 1151 if (desc->error < 0)
1155 break; 1152 break;
1156 1153
@@ -1174,8 +1171,7 @@ static ssize_t relay_file_read(struct file *filp,
1174 desc.count = count; 1171 desc.count = count;
1175 desc.arg.buf = buffer; 1172 desc.arg.buf = buffer;
1176 desc.error = 0; 1173 desc.error = 0;
1177 return relay_file_read_subbufs(filp, ppos, subbuf_read_actor, 1174 return relay_file_read_subbufs(filp, ppos, subbuf_read_actor, &desc);
1178 NULL, &desc);
1179} 1175}
1180 1176
1181static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed) 1177static void relay_consume_bytes(struct rchan_buf *rbuf, int bytes_consumed)
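Every in-tree caller passed NULL for the actor, so dropping the parameter changes no behavior; relay_open() itself is untouched apart from the FIX_SIZE → PAGE_ALIGN swap, which computes the same page-rounded allocation size. A hedged sketch of a minimal client (channel name and sizes hypothetical; a real client fills in create_buf_file() and friends):

#include <linux/relay.h>

static struct rchan_callbacks my_relay_cbs;	/* create_buf_file() etc. */

static struct rchan *open_trace_channel(struct dentry *parent)
{
	/* alloc_size = PAGE_ALIGN(64K * 8), as the hunk above computes. */
	return relay_open("trace", parent, 64 * 1024, 8,
			  &my_relay_cbs, NULL);
}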
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index c70a8814a767..5662f58f0b69 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4586,6 +4586,7 @@ void sched_show_task(struct task_struct *p)
4586 task_pid_nr(p), ppid, 4586 task_pid_nr(p), ppid,
4587 (unsigned long)task_thread_info(p)->flags); 4587 (unsigned long)task_thread_info(p)->flags);
4588 4588
4589 print_worker_info(KERN_INFO, p);
4589 show_stack(p, NULL); 4590 show_stack(p, NULL);
4590} 4591}
4591 4592
diff --git a/kernel/semaphore.c b/kernel/semaphore.c
index 4567fc020fe3..6815171a4fff 100644
--- a/kernel/semaphore.c
+++ b/kernel/semaphore.c
@@ -193,7 +193,7 @@ EXPORT_SYMBOL(up);
193struct semaphore_waiter { 193struct semaphore_waiter {
194 struct list_head list; 194 struct list_head list;
195 struct task_struct *task; 195 struct task_struct *task;
196 int up; 196 bool up;
197}; 197};
198 198
199/* 199/*
@@ -209,12 +209,12 @@ static inline int __sched __down_common(struct semaphore *sem, long state,
209 209
210 list_add_tail(&waiter.list, &sem->wait_list); 210 list_add_tail(&waiter.list, &sem->wait_list);
211 waiter.task = task; 211 waiter.task = task;
212 waiter.up = 0; 212 waiter.up = false;
213 213
214 for (;;) { 214 for (;;) {
215 if (signal_pending_state(state, task)) 215 if (signal_pending_state(state, task))
216 goto interrupted; 216 goto interrupted;
217 if (timeout <= 0) 217 if (unlikely(timeout <= 0))
218 goto timed_out; 218 goto timed_out;
219 __set_task_state(task, state); 219 __set_task_state(task, state);
220 raw_spin_unlock_irq(&sem->lock); 220 raw_spin_unlock_irq(&sem->lock);
@@ -258,6 +258,6 @@ static noinline void __sched __up(struct semaphore *sem)
258 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list, 258 struct semaphore_waiter *waiter = list_first_entry(&sem->wait_list,
259 struct semaphore_waiter, list); 259 struct semaphore_waiter, list);
260 list_del(&waiter->list); 260 list_del(&waiter->list);
261 waiter->up = 1; 261 waiter->up = true;
262 wake_up_process(waiter->task); 262 wake_up_process(waiter->task);
263} 263}
diff --git a/kernel/signal.c b/kernel/signal.c
index 598dc06be421..cede58910f9c 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -854,12 +854,14 @@ static void ptrace_trap_notify(struct task_struct *t)
854 * Returns true if the signal should be actually delivered, otherwise 854 * Returns true if the signal should be actually delivered, otherwise
855 * it should be dropped. 855 * it should be dropped.
856 */ 856 */
857static int prepare_signal(int sig, struct task_struct *p, bool force) 857static bool prepare_signal(int sig, struct task_struct *p, bool force)
858{ 858{
859 struct signal_struct *signal = p->signal; 859 struct signal_struct *signal = p->signal;
860 struct task_struct *t; 860 struct task_struct *t;
861 861
862 if (unlikely(signal->flags & SIGNAL_GROUP_EXIT)) { 862 if (signal->flags & (SIGNAL_GROUP_EXIT | SIGNAL_GROUP_COREDUMP)) {
863 if (signal->flags & SIGNAL_GROUP_COREDUMP)
864 return sig == SIGKILL;
863 /* 865 /*
864 * The process is in the middle of dying, nothing to do. 866 * The process is in the middle of dying, nothing to do.
865 */ 867 */
@@ -1160,8 +1162,7 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
1160static void print_fatal_signal(int signr) 1162static void print_fatal_signal(int signr)
1161{ 1163{
1162 struct pt_regs *regs = signal_pt_regs(); 1164 struct pt_regs *regs = signal_pt_regs();
1163 printk(KERN_INFO "%s/%d: potentially unexpected fatal signal %d.\n", 1165 printk(KERN_INFO "potentially unexpected fatal signal %d.\n", signr);
1164 current->comm, task_pid_nr(current), signr);
1165 1166
1166#if defined(__i386__) && !defined(__arch_um__) 1167#if defined(__i386__) && !defined(__arch_um__)
1167 printk(KERN_INFO "code at %08lx: ", regs->ip); 1168 printk(KERN_INFO "code at %08lx: ", regs->ip);
diff --git a/kernel/smp.c b/kernel/smp.c
index 8e451f3ff51b..4dba0f7b72ad 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -100,16 +100,16 @@ void __init call_function_init(void)
100 * previous function call. For multi-cpu calls its even more interesting 100 * previous function call. For multi-cpu calls its even more interesting
101 * as we'll have to ensure no other cpu is observing our csd. 101 * as we'll have to ensure no other cpu is observing our csd.
102 */ 102 */
103static void csd_lock_wait(struct call_single_data *data) 103static void csd_lock_wait(struct call_single_data *csd)
104{ 104{
105 while (data->flags & CSD_FLAG_LOCK) 105 while (csd->flags & CSD_FLAG_LOCK)
106 cpu_relax(); 106 cpu_relax();
107} 107}
108 108
109static void csd_lock(struct call_single_data *data) 109static void csd_lock(struct call_single_data *csd)
110{ 110{
111 csd_lock_wait(data); 111 csd_lock_wait(csd);
112 data->flags = CSD_FLAG_LOCK; 112 csd->flags |= CSD_FLAG_LOCK;
113 113
114 /* 114 /*
115 * prevent CPU from reordering the above assignment 115 * prevent CPU from reordering the above assignment
@@ -119,16 +119,16 @@ static void csd_lock(struct call_single_data *data)
119 smp_mb(); 119 smp_mb();
120} 120}
121 121
122static void csd_unlock(struct call_single_data *data) 122static void csd_unlock(struct call_single_data *csd)
123{ 123{
124 WARN_ON(!(data->flags & CSD_FLAG_LOCK)); 124 WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
125 125
126 /* 126 /*
127 * ensure we're all done before releasing data: 127 * ensure we're all done before releasing data:
128 */ 128 */
129 smp_mb(); 129 smp_mb();
130 130
131 data->flags &= ~CSD_FLAG_LOCK; 131 csd->flags &= ~CSD_FLAG_LOCK;
132} 132}
133 133
134/* 134/*
@@ -137,7 +137,7 @@ static void csd_unlock(struct call_single_data *data)
137 * ->func, ->info, and ->flags set. 137 * ->func, ->info, and ->flags set.
138 */ 138 */
139static 139static
140void generic_exec_single(int cpu, struct call_single_data *data, int wait) 140void generic_exec_single(int cpu, struct call_single_data *csd, int wait)
141{ 141{
142 struct call_single_queue *dst = &per_cpu(call_single_queue, cpu); 142 struct call_single_queue *dst = &per_cpu(call_single_queue, cpu);
143 unsigned long flags; 143 unsigned long flags;
@@ -145,7 +145,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
145 145
146 raw_spin_lock_irqsave(&dst->lock, flags); 146 raw_spin_lock_irqsave(&dst->lock, flags);
147 ipi = list_empty(&dst->list); 147 ipi = list_empty(&dst->list);
148 list_add_tail(&data->list, &dst->list); 148 list_add_tail(&csd->list, &dst->list);
149 raw_spin_unlock_irqrestore(&dst->lock, flags); 149 raw_spin_unlock_irqrestore(&dst->lock, flags);
150 150
151 /* 151 /*
@@ -163,7 +163,7 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
163 arch_send_call_function_single_ipi(cpu); 163 arch_send_call_function_single_ipi(cpu);
164 164
165 if (wait) 165 if (wait)
166 csd_lock_wait(data); 166 csd_lock_wait(csd);
167} 167}
168 168
169/* 169/*
@@ -173,7 +173,6 @@ void generic_exec_single(int cpu, struct call_single_data *data, int wait)
173void generic_smp_call_function_single_interrupt(void) 173void generic_smp_call_function_single_interrupt(void)
174{ 174{
175 struct call_single_queue *q = &__get_cpu_var(call_single_queue); 175 struct call_single_queue *q = &__get_cpu_var(call_single_queue);
176 unsigned int data_flags;
177 LIST_HEAD(list); 176 LIST_HEAD(list);
178 177
179 /* 178 /*
@@ -186,25 +185,26 @@ void generic_smp_call_function_single_interrupt(void)
186 raw_spin_unlock(&q->lock); 185 raw_spin_unlock(&q->lock);
187 186
188 while (!list_empty(&list)) { 187 while (!list_empty(&list)) {
189 struct call_single_data *data; 188 struct call_single_data *csd;
189 unsigned int csd_flags;
190 190
191 data = list_entry(list.next, struct call_single_data, list); 191 csd = list_entry(list.next, struct call_single_data, list);
192 list_del(&data->list); 192 list_del(&csd->list);
193 193
194 /* 194 /*
195 * 'data' can be invalid after this call if flags == 0 195 * 'csd' can be invalid after this call if flags == 0
196 * (when called through generic_exec_single()), 196 * (when called through generic_exec_single()),
197 * so save them away before making the call: 197 * so save them away before making the call:
198 */ 198 */
199 data_flags = data->flags; 199 csd_flags = csd->flags;
200 200
201 data->func(data->info); 201 csd->func(csd->info);
202 202
203 /* 203 /*
204 * Unlocked CSDs are valid through generic_exec_single(): 204 * Unlocked CSDs are valid through generic_exec_single():
205 */ 205 */
206 if (data_flags & CSD_FLAG_LOCK) 206 if (csd_flags & CSD_FLAG_LOCK)
207 csd_unlock(data); 207 csd_unlock(csd);
208 } 208 }
209} 209}
210 210
@@ -249,16 +249,16 @@ int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
249 local_irq_restore(flags); 249 local_irq_restore(flags);
250 } else { 250 } else {
251 if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) { 251 if ((unsigned)cpu < nr_cpu_ids && cpu_online(cpu)) {
252 struct call_single_data *data = &d; 252 struct call_single_data *csd = &d;
253 253
254 if (!wait) 254 if (!wait)
255 data = &__get_cpu_var(csd_data); 255 csd = &__get_cpu_var(csd_data);
256 256
257 csd_lock(data); 257 csd_lock(csd);
258 258
259 data->func = func; 259 csd->func = func;
260 data->info = info; 260 csd->info = info;
261 generic_exec_single(cpu, data, wait); 261 generic_exec_single(cpu, csd, wait);
262 } else { 262 } else {
263 err = -ENXIO; /* CPU not online */ 263 err = -ENXIO; /* CPU not online */
264 } 264 }
@@ -325,7 +325,7 @@ EXPORT_SYMBOL_GPL(smp_call_function_any);
325 * pre-allocated data structure. Useful for embedding @data inside 325 * pre-allocated data structure. Useful for embedding @data inside
326 * other structures, for instance. 326 * other structures, for instance.
327 */ 327 */
328void __smp_call_function_single(int cpu, struct call_single_data *data, 328void __smp_call_function_single(int cpu, struct call_single_data *csd,
329 int wait) 329 int wait)
330{ 330{
331 unsigned int this_cpu; 331 unsigned int this_cpu;
@@ -343,11 +343,11 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
343 343
344 if (cpu == this_cpu) { 344 if (cpu == this_cpu) {
345 local_irq_save(flags); 345 local_irq_save(flags);
346 data->func(data->info); 346 csd->func(csd->info);
347 local_irq_restore(flags); 347 local_irq_restore(flags);
348 } else { 348 } else {
349 csd_lock(data); 349 csd_lock(csd);
350 generic_exec_single(cpu, data, wait); 350 generic_exec_single(cpu, csd, wait);
351 } 351 }
352 put_cpu(); 352 put_cpu();
353} 353}
@@ -369,7 +369,7 @@ void __smp_call_function_single(int cpu, struct call_single_data *data,
369void smp_call_function_many(const struct cpumask *mask, 369void smp_call_function_many(const struct cpumask *mask,
370 smp_call_func_t func, void *info, bool wait) 370 smp_call_func_t func, void *info, bool wait)
371{ 371{
372 struct call_function_data *data; 372 struct call_function_data *cfd;
373 int cpu, next_cpu, this_cpu = smp_processor_id(); 373 int cpu, next_cpu, this_cpu = smp_processor_id();
374 374
375 /* 375 /*
@@ -401,24 +401,24 @@ void smp_call_function_many(const struct cpumask *mask,
401 return; 401 return;
402 } 402 }
403 403
404 data = &__get_cpu_var(cfd_data); 404 cfd = &__get_cpu_var(cfd_data);
405 405
406 cpumask_and(data->cpumask, mask, cpu_online_mask); 406 cpumask_and(cfd->cpumask, mask, cpu_online_mask);
407 cpumask_clear_cpu(this_cpu, data->cpumask); 407 cpumask_clear_cpu(this_cpu, cfd->cpumask);
408 408
409 /* Some callers race with other cpus changing the passed mask */ 409 /* Some callers race with other cpus changing the passed mask */
410 if (unlikely(!cpumask_weight(data->cpumask))) 410 if (unlikely(!cpumask_weight(cfd->cpumask)))
411 return; 411 return;
412 412
413 /* 413 /*
414 * After we put an entry into the list, data->cpumask 414 * After we put an entry into the list, cfd->cpumask may be cleared
415 * may be cleared again when another CPU sends another IPI for 415 * again when another CPU sends another IPI for a SMP function call, so
416 * a SMP function call, so data->cpumask will be zero. 416 * cfd->cpumask will be zero.
417 */ 417 */
418 cpumask_copy(data->cpumask_ipi, data->cpumask); 418 cpumask_copy(cfd->cpumask_ipi, cfd->cpumask);
419 419
420 for_each_cpu(cpu, data->cpumask) { 420 for_each_cpu(cpu, cfd->cpumask) {
421 struct call_single_data *csd = per_cpu_ptr(data->csd, cpu); 421 struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
422 struct call_single_queue *dst = 422 struct call_single_queue *dst =
423 &per_cpu(call_single_queue, cpu); 423 &per_cpu(call_single_queue, cpu);
424 unsigned long flags; 424 unsigned long flags;
@@ -433,12 +433,13 @@ void smp_call_function_many(const struct cpumask *mask,
433 } 433 }
434 434
435 /* Send a message to all CPUs in the map */ 435 /* Send a message to all CPUs in the map */
436 arch_send_call_function_ipi_mask(data->cpumask_ipi); 436 arch_send_call_function_ipi_mask(cfd->cpumask_ipi);
437 437
438 if (wait) { 438 if (wait) {
439 for_each_cpu(cpu, data->cpumask) { 439 for_each_cpu(cpu, cfd->cpumask) {
440 struct call_single_data *csd = 440 struct call_single_data *csd;
441 per_cpu_ptr(data->csd, cpu); 441
442 csd = per_cpu_ptr(cfd->csd, cpu);
442 csd_lock_wait(csd); 443 csd_lock_wait(csd);
443 } 444 }
444 } 445 }
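The data → csd/cfd renames are mechanical, but note the one behavioral tweak hidden in csd_lock(): the plain assignment becomes |=, preserving flags already set on the csd. The call-side API is unchanged; for reference, a minimal synchronous single-CPU call looks like:

#include <linux/smp.h>

static void bump_counter(void *info)
{
	/* Runs on the target CPU, in IPI (interrupt) context. */
	int *counter = info;
	(*counter)++;
}

static int poke_cpu(int cpu)
{
	int counter = 0;

	/* wait=1: returns only after bump_counter ran on @cpu. */
	return smp_call_function_single(cpu, bump_counter, &counter, 1);
}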
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 14d7758074aa..aa82723c7202 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -620,8 +620,7 @@ static void remote_softirq_receive(void *data)
620 unsigned long flags; 620 unsigned long flags;
621 int softirq; 621 int softirq;
622 622
623 softirq = cp->priv; 623 softirq = *(int *)cp->info;
624
625 local_irq_save(flags); 624 local_irq_save(flags);
626 __local_trigger(cp, softirq); 625 __local_trigger(cp, softirq);
627 local_irq_restore(flags); 626 local_irq_restore(flags);
@@ -631,9 +630,8 @@ static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softir
631{ 630{
632 if (cpu_online(cpu)) { 631 if (cpu_online(cpu)) {
633 cp->func = remote_softirq_receive; 632 cp->func = remote_softirq_receive;
634 cp->info = cp; 633 cp->info = &softirq;
635 cp->flags = 0; 634 cp->flags = 0;
636 cp->priv = softirq;
637 635
638 __smp_call_function_single(cpu, cp, 0); 636 __smp_call_function_single(cpu, cp, 0);
639 return 0; 637 return 0;
diff --git a/kernel/sys.c b/kernel/sys.c
index 0da73cf73e60..afd0f7e125c9 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -49,6 +49,11 @@
49#include <linux/user_namespace.h> 49#include <linux/user_namespace.h>
50#include <linux/binfmts.h> 50#include <linux/binfmts.h>
51 51
52#include <linux/sched.h>
53#include <linux/rcupdate.h>
54#include <linux/uidgid.h>
55#include <linux/cred.h>
56
52#include <linux/kmsg_dump.h> 57#include <linux/kmsg_dump.h>
53/* Move somewhere else to avoid recompiling? */ 58/* Move somewhere else to avoid recompiling? */
54#include <generated/utsrelease.h> 59#include <generated/utsrelease.h>
@@ -1044,6 +1049,67 @@ change_okay:
1044 return old_fsgid; 1049 return old_fsgid;
1045} 1050}
1046 1051
1052/**
1053 * sys_getpid - return the thread group id of the current process
1054 *
1055 * Note, despite the name, this returns the tgid not the pid. The tgid and
1056 * the pid are identical unless CLONE_THREAD was specified on clone() in
1057 * which case the tgid is the same in all threads of the same group.
1058 *
1059 * This is SMP safe as current->tgid does not change.
1060 */
1061SYSCALL_DEFINE0(getpid)
1062{
1063 return task_tgid_vnr(current);
1064}
1065
1066/* Thread ID - the internal kernel "pid" */
1067SYSCALL_DEFINE0(gettid)
1068{
1069 return task_pid_vnr(current);
1070}
1071
1072/*
1073 * Accessing ->real_parent is not SMP-safe, it could
1074 * change from under us. However, we can use a stale
1075 * value of ->real_parent under rcu_read_lock(), see
1076 * release_task()->call_rcu(delayed_put_task_struct).
1077 */
1078SYSCALL_DEFINE0(getppid)
1079{
1080 int pid;
1081
1082 rcu_read_lock();
1083 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
1084 rcu_read_unlock();
1085
1086 return pid;
1087}
1088
1089SYSCALL_DEFINE0(getuid)
1090{
1091 /* Only we change this so SMP safe */
1092 return from_kuid_munged(current_user_ns(), current_uid());
1093}
1094
1095SYSCALL_DEFINE0(geteuid)
1096{
1097 /* Only we change this so SMP safe */
1098 return from_kuid_munged(current_user_ns(), current_euid());
1099}
1100
1101SYSCALL_DEFINE0(getgid)
1102{
1103 /* Only we change this so SMP safe */
1104 return from_kgid_munged(current_user_ns(), current_gid());
1105}
1106
1107SYSCALL_DEFINE0(getegid)
1108{
1109 /* Only we change this so SMP safe */
1110 return from_kgid_munged(current_user_ns(), current_egid());
1111}
1112
1047void do_sys_times(struct tms *tms) 1113void do_sys_times(struct tms *tms)
1048{ 1114{
1049 cputime_t tgutime, tgstime, cutime, cstime; 1115 cputime_t tgutime, tgstime, cutime, cstime;
@@ -1791,7 +1857,6 @@ SYSCALL_DEFINE1(umask, int, mask)
1791 return mask; 1857 return mask;
1792} 1858}
1793 1859
1794#ifdef CONFIG_CHECKPOINT_RESTORE
1795static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd) 1860static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1796{ 1861{
1797 struct fd exe; 1862 struct fd exe;
@@ -1985,17 +2050,12 @@ out:
1985 return error; 2050 return error;
1986} 2051}
1987 2052
2053#ifdef CONFIG_CHECKPOINT_RESTORE
1988static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) 2054static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
1989{ 2055{
1990 return put_user(me->clear_child_tid, tid_addr); 2056 return put_user(me->clear_child_tid, tid_addr);
1991} 2057}
1992 2058#else
1993#else /* CONFIG_CHECKPOINT_RESTORE */
1994static int prctl_set_mm(int opt, unsigned long addr,
1995 unsigned long arg4, unsigned long arg5)
1996{
1997 return -EINVAL;
1998}
1999static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr) 2059static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
2000{ 2060{
2001 return -EINVAL; 2061 return -EINVAL;
@@ -2245,3 +2305,148 @@ int orderly_poweroff(bool force)
2245 return 0; 2305 return 0;
2246} 2306}
2247EXPORT_SYMBOL_GPL(orderly_poweroff); 2307EXPORT_SYMBOL_GPL(orderly_poweroff);
2308
2309/**
2310 * do_sysinfo - fill in sysinfo struct
2311 * @info: pointer to buffer to fill
2312 */
2313static int do_sysinfo(struct sysinfo *info)
2314{
2315 unsigned long mem_total, sav_total;
2316 unsigned int mem_unit, bitcount;
2317 struct timespec tp;
2318
2319 memset(info, 0, sizeof(struct sysinfo));
2320
2321 ktime_get_ts(&tp);
2322 monotonic_to_bootbased(&tp);
2323 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
2324
2325 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
2326
2327 info->procs = nr_threads;
2328
2329 si_meminfo(info);
2330 si_swapinfo(info);
2331
2332 /*
2333 * If the sum of all the available memory (i.e. ram + swap)
2334 * is less than can be stored in a 32 bit unsigned long then
2335 * we can be binary compatible with 2.2.x kernels. If not,
2336 * well, in that case 2.2.x was broken anyways...
2337 *
2338 * -Erik Andersen <andersee@debian.org>
2339 */
2340
2341 mem_total = info->totalram + info->totalswap;
2342 if (mem_total < info->totalram || mem_total < info->totalswap)
2343 goto out;
2344 bitcount = 0;
2345 mem_unit = info->mem_unit;
2346 while (mem_unit > 1) {
2347 bitcount++;
2348 mem_unit >>= 1;
2349 sav_total = mem_total;
2350 mem_total <<= 1;
2351 if (mem_total < sav_total)
2352 goto out;
2353 }
2354
2355 /*
2356 * If mem_total did not overflow, multiply all memory values by
2357 * info->mem_unit and set it to 1. This leaves things compatible
2358 * with 2.2.x, and also retains compatibility with earlier 2.4.x
2359 * kernels...
2360 */
2361
2362 info->mem_unit = 1;
2363 info->totalram <<= bitcount;
2364 info->freeram <<= bitcount;
2365 info->sharedram <<= bitcount;
2366 info->bufferram <<= bitcount;
2367 info->totalswap <<= bitcount;
2368 info->freeswap <<= bitcount;
2369 info->totalhigh <<= bitcount;
2370 info->freehigh <<= bitcount;
2371
2372out:
2373 return 0;
2374}
2375
2376SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
2377{
2378 struct sysinfo val;
2379
2380 do_sysinfo(&val);
2381
2382 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
2383 return -EFAULT;
2384
2385 return 0;
2386}
2387
2388#ifdef CONFIG_COMPAT
2389struct compat_sysinfo {
2390 s32 uptime;
2391 u32 loads[3];
2392 u32 totalram;
2393 u32 freeram;
2394 u32 sharedram;
2395 u32 bufferram;
2396 u32 totalswap;
2397 u32 freeswap;
2398 u16 procs;
2399 u16 pad;
2400 u32 totalhigh;
2401 u32 freehigh;
2402 u32 mem_unit;
2403 char _f[20-2*sizeof(u32)-sizeof(int)];
2404};
2405
2406COMPAT_SYSCALL_DEFINE1(sysinfo, struct compat_sysinfo __user *, info)
2407{
2408 struct sysinfo s;
2409
2410 do_sysinfo(&s);
2411
2412 /* Check to see if any memory value is too large for 32-bit and scale
2413 * down if needed
2414 */
2415 if ((s.totalram >> 32) || (s.totalswap >> 32)) {
2416 int bitcount = 0;
2417
2418 while (s.mem_unit < PAGE_SIZE) {
2419 s.mem_unit <<= 1;
2420 bitcount++;
2421 }
2422
2423 s.totalram >>= bitcount;
2424 s.freeram >>= bitcount;
2425 s.sharedram >>= bitcount;
2426 s.bufferram >>= bitcount;
2427 s.totalswap >>= bitcount;
2428 s.freeswap >>= bitcount;
2429 s.totalhigh >>= bitcount;
2430 s.freehigh >>= bitcount;
2431 }
2432
2433 if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
2434 __put_user(s.uptime, &info->uptime) ||
2435 __put_user(s.loads[0], &info->loads[0]) ||
2436 __put_user(s.loads[1], &info->loads[1]) ||
2437 __put_user(s.loads[2], &info->loads[2]) ||
2438 __put_user(s.totalram, &info->totalram) ||
2439 __put_user(s.freeram, &info->freeram) ||
2440 __put_user(s.sharedram, &info->sharedram) ||
2441 __put_user(s.bufferram, &info->bufferram) ||
2442 __put_user(s.totalswap, &info->totalswap) ||
2443 __put_user(s.freeswap, &info->freeswap) ||
2444 __put_user(s.procs, &info->procs) ||
2445 __put_user(s.totalhigh, &info->totalhigh) ||
2446 __put_user(s.freehigh, &info->freehigh) ||
2447 __put_user(s.mem_unit, &info->mem_unit))
2448 return -EFAULT;
2449
2450 return 0;
2451}
2452#endif /* CONFIG_COMPAT */
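The code itself is a straight move from kernel/timer.c (see the removals below), putting the process-ID and sysinfo syscalls side by side. From userspace the contract is unchanged: every memory field is in units of mem_unit bytes, which stays 1 unless the totals overflowed 32 bits; a short consumer sketch:

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si))
		return 1;

	/* Memory fields are counted in si.mem_unit-byte units. */
	printf("total ram: %llu MiB\n",
	       (unsigned long long)si.totalram * si.mem_unit >> 20);
	printf("uptime: %ld s, procs: %u\n", si.uptime, (unsigned)si.procs);
	return 0;
}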
diff --git a/kernel/timer.c b/kernel/timer.c
index dbf7a78a1ef1..09bca8ce9771 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * linux/kernel/timer.c 2 * linux/kernel/timer.c
3 * 3 *
4 * Kernel internal timers, basic process system calls 4 * Kernel internal timers
5 * 5 *
6 * Copyright (C) 1991, 1992 Linus Torvalds 6 * Copyright (C) 1991, 1992 Linus Torvalds
7 * 7 *
@@ -41,6 +41,7 @@
41#include <linux/sched.h> 41#include <linux/sched.h>
42#include <linux/sched/sysctl.h> 42#include <linux/sched/sysctl.h>
43#include <linux/slab.h> 43#include <linux/slab.h>
44#include <linux/compat.h>
44 45
45#include <asm/uaccess.h> 46#include <asm/uaccess.h>
46#include <asm/unistd.h> 47#include <asm/unistd.h>
@@ -1395,61 +1396,6 @@ SYSCALL_DEFINE1(alarm, unsigned int, seconds)
1395 1396
1396#endif 1397#endif
1397 1398
1398/**
1399 * sys_getpid - return the thread group id of the current process
1400 *
1401 * Note, despite the name, this returns the tgid not the pid. The tgid and
1402 * the pid are identical unless CLONE_THREAD was specified on clone() in
1403 * which case the tgid is the same in all threads of the same group.
1404 *
1405 * This is SMP safe as current->tgid does not change.
1406 */
1407SYSCALL_DEFINE0(getpid)
1408{
1409 return task_tgid_vnr(current);
1410}
1411
1412/*
1413 * Accessing ->real_parent is not SMP-safe, it could
1414 * change from under us. However, we can use a stale
1415 * value of ->real_parent under rcu_read_lock(), see
1416 * release_task()->call_rcu(delayed_put_task_struct).
1417 */
1418SYSCALL_DEFINE0(getppid)
1419{
1420 int pid;
1421
1422 rcu_read_lock();
1423 pid = task_tgid_vnr(rcu_dereference(current->real_parent));
1424 rcu_read_unlock();
1425
1426 return pid;
1427}
1428
1429SYSCALL_DEFINE0(getuid)
1430{
1431 /* Only we change this so SMP safe */
1432 return from_kuid_munged(current_user_ns(), current_uid());
1433}
1434
1435SYSCALL_DEFINE0(geteuid)
1436{
1437 /* Only we change this so SMP safe */
1438 return from_kuid_munged(current_user_ns(), current_euid());
1439}
1440
1441SYSCALL_DEFINE0(getgid)
1442{
1443 /* Only we change this so SMP safe */
1444 return from_kgid_munged(current_user_ns(), current_gid());
1445}
1446
1447SYSCALL_DEFINE0(getegid)
1448{
1449 /* Only we change this so SMP safe */
1450 return from_kgid_munged(current_user_ns(), current_egid());
1451}
1452
1453static void process_timeout(unsigned long __data) 1399static void process_timeout(unsigned long __data)
1454{ 1400{
1455 wake_up_process((struct task_struct *)__data); 1401 wake_up_process((struct task_struct *)__data);
@@ -1557,91 +1503,6 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
1557} 1503}
1558EXPORT_SYMBOL(schedule_timeout_uninterruptible); 1504EXPORT_SYMBOL(schedule_timeout_uninterruptible);
1559 1505
1560/* Thread ID - the internal kernel "pid" */
1561SYSCALL_DEFINE0(gettid)
1562{
1563 return task_pid_vnr(current);
1564}
1565
1566/**
1567 * do_sysinfo - fill in sysinfo struct
1568 * @info: pointer to buffer to fill
1569 */
1570int do_sysinfo(struct sysinfo *info)
1571{
1572 unsigned long mem_total, sav_total;
1573 unsigned int mem_unit, bitcount;
1574 struct timespec tp;
1575
1576 memset(info, 0, sizeof(struct sysinfo));
1577
1578 ktime_get_ts(&tp);
1579 monotonic_to_bootbased(&tp);
1580 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1581
1582 get_avenrun(info->loads, 0, SI_LOAD_SHIFT - FSHIFT);
1583
1584 info->procs = nr_threads;
1585
1586 si_meminfo(info);
1587 si_swapinfo(info);
1588
1589 /*
1590 * If the sum of all the available memory (i.e. ram + swap)
1591 * is less than can be stored in a 32 bit unsigned long then
1592 * we can be binary compatible with 2.2.x kernels. If not,
1593 * well, in that case 2.2.x was broken anyways...
1594 *
1595 * -Erik Andersen <andersee@debian.org>
1596 */
1597
1598 mem_total = info->totalram + info->totalswap;
1599 if (mem_total < info->totalram || mem_total < info->totalswap)
1600 goto out;
1601 bitcount = 0;
1602 mem_unit = info->mem_unit;
1603 while (mem_unit > 1) {
1604 bitcount++;
1605 mem_unit >>= 1;
1606 sav_total = mem_total;
1607 mem_total <<= 1;
1608 if (mem_total < sav_total)
1609 goto out;
1610 }
1611
1612 /*
1613 * If mem_total did not overflow, multiply all memory values by
1614 * info->mem_unit and set it to 1. This leaves things compatible
1615 * with 2.2.x, and also retains compatibility with earlier 2.4.x
1616 * kernels...
1617 */
1618
1619 info->mem_unit = 1;
1620 info->totalram <<= bitcount;
1621 info->freeram <<= bitcount;
1622 info->sharedram <<= bitcount;
1623 info->bufferram <<= bitcount;
1624 info->totalswap <<= bitcount;
1625 info->freeswap <<= bitcount;
1626 info->totalhigh <<= bitcount;
1627 info->freehigh <<= bitcount;
1628
1629out:
1630 return 0;
1631}
1632
1633SYSCALL_DEFINE1(sysinfo, struct sysinfo __user *, info)
1634{
1635 struct sysinfo val;
1636
1637 do_sysinfo(&val);
1638
1639 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1640 return -EFAULT;
1641
1642 return 0;
1643}
1644
1645static int __cpuinit init_timers_cpu(int cpu) 1506static int __cpuinit init_timers_cpu(int cpu)
1646{ 1507{
1647 int j; 1508 int j;
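
The removed do_sysinfo() preserves 2.2-era binary compatibility by
left-shifting every memory counter by mem_unit's bit count and folding
mem_unit back to 1 whenever the scaled totals still fit in an unsigned
long; when they would overflow, mem_unit is left larger than 1.
Correct consumers therefore always multiply by mem_unit instead of
assuming byte units, as in this standalone userspace sketch (not from
the patch):

#include <stdio.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct sysinfo si;

	if (sysinfo(&si) != 0)
		return 1;

	/* totalram/freeram are counted in units of mem_unit bytes */
	printf("total RAM: %llu bytes (mem_unit=%u)\n",
	       (unsigned long long)si.totalram * si.mem_unit, si.mem_unit);
	printf("uptime: %ld s, procs: %u\n", si.uptime, (unsigned)si.procs);
	return 0;
}
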
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 154aa12af48e..4aa9f5bc6b2d 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -46,6 +46,7 @@
46#include <linux/rculist.h> 46#include <linux/rculist.h>
47#include <linux/nodemask.h> 47#include <linux/nodemask.h>
48#include <linux/moduleparam.h> 48#include <linux/moduleparam.h>
49#include <linux/uaccess.h>
49 50
50#include "workqueue_internal.h" 51#include "workqueue_internal.h"
51 52
@@ -2197,6 +2198,7 @@ __acquires(&pool->lock)
2197 worker->current_work = NULL; 2198 worker->current_work = NULL;
2198 worker->current_func = NULL; 2199 worker->current_func = NULL;
2199 worker->current_pwq = NULL; 2200 worker->current_pwq = NULL;
2201 worker->desc_valid = false;
2200 pwq_dec_nr_in_flight(pwq, work_color); 2202 pwq_dec_nr_in_flight(pwq, work_color);
2201} 2203}
2202 2204
@@ -4365,6 +4367,83 @@ unsigned int work_busy(struct work_struct *work)
4365} 4367}
4366EXPORT_SYMBOL_GPL(work_busy); 4368EXPORT_SYMBOL_GPL(work_busy);
4367 4369
4370/**
4371 * set_worker_desc - set description for the current work item
4372 * @fmt: printf-style format string
4373 * @...: arguments for the format string
4374 *
4375 * This function can be called by a running work function to describe what
4376 * the work item is about. If the worker task gets dumped, this
4378 * information will be printed out with the task dump to aid debugging. The
4378 * description can be at most WORKER_DESC_LEN including the trailing '\0'.
4379 */
4380void set_worker_desc(const char *fmt, ...)
4381{
4382 struct worker *worker = current_wq_worker();
4383 va_list args;
4384
4385 if (worker) {
4386 va_start(args, fmt);
4387 vsnprintf(worker->desc, sizeof(worker->desc), fmt, args);
4388 va_end(args);
4389 worker->desc_valid = true;
4390 }
4391}
4392
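
A hypothetical caller, with invented frob_dev/frob_work_fn names for
illustration: a work function tags itself with the device it is
servicing, so a subsequent task dump reads something like
"Workqueue: <wq name> frob_work_fn (frob0)":

#include <linux/workqueue.h>

struct frob_dev {
	int id;
	struct work_struct work;
};

static void frob_work_fn(struct work_struct *work)
{
	struct frob_dev *dev = container_of(work, struct frob_dev, work);

	/* at most WORKER_DESC_LEN bytes, including the trailing '\0' */
	set_worker_desc("frob%d", dev->id);

	/* ... the actual processing ... */
}
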
4393/**
4394 * print_worker_info - print out worker information and description
4395 * @log_lvl: the log level to use when printing
4396 * @task: target task
4397 *
4398 * If @task is a worker and currently executing a work item, print out the
4399 * name of the workqueue being serviced and worker description set with
4400 * set_worker_desc() by the currently executing work item.
4401 *
4402 * This function can be safely called on any task as long as the
4403 * task_struct itself is accessible. While safe, this function isn't
4404 * synchronized and may print mixed-up or garbage strings of limited length.
4405 */
4406void print_worker_info(const char *log_lvl, struct task_struct *task)
4407{
4408 work_func_t *fn = NULL;
4409 char name[WQ_NAME_LEN] = { };
4410 char desc[WORKER_DESC_LEN] = { };
4411 struct pool_workqueue *pwq = NULL;
4412 struct workqueue_struct *wq = NULL;
4413 bool desc_valid = false;
4414 struct worker *worker;
4415
4416 if (!(task->flags & PF_WQ_WORKER))
4417 return;
4418
4419 /*
4420 * This function is called without any synchronization and @task
4421 * could be in any state. Be careful with dereferences.
4422 */
4423 worker = probe_kthread_data(task);
4424
4425 /*
4426 * Carefully copy the associated workqueue's workfn and name. Keep
4427 * the original last '\0' in case the original contains garbage.
4428 */
4429 probe_kernel_read(&fn, &worker->current_func, sizeof(fn));
4430 probe_kernel_read(&pwq, &worker->current_pwq, sizeof(pwq));
4431 probe_kernel_read(&wq, &pwq->wq, sizeof(wq));
4432 probe_kernel_read(name, wq->name, sizeof(name) - 1);
4433
4434 /* copy worker description */
4435 probe_kernel_read(&desc_valid, &worker->desc_valid, sizeof(desc_valid));
4436 if (desc_valid)
4437 probe_kernel_read(desc, worker->desc, sizeof(desc) - 1);
4438
4439 if (fn || name[0] || desc[0]) {
4440 printk("%sWorkqueue: %s %pf", log_lvl, name, fn);
4441 if (desc[0])
4442 pr_cont(" (%s)", desc);
4443 pr_cont("\n");
4444 }
4445}
4446
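
A hypothetical use (names invented): a watchdog that flags a wedged
task can call print_worker_info() unconditionally, since the function
returns early for anything without PF_WQ_WORKER set:

#include <linux/sched.h>
#include <linux/workqueue.h>

/* Sketch of a debug path: report the workqueue context, if any, of a
 * task that appears stuck.  No locking is needed; per the comment
 * above, the output may be garbled but the accesses are safe. */
static void report_wedged_task(struct task_struct *task)
{
	pr_err("task %s:%d appears wedged\n",
	       task->comm, task_pid_nr(task));
	print_worker_info(KERN_ERR, task);
}
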
4368/* 4447/*
4369 * CPU hotplug. 4448 * CPU hotplug.
4370 * 4449 *
diff --git a/kernel/workqueue_internal.h b/kernel/workqueue_internal.h
index 84ab6e1dc6fb..ad83c96b2ece 100644
--- a/kernel/workqueue_internal.h
+++ b/kernel/workqueue_internal.h
@@ -29,15 +29,25 @@ struct worker {
29 struct work_struct *current_work; /* L: work being processed */ 29 struct work_struct *current_work; /* L: work being processed */
30 work_func_t current_func; /* L: current_work's fn */ 30 work_func_t current_func; /* L: current_work's fn */
31 struct pool_workqueue *current_pwq; /* L: current_work's pwq */ 31 struct pool_workqueue *current_pwq; /* L: current_work's pwq */
32 bool desc_valid; /* ->desc is valid */
32 struct list_head scheduled; /* L: scheduled works */ 33 struct list_head scheduled; /* L: scheduled works */
34
35 /* 64 bytes boundary on 64bit, 32 on 32bit */
36
33 struct task_struct *task; /* I: worker task */ 37 struct task_struct *task; /* I: worker task */
34 struct worker_pool *pool; /* I: the associated pool */ 38 struct worker_pool *pool; /* I: the associated pool */
35 /* L: for rescuers */ 39 /* L: for rescuers */
36 /* 64 bytes boundary on 64bit, 32 on 32bit */ 40
37 unsigned long last_active; /* L: last active timestamp */ 41 unsigned long last_active; /* L: last active timestamp */
38 unsigned int flags; /* X: flags */ 42 unsigned int flags; /* X: flags */
39 int id; /* I: worker id */ 43 int id; /* I: worker id */
40 44
45 /*
46 * Opaque string set with set_worker_desc(). Printed out with task
47 * dump for debugging - WARN, BUG, panic or sysrq.
48 */
49 char desc[WORKER_DESC_LEN];
50
41 /* used only by rescuers to point to the target workqueue */ 51 /* used only by rescuers to point to the target workqueue */
42 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */ 52 struct workqueue_struct *rescue_wq; /* I: the workqueue to rescue */
43}; 53};