Diffstat (limited to 'kernel')
47 files changed, 490 insertions(+), 565 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 35ef1185e359..1ce47553fb02 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -26,6 +26,7 @@ obj-y += sched/
 obj-y += power/
 obj-y += printk/
 obj-y += cpu/
+obj-y += irq/
 
 obj-$(CONFIG_CHECKPOINT_RESTORE) += kcmp.o
 obj-$(CONFIG_FREEZER) += freezer.o
@@ -79,7 +80,6 @@ obj-$(CONFIG_KPROBES) += kprobes.o
 obj-$(CONFIG_KGDB) += debug/
 obj-$(CONFIG_DETECT_HUNG_TASK) += hung_task.o
 obj-$(CONFIG_LOCKUP_DETECTOR) += watchdog.o
-obj-$(CONFIG_GENERIC_HARDIRQS) += irq/
 obj-$(CONFIG_SECCOMP) += seccomp.o
 obj-$(CONFIG_RCU_TORTURE_TEST) += rcutorture.o
 obj-$(CONFIG_TREE_RCU) += rcutree.o
diff --git a/kernel/capability.c b/kernel/capability.c
index f6c2ce5701e1..4e66bf9275b0 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -433,18 +433,6 @@ bool capable(int cap)
 EXPORT_SYMBOL(capable);
 
 /**
- * nsown_capable - Check superior capability to one's own user_ns
- * @cap: The capability in question
- *
- * Return true if the current task has the given superior capability
- * targeted at its own user namespace.
- */
-bool nsown_capable(int cap)
-{
-	return ns_capable(current_user_ns(), cap);
-}
-
-/**
  * inode_capable - Check superior capability over inode
  * @inode: The inode in question
  * @cap: The capability in question
@@ -464,3 +452,4 @@ bool inode_capable(const struct inode *inode, int cap)
 
 	return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
 }
+EXPORT_SYMBOL(inode_capable);
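
The helper removed above was a trivial one-line wrapper, so every caller is converted mechanically (see kernel/groups.c and kernel/pid_namespace.c below). A sketch of the rewrite at a call site:

	/* before: the wrapper hid the namespace argument */
	if (!nsown_capable(CAP_SETGID))
		return -EPERM;

	/* after: the check is spelled out against the caller's own user namespace */
	if (!ns_capable(current_user_ns(), CAP_SETGID))
		return -EPERM;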
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e0aeb32415ff..2418b6e71a85 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -60,6 +60,7 @@
 #include <linux/poll.h>
 #include <linux/flex_array.h> /* used in cgroup_attach_task */
 #include <linux/kthread.h>
+#include <linux/file.h>
 
 #include <linux/atomic.h>
 
@@ -4034,8 +4035,8 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
 	struct cgroup_event *event;
 	struct cgroup_subsys_state *cfile_css;
 	unsigned int efd, cfd;
-	struct file *efile;
-	struct file *cfile;
+	struct fd efile;
+	struct fd cfile;
 	char *endp;
 	int ret;
 
@@ -4058,31 +4059,31 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
 	init_waitqueue_func_entry(&event->wait, cgroup_event_wake);
 	INIT_WORK(&event->remove, cgroup_event_remove);
 
-	efile = eventfd_fget(efd);
-	if (IS_ERR(efile)) {
-		ret = PTR_ERR(efile);
+	efile = fdget(efd);
+	if (!efile.file) {
+		ret = -EBADF;
 		goto out_kfree;
 	}
 
-	event->eventfd = eventfd_ctx_fileget(efile);
+	event->eventfd = eventfd_ctx_fileget(efile.file);
 	if (IS_ERR(event->eventfd)) {
 		ret = PTR_ERR(event->eventfd);
 		goto out_put_efile;
 	}
 
-	cfile = fget(cfd);
-	if (!cfile) {
+	cfile = fdget(cfd);
+	if (!cfile.file) {
 		ret = -EBADF;
 		goto out_put_eventfd;
 	}
 
 	/* the process need read permission on control file */
 	/* AV: shouldn't we check that it's been opened for read instead? */
-	ret = inode_permission(file_inode(cfile), MAY_READ);
+	ret = inode_permission(file_inode(cfile.file), MAY_READ);
 	if (ret < 0)
 		goto out_put_cfile;
 
-	event->cft = __file_cft(cfile);
+	event->cft = __file_cft(cfile.file);
 	if (IS_ERR(event->cft)) {
 		ret = PTR_ERR(event->cft);
 		goto out_put_cfile;
@@ -4103,7 +4104,7 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
 
 	ret = -EINVAL;
 	event->css = cgroup_css(cgrp, event->cft->ss);
-	cfile_css = css_from_dir(cfile->f_dentry->d_parent, event->cft->ss);
+	cfile_css = css_from_dir(cfile.file->f_dentry->d_parent, event->cft->ss);
 	if (event->css && event->css == cfile_css && css_tryget(event->css))
 		ret = 0;
 
@@ -4121,25 +4122,25 @@ static int cgroup_write_event_control(struct cgroup_subsys_state *dummy_css,
 	if (ret)
 		goto out_put_css;
 
-	efile->f_op->poll(efile, &event->pt);
+	efile.file->f_op->poll(efile.file, &event->pt);
 
 	spin_lock(&cgrp->event_list_lock);
 	list_add(&event->list, &cgrp->event_list);
 	spin_unlock(&cgrp->event_list_lock);
 
-	fput(cfile);
-	fput(efile);
+	fdput(cfile);
+	fdput(efile);
 
 	return 0;
 
 out_put_css:
 	css_put(event->css);
out_put_cfile:
-	fput(cfile);
+	fdput(cfile);
 out_put_eventfd:
 	eventfd_ctx_put(event->eventfd);
 out_put_efile:
-	fput(efile);
+	fdput(efile);
 out_kfree:
 	kfree(event);
 
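
The fget()/eventfd_fget() to fdget() conversion above (and in kernel/module.c below) follows the standard struct fd pattern from <linux/file.h>: fdget() can avoid a reference-count bump when the fd table is not shared, failure is reported as a NULL file rather than an ERR_PTR, and fdput() drops a reference only if one was taken. A minimal sketch of the calling convention; use_fd() is a hypothetical caller:

	#include <linux/file.h>

	static int use_fd(unsigned int fd)
	{
		struct fd f = fdget(fd);	/* may take a lightweight reference */

		if (!f.file)			/* no PTR_ERR(): failure is a NULL file */
			return -EBADF;
		/* ... operate on f.file ... */
		fdput(f);			/* pairs with fdget(); cheap in the common case */
		return 0;
	}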
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2207efc941d1..dd236b66ca3a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5039,6 +5039,7 @@ static void perf_event_mmap_output(struct perf_event *event,
 		mmap_event->event_id.header.size += sizeof(mmap_event->maj);
 		mmap_event->event_id.header.size += sizeof(mmap_event->min);
 		mmap_event->event_id.header.size += sizeof(mmap_event->ino);
+		mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
 	}
 
 	perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index f3569747d629..ad8e1bdca70e 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1682,12 +1682,10 @@ static bool handle_trampoline(struct pt_regs *regs)
 			tmp = ri;
 			ri = ri->next;
 			kfree(tmp);
+			utask->depth--;
 
 			if (!chained)
 				break;
-
-			utask->depth--;
-
 			BUG_ON(!ri);
 		}
 
diff --git a/kernel/extable.c b/kernel/extable.c
index 67460b93b1a1..832cb28105bb 100644
--- a/kernel/extable.c
+++ b/kernel/extable.c
@@ -41,7 +41,7 @@ u32 __initdata main_extable_sort_needed = 1;
 /* Sort the kernel's built-in exception table */
 void __init sort_main_extable(void)
 {
-	if (main_extable_sort_needed) {
+	if (main_extable_sort_needed && __stop___ex_table > __start___ex_table) {
 		pr_notice("Sorting __ex_table...\n");
 		sort_extable(__start___ex_table, __stop___ex_table);
 	}
diff --git a/kernel/fork.c b/kernel/fork.c
index bf46287c91a4..086fe73ad6bd 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -351,7 +351,6 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 	struct rb_node **rb_link, *rb_parent;
 	int retval;
 	unsigned long charge;
-	struct mempolicy *pol;
 
 	uprobe_start_dup_mmap();
 	down_write(&oldmm->mmap_sem);
@@ -400,11 +399,9 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 			goto fail_nomem;
 		*tmp = *mpnt;
 		INIT_LIST_HEAD(&tmp->anon_vma_chain);
-		pol = mpol_dup(vma_policy(mpnt));
-		retval = PTR_ERR(pol);
-		if (IS_ERR(pol))
+		retval = vma_dup_policy(mpnt, tmp);
+		if (retval)
 			goto fail_nomem_policy;
-		vma_set_policy(tmp, pol);
 		tmp->vm_mm = mm;
 		if (anon_vma_fork(tmp, mpnt))
 			goto fail_nomem_anon_vma_fork;
@@ -472,7 +469,7 @@ out:
 	uprobe_end_dup_mmap();
 	return retval;
 fail_nomem_anon_vma_fork:
-	mpol_put(pol);
+	mpol_put(vma_policy(tmp));
 fail_nomem_policy:
 	kmem_cache_free(vm_area_cachep, tmp);
 fail_nomem:
@@ -522,7 +519,7 @@ static void mm_init_aio(struct mm_struct *mm)
 {
 #ifdef CONFIG_AIO
 	spin_lock_init(&mm->ioctx_lock);
-	INIT_HLIST_HEAD(&mm->ioctx_list);
+	mm->ioctx_table = NULL;
 #endif
 }
 
@@ -1173,13 +1170,16 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 		return ERR_PTR(-EINVAL);
 
 	/*
-	 * If the new process will be in a different pid namespace
-	 * don't allow the creation of threads.
+	 * If the new process will be in a different pid or user namespace
+	 * do not allow it to share a thread group or signal handlers or
+	 * parent with the forking task.
 	 */
-	if ((clone_flags & (CLONE_VM|CLONE_NEWPID)) &&
-	    (task_active_pid_ns(current) !=
-	     current->nsproxy->pid_ns_for_children))
-		return ERR_PTR(-EINVAL);
+	if (clone_flags & (CLONE_SIGHAND | CLONE_PARENT)) {
+		if ((clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) ||
+		    (task_active_pid_ns(current) !=
+		     current->nsproxy->pid_ns_for_children))
+			return ERR_PTR(-EINVAL);
+	}
 
 	retval = security_task_create(clone_flags);
 	if (retval)
@@ -1576,15 +1576,6 @@ long do_fork(unsigned long clone_flags,
 	long nr;
 
 	/*
-	 * Do some preliminary argument and permissions checking before we
-	 * actually start allocating stuff
-	 */
-	if (clone_flags & (CLONE_NEWUSER | CLONE_NEWPID)) {
-		if (clone_flags & (CLONE_THREAD|CLONE_PARENT))
-			return -EINVAL;
-	}
-
-	/*
 	 * Determine whether and which event to report to ptracer. When
 	 * called from kernel_thread or CLONE_UNTRACED is explicitly
 	 * requested, no event is reported; otherwise, report if the event
@@ -1825,11 +1816,6 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
 	if (unshare_flags & CLONE_NEWUSER)
 		unshare_flags |= CLONE_THREAD | CLONE_FS;
 	/*
-	 * If unsharing a pid namespace must also unshare the thread.
-	 */
-	if (unshare_flags & CLONE_NEWPID)
-		unshare_flags |= CLONE_THREAD;
-	/*
 	 * If unsharing a thread from a thread group, must also unshare vm.
 	 */
 	if (unshare_flags & CLONE_THREAD)
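
vma_dup_policy() is a new helper (added in mm/mempolicy.c by the same series) that folds the mpol_dup()/vma_set_policy() pair into a single call; this also lets the fail_nomem_anon_vma_fork path use mpol_put(vma_policy(tmp)) instead of a local variable. A sketch of its presumed shape, equivalent to the code it replaces:

	/* Presumed shape of the new helper; field name vm_policy assumed. */
	int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
	{
		struct mempolicy *pol = mpol_dup(vma_policy(src));

		if (IS_ERR(pol))
			return PTR_ERR(pol);
		dst->vm_policy = pol;
		return 0;
	}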
diff --git a/kernel/gcov/fs.c b/kernel/gcov/fs.c
index 9bd0934f6c33..7a7d2ee96d42 100644
--- a/kernel/gcov/fs.c
+++ b/kernel/gcov/fs.c
@@ -74,7 +74,7 @@ static int __init gcov_persist_setup(char *str)
 {
 	unsigned long val;
 
-	if (strict_strtoul(str, 0, &val)) {
+	if (kstrtoul(str, 0, &val)) {
 		pr_warning("invalid gcov_persist parameter '%s'\n", str);
 		return 0;
 	}
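
This is part of the tree-wide move from the deprecated strict_strtoul() to kstrtoul() (also in kernel/ksysfs.c and kernel/params.c below). Both return 0 on success and a negative errno on failure, so call sites convert one-for-one. For reference, the signature:

	int kstrtoul(const char *s, unsigned int base, unsigned long *res);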
diff --git a/kernel/groups.c b/kernel/groups.c
index 6b2588dd04ff..90cf1c38c8ea 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -233,7 +233,7 @@ SYSCALL_DEFINE2(setgroups, int, gidsetsize, gid_t __user *, grouplist)
 	struct group_info *group_info;
 	int retval;
 
-	if (!nsown_capable(CAP_SETGID))
+	if (!ns_capable(current_user_ns(), CAP_SETGID))
 		return -EPERM;
 	if ((unsigned)gidsetsize > NGROUPS_MAX)
 		return -EINVAL;
diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig
index d1a758bc972a..4a1fef09f658 100644
--- a/kernel/irq/Kconfig
+++ b/kernel/irq/Kconfig
@@ -1,15 +1,4 @@
-# Select this to activate the generic irq options below
-config HAVE_GENERIC_HARDIRQS
-	bool
-
-if HAVE_GENERIC_HARDIRQS
 menu "IRQ subsystem"
-#
-# Interrupt subsystem related configuration options
-#
-config GENERIC_HARDIRQS
-	def_bool y
-
 # Options selectable by the architecture code
 
 # Make sparse irq Kconfig switch below available
@@ -84,4 +73,3 @@ config SPARSE_IRQ
 	  If you don't know what to do here, say N.
 
 endmenu
-endif
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 59f7b55ba745..2a74f307c5ec 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1474,11 +1474,8 @@ static int __init __parse_crashkernel(char *cmdline,
 	if (first_colon && (!first_space || first_colon < first_space))
 		return parse_crashkernel_mem(ck_cmdline, system_ram,
 				crash_size, crash_base);
-	else
-		return parse_crashkernel_simple(ck_cmdline, crash_size,
-				crash_base);
 
-	return 0;
+	return parse_crashkernel_simple(ck_cmdline, crash_size, crash_base);
 }
 
 /*
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 6e33498d665c..a0d367a49122 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -112,6 +112,7 @@ static struct kprobe_blackpoint kprobe_blacklist[] = {
 struct kprobe_insn_page {
 	struct list_head list;
 	kprobe_opcode_t *insns;		/* Page of instruction slots */
+	struct kprobe_insn_cache *cache;
 	int nused;
 	int ngarbage;
 	char slot_used[];
@@ -121,12 +122,6 @@ struct kprobe_insn_page {
 	(offsetof(struct kprobe_insn_page, slot_used) +	\
 	(sizeof(char) * (slots)))
 
-struct kprobe_insn_cache {
-	struct list_head pages;	/* list of kprobe_insn_page */
-	size_t insn_size;	/* size of instruction slot */
-	int nr_garbage;
-};
-
 static int slots_per_page(struct kprobe_insn_cache *c)
 {
 	return PAGE_SIZE/(c->insn_size * sizeof(kprobe_opcode_t));
@@ -138,8 +133,20 @@ enum kprobe_slot_state {
 	SLOT_USED = 2,
 };
 
-static DEFINE_MUTEX(kprobe_insn_mutex); /* Protects kprobe_insn_slots */
-static struct kprobe_insn_cache kprobe_insn_slots = {
+static void *alloc_insn_page(void)
+{
+	return module_alloc(PAGE_SIZE);
+}
+
+static void free_insn_page(void *page)
+{
+	module_free(NULL, page);
+}
+
+struct kprobe_insn_cache kprobe_insn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_insn_slots.mutex),
+	.alloc = alloc_insn_page,
+	.free = free_insn_page,
 	.pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
 	.insn_size = MAX_INSN_SIZE,
 	.nr_garbage = 0,
@@ -150,10 +157,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c);
  * __get_insn_slot() - Find a slot on an executable page for an instruction.
  * We allocate an executable page if there's no room on existing ones.
  */
-static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
+kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 {
 	struct kprobe_insn_page *kip;
+	kprobe_opcode_t *slot = NULL;
 
+	mutex_lock(&c->mutex);
 retry:
 	list_for_each_entry(kip, &c->pages, list) {
 		if (kip->nused < slots_per_page(c)) {
@@ -162,7 +171,8 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 			if (kip->slot_used[i] == SLOT_CLEAN) {
 				kip->slot_used[i] = SLOT_USED;
 				kip->nused++;
-				return kip->insns + (i * c->insn_size);
+				slot = kip->insns + (i * c->insn_size);
+				goto out;
 			}
 		}
 		/* kip->nused is broken. Fix it. */
@@ -178,37 +188,29 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
 	/* All out of space. Need to allocate a new page. */
 	kip = kmalloc(KPROBE_INSN_PAGE_SIZE(slots_per_page(c)), GFP_KERNEL);
 	if (!kip)
-		return NULL;
+		goto out;
 
 	/*
 	 * Use module_alloc so this page is within +/- 2GB of where the
 	 * kernel image and loaded module images reside. This is required
 	 * so x86_64 can correctly handle the %rip-relative fixups.
 	 */
-	kip->insns = module_alloc(PAGE_SIZE);
+	kip->insns = c->alloc();
 	if (!kip->insns) {
 		kfree(kip);
-		return NULL;
+		goto out;
 	}
 	INIT_LIST_HEAD(&kip->list);
 	memset(kip->slot_used, SLOT_CLEAN, slots_per_page(c));
 	kip->slot_used[0] = SLOT_USED;
 	kip->nused = 1;
 	kip->ngarbage = 0;
+	kip->cache = c;
 	list_add(&kip->list, &c->pages);
-	return kip->insns;
-}
-
-
-kprobe_opcode_t __kprobes *get_insn_slot(void)
-{
-	kprobe_opcode_t *ret = NULL;
-
-	mutex_lock(&kprobe_insn_mutex);
-	ret = __get_insn_slot(&kprobe_insn_slots);
-	mutex_unlock(&kprobe_insn_mutex);
-
-	return ret;
-}
+	slot = kip->insns;
+out:
+	mutex_unlock(&c->mutex);
+	return slot;
+}
 
 /* Return 1 if all garbages are collected, otherwise 0. */
@@ -225,7 +227,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
 		 */
 		if (!list_is_singular(&kip->list)) {
 			list_del(&kip->list);
-			module_free(NULL, kip->insns);
+			kip->cache->free(kip->insns);
 			kfree(kip);
 		}
 		return 1;
@@ -255,11 +257,12 @@ static int __kprobes collect_garbage_slots(struct kprobe_insn_cache *c)
 	return 0;
 }
 
-static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
-				       kprobe_opcode_t *slot, int dirty)
+void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
+				kprobe_opcode_t *slot, int dirty)
 {
 	struct kprobe_insn_page *kip;
 
+	mutex_lock(&c->mutex);
 	list_for_each_entry(kip, &c->pages, list) {
 		long idx = ((long)slot - (long)kip->insns) /
 			(c->insn_size * sizeof(kprobe_opcode_t));
@@ -272,45 +275,25 @@ static void __kprobes __free_insn_slot(struct kprobe_insn_cache *c,
 					collect_garbage_slots(c);
 			} else
 				collect_one_slot(kip, idx);
-			return;
+			goto out;
 		}
 	}
 	/* Could not free this slot. */
 	WARN_ON(1);
+out:
+	mutex_unlock(&c->mutex);
 }
 
-void __kprobes free_insn_slot(kprobe_opcode_t * slot, int dirty)
-{
-	mutex_lock(&kprobe_insn_mutex);
-	__free_insn_slot(&kprobe_insn_slots, slot, dirty);
-	mutex_unlock(&kprobe_insn_mutex);
-}
 #ifdef CONFIG_OPTPROBES
 /* For optimized_kprobe buffer */
-static DEFINE_MUTEX(kprobe_optinsn_mutex); /* Protects kprobe_optinsn_slots */
-static struct kprobe_insn_cache kprobe_optinsn_slots = {
+struct kprobe_insn_cache kprobe_optinsn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_optinsn_slots.mutex),
+	.alloc = alloc_insn_page,
+	.free = free_insn_page,
 	.pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
 	/* .insn_size is initialized later */
 	.nr_garbage = 0,
 };
-/* Get a slot for optimized_kprobe buffer */
-kprobe_opcode_t __kprobes *get_optinsn_slot(void)
-{
-	kprobe_opcode_t *ret = NULL;
-
-	mutex_lock(&kprobe_optinsn_mutex);
-	ret = __get_insn_slot(&kprobe_optinsn_slots);
-	mutex_unlock(&kprobe_optinsn_mutex);
-
-	return ret;
-}
-
-void __kprobes free_optinsn_slot(kprobe_opcode_t * slot, int dirty)
-{
-	mutex_lock(&kprobe_optinsn_mutex);
-	__free_insn_slot(&kprobe_optinsn_slots, slot, dirty);
-	mutex_unlock(&kprobe_optinsn_mutex);
-}
 #endif
 #endif
 
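
struct kprobe_insn_cache is no longer private to kprobes.c: each cache now carries its own mutex plus alloc/free callbacks, so per-cache wrapper functions (get_insn_slot(), get_optinsn_slot(), and their free counterparts) collapse into the generic __get_insn_slot()/__free_insn_slot(). From the initializers and call sites above, the definition (presumably moved to include/linux/kprobes.h so arch code can define its own caches) looks roughly like:

	struct kprobe_insn_cache {
		struct mutex mutex;		/* replaces the file-local kprobe_insn_mutex */
		void *(*alloc)(void);		/* allocate an instruction page */
		void (*free)(void *);		/* free an instruction page */
		struct list_head pages;		/* list of kprobe_insn_page */
		size_t insn_size;		/* size of an instruction slot */
		int nr_garbage;
	};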
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index 6ada93c23a9a..9659d38e008f 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -113,7 +113,7 @@ static ssize_t kexec_crash_size_store(struct kobject *kobj,
 	unsigned long cnt;
 	int ret;
 
-	if (strict_strtoul(buf, 0, &cnt))
+	if (kstrtoul(buf, 0, &cnt))
 		return -EINVAL;
 
 	ret = crash_shrink_memory(cnt);
diff --git a/kernel/modsign_pubkey.c b/kernel/modsign_pubkey.c
index 2b6e69909c39..7cbd4507a7e6 100644
--- a/kernel/modsign_pubkey.c
+++ b/kernel/modsign_pubkey.c
@@ -18,14 +18,14 @@
 
 struct key *modsign_keyring;
 
-extern __initdata const u8 modsign_certificate_list[];
-extern __initdata const u8 modsign_certificate_list_end[];
+extern __initconst const u8 modsign_certificate_list[];
+extern __initconst const u8 modsign_certificate_list_end[];
 
 /*
  * We need to make sure ccache doesn't cache the .o file as it doesn't notice
  * if modsign.pub changes.
  */
-static __initdata const char annoy_ccache[] = __TIME__ "foo";
+static __initconst const char annoy_ccache[] = __TIME__ "foo";
 
 /*
  * Load the compiled-in keys
diff --git a/kernel/module.c b/kernel/module.c
index 206915830d29..dc582749fa13 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -136,6 +136,7 @@ static int param_set_bool_enable_only(const char *val,
 }
 
 static const struct kernel_param_ops param_ops_bool_enable_only = {
+	.flags = KERNEL_PARAM_FL_NOARG,
 	.set = param_set_bool_enable_only,
 	.get = param_get_bool,
 };
@@ -603,7 +604,7 @@ static void setup_modinfo_##field(struct module *mod, const char *s) \
 static ssize_t show_modinfo_##field(struct module_attribute *mattr,	\
 			struct module_kobject *mk, char *buffer)	\
 {									\
-	return sprintf(buffer, "%s\n", mk->mod->field);			\
+	return scnprintf(buffer, PAGE_SIZE, "%s\n", mk->mod->field);	\
 }									\
 static int modinfo_##field##_exists(struct module *mod)		\
 {									\
@@ -1611,6 +1612,14 @@ static void module_remove_modinfo_attrs(struct module *mod)
 	kfree(mod->modinfo_attrs);
 }
 
+static void mod_kobject_put(struct module *mod)
+{
+	DECLARE_COMPLETION_ONSTACK(c);
+	mod->mkobj.kobj_completion = &c;
+	kobject_put(&mod->mkobj.kobj);
+	wait_for_completion(&c);
+}
+
 static int mod_sysfs_init(struct module *mod)
 {
 	int err;
@@ -1638,7 +1647,7 @@ static int mod_sysfs_init(struct module *mod)
 	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
 				   "%s", mod->name);
 	if (err)
-		kobject_put(&mod->mkobj.kobj);
+		mod_kobject_put(mod);
 
 	/* delay uevent until full sysfs population */
 out:
@@ -1682,7 +1691,7 @@ out_unreg_param:
 out_unreg_holders:
 	kobject_put(mod->holders_dir);
 out_unreg:
-	kobject_put(&mod->mkobj.kobj);
+	mod_kobject_put(mod);
 out:
 	return err;
 }
@@ -1691,7 +1700,7 @@ static void mod_sysfs_fini(struct module *mod)
 {
 	remove_notes_attrs(mod);
 	remove_sect_attrs(mod);
-	kobject_put(&mod->mkobj.kobj);
+	mod_kobject_put(mod);
 }
 
 #else /* !CONFIG_SYSFS */
@@ -2540,21 +2549,20 @@ static int copy_module_from_user(const void __user *umod, unsigned long len,
 /* Sets info->hdr and info->len. */
 static int copy_module_from_fd(int fd, struct load_info *info)
 {
-	struct file *file;
+	struct fd f = fdget(fd);
 	int err;
 	struct kstat stat;
 	loff_t pos;
 	ssize_t bytes = 0;
 
-	file = fget(fd);
-	if (!file)
+	if (!f.file)
 		return -ENOEXEC;
 
-	err = security_kernel_module_from_file(file);
+	err = security_kernel_module_from_file(f.file);
 	if (err)
 		goto out;
 
-	err = vfs_getattr(&file->f_path, &stat);
+	err = vfs_getattr(&f.file->f_path, &stat);
 	if (err)
 		goto out;
 
@@ -2577,7 +2585,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
 
 	pos = 0;
 	while (pos < stat.size) {
-		bytes = kernel_read(file, pos, (char *)(info->hdr) + pos,
+		bytes = kernel_read(f.file, pos, (char *)(info->hdr) + pos,
 				    stat.size - pos);
 		if (bytes < 0) {
 			vfree(info->hdr);
@@ -2591,7 +2599,7 @@ static int copy_module_from_fd(int fd, struct load_info *info)
 	info->len = pos;
 
 out:
-	fput(file);
+	fdput(f);
 	return err;
 }
 
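
mod_kobject_put() closes a teardown race: kobject_put() alone can return while a sysfs user still holds a reference to the module kobject, so the new release callback (module_kobj_release(), added in kernel/params.c below) signals a completion that the unloader waits on. This assumes a matching new field in struct module_kobject in include/linux/module.h, roughly:

	struct module_kobject {
		struct kobject kobj;
		struct module *mod;
		struct kobject *drivers_dir;
		struct module_param_attrs *mp;
		struct completion *kobj_completion;	/* signalled by module_kobj_release() */
	};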
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 997cbb951a3b..8e7811086b82 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -126,22 +126,16 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
 	struct nsproxy *old_ns = tsk->nsproxy;
 	struct user_namespace *user_ns = task_cred_xxx(tsk, user_ns);
 	struct nsproxy *new_ns;
-	int err = 0;
 
-	if (!old_ns)
+	if (likely(!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
+			      CLONE_NEWPID | CLONE_NEWNET)))) {
+		get_nsproxy(old_ns);
 		return 0;
-
-	get_nsproxy(old_ns);
-
-	if (!(flags & (CLONE_NEWNS | CLONE_NEWUTS | CLONE_NEWIPC |
-				CLONE_NEWPID | CLONE_NEWNET)))
-		return 0;
-
-	if (!ns_capable(user_ns, CAP_SYS_ADMIN)) {
-		err = -EPERM;
-		goto out;
 	}
 
+	if (!ns_capable(user_ns, CAP_SYS_ADMIN))
+		return -EPERM;
+
 	/*
 	 * CLONE_NEWIPC must detach from the undolist: after switching
 	 * to a new ipc namespace, the semaphore arrays from the old
@@ -149,22 +143,16 @@ int copy_namespaces(unsigned long flags, struct task_struct *tsk)
 	 * means share undolist with parent, so we must forbid using
 	 * it along with CLONE_NEWIPC.
 	 */
-	if ((flags & CLONE_NEWIPC) && (flags & CLONE_SYSVSEM)) {
-		err = -EINVAL;
-		goto out;
-	}
+	if ((flags & (CLONE_NEWIPC | CLONE_SYSVSEM)) ==
+		(CLONE_NEWIPC | CLONE_SYSVSEM))
+		return -EINVAL;
 
 	new_ns = create_new_namespaces(flags, tsk, user_ns, tsk->fs);
-	if (IS_ERR(new_ns)) {
-		err = PTR_ERR(new_ns);
-		goto out;
-	}
+	if (IS_ERR(new_ns))
+		return PTR_ERR(new_ns);
 
 	tsk->nsproxy = new_ns;
-
-out:
-	put_nsproxy(old_ns);
-	return err;
+	return 0;
 }
 
 void free_nsproxy(struct nsproxy *ns)
diff --git a/kernel/padata.c b/kernel/padata.c
index 072f4ee4eb89..07af2c95dcfe 100644
--- a/kernel/padata.c
+++ b/kernel/padata.c
@@ -846,6 +846,8 @@ static int padata_cpu_callback(struct notifier_block *nfb,
 	switch (action) {
 	case CPU_ONLINE:
 	case CPU_ONLINE_FROZEN:
+	case CPU_DOWN_FAILED:
+	case CPU_DOWN_FAILED_FROZEN:
 		if (!pinst_has_cpu(pinst, cpu))
 			break;
 		mutex_lock(&pinst->lock);
@@ -857,6 +859,8 @@ static int padata_cpu_callback(struct notifier_block *nfb,
 
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
+	case CPU_UP_CANCELED:
+	case CPU_UP_CANCELED_FROZEN:
 		if (!pinst_has_cpu(pinst, cpu))
 			break;
 		mutex_lock(&pinst->lock);
@@ -865,22 +869,6 @@ static int padata_cpu_callback(struct notifier_block *nfb,
 		if (err)
 			return notifier_from_errno(err);
 		break;
-
-	case CPU_UP_CANCELED:
-	case CPU_UP_CANCELED_FROZEN:
-		if (!pinst_has_cpu(pinst, cpu))
-			break;
-		mutex_lock(&pinst->lock);
-		__padata_remove_cpu(pinst, cpu);
-		mutex_unlock(&pinst->lock);
-
-	case CPU_DOWN_FAILED:
-	case CPU_DOWN_FAILED_FROZEN:
-		if (!pinst_has_cpu(pinst, cpu))
-			break;
-		mutex_lock(&pinst->lock);
-		__padata_add_cpu(pinst, cpu);
-		mutex_unlock(&pinst->lock);
 	}
 
 	return NOTIFY_OK;
@@ -1086,18 +1074,18 @@ struct padata_instance *padata_alloc(struct workqueue_struct *wq,
 
 	pinst->flags = 0;
 
-#ifdef CONFIG_HOTPLUG_CPU
-	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
-	pinst->cpu_notifier.priority = 0;
-	register_hotcpu_notifier(&pinst->cpu_notifier);
-#endif
-
 	put_online_cpus();
 
 	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
 	kobject_init(&pinst->kobj, &padata_attr_type);
 	mutex_init(&pinst->lock);
 
+#ifdef CONFIG_HOTPLUG_CPU
+	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
+	pinst->cpu_notifier.priority = 0;
+	register_hotcpu_notifier(&pinst->cpu_notifier);
+#endif
+
 	return pinst;
 
 err_free_masks:
diff --git a/kernel/panic.c b/kernel/panic.c
index 801864600514..b6c482ccc5db 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -123,10 +123,14 @@ void panic(const char *fmt, ...)
 	 */
 	smp_send_stop();
 
-	kmsg_dump(KMSG_DUMP_PANIC);
-
+	/*
+	 * Run any panic handlers, including those that might need to
+	 * add information to the kmsg dump output.
+	 */
 	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
+	kmsg_dump(KMSG_DUMP_PANIC);
+
 	bust_spinlocks(0);
 
 	if (!panic_blink)
diff --git a/kernel/params.c b/kernel/params.c
index 440e65d1a544..81c4e78c8f4c 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -103,8 +103,8 @@ static int parse_one(char *param,
 			    || params[i].level > max_level)
 				return 0;
 			/* No one handled NULL, so do it here. */
-			if (!val && params[i].ops->set != param_set_bool
-			    && params[i].ops->set != param_set_bint)
+			if (!val &&
+			    !(params[i].ops->flags & KERNEL_PARAM_FL_NOARG))
 				return -EINVAL;
 			pr_debug("handling %s with %p\n", param,
 				params[i].ops->set);
@@ -241,7 +241,8 @@ int parse_args(const char *doing,
 	}								\
 	int param_get_##name(char *buffer, const struct kernel_param *kp) \
 	{								\
-		return sprintf(buffer, format, *((type *)kp->arg));	\
+		return scnprintf(buffer, PAGE_SIZE, format,		\
+				*((type *)kp->arg));			\
 	}								\
 	struct kernel_param_ops param_ops_##name = {			\
 		.set = param_set_##name,				\
@@ -252,13 +253,13 @@ int parse_args(const char *doing,
 	EXPORT_SYMBOL(param_ops_##name)
 
 
-STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul);
-STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol);
-STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, strict_strtoul);
-STANDARD_PARAM_DEF(int, int, "%i", long, strict_strtol);
-STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, strict_strtoul);
-STANDARD_PARAM_DEF(long, long, "%li", long, strict_strtol);
-STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, strict_strtoul);
+STANDARD_PARAM_DEF(byte, unsigned char, "%hhu", unsigned long, kstrtoul);
+STANDARD_PARAM_DEF(short, short, "%hi", long, kstrtoul);
+STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, kstrtoul);
+STANDARD_PARAM_DEF(int, int, "%i", long, kstrtoul);
+STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, kstrtoul);
+STANDARD_PARAM_DEF(long, long, "%li", long, kstrtoul);
+STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, kstrtoul);
 
 int param_set_charp(const char *val, const struct kernel_param *kp)
 {
@@ -285,7 +286,7 @@ EXPORT_SYMBOL(param_set_charp);
 
 int param_get_charp(char *buffer, const struct kernel_param *kp)
 {
-	return sprintf(buffer, "%s", *((char **)kp->arg));
+	return scnprintf(buffer, PAGE_SIZE, "%s", *((char **)kp->arg));
 }
 EXPORT_SYMBOL(param_get_charp);
 
@@ -320,6 +321,7 @@ int param_get_bool(char *buffer, const struct kernel_param *kp)
 EXPORT_SYMBOL(param_get_bool);
 
 struct kernel_param_ops param_ops_bool = {
+	.flags = KERNEL_PARAM_FL_NOARG,
 	.set = param_set_bool,
 	.get = param_get_bool,
 };
@@ -370,6 +372,7 @@ int param_set_bint(const char *val, const struct kernel_param *kp)
 EXPORT_SYMBOL(param_set_bint);
 
 struct kernel_param_ops param_ops_bint = {
+	.flags = KERNEL_PARAM_FL_NOARG,
 	.set = param_set_bint,
 	.get = param_get_int,
 };
@@ -827,7 +830,7 @@ ssize_t __modver_version_show(struct module_attribute *mattr,
 	struct module_version_attribute *vattr =
 		container_of(mattr, struct module_version_attribute, mattr);
 
-	return sprintf(buf, "%s\n", vattr->version);
+	return scnprintf(buf, PAGE_SIZE, "%s\n", vattr->version);
 }
 
 extern const struct module_version_attribute *__start___modver[];
@@ -912,7 +915,14 @@ static const struct kset_uevent_ops module_uevent_ops = {
 struct kset *module_kset;
 int module_sysfs_initialized;
 
+static void module_kobj_release(struct kobject *kobj)
+{
+	struct module_kobject *mk = to_module_kobject(kobj);
+	complete(mk->kobj_completion);
+}
+
 struct kobj_type module_ktype = {
+	.release = module_kobj_release,
 	.sysfs_ops = &module_sysfs_ops,
 };
 
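
KERNEL_PARAM_FL_NOARG replaces the hard-coded "bool parameters may omit a value" special case in parse_one() with a per-ops flag, so any parameter type can opt in (bool and bint here, and module.c's bool_enable_only above). A sketch of the assumed companion change in include/linux/moduleparam.h:

	/* ops flag: the parameter may be given without a value, e.g. "foo" meaning "foo=1" */
	#define KERNEL_PARAM_FL_NOARG	(1 << 0)

	struct kernel_param_ops {
		unsigned int flags;	/* new: KERNEL_PARAM_FL_* */
		int (*set)(const char *val, const struct kernel_param *kp);
		int (*get)(char *buffer, const struct kernel_param *kp);
		void (*free)(void *arg);
	};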
diff --git a/kernel/pid.c b/kernel/pid.c
index 66505c1dfc51..ebe5e80b10f8 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -265,6 +265,7 @@ void free_pid(struct pid *pid)
 		struct pid_namespace *ns = upid->ns;
 		hlist_del_rcu(&upid->pid_chain);
 		switch(--ns->nr_hashed) {
+		case 2:
 		case 1:
 			/* When all that is left in the pid namespace
 			 * is the reaper wake up the reaper.  The reaper
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 601bb361c235..42086551a24a 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -329,7 +329,7 @@ static int pidns_install(struct nsproxy *nsproxy, void *ns)
 	struct pid_namespace *ancestor, *new = ns;
 
 	if (!ns_capable(new->user_ns, CAP_SYS_ADMIN) ||
-	    !nsown_capable(CAP_SYS_ADMIN))
+	    !ns_capable(current_user_ns(), CAP_SYS_ADMIN))
 		return -EPERM;
 
 	/*
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 3085e62a80a5..c9c759d5a15c 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -644,22 +644,23 @@ int hibernate(void)
 	if (error)
 		goto Exit;
 
-	/* Allocate memory management structures */
-	error = create_basic_memory_bitmaps();
-	if (error)
-		goto Exit;
-
 	printk(KERN_INFO "PM: Syncing filesystems ... ");
 	sys_sync();
 	printk("done.\n");
 
 	error = freeze_processes();
 	if (error)
-		goto Free_bitmaps;
+		goto Exit;
+
+	lock_device_hotplug();
+	/* Allocate memory management structures */
+	error = create_basic_memory_bitmaps();
+	if (error)
+		goto Thaw;
 
 	error = hibernation_snapshot(hibernation_mode == HIBERNATION_PLATFORM);
 	if (error || freezer_test_done)
-		goto Thaw;
+		goto Free_bitmaps;
 
 	if (in_suspend) {
 		unsigned int flags = 0;
@@ -682,14 +683,14 @@ int hibernate(void)
 		pr_debug("PM: Image restored successfully.\n");
 	}
 
+ Free_bitmaps:
+	free_basic_memory_bitmaps();
  Thaw:
+	unlock_device_hotplug();
 	thaw_processes();
 
 	/* Don't bother checking whether freezer_test_done is true */
 	freezer_test_done = false;
-
- Free_bitmaps:
-	free_basic_memory_bitmaps();
  Exit:
 	pm_notifier_call_chain(PM_POST_HIBERNATION);
 	pm_restore_console();
@@ -806,21 +807,20 @@ static int software_resume(void)
 	pm_prepare_console();
 	error = pm_notifier_call_chain(PM_RESTORE_PREPARE);
 	if (error)
-		goto close_finish;
-
-	error = create_basic_memory_bitmaps();
-	if (error)
-		goto close_finish;
+		goto Close_Finish;
 
 	pr_debug("PM: Preparing processes for restore.\n");
 	error = freeze_processes();
-	if (error) {
-		swsusp_close(FMODE_READ);
-		goto Done;
-	}
+	if (error)
+		goto Close_Finish;
 
 	pr_debug("PM: Loading hibernation image.\n");
 
+	lock_device_hotplug();
+	error = create_basic_memory_bitmaps();
+	if (error)
+		goto Thaw;
+
 	error = swsusp_read(&flags);
 	swsusp_close(FMODE_READ);
 	if (!error)
@@ -828,9 +828,10 @@ static int software_resume(void)
 
 	printk(KERN_ERR "PM: Failed to load hibernation image, recovering.\n");
 	swsusp_free();
-	thaw_processes();
- Done:
 	free_basic_memory_bitmaps();
+ Thaw:
+	unlock_device_hotplug();
+	thaw_processes();
  Finish:
 	pm_notifier_call_chain(PM_POST_RESTORE);
 	pm_restore_console();
@@ -840,7 +841,7 @@ static int software_resume(void)
 	mutex_unlock(&pm_mutex);
 	pr_debug("PM: Hibernation image not present or could not be loaded.\n");
 	return error;
-close_finish:
+ Close_Finish:
 	swsusp_close(FMODE_READ);
 	goto Finish;
 }
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c index 349587bb03e1..358a146fd4da 100644 --- a/kernel/power/snapshot.c +++ b/kernel/power/snapshot.c | |||
@@ -352,7 +352,7 @@ static int create_mem_extents(struct list_head *list, gfp_t gfp_mask) | |||
352 | struct mem_extent *ext, *cur, *aux; | 352 | struct mem_extent *ext, *cur, *aux; |
353 | 353 | ||
354 | zone_start = zone->zone_start_pfn; | 354 | zone_start = zone->zone_start_pfn; |
355 | zone_end = zone->zone_start_pfn + zone->spanned_pages; | 355 | zone_end = zone_end_pfn(zone); |
356 | 356 | ||
357 | list_for_each_entry(ext, list, hook) | 357 | list_for_each_entry(ext, list, hook) |
358 | if (zone_start <= ext->end) | 358 | if (zone_start <= ext->end) |
@@ -884,7 +884,7 @@ static unsigned int count_highmem_pages(void) | |||
884 | continue; | 884 | continue; |
885 | 885 | ||
886 | mark_free_pages(zone); | 886 | mark_free_pages(zone); |
887 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | 887 | max_zone_pfn = zone_end_pfn(zone); |
888 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | 888 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
889 | if (saveable_highmem_page(zone, pfn)) | 889 | if (saveable_highmem_page(zone, pfn)) |
890 | n++; | 890 | n++; |
@@ -948,7 +948,7 @@ static unsigned int count_data_pages(void) | |||
948 | continue; | 948 | continue; |
949 | 949 | ||
950 | mark_free_pages(zone); | 950 | mark_free_pages(zone); |
951 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | 951 | max_zone_pfn = zone_end_pfn(zone); |
952 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | 952 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
953 | if (saveable_page(zone, pfn)) | 953 | if (saveable_page(zone, pfn)) |
954 | n++; | 954 | n++; |
@@ -1041,7 +1041,7 @@ copy_data_pages(struct memory_bitmap *copy_bm, struct memory_bitmap *orig_bm) | |||
1041 | unsigned long max_zone_pfn; | 1041 | unsigned long max_zone_pfn; |
1042 | 1042 | ||
1043 | mark_free_pages(zone); | 1043 | mark_free_pages(zone); |
1044 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | 1044 | max_zone_pfn = zone_end_pfn(zone); |
1045 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | 1045 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
1046 | if (page_is_saveable(zone, pfn)) | 1046 | if (page_is_saveable(zone, pfn)) |
1047 | memory_bm_set_bit(orig_bm, pfn); | 1047 | memory_bm_set_bit(orig_bm, pfn); |
@@ -1093,7 +1093,7 @@ void swsusp_free(void) | |||
1093 | unsigned long pfn, max_zone_pfn; | 1093 | unsigned long pfn, max_zone_pfn; |
1094 | 1094 | ||
1095 | for_each_populated_zone(zone) { | 1095 | for_each_populated_zone(zone) { |
1096 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | 1096 | max_zone_pfn = zone_end_pfn(zone); |
1097 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | 1097 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
1098 | if (pfn_valid(pfn)) { | 1098 | if (pfn_valid(pfn)) { |
1099 | struct page *page = pfn_to_page(pfn); | 1099 | struct page *page = pfn_to_page(pfn); |
@@ -1755,7 +1755,7 @@ static int mark_unsafe_pages(struct memory_bitmap *bm) | |||
1755 | 1755 | ||
1756 | /* Clear page flags */ | 1756 | /* Clear page flags */ |
1757 | for_each_populated_zone(zone) { | 1757 | for_each_populated_zone(zone) { |
1758 | max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages; | 1758 | max_zone_pfn = zone_end_pfn(zone); |
1759 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) | 1759 | for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++) |
1760 | if (pfn_valid(pfn)) | 1760 | if (pfn_valid(pfn)) |
1761 | swsusp_unset_page_free(pfn_to_page(pfn)); | 1761 | swsusp_unset_page_free(pfn_to_page(pfn)); |
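Throughout snapshot.c the open-coded end-of-zone arithmetic gives way to the zone_end_pfn() helper. A minimal sketch of the helper, shown only to make the substitution obvious (the real definition lives in include/linux/mmzone.h):

static inline unsigned long zone_end_pfn(const struct zone *zone)
{
        /* one past the last pfn the zone spans */
        return zone->zone_start_pfn + zone->spanned_pages;
}

Same value as before, but a single definition to fix if the zone layout ever changes.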
diff --git a/kernel/power/user.c b/kernel/power/user.c index 4ed81e74f86f..72e8f4fd616d 100644 --- a/kernel/power/user.c +++ b/kernel/power/user.c | |||
@@ -60,11 +60,6 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
60 | error = -ENOSYS; | 60 | error = -ENOSYS; |
61 | goto Unlock; | 61 | goto Unlock; |
62 | } | 62 | } |
63 | if(create_basic_memory_bitmaps()) { | ||
64 | atomic_inc(&snapshot_device_available); | ||
65 | error = -ENOMEM; | ||
66 | goto Unlock; | ||
67 | } | ||
68 | nonseekable_open(inode, filp); | 63 | nonseekable_open(inode, filp); |
69 | data = &snapshot_state; | 64 | data = &snapshot_state; |
70 | filp->private_data = data; | 65 | filp->private_data = data; |
@@ -90,10 +85,9 @@ static int snapshot_open(struct inode *inode, struct file *filp) | |||
90 | if (error) | 85 | if (error) |
91 | pm_notifier_call_chain(PM_POST_RESTORE); | 86 | pm_notifier_call_chain(PM_POST_RESTORE); |
92 | } | 87 | } |
93 | if (error) { | 88 | if (error) |
94 | free_basic_memory_bitmaps(); | ||
95 | atomic_inc(&snapshot_device_available); | 89 | atomic_inc(&snapshot_device_available); |
96 | } | 90 | |
97 | data->frozen = 0; | 91 | data->frozen = 0; |
98 | data->ready = 0; | 92 | data->ready = 0; |
99 | data->platform_support = 0; | 93 | data->platform_support = 0; |
@@ -111,11 +105,11 @@ static int snapshot_release(struct inode *inode, struct file *filp) | |||
111 | lock_system_sleep(); | 105 | lock_system_sleep(); |
112 | 106 | ||
113 | swsusp_free(); | 107 | swsusp_free(); |
114 | free_basic_memory_bitmaps(); | ||
115 | data = filp->private_data; | 108 | data = filp->private_data; |
116 | free_all_swap_pages(data->swap); | 109 | free_all_swap_pages(data->swap); |
117 | if (data->frozen) { | 110 | if (data->frozen) { |
118 | pm_restore_gfp_mask(); | 111 | pm_restore_gfp_mask(); |
112 | free_basic_memory_bitmaps(); | ||
119 | thaw_processes(); | 113 | thaw_processes(); |
120 | } | 114 | } |
121 | pm_notifier_call_chain(data->mode == O_RDONLY ? | 115 | pm_notifier_call_chain(data->mode == O_RDONLY ? |
@@ -207,6 +201,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
207 | if (!mutex_trylock(&pm_mutex)) | 201 | if (!mutex_trylock(&pm_mutex)) |
208 | return -EBUSY; | 202 | return -EBUSY; |
209 | 203 | ||
204 | lock_device_hotplug(); | ||
210 | data = filp->private_data; | 205 | data = filp->private_data; |
211 | 206 | ||
212 | switch (cmd) { | 207 | switch (cmd) { |
@@ -220,14 +215,22 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
220 | printk("done.\n"); | 215 | printk("done.\n"); |
221 | 216 | ||
222 | error = freeze_processes(); | 217 | error = freeze_processes(); |
223 | if (!error) | 218 | if (error) |
219 | break; | ||
220 | |||
221 | error = create_basic_memory_bitmaps(); | ||
222 | if (error) | ||
223 | thaw_processes(); | ||
224 | else | ||
224 | data->frozen = 1; | 225 | data->frozen = 1; |
226 | |||
225 | break; | 227 | break; |
226 | 228 | ||
227 | case SNAPSHOT_UNFREEZE: | 229 | case SNAPSHOT_UNFREEZE: |
228 | if (!data->frozen || data->ready) | 230 | if (!data->frozen || data->ready) |
229 | break; | 231 | break; |
230 | pm_restore_gfp_mask(); | 232 | pm_restore_gfp_mask(); |
233 | free_basic_memory_bitmaps(); | ||
231 | thaw_processes(); | 234 | thaw_processes(); |
232 | data->frozen = 0; | 235 | data->frozen = 0; |
233 | break; | 236 | break; |
@@ -371,6 +374,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd, | |||
371 | 374 | ||
372 | } | 375 | } |
373 | 376 | ||
377 | unlock_device_hotplug(); | ||
374 | mutex_unlock(&pm_mutex); | 378 | mutex_unlock(&pm_mutex); |
375 | 379 | ||
376 | return error; | 380 | return error; |
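With bitmap creation moved out of snapshot_open() and into the SNAPSHOT_FREEZE ioctl, the memory is only allocated once user space actually asks to freeze, and the new lock_device_hotplug() bracket keeps devices from appearing or vanishing mid-ioctl. A hedged user-space sketch of the sequence this path serves (ioctl names from <linux/suspend_ioctls.h>; error handling trimmed):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/suspend_ioctls.h>

int main(void)
{
        int fd = open("/dev/snapshot", O_RDONLY);

        /* FREEZE now also creates the basic memory bitmaps; on
         * failure the kernel thaws the processes itself. */
        if (fd < 0 || ioctl(fd, SNAPSHOT_FREEZE) < 0)
                return 1;

        /* ... SNAPSHOT_CREATE_IMAGE and read()ing the image ... */

        ioctl(fd, SNAPSHOT_UNFREEZE);   /* frees the bitmaps again */
        close(fd);
        return 0;
}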
diff --git a/kernel/ptrace.c b/kernel/ptrace.c index a146ee327f6a..dd562e9aa2c8 100644 --- a/kernel/ptrace.c +++ b/kernel/ptrace.c | |||
@@ -236,7 +236,7 @@ static int __ptrace_may_access(struct task_struct *task, unsigned int mode) | |||
236 | */ | 236 | */ |
237 | int dumpable = 0; | 237 | int dumpable = 0; |
238 | /* Don't let security modules deny introspection */ | 238 | /* Don't let security modules deny introspection */ |
239 | if (task == current) | 239 | if (same_thread_group(task, current)) |
240 | return 0; | 240 | return 0; |
241 | rcu_read_lock(); | 241 | rcu_read_lock(); |
242 | tcred = __task_cred(task); | 242 | tcred = __task_cred(task); |
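The ptrace introspection exemption widens from the calling task itself to its whole thread group, so one thread may introspect its siblings without an LSM veto. One plausible rendering of the helper (the real one lives in include/linux/sched.h; this is a sketch, not necessarily this tree's exact definition):

/* sketch: CLONE_THREAD siblings share one thread group id */
static inline int same_thread_group(struct task_struct *p1,
                                    struct task_struct *p2)
{
        return p1->tgid == p2->tgid;
}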
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c index 33eb4620aa17..b02a339836b4 100644 --- a/kernel/rcupdate.c +++ b/kernel/rcupdate.c | |||
@@ -122,7 +122,7 @@ struct lockdep_map rcu_sched_lock_map = | |||
122 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key); | 122 | STATIC_LOCKDEP_MAP_INIT("rcu_read_lock_sched", &rcu_sched_lock_key); |
123 | EXPORT_SYMBOL_GPL(rcu_sched_lock_map); | 123 | EXPORT_SYMBOL_GPL(rcu_sched_lock_map); |
124 | 124 | ||
125 | int debug_lockdep_rcu_enabled(void) | 125 | int notrace debug_lockdep_rcu_enabled(void) |
126 | { | 126 | { |
127 | return rcu_scheduler_active && debug_locks && | 127 | return rcu_scheduler_active && debug_locks && |
128 | current->lockdep_recursion == 0; | 128 | current->lockdep_recursion == 0; |
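debug_lockdep_rcu_enabled() is reachable from the function-tracing path itself, so letting mcount/fentry instrument it could recurse; notrace suppresses the profiling call for just this function. Roughly what the annotation means, per include/linux/compiler.h (sketch):

/* sketch: stop gcc from emitting the mcount/fentry profiling call */
#define notrace __attribute__((no_instrument_function))

notrace int debug_lockdep_rcu_enabled(void);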
diff --git a/kernel/res_counter.c b/kernel/res_counter.c index ff55247e7049..4aa8a305aede 100644 --- a/kernel/res_counter.c +++ b/kernel/res_counter.c | |||
@@ -17,8 +17,8 @@ | |||
17 | void res_counter_init(struct res_counter *counter, struct res_counter *parent) | 17 | void res_counter_init(struct res_counter *counter, struct res_counter *parent) |
18 | { | 18 | { |
19 | spin_lock_init(&counter->lock); | 19 | spin_lock_init(&counter->lock); |
20 | counter->limit = RESOURCE_MAX; | 20 | counter->limit = RES_COUNTER_MAX; |
21 | counter->soft_limit = RESOURCE_MAX; | 21 | counter->soft_limit = RES_COUNTER_MAX; |
22 | counter->parent = parent; | 22 | counter->parent = parent; |
23 | } | 23 | } |
24 | 24 | ||
@@ -178,23 +178,30 @@ u64 res_counter_read_u64(struct res_counter *counter, int member) | |||
178 | #endif | 178 | #endif |
179 | 179 | ||
180 | int res_counter_memparse_write_strategy(const char *buf, | 180 | int res_counter_memparse_write_strategy(const char *buf, |
181 | unsigned long long *res) | 181 | unsigned long long *resp) |
182 | { | 182 | { |
183 | char *end; | 183 | char *end; |
184 | unsigned long long res; | ||
184 | 185 | ||
185 | /* return RESOURCE_MAX(unlimited) if "-1" is specified */ | 186 | /* return RES_COUNTER_MAX(unlimited) if "-1" is specified */ |
186 | if (*buf == '-') { | 187 | if (*buf == '-') { |
187 | *res = simple_strtoull(buf + 1, &end, 10); | 188 | res = simple_strtoull(buf + 1, &end, 10); |
188 | if (*res != 1 || *end != '\0') | 189 | if (res != 1 || *end != '\0') |
189 | return -EINVAL; | 190 | return -EINVAL; |
190 | *res = RESOURCE_MAX; | 191 | *resp = RES_COUNTER_MAX; |
191 | return 0; | 192 | return 0; |
192 | } | 193 | } |
193 | 194 | ||
194 | *res = memparse(buf, &end); | 195 | res = memparse(buf, &end); |
195 | if (*end != '\0') | 196 | if (*end != '\0') |
196 | return -EINVAL; | 197 | return -EINVAL; |
197 | 198 | ||
198 | *res = PAGE_ALIGN(*res); | 199 | if (PAGE_ALIGN(res) >= res) |
200 | res = PAGE_ALIGN(res); | ||
201 | else | ||
202 | res = RES_COUNTER_MAX; | ||
203 | |||
204 | *resp = res; | ||
205 | |||
199 | return 0; | 206 | return 0; |
200 | } | 207 | } |
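The new PAGE_ALIGN(res) >= res test is an overflow guard: rounding a value near ULLONG_MAX up to a page boundary wraps past zero, which previously turned an effectively unlimited request into a tiny limit. A small user-space demonstration of the wrap (4K pages assumed):

#include <stdio.h>

#define PAGE_SIZE   4096ULL
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long long near_max = 0xfffffffffffff123ULL;

        /* the addition wraps, so the "aligned" value is 0; the kernel
         * now detects this and clamps to RES_COUNTER_MAX instead */
        printf("%#llx -> %#llx\n", near_max, PAGE_ALIGN(near_max));
        return 0;
}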
diff --git a/kernel/sched/core.c b/kernel/sched/core.c index 725aa067ad63..5ac63c9a995a 100644 --- a/kernel/sched/core.c +++ b/kernel/sched/core.c | |||
@@ -978,13 +978,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags) | |||
978 | rq->skip_clock_update = 1; | 978 | rq->skip_clock_update = 1; |
979 | } | 979 | } |
980 | 980 | ||
981 | static ATOMIC_NOTIFIER_HEAD(task_migration_notifier); | ||
982 | |||
983 | void register_task_migration_notifier(struct notifier_block *n) | ||
984 | { | ||
985 | atomic_notifier_chain_register(&task_migration_notifier, n); | ||
986 | } | ||
987 | |||
988 | #ifdef CONFIG_SMP | 981 | #ifdef CONFIG_SMP |
989 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | 982 | void set_task_cpu(struct task_struct *p, unsigned int new_cpu) |
990 | { | 983 | { |
@@ -1015,18 +1008,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu) | |||
1015 | trace_sched_migrate_task(p, new_cpu); | 1008 | trace_sched_migrate_task(p, new_cpu); |
1016 | 1009 | ||
1017 | if (task_cpu(p) != new_cpu) { | 1010 | if (task_cpu(p) != new_cpu) { |
1018 | struct task_migration_notifier tmn; | ||
1019 | |||
1020 | if (p->sched_class->migrate_task_rq) | 1011 | if (p->sched_class->migrate_task_rq) |
1021 | p->sched_class->migrate_task_rq(p, new_cpu); | 1012 | p->sched_class->migrate_task_rq(p, new_cpu); |
1022 | p->se.nr_migrations++; | 1013 | p->se.nr_migrations++; |
1023 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); | 1014 | perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, NULL, 0); |
1024 | |||
1025 | tmn.task = p; | ||
1026 | tmn.from_cpu = task_cpu(p); | ||
1027 | tmn.to_cpu = new_cpu; | ||
1028 | |||
1029 | atomic_notifier_call_chain(&task_migration_notifier, 0, &tmn); | ||
1030 | } | 1015 | } |
1031 | 1016 | ||
1032 | __set_task_cpu(p, new_cpu); | 1017 | __set_task_cpu(p, new_cpu); |
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c index ace34f95e200..99947919e30b 100644 --- a/kernel/sched/cputime.c +++ b/kernel/sched/cputime.c | |||
@@ -551,10 +551,7 @@ static void cputime_adjust(struct task_cputime *curr, | |||
551 | struct cputime *prev, | 551 | struct cputime *prev, |
552 | cputime_t *ut, cputime_t *st) | 552 | cputime_t *ut, cputime_t *st) |
553 | { | 553 | { |
554 | cputime_t rtime, stime, utime, total; | 554 | cputime_t rtime, stime, utime; |
555 | |||
556 | stime = curr->stime; | ||
557 | total = stime + curr->utime; | ||
558 | 555 | ||
559 | /* | 556 | /* |
560 | * Tick based cputime accounting depends on random scheduling | 557 | * Tick based cputime accounting depends on random scheduling |
@@ -576,13 +573,19 @@ static void cputime_adjust(struct task_cputime *curr, | |||
576 | if (prev->stime + prev->utime >= rtime) | 573 | if (prev->stime + prev->utime >= rtime) |
577 | goto out; | 574 | goto out; |
578 | 575 | ||
579 | if (total) { | 576 | stime = curr->stime; |
577 | utime = curr->utime; | ||
578 | |||
579 | if (utime == 0) { | ||
580 | stime = rtime; | ||
581 | } else if (stime == 0) { | ||
582 | utime = rtime; | ||
583 | } else { | ||
584 | cputime_t total = stime + utime; | ||
585 | |||
580 | stime = scale_stime((__force u64)stime, | 586 | stime = scale_stime((__force u64)stime, |
581 | (__force u64)rtime, (__force u64)total); | 587 | (__force u64)rtime, (__force u64)total); |
582 | utime = rtime - stime; | 588 | utime = rtime - stime; |
583 | } else { | ||
584 | stime = rtime; | ||
585 | utime = 0; | ||
586 | } | 589 | } |
587 | 590 | ||
588 | /* | 591 | /* |
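The rewrite defers reading stime/utime until after the early exit and special-cases utime == 0 and stime == 0, so a task that ran purely in one mode gets all of rtime attributed there without a trip through the scaler. The general case still splits rtime in the sampled stime:utime ratio; a naive user-space rendering of that split (the kernel's scale_stime() additionally keeps the 64-bit multiply from overflowing):

#include <stdio.h>
#include <stdint.h>

/* naive split; scale_stime() in the kernel avoids the u64 overflow */
static uint64_t scale_stime(uint64_t stime, uint64_t rtime, uint64_t total)
{
        return stime * rtime / total;
}

int main(void)
{
        uint64_t stime = 30, utime = 70, rtime = 200;
        uint64_t s = scale_stime(stime, rtime, stime + utime);

        /* the sampled 30:70 ratio applied to 200 units of real time */
        printf("stime=%llu utime=%llu\n",
               (unsigned long long)s, (unsigned long long)(rtime - s));
        return 0;
}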
diff --git a/kernel/signal.c b/kernel/signal.c index 50e41075ac77..ded28b91fa53 100644 --- a/kernel/signal.c +++ b/kernel/signal.c | |||
@@ -3394,7 +3394,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig, | |||
3394 | new_ka.sa.sa_restorer = compat_ptr(restorer); | 3394 | new_ka.sa.sa_restorer = compat_ptr(restorer); |
3395 | #endif | 3395 | #endif |
3396 | ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask)); | 3396 | ret |= copy_from_user(&mask, &act->sa_mask, sizeof(mask)); |
3397 | ret |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | 3397 | ret |= get_user(new_ka.sa.sa_flags, &act->sa_flags); |
3398 | if (ret) | 3398 | if (ret) |
3399 | return -EFAULT; | 3399 | return -EFAULT; |
3400 | sigset_from_compat(&new_ka.sa.sa_mask, &mask); | 3400 | sigset_from_compat(&new_ka.sa.sa_mask, &mask); |
@@ -3406,7 +3406,7 @@ COMPAT_SYSCALL_DEFINE4(rt_sigaction, int, sig, | |||
3406 | ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), | 3406 | ret = put_user(ptr_to_compat(old_ka.sa.sa_handler), |
3407 | &oact->sa_handler); | 3407 | &oact->sa_handler); |
3408 | ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask)); | 3408 | ret |= copy_to_user(&oact->sa_mask, &mask, sizeof(mask)); |
3409 | ret |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | 3409 | ret |= put_user(old_ka.sa.sa_flags, &oact->sa_flags); |
3410 | #ifdef __ARCH_HAS_SA_RESTORER | 3410 | #ifdef __ARCH_HAS_SA_RESTORER |
3411 | ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), | 3411 | ret |= put_user(ptr_to_compat(old_ka.sa.sa_restorer), |
3412 | &oact->sa_restorer); | 3412 | &oact->sa_restorer); |
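Switching __get_user()/__put_user() to the checked get_user()/put_user() adds the access_ok() range test that the double-underscore variants deliberately omit; those variants are only safe after a prior access_ok() on the same region, which this path does not guarantee. Conceptually (a sketch, not any architecture's real implementation):

/* sketch: the checked variant is the unchecked one plus access_ok() */
#define checked_get_user(x, ptr)                                \
({                                                              \
        access_ok(VERIFY_READ, (ptr), sizeof(*(ptr)))           \
                ? __get_user((x), (ptr))                        \
                : -EFAULT;                                      \
})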
diff --git a/kernel/smp.c b/kernel/smp.c index 449b707fc20d..0564571dcdf7 100644 --- a/kernel/smp.c +++ b/kernel/smp.c | |||
@@ -48,10 +48,13 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) | |||
48 | cpu_to_node(cpu))) | 48 | cpu_to_node(cpu))) |
49 | return notifier_from_errno(-ENOMEM); | 49 | return notifier_from_errno(-ENOMEM); |
50 | if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, | 50 | if (!zalloc_cpumask_var_node(&cfd->cpumask_ipi, GFP_KERNEL, |
51 | cpu_to_node(cpu))) | 51 | cpu_to_node(cpu))) { |
52 | free_cpumask_var(cfd->cpumask); | ||
52 | return notifier_from_errno(-ENOMEM); | 53 | return notifier_from_errno(-ENOMEM); |
54 | } | ||
53 | cfd->csd = alloc_percpu(struct call_single_data); | 55 | cfd->csd = alloc_percpu(struct call_single_data); |
54 | if (!cfd->csd) { | 56 | if (!cfd->csd) { |
57 | free_cpumask_var(cfd->cpumask_ipi); | ||
55 | free_cpumask_var(cfd->cpumask); | 58 | free_cpumask_var(cfd->cpumask); |
56 | return notifier_from_errno(-ENOMEM); | 59 | return notifier_from_errno(-ENOMEM); |
57 | } | 60 | } |
@@ -572,8 +575,10 @@ EXPORT_SYMBOL(on_each_cpu); | |||
572 | * | 575 | * |
573 | * If @wait is true, then returns once @func has returned. | 576 | * If @wait is true, then returns once @func has returned. |
574 | * | 577 | * |
575 | * You must not call this function with disabled interrupts or | 578 | * You must not call this function with disabled interrupts or from a |
576 | * from a hardware interrupt handler or from a bottom half handler. | 579 | * hardware interrupt handler or from a bottom half handler. The |
580 | * exception is that it may be used during early boot while | ||
581 | * early_boot_irqs_disabled is set. | ||
577 | */ | 582 | */ |
578 | void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, | 583 | void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, |
579 | void *info, bool wait) | 584 | void *info, bool wait) |
@@ -582,9 +587,10 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func, | |||
582 | 587 | ||
583 | smp_call_function_many(mask, func, info, wait); | 588 | smp_call_function_many(mask, func, info, wait); |
584 | if (cpumask_test_cpu(cpu, mask)) { | 589 | if (cpumask_test_cpu(cpu, mask)) { |
585 | local_irq_disable(); | 590 | unsigned long flags; |
591 | local_irq_save(flags); | ||
586 | func(info); | 592 | func(info); |
587 | local_irq_enable(); | 593 | local_irq_restore(flags); |
588 | } | 594 | } |
589 | put_cpu(); | 595 | put_cpu(); |
590 | } | 596 | } |
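Two separate fixes land in smp.c: the CPU_UP_PREPARE error paths now free the cpumasks they already allocated, and the self-call in on_each_cpu_mask() switches to the save/restore IRQ idiom so the function no longer force-enables interrupts behind a caller that had them off, which is what makes the newly documented early-boot exception safe. The idiom in isolation (do_work() is an assumed callback):

static void run_on_this_cpu(void (*do_work)(void *), void *info)
{
        unsigned long flags;

        /* save/restore rather than disable/enable: if interrupts were
         * already off (e.g. early boot with early_boot_irqs_disabled
         * set), they stay off afterwards */
        local_irq_save(flags);
        do_work(info);
        local_irq_restore(flags);
}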
diff --git a/kernel/softirq.c b/kernel/softirq.c index be3d3514c325..53cc09ceb0b8 100644 --- a/kernel/softirq.c +++ b/kernel/softirq.c | |||
@@ -876,7 +876,6 @@ int __init __weak early_irq_init(void) | |||
876 | return 0; | 876 | return 0; |
877 | } | 877 | } |
878 | 878 | ||
879 | #ifdef CONFIG_GENERIC_HARDIRQS | ||
880 | int __init __weak arch_probe_nr_irqs(void) | 879 | int __init __weak arch_probe_nr_irqs(void) |
881 | { | 880 | { |
882 | return NR_IRQS_LEGACY; | 881 | return NR_IRQS_LEGACY; |
@@ -886,4 +885,3 @@ int __init __weak arch_early_irq_init(void) | |||
886 | { | 885 | { |
887 | return 0; | 886 | return 0; |
888 | } | 887 | } |
889 | #endif | ||
diff --git a/kernel/spinlock.c b/kernel/spinlock.c index 5cdd8065a3ce..4b082b5cac9e 100644 --- a/kernel/spinlock.c +++ b/kernel/spinlock.c | |||
@@ -34,6 +34,20 @@ | |||
34 | #else | 34 | #else |
35 | #define raw_read_can_lock(l) read_can_lock(l) | 35 | #define raw_read_can_lock(l) read_can_lock(l) |
36 | #define raw_write_can_lock(l) write_can_lock(l) | 36 | #define raw_write_can_lock(l) write_can_lock(l) |
37 | |||
38 | /* | ||
39 | * Some architectures can relax in favour of the CPU owning the lock. | ||
40 | */ | ||
41 | #ifndef arch_read_relax | ||
42 | # define arch_read_relax(l) cpu_relax() | ||
43 | #endif | ||
44 | #ifndef arch_write_relax | ||
45 | # define arch_write_relax(l) cpu_relax() | ||
46 | #endif | ||
47 | #ifndef arch_spin_relax | ||
48 | # define arch_spin_relax(l) cpu_relax() | ||
49 | #endif | ||
50 | |||
37 | /* | 51 | /* |
38 | * We build the __lock_function inlines here. They are too large for | 52 | * We build the __lock_function inlines here. They are too large for |
39 | * inlining all over the place, but here is only one user per function | 53 | * inlining all over the place, but here is only one user per function |
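The arch_*_relax() fallbacks give architectures a hook for yielding to the lock holder while spinning; everyone else gets cpu_relax(). On x86, for instance, cpu_relax() boils down to the PAUSE instruction (sketch of that architecture's definition):

/* sketch: "rep; nop" encodes PAUSE, hinting a spin-wait loop
 * (saves power and frees pipeline resources on SMT siblings) */
static inline void cpu_relax(void)
{
        asm volatile("rep; nop" ::: "memory");
}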
diff --git a/kernel/sys.c b/kernel/sys.c index 771129b299f8..c18ecca575b4 100644 --- a/kernel/sys.c +++ b/kernel/sys.c | |||
@@ -337,7 +337,7 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) | |||
337 | if (rgid != (gid_t) -1) { | 337 | if (rgid != (gid_t) -1) { |
338 | if (gid_eq(old->gid, krgid) || | 338 | if (gid_eq(old->gid, krgid) || |
339 | gid_eq(old->egid, krgid) || | 339 | gid_eq(old->egid, krgid) || |
340 | nsown_capable(CAP_SETGID)) | 340 | ns_capable(old->user_ns, CAP_SETGID)) |
341 | new->gid = krgid; | 341 | new->gid = krgid; |
342 | else | 342 | else |
343 | goto error; | 343 | goto error; |
@@ -346,7 +346,7 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid) | |||
346 | if (gid_eq(old->gid, kegid) || | 346 | if (gid_eq(old->gid, kegid) || |
347 | gid_eq(old->egid, kegid) || | 347 | gid_eq(old->egid, kegid) || |
348 | gid_eq(old->sgid, kegid) || | 348 | gid_eq(old->sgid, kegid) || |
349 | nsown_capable(CAP_SETGID)) | 349 | ns_capable(old->user_ns, CAP_SETGID)) |
350 | new->egid = kegid; | 350 | new->egid = kegid; |
351 | else | 351 | else |
352 | goto error; | 352 | goto error; |
@@ -387,7 +387,7 @@ SYSCALL_DEFINE1(setgid, gid_t, gid) | |||
387 | old = current_cred(); | 387 | old = current_cred(); |
388 | 388 | ||
389 | retval = -EPERM; | 389 | retval = -EPERM; |
390 | if (nsown_capable(CAP_SETGID)) | 390 | if (ns_capable(old->user_ns, CAP_SETGID)) |
391 | new->gid = new->egid = new->sgid = new->fsgid = kgid; | 391 | new->gid = new->egid = new->sgid = new->fsgid = kgid; |
392 | else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid)) | 392 | else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid)) |
393 | new->egid = new->fsgid = kgid; | 393 | new->egid = new->fsgid = kgid; |
@@ -471,7 +471,7 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) | |||
471 | new->uid = kruid; | 471 | new->uid = kruid; |
472 | if (!uid_eq(old->uid, kruid) && | 472 | if (!uid_eq(old->uid, kruid) && |
473 | !uid_eq(old->euid, kruid) && | 473 | !uid_eq(old->euid, kruid) && |
474 | !nsown_capable(CAP_SETUID)) | 474 | !ns_capable(old->user_ns, CAP_SETUID)) |
475 | goto error; | 475 | goto error; |
476 | } | 476 | } |
477 | 477 | ||
@@ -480,7 +480,7 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid) | |||
480 | if (!uid_eq(old->uid, keuid) && | 480 | if (!uid_eq(old->uid, keuid) && |
481 | !uid_eq(old->euid, keuid) && | 481 | !uid_eq(old->euid, keuid) && |
482 | !uid_eq(old->suid, keuid) && | 482 | !uid_eq(old->suid, keuid) && |
483 | !nsown_capable(CAP_SETUID)) | 483 | !ns_capable(old->user_ns, CAP_SETUID)) |
484 | goto error; | 484 | goto error; |
485 | } | 485 | } |
486 | 486 | ||
@@ -534,7 +534,7 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) | |||
534 | old = current_cred(); | 534 | old = current_cred(); |
535 | 535 | ||
536 | retval = -EPERM; | 536 | retval = -EPERM; |
537 | if (nsown_capable(CAP_SETUID)) { | 537 | if (ns_capable(old->user_ns, CAP_SETUID)) { |
538 | new->suid = new->uid = kuid; | 538 | new->suid = new->uid = kuid; |
539 | if (!uid_eq(kuid, old->uid)) { | 539 | if (!uid_eq(kuid, old->uid)) { |
540 | retval = set_user(new); | 540 | retval = set_user(new); |
@@ -591,7 +591,7 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid) | |||
591 | old = current_cred(); | 591 | old = current_cred(); |
592 | 592 | ||
593 | retval = -EPERM; | 593 | retval = -EPERM; |
594 | if (!nsown_capable(CAP_SETUID)) { | 594 | if (!ns_capable(old->user_ns, CAP_SETUID)) { |
595 | if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) && | 595 | if (ruid != (uid_t) -1 && !uid_eq(kruid, old->uid) && |
596 | !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid)) | 596 | !uid_eq(kruid, old->euid) && !uid_eq(kruid, old->suid)) |
597 | goto error; | 597 | goto error; |
@@ -673,7 +673,7 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid) | |||
673 | old = current_cred(); | 673 | old = current_cred(); |
674 | 674 | ||
675 | retval = -EPERM; | 675 | retval = -EPERM; |
676 | if (!nsown_capable(CAP_SETGID)) { | 676 | if (!ns_capable(old->user_ns, CAP_SETGID)) { |
677 | if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) && | 677 | if (rgid != (gid_t) -1 && !gid_eq(krgid, old->gid) && |
678 | !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid)) | 678 | !gid_eq(krgid, old->egid) && !gid_eq(krgid, old->sgid)) |
679 | goto error; | 679 | goto error; |
@@ -744,7 +744,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) | |||
744 | 744 | ||
745 | if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) || | 745 | if (uid_eq(kuid, old->uid) || uid_eq(kuid, old->euid) || |
746 | uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) || | 746 | uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) || |
747 | nsown_capable(CAP_SETUID)) { | 747 | ns_capable(old->user_ns, CAP_SETUID)) { |
748 | if (!uid_eq(kuid, old->fsuid)) { | 748 | if (!uid_eq(kuid, old->fsuid)) { |
749 | new->fsuid = kuid; | 749 | new->fsuid = kuid; |
750 | if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) | 750 | if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0) |
@@ -783,7 +783,7 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid) | |||
783 | 783 | ||
784 | if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) || | 784 | if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) || |
785 | gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) || | 785 | gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) || |
786 | nsown_capable(CAP_SETGID)) { | 786 | ns_capable(old->user_ns, CAP_SETGID)) { |
787 | if (!gid_eq(kgid, old->fsgid)) { | 787 | if (!gid_eq(kgid, old->fsgid)) { |
788 | new->fsgid = kgid; | 788 | new->fsgid = kgid; |
789 | goto change_okay; | 789 | goto change_okay; |
diff --git a/kernel/sysctl.c b/kernel/sysctl.c index 07f6fc468e17..b2f06f3c6a3f 100644 --- a/kernel/sysctl.c +++ b/kernel/sysctl.c | |||
@@ -1225,7 +1225,7 @@ static struct ctl_table vm_table[] = { | |||
1225 | .data = &hugepages_treat_as_movable, | 1225 | .data = &hugepages_treat_as_movable, |
1226 | .maxlen = sizeof(int), | 1226 | .maxlen = sizeof(int), |
1227 | .mode = 0644, | 1227 | .mode = 0644, |
1228 | .proc_handler = hugetlb_treat_movable_handler, | 1228 | .proc_handler = proc_dointvec, |
1229 | }, | 1229 | }, |
1230 | { | 1230 | { |
1231 | .procname = "nr_overcommit_hugepages", | 1231 | .procname = "nr_overcommit_hugepages", |
@@ -1471,14 +1471,14 @@ static struct ctl_table fs_table[] = { | |||
1471 | { | 1471 | { |
1472 | .procname = "inode-nr", | 1472 | .procname = "inode-nr", |
1473 | .data = &inodes_stat, | 1473 | .data = &inodes_stat, |
1474 | .maxlen = 2*sizeof(int), | 1474 | .maxlen = 2*sizeof(long), |
1475 | .mode = 0444, | 1475 | .mode = 0444, |
1476 | .proc_handler = proc_nr_inodes, | 1476 | .proc_handler = proc_nr_inodes, |
1477 | }, | 1477 | }, |
1478 | { | 1478 | { |
1479 | .procname = "inode-state", | 1479 | .procname = "inode-state", |
1480 | .data = &inodes_stat, | 1480 | .data = &inodes_stat, |
1481 | .maxlen = 7*sizeof(int), | 1481 | .maxlen = 7*sizeof(long), |
1482 | .mode = 0444, | 1482 | .mode = 0444, |
1483 | .proc_handler = proc_nr_inodes, | 1483 | .proc_handler = proc_nr_inodes, |
1484 | }, | 1484 | }, |
@@ -1508,7 +1508,7 @@ static struct ctl_table fs_table[] = { | |||
1508 | { | 1508 | { |
1509 | .procname = "dentry-state", | 1509 | .procname = "dentry-state", |
1510 | .data = &dentry_stat, | 1510 | .data = &dentry_stat, |
1511 | .maxlen = 6*sizeof(int), | 1511 | .maxlen = 6*sizeof(long), |
1512 | .mode = 0444, | 1512 | .mode = 0444, |
1513 | .proc_handler = proc_nr_dentry, | 1513 | .proc_handler = proc_nr_dentry, |
1514 | }, | 1514 | }, |
diff --git a/kernel/task_work.c b/kernel/task_work.c index 65bd3c92d6f3..8727032e3a6f 100644 --- a/kernel/task_work.c +++ b/kernel/task_work.c | |||
@@ -4,6 +4,23 @@ | |||
4 | 4 | ||
5 | static struct callback_head work_exited; /* all we need is ->next == NULL */ | 5 | static struct callback_head work_exited; /* all we need is ->next == NULL */ |
6 | 6 | ||
7 | /** | ||
8 | * task_work_add - ask the @task to execute @work->func() | ||
9 | * @task: the task which should run the callback | ||
10 | * @work: the callback to run | ||
11 | * @notify: send the notification if true | ||
12 | * | ||
13 | * Queue @work for task_work_run() below and notify the @task if @notify. | ||
14 | * Fails if the @task is exiting/exited and thus it can't process this @work. | ||
15 | * Otherwise @work->func() will be called when the @task returns from kernel | ||
16 | * mode or exits. | ||
17 | * | ||
18 | * This is like a signal handler that runs in kernel mode, but it doesn't | ||
19 | * try to wake up the @task. | ||
20 | * | ||
21 | * RETURNS: | ||
22 | * 0 on success or -ESRCH. | ||
23 | */ | ||
7 | int | 24 | int |
8 | task_work_add(struct task_struct *task, struct callback_head *work, bool notify) | 25 | task_work_add(struct task_struct *task, struct callback_head *work, bool notify) |
9 | { | 26 | { |
@@ -21,11 +38,22 @@ task_work_add(struct task_struct *task, struct callback_head *work, bool notify) | |||
21 | return 0; | 38 | return 0; |
22 | } | 39 | } |
23 | 40 | ||
41 | /** | ||
42 | * task_work_cancel - cancel a pending work added by task_work_add() | ||
43 | * @task: the task which should execute the work | ||
44 | * @func: identifies the work to remove | ||
45 | * | ||
46 | * Find the last queued pending work with ->func == @func and remove | ||
47 | * it from queue. | ||
48 | * | ||
49 | * RETURNS: | ||
50 | * The found work or NULL if not found. | ||
51 | */ | ||
24 | struct callback_head * | 52 | struct callback_head * |
25 | task_work_cancel(struct task_struct *task, task_work_func_t func) | 53 | task_work_cancel(struct task_struct *task, task_work_func_t func) |
26 | { | 54 | { |
27 | struct callback_head **pprev = &task->task_works; | 55 | struct callback_head **pprev = &task->task_works; |
28 | struct callback_head *work = NULL; | 56 | struct callback_head *work; |
29 | unsigned long flags; | 57 | unsigned long flags; |
30 | /* | 58 | /* |
31 | * If cmpxchg() fails we continue without updating pprev. | 59 | * If cmpxchg() fails we continue without updating pprev. |
@@ -35,7 +63,7 @@ task_work_cancel(struct task_struct *task, task_work_func_t func) | |||
35 | */ | 63 | */ |
36 | raw_spin_lock_irqsave(&task->pi_lock, flags); | 64 | raw_spin_lock_irqsave(&task->pi_lock, flags); |
37 | while ((work = ACCESS_ONCE(*pprev))) { | 65 | while ((work = ACCESS_ONCE(*pprev))) { |
38 | read_barrier_depends(); | 66 | smp_read_barrier_depends(); |
39 | if (work->func != func) | 67 | if (work->func != func) |
40 | pprev = &work->next; | 68 | pprev = &work->next; |
41 | else if (cmpxchg(pprev, work, work->next) == work) | 69 | else if (cmpxchg(pprev, work, work->next) == work) |
@@ -46,6 +74,14 @@ task_work_cancel(struct task_struct *task, task_work_func_t func) | |||
46 | return work; | 74 | return work; |
47 | } | 75 | } |
48 | 76 | ||
77 | /** | ||
78 | * task_work_run - execute the works added by task_work_add() | ||
79 | * | ||
80 | * Flush the pending works. Should be used by the core kernel code. | ||
81 | * Called before the task returns to user mode or stops, or when | ||
82 | * it exits. In the latter case task_work_add() can no longer add | ||
83 | * new work after task_work_run() returns. | ||
84 | */ | ||
49 | void task_work_run(void) | 85 | void task_work_run(void) |
50 | { | 86 | { |
51 | struct task_struct *task = current; | 87 | struct task_struct *task = current; |
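With the API now documented, a minimal sketch of a caller (the callback and trigger are made up for illustration):

#include <linux/task_work.h>

static void my_callback(struct callback_head *head)
{
        /* runs in task context on the way back to user mode */
}

static struct callback_head my_work;

static int queue_it(struct task_struct *task)
{
        init_task_work(&my_work, my_callback);

        /* -ESRCH means the task already ran its final task_work_run() */
        return task_work_add(task, &my_work, true);
}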
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index a6d098c6df3f..03cf44ac54d3 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c | |||
@@ -1978,12 +1978,27 @@ int __weak ftrace_arch_code_modify_post_process(void) | |||
1978 | 1978 | ||
1979 | void ftrace_modify_all_code(int command) | 1979 | void ftrace_modify_all_code(int command) |
1980 | { | 1980 | { |
1981 | int update = command & FTRACE_UPDATE_TRACE_FUNC; | ||
1982 | |||
1983 | /* | ||
1984 | * If the ftrace_caller calls a ftrace_ops func directly, | ||
1985 | * we need to make sure that it only traces functions it | ||
1986 | * expects to trace. When doing the switch of functions, | ||
1987 | * we need to update to the ftrace_ops_list_func first | ||
1988 | * before the transition between old and new calls is set, | ||
1989 | * as the ftrace_ops_list_func will check the ops hashes | ||
1990 | * to make sure the ops have the right functions | ||
1991 | * traced. | ||
1992 | */ | ||
1993 | if (update) | ||
1994 | ftrace_update_ftrace_func(ftrace_ops_list_func); | ||
1995 | |||
1981 | if (command & FTRACE_UPDATE_CALLS) | 1996 | if (command & FTRACE_UPDATE_CALLS) |
1982 | ftrace_replace_code(1); | 1997 | ftrace_replace_code(1); |
1983 | else if (command & FTRACE_DISABLE_CALLS) | 1998 | else if (command & FTRACE_DISABLE_CALLS) |
1984 | ftrace_replace_code(0); | 1999 | ftrace_replace_code(0); |
1985 | 2000 | ||
1986 | if (command & FTRACE_UPDATE_TRACE_FUNC) | 2001 | if (update && ftrace_trace_function != ftrace_ops_list_func) |
1987 | ftrace_update_ftrace_func(ftrace_trace_function); | 2002 | ftrace_update_ftrace_func(ftrace_trace_function); |
1988 | 2003 | ||
1989 | if (command & FTRACE_START_FUNC_RET) | 2004 | if (command & FTRACE_START_FUNC_RET) |
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index 496f94d57698..7974ba20557d 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c | |||
@@ -3166,11 +3166,6 @@ static const struct file_operations show_traces_fops = { | |||
3166 | }; | 3166 | }; |
3167 | 3167 | ||
3168 | /* | 3168 | /* |
3169 | * Only trace on a CPU if the bitmask is set: | ||
3170 | */ | ||
3171 | static cpumask_var_t tracing_cpumask; | ||
3172 | |||
3173 | /* | ||
3174 | * The tracer itself will not take this lock, but still we want | 3169 | * The tracer itself will not take this lock, but still we want |
3175 | * to provide a consistent cpumask to user-space: | 3170 | * to provide a consistent cpumask to user-space: |
3176 | */ | 3171 | */ |
@@ -3186,11 +3181,12 @@ static ssize_t | |||
3186 | tracing_cpumask_read(struct file *filp, char __user *ubuf, | 3181 | tracing_cpumask_read(struct file *filp, char __user *ubuf, |
3187 | size_t count, loff_t *ppos) | 3182 | size_t count, loff_t *ppos) |
3188 | { | 3183 | { |
3184 | struct trace_array *tr = file_inode(filp)->i_private; | ||
3189 | int len; | 3185 | int len; |
3190 | 3186 | ||
3191 | mutex_lock(&tracing_cpumask_update_lock); | 3187 | mutex_lock(&tracing_cpumask_update_lock); |
3192 | 3188 | ||
3193 | len = cpumask_scnprintf(mask_str, count, tracing_cpumask); | 3189 | len = cpumask_scnprintf(mask_str, count, tr->tracing_cpumask); |
3194 | if (count - len < 2) { | 3190 | if (count - len < 2) { |
3195 | count = -EINVAL; | 3191 | count = -EINVAL; |
3196 | goto out_err; | 3192 | goto out_err; |
@@ -3208,7 +3204,7 @@ static ssize_t | |||
3208 | tracing_cpumask_write(struct file *filp, const char __user *ubuf, | 3204 | tracing_cpumask_write(struct file *filp, const char __user *ubuf, |
3209 | size_t count, loff_t *ppos) | 3205 | size_t count, loff_t *ppos) |
3210 | { | 3206 | { |
3211 | struct trace_array *tr = filp->private_data; | 3207 | struct trace_array *tr = file_inode(filp)->i_private; |
3212 | cpumask_var_t tracing_cpumask_new; | 3208 | cpumask_var_t tracing_cpumask_new; |
3213 | int err, cpu; | 3209 | int err, cpu; |
3214 | 3210 | ||
@@ -3228,12 +3224,12 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
3228 | * Increase/decrease the disabled counter if we are | 3224 | * Increase/decrease the disabled counter if we are |
3229 | * about to flip a bit in the cpumask: | 3225 | * about to flip a bit in the cpumask: |
3230 | */ | 3226 | */ |
3231 | if (cpumask_test_cpu(cpu, tracing_cpumask) && | 3227 | if (cpumask_test_cpu(cpu, tr->tracing_cpumask) && |
3232 | !cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 3228 | !cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
3233 | atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); | 3229 | atomic_inc(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); |
3234 | ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); | 3230 | ring_buffer_record_disable_cpu(tr->trace_buffer.buffer, cpu); |
3235 | } | 3231 | } |
3236 | if (!cpumask_test_cpu(cpu, tracing_cpumask) && | 3232 | if (!cpumask_test_cpu(cpu, tr->tracing_cpumask) && |
3237 | cpumask_test_cpu(cpu, tracing_cpumask_new)) { | 3233 | cpumask_test_cpu(cpu, tracing_cpumask_new)) { |
3238 | atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); | 3234 | atomic_dec(&per_cpu_ptr(tr->trace_buffer.data, cpu)->disabled); |
3239 | ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); | 3235 | ring_buffer_record_enable_cpu(tr->trace_buffer.buffer, cpu); |
@@ -3242,7 +3238,7 @@ tracing_cpumask_write(struct file *filp, const char __user *ubuf, | |||
3242 | arch_spin_unlock(&ftrace_max_lock); | 3238 | arch_spin_unlock(&ftrace_max_lock); |
3243 | local_irq_enable(); | 3239 | local_irq_enable(); |
3244 | 3240 | ||
3245 | cpumask_copy(tracing_cpumask, tracing_cpumask_new); | 3241 | cpumask_copy(tr->tracing_cpumask, tracing_cpumask_new); |
3246 | 3242 | ||
3247 | mutex_unlock(&tracing_cpumask_update_lock); | 3243 | mutex_unlock(&tracing_cpumask_update_lock); |
3248 | free_cpumask_var(tracing_cpumask_new); | 3244 | free_cpumask_var(tracing_cpumask_new); |
@@ -3256,9 +3252,10 @@ err_unlock: | |||
3256 | } | 3252 | } |
3257 | 3253 | ||
3258 | static const struct file_operations tracing_cpumask_fops = { | 3254 | static const struct file_operations tracing_cpumask_fops = { |
3259 | .open = tracing_open_generic, | 3255 | .open = tracing_open_generic_tr, |
3260 | .read = tracing_cpumask_read, | 3256 | .read = tracing_cpumask_read, |
3261 | .write = tracing_cpumask_write, | 3257 | .write = tracing_cpumask_write, |
3258 | .release = tracing_release_generic_tr, | ||
3262 | .llseek = generic_file_llseek, | 3259 | .llseek = generic_file_llseek, |
3263 | }; | 3260 | }; |
3264 | 3261 | ||
@@ -5938,6 +5935,11 @@ static int new_instance_create(const char *name) | |||
5938 | if (!tr->name) | 5935 | if (!tr->name) |
5939 | goto out_free_tr; | 5936 | goto out_free_tr; |
5940 | 5937 | ||
5938 | if (!alloc_cpumask_var(&tr->tracing_cpumask, GFP_KERNEL)) | ||
5939 | goto out_free_tr; | ||
5940 | |||
5941 | cpumask_copy(tr->tracing_cpumask, cpu_all_mask); | ||
5942 | |||
5941 | raw_spin_lock_init(&tr->start_lock); | 5943 | raw_spin_lock_init(&tr->start_lock); |
5942 | 5944 | ||
5943 | tr->current_trace = &nop_trace; | 5945 | tr->current_trace = &nop_trace; |
@@ -5969,6 +5971,7 @@ static int new_instance_create(const char *name) | |||
5969 | out_free_tr: | 5971 | out_free_tr: |
5970 | if (tr->trace_buffer.buffer) | 5972 | if (tr->trace_buffer.buffer) |
5971 | ring_buffer_free(tr->trace_buffer.buffer); | 5973 | ring_buffer_free(tr->trace_buffer.buffer); |
5974 | free_cpumask_var(tr->tracing_cpumask); | ||
5972 | kfree(tr->name); | 5975 | kfree(tr->name); |
5973 | kfree(tr); | 5976 | kfree(tr); |
5974 | 5977 | ||
@@ -6098,6 +6101,9 @@ init_tracer_debugfs(struct trace_array *tr, struct dentry *d_tracer) | |||
6098 | { | 6101 | { |
6099 | int cpu; | 6102 | int cpu; |
6100 | 6103 | ||
6104 | trace_create_file("tracing_cpumask", 0644, d_tracer, | ||
6105 | tr, &tracing_cpumask_fops); | ||
6106 | |||
6101 | trace_create_file("trace_options", 0644, d_tracer, | 6107 | trace_create_file("trace_options", 0644, d_tracer, |
6102 | tr, &tracing_iter_fops); | 6108 | tr, &tracing_iter_fops); |
6103 | 6109 | ||
@@ -6147,9 +6153,6 @@ static __init int tracer_init_debugfs(void) | |||
6147 | 6153 | ||
6148 | init_tracer_debugfs(&global_trace, d_tracer); | 6154 | init_tracer_debugfs(&global_trace, d_tracer); |
6149 | 6155 | ||
6150 | trace_create_file("tracing_cpumask", 0644, d_tracer, | ||
6151 | &global_trace, &tracing_cpumask_fops); | ||
6152 | |||
6153 | trace_create_file("available_tracers", 0444, d_tracer, | 6156 | trace_create_file("available_tracers", 0444, d_tracer, |
6154 | &global_trace, &show_traces_fops); | 6157 | &global_trace, &show_traces_fops); |
6155 | 6158 | ||
@@ -6371,7 +6374,7 @@ __init static int tracer_alloc_buffers(void) | |||
6371 | if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) | 6374 | if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL)) |
6372 | goto out; | 6375 | goto out; |
6373 | 6376 | ||
6374 | if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL)) | 6377 | if (!alloc_cpumask_var(&global_trace.tracing_cpumask, GFP_KERNEL)) |
6375 | goto out_free_buffer_mask; | 6378 | goto out_free_buffer_mask; |
6376 | 6379 | ||
6377 | /* Only allocate trace_printk buffers if a trace_printk exists */ | 6380 | /* Only allocate trace_printk buffers if a trace_printk exists */ |
@@ -6386,7 +6389,7 @@ __init static int tracer_alloc_buffers(void) | |||
6386 | ring_buf_size = 1; | 6389 | ring_buf_size = 1; |
6387 | 6390 | ||
6388 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); | 6391 | cpumask_copy(tracing_buffer_mask, cpu_possible_mask); |
6389 | cpumask_copy(tracing_cpumask, cpu_all_mask); | 6392 | cpumask_copy(global_trace.tracing_cpumask, cpu_all_mask); |
6390 | 6393 | ||
6391 | raw_spin_lock_init(&global_trace.start_lock); | 6394 | raw_spin_lock_init(&global_trace.start_lock); |
6392 | 6395 | ||
@@ -6441,7 +6444,7 @@ out_free_cpumask: | |||
6441 | #ifdef CONFIG_TRACER_MAX_TRACE | 6444 | #ifdef CONFIG_TRACER_MAX_TRACE |
6442 | free_percpu(global_trace.max_buffer.data); | 6445 | free_percpu(global_trace.max_buffer.data); |
6443 | #endif | 6446 | #endif |
6444 | free_cpumask_var(tracing_cpumask); | 6447 | free_cpumask_var(global_trace.tracing_cpumask); |
6445 | out_free_buffer_mask: | 6448 | out_free_buffer_mask: |
6446 | free_cpumask_var(tracing_buffer_mask); | 6449 | free_cpumask_var(tracing_buffer_mask); |
6447 | out: | 6450 | out: |
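tracing_cpumask thereby moves into struct trace_array, so each trace instance can be confined to its own CPU set instead of sharing one global mask. A hedged user-space example (the tracefs path and instance name "foo" are assumptions):

#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        /* assumes an instance created via mkdir .../instances/foo */
        int fd = open("/sys/kernel/debug/tracing/instances/foo/tracing_cpumask",
                      O_WRONLY);

        if (fd < 0)
                return 1;
        write(fd, "3\n", 2);    /* hex mask: trace CPUs 0 and 1 only */
        close(fd);
        return 0;
}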
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h index fe39acd4c1aa..10c86fb7a2b4 100644 --- a/kernel/trace/trace.h +++ b/kernel/trace/trace.h | |||
@@ -206,6 +206,7 @@ struct trace_array { | |||
206 | struct dentry *event_dir; | 206 | struct dentry *event_dir; |
207 | struct list_head systems; | 207 | struct list_head systems; |
208 | struct list_head events; | 208 | struct list_head events; |
209 | cpumask_var_t tracing_cpumask; /* only trace on set CPUs */ | ||
209 | int ref; | 210 | int ref; |
210 | }; | 211 | }; |
211 | 212 | ||
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c index 29a7ebcfb426..368a4d50cc30 100644 --- a/kernel/trace/trace_events.c +++ b/kernel/trace/trace_events.c | |||
@@ -1489,12 +1489,7 @@ event_subsystem_dir(struct trace_array *tr, const char *name, | |||
1489 | } | 1489 | } |
1490 | 1490 | ||
1491 | static int | 1491 | static int |
1492 | event_create_dir(struct dentry *parent, | 1492 | event_create_dir(struct dentry *parent, struct ftrace_event_file *file) |
1493 | struct ftrace_event_file *file, | ||
1494 | const struct file_operations *id, | ||
1495 | const struct file_operations *enable, | ||
1496 | const struct file_operations *filter, | ||
1497 | const struct file_operations *format) | ||
1498 | { | 1493 | { |
1499 | struct ftrace_event_call *call = file->event_call; | 1494 | struct ftrace_event_call *call = file->event_call; |
1500 | struct trace_array *tr = file->tr; | 1495 | struct trace_array *tr = file->tr; |
@@ -1522,12 +1517,13 @@ event_create_dir(struct dentry *parent, | |||
1522 | 1517 | ||
1523 | if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) | 1518 | if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)) |
1524 | trace_create_file("enable", 0644, file->dir, file, | 1519 | trace_create_file("enable", 0644, file->dir, file, |
1525 | enable); | 1520 | &ftrace_enable_fops); |
1526 | 1521 | ||
1527 | #ifdef CONFIG_PERF_EVENTS | 1522 | #ifdef CONFIG_PERF_EVENTS |
1528 | if (call->event.type && call->class->reg) | 1523 | if (call->event.type && call->class->reg) |
1529 | trace_create_file("id", 0444, file->dir, | 1524 | trace_create_file("id", 0444, file->dir, |
1530 | (void *)(long)call->event.type, id); | 1525 | (void *)(long)call->event.type, |
1526 | &ftrace_event_id_fops); | ||
1531 | #endif | 1527 | #endif |
1532 | 1528 | ||
1533 | /* | 1529 | /* |
@@ -1544,10 +1540,10 @@ event_create_dir(struct dentry *parent, | |||
1544 | } | 1540 | } |
1545 | } | 1541 | } |
1546 | trace_create_file("filter", 0644, file->dir, call, | 1542 | trace_create_file("filter", 0644, file->dir, call, |
1547 | filter); | 1543 | &ftrace_event_filter_fops); |
1548 | 1544 | ||
1549 | trace_create_file("format", 0444, file->dir, call, | 1545 | trace_create_file("format", 0444, file->dir, call, |
1550 | format); | 1546 | &ftrace_event_format_fops); |
1551 | 1547 | ||
1552 | return 0; | 1548 | return 0; |
1553 | } | 1549 | } |
@@ -1648,12 +1644,7 @@ trace_create_new_event(struct ftrace_event_call *call, | |||
1648 | 1644 | ||
1649 | /* Add an event to a trace directory */ | 1645 | /* Add an event to a trace directory */ |
1650 | static int | 1646 | static int |
1651 | __trace_add_new_event(struct ftrace_event_call *call, | 1647 | __trace_add_new_event(struct ftrace_event_call *call, struct trace_array *tr) |
1652 | struct trace_array *tr, | ||
1653 | const struct file_operations *id, | ||
1654 | const struct file_operations *enable, | ||
1655 | const struct file_operations *filter, | ||
1656 | const struct file_operations *format) | ||
1657 | { | 1648 | { |
1658 | struct ftrace_event_file *file; | 1649 | struct ftrace_event_file *file; |
1659 | 1650 | ||
@@ -1661,7 +1652,7 @@ __trace_add_new_event(struct ftrace_event_call *call, | |||
1661 | if (!file) | 1652 | if (!file) |
1662 | return -ENOMEM; | 1653 | return -ENOMEM; |
1663 | 1654 | ||
1664 | return event_create_dir(tr->event_dir, file, id, enable, filter, format); | 1655 | return event_create_dir(tr->event_dir, file); |
1665 | } | 1656 | } |
1666 | 1657 | ||
1667 | /* | 1658 | /* |
@@ -1683,8 +1674,7 @@ __trace_early_add_new_event(struct ftrace_event_call *call, | |||
1683 | } | 1674 | } |
1684 | 1675 | ||
1685 | struct ftrace_module_file_ops; | 1676 | struct ftrace_module_file_ops; |
1686 | static void __add_event_to_tracers(struct ftrace_event_call *call, | 1677 | static void __add_event_to_tracers(struct ftrace_event_call *call); |
1687 | struct ftrace_module_file_ops *file_ops); | ||
1688 | 1678 | ||
1689 | /* Add an additional event_call dynamically */ | 1679 | /* Add an additional event_call dynamically */ |
1690 | int trace_add_event_call(struct ftrace_event_call *call) | 1680 | int trace_add_event_call(struct ftrace_event_call *call) |
@@ -1695,7 +1685,7 @@ int trace_add_event_call(struct ftrace_event_call *call) | |||
1695 | 1685 | ||
1696 | ret = __register_event(call, NULL); | 1686 | ret = __register_event(call, NULL); |
1697 | if (ret >= 0) | 1687 | if (ret >= 0) |
1698 | __add_event_to_tracers(call, NULL); | 1688 | __add_event_to_tracers(call); |
1699 | 1689 | ||
1700 | mutex_unlock(&event_mutex); | 1690 | mutex_unlock(&event_mutex); |
1701 | mutex_unlock(&trace_types_lock); | 1691 | mutex_unlock(&trace_types_lock); |
@@ -1769,100 +1759,21 @@ int trace_remove_event_call(struct ftrace_event_call *call) | |||
1769 | 1759 | ||
1770 | #ifdef CONFIG_MODULES | 1760 | #ifdef CONFIG_MODULES |
1771 | 1761 | ||
1772 | static LIST_HEAD(ftrace_module_file_list); | ||
1773 | |||
1774 | /* | ||
1775 | * Modules must own their file_operations to keep up with | ||
1776 | * reference counting. | ||
1777 | */ | ||
1778 | struct ftrace_module_file_ops { | ||
1779 | struct list_head list; | ||
1780 | struct module *mod; | ||
1781 | struct file_operations id; | ||
1782 | struct file_operations enable; | ||
1783 | struct file_operations format; | ||
1784 | struct file_operations filter; | ||
1785 | }; | ||
1786 | |||
1787 | static struct ftrace_module_file_ops * | ||
1788 | find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod) | ||
1789 | { | ||
1790 | /* | ||
1791 | * As event_calls are added in groups by module, | ||
1792 | * when we find one file_ops, we don't need to search for | ||
1793 | * each call in that module, as the rest should be the | ||
1794 | * same. Only search for a new one if the last one did | ||
1795 | * not match. | ||
1796 | */ | ||
1797 | if (file_ops && mod == file_ops->mod) | ||
1798 | return file_ops; | ||
1799 | |||
1800 | list_for_each_entry(file_ops, &ftrace_module_file_list, list) { | ||
1801 | if (file_ops->mod == mod) | ||
1802 | return file_ops; | ||
1803 | } | ||
1804 | return NULL; | ||
1805 | } | ||
1806 | |||
1807 | static struct ftrace_module_file_ops * | ||
1808 | trace_create_file_ops(struct module *mod) | ||
1809 | { | ||
1810 | struct ftrace_module_file_ops *file_ops; | ||
1811 | |||
1812 | /* | ||
1813 | * This is a bit of a PITA. To allow for correct reference | ||
1814 | * counting, modules must "own" their file_operations. | ||
1815 | * To do this, we allocate the file operations that will be | ||
1816 | * used in the event directory. | ||
1817 | */ | ||
1818 | |||
1819 | file_ops = kmalloc(sizeof(*file_ops), GFP_KERNEL); | ||
1820 | if (!file_ops) | ||
1821 | return NULL; | ||
1822 | |||
1823 | file_ops->mod = mod; | ||
1824 | |||
1825 | file_ops->id = ftrace_event_id_fops; | ||
1826 | file_ops->id.owner = mod; | ||
1827 | |||
1828 | file_ops->enable = ftrace_enable_fops; | ||
1829 | file_ops->enable.owner = mod; | ||
1830 | |||
1831 | file_ops->filter = ftrace_event_filter_fops; | ||
1832 | file_ops->filter.owner = mod; | ||
1833 | |||
1834 | file_ops->format = ftrace_event_format_fops; | ||
1835 | file_ops->format.owner = mod; | ||
1836 | |||
1837 | list_add(&file_ops->list, &ftrace_module_file_list); | ||
1838 | |||
1839 | return file_ops; | ||
1840 | } | ||
1841 | |||
1842 | static void trace_module_add_events(struct module *mod) | 1762 | static void trace_module_add_events(struct module *mod) |
1843 | { | 1763 | { |
1844 | struct ftrace_module_file_ops *file_ops = NULL; | ||
1845 | struct ftrace_event_call **call, **start, **end; | 1764 | struct ftrace_event_call **call, **start, **end; |
1846 | 1765 | ||
1847 | start = mod->trace_events; | 1766 | start = mod->trace_events; |
1848 | end = mod->trace_events + mod->num_trace_events; | 1767 | end = mod->trace_events + mod->num_trace_events; |
1849 | 1768 | ||
1850 | if (start == end) | ||
1851 | return; | ||
1852 | |||
1853 | file_ops = trace_create_file_ops(mod); | ||
1854 | if (!file_ops) | ||
1855 | return; | ||
1856 | |||
1857 | for_each_event(call, start, end) { | 1769 | for_each_event(call, start, end) { |
1858 | __register_event(*call, mod); | 1770 | __register_event(*call, mod); |
1859 | __add_event_to_tracers(*call, file_ops); | 1771 | __add_event_to_tracers(*call); |
1860 | } | 1772 | } |
1861 | } | 1773 | } |
1862 | 1774 | ||
1863 | static void trace_module_remove_events(struct module *mod) | 1775 | static void trace_module_remove_events(struct module *mod) |
1864 | { | 1776 | { |
1865 | struct ftrace_module_file_ops *file_ops; | ||
1866 | struct ftrace_event_call *call, *p; | 1777 | struct ftrace_event_call *call, *p; |
1867 | bool clear_trace = false; | 1778 | bool clear_trace = false; |
1868 | 1779 | ||
@@ -1874,16 +1785,6 @@ static void trace_module_remove_events(struct module *mod) | |||
1874 | __trace_remove_event_call(call); | 1785 | __trace_remove_event_call(call); |
1875 | } | 1786 | } |
1876 | } | 1787 | } |
1877 | |||
1878 | /* Now free the file_operations */ | ||
1879 | list_for_each_entry(file_ops, &ftrace_module_file_list, list) { | ||
1880 | if (file_ops->mod == mod) | ||
1881 | break; | ||
1882 | } | ||
1883 | if (&file_ops->list != &ftrace_module_file_list) { | ||
1884 | list_del(&file_ops->list); | ||
1885 | kfree(file_ops); | ||
1886 | } | ||
1887 | up_write(&trace_event_sem); | 1788 | up_write(&trace_event_sem); |
1888 | 1789 | ||
1889 | /* | 1790 | /* |
@@ -1919,67 +1820,21 @@ static int trace_module_notify(struct notifier_block *self, | |||
1919 | return 0; | 1820 | return 0; |
1920 | } | 1821 | } |
1921 | 1822 | ||
1922 | static int | 1823 | static struct notifier_block trace_module_nb = { |
1923 | __trace_add_new_mod_event(struct ftrace_event_call *call, | 1824 | .notifier_call = trace_module_notify, |
1924 | struct trace_array *tr, | 1825 | .priority = 0, |
1925 | struct ftrace_module_file_ops *file_ops) | 1826 | }; |
1926 | { | ||
1927 | return __trace_add_new_event(call, tr, | ||
1928 | &file_ops->id, &file_ops->enable, | ||
1929 | &file_ops->filter, &file_ops->format); | ||
1930 | } | ||
1931 | |||
1932 | #else | ||
1933 | static inline struct ftrace_module_file_ops * | ||
1934 | find_ftrace_file_ops(struct ftrace_module_file_ops *file_ops, struct module *mod) | ||
1935 | { | ||
1936 | return NULL; | ||
1937 | } | ||
1938 | static inline int trace_module_notify(struct notifier_block *self, | ||
1939 | unsigned long val, void *data) | ||
1940 | { | ||
1941 | return 0; | ||
1942 | } | ||
1943 | static inline int | ||
1944 | __trace_add_new_mod_event(struct ftrace_event_call *call, | ||
1945 | struct trace_array *tr, | ||
1946 | struct ftrace_module_file_ops *file_ops) | ||
1947 | { | ||
1948 | return -ENODEV; | ||
1949 | } | ||
1950 | #endif /* CONFIG_MODULES */ | 1827 | #endif /* CONFIG_MODULES */ |
1951 | 1828 | ||
1952 | /* Create a new event directory structure for a trace directory. */ | 1829 | /* Create a new event directory structure for a trace directory. */ |
1953 | static void | 1830 | static void |
1954 | __trace_add_event_dirs(struct trace_array *tr) | 1831 | __trace_add_event_dirs(struct trace_array *tr) |
1955 | { | 1832 | { |
1956 | struct ftrace_module_file_ops *file_ops = NULL; | ||
1957 | struct ftrace_event_call *call; | 1833 | struct ftrace_event_call *call; |
1958 | int ret; | 1834 | int ret; |
1959 | 1835 | ||
1960 | list_for_each_entry(call, &ftrace_events, list) { | 1836 | list_for_each_entry(call, &ftrace_events, list) { |
1961 | if (call->mod) { | 1837 | ret = __trace_add_new_event(call, tr); |
1962 | /* | ||
1963 | * Directories for events by modules need to | ||
1964 | * keep module ref counts when opened (as we don't | ||
1965 | * want the module to disappear when reading one | ||
1966 | * of these files). The file_ops keep account of | ||
1967 | * the module ref count. | ||
1968 | */ | ||
1969 | file_ops = find_ftrace_file_ops(file_ops, call->mod); | ||
1970 | if (!file_ops) | ||
1971 | continue; /* Warn? */ | ||
1972 | ret = __trace_add_new_mod_event(call, tr, file_ops); | ||
1973 | if (ret < 0) | ||
1974 | pr_warning("Could not create directory for event %s\n", | ||
1975 | call->name); | ||
1976 | continue; | ||
1977 | } | ||
1978 | ret = __trace_add_new_event(call, tr, | ||
1979 | &ftrace_event_id_fops, | ||
1980 | &ftrace_enable_fops, | ||
1981 | &ftrace_event_filter_fops, | ||
1982 | &ftrace_event_format_fops); | ||
1983 | if (ret < 0) | 1838 | if (ret < 0) |
1984 | pr_warning("Could not create directory for event %s\n", | 1839 | pr_warning("Could not create directory for event %s\n", |
1985 | call->name); | 1840 | call->name); |
@@ -2287,11 +2142,7 @@ __trace_early_add_event_dirs(struct trace_array *tr) | |||
2287 | 2142 | ||
2288 | 2143 | ||
2289 | list_for_each_entry(file, &tr->events, list) { | 2144 | list_for_each_entry(file, &tr->events, list) { |
2290 | ret = event_create_dir(tr->event_dir, file, | 2145 | ret = event_create_dir(tr->event_dir, file); |
2291 | &ftrace_event_id_fops, | ||
2292 | &ftrace_enable_fops, | ||
2293 | &ftrace_event_filter_fops, | ||
2294 | &ftrace_event_format_fops); | ||
2295 | if (ret < 0) | 2146 | if (ret < 0) |
2296 | pr_warning("Could not create directory for event %s\n", | 2147 | pr_warning("Could not create directory for event %s\n", |
2297 | file->event_call->name); | 2148 | file->event_call->name); |
@@ -2332,29 +2183,14 @@ __trace_remove_event_dirs(struct trace_array *tr) | |||
2332 | remove_event_file_dir(file); | 2183 | remove_event_file_dir(file); |
2333 | } | 2184 | } |
2334 | 2185 | ||
2335 | static void | 2186 | static void __add_event_to_tracers(struct ftrace_event_call *call) |
2336 | __add_event_to_tracers(struct ftrace_event_call *call, | ||
2337 | struct ftrace_module_file_ops *file_ops) | ||
2338 | { | 2187 | { |
2339 | struct trace_array *tr; | 2188 | struct trace_array *tr; |
2340 | 2189 | ||
2341 | list_for_each_entry(tr, &ftrace_trace_arrays, list) { | 2190 | list_for_each_entry(tr, &ftrace_trace_arrays, list) |
2342 | if (file_ops) | 2191 | __trace_add_new_event(call, tr); |
2343 | __trace_add_new_mod_event(call, tr, file_ops); | ||
2344 | else | ||
2345 | __trace_add_new_event(call, tr, | ||
2346 | &ftrace_event_id_fops, | ||
2347 | &ftrace_enable_fops, | ||
2348 | &ftrace_event_filter_fops, | ||
2349 | &ftrace_event_format_fops); | ||
2350 | } | ||
2351 | } | 2192 | } |
2352 | 2193 | ||
2353 | static struct notifier_block trace_module_nb = { | ||
2354 | .notifier_call = trace_module_notify, | ||
2355 | .priority = 0, | ||
2356 | }; | ||
2357 | |||
2358 | extern struct ftrace_event_call *__start_ftrace_events[]; | 2194 | extern struct ftrace_event_call *__start_ftrace_events[]; |
2359 | extern struct ftrace_event_call *__stop_ftrace_events[]; | 2195 | extern struct ftrace_event_call *__stop_ftrace_events[]; |
2360 | 2196 | ||
@@ -2559,10 +2395,11 @@ static __init int event_trace_init(void) | |||
2559 | if (ret) | 2395 | if (ret) |
2560 | return ret; | 2396 | return ret; |
2561 | 2397 | ||
2398 | #ifdef CONFIG_MODULES | ||
2562 | ret = register_module_notifier(&trace_module_nb); | 2399 | ret = register_module_notifier(&trace_module_nb); |
2563 | if (ret) | 2400 | if (ret) |
2564 | pr_warning("Failed to register trace events module notifier\n"); | 2401 | pr_warning("Failed to register trace events module notifier\n"); |
2565 | 2402 | #endif | |
2566 | return 0; | 2403 | return 0; |
2567 | } | 2404 | } |
2568 | early_initcall(event_trace_memsetup); | 2405 | early_initcall(event_trace_memsetup); |
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index 8fd03657bc7d..559329d9bd2f 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c | |||
@@ -200,8 +200,8 @@ extern char *__bad_type_size(void); | |||
200 | #type, #name, offsetof(typeof(trace), name), \ | 200 | #type, #name, offsetof(typeof(trace), name), \ |
201 | sizeof(trace.name), is_signed_type(type) | 201 | sizeof(trace.name), is_signed_type(type) |
202 | 202 | ||
203 | static | 203 | static int __init |
204 | int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len) | 204 | __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len) |
205 | { | 205 | { |
206 | int i; | 206 | int i; |
207 | int pos = 0; | 207 | int pos = 0; |
@@ -228,7 +228,7 @@ int __set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len) | |||
228 | return pos; | 228 | return pos; |
229 | } | 229 | } |
230 | 230 | ||
231 | static int set_syscall_print_fmt(struct ftrace_event_call *call) | 231 | static int __init set_syscall_print_fmt(struct ftrace_event_call *call) |
232 | { | 232 | { |
233 | char *print_fmt; | 233 | char *print_fmt; |
234 | int len; | 234 | int len; |
@@ -253,7 +253,7 @@ static int set_syscall_print_fmt(struct ftrace_event_call *call) | |||
253 | return 0; | 253 | return 0; |
254 | } | 254 | } |
255 | 255 | ||
256 | static void free_syscall_print_fmt(struct ftrace_event_call *call) | 256 | static void __init free_syscall_print_fmt(struct ftrace_event_call *call) |
257 | { | 257 | { |
258 | struct syscall_metadata *entry = call->data; | 258 | struct syscall_metadata *entry = call->data; |
259 | 259 | ||
@@ -459,7 +459,7 @@ static void unreg_event_syscall_exit(struct ftrace_event_file *file, | |||
459 | mutex_unlock(&syscall_trace_lock); | 459 | mutex_unlock(&syscall_trace_lock); |
460 | } | 460 | } |
461 | 461 | ||
462 | static int init_syscall_trace(struct ftrace_event_call *call) | 462 | static int __init init_syscall_trace(struct ftrace_event_call *call) |
463 | { | 463 | { |
464 | int id; | 464 | int id; |
465 | int num; | 465 | int num; |
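These helpers run only while the core syscall trace events are registered at boot, so marking them __init lets their text be discarded afterwards. Roughly what the annotation does (sketch; the real define in include/linux/init.h adds a few more attributes):

/* sketch: place the function in .init.text, which the kernel frees
 * after boot ("Freeing unused kernel memory") */
#define __init __attribute__((__section__(".init.text")))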
diff --git a/kernel/uid16.c b/kernel/uid16.c index f6c83d7ef000..602e5bbbceff 100644 --- a/kernel/uid16.c +++ b/kernel/uid16.c | |||
@@ -176,7 +176,7 @@ SYSCALL_DEFINE2(setgroups16, int, gidsetsize, old_gid_t __user *, grouplist) | |||
176 | struct group_info *group_info; | 176 | struct group_info *group_info; |
177 | int retval; | 177 | int retval; |
178 | 178 | ||
179 | if (!nsown_capable(CAP_SETGID)) | 179 | if (!ns_capable(current_user_ns(), CAP_SETGID)) |
180 | return -EPERM; | 180 | return -EPERM; |
181 | if ((unsigned)gidsetsize > NGROUPS_MAX) | 181 | if ((unsigned)gidsetsize > NGROUPS_MAX) |
182 | return -EINVAL; | 182 | return -EINVAL; |
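The conversion above replaces the removed nsown_capable() wrapper with its open-coded equivalent, so every caller now names the target namespace explicitly. A minimal sketch of the resulting check (the function name is illustrative):

#include <linux/capability.h>
#include <linux/cred.h>

static int example_check_setgid(void)
{
	/* Is the caller privileged in its *own* user namespace? */
	if (!ns_capable(current_user_ns(), CAP_SETGID))
		return -EPERM;
	return 0;
}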
diff --git a/kernel/up.c b/kernel/up.c index c54c75e9faf7..630d72bf7e41 100644 --- a/kernel/up.c +++ b/kernel/up.c | |||
@@ -10,12 +10,64 @@ | |||
10 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, | 10 | int smp_call_function_single(int cpu, void (*func) (void *info), void *info, |
11 | int wait) | 11 | int wait) |
12 | { | 12 | { |
13 | unsigned long flags; | ||
14 | |||
13 | WARN_ON(cpu != 0); | 15 | WARN_ON(cpu != 0); |
14 | 16 | ||
15 | local_irq_disable(); | 17 | local_irq_save(flags); |
16 | (func)(info); | 18 | func(info); |
17 | local_irq_enable(); | 19 | local_irq_restore(flags); |
18 | 20 | ||
19 | return 0; | 21 | return 0; |
20 | } | 22 | } |
21 | EXPORT_SYMBOL(smp_call_function_single); | 23 | EXPORT_SYMBOL(smp_call_function_single); |
24 | |||
25 | int on_each_cpu(smp_call_func_t func, void *info, int wait) | ||
26 | { | ||
27 | unsigned long flags; | ||
28 | |||
29 | local_irq_save(flags); | ||
30 | func(info); | ||
31 | local_irq_restore(flags); | ||
32 | return 0; | ||
33 | } | ||
34 | EXPORT_SYMBOL(on_each_cpu); | ||
35 | |||
36 | /* | ||
37 | * Note we still need to test the mask even for UP | ||
38 | * because we actually can get an empty mask from | ||
39 | * code that on SMP might call us without the local | ||
40 | * CPU in the mask. | ||
41 | */ | ||
42 | void on_each_cpu_mask(const struct cpumask *mask, | ||
43 | smp_call_func_t func, void *info, bool wait) | ||
44 | { | ||
45 | unsigned long flags; | ||
46 | |||
47 | if (cpumask_test_cpu(0, mask)) { | ||
48 | local_irq_save(flags); | ||
49 | func(info); | ||
50 | local_irq_restore(flags); | ||
51 | } | ||
52 | } | ||
53 | EXPORT_SYMBOL(on_each_cpu_mask); | ||
54 | |||
55 | /* | ||
56 | * Preemption is disabled here to make sure the cond_func is called under the | ||
57 | * same conditions in UP and SMP. | ||
58 | */ | ||
59 | void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info), | ||
60 | smp_call_func_t func, void *info, bool wait, | ||
61 | gfp_t gfp_flags) | ||
62 | { | ||
63 | unsigned long flags; | ||
64 | |||
65 | preempt_disable(); | ||
66 | if (cond_func(0, info)) { | ||
67 | local_irq_save(flags); | ||
68 | func(info); | ||
69 | local_irq_restore(flags); | ||
70 | } | ||
71 | preempt_enable(); | ||
72 | } | ||
73 | EXPORT_SYMBOL(on_each_cpu_cond); | ||
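These UP stubs keep callers source-compatible with the SMP variants, so the same call site builds either way. A small usage sketch (all names illustrative):

#include <linux/smp.h>
#include <linux/gfp.h>

static void example_drain(void *info)
{
	/* Runs on each selected CPU; on UP, with IRQs disabled as above. */
}

static bool example_needs_drain(int cpu, void *info)
{
	/* Called with preemption disabled, per the comment above. */
	return true;
}

static void example_drain_all(void)
{
	on_each_cpu_cond(example_needs_drain, example_drain,
			 NULL, true, GFP_ATOMIC);
}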
diff --git a/kernel/user.c b/kernel/user.c index 69b4c3d48cde..5bbb91988e69 100644 --- a/kernel/user.c +++ b/kernel/user.c | |||
@@ -51,8 +51,6 @@ struct user_namespace init_user_ns = { | |||
51 | .owner = GLOBAL_ROOT_UID, | 51 | .owner = GLOBAL_ROOT_UID, |
52 | .group = GLOBAL_ROOT_GID, | 52 | .group = GLOBAL_ROOT_GID, |
53 | .proc_inum = PROC_USER_INIT_INO, | 53 | .proc_inum = PROC_USER_INIT_INO, |
54 | .may_mount_sysfs = true, | ||
55 | .may_mount_proc = true, | ||
56 | }; | 54 | }; |
57 | EXPORT_SYMBOL_GPL(init_user_ns); | 55 | EXPORT_SYMBOL_GPL(init_user_ns); |
58 | 56 | ||
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c index 9064b919a406..13fb1134ba58 100644 --- a/kernel/user_namespace.c +++ b/kernel/user_namespace.c | |||
@@ -101,8 +101,6 @@ int create_user_ns(struct cred *new) | |||
101 | 101 | ||
102 | set_cred_user_ns(new, ns); | 102 | set_cred_user_ns(new, ns); |
103 | 103 | ||
104 | update_mnt_policy(ns); | ||
105 | |||
106 | return 0; | 104 | return 0; |
107 | } | 105 | } |
108 | 106 | ||
diff --git a/kernel/utsname.c b/kernel/utsname.c index 2fc8576efaa8..fd393124e507 100644 --- a/kernel/utsname.c +++ b/kernel/utsname.c | |||
@@ -114,7 +114,7 @@ static int utsns_install(struct nsproxy *nsproxy, void *new) | |||
114 | struct uts_namespace *ns = new; | 114 | struct uts_namespace *ns = new; |
115 | 115 | ||
116 | if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) || | 116 | if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) || |
117 | !nsown_capable(CAP_SYS_ADMIN)) | 117 | !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) |
118 | return -EPERM; | 118 | return -EPERM; |
119 | 119 | ||
120 | get_uts_ns(ns); | 120 | get_uts_ns(ns); |
diff --git a/kernel/workqueue.c b/kernel/workqueue.c index 29b79852a845..987293d03ebc 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c | |||
@@ -541,6 +541,8 @@ static int worker_pool_assign_id(struct worker_pool *pool) | |||
541 | * This must be called either with pwq_lock held or sched RCU read locked. | 541 | * This must be called either with pwq_lock held or sched RCU read locked. |
542 | * If the pwq needs to be used beyond the locking in effect, the caller is | 542 | * If the pwq needs to be used beyond the locking in effect, the caller is |
543 | * responsible for guaranteeing that the pwq stays online. | 543 | * responsible for guaranteeing that the pwq stays online. |
544 | * | ||
545 | * Return: The unbound pool_workqueue for @node. | ||
544 | */ | 546 | */ |
545 | static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, | 547 | static struct pool_workqueue *unbound_pwq_by_node(struct workqueue_struct *wq, |
546 | int node) | 548 | int node) |
@@ -639,8 +641,6 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) | |||
639 | * get_work_pool - return the worker_pool a given work was associated with | 641 | * get_work_pool - return the worker_pool a given work was associated with |
640 | * @work: the work item of interest | 642 | * @work: the work item of interest |
641 | * | 643 | * |
642 | * Return the worker_pool @work was last associated with. %NULL if none. | ||
643 | * | ||
644 | * Pools are created and destroyed under wq_pool_mutex, and allow read | 644 | * Pools are created and destroyed under wq_pool_mutex, and allow read |
645 | * access under sched-RCU read lock. As such, this function should be | 645 | * access under sched-RCU read lock. As such, this function should be |
646 | * called under wq_pool_mutex or with preemption disabled. | 646 | * called under wq_pool_mutex or with preemption disabled. |
@@ -649,6 +649,8 @@ static struct pool_workqueue *get_work_pwq(struct work_struct *work) | |||
649 | * mentioned locking is in effect. If the returned pool needs to be used | 649 | * mentioned locking is in effect. If the returned pool needs to be used |
650 | * beyond the critical section, the caller is responsible for ensuring the | 650 | * beyond the critical section, the caller is responsible for ensuring the |
651 | * returned pool is and stays online. | 651 | * returned pool is and stays online. |
652 | * | ||
653 | * Return: The worker_pool @work was last associated with. %NULL if none. | ||
652 | */ | 654 | */ |
653 | static struct worker_pool *get_work_pool(struct work_struct *work) | 655 | static struct worker_pool *get_work_pool(struct work_struct *work) |
654 | { | 656 | { |
@@ -672,7 +674,7 @@ static struct worker_pool *get_work_pool(struct work_struct *work) | |||
672 | * get_work_pool_id - return the worker pool ID a given work is associated with | 674 | * get_work_pool_id - return the worker pool ID a given work is associated with |
673 | * @work: the work item of interest | 675 | * @work: the work item of interest |
674 | * | 676 | * |
675 | * Return the worker_pool ID @work was last associated with. | 677 | * Return: The worker_pool ID @work was last associated with. |
676 | * %WORK_OFFQ_POOL_NONE if none. | 678 | * %WORK_OFFQ_POOL_NONE if none. |
677 | */ | 679 | */ |
678 | static int get_work_pool_id(struct work_struct *work) | 680 | static int get_work_pool_id(struct work_struct *work) |
@@ -831,7 +833,7 @@ void wq_worker_waking_up(struct task_struct *task, int cpu) | |||
831 | * CONTEXT: | 833 | * CONTEXT: |
832 | * spin_lock_irq(rq->lock) | 834 | * spin_lock_irq(rq->lock) |
833 | * | 835 | * |
834 | * RETURNS: | 836 | * Return: |
835 | * Worker task on @cpu to wake up, %NULL if none. | 837 | * Worker task on @cpu to wake up, %NULL if none. |
836 | */ | 838 | */ |
837 | struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) | 839 | struct task_struct *wq_worker_sleeping(struct task_struct *task, int cpu) |
@@ -966,8 +968,8 @@ static inline void worker_clr_flags(struct worker *worker, unsigned int flags) | |||
966 | * CONTEXT: | 968 | * CONTEXT: |
967 | * spin_lock_irq(pool->lock). | 969 | * spin_lock_irq(pool->lock). |
968 | * | 970 | * |
969 | * RETURNS: | 971 | * Return: |
970 | * Pointer to worker which is executing @work if found, NULL | 972 | * Pointer to worker which is executing @work if found, %NULL |
971 | * otherwise. | 973 | * otherwise. |
972 | */ | 974 | */ |
973 | static struct worker *find_worker_executing_work(struct worker_pool *pool, | 975 | static struct worker *find_worker_executing_work(struct worker_pool *pool, |
@@ -1155,14 +1157,16 @@ out_put: | |||
1155 | * @flags: place to store irq state | 1157 | * @flags: place to store irq state |
1156 | * | 1158 | * |
1157 | * Try to grab PENDING bit of @work. This function can handle @work in any | 1159 | * Try to grab PENDING bit of @work. This function can handle @work in any |
1158 | * stable state - idle, on timer or on worklist. Return values are | 1160 | * stable state - idle, on timer or on worklist. |
1159 | * | 1161 | * |
1162 | * Return: | ||
1160 | * 1 if @work was pending and we successfully stole PENDING | 1163 | * 1 if @work was pending and we successfully stole PENDING |
1161 | * 0 if @work was idle and we claimed PENDING | 1164 | * 0 if @work was idle and we claimed PENDING |
1162 | * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry | 1165 | * -EAGAIN if PENDING couldn't be grabbed at the moment, safe to busy-retry |
1163 | * -ENOENT if someone else is canceling @work, this state may persist | 1166 | * -ENOENT if someone else is canceling @work, this state may persist |
1164 | * for arbitrarily long | 1167 | * for arbitrarily long |
1165 | * | 1168 | * |
1169 | * Note: | ||
1166 | * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting | 1170 | * On >= 0 return, the caller owns @work's PENDING bit. To avoid getting |
1167 | * interrupted while holding PENDING and @work off queue, irq must be | 1171 | * interrupted while holding PENDING and @work off queue, irq must be |
1168 | * disabled on entry. This, combined with delayed_work->timer being | 1172 | * disabled on entry. This, combined with delayed_work->timer being |
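try_to_grab_pending() is internal to workqueue.c, so the return contract above is consumed in-file with a busy-retry loop, roughly as its caller __cancel_work_timer() does. A sketch, not a verbatim copy, and it compiles only inside workqueue.c where the function is visible:

static bool example_cancel(struct work_struct *work, bool is_dwork)
{
	unsigned long flags;
	int ret;

	do {
		ret = try_to_grab_pending(work, is_dwork, &flags);
		/*
		 * -ENOENT: another task is canceling; wait for it via
		 * flush_work() rather than busy-looping.
		 */
		if (unlikely(ret == -ENOENT))
			flush_work(work);
	} while (unlikely(ret < 0));

	/*
	 * ret >= 0: we own PENDING with IRQs disabled and the old state
	 * in @flags. A real caller would clear the work before releasing;
	 * this sketch just restores IRQs.
	 */
	local_irq_restore(flags);
	return ret;
}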
@@ -1404,10 +1408,10 @@ retry: | |||
1404 | * @wq: workqueue to use | 1408 | * @wq: workqueue to use |
1405 | * @work: work to queue | 1409 | * @work: work to queue |
1406 | * | 1410 | * |
1407 | * Returns %false if @work was already on a queue, %true otherwise. | ||
1408 | * | ||
1409 | * We queue the work to a specific CPU, the caller must ensure it | 1411 | * We queue the work to a specific CPU, the caller must ensure it |
1410 | * can't go away. | 1412 | * can't go away. |
1413 | * | ||
1414 | * Return: %false if @work was already on a queue, %true otherwise. | ||
1411 | */ | 1415 | */ |
1412 | bool queue_work_on(int cpu, struct workqueue_struct *wq, | 1416 | bool queue_work_on(int cpu, struct workqueue_struct *wq, |
1413 | struct work_struct *work) | 1417 | struct work_struct *work) |
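A minimal caller of queue_work_on() honoring the documented contract (names illustrative; uses the stock system_wq):

#include <linux/workqueue.h>
#include <linux/printk.h>

static void example_work_fn(struct work_struct *work)
{
	/* ... per-CPU processing ... */
}

static DECLARE_WORK(example_work, example_work_fn);

static void example_kick_cpu0(void)
{
	/* The caller must keep CPU 0 from going away, per the comment above. */
	if (!queue_work_on(0, system_wq, &example_work))
		pr_debug("example_work already pending\n");
}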
@@ -1477,7 +1481,7 @@ static void __queue_delayed_work(int cpu, struct workqueue_struct *wq, | |||
1477 | * @dwork: work to queue | 1481 | * @dwork: work to queue |
1478 | * @delay: number of jiffies to wait before queueing | 1482 | * @delay: number of jiffies to wait before queueing |
1479 | * | 1483 | * |
1480 | * Returns %false if @work was already on a queue, %true otherwise. If | 1484 | * Return: %false if @work was already on a queue, %true otherwise. If |
1481 | * @delay is zero and @dwork is idle, it will be scheduled for immediate | 1485 | * @delay is zero and @dwork is idle, it will be scheduled for immediate |
1482 | * execution. | 1486 | * execution. |
1483 | */ | 1487 | */ |
@@ -1513,7 +1517,7 @@ EXPORT_SYMBOL(queue_delayed_work_on); | |||
1513 | * zero, @work is guaranteed to be scheduled immediately regardless of its | 1517 | * zero, @work is guaranteed to be scheduled immediately regardless of its |
1514 | * current state. | 1518 | * current state. |
1515 | * | 1519 | * |
1516 | * Returns %false if @dwork was idle and queued, %true if @dwork was | 1520 | * Return: %false if @dwork was idle and queued, %true if @dwork was |
1517 | * pending and its timer was modified. | 1521 | * pending and its timer was modified. |
1518 | * | 1522 | * |
1519 | * This function is safe to call from any context including IRQ handler. | 1523 | * This function is safe to call from any context including IRQ handler. |
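The %false/%true distinction above matters for debounce-style users of mod_delayed_work_on(); a sketch (names illustrative):

#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_flush_fn(struct work_struct *work)
{
	/* ... runs HZ jiffies after the last example_touch() ... */
}

static DECLARE_DELAYED_WORK(example_dwork, example_flush_fn);

static void example_touch(void)
{
	/*
	 * %true: the timer was pending and has been pushed back;
	 * %false: the work was idle and is now queued.
	 */
	mod_delayed_work(system_wq, &example_dwork, HZ);
}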
@@ -1628,7 +1632,7 @@ static void worker_leave_idle(struct worker *worker) | |||
1628 | * Might sleep. Called without any lock but returns with pool->lock | 1632 | * Might sleep. Called without any lock but returns with pool->lock |
1629 | * held. | 1633 | * held. |
1630 | * | 1634 | * |
1631 | * RETURNS: | 1635 | * Return: |
1632 | * %true if the associated pool is online (@worker is successfully | 1636 | * %true if the associated pool is online (@worker is successfully |
1633 | * bound), %false if offline. | 1637 | * bound), %false if offline. |
1634 | */ | 1638 | */ |
@@ -1689,7 +1693,7 @@ static struct worker *alloc_worker(void) | |||
1689 | * CONTEXT: | 1693 | * CONTEXT: |
1690 | * Might sleep. Does GFP_KERNEL allocations. | 1694 | * Might sleep. Does GFP_KERNEL allocations. |
1691 | * | 1695 | * |
1692 | * RETURNS: | 1696 | * Return: |
1693 | * Pointer to the newly created worker. | 1697 | * Pointer to the newly created worker. |
1694 | */ | 1698 | */ |
1695 | static struct worker *create_worker(struct worker_pool *pool) | 1699 | static struct worker *create_worker(struct worker_pool *pool) |
@@ -1789,6 +1793,8 @@ static void start_worker(struct worker *worker) | |||
1789 | * @pool: the target pool | 1793 | * @pool: the target pool |
1790 | * | 1794 | * |
1791 | * Grab the managership of @pool and create and start a new worker for it. | 1795 | * Grab the managership of @pool and create and start a new worker for it. |
1796 | * | ||
1797 | * Return: 0 on success. A negative error code otherwise. | ||
1792 | */ | 1798 | */ |
1793 | static int create_and_start_worker(struct worker_pool *pool) | 1799 | static int create_and_start_worker(struct worker_pool *pool) |
1794 | { | 1800 | { |
@@ -1933,7 +1939,7 @@ static void pool_mayday_timeout(unsigned long __pool) | |||
1933 | * multiple times. Does GFP_KERNEL allocations. Called only from | 1939 | * multiple times. Does GFP_KERNEL allocations. Called only from |
1934 | * manager. | 1940 | * manager. |
1935 | * | 1941 | * |
1936 | * RETURNS: | 1942 | * Return: |
1937 | * %false if no action was taken and pool->lock stayed locked, %true | 1943 | * %false if no action was taken and pool->lock stayed locked, %true |
1938 | * otherwise. | 1944 | * otherwise. |
1939 | */ | 1945 | */ |
@@ -1990,7 +1996,7 @@ restart: | |||
1990 | * spin_lock_irq(pool->lock) which may be released and regrabbed | 1996 | * spin_lock_irq(pool->lock) which may be released and regrabbed |
1991 | * multiple times. Called only from manager. | 1997 | * multiple times. Called only from manager. |
1992 | * | 1998 | * |
1993 | * RETURNS: | 1999 | * Return: |
1994 | * %false if no action was taken and pool->lock stayed locked, %true | 2000 | * %false if no action was taken and pool->lock stayed locked, %true |
1995 | * otherwise. | 2001 | * otherwise. |
1996 | */ | 2002 | */ |
@@ -2033,7 +2039,7 @@ static bool maybe_destroy_workers(struct worker_pool *pool) | |||
2033 | * spin_lock_irq(pool->lock) which may be released and regrabbed | 2039 | * spin_lock_irq(pool->lock) which may be released and regrabbed |
2034 | * multiple times. Does GFP_KERNEL allocations. | 2040 | * multiple times. Does GFP_KERNEL allocations. |
2035 | * | 2041 | * |
2036 | * RETURNS: | 2042 | * Return: |
2037 | * %false if the pool doesn't need management and the caller can safely start | 2043 | * %false if the pool doesn't need management and the caller can safely start |
2038 | * processing works, %true indicates that the function released pool->lock | 2044 | * processing works, %true indicates that the function released pool->lock |
2039 | * and reacquired it to perform some management function and that the | 2045 | * and reacquired it to perform some management function and that the |
@@ -2259,6 +2265,8 @@ static void process_scheduled_works(struct worker *worker) | |||
2259 | * work items regardless of their specific target workqueue. The only | 2265 | * work items regardless of their specific target workqueue. The only |
2260 | * exception is work items which belong to workqueues with a rescuer which | 2266 | * exception is work items which belong to workqueues with a rescuer which |
2261 | * will be explained in rescuer_thread(). | 2267 | * will be explained in rescuer_thread(). |
2268 | * | ||
2269 | * Return: 0 | ||
2262 | */ | 2270 | */ |
2263 | static int worker_thread(void *__worker) | 2271 | static int worker_thread(void *__worker) |
2264 | { | 2272 | { |
@@ -2357,6 +2365,8 @@ sleep: | |||
2357 | * those works so that forward progress can be guaranteed. | 2365 | * those works so that forward progress can be guaranteed. |
2358 | * | 2366 | * |
2359 | * This should happen rarely. | 2367 | * This should happen rarely. |
2368 | * | ||
2369 | * Return: 0 | ||
2360 | */ | 2370 | */ |
2361 | static int rescuer_thread(void *__rescuer) | 2371 | static int rescuer_thread(void *__rescuer) |
2362 | { | 2372 | { |
@@ -2529,7 +2539,7 @@ static void insert_wq_barrier(struct pool_workqueue *pwq, | |||
2529 | * CONTEXT: | 2539 | * CONTEXT: |
2530 | * mutex_lock(wq->mutex). | 2540 | * mutex_lock(wq->mutex). |
2531 | * | 2541 | * |
2532 | * RETURNS: | 2542 | * Return: |
2533 | * %true if @flush_color >= 0 and there's something to flush. %false | 2543 | * %true if @flush_color >= 0 and there's something to flush. %false |
2534 | * otherwise. | 2544 | * otherwise. |
2535 | */ | 2545 | */ |
@@ -2850,7 +2860,7 @@ static bool __flush_work(struct work_struct *work) | |||
2850 | * Wait until @work has finished execution. @work is guaranteed to be idle | 2860 | * Wait until @work has finished execution. @work is guaranteed to be idle |
2851 | * on return if it hasn't been requeued since flush started. | 2861 | * on return if it hasn't been requeued since flush started. |
2852 | * | 2862 | * |
2853 | * RETURNS: | 2863 | * Return: |
2854 | * %true if flush_work() waited for the work to finish execution, | 2864 | * %true if flush_work() waited for the work to finish execution, |
2855 | * %false if it was already idle. | 2865 | * %false if it was already idle. |
2856 | */ | 2866 | */ |
@@ -2902,7 +2912,7 @@ static bool __cancel_work_timer(struct work_struct *work, bool is_dwork) | |||
2902 | * The caller must ensure that the workqueue on which @work was last | 2912 | * The caller must ensure that the workqueue on which @work was last |
2903 | * queued can't be destroyed before this function returns. | 2913 | * queued can't be destroyed before this function returns. |
2904 | * | 2914 | * |
2905 | * RETURNS: | 2915 | * Return: |
2906 | * %true if @work was pending, %false otherwise. | 2916 | * %true if @work was pending, %false otherwise. |
2907 | */ | 2917 | */ |
2908 | bool cancel_work_sync(struct work_struct *work) | 2918 | bool cancel_work_sync(struct work_struct *work) |
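Typical teardown use of cancel_work_sync(), relying on the guarantee above (reusing the hypothetical example_work from the earlier sketch):

#include <linux/workqueue.h>

static void example_teardown(void)
{
	/*
	 * Kills a pending instance and waits out a running one; the
	 * workqueue example_work was queued on must still exist here.
	 */
	cancel_work_sync(&example_work);
}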
@@ -2919,7 +2929,7 @@ EXPORT_SYMBOL_GPL(cancel_work_sync); | |||
2919 | * immediate execution. Like flush_work(), this function only | 2929 | * immediate execution. Like flush_work(), this function only |
2920 | * considers the last queueing instance of @dwork. | 2930 | * considers the last queueing instance of @dwork. |
2921 | * | 2931 | * |
2922 | * RETURNS: | 2932 | * Return: |
2923 | * %true if flush_work() waited for the work to finish execution, | 2933 | * %true if flush_work() waited for the work to finish execution, |
2924 | * %false if it was already idle. | 2934 | * %false if it was already idle. |
2925 | */ | 2935 | */ |
@@ -2937,11 +2947,15 @@ EXPORT_SYMBOL(flush_delayed_work); | |||
2937 | * cancel_delayed_work - cancel a delayed work | 2947 | * cancel_delayed_work - cancel a delayed work |
2938 | * @dwork: delayed_work to cancel | 2948 | * @dwork: delayed_work to cancel |
2939 | * | 2949 | * |
2940 | * Kill off a pending delayed_work. Returns %true if @dwork was pending | 2950 | * Kill off a pending delayed_work. |
2941 | * and canceled; %false if wasn't pending. Note that the work callback | 2951 | * |
2942 | * function may still be running on return, unless it returns %true and the | 2952 | * Return: %true if @dwork was pending and canceled; %false if it wasn't |
2943 | * work doesn't re-arm itself. Explicitly flush or use | 2953 | * pending. |
2944 | * cancel_delayed_work_sync() to wait on it. | 2954 | * |
2955 | * Note: | ||
2956 | * The work callback function may still be running on return, unless | ||
2957 | * it returns %true and the work doesn't re-arm itself. Explicitly flush or | ||
2958 | * use cancel_delayed_work_sync() to wait on it. | ||
2945 | * | 2959 | * |
2946 | * This function is safe to call from any context including IRQ handler. | 2960 | * This function is safe to call from any context including IRQ handler. |
2947 | */ | 2961 | */ |
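The Note above is the key difference from cancel_delayed_work_sync(); a sketch of choosing between the two (reusing the hypothetical example_dwork from the earlier sketch):

#include <linux/workqueue.h>

static void example_stop(bool can_sleep)
{
	if (can_sleep)
		/* The callback is guaranteed not to be running on return. */
		cancel_delayed_work_sync(&example_dwork);
	else
		/* IRQ-safe, but the callback may still be running. */
		cancel_delayed_work(&example_dwork);
}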
@@ -2970,7 +2984,7 @@ EXPORT_SYMBOL(cancel_delayed_work); | |||
2970 | * | 2984 | * |
2971 | * This is cancel_work_sync() for delayed works. | 2985 | * This is cancel_work_sync() for delayed works. |
2972 | * | 2986 | * |
2973 | * RETURNS: | 2987 | * Return: |
2974 | * %true if @dwork was pending, %false otherwise. | 2988 | * %true if @dwork was pending, %false otherwise. |
2975 | */ | 2989 | */ |
2976 | bool cancel_delayed_work_sync(struct delayed_work *dwork) | 2990 | bool cancel_delayed_work_sync(struct delayed_work *dwork) |
@@ -2987,7 +3001,7 @@ EXPORT_SYMBOL(cancel_delayed_work_sync); | |||
2987 | * system workqueue and blocks until all CPUs have completed. | 3001 | * system workqueue and blocks until all CPUs have completed. |
2988 | * schedule_on_each_cpu() is very slow. | 3002 | * schedule_on_each_cpu() is very slow. |
2989 | * | 3003 | * |
2990 | * RETURNS: | 3004 | * Return: |
2991 | * 0 on success, -errno on failure. | 3005 | * 0 on success, -errno on failure. |
2992 | */ | 3006 | */ |
2993 | int schedule_on_each_cpu(work_func_t func) | 3007 | int schedule_on_each_cpu(work_func_t func) |
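A minimal schedule_on_each_cpu() caller (function names illustrative); note the call sleeps and is heavyweight, as documented:

#include <linux/workqueue.h>
#include <linux/smp.h>
#include <linux/printk.h>

static void example_percpu_fn(struct work_struct *work)
{
	pr_info("ran on CPU %d\n", smp_processor_id());
}

static int example_run_everywhere(void)
{
	/* Blocks until the function has run on every online CPU. */
	return schedule_on_each_cpu(example_percpu_fn);
}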
@@ -3055,7 +3069,7 @@ EXPORT_SYMBOL(flush_scheduled_work); | |||
3055 | * Executes the function immediately if process context is available, | 3069 | * Executes the function immediately if process context is available, |
3056 | * otherwise schedules the function for delayed execution. | 3070 | * otherwise schedules the function for delayed execution. |
3057 | * | 3071 | * |
3058 | * Returns: 0 - function was executed | 3072 | * Return: 0 - function was executed |
3059 | * 1 - function was scheduled for execution | 3073 | * 1 - function was scheduled for execution |
3060 | */ | 3074 | */ |
3061 | int execute_in_process_context(work_func_t fn, struct execute_work *ew) | 3075 | int execute_in_process_context(work_func_t fn, struct execute_work *ew) |
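The 0/1 return tells the caller whether the function ran inline; a sketch (names illustrative):

#include <linux/workqueue.h>
#include <linux/printk.h>

static struct execute_work example_ew;

static void example_pc_fn(struct work_struct *work)
{
	/* Always reached in process context. */
}

static void example_maybe_defer(void)
{
	/* 0: executed inline; 1: queued for later execution. */
	if (execute_in_process_context(example_pc_fn, &example_ew))
		pr_debug("deferred to a workqueue\n");
}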
@@ -3315,7 +3329,7 @@ static void wq_device_release(struct device *dev) | |||
3315 | * apply_workqueue_attrs() may race against userland updating the | 3329 | * apply_workqueue_attrs() may race against userland updating the |
3316 | * attributes. | 3330 | * attributes. |
3317 | * | 3331 | * |
3318 | * Returns 0 on success, -errno on failure. | 3332 | * Return: 0 on success, -errno on failure. |
3319 | */ | 3333 | */ |
3320 | int workqueue_sysfs_register(struct workqueue_struct *wq) | 3334 | int workqueue_sysfs_register(struct workqueue_struct *wq) |
3321 | { | 3335 | { |
@@ -3408,7 +3422,9 @@ void free_workqueue_attrs(struct workqueue_attrs *attrs) | |||
3408 | * @gfp_mask: allocation mask to use | 3422 | * @gfp_mask: allocation mask to use |
3409 | * | 3423 | * |
3410 | * Allocate a new workqueue_attrs, initialize with default settings and | 3424 | * Allocate a new workqueue_attrs, initialize with default settings and |
3411 | * return it. Returns NULL on failure. | 3425 | * return it. |
3426 | * | ||
3427 | * Return: The newly allocated workqueue_attrs on success. %NULL on failure. | ||
3412 | */ | 3428 | */ |
3413 | struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) | 3429 | struct workqueue_attrs *alloc_workqueue_attrs(gfp_t gfp_mask) |
3414 | { | 3430 | { |
@@ -3467,7 +3483,8 @@ static bool wqattrs_equal(const struct workqueue_attrs *a, | |||
3467 | * @pool: worker_pool to initialize | 3483 | * @pool: worker_pool to initialize |
3468 | * | 3484 | * |
3469 | * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. | 3485 | * Initialize a newly zalloc'd @pool. It also allocates @pool->attrs. |
3470 | * Returns 0 on success, -errno on failure. Even on failure, all fields | 3486 | * |
3487 | * Return: 0 on success, -errno on failure. Even on failure, all fields | ||
3471 | * inside @pool proper are initialized and put_unbound_pool() can be called | 3488 | * inside @pool proper are initialized and put_unbound_pool() can be called |
3472 | * on @pool safely to release it. | 3489 | * on @pool safely to release it. |
3473 | */ | 3490 | */ |
@@ -3574,9 +3591,12 @@ static void put_unbound_pool(struct worker_pool *pool) | |||
3574 | * Obtain a worker_pool which has the same attributes as @attrs, bump the | 3591 | * Obtain a worker_pool which has the same attributes as @attrs, bump the |
3575 | * reference count and return it. If there already is a matching | 3592 | * reference count and return it. If there already is a matching |
3576 | * worker_pool, it will be used; otherwise, this function attempts to | 3593 | * worker_pool, it will be used; otherwise, this function attempts to |
3577 | * create a new one. On failure, returns NULL. | 3594 | * create a new one. |
3578 | * | 3595 | * |
3579 | * Should be called with wq_pool_mutex held. | 3596 | * Should be called with wq_pool_mutex held. |
3597 | * | ||
3598 | * Return: On success, a worker_pool with the same attributes as @attrs. | ||
3599 | * On failure, %NULL. | ||
3580 | */ | 3600 | */ |
3581 | static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) | 3601 | static struct worker_pool *get_unbound_pool(const struct workqueue_attrs *attrs) |
3582 | { | 3602 | { |
@@ -3812,9 +3832,7 @@ static void free_unbound_pwq(struct pool_workqueue *pwq) | |||
3812 | * | 3832 | * |
3813 | * Calculate the cpumask a workqueue with @attrs should use on @node. If | 3833 | * Calculate the cpumask a workqueue with @attrs should use on @node. If |
3814 | * @cpu_going_down is >= 0, that cpu is considered offline during | 3834 | * @cpu_going_down is >= 0, that cpu is considered offline during |
3815 | * calculation. The result is stored in @cpumask. This function returns | 3835 | * calculation. The result is stored in @cpumask. |
3816 | * %true if the resulting @cpumask is different from @attrs->cpumask, | ||
3817 | * %false if equal. | ||
3818 | * | 3836 | * |
3819 | * If NUMA affinity is not enabled, @attrs->cpumask is always used. If | 3837 | * If NUMA affinity is not enabled, @attrs->cpumask is always used. If |
3820 | * enabled and @node has online CPUs requested by @attrs, the returned | 3838 | * enabled and @node has online CPUs requested by @attrs, the returned |
@@ -3823,6 +3841,9 @@ static void free_unbound_pwq(struct pool_workqueue *pwq) | |||
3823 | * | 3841 | * |
3824 | * The caller is responsible for ensuring that the cpumask of @node stays | 3842 | * The caller is responsible for ensuring that the cpumask of @node stays |
3825 | * stable. | 3843 | * stable. |
3844 | * | ||
3845 | * Return: %true if the resulting @cpumask is different from @attrs->cpumask, | ||
3846 | * %false if equal. | ||
3826 | */ | 3847 | */ |
3827 | static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node, | 3848 | static bool wq_calc_node_cpumask(const struct workqueue_attrs *attrs, int node, |
3828 | int cpu_going_down, cpumask_t *cpumask) | 3849 | int cpu_going_down, cpumask_t *cpumask) |
@@ -3876,8 +3897,9 @@ static struct pool_workqueue *numa_pwq_tbl_install(struct workqueue_struct *wq, | |||
3876 | * items finish. Note that a work item which repeatedly requeues itself | 3897 | * items finish. Note that a work item which repeatedly requeues itself |
3877 | * back-to-back will stay on its current pwq. | 3898 | * back-to-back will stay on its current pwq. |
3878 | * | 3899 | * |
3879 | * Performs GFP_KERNEL allocations. Returns 0 on success and -errno on | 3900 | * Performs GFP_KERNEL allocations. |
3880 | * failure. | 3901 | * |
3902 | * Return: 0 on success and -errno on failure. | ||
3881 | */ | 3903 | */ |
3882 | int apply_workqueue_attrs(struct workqueue_struct *wq, | 3904 | int apply_workqueue_attrs(struct workqueue_struct *wq, |
3883 | const struct workqueue_attrs *attrs) | 3905 | const struct workqueue_attrs *attrs) |
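Putting the attrs API documented above together; a sketch only, since these symbols are not necessarily exported to modules on every kernel, and the tuning values are arbitrary:

#include <linux/workqueue.h>
#include <linux/cpumask.h>
#include <linux/errno.h>

static int example_tune_unbound_wq(struct workqueue_struct *wq)
{
	struct workqueue_attrs *attrs;
	int ret;

	attrs = alloc_workqueue_attrs(GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	attrs->nice = -5;			/* arbitrary priority bump */
	cpumask_copy(attrs->cpumask, cpumask_of(0));

	ret = apply_workqueue_attrs(wq, attrs);	/* 0 or -errno, as above */
	free_workqueue_attrs(attrs);
	return ret;
}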
@@ -4345,6 +4367,8 @@ EXPORT_SYMBOL_GPL(workqueue_set_max_active); | |||
4345 | * | 4367 | * |
4346 | * Determine whether %current is a workqueue rescuer. Can be used from | 4368 | * Determine whether %current is a workqueue rescuer. Can be used from |
4347 | * work functions to determine whether it's being run off the rescuer task. | 4369 | * work functions to determine whether it's being run off the rescuer task. |
4370 | * | ||
4371 | * Return: %true if %current is a workqueue rescuer. %false otherwise. | ||
4348 | */ | 4372 | */ |
4349 | bool current_is_workqueue_rescuer(void) | 4373 | bool current_is_workqueue_rescuer(void) |
4350 | { | 4374 | { |
@@ -4368,7 +4392,7 @@ bool current_is_workqueue_rescuer(void) | |||
4368 | * workqueue being congested on one CPU doesn't mean the workqueue is also | 4392 | * workqueue being congested on one CPU doesn't mean the workqueue is also |
4369 | * congested on other CPUs / NUMA nodes. | 4393 | * congested on other CPUs / NUMA nodes. |
4370 | * | 4394 | * |
4371 | * RETURNS: | 4395 | * Return: |
4372 | * %true if congested, %false otherwise. | 4396 | * %true if congested, %false otherwise. |
4373 | */ | 4397 | */ |
4374 | bool workqueue_congested(int cpu, struct workqueue_struct *wq) | 4398 | bool workqueue_congested(int cpu, struct workqueue_struct *wq) |
@@ -4401,7 +4425,7 @@ EXPORT_SYMBOL_GPL(workqueue_congested); | |||
4401 | * synchronization around this function and the test result is | 4425 | * synchronization around this function and the test result is |
4402 | * unreliable and only useful as advisory hints or for debugging. | 4426 | * unreliable and only useful as advisory hints or for debugging. |
4403 | * | 4427 | * |
4404 | * RETURNS: | 4428 | * Return: |
4405 | * OR'd bitmask of WORK_BUSY_* bits. | 4429 | * OR'd bitmask of WORK_BUSY_* bits. |
4406 | */ | 4430 | */ |
4407 | unsigned int work_busy(struct work_struct *work) | 4431 | unsigned int work_busy(struct work_struct *work) |
@@ -4779,9 +4803,10 @@ static void work_for_cpu_fn(struct work_struct *work) | |||
4779 | * @fn: the function to run | 4803 | * @fn: the function to run |
4780 | * @arg: the function arg | 4804 | * @arg: the function arg |
4781 | * | 4805 | * |
4782 | * This will return the value @fn returns. | ||
4783 | * It is up to the caller to ensure that the cpu doesn't go offline. | 4806 | * It is up to the caller to ensure that the cpu doesn't go offline. |
4784 | * The caller must not hold any locks which would prevent @fn from completing. | 4807 | * The caller must not hold any locks which would prevent @fn from completing. |
4808 | * | ||
4809 | * Return: The value @fn returns. | ||
4785 | */ | 4810 | */ |
4786 | long work_on_cpu(int cpu, long (*fn)(void *), void *arg) | 4811 | long work_on_cpu(int cpu, long (*fn)(void *), void *arg) |
4787 | { | 4812 | { |
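A minimal work_on_cpu() caller; per the comment above, the CPU must be kept online, e.g. under get_online_cpus(), and all names are illustrative:

#include <linux/workqueue.h>
#include <linux/cpu.h>
#include <linux/errno.h>

static long example_probe_fn(void *arg)
{
	/* Runs in process context on the requested CPU. */
	return 0;
}

static long example_probe_cpu(int cpu)
{
	long ret;

	get_online_cpus();	/* keep @cpu from going offline */
	ret = cpu_online(cpu) ? work_on_cpu(cpu, example_probe_fn, NULL)
			      : -ENODEV;
	put_online_cpus();
	return ret;
}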
@@ -4853,7 +4878,7 @@ void freeze_workqueues_begin(void) | |||
4853 | * CONTEXT: | 4878 | * CONTEXT: |
4854 | * Grabs and releases wq_pool_mutex. | 4879 | * Grabs and releases wq_pool_mutex. |
4855 | * | 4880 | * |
4856 | * RETURNS: | 4881 | * Return: |
4857 | * %true if some freezable workqueues are still busy. %false if freezing | 4882 | * %true if some freezable workqueues are still busy. %false if freezing |
4858 | * is complete. | 4883 | * is complete. |
4859 | */ | 4884 | */ |