author     Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 19:39:15 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-04-15 19:39:15 -0400
commit     eea3a00264cf243a28e4331566ce67b86059339d (patch)
tree       487f16389e0dfa32e9caa7604d1274a7dcda8f04 /kernel
parent     e7c82412433a8039616c7314533a0a1c025d99bf (diff)
parent     e693d73c20ffdb06840c9378f367bad849ac0d5d (diff)
Merge branch 'akpm' (patches from Andrew)
Merge second patchbomb from Andrew Morton:
- the rest of MM
- various misc bits
- add ability to run /sbin/reboot at reboot time
- printk/vsprintf changes
- fiddle with seq_printf() return value
* akpm: (114 commits)
parisc: remove use of seq_printf return value
lru_cache: remove use of seq_printf return value
tracing: remove use of seq_printf return value
cgroup: remove use of seq_printf return value
proc: remove use of seq_printf return value
s390: remove use of seq_printf return value
cris fasttimer: remove use of seq_printf return value
cris: remove use of seq_printf return value
openrisc: remove use of seq_printf return value
ARM: plat-pxa: remove use of seq_printf return value
nios2: cpuinfo: remove use of seq_printf return value
microblaze: mb: remove use of seq_printf return value
ipc: remove use of seq_printf return value
rtc: remove use of seq_printf return value
power: wakeup: remove use of seq_printf return value
x86: mtrr: if: remove use of seq_printf return value
linux/bitmap.h: improve BITMAP_{LAST,FIRST}_WORD_MASK
MAINTAINERS: CREDITS: remove Stefano Brivio from B43
.mailmap: add Ricardo Ribalda
CREDITS: add Ricardo Ribalda Delgado
...
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile             |  4
-rw-r--r--  kernel/capability.c         | 35
-rw-r--r--  kernel/cgroup.c             |  6
-rw-r--r--  kernel/cred.c               |  3
-rw-r--r--  kernel/groups.c             |  3
-rw-r--r--  kernel/hung_task.c          |  4
-rw-r--r--  kernel/reboot.c             | 53
-rw-r--r--  kernel/resource.c           | 32
-rw-r--r--  kernel/sys.c                |  2
-rw-r--r--  kernel/sys_ni.c             | 14
-rw-r--r--  kernel/sysctl.c             |  9
-rw-r--r--  kernel/trace/trace_stack.c  |  4

12 files changed, 106 insertions(+), 63 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 1408b3353a3c..0f8f8b0bc1bf 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,9 @@ obj-y = fork.o exec_domain.o panic.o \
 	    extable.o params.o \
 	    kthread.o sys_ni.o nsproxy.o \
 	    notifier.o ksysfs.o cred.o reboot.o \
-	    async.o range.o groups.o smpboot.o
+	    async.o range.o smpboot.o
+
+obj-$(CONFIG_MULTIUSER) += groups.o
 
 ifdef CONFIG_FUNCTION_TRACER
 # Do not trace debug files and internal ftrace files
diff --git a/kernel/capability.c b/kernel/capability.c
index 989f5bfc57dc..45432b54d5c6 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -35,6 +35,7 @@ static int __init file_caps_disable(char *str)
 }
 __setup("no_file_caps", file_caps_disable);
 
+#ifdef CONFIG_MULTIUSER
 /*
  * More recent versions of libcap are available from:
  *
@@ -386,6 +387,24 @@ bool ns_capable(struct user_namespace *ns, int cap)
 }
 EXPORT_SYMBOL(ns_capable);
 
+
+/**
+ * capable - Determine if the current task has a superior capability in effect
+ * @cap: The capability to be tested for
+ *
+ * Return true if the current task has the given superior capability currently
+ * available for use, false if not.
+ *
+ * This sets PF_SUPERPRIV on the task if the capability is available on the
+ * assumption that it's about to be used.
+ */
+bool capable(int cap)
+{
+	return ns_capable(&init_user_ns, cap);
+}
+EXPORT_SYMBOL(capable);
+#endif /* CONFIG_MULTIUSER */
+
 /**
  * file_ns_capable - Determine if the file's opener had a capability in effect
  * @file: The file we want to check
@@ -412,22 +431,6 @@ bool file_ns_capable(const struct file *file, struct user_namespace *ns,
 EXPORT_SYMBOL(file_ns_capable);
 
 /**
- * capable - Determine if the current task has a superior capability in effect
- * @cap: The capability to be tested for
- *
- * Return true if the current task has the given superior capability currently
- * available for use, false if not.
- *
- * This sets PF_SUPERPRIV on the task if the capability is available on the
- * assumption that it's about to be used.
- */
-bool capable(int cap)
-{
-	return ns_capable(&init_user_ns, cap);
-}
-EXPORT_SYMBOL(capable);
-
-/**
  * capable_wrt_inode_uidgid - Check nsown_capable and uid and gid mapped
  * @inode: The inode in question
  * @cap: The capability in question
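
[Editorial illustration, not part of the patch: capable() keeps its usual calling convention; only its definition now lives inside the CONFIG_MULTIUSER block (the !CONFIG_MULTIUSER stub is provided in a header outside this diff). A minimal hedged sketch of a typical caller; the function name example_privileged_op is invented for the example.]

/* Hypothetical caller, for illustration only. */
static int example_privileged_op(void)
{
	if (!capable(CAP_SYS_ADMIN))	/* sets PF_SUPERPRIV when the capability is granted */
		return -EPERM;

	/* ... privileged work ... */
	return 0;
}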
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a220fdb66568..469dd547770c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -4196,7 +4196,9 @@ static void *cgroup_pidlist_next(struct seq_file *s, void *v, loff_t *pos)
 
 static int cgroup_pidlist_show(struct seq_file *s, void *v)
 {
-	return seq_printf(s, "%d\n", *(int *)v);
+	seq_printf(s, "%d\n", *(int *)v);
+
+	return 0;
 }
 
 static u64 cgroup_read_notify_on_release(struct cgroup_subsys_state *css,
@@ -5451,7 +5453,7 @@ struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
 struct cgroup_subsys_state *css_from_id(int id, struct cgroup_subsys *ss)
 {
 	WARN_ON_ONCE(!rcu_read_lock_held());
-	return idr_find(&ss->css_idr, id);
+	return id > 0 ? idr_find(&ss->css_idr, id) : NULL;
 }
 
 #ifdef CONFIG_CGROUP_DEBUG
diff --git a/kernel/cred.c b/kernel/cred.c
index e0573a43c7df..ec1c07667ec1 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -29,6 +29,9 @@
 
 static struct kmem_cache *cred_jar;
 
+/* init to 2 - one for init_task, one to ensure it is never freed */
+struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
+
 /*
  * The initial credentials for the initial task
  */
diff --git a/kernel/groups.c b/kernel/groups.c
index 664411f171b5..74d431d25251 100644
--- a/kernel/groups.c
+++ b/kernel/groups.c
@@ -9,9 +9,6 @@
 #include <linux/user_namespace.h>
 #include <asm/uaccess.h>
 
-/* init to 2 - one for init_task, one to ensure it is never freed */
-struct group_info init_groups = { .usage = ATOMIC_INIT(2) };
-
 struct group_info *groups_alloc(int gidsetsize)
 {
 	struct group_info *group_info;
diff --git a/kernel/hung_task.c b/kernel/hung_task.c
index 06db12434d72..e0f90c2b57aa 100644
--- a/kernel/hung_task.c
+++ b/kernel/hung_task.c
@@ -169,7 +169,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 		return;
 
 	rcu_read_lock();
-	do_each_thread(g, t) {
+	for_each_process_thread(g, t) {
 		if (!max_count--)
 			goto unlock;
 		if (!--batch_count) {
@@ -180,7 +180,7 @@ static void check_hung_uninterruptible_tasks(unsigned long timeout)
 		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
 		if (t->state == TASK_UNINTERRUPTIBLE)
 			check_hung_task(t, timeout);
-	} while_each_thread(g, t);
+	}
 unlock:
 	rcu_read_unlock();
 }
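
[Editorial illustration, not part of the patch: for_each_process_thread() is an ordinary statement-like iterator, so the loop body can use break/goto directly, unlike the old do_each_thread()/while_each_thread() pair. A minimal sketch of the pattern under the usual rcu_read_lock() protection; the helper name count_uninterruptible is invented for the example.]

#include <linux/sched.h>

/* Hypothetical helper, for illustration only. */
static unsigned int count_uninterruptible(void)
{
	struct task_struct *g, *t;
	unsigned int nr = 0;

	rcu_read_lock();
	for_each_process_thread(g, t) {
		if (t->state == TASK_UNINTERRUPTIBLE)
			nr++;
	}
	rcu_read_unlock();

	return nr;
}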
diff --git a/kernel/reboot.c b/kernel/reboot.c
index 5925f5ae8dff..d20c85d9f8c0 100644
--- a/kernel/reboot.c
+++ b/kernel/reboot.c
@@ -387,8 +387,9 @@ void ctrl_alt_del(void)
 }
 
 char poweroff_cmd[POWEROFF_CMD_PATH_LEN] = "/sbin/poweroff";
+static const char reboot_cmd[] = "/sbin/reboot";
 
-static int __orderly_poweroff(bool force)
+static int run_cmd(const char *cmd)
 {
 	char **argv;
 	static char *envp[] = {
@@ -397,8 +398,7 @@ static int __orderly_poweroff(bool force)
 		NULL
 	};
 	int ret;
-
-	argv = argv_split(GFP_KERNEL, poweroff_cmd, NULL);
+	argv = argv_split(GFP_KERNEL, cmd, NULL);
 	if (argv) {
 		ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
 		argv_free(argv);
@@ -406,8 +406,33 @@ static int __orderly_poweroff(bool force)
 		ret = -ENOMEM;
 	}
 
+	return ret;
+}
+
+static int __orderly_reboot(void)
+{
+	int ret;
+
+	ret = run_cmd(reboot_cmd);
+
+	if (ret) {
+		pr_warn("Failed to start orderly reboot: forcing the issue\n");
+		emergency_sync();
+		kernel_restart(NULL);
+	}
+
+	return ret;
+}
+
+static int __orderly_poweroff(bool force)
+{
+	int ret;
+
+	ret = run_cmd(poweroff_cmd);
+
 	if (ret && force) {
 		pr_warn("Failed to start orderly shutdown: forcing the issue\n");
+
 	/*
 	 * I guess this should try to kick off some daemon to sync and
 	 * poweroff asap. Or not even bother syncing if we're doing an
@@ -436,15 +461,33 @@ static DECLARE_WORK(poweroff_work, poweroff_work_func);
  * This may be called from any context to trigger a system shutdown.
  * If the orderly shutdown fails, it will force an immediate shutdown.
  */
-int orderly_poweroff(bool force)
+void orderly_poweroff(bool force)
 {
 	if (force) /* do not override the pending "true" */
 		poweroff_force = true;
 	schedule_work(&poweroff_work);
-	return 0;
 }
 EXPORT_SYMBOL_GPL(orderly_poweroff);
 
+static void reboot_work_func(struct work_struct *work)
+{
+	__orderly_reboot();
+}
+
+static DECLARE_WORK(reboot_work, reboot_work_func);
+
+/**
+ * orderly_reboot - Trigger an orderly system reboot
+ *
+ * This may be called from any context to trigger a system reboot.
+ * If the orderly reboot fails, it will force an immediate reboot.
+ */
+void orderly_reboot(void)
+{
+	schedule_work(&reboot_work);
+}
+EXPORT_SYMBOL_GPL(orderly_reboot);
+
 static int __init reboot_setup(char *str)
 {
 	for (;;) {
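
[Editorial illustration, not part of the patch: a caller that wants a clean, userspace-driven restart can now use the new orderly_reboot() helper; note that orderly_poweroff() no longer returns a value, so any caller that checked its result needs updating. A hedged sketch; the function name handle_fatal_condition is invented for the example.]

#include <linux/reboot.h>

/* Hypothetical caller, for illustration only. */
static void handle_fatal_condition(bool userspace_usable)
{
	if (userspace_usable)
		orderly_reboot();	/* runs /sbin/reboot from a workqueue */
	else
		emergency_restart();	/* immediate restart, no userspace involved */
}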
diff --git a/kernel/resource.c b/kernel/resource.c
index 19f2357dfda3..90552aab5f2d 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1034,8 +1034,6 @@ resource_size_t resource_alignment(struct resource *res)
  *
  * request_region creates a new busy region.
  *
- * check_region returns non-zero if the area is already busy.
- *
  * release_region releases a matching busy region.
  */
 
@@ -1098,36 +1096,6 @@ struct resource * __request_region(struct resource *parent,
 EXPORT_SYMBOL(__request_region);
 
 /**
- * __check_region - check if a resource region is busy or free
- * @parent: parent resource descriptor
- * @start: resource start address
- * @n: resource region size
- *
- * Returns 0 if the region is free at the moment it is checked,
- * returns %-EBUSY if the region is busy.
- *
- * NOTE:
- * This function is deprecated because its use is racy.
- * Even if it returns 0, a subsequent call to request_region()
- * may fail because another driver etc. just allocated the region.
- * Do NOT use it. It will be removed from the kernel.
- */
-int __check_region(struct resource *parent, resource_size_t start,
-			resource_size_t n)
-{
-	struct resource * res;
-
-	res = __request_region(parent, start, n, "check-region", 0);
-	if (!res)
-		return -EBUSY;
-
-	release_resource(res);
-	free_resource(res);
-	return 0;
-}
-EXPORT_SYMBOL(__check_region);
-
-/**
  * __release_region - release a previously reserved resource region
  * @parent: parent resource descriptor
  * @start: resource start address
diff --git a/kernel/sys.c b/kernel/sys.c
index a03d9cd23ed7..3be344902316 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -325,6 +325,7 @@ out_unlock:
  * SMP: There are not races, the GIDs are checked only by filesystem
  *      operations (as far as semantic preservation is concerned).
  */
+#ifdef CONFIG_MULTIUSER
 SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
 {
 	struct user_namespace *ns = current_user_ns();
@@ -815,6 +816,7 @@ change_okay:
 	commit_creds(new);
 	return old_fsgid;
 }
+#endif /* CONFIG_MULTIUSER */
 
 /**
  * sys_getpid - return the thread group id of the current process
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 5adcb0ae3a58..7995ef5868d8 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -159,6 +159,20 @@ cond_syscall(sys_uselib);
 cond_syscall(sys_fadvise64);
 cond_syscall(sys_fadvise64_64);
 cond_syscall(sys_madvise);
+cond_syscall(sys_setuid);
+cond_syscall(sys_setregid);
+cond_syscall(sys_setgid);
+cond_syscall(sys_setreuid);
+cond_syscall(sys_setresuid);
+cond_syscall(sys_getresuid);
+cond_syscall(sys_setresgid);
+cond_syscall(sys_getresgid);
+cond_syscall(sys_setgroups);
+cond_syscall(sys_getgroups);
+cond_syscall(sys_setfsuid);
+cond_syscall(sys_setfsgid);
+cond_syscall(sys_capget);
+cond_syscall(sys_capset);
 
 /* arch-specific weak syscall entries */
 cond_syscall(sys_pciconfig_read);
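
[Editorial illustration, not part of the patch: cond_syscall() makes each of these entry points fall back to sys_ni_syscall() when the real implementation is not built in, so on a CONFIG_MULTIUSER=n kernel they fail with ENOSYS. A hedged userspace sketch of what that looks like to a caller.]

/* Hypothetical userspace check, for illustration only. */
#include <errno.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	if (setuid(1000) == -1 && errno == ENOSYS)
		printf("setuid(): not implemented (CONFIG_MULTIUSER=n kernel)\n");

	return 0;
}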
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8c0eabd41886..42b7fc2860c1 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1335,6 +1335,15 @@ static struct ctl_table vm_table[] = {
 		.extra1		= &min_extfrag_threshold,
 		.extra2		= &max_extfrag_threshold,
 	},
+	{
+		.procname	= "compact_unevictable_allowed",
+		.data		= &sysctl_compact_unevictable_allowed,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
 
 #endif /* CONFIG_COMPACTION */
 	{
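
[Editorial illustration, not part of the patch: a ctl_table entry under vm_table is exposed as /proc/sys/vm/<procname>, so the new knob can be toggled between its zero/one bounds from userspace. A hedged sketch; the semantics comment reflects the sysctl's purpose as described in the series, not code shown in this diff.]

/* Hypothetical userspace toggle, for illustration only. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/vm/compact_unevictable_allowed", "w");

	if (!f)
		return 1;
	fputs("1\n", f);	/* 1: allow compaction to scan unevictable (e.g. mlocked) pages */
	fclose(f);

	return 0;
}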
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index c3e4fcfddd45..3f34496244e9 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -327,11 +327,11 @@ static void t_stop(struct seq_file *m, void *p)
 	local_irq_enable();
 }
 
-static int trace_lookup_stack(struct seq_file *m, long i)
+static void trace_lookup_stack(struct seq_file *m, long i)
 {
 	unsigned long addr = stack_dump_trace[i];
 
-	return seq_printf(m, "%pS\n", (void *)addr);
+	seq_printf(m, "%pS\n", (void *)addr);
 }
 
 static void print_disabled(struct seq_file *m)
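
[Editorial illustration, not part of the patch: across this series, seq_file callbacks stop relying on seq_printf()'s return value; a ->show() handler simply emits its output and returns 0, leaving overflow handling to the seq_file core. A hedged sketch; example_show is an invented name.]

/* Hypothetical ->show() callback, for illustration only. */
static int example_show(struct seq_file *m, void *v)
{
	seq_printf(m, "value: %d\n", *(int *)v);

	return 0;
}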