aboutsummaryrefslogtreecommitdiffstats
path: root/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'kernel')
-rw-r--r--kernel/Makefile15
-rw-r--r--kernel/cpu.c2
-rw-r--r--kernel/cpuset.c19
-rw-r--r--kernel/exit.c342
-rw-r--r--kernel/fork.c12
-rw-r--r--kernel/irq/chip.c46
-rw-r--r--kernel/irq/handle.c4
-rw-r--r--kernel/itimer.c2
-rw-r--r--kernel/module.c31
-rw-r--r--kernel/mutex-debug.c2
-rw-r--r--kernel/mutex.c29
-rw-r--r--kernel/nsproxy.c1
-rw-r--r--kernel/params.c20
-rw-r--r--kernel/pid.c208
-rw-r--r--kernel/pid_namespace.c197
-rw-r--r--kernel/posix-cpu-timers.c8
-rw-r--r--kernel/posix-timers.c2
-rw-r--r--kernel/printk.c16
-rw-r--r--kernel/profile.c1
-rw-r--r--kernel/ptrace.c11
-rw-r--r--kernel/rtmutex-debug.c12
-rw-r--r--kernel/rtmutex_common.h2
-rw-r--r--kernel/sched.c16
-rw-r--r--kernel/signal.c142
-rw-r--r--kernel/softirq.c8
-rw-r--r--kernel/sys.c57
-rw-r--r--kernel/sysctl.c25
-rw-r--r--kernel/sysctl_check.c151
-rw-r--r--kernel/time.c33
-rw-r--r--kernel/time/clockevents.c13
-rw-r--r--kernel/time/clocksource.c1
-rw-r--r--kernel/time/tick-sched.c2
-rw-r--r--kernel/time/timekeeping.c6
-rw-r--r--kernel/timeconst.pl402
-rw-r--r--kernel/timer.c8
-rw-r--r--kernel/user.c10
-rw-r--r--kernel/user_namespace.c13
-rw-r--r--kernel/wait.c26
-rw-r--r--kernel/workqueue.c12
39 files changed, 1159 insertions, 748 deletions
diff --git a/kernel/Makefile b/kernel/Makefile
index 685697c0a181..6c584c55a6e9 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -4,12 +4,12 @@
4 4
5obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \ 5obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
6 exit.o itimer.o time.o softirq.o resource.o \ 6 exit.o itimer.o time.o softirq.o resource.o \
7 sysctl.o capability.o ptrace.o timer.o user.o user_namespace.o \ 7 sysctl.o capability.o ptrace.o timer.o user.o \
8 signal.o sys.o kmod.o workqueue.o pid.o \ 8 signal.o sys.o kmod.o workqueue.o pid.o \
9 rcupdate.o extable.o params.o posix-timers.o \ 9 rcupdate.o extable.o params.o posix-timers.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11 hrtimer.o rwsem.o nsproxy.o srcu.o \ 11 hrtimer.o rwsem.o nsproxy.o srcu.o \
12 utsname.o notifier.o ksysfs.o pm_qos_params.o 12 notifier.o ksysfs.o pm_qos_params.o
13 13
14obj-$(CONFIG_SYSCTL) += sysctl_check.o 14obj-$(CONFIG_SYSCTL) += sysctl_check.o
15obj-$(CONFIG_STACKTRACE) += stacktrace.o 15obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -42,6 +42,9 @@ obj-$(CONFIG_CGROUPS) += cgroup.o
42obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o 42obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
43obj-$(CONFIG_CPUSETS) += cpuset.o 43obj-$(CONFIG_CPUSETS) += cpuset.o
44obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o 44obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
45obj-$(CONFIG_UTS_NS) += utsname.o
46obj-$(CONFIG_USER_NS) += user_namespace.o
47obj-$(CONFIG_PID_NS) += pid_namespace.o
45obj-$(CONFIG_IKCONFIG) += configs.o 48obj-$(CONFIG_IKCONFIG) += configs.o
46obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o 49obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o
47obj-$(CONFIG_STOP_MACHINE) += stop_machine.o 50obj-$(CONFIG_STOP_MACHINE) += stop_machine.o
@@ -88,3 +91,11 @@ quiet_cmd_ikconfiggz = IKCFG $@
88targets += config_data.h 91targets += config_data.h
89$(obj)/config_data.h: $(obj)/config_data.gz FORCE 92$(obj)/config_data.h: $(obj)/config_data.gz FORCE
90 $(call if_changed,ikconfiggz) 93 $(call if_changed,ikconfiggz)
94
95$(obj)/time.o: $(obj)/timeconst.h
96
97quiet_cmd_timeconst = TIMEC $@
98 cmd_timeconst = $(PERL) $< $(CONFIG_HZ) > $@
99targets += timeconst.h
100$(obj)/timeconst.h: $(src)/timeconst.pl FORCE
101 $(call if_changed,timeconst)
diff --git a/kernel/cpu.c b/kernel/cpu.c
index e0d3a4f56ecb..2eff3f63abed 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -389,7 +389,7 @@ int disable_nonboot_cpus(void)
389 return error; 389 return error;
390} 390}
391 391
392void enable_nonboot_cpus(void) 392void __ref enable_nonboot_cpus(void)
393{ 393{
394 int cpu, error; 394 int cpu, error;
395 395
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 67b2bfe27814..3e296ed81d4d 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2255,13 +2255,14 @@ const struct file_operations proc_cpuset_operations = {
2255#endif /* CONFIG_PROC_PID_CPUSET */ 2255#endif /* CONFIG_PROC_PID_CPUSET */
2256 2256
2257/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */ 2257/* Display task cpus_allowed, mems_allowed in /proc/<pid>/status file. */
2258char *cpuset_task_status_allowed(struct task_struct *task, char *buffer) 2258void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2259{ 2259{
2260 buffer += sprintf(buffer, "Cpus_allowed:\t"); 2260 seq_printf(m, "Cpus_allowed:\t");
2261 buffer += cpumask_scnprintf(buffer, PAGE_SIZE, task->cpus_allowed); 2261 m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count,
2262 buffer += sprintf(buffer, "\n"); 2262 task->cpus_allowed);
2263 buffer += sprintf(buffer, "Mems_allowed:\t"); 2263 seq_printf(m, "\n");
2264 buffer += nodemask_scnprintf(buffer, PAGE_SIZE, task->mems_allowed); 2264 seq_printf(m, "Mems_allowed:\t");
2265 buffer += sprintf(buffer, "\n"); 2265 m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count,
2266 return buffer; 2266 task->mems_allowed);
2267 seq_printf(m, "\n");
2267} 2268}
diff --git a/kernel/exit.c b/kernel/exit.c
index eb9934a82fc1..3b893e78ce61 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -293,26 +293,27 @@ static void reparent_to_kthreadd(void)
293 switch_uid(INIT_USER); 293 switch_uid(INIT_USER);
294} 294}
295 295
296void __set_special_pids(pid_t session, pid_t pgrp) 296void __set_special_pids(struct pid *pid)
297{ 297{
298 struct task_struct *curr = current->group_leader; 298 struct task_struct *curr = current->group_leader;
299 pid_t nr = pid_nr(pid);
299 300
300 if (task_session_nr(curr) != session) { 301 if (task_session(curr) != pid) {
301 detach_pid(curr, PIDTYPE_SID); 302 detach_pid(curr, PIDTYPE_SID);
302 set_task_session(curr, session); 303 attach_pid(curr, PIDTYPE_SID, pid);
303 attach_pid(curr, PIDTYPE_SID, find_pid(session)); 304 set_task_session(curr, nr);
304 } 305 }
305 if (task_pgrp_nr(curr) != pgrp) { 306 if (task_pgrp(curr) != pid) {
306 detach_pid(curr, PIDTYPE_PGID); 307 detach_pid(curr, PIDTYPE_PGID);
307 set_task_pgrp(curr, pgrp); 308 attach_pid(curr, PIDTYPE_PGID, pid);
308 attach_pid(curr, PIDTYPE_PGID, find_pid(pgrp)); 309 set_task_pgrp(curr, nr);
309 } 310 }
310} 311}
311 312
312static void set_special_pids(pid_t session, pid_t pgrp) 313static void set_special_pids(struct pid *pid)
313{ 314{
314 write_lock_irq(&tasklist_lock); 315 write_lock_irq(&tasklist_lock);
315 __set_special_pids(session, pgrp); 316 __set_special_pids(pid);
316 write_unlock_irq(&tasklist_lock); 317 write_unlock_irq(&tasklist_lock);
317} 318}
318 319
@@ -383,7 +384,11 @@ void daemonize(const char *name, ...)
383 */ 384 */
384 current->flags |= PF_NOFREEZE; 385 current->flags |= PF_NOFREEZE;
385 386
386 set_special_pids(1, 1); 387 if (current->nsproxy != &init_nsproxy) {
388 get_nsproxy(&init_nsproxy);
389 switch_task_namespaces(current, &init_nsproxy);
390 }
391 set_special_pids(&init_struct_pid);
387 proc_clear_tty(current); 392 proc_clear_tty(current);
388 393
389 /* Block and flush all signals */ 394 /* Block and flush all signals */
@@ -398,11 +403,6 @@ void daemonize(const char *name, ...)
398 current->fs = fs; 403 current->fs = fs;
399 atomic_inc(&fs->count); 404 atomic_inc(&fs->count);
400 405
401 if (current->nsproxy != init_task.nsproxy) {
402 get_nsproxy(init_task.nsproxy);
403 switch_task_namespaces(current, init_task.nsproxy);
404 }
405
406 exit_files(current); 406 exit_files(current);
407 current->files = init_task.files; 407 current->files = init_task.files;
408 atomic_inc(&current->files->count); 408 atomic_inc(&current->files->count);
@@ -458,7 +458,7 @@ struct files_struct *get_files_struct(struct task_struct *task)
458 return files; 458 return files;
459} 459}
460 460
461void fastcall put_files_struct(struct files_struct *files) 461void put_files_struct(struct files_struct *files)
462{ 462{
463 struct fdtable *fdt; 463 struct fdtable *fdt;
464 464
@@ -745,24 +745,6 @@ static void exit_notify(struct task_struct *tsk)
745 struct task_struct *t; 745 struct task_struct *t;
746 struct pid *pgrp; 746 struct pid *pgrp;
747 747
748 if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
749 && !thread_group_empty(tsk)) {
750 /*
751 * This occurs when there was a race between our exit
752 * syscall and a group signal choosing us as the one to
753 * wake up. It could be that we are the only thread
754 * alerted to check for pending signals, but another thread
755 * should be woken now to take the signal since we will not.
756 * Now we'll wake all the threads in the group just to make
757 * sure someone gets all the pending signals.
758 */
759 spin_lock_irq(&tsk->sighand->siglock);
760 for (t = next_thread(tsk); t != tsk; t = next_thread(t))
761 if (!signal_pending(t) && !(t->flags & PF_EXITING))
762 recalc_sigpending_and_wake(t);
763 spin_unlock_irq(&tsk->sighand->siglock);
764 }
765
766 /* 748 /*
767 * This does two things: 749 * This does two things:
768 * 750 *
@@ -905,7 +887,7 @@ static inline void exit_child_reaper(struct task_struct *tsk)
905 zap_pid_ns_processes(tsk->nsproxy->pid_ns); 887 zap_pid_ns_processes(tsk->nsproxy->pid_ns);
906} 888}
907 889
908fastcall NORET_TYPE void do_exit(long code) 890NORET_TYPE void do_exit(long code)
909{ 891{
910 struct task_struct *tsk = current; 892 struct task_struct *tsk = current;
911 int group_dead; 893 int group_dead;
@@ -947,7 +929,7 @@ fastcall NORET_TYPE void do_exit(long code)
947 schedule(); 929 schedule();
948 } 930 }
949 931
950 tsk->flags |= PF_EXITING; 932 exit_signals(tsk); /* sets PF_EXITING */
951 /* 933 /*
952 * tsk->flags are checked in the futex code to protect against 934 * tsk->flags are checked in the futex code to protect against
953 * an exiting task cleaning up the robust pi futexes. 935 * an exiting task cleaning up the robust pi futexes.
@@ -1108,20 +1090,23 @@ asmlinkage void sys_exit_group(int error_code)
1108 do_group_exit((error_code & 0xff) << 8); 1090 do_group_exit((error_code & 0xff) << 8);
1109} 1091}
1110 1092
1111static int eligible_child(pid_t pid, int options, struct task_struct *p) 1093static struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
1094{
1095 struct pid *pid = NULL;
1096 if (type == PIDTYPE_PID)
1097 pid = task->pids[type].pid;
1098 else if (type < PIDTYPE_MAX)
1099 pid = task->group_leader->pids[type].pid;
1100 return pid;
1101}
1102
1103static int eligible_child(enum pid_type type, struct pid *pid, int options,
1104 struct task_struct *p)
1112{ 1105{
1113 int err; 1106 int err;
1114 struct pid_namespace *ns;
1115 1107
1116 ns = current->nsproxy->pid_ns; 1108 if (type < PIDTYPE_MAX) {
1117 if (pid > 0) { 1109 if (task_pid_type(p, type) != pid)
1118 if (task_pid_nr_ns(p, ns) != pid)
1119 return 0;
1120 } else if (!pid) {
1121 if (task_pgrp_nr_ns(p, ns) != task_pgrp_vnr(current))
1122 return 0;
1123 } else if (pid != -1) {
1124 if (task_pgrp_nr_ns(p, ns) != -pid)
1125 return 0; 1110 return 0;
1126 } 1111 }
1127 1112
@@ -1140,18 +1125,16 @@ static int eligible_child(pid_t pid, int options, struct task_struct *p)
1140 if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0)) 1125 if (((p->exit_signal != SIGCHLD) ^ ((options & __WCLONE) != 0))
1141 && !(options & __WALL)) 1126 && !(options & __WALL))
1142 return 0; 1127 return 0;
1143 /*
1144 * Do not consider thread group leaders that are
1145 * in a non-empty thread group:
1146 */
1147 if (delay_group_leader(p))
1148 return 2;
1149 1128
1150 err = security_task_wait(p); 1129 err = security_task_wait(p);
1151 if (err) 1130 if (likely(!err))
1152 return err; 1131 return 1;
1153 1132
1154 return 1; 1133 if (type != PIDTYPE_PID)
1134 return 0;
1135 /* This child was explicitly requested, abort */
1136 read_unlock(&tasklist_lock);
1137 return err;
1155} 1138}
1156 1139
1157static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid, 1140static int wait_noreap_copyout(struct task_struct *p, pid_t pid, uid_t uid,
@@ -1191,20 +1174,13 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
1191{ 1174{
1192 unsigned long state; 1175 unsigned long state;
1193 int retval, status, traced; 1176 int retval, status, traced;
1194 struct pid_namespace *ns; 1177 pid_t pid = task_pid_vnr(p);
1195
1196 ns = current->nsproxy->pid_ns;
1197 1178
1198 if (unlikely(noreap)) { 1179 if (unlikely(noreap)) {
1199 pid_t pid = task_pid_nr_ns(p, ns);
1200 uid_t uid = p->uid; 1180 uid_t uid = p->uid;
1201 int exit_code = p->exit_code; 1181 int exit_code = p->exit_code;
1202 int why, status; 1182 int why, status;
1203 1183
1204 if (unlikely(p->exit_state != EXIT_ZOMBIE))
1205 return 0;
1206 if (unlikely(p->exit_signal == -1 && p->ptrace == 0))
1207 return 0;
1208 get_task_struct(p); 1184 get_task_struct(p);
1209 read_unlock(&tasklist_lock); 1185 read_unlock(&tasklist_lock);
1210 if ((exit_code & 0x7f) == 0) { 1186 if ((exit_code & 0x7f) == 0) {
@@ -1315,11 +1291,11 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
1315 retval = put_user(status, &infop->si_status); 1291 retval = put_user(status, &infop->si_status);
1316 } 1292 }
1317 if (!retval && infop) 1293 if (!retval && infop)
1318 retval = put_user(task_pid_nr_ns(p, ns), &infop->si_pid); 1294 retval = put_user(pid, &infop->si_pid);
1319 if (!retval && infop) 1295 if (!retval && infop)
1320 retval = put_user(p->uid, &infop->si_uid); 1296 retval = put_user(p->uid, &infop->si_uid);
1321 if (!retval) 1297 if (!retval)
1322 retval = task_pid_nr_ns(p, ns); 1298 retval = pid;
1323 1299
1324 if (traced) { 1300 if (traced) {
1325 write_lock_irq(&tasklist_lock); 1301 write_lock_irq(&tasklist_lock);
@@ -1351,21 +1327,38 @@ static int wait_task_zombie(struct task_struct *p, int noreap,
1351 * the lock and this task is uninteresting. If we return nonzero, we have 1327 * the lock and this task is uninteresting. If we return nonzero, we have
1352 * released the lock and the system call should return. 1328 * released the lock and the system call should return.
1353 */ 1329 */
1354static int wait_task_stopped(struct task_struct *p, int delayed_group_leader, 1330static int wait_task_stopped(struct task_struct *p,
1355 int noreap, struct siginfo __user *infop, 1331 int noreap, struct siginfo __user *infop,
1356 int __user *stat_addr, struct rusage __user *ru) 1332 int __user *stat_addr, struct rusage __user *ru)
1357{ 1333{
1358 int retval, exit_code; 1334 int retval, exit_code, why;
1335 uid_t uid = 0; /* unneeded, required by compiler */
1359 pid_t pid; 1336 pid_t pid;
1360 1337
1361 if (!p->exit_code) 1338 exit_code = 0;
1362 return 0; 1339 spin_lock_irq(&p->sighand->siglock);
1363 if (delayed_group_leader && !(p->ptrace & PT_PTRACED) && 1340
1364 p->signal->group_stop_count > 0) 1341 if (unlikely(!task_is_stopped_or_traced(p)))
1342 goto unlock_sig;
1343
1344 if (!(p->ptrace & PT_PTRACED) && p->signal->group_stop_count > 0)
1365 /* 1345 /*
1366 * A group stop is in progress and this is the group leader. 1346 * A group stop is in progress and this is the group leader.
1367 * We won't report until all threads have stopped. 1347 * We won't report until all threads have stopped.
1368 */ 1348 */
1349 goto unlock_sig;
1350
1351 exit_code = p->exit_code;
1352 if (!exit_code)
1353 goto unlock_sig;
1354
1355 if (!noreap)
1356 p->exit_code = 0;
1357
1358 uid = p->uid;
1359unlock_sig:
1360 spin_unlock_irq(&p->sighand->siglock);
1361 if (!exit_code)
1369 return 0; 1362 return 0;
1370 1363
1371 /* 1364 /*
@@ -1375,65 +1368,15 @@ static int wait_task_stopped(struct task_struct *p, int delayed_group_leader,
1375 * keep holding onto the tasklist_lock while we call getrusage and 1368 * keep holding onto the tasklist_lock while we call getrusage and
1376 * possibly take page faults for user memory. 1369 * possibly take page faults for user memory.
1377 */ 1370 */
1378 pid = task_pid_nr_ns(p, current->nsproxy->pid_ns);
1379 get_task_struct(p); 1371 get_task_struct(p);
1372 pid = task_pid_vnr(p);
1373 why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
1380 read_unlock(&tasklist_lock); 1374 read_unlock(&tasklist_lock);
1381 1375
1382 if (unlikely(noreap)) { 1376 if (unlikely(noreap))
1383 uid_t uid = p->uid;
1384 int why = (p->ptrace & PT_PTRACED) ? CLD_TRAPPED : CLD_STOPPED;
1385
1386 exit_code = p->exit_code;
1387 if (unlikely(!exit_code) || unlikely(p->exit_state))
1388 goto bail_ref;
1389 return wait_noreap_copyout(p, pid, uid, 1377 return wait_noreap_copyout(p, pid, uid,
1390 why, exit_code, 1378 why, exit_code,
1391 infop, ru); 1379 infop, ru);
1392 }
1393
1394 write_lock_irq(&tasklist_lock);
1395
1396 /*
1397 * This uses xchg to be atomic with the thread resuming and setting
1398 * it. It must also be done with the write lock held to prevent a
1399 * race with the EXIT_ZOMBIE case.
1400 */
1401 exit_code = xchg(&p->exit_code, 0);
1402 if (unlikely(p->exit_state)) {
1403 /*
1404 * The task resumed and then died. Let the next iteration
1405 * catch it in EXIT_ZOMBIE. Note that exit_code might
1406 * already be zero here if it resumed and did _exit(0).
1407 * The task itself is dead and won't touch exit_code again;
1408 * other processors in this function are locked out.
1409 */
1410 p->exit_code = exit_code;
1411 exit_code = 0;
1412 }
1413 if (unlikely(exit_code == 0)) {
1414 /*
1415 * Another thread in this function got to it first, or it
1416 * resumed, or it resumed and then died.
1417 */
1418 write_unlock_irq(&tasklist_lock);
1419bail_ref:
1420 put_task_struct(p);
1421 /*
1422 * We are returning to the wait loop without having successfully
1423 * removed the process and having released the lock. We cannot
1424 * continue, since the "p" task pointer is potentially stale.
1425 *
1426 * Return -EAGAIN, and do_wait() will restart the loop from the
1427 * beginning. Do _not_ re-acquire the lock.
1428 */
1429 return -EAGAIN;
1430 }
1431
1432 /* move to end of parent's list to avoid starvation */
1433 remove_parent(p);
1434 add_parent(p);
1435
1436 write_unlock_irq(&tasklist_lock);
1437 1380
1438 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0; 1381 retval = ru ? getrusage(p, RUSAGE_BOTH, ru) : 0;
1439 if (!retval && stat_addr) 1382 if (!retval && stat_addr)
@@ -1443,15 +1386,13 @@ bail_ref:
1443 if (!retval && infop) 1386 if (!retval && infop)
1444 retval = put_user(0, &infop->si_errno); 1387 retval = put_user(0, &infop->si_errno);
1445 if (!retval && infop) 1388 if (!retval && infop)
1446 retval = put_user((short)((p->ptrace & PT_PTRACED) 1389 retval = put_user(why, &infop->si_code);
1447 ? CLD_TRAPPED : CLD_STOPPED),
1448 &infop->si_code);
1449 if (!retval && infop) 1390 if (!retval && infop)
1450 retval = put_user(exit_code, &infop->si_status); 1391 retval = put_user(exit_code, &infop->si_status);
1451 if (!retval && infop) 1392 if (!retval && infop)
1452 retval = put_user(pid, &infop->si_pid); 1393 retval = put_user(pid, &infop->si_pid);
1453 if (!retval && infop) 1394 if (!retval && infop)
1454 retval = put_user(p->uid, &infop->si_uid); 1395 retval = put_user(uid, &infop->si_uid);
1455 if (!retval) 1396 if (!retval)
1456 retval = pid; 1397 retval = pid;
1457 put_task_struct(p); 1398 put_task_struct(p);
@@ -1473,7 +1414,6 @@ static int wait_task_continued(struct task_struct *p, int noreap,
1473 int retval; 1414 int retval;
1474 pid_t pid; 1415 pid_t pid;
1475 uid_t uid; 1416 uid_t uid;
1476 struct pid_namespace *ns;
1477 1417
1478 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED)) 1418 if (!(p->signal->flags & SIGNAL_STOP_CONTINUED))
1479 return 0; 1419 return 0;
@@ -1488,8 +1428,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
1488 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; 1428 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1489 spin_unlock_irq(&p->sighand->siglock); 1429 spin_unlock_irq(&p->sighand->siglock);
1490 1430
1491 ns = current->nsproxy->pid_ns; 1431 pid = task_pid_vnr(p);
1492 pid = task_pid_nr_ns(p, ns);
1493 uid = p->uid; 1432 uid = p->uid;
1494 get_task_struct(p); 1433 get_task_struct(p);
1495 read_unlock(&tasklist_lock); 1434 read_unlock(&tasklist_lock);
@@ -1500,7 +1439,7 @@ static int wait_task_continued(struct task_struct *p, int noreap,
1500 if (!retval && stat_addr) 1439 if (!retval && stat_addr)
1501 retval = put_user(0xffff, stat_addr); 1440 retval = put_user(0xffff, stat_addr);
1502 if (!retval) 1441 if (!retval)
1503 retval = task_pid_nr_ns(p, ns); 1442 retval = pid;
1504 } else { 1443 } else {
1505 retval = wait_noreap_copyout(p, pid, uid, 1444 retval = wait_noreap_copyout(p, pid, uid,
1506 CLD_CONTINUED, SIGCONT, 1445 CLD_CONTINUED, SIGCONT,
@@ -1511,101 +1450,63 @@ static int wait_task_continued(struct task_struct *p, int noreap,
1511 return retval; 1450 return retval;
1512} 1451}
1513 1452
1514 1453static long do_wait(enum pid_type type, struct pid *pid, int options,
1515static inline int my_ptrace_child(struct task_struct *p) 1454 struct siginfo __user *infop, int __user *stat_addr,
1516{ 1455 struct rusage __user *ru)
1517 if (!(p->ptrace & PT_PTRACED))
1518 return 0;
1519 if (!(p->ptrace & PT_ATTACHED))
1520 return 1;
1521 /*
1522 * This child was PTRACE_ATTACH'd. We should be seeing it only if
1523 * we are the attacher. If we are the real parent, this is a race
1524 * inside ptrace_attach. It is waiting for the tasklist_lock,
1525 * which we have to switch the parent links, but has already set
1526 * the flags in p->ptrace.
1527 */
1528 return (p->parent != p->real_parent);
1529}
1530
1531static long do_wait(pid_t pid, int options, struct siginfo __user *infop,
1532 int __user *stat_addr, struct rusage __user *ru)
1533{ 1456{
1534 DECLARE_WAITQUEUE(wait, current); 1457 DECLARE_WAITQUEUE(wait, current);
1535 struct task_struct *tsk; 1458 struct task_struct *tsk;
1536 int flag, retval; 1459 int flag, retval;
1537 int allowed, denied;
1538 1460
1539 add_wait_queue(&current->signal->wait_chldexit,&wait); 1461 add_wait_queue(&current->signal->wait_chldexit,&wait);
1540repeat: 1462repeat:
1463 /* If there is nothing that can match our critier just get out */
1464 retval = -ECHILD;
1465 if ((type < PIDTYPE_MAX) && (!pid || hlist_empty(&pid->tasks[type])))
1466 goto end;
1467
1541 /* 1468 /*
1542 * We will set this flag if we see any child that might later 1469 * We will set this flag if we see any child that might later
1543 * match our criteria, even if we are not able to reap it yet. 1470 * match our criteria, even if we are not able to reap it yet.
1544 */ 1471 */
1545 flag = 0; 1472 flag = retval = 0;
1546 allowed = denied = 0;
1547 current->state = TASK_INTERRUPTIBLE; 1473 current->state = TASK_INTERRUPTIBLE;
1548 read_lock(&tasklist_lock); 1474 read_lock(&tasklist_lock);
1549 tsk = current; 1475 tsk = current;
1550 do { 1476 do {
1551 struct task_struct *p; 1477 struct task_struct *p;
1552 int ret;
1553 1478
1554 list_for_each_entry(p, &tsk->children, sibling) { 1479 list_for_each_entry(p, &tsk->children, sibling) {
1555 ret = eligible_child(pid, options, p); 1480 int ret = eligible_child(type, pid, options, p);
1556 if (!ret) 1481 if (!ret)
1557 continue; 1482 continue;
1558 1483
1559 if (unlikely(ret < 0)) { 1484 if (unlikely(ret < 0)) {
1560 denied = ret; 1485 retval = ret;
1561 continue; 1486 } else if (task_is_stopped_or_traced(p)) {
1562 }
1563 allowed = 1;
1564
1565 if (task_is_stopped_or_traced(p)) {
1566 /* 1487 /*
1567 * It's stopped now, so it might later 1488 * It's stopped now, so it might later
1568 * continue, exit, or stop again. 1489 * continue, exit, or stop again.
1569 *
1570 * When we hit the race with PTRACE_ATTACH, we
1571 * will not report this child. But the race
1572 * means it has not yet been moved to our
1573 * ptrace_children list, so we need to set the
1574 * flag here to avoid a spurious ECHILD when
1575 * the race happens with the only child.
1576 */ 1490 */
1577 flag = 1; 1491 flag = 1;
1492 if (!(p->ptrace & PT_PTRACED) &&
1493 !(options & WUNTRACED))
1494 continue;
1578 1495
1579 if (!my_ptrace_child(p)) { 1496 retval = wait_task_stopped(p,
1580 if (task_is_traced(p))
1581 continue;
1582 if (!(options & WUNTRACED))
1583 continue;
1584 }
1585
1586 retval = wait_task_stopped(p, ret == 2,
1587 (options & WNOWAIT), infop, 1497 (options & WNOWAIT), infop,
1588 stat_addr, ru); 1498 stat_addr, ru);
1589 if (retval == -EAGAIN) 1499 } else if (p->exit_state == EXIT_ZOMBIE &&
1590 goto repeat; 1500 !delay_group_leader(p)) {
1591 if (retval != 0) /* He released the lock. */
1592 goto end;
1593 } else if (p->exit_state == EXIT_ZOMBIE) {
1594 /* 1501 /*
1595 * Eligible but we cannot release it yet: 1502 * We don't reap group leaders with subthreads.
1596 */ 1503 */
1597 if (ret == 2)
1598 goto check_continued;
1599 if (!likely(options & WEXITED)) 1504 if (!likely(options & WEXITED))
1600 continue; 1505 continue;
1601 retval = wait_task_zombie(p, 1506 retval = wait_task_zombie(p,
1602 (options & WNOWAIT), infop, 1507 (options & WNOWAIT), infop,
1603 stat_addr, ru); 1508 stat_addr, ru);
1604 /* He released the lock. */
1605 if (retval != 0)
1606 goto end;
1607 } else if (p->exit_state != EXIT_DEAD) { 1509 } else if (p->exit_state != EXIT_DEAD) {
1608check_continued:
1609 /* 1510 /*
1610 * It's running now, so it might later 1511 * It's running now, so it might later
1611 * exit, stop, or stop and then continue. 1512 * exit, stop, or stop and then continue.
@@ -1616,17 +1517,20 @@ check_continued:
1616 retval = wait_task_continued(p, 1517 retval = wait_task_continued(p,
1617 (options & WNOWAIT), infop, 1518 (options & WNOWAIT), infop,
1618 stat_addr, ru); 1519 stat_addr, ru);
1619 if (retval != 0) /* He released the lock. */
1620 goto end;
1621 } 1520 }
1521 if (retval != 0) /* tasklist_lock released */
1522 goto end;
1622 } 1523 }
1623 if (!flag) { 1524 if (!flag) {
1624 list_for_each_entry(p, &tsk->ptrace_children, 1525 list_for_each_entry(p, &tsk->ptrace_children,
1625 ptrace_list) { 1526 ptrace_list) {
1626 if (!eligible_child(pid, options, p)) 1527 flag = eligible_child(type, pid, options, p);
1528 if (!flag)
1627 continue; 1529 continue;
1628 flag = 1; 1530 if (likely(flag > 0))
1629 break; 1531 break;
1532 retval = flag;
1533 goto end;
1630 } 1534 }
1631 } 1535 }
1632 if (options & __WNOTHREAD) 1536 if (options & __WNOTHREAD)
@@ -1634,10 +1538,9 @@ check_continued:
1634 tsk = next_thread(tsk); 1538 tsk = next_thread(tsk);
1635 BUG_ON(tsk->signal != current->signal); 1539 BUG_ON(tsk->signal != current->signal);
1636 } while (tsk != current); 1540 } while (tsk != current);
1637
1638 read_unlock(&tasklist_lock); 1541 read_unlock(&tasklist_lock);
1542
1639 if (flag) { 1543 if (flag) {
1640 retval = 0;
1641 if (options & WNOHANG) 1544 if (options & WNOHANG)
1642 goto end; 1545 goto end;
1643 retval = -ERESTARTSYS; 1546 retval = -ERESTARTSYS;
@@ -1647,14 +1550,12 @@ check_continued:
1647 goto repeat; 1550 goto repeat;
1648 } 1551 }
1649 retval = -ECHILD; 1552 retval = -ECHILD;
1650 if (unlikely(denied) && !allowed)
1651 retval = denied;
1652end: 1553end:
1653 current->state = TASK_RUNNING; 1554 current->state = TASK_RUNNING;
1654 remove_wait_queue(&current->signal->wait_chldexit,&wait); 1555 remove_wait_queue(&current->signal->wait_chldexit,&wait);
1655 if (infop) { 1556 if (infop) {
1656 if (retval > 0) 1557 if (retval > 0)
1657 retval = 0; 1558 retval = 0;
1658 else { 1559 else {
1659 /* 1560 /*
1660 * For a WNOHANG return, clear out all the fields 1561 * For a WNOHANG return, clear out all the fields
@@ -1678,10 +1579,12 @@ end:
1678 return retval; 1579 return retval;
1679} 1580}
1680 1581
1681asmlinkage long sys_waitid(int which, pid_t pid, 1582asmlinkage long sys_waitid(int which, pid_t upid,
1682 struct siginfo __user *infop, int options, 1583 struct siginfo __user *infop, int options,
1683 struct rusage __user *ru) 1584 struct rusage __user *ru)
1684{ 1585{
1586 struct pid *pid = NULL;
1587 enum pid_type type;
1685 long ret; 1588 long ret;
1686 1589
1687 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED)) 1590 if (options & ~(WNOHANG|WNOWAIT|WEXITED|WSTOPPED|WCONTINUED))
@@ -1691,37 +1594,58 @@ asmlinkage long sys_waitid(int which, pid_t pid,
1691 1594
1692 switch (which) { 1595 switch (which) {
1693 case P_ALL: 1596 case P_ALL:
1694 pid = -1; 1597 type = PIDTYPE_MAX;
1695 break; 1598 break;
1696 case P_PID: 1599 case P_PID:
1697 if (pid <= 0) 1600 type = PIDTYPE_PID;
1601 if (upid <= 0)
1698 return -EINVAL; 1602 return -EINVAL;
1699 break; 1603 break;
1700 case P_PGID: 1604 case P_PGID:
1701 if (pid <= 0) 1605 type = PIDTYPE_PGID;
1606 if (upid <= 0)
1702 return -EINVAL; 1607 return -EINVAL;
1703 pid = -pid;
1704 break; 1608 break;
1705 default: 1609 default:
1706 return -EINVAL; 1610 return -EINVAL;
1707 } 1611 }
1708 1612
1709 ret = do_wait(pid, options, infop, NULL, ru); 1613 if (type < PIDTYPE_MAX)
1614 pid = find_get_pid(upid);
1615 ret = do_wait(type, pid, options, infop, NULL, ru);
1616 put_pid(pid);
1710 1617
1711 /* avoid REGPARM breakage on x86: */ 1618 /* avoid REGPARM breakage on x86: */
1712 prevent_tail_call(ret); 1619 prevent_tail_call(ret);
1713 return ret; 1620 return ret;
1714} 1621}
1715 1622
1716asmlinkage long sys_wait4(pid_t pid, int __user *stat_addr, 1623asmlinkage long sys_wait4(pid_t upid, int __user *stat_addr,
1717 int options, struct rusage __user *ru) 1624 int options, struct rusage __user *ru)
1718{ 1625{
1626 struct pid *pid = NULL;
1627 enum pid_type type;
1719 long ret; 1628 long ret;
1720 1629
1721 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED| 1630 if (options & ~(WNOHANG|WUNTRACED|WCONTINUED|
1722 __WNOTHREAD|__WCLONE|__WALL)) 1631 __WNOTHREAD|__WCLONE|__WALL))
1723 return -EINVAL; 1632 return -EINVAL;
1724 ret = do_wait(pid, options | WEXITED, NULL, stat_addr, ru); 1633
1634 if (upid == -1)
1635 type = PIDTYPE_MAX;
1636 else if (upid < 0) {
1637 type = PIDTYPE_PGID;
1638 pid = find_get_pid(-upid);
1639 } else if (upid == 0) {
1640 type = PIDTYPE_PGID;
1641 pid = get_pid(task_pgrp(current));
1642 } else /* upid > 0 */ {
1643 type = PIDTYPE_PID;
1644 pid = find_get_pid(upid);
1645 }
1646
1647 ret = do_wait(type, pid, options | WEXITED, NULL, stat_addr, ru);
1648 put_pid(pid);
1725 1649
1726 /* avoid REGPARM breakage on x86: */ 1650 /* avoid REGPARM breakage on x86: */
1727 prevent_tail_call(ret); 1651 prevent_tail_call(ret);
diff --git a/kernel/fork.c b/kernel/fork.c
index b2ef8e4fad70..4363a4eb84e3 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -390,7 +390,7 @@ struct mm_struct * mm_alloc(void)
390 * is dropped: either by a lazy thread or by 390 * is dropped: either by a lazy thread or by
391 * mmput. Free the page directory and the mm. 391 * mmput. Free the page directory and the mm.
392 */ 392 */
393void fastcall __mmdrop(struct mm_struct *mm) 393void __mmdrop(struct mm_struct *mm)
394{ 394{
395 BUG_ON(mm == &init_mm); 395 BUG_ON(mm == &init_mm);
396 mm_free_pgd(mm); 396 mm_free_pgd(mm);
@@ -909,7 +909,6 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
909 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); 909 hrtimer_init(&sig->real_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
910 sig->it_real_incr.tv64 = 0; 910 sig->it_real_incr.tv64 = 0;
911 sig->real_timer.function = it_real_fn; 911 sig->real_timer.function = it_real_fn;
912 sig->tsk = tsk;
913 912
914 sig->it_virt_expires = cputime_zero; 913 sig->it_virt_expires = cputime_zero;
915 sig->it_virt_incr = cputime_zero; 914 sig->it_virt_incr = cputime_zero;
@@ -1338,6 +1337,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1338 if (clone_flags & CLONE_NEWPID) 1337 if (clone_flags & CLONE_NEWPID)
1339 p->nsproxy->pid_ns->child_reaper = p; 1338 p->nsproxy->pid_ns->child_reaper = p;
1340 1339
1340 p->signal->leader_pid = pid;
1341 p->signal->tty = current->signal->tty; 1341 p->signal->tty = current->signal->tty;
1342 set_task_pgrp(p, task_pgrp_nr(current)); 1342 set_task_pgrp(p, task_pgrp_nr(current));
1343 set_task_session(p, task_session_nr(current)); 1343 set_task_session(p, task_session_nr(current));
@@ -1488,13 +1488,7 @@ long do_fork(unsigned long clone_flags,
1488 if (!IS_ERR(p)) { 1488 if (!IS_ERR(p)) {
1489 struct completion vfork; 1489 struct completion vfork;
1490 1490
1491 /* 1491 nr = task_pid_vnr(p);
1492 * this is enough to call pid_nr_ns here, but this if
1493 * improves optimisation of regular fork()
1494 */
1495 nr = (clone_flags & CLONE_NEWPID) ?
1496 task_pid_nr_ns(p, current->nsproxy->pid_ns) :
1497 task_pid_vnr(p);
1498 1492
1499 if (clone_flags & CLONE_PARENT_SETTID) 1493 if (clone_flags & CLONE_PARENT_SETTID)
1500 put_user(nr, parent_tidptr); 1494 put_user(nr, parent_tidptr);
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 44019ce30a14..cc54c6276356 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -286,7 +286,7 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
286 * Note: The caller is expected to handle the ack, clear, mask and 286 * Note: The caller is expected to handle the ack, clear, mask and
287 * unmask issues if necessary. 287 * unmask issues if necessary.
288 */ 288 */
289void fastcall 289void
290handle_simple_irq(unsigned int irq, struct irq_desc *desc) 290handle_simple_irq(unsigned int irq, struct irq_desc *desc)
291{ 291{
292 struct irqaction *action; 292 struct irqaction *action;
@@ -327,7 +327,7 @@ out_unlock:
327 * it after the associated handler has acknowledged the device, so the 327 * it after the associated handler has acknowledged the device, so the
328 * interrupt line is back to inactive. 328 * interrupt line is back to inactive.
329 */ 329 */
330void fastcall 330void
331handle_level_irq(unsigned int irq, struct irq_desc *desc) 331handle_level_irq(unsigned int irq, struct irq_desc *desc)
332{ 332{
333 unsigned int cpu = smp_processor_id(); 333 unsigned int cpu = smp_processor_id();
@@ -375,7 +375,7 @@ out_unlock:
375 * for modern forms of interrupt handlers, which handle the flow 375 * for modern forms of interrupt handlers, which handle the flow
376 * details in hardware, transparently. 376 * details in hardware, transparently.
377 */ 377 */
378void fastcall 378void
379handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) 379handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
380{ 380{
381 unsigned int cpu = smp_processor_id(); 381 unsigned int cpu = smp_processor_id();
@@ -434,7 +434,7 @@ out:
434 * the handler was running. If all pending interrupts are handled, the 434 * the handler was running. If all pending interrupts are handled, the
435 * loop is left. 435 * loop is left.
436 */ 436 */
437void fastcall 437void
438handle_edge_irq(unsigned int irq, struct irq_desc *desc) 438handle_edge_irq(unsigned int irq, struct irq_desc *desc)
439{ 439{
440 const unsigned int cpu = smp_processor_id(); 440 const unsigned int cpu = smp_processor_id();
@@ -505,7 +505,7 @@ out_unlock:
505 * 505 *
506 * Per CPU interrupts on SMP machines without locking requirements 506 * Per CPU interrupts on SMP machines without locking requirements
507 */ 507 */
508void fastcall 508void
509handle_percpu_irq(unsigned int irq, struct irq_desc *desc) 509handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
510{ 510{
511 irqreturn_t action_ret; 511 irqreturn_t action_ret;
@@ -589,3 +589,39 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
589 set_irq_chip(irq, chip); 589 set_irq_chip(irq, chip);
590 __set_irq_handler(irq, handle, 0, name); 590 __set_irq_handler(irq, handle, 0, name);
591} 591}
592
593void __init set_irq_noprobe(unsigned int irq)
594{
595 struct irq_desc *desc;
596 unsigned long flags;
597
598 if (irq >= NR_IRQS) {
599 printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
600
601 return;
602 }
603
604 desc = irq_desc + irq;
605
606 spin_lock_irqsave(&desc->lock, flags);
607 desc->status |= IRQ_NOPROBE;
608 spin_unlock_irqrestore(&desc->lock, flags);
609}
610
611void __init set_irq_probe(unsigned int irq)
612{
613 struct irq_desc *desc;
614 unsigned long flags;
615
616 if (irq >= NR_IRQS) {
617 printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
618
619 return;
620 }
621
622 desc = irq_desc + irq;
623
624 spin_lock_irqsave(&desc->lock, flags);
625 desc->status &= ~IRQ_NOPROBE;
626 spin_unlock_irqrestore(&desc->lock, flags);
627}
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index dc335ad27525..5fa6198e9139 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -25,7 +25,7 @@
25 * 25 *
26 * Handles spurious and unhandled IRQ's. It also prints a debugmessage. 26 * Handles spurious and unhandled IRQ's. It also prints a debugmessage.
27 */ 27 */
28void fastcall 28void
29handle_bad_irq(unsigned int irq, struct irq_desc *desc) 29handle_bad_irq(unsigned int irq, struct irq_desc *desc)
30{ 30{
31 print_irq_desc(irq, desc); 31 print_irq_desc(irq, desc);
@@ -163,7 +163,7 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
163 * This is the original x86 implementation which is used for every 163 * This is the original x86 implementation which is used for every
164 * interrupt type. 164 * interrupt type.
165 */ 165 */
166fastcall unsigned int __do_IRQ(unsigned int irq) 166unsigned int __do_IRQ(unsigned int irq)
167{ 167{
168 struct irq_desc *desc = irq_desc + irq; 168 struct irq_desc *desc = irq_desc + irq;
169 struct irqaction *action; 169 struct irqaction *action;
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 2fab344dbf56..ab982747d9bd 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -132,7 +132,7 @@ enum hrtimer_restart it_real_fn(struct hrtimer *timer)
132 struct signal_struct *sig = 132 struct signal_struct *sig =
133 container_of(timer, struct signal_struct, real_timer); 133 container_of(timer, struct signal_struct, real_timer);
134 134
135 send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk); 135 kill_pid_info(SIGALRM, SEND_SIG_PRIV, sig->leader_pid);
136 136
137 return HRTIMER_NORESTART; 137 return HRTIMER_NORESTART;
138} 138}
diff --git a/kernel/module.c b/kernel/module.c
index bd60278ee703..4202da97a1da 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -46,6 +46,7 @@
46#include <asm/semaphore.h> 46#include <asm/semaphore.h>
47#include <asm/cacheflush.h> 47#include <asm/cacheflush.h>
48#include <linux/license.h> 48#include <linux/license.h>
49#include <asm/sections.h>
49 50
50#if 0 51#if 0
51#define DEBUGP printk 52#define DEBUGP printk
@@ -290,7 +291,7 @@ static unsigned long __find_symbol(const char *name,
290 } 291 }
291 } 292 }
292 DEBUGP("Failed to find symbol %s\n", name); 293 DEBUGP("Failed to find symbol %s\n", name);
293 return 0; 294 return -ENOENT;
294} 295}
295 296
296/* Search for module by name: must hold module_mutex. */ 297/* Search for module by name: must hold module_mutex. */
@@ -343,9 +344,6 @@ static inline unsigned int block_size(int val)
343 return val; 344 return val;
344} 345}
345 346
346/* Created by linker magic */
347extern char __per_cpu_start[], __per_cpu_end[];
348
349static void *percpu_modalloc(unsigned long size, unsigned long align, 347static void *percpu_modalloc(unsigned long size, unsigned long align,
350 const char *name) 348 const char *name)
351{ 349{
@@ -783,7 +781,7 @@ void __symbol_put(const char *symbol)
783 const unsigned long *crc; 781 const unsigned long *crc;
784 782
785 preempt_disable(); 783 preempt_disable();
786 if (!__find_symbol(symbol, &owner, &crc, 1)) 784 if (IS_ERR_VALUE(__find_symbol(symbol, &owner, &crc, 1)))
787 BUG(); 785 BUG();
788 module_put(owner); 786 module_put(owner);
789 preempt_enable(); 787 preempt_enable();
@@ -929,7 +927,8 @@ static inline int check_modstruct_version(Elf_Shdr *sechdrs,
929 const unsigned long *crc; 927 const unsigned long *crc;
930 struct module *owner; 928 struct module *owner;
931 929
932 if (!__find_symbol("struct_module", &owner, &crc, 1)) 930 if (IS_ERR_VALUE(__find_symbol("struct_module",
931 &owner, &crc, 1)))
933 BUG(); 932 BUG();
934 return check_version(sechdrs, versindex, "struct_module", mod, 933 return check_version(sechdrs, versindex, "struct_module", mod,
935 crc); 934 crc);
@@ -978,12 +977,12 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
978 977
979 ret = __find_symbol(name, &owner, &crc, 978 ret = __find_symbol(name, &owner, &crc,
980 !(mod->taints & TAINT_PROPRIETARY_MODULE)); 979 !(mod->taints & TAINT_PROPRIETARY_MODULE));
981 if (ret) { 980 if (!IS_ERR_VALUE(ret)) {
982 /* use_module can fail due to OOM, 981 /* use_module can fail due to OOM,
983 or module initialization or unloading */ 982 or module initialization or unloading */
984 if (!check_version(sechdrs, versindex, name, mod, crc) || 983 if (!check_version(sechdrs, versindex, name, mod, crc) ||
985 !use_module(mod, owner)) 984 !use_module(mod, owner))
986 ret = 0; 985 ret = -EINVAL;
987 } 986 }
988 return ret; 987 return ret;
989} 988}
@@ -1371,7 +1370,9 @@ void *__symbol_get(const char *symbol)
1371 1370
1372 preempt_disable(); 1371 preempt_disable();
1373 value = __find_symbol(symbol, &owner, &crc, 1); 1372 value = __find_symbol(symbol, &owner, &crc, 1);
1374 if (value && strong_try_module_get(owner) != 0) 1373 if (IS_ERR_VALUE(value))
1374 value = 0;
1375 else if (strong_try_module_get(owner))
1375 value = 0; 1376 value = 0;
1376 preempt_enable(); 1377 preempt_enable();
1377 1378
@@ -1391,14 +1392,16 @@ static int verify_export_symbols(struct module *mod)
1391 const unsigned long *crc; 1392 const unsigned long *crc;
1392 1393
1393 for (i = 0; i < mod->num_syms; i++) 1394 for (i = 0; i < mod->num_syms; i++)
1394 if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) { 1395 if (!IS_ERR_VALUE(__find_symbol(mod->syms[i].name,
1396 &owner, &crc, 1))) {
1395 name = mod->syms[i].name; 1397 name = mod->syms[i].name;
1396 ret = -ENOEXEC; 1398 ret = -ENOEXEC;
1397 goto dup; 1399 goto dup;
1398 } 1400 }
1399 1401
1400 for (i = 0; i < mod->num_gpl_syms; i++) 1402 for (i = 0; i < mod->num_gpl_syms; i++)
1401 if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) { 1403 if (!IS_ERR_VALUE(__find_symbol(mod->gpl_syms[i].name,
1404 &owner, &crc, 1))) {
1402 name = mod->gpl_syms[i].name; 1405 name = mod->gpl_syms[i].name;
1403 ret = -ENOEXEC; 1406 ret = -ENOEXEC;
1404 goto dup; 1407 goto dup;
@@ -1448,7 +1451,7 @@ static int simplify_symbols(Elf_Shdr *sechdrs,
1448 strtab + sym[i].st_name, mod); 1451 strtab + sym[i].st_name, mod);
1449 1452
1450 /* Ok if resolved. */ 1453 /* Ok if resolved. */
1451 if (sym[i].st_value != 0) 1454 if (!IS_ERR_VALUE(sym[i].st_value))
1452 break; 1455 break;
1453 /* Ok if weak. */ 1456 /* Ok if weak. */
1454 if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK) 1457 if (ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
@@ -2250,7 +2253,7 @@ static const char *get_ksymbol(struct module *mod,
2250 2253
2251/* For kallsyms to ask for address resolution. NULL means not found. Careful 2254/* For kallsyms to ask for address resolution. NULL means not found. Careful
2252 * not to lock to avoid deadlock on oopses, simply disable preemption. */ 2255 * not to lock to avoid deadlock on oopses, simply disable preemption. */
2253char *module_address_lookup(unsigned long addr, 2256const char *module_address_lookup(unsigned long addr,
2254 unsigned long *size, 2257 unsigned long *size,
2255 unsigned long *offset, 2258 unsigned long *offset,
2256 char **modname, 2259 char **modname,
@@ -2275,7 +2278,7 @@ char *module_address_lookup(unsigned long addr,
2275 ret = namebuf; 2278 ret = namebuf;
2276 } 2279 }
2277 preempt_enable(); 2280 preempt_enable();
2278 return (char *)ret; 2281 return ret;
2279} 2282}
2280 2283
2281int lookup_module_symbol_name(unsigned long addr, char *symname) 2284int lookup_module_symbol_name(unsigned long addr, char *symname)
diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
index d17436cdea1b..3aaa06c561de 100644
--- a/kernel/mutex-debug.c
+++ b/kernel/mutex-debug.c
@@ -107,7 +107,7 @@ void debug_mutex_init(struct mutex *lock, const char *name,
107 * use of the mutex is forbidden. The mutex must not be locked when 107 * use of the mutex is forbidden. The mutex must not be locked when
108 * this function is called. 108 * this function is called.
109 */ 109 */
110void fastcall mutex_destroy(struct mutex *lock) 110void mutex_destroy(struct mutex *lock)
111{ 111{
112 DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock)); 112 DEBUG_LOCKS_WARN_ON(mutex_is_locked(lock));
113 lock->magic = NULL; 113 lock->magic = NULL;
diff --git a/kernel/mutex.c b/kernel/mutex.c
index d9ec9b666250..d046a345d365 100644
--- a/kernel/mutex.c
+++ b/kernel/mutex.c
@@ -58,7 +58,7 @@ EXPORT_SYMBOL(__mutex_init);
58 * We also put the fastpath first in the kernel image, to make sure the 58 * We also put the fastpath first in the kernel image, to make sure the
59 * branch is predicted by the CPU as default-untaken. 59 * branch is predicted by the CPU as default-untaken.
60 */ 60 */
61static void fastcall noinline __sched 61static void noinline __sched
62__mutex_lock_slowpath(atomic_t *lock_count); 62__mutex_lock_slowpath(atomic_t *lock_count);
63 63
64/*** 64/***
@@ -82,7 +82,7 @@ __mutex_lock_slowpath(atomic_t *lock_count);
82 * 82 *
83 * This function is similar to (but not equivalent to) down(). 83 * This function is similar to (but not equivalent to) down().
84 */ 84 */
85void inline fastcall __sched mutex_lock(struct mutex *lock) 85void inline __sched mutex_lock(struct mutex *lock)
86{ 86{
87 might_sleep(); 87 might_sleep();
88 /* 88 /*
@@ -95,8 +95,7 @@ void inline fastcall __sched mutex_lock(struct mutex *lock)
95EXPORT_SYMBOL(mutex_lock); 95EXPORT_SYMBOL(mutex_lock);
96#endif 96#endif
97 97
98static void fastcall noinline __sched 98static noinline void __sched __mutex_unlock_slowpath(atomic_t *lock_count);
99__mutex_unlock_slowpath(atomic_t *lock_count);
100 99
101/*** 100/***
102 * mutex_unlock - release the mutex 101 * mutex_unlock - release the mutex
@@ -109,7 +108,7 @@ __mutex_unlock_slowpath(atomic_t *lock_count);
109 * 108 *
110 * This function is similar to (but not equivalent to) up(). 109 * This function is similar to (but not equivalent to) up().
111 */ 110 */
112void fastcall __sched mutex_unlock(struct mutex *lock) 111void __sched mutex_unlock(struct mutex *lock)
113{ 112{
114 /* 113 /*
115 * The unlocking fastpath is the 0->1 transition from 'locked' 114 * The unlocking fastpath is the 0->1 transition from 'locked'
@@ -234,7 +233,7 @@ EXPORT_SYMBOL_GPL(mutex_lock_interruptible_nested);
234/* 233/*
235 * Release the lock, slowpath: 234 * Release the lock, slowpath:
236 */ 235 */
237static fastcall inline void 236static inline void
238__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested) 237__mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
239{ 238{
240 struct mutex *lock = container_of(lock_count, struct mutex, count); 239 struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -271,7 +270,7 @@ __mutex_unlock_common_slowpath(atomic_t *lock_count, int nested)
271/* 270/*
272 * Release the lock, slowpath: 271 * Release the lock, slowpath:
273 */ 272 */
274static fastcall noinline void 273static noinline void
275__mutex_unlock_slowpath(atomic_t *lock_count) 274__mutex_unlock_slowpath(atomic_t *lock_count)
276{ 275{
277 __mutex_unlock_common_slowpath(lock_count, 1); 276 __mutex_unlock_common_slowpath(lock_count, 1);
@@ -282,10 +281,10 @@ __mutex_unlock_slowpath(atomic_t *lock_count)
282 * Here come the less common (and hence less performance-critical) APIs: 281 * Here come the less common (and hence less performance-critical) APIs:
283 * mutex_lock_interruptible() and mutex_trylock(). 282 * mutex_lock_interruptible() and mutex_trylock().
284 */ 283 */
285static int fastcall noinline __sched 284static noinline int __sched
286__mutex_lock_killable_slowpath(atomic_t *lock_count); 285__mutex_lock_killable_slowpath(atomic_t *lock_count);
287 286
288static noinline int fastcall __sched 287static noinline int __sched
289__mutex_lock_interruptible_slowpath(atomic_t *lock_count); 288__mutex_lock_interruptible_slowpath(atomic_t *lock_count);
290 289
291/*** 290/***
@@ -299,7 +298,7 @@ __mutex_lock_interruptible_slowpath(atomic_t *lock_count);
299 * 298 *
300 * This function is similar to (but not equivalent to) down_interruptible(). 299 * This function is similar to (but not equivalent to) down_interruptible().
301 */ 300 */
302int fastcall __sched mutex_lock_interruptible(struct mutex *lock) 301int __sched mutex_lock_interruptible(struct mutex *lock)
303{ 302{
304 might_sleep(); 303 might_sleep();
305 return __mutex_fastpath_lock_retval 304 return __mutex_fastpath_lock_retval
@@ -308,7 +307,7 @@ int fastcall __sched mutex_lock_interruptible(struct mutex *lock)
308 307
309EXPORT_SYMBOL(mutex_lock_interruptible); 308EXPORT_SYMBOL(mutex_lock_interruptible);
310 309
311int fastcall __sched mutex_lock_killable(struct mutex *lock) 310int __sched mutex_lock_killable(struct mutex *lock)
312{ 311{
313 might_sleep(); 312 might_sleep();
314 return __mutex_fastpath_lock_retval 313 return __mutex_fastpath_lock_retval
@@ -316,7 +315,7 @@ int fastcall __sched mutex_lock_killable(struct mutex *lock)
316} 315}
317EXPORT_SYMBOL(mutex_lock_killable); 316EXPORT_SYMBOL(mutex_lock_killable);
318 317
319static void fastcall noinline __sched 318static noinline void __sched
320__mutex_lock_slowpath(atomic_t *lock_count) 319__mutex_lock_slowpath(atomic_t *lock_count)
321{ 320{
322 struct mutex *lock = container_of(lock_count, struct mutex, count); 321 struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -324,7 +323,7 @@ __mutex_lock_slowpath(atomic_t *lock_count)
324 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_); 323 __mutex_lock_common(lock, TASK_UNINTERRUPTIBLE, 0, _RET_IP_);
325} 324}
326 325
327static int fastcall noinline __sched 326static noinline int __sched
328__mutex_lock_killable_slowpath(atomic_t *lock_count) 327__mutex_lock_killable_slowpath(atomic_t *lock_count)
329{ 328{
330 struct mutex *lock = container_of(lock_count, struct mutex, count); 329 struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -332,7 +331,7 @@ __mutex_lock_killable_slowpath(atomic_t *lock_count)
332 return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_); 331 return __mutex_lock_common(lock, TASK_KILLABLE, 0, _RET_IP_);
333} 332}
334 333
335static noinline int fastcall __sched 334static noinline int __sched
336__mutex_lock_interruptible_slowpath(atomic_t *lock_count) 335__mutex_lock_interruptible_slowpath(atomic_t *lock_count)
337{ 336{
338 struct mutex *lock = container_of(lock_count, struct mutex, count); 337 struct mutex *lock = container_of(lock_count, struct mutex, count);
@@ -381,7 +380,7 @@ static inline int __mutex_trylock_slowpath(atomic_t *lock_count)
381 * This function must not be used in interrupt context. The 380 * This function must not be used in interrupt context. The
382 * mutex must be released by the same task that acquired it. 381 * mutex must be released by the same task that acquired it.
383 */ 382 */
384int fastcall __sched mutex_trylock(struct mutex *lock) 383int __sched mutex_trylock(struct mutex *lock)
385{ 384{
386 return __mutex_fastpath_trylock(&lock->count, 385 return __mutex_fastpath_trylock(&lock->count,
387 __mutex_trylock_slowpath); 386 __mutex_trylock_slowpath);
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c
index 79f871bc0ef4..f5d332cf8c63 100644
--- a/kernel/nsproxy.c
+++ b/kernel/nsproxy.c
@@ -21,6 +21,7 @@
21#include <linux/utsname.h> 21#include <linux/utsname.h>
22#include <linux/pid_namespace.h> 22#include <linux/pid_namespace.h>
23#include <net/net_namespace.h> 23#include <net/net_namespace.h>
24#include <linux/ipc_namespace.h>
24 25
25static struct kmem_cache *nsproxy_cachep; 26static struct kmem_cache *nsproxy_cachep;
26 27
diff --git a/kernel/params.c b/kernel/params.c
index e28c70628bb7..afc46a23eb6d 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -180,12 +180,12 @@ int parse_args(const char *name,
180#define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn) \ 180#define STANDARD_PARAM_DEF(name, type, format, tmptype, strtolfn) \
181 int param_set_##name(const char *val, struct kernel_param *kp) \ 181 int param_set_##name(const char *val, struct kernel_param *kp) \
182 { \ 182 { \
183 char *endp; \
184 tmptype l; \ 183 tmptype l; \
184 int ret; \
185 \ 185 \
186 if (!val) return -EINVAL; \ 186 if (!val) return -EINVAL; \
187 l = strtolfn(val, &endp, 0); \ 187 ret = strtolfn(val, 0, &l); \
188 if (endp == val || ((type)l != l)) \ 188 if (ret == -EINVAL || ((type)l != l)) \
189 return -EINVAL; \ 189 return -EINVAL; \
190 *((type *)kp->arg) = l; \ 190 *((type *)kp->arg) = l; \
191 return 0; \ 191 return 0; \
@@ -195,13 +195,13 @@ int parse_args(const char *name,
195 return sprintf(buffer, format, *((type *)kp->arg)); \ 195 return sprintf(buffer, format, *((type *)kp->arg)); \
196 } 196 }
197 197
198STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, simple_strtoul); 198STANDARD_PARAM_DEF(byte, unsigned char, "%c", unsigned long, strict_strtoul);
199STANDARD_PARAM_DEF(short, short, "%hi", long, simple_strtol); 199STANDARD_PARAM_DEF(short, short, "%hi", long, strict_strtol);
200STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, simple_strtoul); 200STANDARD_PARAM_DEF(ushort, unsigned short, "%hu", unsigned long, strict_strtoul);
201STANDARD_PARAM_DEF(int, int, "%i", long, simple_strtol); 201STANDARD_PARAM_DEF(int, int, "%i", long, strict_strtol);
202STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, simple_strtoul); 202STANDARD_PARAM_DEF(uint, unsigned int, "%u", unsigned long, strict_strtoul);
203STANDARD_PARAM_DEF(long, long, "%li", long, simple_strtol); 203STANDARD_PARAM_DEF(long, long, "%li", long, strict_strtol);
204STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, simple_strtoul); 204STANDARD_PARAM_DEF(ulong, unsigned long, "%lu", unsigned long, strict_strtoul);
205 205
206int param_set_charp(const char *val, struct kernel_param *kp) 206int param_set_charp(const char *val, struct kernel_param *kp)
207{ 207{
diff --git a/kernel/pid.c b/kernel/pid.c
index 3b30bccdfcdc..477691576b33 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -41,7 +41,6 @@
41static struct hlist_head *pid_hash; 41static struct hlist_head *pid_hash;
42static int pidhash_shift; 42static int pidhash_shift;
43struct pid init_struct_pid = INIT_STRUCT_PID; 43struct pid init_struct_pid = INIT_STRUCT_PID;
44static struct kmem_cache *pid_ns_cachep;
45 44
46int pid_max = PID_MAX_DEFAULT; 45int pid_max = PID_MAX_DEFAULT;
47 46
@@ -112,7 +111,7 @@ EXPORT_SYMBOL(is_container_init);
112 111
113static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock); 112static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
114 113
115static fastcall void free_pidmap(struct pid_namespace *pid_ns, int pid) 114static void free_pidmap(struct pid_namespace *pid_ns, int pid)
116{ 115{
117 struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE; 116 struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
118 int offset = pid & BITS_PER_PAGE_MASK; 117 int offset = pid & BITS_PER_PAGE_MASK;
@@ -181,7 +180,7 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
181 return -1; 180 return -1;
182} 181}
183 182
184static int next_pidmap(struct pid_namespace *pid_ns, int last) 183int next_pidmap(struct pid_namespace *pid_ns, int last)
185{ 184{
186 int offset; 185 int offset;
187 struct pidmap *map, *end; 186 struct pidmap *map, *end;
@@ -199,7 +198,7 @@ static int next_pidmap(struct pid_namespace *pid_ns, int last)
199 return -1; 198 return -1;
200} 199}
201 200
202fastcall void put_pid(struct pid *pid) 201void put_pid(struct pid *pid)
203{ 202{
204 struct pid_namespace *ns; 203 struct pid_namespace *ns;
205 204
@@ -221,7 +220,7 @@ static void delayed_put_pid(struct rcu_head *rhp)
221 put_pid(pid); 220 put_pid(pid);
222} 221}
223 222
224fastcall void free_pid(struct pid *pid) 223void free_pid(struct pid *pid)
225{ 224{
226 /* We can be called with write_lock_irq(&tasklist_lock) held */ 225 /* We can be called with write_lock_irq(&tasklist_lock) held */
227 int i; 226 int i;
@@ -287,7 +286,7 @@ out_free:
287 goto out; 286 goto out;
288} 287}
289 288
290struct pid * fastcall find_pid_ns(int nr, struct pid_namespace *ns) 289struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
291{ 290{
292 struct hlist_node *elem; 291 struct hlist_node *elem;
293 struct upid *pnr; 292 struct upid *pnr;
@@ -317,7 +316,7 @@ EXPORT_SYMBOL_GPL(find_pid);
317/* 316/*
318 * attach_pid() must be called with the tasklist_lock write-held. 317 * attach_pid() must be called with the tasklist_lock write-held.
319 */ 318 */
320int fastcall attach_pid(struct task_struct *task, enum pid_type type, 319int attach_pid(struct task_struct *task, enum pid_type type,
321 struct pid *pid) 320 struct pid *pid)
322{ 321{
323 struct pid_link *link; 322 struct pid_link *link;
@@ -329,7 +328,7 @@ int fastcall attach_pid(struct task_struct *task, enum pid_type type,
329 return 0; 328 return 0;
330} 329}
331 330
332void fastcall detach_pid(struct task_struct *task, enum pid_type type) 331void detach_pid(struct task_struct *task, enum pid_type type)
333{ 332{
334 struct pid_link *link; 333 struct pid_link *link;
335 struct pid *pid; 334 struct pid *pid;
@@ -349,7 +348,7 @@ void fastcall detach_pid(struct task_struct *task, enum pid_type type)
349} 348}
350 349
351/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */ 350/* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
352void fastcall transfer_pid(struct task_struct *old, struct task_struct *new, 351void transfer_pid(struct task_struct *old, struct task_struct *new,
353 enum pid_type type) 352 enum pid_type type)
354{ 353{
355 new->pids[type].pid = old->pids[type].pid; 354 new->pids[type].pid = old->pids[type].pid;
@@ -357,7 +356,7 @@ void fastcall transfer_pid(struct task_struct *old, struct task_struct *new,
357 old->pids[type].pid = NULL; 356 old->pids[type].pid = NULL;
358} 357}
359 358
360struct task_struct * fastcall pid_task(struct pid *pid, enum pid_type type) 359struct task_struct *pid_task(struct pid *pid, enum pid_type type)
361{ 360{
362 struct task_struct *result = NULL; 361 struct task_struct *result = NULL;
363 if (pid) { 362 if (pid) {
@@ -409,7 +408,7 @@ struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
409 return pid; 408 return pid;
410} 409}
411 410
412struct task_struct *fastcall get_pid_task(struct pid *pid, enum pid_type type) 411struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
413{ 412{
414 struct task_struct *result; 413 struct task_struct *result;
415 rcu_read_lock(); 414 rcu_read_lock();
@@ -444,6 +443,12 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
444 return nr; 443 return nr;
445} 444}
446 445
446pid_t pid_vnr(struct pid *pid)
447{
448 return pid_nr_ns(pid, current->nsproxy->pid_ns);
449}
450EXPORT_SYMBOL_GPL(pid_vnr);
451
447pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns) 452pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
448{ 453{
449 return pid_nr_ns(task_pid(tsk), ns); 454 return pid_nr_ns(task_pid(tsk), ns);
@@ -488,180 +493,6 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
488} 493}
489EXPORT_SYMBOL_GPL(find_get_pid); 494EXPORT_SYMBOL_GPL(find_get_pid);
490 495
491struct pid_cache {
492 int nr_ids;
493 char name[16];
494 struct kmem_cache *cachep;
495 struct list_head list;
496};
497
498static LIST_HEAD(pid_caches_lh);
499static DEFINE_MUTEX(pid_caches_mutex);
500
501/*
502 * creates the kmem cache to allocate pids from.
503 * @nr_ids: the number of numerical ids this pid will have to carry
504 */
505
506static struct kmem_cache *create_pid_cachep(int nr_ids)
507{
508 struct pid_cache *pcache;
509 struct kmem_cache *cachep;
510
511 mutex_lock(&pid_caches_mutex);
512 list_for_each_entry (pcache, &pid_caches_lh, list)
513 if (pcache->nr_ids == nr_ids)
514 goto out;
515
516 pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
517 if (pcache == NULL)
518 goto err_alloc;
519
520 snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
521 cachep = kmem_cache_create(pcache->name,
522 sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
523 0, SLAB_HWCACHE_ALIGN, NULL);
524 if (cachep == NULL)
525 goto err_cachep;
526
527 pcache->nr_ids = nr_ids;
528 pcache->cachep = cachep;
529 list_add(&pcache->list, &pid_caches_lh);
530out:
531 mutex_unlock(&pid_caches_mutex);
532 return pcache->cachep;
533
534err_cachep:
535 kfree(pcache);
536err_alloc:
537 mutex_unlock(&pid_caches_mutex);
538 return NULL;
539}
540
541#ifdef CONFIG_PID_NS
542static struct pid_namespace *create_pid_namespace(int level)
543{
544 struct pid_namespace *ns;
545 int i;
546
547 ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL);
548 if (ns == NULL)
549 goto out;
550
551 ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
552 if (!ns->pidmap[0].page)
553 goto out_free;
554
555 ns->pid_cachep = create_pid_cachep(level + 1);
556 if (ns->pid_cachep == NULL)
557 goto out_free_map;
558
559 kref_init(&ns->kref);
560 ns->last_pid = 0;
561 ns->child_reaper = NULL;
562 ns->level = level;
563
564 set_bit(0, ns->pidmap[0].page);
565 atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
566
567 for (i = 1; i < PIDMAP_ENTRIES; i++) {
568 ns->pidmap[i].page = 0;
569 atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
570 }
571
572 return ns;
573
574out_free_map:
575 kfree(ns->pidmap[0].page);
576out_free:
577 kmem_cache_free(pid_ns_cachep, ns);
578out:
579 return ERR_PTR(-ENOMEM);
580}
581
582static void destroy_pid_namespace(struct pid_namespace *ns)
583{
584 int i;
585
586 for (i = 0; i < PIDMAP_ENTRIES; i++)
587 kfree(ns->pidmap[i].page);
588 kmem_cache_free(pid_ns_cachep, ns);
589}
590
591struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
592{
593 struct pid_namespace *new_ns;
594
595 BUG_ON(!old_ns);
596 new_ns = get_pid_ns(old_ns);
597 if (!(flags & CLONE_NEWPID))
598 goto out;
599
600 new_ns = ERR_PTR(-EINVAL);
601 if (flags & CLONE_THREAD)
602 goto out_put;
603
604 new_ns = create_pid_namespace(old_ns->level + 1);
605 if (!IS_ERR(new_ns))
606 new_ns->parent = get_pid_ns(old_ns);
607
608out_put:
609 put_pid_ns(old_ns);
610out:
611 return new_ns;
612}
613
614void free_pid_ns(struct kref *kref)
615{
616 struct pid_namespace *ns, *parent;
617
618 ns = container_of(kref, struct pid_namespace, kref);
619
620 parent = ns->parent;
621 destroy_pid_namespace(ns);
622
623 if (parent != NULL)
624 put_pid_ns(parent);
625}
626#endif /* CONFIG_PID_NS */
627
628void zap_pid_ns_processes(struct pid_namespace *pid_ns)
629{
630 int nr;
631 int rc;
632
633 /*
634 * The last thread in the cgroup-init thread group is terminating.
635 * Find remaining pid_ts in the namespace, signal and wait for them
636 * to exit.
637 *
638 * Note: This signals each thread in the namespace - even those that
639 * belong to the same thread group. To avoid this, we would have
640 * to walk the entire tasklist looking for processes in this
641 * namespace, but that could be unnecessarily expensive if the
642 * pid namespace has just a few processes. Or we need to
643 * maintain a tasklist for each pid namespace.
644 *
645 */
646 read_lock(&tasklist_lock);
647 nr = next_pidmap(pid_ns, 1);
648 while (nr > 0) {
649 kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
650 nr = next_pidmap(pid_ns, nr);
651 }
652 read_unlock(&tasklist_lock);
653
654 do {
655 clear_thread_flag(TIF_SIGPENDING);
656 rc = sys_wait4(-1, NULL, __WALL, NULL);
657 } while (rc != -ECHILD);
658
659
660 /* Child reaper for the pid namespace is going away */
661 pid_ns->child_reaper = NULL;
662 return;
663}
664
665/* 496/*
666 * The pid hash table is scaled according to the amount of memory in the 497 * The pid hash table is scaled according to the amount of memory in the
667 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or 498 * machine. From a minimum of 16 slots up to 4096 slots at one gigabyte or
@@ -694,9 +525,6 @@ void __init pidmap_init(void)
694 set_bit(0, init_pid_ns.pidmap[0].page); 525 set_bit(0, init_pid_ns.pidmap[0].page);
695 atomic_dec(&init_pid_ns.pidmap[0].nr_free); 526 atomic_dec(&init_pid_ns.pidmap[0].nr_free);
696 527
697 init_pid_ns.pid_cachep = create_pid_cachep(1); 528 init_pid_ns.pid_cachep = KMEM_CACHE(pid,
698 if (init_pid_ns.pid_cachep == NULL) 529 SLAB_HWCACHE_ALIGN | SLAB_PANIC);
699 panic("Can't create pid_1 cachep\n");
700
701 pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
702} 530}
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
new file mode 100644
index 000000000000..6d792b66d854
--- /dev/null
+++ b/kernel/pid_namespace.c
@@ -0,0 +1,197 @@
1/*
2 * Pid namespaces
3 *
4 * Authors:
5 * (C) 2007 Pavel Emelyanov <xemul@openvz.org>, OpenVZ, SWsoft Inc.
6 * (C) 2007 Sukadev Bhattiprolu <sukadev@us.ibm.com>, IBM
7 * Many thanks to Oleg Nesterov for comments and help
8 *
9 */
10
11#include <linux/pid.h>
12#include <linux/pid_namespace.h>
13#include <linux/syscalls.h>
14#include <linux/err.h>
15
16#define BITS_PER_PAGE (PAGE_SIZE*8)
17
18struct pid_cache {
19 int nr_ids;
20 char name[16];
21 struct kmem_cache *cachep;
22 struct list_head list;
23};
24
25static LIST_HEAD(pid_caches_lh);
26static DEFINE_MUTEX(pid_caches_mutex);
27static struct kmem_cache *pid_ns_cachep;
28
29/*
30 * creates the kmem cache to allocate pids from.
31 * @nr_ids: the number of numerical ids this pid will have to carry
32 */
33
34static struct kmem_cache *create_pid_cachep(int nr_ids)
35{
36 struct pid_cache *pcache;
37 struct kmem_cache *cachep;
38
39 mutex_lock(&pid_caches_mutex);
40 list_for_each_entry(pcache, &pid_caches_lh, list)
41 if (pcache->nr_ids == nr_ids)
42 goto out;
43
44 pcache = kmalloc(sizeof(struct pid_cache), GFP_KERNEL);
45 if (pcache == NULL)
46 goto err_alloc;
47
48 snprintf(pcache->name, sizeof(pcache->name), "pid_%d", nr_ids);
49 cachep = kmem_cache_create(pcache->name,
50 sizeof(struct pid) + (nr_ids - 1) * sizeof(struct upid),
51 0, SLAB_HWCACHE_ALIGN, NULL);
52 if (cachep == NULL)
53 goto err_cachep;
54
55 pcache->nr_ids = nr_ids;
56 pcache->cachep = cachep;
57 list_add(&pcache->list, &pid_caches_lh);
58out:
59 mutex_unlock(&pid_caches_mutex);
60 return pcache->cachep;
61
62err_cachep:
63 kfree(pcache);
64err_alloc:
65 mutex_unlock(&pid_caches_mutex);
66 return NULL;
67}
68
69static struct pid_namespace *create_pid_namespace(int level)
70{
71 struct pid_namespace *ns;
72 int i;
73
74 ns = kmem_cache_alloc(pid_ns_cachep, GFP_KERNEL);
75 if (ns == NULL)
76 goto out;
77
78 ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
79 if (!ns->pidmap[0].page)
80 goto out_free;
81
82 ns->pid_cachep = create_pid_cachep(level + 1);
83 if (ns->pid_cachep == NULL)
84 goto out_free_map;
85
86 kref_init(&ns->kref);
87 ns->last_pid = 0;
88 ns->child_reaper = NULL;
89 ns->level = level;
90
91 set_bit(0, ns->pidmap[0].page);
92 atomic_set(&ns->pidmap[0].nr_free, BITS_PER_PAGE - 1);
93
94 for (i = 1; i < PIDMAP_ENTRIES; i++) {
95 ns->pidmap[i].page = 0;
96 atomic_set(&ns->pidmap[i].nr_free, BITS_PER_PAGE);
97 }
98
99 return ns;
100
101out_free_map:
102 kfree(ns->pidmap[0].page);
103out_free:
104 kmem_cache_free(pid_ns_cachep, ns);
105out:
106 return ERR_PTR(-ENOMEM);
107}
108
109static void destroy_pid_namespace(struct pid_namespace *ns)
110{
111 int i;
112
113 for (i = 0; i < PIDMAP_ENTRIES; i++)
114 kfree(ns->pidmap[i].page);
115 kmem_cache_free(pid_ns_cachep, ns);
116}
117
118struct pid_namespace *copy_pid_ns(unsigned long flags, struct pid_namespace *old_ns)
119{
120 struct pid_namespace *new_ns;
121
122 BUG_ON(!old_ns);
123 new_ns = get_pid_ns(old_ns);
124 if (!(flags & CLONE_NEWPID))
125 goto out;
126
127 new_ns = ERR_PTR(-EINVAL);
128 if (flags & CLONE_THREAD)
129 goto out_put;
130
131 new_ns = create_pid_namespace(old_ns->level + 1);
132 if (!IS_ERR(new_ns))
133 new_ns->parent = get_pid_ns(old_ns);
134
135out_put:
136 put_pid_ns(old_ns);
137out:
138 return new_ns;
139}
140
141void free_pid_ns(struct kref *kref)
142{
143 struct pid_namespace *ns, *parent;
144
145 ns = container_of(kref, struct pid_namespace, kref);
146
147 parent = ns->parent;
148 destroy_pid_namespace(ns);
149
150 if (parent != NULL)
151 put_pid_ns(parent);
152}
153
154void zap_pid_ns_processes(struct pid_namespace *pid_ns)
155{
156 int nr;
157 int rc;
158
159 /*
160 * The last thread in the cgroup-init thread group is terminating.
161 * Find remaining pid_ts in the namespace, signal and wait for them
162 * to exit.
163 *
164 * Note: This signals each thread in the namespace - even those that
165 * belong to the same thread group. To avoid this, we would have
166 * to walk the entire tasklist looking for processes in this
167 * namespace, but that could be unnecessarily expensive if the
168 * pid namespace has just a few processes. Or we need to
169 * maintain a tasklist for each pid namespace.
170 *
171 */
172 read_lock(&tasklist_lock);
173 nr = next_pidmap(pid_ns, 1);
174 while (nr > 0) {
175 kill_proc_info(SIGKILL, SEND_SIG_PRIV, nr);
176 nr = next_pidmap(pid_ns, nr);
177 }
178 read_unlock(&tasklist_lock);
179
180 do {
181 clear_thread_flag(TIF_SIGPENDING);
182 rc = sys_wait4(-1, NULL, __WALL, NULL);
183 } while (rc != -ECHILD);
184
185
186 /* Child reaper for the pid namespace is going away */
187 pid_ns->child_reaper = NULL;
188 return;
189}
190
191static __init int pid_namespaces_init(void)
192{
193 pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
194 return 0;
195}
196
197__initcall(pid_namespaces_init);
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 0b7c82ac467e..2eae91f954ca 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -20,7 +20,7 @@ static int check_clock(const clockid_t which_clock)
20 return 0; 20 return 0;
21 21
22 read_lock(&tasklist_lock); 22 read_lock(&tasklist_lock);
23 p = find_task_by_pid(pid); 23 p = find_task_by_vpid(pid);
24 if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ? 24 if (!p || !(CPUCLOCK_PERTHREAD(which_clock) ?
25 same_thread_group(p, current) : thread_group_leader(p))) { 25 same_thread_group(p, current) : thread_group_leader(p))) {
26 error = -EINVAL; 26 error = -EINVAL;
@@ -305,7 +305,7 @@ int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
305 */ 305 */
306 struct task_struct *p; 306 struct task_struct *p;
307 rcu_read_lock(); 307 rcu_read_lock();
308 p = find_task_by_pid(pid); 308 p = find_task_by_vpid(pid);
309 if (p) { 309 if (p) {
310 if (CPUCLOCK_PERTHREAD(which_clock)) { 310 if (CPUCLOCK_PERTHREAD(which_clock)) {
311 if (same_thread_group(p, current)) { 311 if (same_thread_group(p, current)) {
@@ -354,7 +354,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
354 if (pid == 0) { 354 if (pid == 0) {
355 p = current; 355 p = current;
356 } else { 356 } else {
357 p = find_task_by_pid(pid); 357 p = find_task_by_vpid(pid);
358 if (p && !same_thread_group(p, current)) 358 if (p && !same_thread_group(p, current))
359 p = NULL; 359 p = NULL;
360 } 360 }
@@ -362,7 +362,7 @@ int posix_cpu_timer_create(struct k_itimer *new_timer)
362 if (pid == 0) { 362 if (pid == 0) {
363 p = current->group_leader; 363 p = current->group_leader;
364 } else { 364 } else {
365 p = find_task_by_pid(pid); 365 p = find_task_by_vpid(pid);
366 if (p && !thread_group_leader(p)) 366 if (p && !thread_group_leader(p))
367 p = NULL; 367 p = NULL;
368 } 368 }
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 122d5c787fe2..ce268966007d 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -404,7 +404,7 @@ static struct task_struct * good_sigevent(sigevent_t * event)
404 struct task_struct *rtn = current->group_leader; 404 struct task_struct *rtn = current->group_leader;
405 405
406 if ((event->sigev_notify & SIGEV_THREAD_ID ) && 406 if ((event->sigev_notify & SIGEV_THREAD_ID ) &&
407 (!(rtn = find_task_by_pid(event->sigev_notify_thread_id)) || 407 (!(rtn = find_task_by_vpid(event->sigev_notify_thread_id)) ||
408 !same_thread_group(rtn, current) || 408 !same_thread_group(rtn, current) ||
409 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL)) 409 (event->sigev_notify & ~SIGEV_THREAD_ID) != SIGEV_SIGNAL))
410 return NULL; 410 return NULL;
diff --git a/kernel/printk.c b/kernel/printk.c
index 4a090621f379..bee36100f110 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -32,7 +32,6 @@
32#include <linux/security.h> 32#include <linux/security.h>
33#include <linux/bootmem.h> 33#include <linux/bootmem.h>
34#include <linux/syscalls.h> 34#include <linux/syscalls.h>
35#include <linux/jiffies.h>
36 35
37#include <asm/uaccess.h> 36#include <asm/uaccess.h>
38 37
@@ -567,19 +566,6 @@ static int printk_time = 0;
567#endif 566#endif
568module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR); 567module_param_named(time, printk_time, bool, S_IRUGO | S_IWUSR);
569 568
570static int __init printk_time_setup(char *str)
571{
572 if (*str)
573 return 0;
574 printk_time = 1;
575 printk(KERN_NOTICE "The 'time' option is deprecated and "
576 "is scheduled for removal in early 2008\n");
577 printk(KERN_NOTICE "Use 'printk.time=<value>' instead\n");
578 return 1;
579}
580
581__setup("time", printk_time_setup);
582
583/* Check if we have any console registered that can be called early in boot. */ 569/* Check if we have any console registered that can be called early in boot. */
584static int have_callable_console(void) 570static int have_callable_console(void)
585{ 571{
@@ -1265,6 +1251,7 @@ void tty_write_message(struct tty_struct *tty, char *msg)
1265 return; 1251 return;
1266} 1252}
1267 1253
1254#if defined CONFIG_PRINTK
1268/* 1255/*
1269 * printk rate limiting, lifted from the networking subsystem. 1256 * printk rate limiting, lifted from the networking subsystem.
1270 * 1257 *
@@ -1334,3 +1321,4 @@ bool printk_timed_ratelimit(unsigned long *caller_jiffies,
1334 return false; 1321 return false;
1335} 1322}
1336EXPORT_SYMBOL(printk_timed_ratelimit); 1323EXPORT_SYMBOL(printk_timed_ratelimit);
1324#endif
diff --git a/kernel/profile.c b/kernel/profile.c
index e64c2da11c0f..3b7a1b055122 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -20,7 +20,6 @@
20#include <linux/mm.h> 20#include <linux/mm.h>
21#include <linux/cpumask.h> 21#include <linux/cpumask.h>
22#include <linux/cpu.h> 22#include <linux/cpu.h>
23#include <linux/profile.h>
24#include <linux/highmem.h> 23#include <linux/highmem.h>
25#include <linux/mutex.h> 24#include <linux/mutex.h>
26#include <asm/sections.h> 25#include <asm/sections.h>
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 628b03ab88a5..fdb34e86f923 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -99,10 +99,12 @@ int ptrace_check_attach(struct task_struct *child, int kill)
99 * be changed by us so it's not changing right after this. 99 * be changed by us so it's not changing right after this.
100 */ 100 */
101 read_lock(&tasklist_lock); 101 read_lock(&tasklist_lock);
102 if ((child->ptrace & PT_PTRACED) && child->parent == current && 102 if ((child->ptrace & PT_PTRACED) && child->parent == current) {
103 (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
104 && child->signal != NULL) {
105 ret = 0; 103 ret = 0;
104 /*
105 * child->sighand can't be NULL, release_task()
106 * does ptrace_unlink() before __exit_signal().
107 */
106 spin_lock_irq(&child->sighand->siglock); 108 spin_lock_irq(&child->sighand->siglock);
107 if (task_is_stopped(child)) 109 if (task_is_stopped(child))
108 child->state = TASK_TRACED; 110 child->state = TASK_TRACED;
@@ -200,8 +202,7 @@ repeat:
200 goto bad; 202 goto bad;
201 203
202 /* Go */ 204 /* Go */
203 task->ptrace |= PT_PTRACED | ((task->real_parent != current) 205 task->ptrace |= PT_PTRACED;
204 ? PT_ATTACHED : 0);
205 if (capable(CAP_SYS_PTRACE)) 206 if (capable(CAP_SYS_PTRACE))
206 task->ptrace |= PT_PTRACE_CAP; 207 task->ptrace |= PT_PTRACE_CAP;
207 208
diff --git a/kernel/rtmutex-debug.c b/kernel/rtmutex-debug.c
index 56d73cb8826d..5fcb4fe645e2 100644
--- a/kernel/rtmutex-debug.c
+++ b/kernel/rtmutex-debug.c
@@ -130,7 +130,7 @@ void debug_rt_mutex_deadlock(int detect, struct rt_mutex_waiter *act_waiter,
130 130
131 task = rt_mutex_owner(act_waiter->lock); 131 task = rt_mutex_owner(act_waiter->lock);
132 if (task && task != current) { 132 if (task && task != current) {
133 act_waiter->deadlock_task_pid = task->pid; 133 act_waiter->deadlock_task_pid = get_pid(task_pid(task));
134 act_waiter->deadlock_lock = lock; 134 act_waiter->deadlock_lock = lock;
135 } 135 }
136} 136}
@@ -142,9 +142,12 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
142 if (!waiter->deadlock_lock || !rt_trace_on) 142 if (!waiter->deadlock_lock || !rt_trace_on)
143 return; 143 return;
144 144
145 task = find_task_by_pid(waiter->deadlock_task_pid); 145 rcu_read_lock();
146 if (!task) 146 task = pid_task(waiter->deadlock_task_pid, PIDTYPE_PID);
147 if (!task) {
148 rcu_read_unlock();
147 return; 149 return;
150 }
148 151
149 TRACE_OFF_NOLOCK(); 152 TRACE_OFF_NOLOCK();
150 153
@@ -173,6 +176,7 @@ void debug_rt_mutex_print_deadlock(struct rt_mutex_waiter *waiter)
173 current->comm, task_pid_nr(current)); 176 current->comm, task_pid_nr(current));
174 dump_stack(); 177 dump_stack();
175 debug_show_all_locks(); 178 debug_show_all_locks();
179 rcu_read_unlock();
176 180
177 printk("[ turning off deadlock detection." 181 printk("[ turning off deadlock detection."
178 "Please report this trace. ]\n\n"); 182 "Please report this trace. ]\n\n");
@@ -203,10 +207,12 @@ void debug_rt_mutex_init_waiter(struct rt_mutex_waiter *waiter)
203 memset(waiter, 0x11, sizeof(*waiter)); 207 memset(waiter, 0x11, sizeof(*waiter));
204 plist_node_init(&waiter->list_entry, MAX_PRIO); 208 plist_node_init(&waiter->list_entry, MAX_PRIO);
205 plist_node_init(&waiter->pi_list_entry, MAX_PRIO); 209 plist_node_init(&waiter->pi_list_entry, MAX_PRIO);
210 waiter->deadlock_task_pid = NULL;
206} 211}
207 212
208void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter) 213void debug_rt_mutex_free_waiter(struct rt_mutex_waiter *waiter)
209{ 214{
215 put_pid(waiter->deadlock_task_pid);
210 TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry)); 216 TRACE_WARN_ON(!plist_node_empty(&waiter->list_entry));
211 TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry)); 217 TRACE_WARN_ON(!plist_node_empty(&waiter->pi_list_entry));
212 TRACE_WARN_ON(waiter->task); 218 TRACE_WARN_ON(waiter->task);
diff --git a/kernel/rtmutex_common.h b/kernel/rtmutex_common.h
index 2d3b83593ca3..e124bf5800ea 100644
--- a/kernel/rtmutex_common.h
+++ b/kernel/rtmutex_common.h
@@ -51,7 +51,7 @@ struct rt_mutex_waiter {
51 struct rt_mutex *lock; 51 struct rt_mutex *lock;
52#ifdef CONFIG_DEBUG_RT_MUTEXES 52#ifdef CONFIG_DEBUG_RT_MUTEXES
53 unsigned long ip; 53 unsigned long ip;
54 pid_t deadlock_task_pid; 54 struct pid *deadlock_task_pid;
55 struct rt_mutex *deadlock_lock; 55 struct rt_mutex *deadlock_lock;
56#endif 56#endif
57}; 57};
diff --git a/kernel/sched.c b/kernel/sched.c
index 9474b23c28bf..3eedd5260907 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -1893,13 +1893,13 @@ out:
1893 return success; 1893 return success;
1894} 1894}
1895 1895
1896int fastcall wake_up_process(struct task_struct *p) 1896int wake_up_process(struct task_struct *p)
1897{ 1897{
1898 return try_to_wake_up(p, TASK_ALL, 0); 1898 return try_to_wake_up(p, TASK_ALL, 0);
1899} 1899}
1900EXPORT_SYMBOL(wake_up_process); 1900EXPORT_SYMBOL(wake_up_process);
1901 1901
1902int fastcall wake_up_state(struct task_struct *p, unsigned int state) 1902int wake_up_state(struct task_struct *p, unsigned int state)
1903{ 1903{
1904 return try_to_wake_up(p, state, 0); 1904 return try_to_wake_up(p, state, 0);
1905} 1905}
@@ -1986,7 +1986,7 @@ void sched_fork(struct task_struct *p, int clone_flags)
1986 * that must be done for every newly created context, then puts the task 1986 * that must be done for every newly created context, then puts the task
1987 * on the runqueue and wakes it. 1987 * on the runqueue and wakes it.
1988 */ 1988 */
1989void fastcall wake_up_new_task(struct task_struct *p, unsigned long clone_flags) 1989void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
1990{ 1990{
1991 unsigned long flags; 1991 unsigned long flags;
1992 struct rq *rq; 1992 struct rq *rq;
@@ -3753,7 +3753,7 @@ void scheduler_tick(void)
3753 3753
3754#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT) 3754#if defined(CONFIG_PREEMPT) && defined(CONFIG_DEBUG_PREEMPT)
3755 3755
3756void fastcall add_preempt_count(int val) 3756void add_preempt_count(int val)
3757{ 3757{
3758 /* 3758 /*
3759 * Underflow? 3759 * Underflow?
@@ -3769,7 +3769,7 @@ void fastcall add_preempt_count(int val)
3769} 3769}
3770EXPORT_SYMBOL(add_preempt_count); 3770EXPORT_SYMBOL(add_preempt_count);
3771 3771
3772void fastcall sub_preempt_count(int val) 3772void sub_preempt_count(int val)
3773{ 3773{
3774 /* 3774 /*
3775 * Underflow? 3775 * Underflow?
@@ -4067,7 +4067,7 @@ static void __wake_up_common(wait_queue_head_t *q, unsigned int mode,
4067 * @nr_exclusive: how many wake-one or wake-many threads to wake up 4067 * @nr_exclusive: how many wake-one or wake-many threads to wake up
4068 * @key: is directly passed to the wakeup function 4068 * @key: is directly passed to the wakeup function
4069 */ 4069 */
4070void fastcall __wake_up(wait_queue_head_t *q, unsigned int mode, 4070void __wake_up(wait_queue_head_t *q, unsigned int mode,
4071 int nr_exclusive, void *key) 4071 int nr_exclusive, void *key)
4072{ 4072{
4073 unsigned long flags; 4073 unsigned long flags;
@@ -4081,7 +4081,7 @@ EXPORT_SYMBOL(__wake_up);
4081/* 4081/*
4082 * Same as __wake_up but called with the spinlock in wait_queue_head_t held. 4082 * Same as __wake_up but called with the spinlock in wait_queue_head_t held.
4083 */ 4083 */
4084void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode) 4084void __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4085{ 4085{
4086 __wake_up_common(q, mode, 1, 0, NULL); 4086 __wake_up_common(q, mode, 1, 0, NULL);
4087} 4087}
@@ -4099,7 +4099,7 @@ void fastcall __wake_up_locked(wait_queue_head_t *q, unsigned int mode)
4099 * 4099 *
4100 * On UP it can prevent extra preemption. 4100 * On UP it can prevent extra preemption.
4101 */ 4101 */
4102void fastcall 4102void
4103__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive) 4103__wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4104{ 4104{
4105 unsigned long flags; 4105 unsigned long flags;
diff --git a/kernel/signal.c b/kernel/signal.c
index 5d30ff561847..2c1f08defac2 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1018,7 +1018,7 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1018} 1018}
1019 1019
1020/* 1020/*
1021 * kill_pgrp_info() sends a signal to a process group: this is what the tty 1021 * __kill_pgrp_info() sends a signal to a process group: this is what the tty
1022 * control characters do (^C, ^Z etc) 1022 * control characters do (^C, ^Z etc)
1023 */ 1023 */
1024 1024
@@ -1037,30 +1037,28 @@ int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1037 return success ? 0 : retval; 1037 return success ? 0 : retval;
1038} 1038}
1039 1039
1040int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1041{
1042 int retval;
1043
1044 read_lock(&tasklist_lock);
1045 retval = __kill_pgrp_info(sig, info, pgrp);
1046 read_unlock(&tasklist_lock);
1047
1048 return retval;
1049}
1050
1051int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) 1040int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1052{ 1041{
1053 int error; 1042 int error = -ESRCH;
1054 struct task_struct *p; 1043 struct task_struct *p;
1055 1044
1056 rcu_read_lock(); 1045 rcu_read_lock();
1057 if (unlikely(sig_needs_tasklist(sig))) 1046 if (unlikely(sig_needs_tasklist(sig)))
1058 read_lock(&tasklist_lock); 1047 read_lock(&tasklist_lock);
1059 1048
1049retry:
1060 p = pid_task(pid, PIDTYPE_PID); 1050 p = pid_task(pid, PIDTYPE_PID);
1061 error = -ESRCH; 1051 if (p) {
1062 if (p)
1063 error = group_send_sig_info(sig, info, p); 1052 error = group_send_sig_info(sig, info, p);
1053 if (unlikely(error == -ESRCH))
1054 /*
1055 * The task was unhashed in between, try again.
1056 * If it is dead, pid_task() will return NULL,
1057 * if we race with de_thread() it will find the
1058 * new leader.
1059 */
1060 goto retry;
1061 }
1064 1062
1065 if (unlikely(sig_needs_tasklist(sig))) 1063 if (unlikely(sig_needs_tasklist(sig)))
1066 read_unlock(&tasklist_lock); 1064 read_unlock(&tasklist_lock);
@@ -1125,14 +1123,22 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1125static int kill_something_info(int sig, struct siginfo *info, int pid) 1123static int kill_something_info(int sig, struct siginfo *info, int pid)
1126{ 1124{
1127 int ret; 1125 int ret;
1128 rcu_read_lock(); 1126
1129 if (!pid) { 1127 if (pid > 0) {
1130 ret = kill_pgrp_info(sig, info, task_pgrp(current)); 1128 rcu_read_lock();
1131 } else if (pid == -1) { 1129 ret = kill_pid_info(sig, info, find_vpid(pid));
1130 rcu_read_unlock();
1131 return ret;
1132 }
1133
1134 read_lock(&tasklist_lock);
1135 if (pid != -1) {
1136 ret = __kill_pgrp_info(sig, info,
1137 pid ? find_vpid(-pid) : task_pgrp(current));
1138 } else {
1132 int retval = 0, count = 0; 1139 int retval = 0, count = 0;
1133 struct task_struct * p; 1140 struct task_struct * p;
1134 1141
1135 read_lock(&tasklist_lock);
1136 for_each_process(p) { 1142 for_each_process(p) {
1137 if (p->pid > 1 && !same_thread_group(p, current)) { 1143 if (p->pid > 1 && !same_thread_group(p, current)) {
1138 int err = group_send_sig_info(sig, info, p); 1144 int err = group_send_sig_info(sig, info, p);
@@ -1141,14 +1147,10 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
1141 retval = err; 1147 retval = err;
1142 } 1148 }
1143 } 1149 }
1144 read_unlock(&tasklist_lock);
1145 ret = count ? retval : -ESRCH; 1150 ret = count ? retval : -ESRCH;
1146 } else if (pid < 0) {
1147 ret = kill_pgrp_info(sig, info, find_vpid(-pid));
1148 } else {
1149 ret = kill_pid_info(sig, info, find_vpid(pid));
1150 } 1151 }
1151 rcu_read_unlock(); 1152 read_unlock(&tasklist_lock);
1153
1152 return ret; 1154 return ret;
1153} 1155}
1154 1156
@@ -1196,20 +1198,6 @@ send_sig(int sig, struct task_struct *p, int priv)
1196 return send_sig_info(sig, __si_special(priv), p); 1198 return send_sig_info(sig, __si_special(priv), p);
1197} 1199}
1198 1200
1199/*
1200 * This is the entry point for "process-wide" signals.
1201 * They will go to an appropriate thread in the thread group.
1202 */
1203int
1204send_group_sig_info(int sig, struct siginfo *info, struct task_struct *p)
1205{
1206 int ret;
1207 read_lock(&tasklist_lock);
1208 ret = group_send_sig_info(sig, info, p);
1209 read_unlock(&tasklist_lock);
1210 return ret;
1211}
1212
1213void 1201void
1214force_sig(int sig, struct task_struct *p) 1202force_sig(int sig, struct task_struct *p)
1215{ 1203{
@@ -1237,7 +1225,13 @@ force_sigsegv(int sig, struct task_struct *p)
1237 1225
1238int kill_pgrp(struct pid *pid, int sig, int priv) 1226int kill_pgrp(struct pid *pid, int sig, int priv)
1239{ 1227{
1240 return kill_pgrp_info(sig, __si_special(priv), pid); 1228 int ret;
1229
1230 read_lock(&tasklist_lock);
1231 ret = __kill_pgrp_info(sig, __si_special(priv), pid);
1232 read_unlock(&tasklist_lock);
1233
1234 return ret;
1241} 1235}
1242EXPORT_SYMBOL(kill_pgrp); 1236EXPORT_SYMBOL(kill_pgrp);
1243 1237
@@ -1556,11 +1550,6 @@ static inline int may_ptrace_stop(void)
1556{ 1550{
1557 if (!likely(current->ptrace & PT_PTRACED)) 1551 if (!likely(current->ptrace & PT_PTRACED))
1558 return 0; 1552 return 0;
1559
1560 if (unlikely(current->parent == current->real_parent &&
1561 (current->ptrace & PT_ATTACHED)))
1562 return 0;
1563
1564 /* 1553 /*
1565 * Are we in the middle of do_coredump? 1554 * Are we in the middle of do_coredump?
1566 * If so and our tracer is also part of the coredump stopping 1555 * If so and our tracer is also part of the coredump stopping
@@ -1596,10 +1585,10 @@ static int sigkill_pending(struct task_struct *tsk)
1596 * That makes it a way to test a stopped process for 1585 * That makes it a way to test a stopped process for
1597 * being ptrace-stopped vs being job-control-stopped. 1586 * being ptrace-stopped vs being job-control-stopped.
1598 * 1587 *
1599 * If we actually decide not to stop at all because the tracer is gone, 1588 * If we actually decide not to stop at all because the tracer
1600 * we leave nostop_code in current->exit_code. 1589 * is gone, we keep current->exit_code unless clear_code.
1601 */ 1590 */
1602static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info) 1591static void ptrace_stop(int exit_code, int clear_code, siginfo_t *info)
1603{ 1592{
1604 int killed = 0; 1593 int killed = 0;
1605 1594
@@ -1643,11 +1632,12 @@ static void ptrace_stop(int exit_code, int nostop_code, siginfo_t *info)
1643 } else { 1632 } else {
1644 /* 1633 /*
1645 * By the time we got the lock, our tracer went away. 1634 * By the time we got the lock, our tracer went away.
1646 * Don't stop here. 1635 * Don't drop the lock yet, another tracer may come.
1647 */ 1636 */
1637 __set_current_state(TASK_RUNNING);
1638 if (clear_code)
1639 current->exit_code = 0;
1648 read_unlock(&tasklist_lock); 1640 read_unlock(&tasklist_lock);
1649 set_current_state(TASK_RUNNING);
1650 current->exit_code = nostop_code;
1651 } 1641 }
1652 1642
1653 /* 1643 /*
@@ -1680,7 +1670,7 @@ void ptrace_notify(int exit_code)
1680 1670
1681 /* Let the debugger run. */ 1671 /* Let the debugger run. */
1682 spin_lock_irq(&current->sighand->siglock); 1672 spin_lock_irq(&current->sighand->siglock);
1683 ptrace_stop(exit_code, 0, &info); 1673 ptrace_stop(exit_code, 1, &info);
1684 spin_unlock_irq(&current->sighand->siglock); 1674 spin_unlock_irq(&current->sighand->siglock);
1685} 1675}
1686 1676
@@ -1743,7 +1733,7 @@ static int do_signal_stop(int signr)
1743 * stop is always done with the siglock held, 1733 * stop is always done with the siglock held,
1744 * so this check has no races. 1734 * so this check has no races.
1745 */ 1735 */
1746 if (!t->exit_state && 1736 if (!(t->flags & PF_EXITING) &&
1747 !task_is_stopped_or_traced(t)) { 1737 !task_is_stopped_or_traced(t)) {
1748 stop_count++; 1738 stop_count++;
1749 signal_wake_up(t, 0); 1739 signal_wake_up(t, 0);
@@ -1787,7 +1777,7 @@ relock:
1787 ptrace_signal_deliver(regs, cookie); 1777 ptrace_signal_deliver(regs, cookie);
1788 1778
1789 /* Let the debugger run. */ 1779 /* Let the debugger run. */
1790 ptrace_stop(signr, signr, info); 1780 ptrace_stop(signr, 0, info);
1791 1781
1792 /* We're back. Did the debugger cancel the sig? */ 1782 /* We're back. Did the debugger cancel the sig? */
1793 signr = current->exit_code; 1783 signr = current->exit_code;
@@ -1904,6 +1894,48 @@ relock:
1904 return signr; 1894 return signr;
1905} 1895}
1906 1896
1897void exit_signals(struct task_struct *tsk)
1898{
1899 int group_stop = 0;
1900 struct task_struct *t;
1901
1902 if (thread_group_empty(tsk) || signal_group_exit(tsk->signal)) {
1903 tsk->flags |= PF_EXITING;
1904 return;
1905 }
1906
1907 spin_lock_irq(&tsk->sighand->siglock);
1908 /*
1909 * From now this task is not visible for group-wide signals,
1910 * see wants_signal(), do_signal_stop().
1911 */
1912 tsk->flags |= PF_EXITING;
1913 if (!signal_pending(tsk))
1914 goto out;
1915
1916 /* It could be that __group_complete_signal() choose us to
1917 * notify about group-wide signal. Another thread should be
1918 * woken now to take the signal since we will not.
1919 */
1920 for (t = tsk; (t = next_thread(t)) != tsk; )
1921 if (!signal_pending(t) && !(t->flags & PF_EXITING))
1922 recalc_sigpending_and_wake(t);
1923
1924 if (unlikely(tsk->signal->group_stop_count) &&
1925 !--tsk->signal->group_stop_count) {
1926 tsk->signal->flags = SIGNAL_STOP_STOPPED;
1927 group_stop = 1;
1928 }
1929out:
1930 spin_unlock_irq(&tsk->sighand->siglock);
1931
1932 if (unlikely(group_stop)) {
1933 read_lock(&tasklist_lock);
1934 do_notify_parent_cldstop(tsk, CLD_STOPPED);
1935 read_unlock(&tasklist_lock);
1936 }
1937}
1938
1907EXPORT_SYMBOL(recalc_sigpending); 1939EXPORT_SYMBOL(recalc_sigpending);
1908EXPORT_SYMBOL_GPL(dequeue_signal); 1940EXPORT_SYMBOL_GPL(dequeue_signal);
1909EXPORT_SYMBOL(flush_signals); 1941EXPORT_SYMBOL(flush_signals);
diff --git a/kernel/softirq.c b/kernel/softirq.c
index d7837d45419e..5b3aea5f471e 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -320,7 +320,7 @@ void irq_exit(void)
320/* 320/*
321 * This function must run with irqs disabled! 321 * This function must run with irqs disabled!
322 */ 322 */
323inline fastcall void raise_softirq_irqoff(unsigned int nr) 323inline void raise_softirq_irqoff(unsigned int nr)
324{ 324{
325 __raise_softirq_irqoff(nr); 325 __raise_softirq_irqoff(nr);
326 326
@@ -337,7 +337,7 @@ inline fastcall void raise_softirq_irqoff(unsigned int nr)
337 wakeup_softirqd(); 337 wakeup_softirqd();
338} 338}
339 339
340void fastcall raise_softirq(unsigned int nr) 340void raise_softirq(unsigned int nr)
341{ 341{
342 unsigned long flags; 342 unsigned long flags;
343 343
@@ -363,7 +363,7 @@ struct tasklet_head
363static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL }; 363static DEFINE_PER_CPU(struct tasklet_head, tasklet_vec) = { NULL };
364static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL }; 364static DEFINE_PER_CPU(struct tasklet_head, tasklet_hi_vec) = { NULL };
365 365
366void fastcall __tasklet_schedule(struct tasklet_struct *t) 366void __tasklet_schedule(struct tasklet_struct *t)
367{ 367{
368 unsigned long flags; 368 unsigned long flags;
369 369
@@ -376,7 +376,7 @@ void fastcall __tasklet_schedule(struct tasklet_struct *t)
376 376
377EXPORT_SYMBOL(__tasklet_schedule); 377EXPORT_SYMBOL(__tasklet_schedule);
378 378
379void fastcall __tasklet_hi_schedule(struct tasklet_struct *t) 379void __tasklet_hi_schedule(struct tasklet_struct *t)
380{ 380{
381 unsigned long flags; 381 unsigned long flags;
382 382
diff --git a/kernel/sys.c b/kernel/sys.c
index e3c08d4324de..a626116af5db 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -916,8 +916,8 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
916{ 916{
917 struct task_struct *p; 917 struct task_struct *p;
918 struct task_struct *group_leader = current->group_leader; 918 struct task_struct *group_leader = current->group_leader;
919 int err = -EINVAL; 919 struct pid *pgrp;
920 struct pid_namespace *ns; 920 int err;
921 921
922 if (!pid) 922 if (!pid)
923 pid = task_pid_vnr(group_leader); 923 pid = task_pid_vnr(group_leader);
@@ -929,12 +929,10 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
929 /* From this point forward we keep holding onto the tasklist lock 929 /* From this point forward we keep holding onto the tasklist lock
930 * so that our parent does not change from under us. -DaveM 930 * so that our parent does not change from under us. -DaveM
931 */ 931 */
932 ns = current->nsproxy->pid_ns;
933
934 write_lock_irq(&tasklist_lock); 932 write_lock_irq(&tasklist_lock);
935 933
936 err = -ESRCH; 934 err = -ESRCH;
937 p = find_task_by_pid_ns(pid, ns); 935 p = find_task_by_vpid(pid);
938 if (!p) 936 if (!p)
939 goto out; 937 goto out;
940 938
@@ -942,7 +940,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
942 if (!thread_group_leader(p)) 940 if (!thread_group_leader(p))
943 goto out; 941 goto out;
944 942
945 if (p->real_parent->tgid == group_leader->tgid) { 943 if (same_thread_group(p->real_parent, group_leader)) {
946 err = -EPERM; 944 err = -EPERM;
947 if (task_session(p) != task_session(group_leader)) 945 if (task_session(p) != task_session(group_leader))
948 goto out; 946 goto out;
@@ -959,10 +957,12 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
959 if (p->signal->leader) 957 if (p->signal->leader)
960 goto out; 958 goto out;
961 959
960 pgrp = task_pid(p);
962 if (pgid != pid) { 961 if (pgid != pid) {
963 struct task_struct *g; 962 struct task_struct *g;
964 963
965 g = find_task_by_pid_type_ns(PIDTYPE_PGID, pgid, ns); 964 pgrp = find_vpid(pgid);
965 g = pid_task(pgrp, PIDTYPE_PGID);
966 if (!g || task_session(g) != task_session(group_leader)) 966 if (!g || task_session(g) != task_session(group_leader))
967 goto out; 967 goto out;
968 } 968 }
@@ -971,13 +971,10 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
971 if (err) 971 if (err)
972 goto out; 972 goto out;
973 973
974 if (task_pgrp_nr_ns(p, ns) != pgid) { 974 if (task_pgrp(p) != pgrp) {
975 struct pid *pid;
976
977 detach_pid(p, PIDTYPE_PGID); 975 detach_pid(p, PIDTYPE_PGID);
978 pid = find_vpid(pgid); 976 attach_pid(p, PIDTYPE_PGID, pgrp);
979 attach_pid(p, PIDTYPE_PGID, pid); 977 set_task_pgrp(p, pid_nr(pgrp));
980 set_task_pgrp(p, pid_nr(pid));
981 } 978 }
982 979
983 err = 0; 980 err = 0;
@@ -994,17 +991,14 @@ asmlinkage long sys_getpgid(pid_t pid)
994 else { 991 else {
995 int retval; 992 int retval;
996 struct task_struct *p; 993 struct task_struct *p;
997 struct pid_namespace *ns;
998
999 ns = current->nsproxy->pid_ns;
1000 994
1001 read_lock(&tasklist_lock); 995 read_lock(&tasklist_lock);
1002 p = find_task_by_pid_ns(pid, ns); 996 p = find_task_by_vpid(pid);
1003 retval = -ESRCH; 997 retval = -ESRCH;
1004 if (p) { 998 if (p) {
1005 retval = security_task_getpgid(p); 999 retval = security_task_getpgid(p);
1006 if (!retval) 1000 if (!retval)
1007 retval = task_pgrp_nr_ns(p, ns); 1001 retval = task_pgrp_vnr(p);
1008 } 1002 }
1009 read_unlock(&tasklist_lock); 1003 read_unlock(&tasklist_lock);
1010 return retval; 1004 return retval;
@@ -1028,19 +1022,16 @@ asmlinkage long sys_getsid(pid_t pid)
1028 else { 1022 else {
1029 int retval; 1023 int retval;
1030 struct task_struct *p; 1024 struct task_struct *p;
1031 struct pid_namespace *ns;
1032
1033 ns = current->nsproxy->pid_ns;
1034 1025
1035 read_lock(&tasklist_lock); 1026 rcu_read_lock();
1036 p = find_task_by_pid_ns(pid, ns); 1027 p = find_task_by_vpid(pid);
1037 retval = -ESRCH; 1028 retval = -ESRCH;
1038 if (p) { 1029 if (p) {
1039 retval = security_task_getsid(p); 1030 retval = security_task_getsid(p);
1040 if (!retval) 1031 if (!retval)
1041 retval = task_session_nr_ns(p, ns); 1032 retval = task_session_vnr(p);
1042 } 1033 }
1043 read_unlock(&tasklist_lock); 1034 rcu_read_unlock();
1044 return retval; 1035 return retval;
1045 } 1036 }
1046} 1037}
@@ -1048,35 +1039,29 @@ asmlinkage long sys_getsid(pid_t pid)
1048asmlinkage long sys_setsid(void) 1039asmlinkage long sys_setsid(void)
1049{ 1040{
1050 struct task_struct *group_leader = current->group_leader; 1041 struct task_struct *group_leader = current->group_leader;
1051 pid_t session; 1042 struct pid *sid = task_pid(group_leader);
1043 pid_t session = pid_vnr(sid);
1052 int err = -EPERM; 1044 int err = -EPERM;
1053 1045
1054 write_lock_irq(&tasklist_lock); 1046 write_lock_irq(&tasklist_lock);
1055
1056 /* Fail if I am already a session leader */ 1047 /* Fail if I am already a session leader */
1057 if (group_leader->signal->leader) 1048 if (group_leader->signal->leader)
1058 goto out; 1049 goto out;
1059 1050
1060 session = group_leader->pid;
1061 /* Fail if a process group id already exists that equals the 1051 /* Fail if a process group id already exists that equals the
1062 * proposed session id. 1052 * proposed session id.
1063 *
1064 * Don't check if session id == 1 because kernel threads use this
1065 * session id and so the check will always fail and make it so
1066 * init cannot successfully call setsid.
1067 */ 1053 */
1068 if (session > 1 && find_task_by_pid_type_ns(PIDTYPE_PGID, 1054 if (pid_task(sid, PIDTYPE_PGID))
1069 session, &init_pid_ns))
1070 goto out; 1055 goto out;
1071 1056
1072 group_leader->signal->leader = 1; 1057 group_leader->signal->leader = 1;
1073 __set_special_pids(session, session); 1058 __set_special_pids(sid);
1074 1059
1075 spin_lock(&group_leader->sighand->siglock); 1060 spin_lock(&group_leader->sighand->siglock);
1076 group_leader->signal->tty = NULL; 1061 group_leader->signal->tty = NULL;
1077 spin_unlock(&group_leader->sighand->siglock); 1062 spin_unlock(&group_leader->sighand->siglock);
1078 1063
1079 err = task_pgrp_vnr(group_leader); 1064 err = session;
1080out: 1065out:
1081 write_unlock_irq(&tasklist_lock); 1066 write_unlock_irq(&tasklist_lock);
1082 return err; 1067 return err;
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8c98d8147d88..d41ef6b4cf72 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -37,7 +37,6 @@
37#include <linux/highuid.h> 37#include <linux/highuid.h>
38#include <linux/writeback.h> 38#include <linux/writeback.h>
39#include <linux/hugetlb.h> 39#include <linux/hugetlb.h>
40#include <linux/security.h>
41#include <linux/initrd.h> 40#include <linux/initrd.h>
42#include <linux/times.h> 41#include <linux/times.h>
43#include <linux/limits.h> 42#include <linux/limits.h>
@@ -74,8 +73,6 @@ extern int suid_dumpable;
74extern char core_pattern[]; 73extern char core_pattern[];
75extern int pid_max; 74extern int pid_max;
76extern int min_free_kbytes; 75extern int min_free_kbytes;
77extern int printk_ratelimit_jiffies;
78extern int printk_ratelimit_burst;
79extern int pid_max_min, pid_max_max; 76extern int pid_max_min, pid_max_max;
80extern int sysctl_drop_caches; 77extern int sysctl_drop_caches;
81extern int percpu_pagelist_fraction; 78extern int percpu_pagelist_fraction;
@@ -491,14 +488,6 @@ static struct ctl_table kern_table[] = {
491 .mode = 0644, 488 .mode = 0644,
492 .proc_handler = &proc_dointvec, 489 .proc_handler = &proc_dointvec,
493 }, 490 },
494 {
495 .ctl_name = KERN_PRINTK,
496 .procname = "printk",
497 .data = &console_loglevel,
498 .maxlen = 4*sizeof(int),
499 .mode = 0644,
500 .proc_handler = &proc_dointvec,
501 },
502#ifdef CONFIG_KMOD 491#ifdef CONFIG_KMOD
503 { 492 {
504 .ctl_name = KERN_MODPROBE, 493 .ctl_name = KERN_MODPROBE,
@@ -645,6 +634,15 @@ static struct ctl_table kern_table[] = {
645 .mode = 0644, 634 .mode = 0644,
646 .proc_handler = &proc_dointvec, 635 .proc_handler = &proc_dointvec,
647 }, 636 },
637#if defined CONFIG_PRINTK
638 {
639 .ctl_name = KERN_PRINTK,
640 .procname = "printk",
641 .data = &console_loglevel,
642 .maxlen = 4*sizeof(int),
643 .mode = 0644,
644 .proc_handler = &proc_dointvec,
645 },
648 { 646 {
649 .ctl_name = KERN_PRINTK_RATELIMIT, 647 .ctl_name = KERN_PRINTK_RATELIMIT,
650 .procname = "printk_ratelimit", 648 .procname = "printk_ratelimit",
@@ -662,6 +660,7 @@ static struct ctl_table kern_table[] = {
662 .mode = 0644, 660 .mode = 0644,
663 .proc_handler = &proc_dointvec, 661 .proc_handler = &proc_dointvec,
664 }, 662 },
663#endif
665 { 664 {
666 .ctl_name = KERN_NGROUPS_MAX, 665 .ctl_name = KERN_NGROUPS_MAX,
667 .procname = "ngroups_max", 666 .procname = "ngroups_max",
@@ -982,7 +981,7 @@ static struct ctl_table vm_table[] = {
982 .data = &nr_overcommit_huge_pages, 981 .data = &nr_overcommit_huge_pages,
983 .maxlen = sizeof(nr_overcommit_huge_pages), 982 .maxlen = sizeof(nr_overcommit_huge_pages),
984 .mode = 0644, 983 .mode = 0644,
985 .proc_handler = &proc_doulongvec_minmax, 984 .proc_handler = &hugetlb_overcommit_handler,
986 }, 985 },
987#endif 986#endif
988 { 987 {
@@ -2488,7 +2487,7 @@ static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp
2488 pid_t tmp; 2487 pid_t tmp;
2489 int r; 2488 int r;
2490 2489
2491 tmp = pid_nr_ns(cad_pid, current->nsproxy->pid_ns); 2490 tmp = pid_vnr(cad_pid);
2492 2491
2493 r = __do_proc_dointvec(&tmp, table, write, filp, buffer, 2492 r = __do_proc_dointvec(&tmp, table, write, filp, buffer,
2494 lenp, ppos, NULL, NULL); 2493 lenp, ppos, NULL, NULL);
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
index 006365b69eaf..c09350d564f2 100644
--- a/kernel/sysctl_check.c
+++ b/kernel/sysctl_check.c
@@ -8,10 +8,10 @@
8struct trans_ctl_table { 8struct trans_ctl_table {
9 int ctl_name; 9 int ctl_name;
10 const char *procname; 10 const char *procname;
11 struct trans_ctl_table *child; 11 const struct trans_ctl_table *child;
12}; 12};
13 13
14static struct trans_ctl_table trans_random_table[] = { 14static const struct trans_ctl_table trans_random_table[] = {
15 { RANDOM_POOLSIZE, "poolsize" }, 15 { RANDOM_POOLSIZE, "poolsize" },
16 { RANDOM_ENTROPY_COUNT, "entropy_avail" }, 16 { RANDOM_ENTROPY_COUNT, "entropy_avail" },
17 { RANDOM_READ_THRESH, "read_wakeup_threshold" }, 17 { RANDOM_READ_THRESH, "read_wakeup_threshold" },
@@ -21,13 +21,13 @@ static struct trans_ctl_table trans_random_table[] = {
21 {} 21 {}
22}; 22};
23 23
24static struct trans_ctl_table trans_pty_table[] = { 24static const struct trans_ctl_table trans_pty_table[] = {
25 { PTY_MAX, "max" }, 25 { PTY_MAX, "max" },
26 { PTY_NR, "nr" }, 26 { PTY_NR, "nr" },
27 {} 27 {}
28}; 28};
29 29
30static struct trans_ctl_table trans_kern_table[] = { 30static const struct trans_ctl_table trans_kern_table[] = {
31 { KERN_OSTYPE, "ostype" }, 31 { KERN_OSTYPE, "ostype" },
32 { KERN_OSRELEASE, "osrelease" }, 32 { KERN_OSRELEASE, "osrelease" },
33 /* KERN_OSREV not used */ 33 /* KERN_OSREV not used */
@@ -107,7 +107,7 @@ static struct trans_ctl_table trans_kern_table[] = {
107 {} 107 {}
108}; 108};
109 109
110static struct trans_ctl_table trans_vm_table[] = { 110static const struct trans_ctl_table trans_vm_table[] = {
111 { VM_OVERCOMMIT_MEMORY, "overcommit_memory" }, 111 { VM_OVERCOMMIT_MEMORY, "overcommit_memory" },
112 { VM_PAGE_CLUSTER, "page-cluster" }, 112 { VM_PAGE_CLUSTER, "page-cluster" },
113 { VM_DIRTY_BACKGROUND, "dirty_background_ratio" }, 113 { VM_DIRTY_BACKGROUND, "dirty_background_ratio" },
@@ -139,7 +139,7 @@ static struct trans_ctl_table trans_vm_table[] = {
139 {} 139 {}
140}; 140};
141 141
142static struct trans_ctl_table trans_net_core_table[] = { 142static const struct trans_ctl_table trans_net_core_table[] = {
143 { NET_CORE_WMEM_MAX, "wmem_max" }, 143 { NET_CORE_WMEM_MAX, "wmem_max" },
144 { NET_CORE_RMEM_MAX, "rmem_max" }, 144 { NET_CORE_RMEM_MAX, "rmem_max" },
145 { NET_CORE_WMEM_DEFAULT, "wmem_default" }, 145 { NET_CORE_WMEM_DEFAULT, "wmem_default" },
@@ -165,14 +165,14 @@ static struct trans_ctl_table trans_net_core_table[] = {
165 {}, 165 {},
166}; 166};
167 167
168static struct trans_ctl_table trans_net_unix_table[] = { 168static const struct trans_ctl_table trans_net_unix_table[] = {
169 /* NET_UNIX_DESTROY_DELAY unused */ 169 /* NET_UNIX_DESTROY_DELAY unused */
170 /* NET_UNIX_DELETE_DELAY unused */ 170 /* NET_UNIX_DELETE_DELAY unused */
171 { NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" }, 171 { NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" },
172 {} 172 {}
173}; 173};
174 174
175static struct trans_ctl_table trans_net_ipv4_route_table[] = { 175static const struct trans_ctl_table trans_net_ipv4_route_table[] = {
176 { NET_IPV4_ROUTE_FLUSH, "flush" }, 176 { NET_IPV4_ROUTE_FLUSH, "flush" },
177 { NET_IPV4_ROUTE_MIN_DELAY, "min_delay" }, 177 { NET_IPV4_ROUTE_MIN_DELAY, "min_delay" },
178 { NET_IPV4_ROUTE_MAX_DELAY, "max_delay" }, 178 { NET_IPV4_ROUTE_MAX_DELAY, "max_delay" },
@@ -195,7 +195,7 @@ static struct trans_ctl_table trans_net_ipv4_route_table[] = {
195 {} 195 {}
196}; 196};
197 197
198static struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = { 198static const struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
199 { NET_IPV4_CONF_FORWARDING, "forwarding" }, 199 { NET_IPV4_CONF_FORWARDING, "forwarding" },
200 { NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" }, 200 { NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" },
201 201
@@ -222,14 +222,14 @@ static struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
222 {} 222 {}
223}; 223};
224 224
225static struct trans_ctl_table trans_net_ipv4_conf_table[] = { 225static const struct trans_ctl_table trans_net_ipv4_conf_table[] = {
226 { NET_PROTO_CONF_ALL, "all", trans_net_ipv4_conf_vars_table }, 226 { NET_PROTO_CONF_ALL, "all", trans_net_ipv4_conf_vars_table },
227 { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv4_conf_vars_table }, 227 { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv4_conf_vars_table },
228 { 0, NULL, trans_net_ipv4_conf_vars_table }, 228 { 0, NULL, trans_net_ipv4_conf_vars_table },
229 {} 229 {}
230}; 230};
231 231
232static struct trans_ctl_table trans_net_neigh_vars_table[] = { 232static const struct trans_ctl_table trans_net_neigh_vars_table[] = {
233 { NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" }, 233 { NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
234 { NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" }, 234 { NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
235 { NET_NEIGH_APP_SOLICIT, "app_solicit" }, 235 { NET_NEIGH_APP_SOLICIT, "app_solicit" },
@@ -251,13 +251,13 @@ static struct trans_ctl_table trans_net_neigh_vars_table[] = {
251 {} 251 {}
252}; 252};
253 253
254static struct trans_ctl_table trans_net_neigh_table[] = { 254static const struct trans_ctl_table trans_net_neigh_table[] = {
255 { NET_PROTO_CONF_DEFAULT, "default", trans_net_neigh_vars_table }, 255 { NET_PROTO_CONF_DEFAULT, "default", trans_net_neigh_vars_table },
256 { 0, NULL, trans_net_neigh_vars_table }, 256 { 0, NULL, trans_net_neigh_vars_table },
257 {} 257 {}
258}; 258};
259 259
260static struct trans_ctl_table trans_net_ipv4_netfilter_table[] = { 260static const struct trans_ctl_table trans_net_ipv4_netfilter_table[] = {
261 { NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" }, 261 { NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" },
262 262
263 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "ip_conntrack_tcp_timeout_syn_sent" }, 263 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "ip_conntrack_tcp_timeout_syn_sent" },
@@ -294,7 +294,7 @@ static struct trans_ctl_table trans_net_ipv4_netfilter_table[] = {
294 {} 294 {}
295}; 295};
296 296
297static struct trans_ctl_table trans_net_ipv4_table[] = { 297static const struct trans_ctl_table trans_net_ipv4_table[] = {
298 { NET_IPV4_FORWARD, "ip_forward" }, 298 { NET_IPV4_FORWARD, "ip_forward" },
299 { NET_IPV4_DYNADDR, "ip_dynaddr" }, 299 { NET_IPV4_DYNADDR, "ip_dynaddr" },
300 300
@@ -393,13 +393,13 @@ static struct trans_ctl_table trans_net_ipv4_table[] = {
393 {} 393 {}
394}; 394};
395 395
396static struct trans_ctl_table trans_net_ipx_table[] = { 396static const struct trans_ctl_table trans_net_ipx_table[] = {
397 { NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" }, 397 { NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" },
398 /* NET_IPX_FORWARDING unused */ 398 /* NET_IPX_FORWARDING unused */
399 {} 399 {}
400}; 400};
401 401
402static struct trans_ctl_table trans_net_atalk_table[] = { 402static const struct trans_ctl_table trans_net_atalk_table[] = {
403 { NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" }, 403 { NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" },
404 { NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" }, 404 { NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" },
405 { NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" }, 405 { NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" },
@@ -407,7 +407,7 @@ static struct trans_ctl_table trans_net_atalk_table[] = {
407 {}, 407 {},
408}; 408};
409 409
410static struct trans_ctl_table trans_net_netrom_table[] = { 410static const struct trans_ctl_table trans_net_netrom_table[] = {
411 { NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" }, 411 { NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" },
412 { NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" }, 412 { NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" },
413 { NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" }, 413 { NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" },
@@ -423,7 +423,7 @@ static struct trans_ctl_table trans_net_netrom_table[] = {
423 {} 423 {}
424}; 424};
425 425
426static struct trans_ctl_table trans_net_ax25_param_table[] = { 426static const struct trans_ctl_table trans_net_ax25_param_table[] = {
427 { NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" }, 427 { NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
428 { NET_AX25_DEFAULT_MODE, "ax25_default_mode" }, 428 { NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
429 { NET_AX25_BACKOFF_TYPE, "backoff_type" }, 429 { NET_AX25_BACKOFF_TYPE, "backoff_type" },
@@ -441,12 +441,12 @@ static struct trans_ctl_table trans_net_ax25_param_table[] = {
441 {} 441 {}
442}; 442};
443 443
444static struct trans_ctl_table trans_net_ax25_table[] = { 444static const struct trans_ctl_table trans_net_ax25_table[] = {
445 { 0, NULL, trans_net_ax25_param_table }, 445 { 0, NULL, trans_net_ax25_param_table },
446 {} 446 {}
447}; 447};
448 448
449static struct trans_ctl_table trans_net_bridge_table[] = { 449static const struct trans_ctl_table trans_net_bridge_table[] = {
450 { NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" }, 450 { NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" },
451 { NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" }, 451 { NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" },
452 { NET_BRIDGE_NF_CALL_IP6TABLES, "bridge-nf-call-ip6tables" }, 452 { NET_BRIDGE_NF_CALL_IP6TABLES, "bridge-nf-call-ip6tables" },
@@ -455,7 +455,7 @@ static struct trans_ctl_table trans_net_bridge_table[] = {
455 {} 455 {}
456}; 456};
457 457
458static struct trans_ctl_table trans_net_rose_table[] = { 458static const struct trans_ctl_table trans_net_rose_table[] = {
459 { NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" }, 459 { NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
460 { NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" }, 460 { NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
461 { NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" }, 461 { NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
@@ -469,7 +469,7 @@ static struct trans_ctl_table trans_net_rose_table[] = {
469 {} 469 {}
470}; 470};
471 471
472static struct trans_ctl_table trans_net_ipv6_conf_var_table[] = { 472static const struct trans_ctl_table trans_net_ipv6_conf_var_table[] = {
473 { NET_IPV6_FORWARDING, "forwarding" }, 473 { NET_IPV6_FORWARDING, "forwarding" },
474 { NET_IPV6_HOP_LIMIT, "hop_limit" }, 474 { NET_IPV6_HOP_LIMIT, "hop_limit" },
475 { NET_IPV6_MTU, "mtu" }, 475 { NET_IPV6_MTU, "mtu" },
@@ -497,14 +497,14 @@ static struct trans_ctl_table trans_net_ipv6_conf_var_table[] = {
497 {} 497 {}
498}; 498};
499 499
500static struct trans_ctl_table trans_net_ipv6_conf_table[] = { 500static const struct trans_ctl_table trans_net_ipv6_conf_table[] = {
501 { NET_PROTO_CONF_ALL, "all", trans_net_ipv6_conf_var_table }, 501 { NET_PROTO_CONF_ALL, "all", trans_net_ipv6_conf_var_table },
502 { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv6_conf_var_table }, 502 { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv6_conf_var_table },
503 { 0, NULL, trans_net_ipv6_conf_var_table }, 503 { 0, NULL, trans_net_ipv6_conf_var_table },
504 {} 504 {}
505}; 505};
506 506
507static struct trans_ctl_table trans_net_ipv6_route_table[] = { 507static const struct trans_ctl_table trans_net_ipv6_route_table[] = {
508 { NET_IPV6_ROUTE_FLUSH, "flush" }, 508 { NET_IPV6_ROUTE_FLUSH, "flush" },
509 { NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" }, 509 { NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" },
510 { NET_IPV6_ROUTE_MAX_SIZE, "max_size" }, 510 { NET_IPV6_ROUTE_MAX_SIZE, "max_size" },
@@ -518,12 +518,12 @@ static struct trans_ctl_table trans_net_ipv6_route_table[] = {
518 {} 518 {}
519}; 519};
520 520
521static struct trans_ctl_table trans_net_ipv6_icmp_table[] = { 521static const struct trans_ctl_table trans_net_ipv6_icmp_table[] = {
522 { NET_IPV6_ICMP_RATELIMIT, "ratelimit" }, 522 { NET_IPV6_ICMP_RATELIMIT, "ratelimit" },
523 {} 523 {}
524}; 524};
525 525
526static struct trans_ctl_table trans_net_ipv6_table[] = { 526static const struct trans_ctl_table trans_net_ipv6_table[] = {
527 { NET_IPV6_CONF, "conf", trans_net_ipv6_conf_table }, 527 { NET_IPV6_CONF, "conf", trans_net_ipv6_conf_table },
528 { NET_IPV6_NEIGH, "neigh", trans_net_neigh_table }, 528 { NET_IPV6_NEIGH, "neigh", trans_net_neigh_table },
529 { NET_IPV6_ROUTE, "route", trans_net_ipv6_route_table }, 529 { NET_IPV6_ROUTE, "route", trans_net_ipv6_route_table },
@@ -538,7 +538,7 @@ static struct trans_ctl_table trans_net_ipv6_table[] = {
538 {} 538 {}
539}; 539};
540 540
541static struct trans_ctl_table trans_net_x25_table[] = { 541static const struct trans_ctl_table trans_net_x25_table[] = {
542 { NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" }, 542 { NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
543 { NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" }, 543 { NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
544 { NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" }, 544 { NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
@@ -548,13 +548,13 @@ static struct trans_ctl_table trans_net_x25_table[] = {
548 {} 548 {}
549}; 549};
550 550
551static struct trans_ctl_table trans_net_tr_table[] = { 551static const struct trans_ctl_table trans_net_tr_table[] = {
552 { NET_TR_RIF_TIMEOUT, "rif_timeout" }, 552 { NET_TR_RIF_TIMEOUT, "rif_timeout" },
553 {} 553 {}
554}; 554};
555 555
556 556
557static struct trans_ctl_table trans_net_decnet_conf_vars[] = { 557static const struct trans_ctl_table trans_net_decnet_conf_vars[] = {
558 { NET_DECNET_CONF_DEV_FORWARDING, "forwarding" }, 558 { NET_DECNET_CONF_DEV_FORWARDING, "forwarding" },
559 { NET_DECNET_CONF_DEV_PRIORITY, "priority" }, 559 { NET_DECNET_CONF_DEV_PRIORITY, "priority" },
560 { NET_DECNET_CONF_DEV_T2, "t2" }, 560 { NET_DECNET_CONF_DEV_T2, "t2" },
@@ -562,12 +562,12 @@ static struct trans_ctl_table trans_net_decnet_conf_vars[] = {
562 {} 562 {}
563}; 563};
564 564
565static struct trans_ctl_table trans_net_decnet_conf[] = { 565static const struct trans_ctl_table trans_net_decnet_conf[] = {
566 { 0, NULL, trans_net_decnet_conf_vars }, 566 { 0, NULL, trans_net_decnet_conf_vars },
567 {} 567 {}
568}; 568};
569 569
570static struct trans_ctl_table trans_net_decnet_table[] = { 570static const struct trans_ctl_table trans_net_decnet_table[] = {
571 { NET_DECNET_CONF, "conf", trans_net_decnet_conf }, 571 { NET_DECNET_CONF, "conf", trans_net_decnet_conf },
572 { NET_DECNET_NODE_ADDRESS, "node_address" }, 572 { NET_DECNET_NODE_ADDRESS, "node_address" },
573 { NET_DECNET_NODE_NAME, "node_name" }, 573 { NET_DECNET_NODE_NAME, "node_name" },
@@ -585,7 +585,7 @@ static struct trans_ctl_table trans_net_decnet_table[] = {
585 {} 585 {}
586}; 586};
587 587
588static struct trans_ctl_table trans_net_sctp_table[] = { 588static const struct trans_ctl_table trans_net_sctp_table[] = {
589 { NET_SCTP_RTO_INITIAL, "rto_initial" }, 589 { NET_SCTP_RTO_INITIAL, "rto_initial" },
590 { NET_SCTP_RTO_MIN, "rto_min" }, 590 { NET_SCTP_RTO_MIN, "rto_min" },
591 { NET_SCTP_RTO_MAX, "rto_max" }, 591 { NET_SCTP_RTO_MAX, "rto_max" },
@@ -606,7 +606,7 @@ static struct trans_ctl_table trans_net_sctp_table[] = {
606 {} 606 {}
607}; 607};
608 608
609static struct trans_ctl_table trans_net_llc_llc2_timeout_table[] = { 609static const struct trans_ctl_table trans_net_llc_llc2_timeout_table[] = {
610 { NET_LLC2_ACK_TIMEOUT, "ack" }, 610 { NET_LLC2_ACK_TIMEOUT, "ack" },
611 { NET_LLC2_P_TIMEOUT, "p" }, 611 { NET_LLC2_P_TIMEOUT, "p" },
612 { NET_LLC2_REJ_TIMEOUT, "rej" }, 612 { NET_LLC2_REJ_TIMEOUT, "rej" },
@@ -614,23 +614,23 @@ static struct trans_ctl_table trans_net_llc_llc2_timeout_table[] = {
614 {} 614 {}
615}; 615};
616 616
617static struct trans_ctl_table trans_net_llc_station_table[] = { 617static const struct trans_ctl_table trans_net_llc_station_table[] = {
618 { NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" }, 618 { NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" },
619 {} 619 {}
620}; 620};
621 621
622static struct trans_ctl_table trans_net_llc_llc2_table[] = { 622static const struct trans_ctl_table trans_net_llc_llc2_table[] = {
623 { NET_LLC2, "timeout", trans_net_llc_llc2_timeout_table }, 623 { NET_LLC2, "timeout", trans_net_llc_llc2_timeout_table },
624 {} 624 {}
625}; 625};
626 626
627static struct trans_ctl_table trans_net_llc_table[] = { 627static const struct trans_ctl_table trans_net_llc_table[] = {
628 { NET_LLC2, "llc2", trans_net_llc_llc2_table }, 628 { NET_LLC2, "llc2", trans_net_llc_llc2_table },
629 { NET_LLC_STATION, "station", trans_net_llc_station_table }, 629 { NET_LLC_STATION, "station", trans_net_llc_station_table },
630 {} 630 {}
631}; 631};
632 632
633static struct trans_ctl_table trans_net_netfilter_table[] = { 633static const struct trans_ctl_table trans_net_netfilter_table[] = {
634 { NET_NF_CONNTRACK_MAX, "nf_conntrack_max" }, 634 { NET_NF_CONNTRACK_MAX, "nf_conntrack_max" },
635 { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "nf_conntrack_tcp_timeout_syn_sent" }, 635 { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "nf_conntrack_tcp_timeout_syn_sent" },
636 { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "nf_conntrack_tcp_timeout_syn_recv" }, 636 { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "nf_conntrack_tcp_timeout_syn_recv" },
@@ -667,12 +667,12 @@ static struct trans_ctl_table trans_net_netfilter_table[] = {
667 {} 667 {}
668}; 668};
669 669
670static struct trans_ctl_table trans_net_dccp_table[] = { 670static const struct trans_ctl_table trans_net_dccp_table[] = {
671 { NET_DCCP_DEFAULT, "default" }, 671 { NET_DCCP_DEFAULT, "default" },
672 {} 672 {}
673}; 673};
674 674
675static struct trans_ctl_table trans_net_irda_table[] = { 675static const struct trans_ctl_table trans_net_irda_table[] = {
676 { NET_IRDA_DISCOVERY, "discovery" }, 676 { NET_IRDA_DISCOVERY, "discovery" },
677 { NET_IRDA_DEVNAME, "devname" }, 677 { NET_IRDA_DEVNAME, "devname" },
678 { NET_IRDA_DEBUG, "debug" }, 678 { NET_IRDA_DEBUG, "debug" },
@@ -690,7 +690,7 @@ static struct trans_ctl_table trans_net_irda_table[] = {
690 {} 690 {}
691}; 691};
692 692
693static struct trans_ctl_table trans_net_table[] = { 693static const struct trans_ctl_table trans_net_table[] = {
694 { NET_CORE, "core", trans_net_core_table }, 694 { NET_CORE, "core", trans_net_core_table },
695 /* NET_ETHER not used */ 695 /* NET_ETHER not used */
696 /* NET_802 not used */ 696 /* NET_802 not used */
@@ -716,7 +716,7 @@ static struct trans_ctl_table trans_net_table[] = {
716 {} 716 {}
717}; 717};
718 718
719static struct trans_ctl_table trans_fs_quota_table[] = { 719static const struct trans_ctl_table trans_fs_quota_table[] = {
720 { FS_DQ_LOOKUPS, "lookups" }, 720 { FS_DQ_LOOKUPS, "lookups" },
721 { FS_DQ_DROPS, "drops" }, 721 { FS_DQ_DROPS, "drops" },
722 { FS_DQ_READS, "reads" }, 722 { FS_DQ_READS, "reads" },
@@ -729,7 +729,7 @@ static struct trans_ctl_table trans_fs_quota_table[] = {
729 {} 729 {}
730}; 730};
731 731
732static struct trans_ctl_table trans_fs_xfs_table[] = { 732static const struct trans_ctl_table trans_fs_xfs_table[] = {
733 { XFS_RESTRICT_CHOWN, "restrict_chown" }, 733 { XFS_RESTRICT_CHOWN, "restrict_chown" },
734 { XFS_SGID_INHERIT, "irix_sgid_inherit" }, 734 { XFS_SGID_INHERIT, "irix_sgid_inherit" },
735 { XFS_SYMLINK_MODE, "irix_symlink_mode" }, 735 { XFS_SYMLINK_MODE, "irix_symlink_mode" },
@@ -750,24 +750,24 @@ static struct trans_ctl_table trans_fs_xfs_table[] = {
750 {} 750 {}
751}; 751};
752 752
753static struct trans_ctl_table trans_fs_ocfs2_nm_table[] = { 753static const struct trans_ctl_table trans_fs_ocfs2_nm_table[] = {
754 { 1, "hb_ctl_path" }, 754 { 1, "hb_ctl_path" },
755 {} 755 {}
756}; 756};
757 757
758static struct trans_ctl_table trans_fs_ocfs2_table[] = { 758static const struct trans_ctl_table trans_fs_ocfs2_table[] = {
759 { 1, "nm", trans_fs_ocfs2_nm_table }, 759 { 1, "nm", trans_fs_ocfs2_nm_table },
760 {} 760 {}
761}; 761};
762 762
763static struct trans_ctl_table trans_inotify_table[] = { 763static const struct trans_ctl_table trans_inotify_table[] = {
764 { INOTIFY_MAX_USER_INSTANCES, "max_user_instances" }, 764 { INOTIFY_MAX_USER_INSTANCES, "max_user_instances" },
765 { INOTIFY_MAX_USER_WATCHES, "max_user_watches" }, 765 { INOTIFY_MAX_USER_WATCHES, "max_user_watches" },
766 { INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" }, 766 { INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" },
767 {} 767 {}
768}; 768};
769 769
770static struct trans_ctl_table trans_fs_table[] = { 770static const struct trans_ctl_table trans_fs_table[] = {
771 { FS_NRINODE, "inode-nr" }, 771 { FS_NRINODE, "inode-nr" },
772 { FS_STATINODE, "inode-state" }, 772 { FS_STATINODE, "inode-state" },
773 /* FS_MAXINODE unused */ 773 /* FS_MAXINODE unused */
@@ -793,11 +793,11 @@ static struct trans_ctl_table trans_fs_table[] = {
793 {} 793 {}
794}; 794};
795 795
796static struct trans_ctl_table trans_debug_table[] = { 796static const struct trans_ctl_table trans_debug_table[] = {
797 {} 797 {}
798}; 798};
799 799
800static struct trans_ctl_table trans_cdrom_table[] = { 800static const struct trans_ctl_table trans_cdrom_table[] = {
801 { DEV_CDROM_INFO, "info" }, 801 { DEV_CDROM_INFO, "info" },
802 { DEV_CDROM_AUTOCLOSE, "autoclose" }, 802 { DEV_CDROM_AUTOCLOSE, "autoclose" },
803 { DEV_CDROM_AUTOEJECT, "autoeject" }, 803 { DEV_CDROM_AUTOEJECT, "autoeject" },
@@ -807,12 +807,12 @@ static struct trans_ctl_table trans_cdrom_table[] = {
807 {} 807 {}
808}; 808};
809 809
810static struct trans_ctl_table trans_ipmi_table[] = { 810static const struct trans_ctl_table trans_ipmi_table[] = {
811 { DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" }, 811 { DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" },
812 {} 812 {}
813}; 813};
814 814
815static struct trans_ctl_table trans_mac_hid_files[] = { 815static const struct trans_ctl_table trans_mac_hid_files[] = {
816 /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */ 816 /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
817 /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */ 817 /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
818 { DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" }, 818 { DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" },
@@ -822,35 +822,35 @@ static struct trans_ctl_table trans_mac_hid_files[] = {
822 {} 822 {}
823}; 823};
824 824
825static struct trans_ctl_table trans_raid_table[] = { 825static const struct trans_ctl_table trans_raid_table[] = {
826 { DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" }, 826 { DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" },
827 { DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" }, 827 { DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" },
828 {} 828 {}
829}; 829};
830 830
831static struct trans_ctl_table trans_scsi_table[] = { 831static const struct trans_ctl_table trans_scsi_table[] = {
832 { DEV_SCSI_LOGGING_LEVEL, "logging_level" }, 832 { DEV_SCSI_LOGGING_LEVEL, "logging_level" },
833 {} 833 {}
834}; 834};
835 835
836static struct trans_ctl_table trans_parport_default_table[] = { 836static const struct trans_ctl_table trans_parport_default_table[] = {
837 { DEV_PARPORT_DEFAULT_TIMESLICE, "timeslice" }, 837 { DEV_PARPORT_DEFAULT_TIMESLICE, "timeslice" },
838 { DEV_PARPORT_DEFAULT_SPINTIME, "spintime" }, 838 { DEV_PARPORT_DEFAULT_SPINTIME, "spintime" },
839 {} 839 {}
840}; 840};
841 841
842static struct trans_ctl_table trans_parport_device_table[] = { 842static const struct trans_ctl_table trans_parport_device_table[] = {
843 { DEV_PARPORT_DEVICE_TIMESLICE, "timeslice" }, 843 { DEV_PARPORT_DEVICE_TIMESLICE, "timeslice" },
844 {} 844 {}
845}; 845};
846 846
847static struct trans_ctl_table trans_parport_devices_table[] = { 847static const struct trans_ctl_table trans_parport_devices_table[] = {
848 { DEV_PARPORT_DEVICES_ACTIVE, "active" }, 848 { DEV_PARPORT_DEVICES_ACTIVE, "active" },
849 { 0, NULL, trans_parport_device_table }, 849 { 0, NULL, trans_parport_device_table },
850 {} 850 {}
851}; 851};
852 852
853static struct trans_ctl_table trans_parport_parport_table[] = { 853static const struct trans_ctl_table trans_parport_parport_table[] = {
854 { DEV_PARPORT_SPINTIME, "spintime" }, 854 { DEV_PARPORT_SPINTIME, "spintime" },
855 { DEV_PARPORT_BASE_ADDR, "base-addr" }, 855 { DEV_PARPORT_BASE_ADDR, "base-addr" },
856 { DEV_PARPORT_IRQ, "irq" }, 856 { DEV_PARPORT_IRQ, "irq" },
@@ -864,13 +864,13 @@ static struct trans_ctl_table trans_parport_parport_table[] = {
864 { DEV_PARPORT_AUTOPROBE + 4, "autoprobe3" }, 864 { DEV_PARPORT_AUTOPROBE + 4, "autoprobe3" },
865 {} 865 {}
866}; 866};
867static struct trans_ctl_table trans_parport_table[] = { 867static const struct trans_ctl_table trans_parport_table[] = {
868 { DEV_PARPORT_DEFAULT, "default", trans_parport_default_table }, 868 { DEV_PARPORT_DEFAULT, "default", trans_parport_default_table },
869 { 0, NULL, trans_parport_parport_table }, 869 { 0, NULL, trans_parport_parport_table },
870 {} 870 {}
871}; 871};
872 872
873static struct trans_ctl_table trans_dev_table[] = { 873static const struct trans_ctl_table trans_dev_table[] = {
874 { DEV_CDROM, "cdrom", trans_cdrom_table }, 874 { DEV_CDROM, "cdrom", trans_cdrom_table },
875 /* DEV_HWMON unused */ 875 /* DEV_HWMON unused */
876 { DEV_PARPORT, "parport", trans_parport_table }, 876 { DEV_PARPORT, "parport", trans_parport_table },
@@ -881,19 +881,19 @@ static struct trans_ctl_table trans_dev_table[] = {
881 {} 881 {}
882}; 882};
883 883
884static struct trans_ctl_table trans_bus_isa_table[] = { 884static const struct trans_ctl_table trans_bus_isa_table[] = {
885 { BUS_ISA_MEM_BASE, "membase" }, 885 { BUS_ISA_MEM_BASE, "membase" },
886 { BUS_ISA_PORT_BASE, "portbase" }, 886 { BUS_ISA_PORT_BASE, "portbase" },
887 { BUS_ISA_PORT_SHIFT, "portshift" }, 887 { BUS_ISA_PORT_SHIFT, "portshift" },
888 {} 888 {}
889}; 889};
890 890
891static struct trans_ctl_table trans_bus_table[] = { 891static const struct trans_ctl_table trans_bus_table[] = {
892 { CTL_BUS_ISA, "isa", trans_bus_isa_table }, 892 { CTL_BUS_ISA, "isa", trans_bus_isa_table },
893 {} 893 {}
894}; 894};
895 895
896static struct trans_ctl_table trans_arlan_conf_table0[] = { 896static const struct trans_ctl_table trans_arlan_conf_table0[] = {
897 { 1, "spreadingCode" }, 897 { 1, "spreadingCode" },
898 { 2, "channelNumber" }, 898 { 2, "channelNumber" },
899 { 3, "scramblingDisable" }, 899 { 3, "scramblingDisable" },
@@ -964,7 +964,7 @@ static struct trans_ctl_table trans_arlan_conf_table0[] = {
964 {} 964 {}
965}; 965};
966 966
967static struct trans_ctl_table trans_arlan_conf_table1[] = { 967static const struct trans_ctl_table trans_arlan_conf_table1[] = {
968 { 1, "spreadingCode" }, 968 { 1, "spreadingCode" },
969 { 2, "channelNumber" }, 969 { 2, "channelNumber" },
970 { 3, "scramblingDisable" }, 970 { 3, "scramblingDisable" },
@@ -1035,7 +1035,7 @@ static struct trans_ctl_table trans_arlan_conf_table1[] = {
1035 {} 1035 {}
1036}; 1036};
1037 1037
1038static struct trans_ctl_table trans_arlan_conf_table2[] = { 1038static const struct trans_ctl_table trans_arlan_conf_table2[] = {
1039 { 1, "spreadingCode" }, 1039 { 1, "spreadingCode" },
1040 { 2, "channelNumber" }, 1040 { 2, "channelNumber" },
1041 { 3, "scramblingDisable" }, 1041 { 3, "scramblingDisable" },
@@ -1106,7 +1106,7 @@ static struct trans_ctl_table trans_arlan_conf_table2[] = {
1106 {} 1106 {}
1107}; 1107};
1108 1108
1109static struct trans_ctl_table trans_arlan_conf_table3[] = { 1109static const struct trans_ctl_table trans_arlan_conf_table3[] = {
1110 { 1, "spreadingCode" }, 1110 { 1, "spreadingCode" },
1111 { 2, "channelNumber" }, 1111 { 2, "channelNumber" },
1112 { 3, "scramblingDisable" }, 1112 { 3, "scramblingDisable" },
@@ -1177,7 +1177,7 @@ static struct trans_ctl_table trans_arlan_conf_table3[] = {
1177 {} 1177 {}
1178}; 1178};
1179 1179
1180static struct trans_ctl_table trans_arlan_table[] = { 1180static const struct trans_ctl_table trans_arlan_table[] = {
1181 { 1, "arlan0", trans_arlan_conf_table0 }, 1181 { 1, "arlan0", trans_arlan_conf_table0 },
1182 { 2, "arlan1", trans_arlan_conf_table1 }, 1182 { 2, "arlan1", trans_arlan_conf_table1 },
1183 { 3, "arlan2", trans_arlan_conf_table2 }, 1183 { 3, "arlan2", trans_arlan_conf_table2 },
@@ -1185,13 +1185,13 @@ static struct trans_ctl_table trans_arlan_table[] = {
1185 {} 1185 {}
1186}; 1186};
1187 1187
1188static struct trans_ctl_table trans_s390dbf_table[] = { 1188static const struct trans_ctl_table trans_s390dbf_table[] = {
1189 { 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" }, 1189 { 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
1190 { 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" }, 1190 { 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
1191 {} 1191 {}
1192}; 1192};
1193 1193
1194static struct trans_ctl_table trans_sunrpc_table[] = { 1194static const struct trans_ctl_table trans_sunrpc_table[] = {
1195 { CTL_RPCDEBUG, "rpc_debug" }, 1195 { CTL_RPCDEBUG, "rpc_debug" },
1196 { CTL_NFSDEBUG, "nfs_debug" }, 1196 { CTL_NFSDEBUG, "nfs_debug" },
1197 { CTL_NFSDDEBUG, "nfsd_debug" }, 1197 { CTL_NFSDDEBUG, "nfsd_debug" },
@@ -1203,7 +1203,7 @@ static struct trans_ctl_table trans_sunrpc_table[] = {
1203 {} 1203 {}
1204}; 1204};
1205 1205
1206static struct trans_ctl_table trans_pm_table[] = { 1206static const struct trans_ctl_table trans_pm_table[] = {
1207 { 1 /* CTL_PM_SUSPEND */, "suspend" }, 1207 { 1 /* CTL_PM_SUSPEND */, "suspend" },
1208 { 2 /* CTL_PM_CMODE */, "cmode" }, 1208 { 2 /* CTL_PM_CMODE */, "cmode" },
1209 { 3 /* CTL_PM_P0 */, "p0" }, 1209 { 3 /* CTL_PM_P0 */, "p0" },
@@ -1211,13 +1211,13 @@ static struct trans_ctl_table trans_pm_table[] = {
1211 {} 1211 {}
1212}; 1212};
1213 1213
1214static struct trans_ctl_table trans_frv_table[] = { 1214static const struct trans_ctl_table trans_frv_table[] = {
1215 { 1, "cache-mode" }, 1215 { 1, "cache-mode" },
1216 { 2, "pin-cxnr" }, 1216 { 2, "pin-cxnr" },
1217 {} 1217 {}
1218}; 1218};
1219 1219
1220static struct trans_ctl_table trans_root_table[] = { 1220static const struct trans_ctl_table trans_root_table[] = {
1221 { CTL_KERN, "kernel", trans_kern_table }, 1221 { CTL_KERN, "kernel", trans_kern_table },
1222 { CTL_VM, "vm", trans_vm_table }, 1222 { CTL_VM, "vm", trans_vm_table },
1223 { CTL_NET, "net", trans_net_table }, 1223 { CTL_NET, "net", trans_net_table },
@@ -1261,15 +1261,14 @@ static struct ctl_table *sysctl_parent(struct ctl_table *table, int n)
1261 return table; 1261 return table;
1262} 1262}
1263 1263
1264static struct trans_ctl_table *sysctl_binary_lookup(struct ctl_table *table) 1264static const struct trans_ctl_table *sysctl_binary_lookup(struct ctl_table *table)
1265{ 1265{
1266 struct ctl_table *test; 1266 struct ctl_table *test;
1267 struct trans_ctl_table *ref; 1267 const struct trans_ctl_table *ref;
1268 int depth, cur_depth; 1268 int cur_depth;
1269 1269
1270 depth = sysctl_depth(table); 1270 cur_depth = sysctl_depth(table);
1271 1271
1272 cur_depth = depth;
1273 ref = trans_root_table; 1272 ref = trans_root_table;
1274repeat: 1273repeat:
1275 test = sysctl_parent(table, cur_depth); 1274 test = sysctl_parent(table, cur_depth);
@@ -1437,7 +1436,7 @@ static void sysctl_check_leaf(struct nsproxy *namespaces,
1437 1436
1438static void sysctl_check_bin_path(struct ctl_table *table, const char **fail) 1437static void sysctl_check_bin_path(struct ctl_table *table, const char **fail)
1439{ 1438{
1440 struct trans_ctl_table *ref; 1439 const struct trans_ctl_table *ref;
1441 1440
1442 ref = sysctl_binary_lookup(table); 1441 ref = sysctl_binary_lookup(table);
1443 if (table->ctl_name && !ref) 1442 if (table->ctl_name && !ref)
diff --git a/kernel/time.c b/kernel/time.c
index 33af3e55570d..a5ec013b6c80 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -39,6 +39,8 @@
39#include <asm/uaccess.h> 39#include <asm/uaccess.h>
40#include <asm/unistd.h> 40#include <asm/unistd.h>
41 41
42#include "timeconst.h"
43
42/* 44/*
43 * The timezone where the local system is located. Used as a default by some 45 * The timezone where the local system is located. Used as a default by some
44 * programs who obtain this value by using gettimeofday. 46 * programs who obtain this value by using gettimeofday.
@@ -93,7 +95,8 @@ asmlinkage long sys_stime(time_t __user *tptr)
93 95
94#endif /* __ARCH_WANT_SYS_TIME */ 96#endif /* __ARCH_WANT_SYS_TIME */
95 97
96asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __user *tz) 98asmlinkage long sys_gettimeofday(struct timeval __user *tv,
99 struct timezone __user *tz)
97{ 100{
98 if (likely(tv != NULL)) { 101 if (likely(tv != NULL)) {
99 struct timeval ktv; 102 struct timeval ktv;
@@ -118,7 +121,7 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __us
118 * hard to make the program warp the clock precisely n hours) or 121 * hard to make the program warp the clock precisely n hours) or
119 * compile in the timezone information into the kernel. Bad, bad.... 122 * compile in the timezone information into the kernel. Bad, bad....
120 * 123 *
121 * - TYT, 1992-01-01 124 * - TYT, 1992-01-01
122 * 125 *
123 * The best thing to do is to keep the CMOS clock in universal time (UTC) 126 * The best thing to do is to keep the CMOS clock in universal time (UTC)
124 * as real UNIX machines always do it. This avoids all headaches about 127 * as real UNIX machines always do it. This avoids all headaches about
@@ -240,7 +243,11 @@ unsigned int inline jiffies_to_msecs(const unsigned long j)
240#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) 243#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
241 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); 244 return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
242#else 245#else
243 return (j * MSEC_PER_SEC) / HZ; 246# if BITS_PER_LONG == 32
247 return ((u64)HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
248# else
249 return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
250# endif
244#endif 251#endif
245} 252}
246EXPORT_SYMBOL(jiffies_to_msecs); 253EXPORT_SYMBOL(jiffies_to_msecs);
@@ -252,7 +259,11 @@ unsigned int inline jiffies_to_usecs(const unsigned long j)
252#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) 259#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
253 return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC); 260 return (j + (HZ / USEC_PER_SEC) - 1)/(HZ / USEC_PER_SEC);
254#else 261#else
255 return (j * USEC_PER_SEC) / HZ; 262# if BITS_PER_LONG == 32
263 return ((u64)HZ_TO_USEC_MUL32 * j) >> HZ_TO_USEC_SHR32;
264# else
265 return (j * HZ_TO_USEC_NUM) / HZ_TO_USEC_DEN;
266# endif
256#endif 267#endif
257} 268}
258EXPORT_SYMBOL(jiffies_to_usecs); 269EXPORT_SYMBOL(jiffies_to_usecs);
@@ -267,7 +278,7 @@ EXPORT_SYMBOL(jiffies_to_usecs);
267 * 278 *
268 * This function should be only used for timestamps returned by 279 * This function should be only used for timestamps returned by
269 * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because 280 * current_kernel_time() or CURRENT_TIME, not with do_gettimeofday() because
270 * it doesn't handle the better resolution of the later. 281 * it doesn't handle the better resolution of the latter.
271 */ 282 */
272struct timespec timespec_trunc(struct timespec t, unsigned gran) 283struct timespec timespec_trunc(struct timespec t, unsigned gran)
273{ 284{
@@ -315,7 +326,7 @@ EXPORT_SYMBOL_GPL(getnstimeofday);
315 * This algorithm was first published by Gauss (I think). 326 * This algorithm was first published by Gauss (I think).
316 * 327 *
317 * WARNING: this function will overflow on 2106-02-07 06:28:16 on 328 * WARNING: this function will overflow on 2106-02-07 06:28:16 on
318 * machines were long is 32-bit! (However, as time_t is signed, we 329 * machines where long is 32-bit! (However, as time_t is signed, we
319 * will already get problems at other places on 2038-01-19 03:14:08) 330 * will already get problems at other places on 2038-01-19 03:14:08)
320 */ 331 */
321unsigned long 332unsigned long
@@ -352,7 +363,7 @@ EXPORT_SYMBOL(mktime);
352 * normalize to the timespec storage format 363 * normalize to the timespec storage format
353 * 364 *
354 * Note: The tv_nsec part is always in the range of 365 * Note: The tv_nsec part is always in the range of
355 * 0 <= tv_nsec < NSEC_PER_SEC 366 * 0 <= tv_nsec < NSEC_PER_SEC
356 * For negative values only the tv_sec field is negative ! 367 * For negative values only the tv_sec field is negative !
357 */ 368 */
358void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec) 369void set_normalized_timespec(struct timespec *ts, time_t sec, long nsec)
@@ -453,12 +464,13 @@ unsigned long msecs_to_jiffies(const unsigned int m)
453 /* 464 /*
454 * Generic case - multiply, round and divide. But first 465 * Generic case - multiply, round and divide. But first
455 * check that if we are doing a net multiplication, that 466 * check that if we are doing a net multiplication, that
456 * we wouldnt overflow: 467 * we wouldn't overflow:
457 */ 468 */
458 if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET)) 469 if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
459 return MAX_JIFFY_OFFSET; 470 return MAX_JIFFY_OFFSET;
460 471
461 return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; 472 return ((u64)MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
473 >> MSEC_TO_HZ_SHR32;
462#endif 474#endif
463} 475}
464EXPORT_SYMBOL(msecs_to_jiffies); 476EXPORT_SYMBOL(msecs_to_jiffies);
@@ -472,7 +484,8 @@ unsigned long usecs_to_jiffies(const unsigned int u)
472#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC) 484#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
473 return u * (HZ / USEC_PER_SEC); 485 return u * (HZ / USEC_PER_SEC);
474#else 486#else
475 return (u * HZ + USEC_PER_SEC - 1) / USEC_PER_SEC; 487 return ((u64)USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
488 >> USEC_TO_HZ_SHR32;
476#endif 489#endif
477} 490}
478EXPORT_SYMBOL(usecs_to_jiffies); 491EXPORT_SYMBOL(usecs_to_jiffies);
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 3e59fce6dd43..3d1e3e1a1971 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -133,7 +133,7 @@ static void clockevents_do_notify(unsigned long reason, void *dev)
133} 133}
134 134
135/* 135/*
136 * Called after a notify add to make devices availble which were 136 * Called after a notify add to make devices available which were
137 * released from the notifier call. 137 * released from the notifier call.
138 */ 138 */
139static void clockevents_notify_released(void) 139static void clockevents_notify_released(void)
@@ -218,6 +218,8 @@ void clockevents_exchange_device(struct clock_event_device *old,
218 */ 218 */
219void clockevents_notify(unsigned long reason, void *arg) 219void clockevents_notify(unsigned long reason, void *arg)
220{ 220{
221 struct list_head *node, *tmp;
222
221 spin_lock(&clockevents_lock); 223 spin_lock(&clockevents_lock);
222 clockevents_do_notify(reason, arg); 224 clockevents_do_notify(reason, arg);
223 225
@@ -227,13 +229,8 @@ void clockevents_notify(unsigned long reason, void *arg)
227 * Unregister the clock event devices which were 229 * Unregister the clock event devices which were
228 * released from the users in the notify chain. 230 * released from the users in the notify chain.
229 */ 231 */
230 while (!list_empty(&clockevents_released)) { 232 list_for_each_safe(node, tmp, &clockevents_released)
231 struct clock_event_device *dev; 233 list_del(node);
232
233 dev = list_entry(clockevents_released.next,
234 struct clock_event_device, list);
235 list_del(&dev->list);
236 }
237 break; 234 break;
238 default: 235 default:
239 break; 236 break;
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 81afb3927ecc..548c436a776b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -91,7 +91,6 @@ static void clocksource_ratewd(struct clocksource *cs, int64_t delta)
91 cs->name, delta); 91 cs->name, delta);
92 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG); 92 cs->flags &= ~(CLOCK_SOURCE_VALID_FOR_HRES | CLOCK_SOURCE_WATCHDOG);
93 clocksource_change_rating(cs, 0); 93 clocksource_change_rating(cs, 0);
94 cs->flags &= ~CLOCK_SOURCE_WATCHDOG;
95 list_del(&cs->wd_list); 94 list_del(&cs->wd_list);
96} 95}
97 96
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 88267f0a8471..fa9bb73dbdb4 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -681,7 +681,7 @@ int tick_check_oneshot_change(int allow_nohz)
681 if (ts->nohz_mode != NOHZ_MODE_INACTIVE) 681 if (ts->nohz_mode != NOHZ_MODE_INACTIVE)
682 return 0; 682 return 0;
683 683
684 if (!timekeeping_is_continuous() || !tick_is_oneshot_available()) 684 if (!timekeeping_valid_for_hres() || !tick_is_oneshot_available())
685 return 0; 685 return 0;
686 686
687 if (!allow_nohz) 687 if (!allow_nohz)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index cd5dbc4579c9..1af9fb050fe2 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -201,9 +201,9 @@ static inline s64 __get_nsec_offset(void) { return 0; }
201#endif 201#endif
202 202
203/** 203/**
204 * timekeeping_is_continuous - check to see if timekeeping is free running 204 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
205 */ 205 */
206int timekeeping_is_continuous(void) 206int timekeeping_valid_for_hres(void)
207{ 207{
208 unsigned long seq; 208 unsigned long seq;
209 int ret; 209 int ret;
@@ -364,7 +364,7 @@ static __always_inline int clocksource_bigadjust(s64 error, s64 *interval,
364 * with losing too many ticks, otherwise we would overadjust and 364 * with losing too many ticks, otherwise we would overadjust and
365 * produce an even larger error. The smaller the adjustment the 365 * produce an even larger error. The smaller the adjustment the
366 * faster we try to adjust for it, as lost ticks can do less harm 366 * faster we try to adjust for it, as lost ticks can do less harm
367 * here. This is tuned so that an error of about 1 msec is adusted 367 * here. This is tuned so that an error of about 1 msec is adjusted
368 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks). 368 * within about 1 sec (or 2^20 nsec in 2^SHIFT_HZ ticks).
369 */ 369 */
370 error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ); 370 error2 = clock->error >> (TICK_LENGTH_SHIFT + 22 - 2 * SHIFT_HZ);
diff --git a/kernel/timeconst.pl b/kernel/timeconst.pl
new file mode 100644
index 000000000000..62b1287932ed
--- /dev/null
+++ b/kernel/timeconst.pl
@@ -0,0 +1,402 @@
1#!/usr/bin/perl
2# -----------------------------------------------------------------------
3#
4# Copyright 2007 rPath, Inc. - All Rights Reserved
5#
6# This file is part of the Linux kernel, and is made available under
7# the terms of the GNU General Public License version 2 or (at your
8# option) any later version; incorporated herein by reference.
9#
10# -----------------------------------------------------------------------
11#
12
13#
14# Usage: timeconst.pl HZ > timeconst.h
15#
16
17# Precomputed values for systems without Math::BigInt
18# Generated by:
19# timeconst.pl --can 24 32 48 64 100 122 128 200 250 256 300 512 1000 1024 1200
20%canned_values = (
21 24 => [
22 '0xa6aaaaab','0x2aaaaaa',26,
23 '0xa6aaaaaaaaaaaaab','0x2aaaaaaaaaaaaaa',58,
24 125,3,
25 '0xc49ba5e4','0x1fbe76c8b4',37,
26 '0xc49ba5e353f7ceda','0x1fbe76c8b439581062',69,
27 3,125,
28 '0xa2c2aaab','0xaaaa',16,
29 '0xa2c2aaaaaaaaaaab','0xaaaaaaaaaaaa',48,
30 125000,3,
31 '0xc9539b89','0x7fffbce4217d',47,
32 '0xc9539b8887229e91','0x7fffbce4217d2849cb25',79,
33 3,125000,
34 ], 32 => [
35 '0xfa000000','0x6000000',27,
36 '0xfa00000000000000','0x600000000000000',59,
37 125,4,
38 '0x83126e98','0xfdf3b645a',36,
39 '0x83126e978d4fdf3c','0xfdf3b645a1cac0831',68,
40 4,125,
41 '0xf4240000','0x0',17,
42 '0xf424000000000000','0x0',49,
43 31250,1,
44 '0x8637bd06','0x3fff79c842fa',46,
45 '0x8637bd05af6c69b6','0x3fff79c842fa5093964a',78,
46 1,31250,
47 ], 48 => [
48 '0xa6aaaaab','0x6aaaaaa',27,
49 '0xa6aaaaaaaaaaaaab','0x6aaaaaaaaaaaaaa',59,
50 125,6,
51 '0xc49ba5e4','0xfdf3b645a',36,
52 '0xc49ba5e353f7ceda','0xfdf3b645a1cac0831',68,
53 6,125,
54 '0xa2c2aaab','0x15555',17,
55 '0xa2c2aaaaaaaaaaab','0x1555555555555',49,
56 62500,3,
57 '0xc9539b89','0x3fffbce4217d',46,
58 '0xc9539b8887229e91','0x3fffbce4217d2849cb25',78,
59 3,62500,
60 ], 64 => [
61 '0xfa000000','0xe000000',28,
62 '0xfa00000000000000','0xe00000000000000',60,
63 125,8,
64 '0x83126e98','0x7ef9db22d',35,
65 '0x83126e978d4fdf3c','0x7ef9db22d0e560418',67,
66 8,125,
67 '0xf4240000','0x0',18,
68 '0xf424000000000000','0x0',50,
69 15625,1,
70 '0x8637bd06','0x1fff79c842fa',45,
71 '0x8637bd05af6c69b6','0x1fff79c842fa5093964a',77,
72 1,15625,
73 ], 100 => [
74 '0xa0000000','0x0',28,
75 '0xa000000000000000','0x0',60,
76 10,1,
77 '0xcccccccd','0x733333333',35,
78 '0xcccccccccccccccd','0x73333333333333333',67,
79 1,10,
80 '0x9c400000','0x0',18,
81 '0x9c40000000000000','0x0',50,
82 10000,1,
83 '0xd1b71759','0x1fff2e48e8a7',45,
84 '0xd1b71758e219652c','0x1fff2e48e8a71de69ad4',77,
85 1,10000,
86 ], 122 => [
87 '0x8325c53f','0xfbcda3a',28,
88 '0x8325c53ef368eb05','0xfbcda3ac10c9714',60,
89 500,61,
90 '0xf9db22d1','0x7fbe76c8b',35,
91 '0xf9db22d0e560418a','0x7fbe76c8b43958106',67,
92 61,500,
93 '0x8012e2a0','0x3ef36',18,
94 '0x8012e29f79b47583','0x3ef368eb04325',50,
95 500000,61,
96 '0xffda4053','0x1ffffbce4217',45,
97 '0xffda4052d666a983','0x1ffffbce4217d2849cb2',77,
98 61,500000,
99 ], 128 => [
100 '0xfa000000','0x1e000000',29,
101 '0xfa00000000000000','0x1e00000000000000',61,
102 125,16,
103 '0x83126e98','0x3f7ced916',34,
104 '0x83126e978d4fdf3c','0x3f7ced916872b020c',66,
105 16,125,
106 '0xf4240000','0x40000',19,
107 '0xf424000000000000','0x4000000000000',51,
108 15625,2,
109 '0x8637bd06','0xfffbce4217d',44,
110 '0x8637bd05af6c69b6','0xfffbce4217d2849cb25',76,
111 2,15625,
112 ], 200 => [
113 '0xa0000000','0x0',29,
114 '0xa000000000000000','0x0',61,
115 5,1,
116 '0xcccccccd','0x333333333',34,
117 '0xcccccccccccccccd','0x33333333333333333',66,
118 1,5,
119 '0x9c400000','0x0',19,
120 '0x9c40000000000000','0x0',51,
121 5000,1,
122 '0xd1b71759','0xfff2e48e8a7',44,
123 '0xd1b71758e219652c','0xfff2e48e8a71de69ad4',76,
124 1,5000,
125 ], 250 => [
126 '0x80000000','0x0',29,
127 '0x8000000000000000','0x0',61,
128 4,1,
129 '0x80000000','0x180000000',33,
130 '0x8000000000000000','0x18000000000000000',65,
131 1,4,
132 '0xfa000000','0x0',20,
133 '0xfa00000000000000','0x0',52,
134 4000,1,
135 '0x83126e98','0x7ff7ced9168',43,
136 '0x83126e978d4fdf3c','0x7ff7ced916872b020c4',75,
137 1,4000,
138 ], 256 => [
139 '0xfa000000','0x3e000000',30,
140 '0xfa00000000000000','0x3e00000000000000',62,
141 125,32,
142 '0x83126e98','0x1fbe76c8b',33,
143 '0x83126e978d4fdf3c','0x1fbe76c8b43958106',65,
144 32,125,
145 '0xf4240000','0xc0000',20,
146 '0xf424000000000000','0xc000000000000',52,
147 15625,4,
148 '0x8637bd06','0x7ffde7210be',43,
149 '0x8637bd05af6c69b6','0x7ffde7210be9424e592',75,
150 4,15625,
151 ], 300 => [
152 '0xd5555556','0x2aaaaaaa',30,
153 '0xd555555555555556','0x2aaaaaaaaaaaaaaa',62,
154 10,3,
155 '0x9999999a','0x1cccccccc',33,
156 '0x999999999999999a','0x1cccccccccccccccc',65,
157 3,10,
158 '0xd0555556','0xaaaaa',20,
159 '0xd055555555555556','0xaaaaaaaaaaaaa',52,
160 10000,3,
161 '0x9d495183','0x7ffcb923a29',43,
162 '0x9d495182a9930be1','0x7ffcb923a29c779a6b5',75,
163 3,10000,
164 ], 512 => [
165 '0xfa000000','0x7e000000',31,
166 '0xfa00000000000000','0x7e00000000000000',63,
167 125,64,
168 '0x83126e98','0xfdf3b645',32,
169 '0x83126e978d4fdf3c','0xfdf3b645a1cac083',64,
170 64,125,
171 '0xf4240000','0x1c0000',21,
172 '0xf424000000000000','0x1c000000000000',53,
173 15625,8,
174 '0x8637bd06','0x3ffef39085f',42,
175 '0x8637bd05af6c69b6','0x3ffef39085f4a1272c9',74,
176 8,15625,
177 ], 1000 => [
178 '0x80000000','0x0',31,
179 '0x8000000000000000','0x0',63,
180 1,1,
181 '0x80000000','0x0',31,
182 '0x8000000000000000','0x0',63,
183 1,1,
184 '0xfa000000','0x0',22,
185 '0xfa00000000000000','0x0',54,
186 1000,1,
187 '0x83126e98','0x1ff7ced9168',41,
188 '0x83126e978d4fdf3c','0x1ff7ced916872b020c4',73,
189 1,1000,
190 ], 1024 => [
191 '0xfa000000','0xfe000000',32,
192 '0xfa00000000000000','0xfe00000000000000',64,
193 125,128,
194 '0x83126e98','0x7ef9db22',31,
195 '0x83126e978d4fdf3c','0x7ef9db22d0e56041',63,
196 128,125,
197 '0xf4240000','0x3c0000',22,
198 '0xf424000000000000','0x3c000000000000',54,
199 15625,16,
200 '0x8637bd06','0x1fff79c842f',41,
201 '0x8637bd05af6c69b6','0x1fff79c842fa5093964',73,
202 16,15625,
203 ], 1200 => [
204 '0xd5555556','0xd5555555',32,
205 '0xd555555555555556','0xd555555555555555',64,
206 5,6,
207 '0x9999999a','0x66666666',31,
208 '0x999999999999999a','0x6666666666666666',63,
209 6,5,
210 '0xd0555556','0x2aaaaa',22,
211 '0xd055555555555556','0x2aaaaaaaaaaaaa',54,
212 2500,3,
213 '0x9d495183','0x1ffcb923a29',41,
214 '0x9d495182a9930be1','0x1ffcb923a29c779a6b5',73,
215 3,2500,
216 ]
217);
218
219$has_bigint = eval 'use Math::BigInt qw(bgcd); 1;';
220
# Promote a plain scalar to a Math::BigInt so all subsequent
# arithmetic is done with arbitrary precision.
sub bint($)
{
	return Math::BigInt->new($_[0]);
}
226
#
# Constants for division by reciprocal multiplication.
# (bits, numerator, denominator)
#
# Multiplier for n/d scaled by 2^b, rounded up: ceil((n << b) / d).
sub fmul($$$)
{
	my ($bits, $num, $den) = @_;

	$num = bint($num);
	$den = bint($den);

	my $scaled = ($num << $bits) + $den - bint(1);
	return scalar $scaled / $den;
}
240
# Rounding adjustment for the reciprocal multiplication:
# floor(((d' - 1) << b) / d') where d' = d / gcd(n, d).
sub fadj($$$)
{
	my ($bits, $num, $den) = @_;

	$num = bint($num);
	$den = bint($den);

	$den = $den / bgcd($num, $den);
	my $adj = ($den - bint(1)) << $bits;
	return scalar $adj / $den;
}
251
# Smallest shift s for which the multiplier fmul(s, n, d) reaches
# the top half of the b-bit range (i.e. is >= 2^(b-1)), maximizing
# precision of the reciprocal multiplication.
sub fmuls($$$) {
	my ($bits, $num, $den) = @_;
	my $threshold = bint(1) << ($bits - 1);

	$num = bint($num);
	$den = bint($den);

	my $shift = 0;
	while (1) {
		my $mul = fmul($shift, $num, $den);
		return $shift if ($mul >= $threshold);
		$shift++;
	}
	return 0;
}
266
# Provides mul, adj, and shr factors for a specific
# (bit, time, hz) combination
sub muladj($$$) {
	my ($bits, $time, $hz) = @_;

	my $shift = fmuls($bits, $time, $hz);
	my $mul   = fmul($shift, $time, $hz);
	my $adj   = fadj($shift, $time, $hz);

	return ($mul->as_hex(), $adj->as_hex(), $shift);
}
276
# Provides numerator, denominator values reduced to lowest terms.
sub numden($$) {
	my ($num, $den) = @_;
	my $gcd = bgcd($num, $den);
	return ($num / $gcd, $den / $gcd);
}
283
# All values for a specific (time, hz) combo: 32- and 64-bit
# reciprocal-multiply constants plus the reduced fraction, in
# each direction (HZ_TO_xx then xx_TO_HZ).
sub conversions($$) {
	my ($time, $hz) = @_;
	my @out;

	# HZ_TO_xx
	push(@out, muladj(32, $time, $hz));
	push(@out, muladj(64, $time, $hz));
	push(@out, numden($time, $hz));

	# xx_TO_HZ
	push(@out, muladj(32, $hz, $time));
	push(@out, muladj(64, $hz, $time));
	push(@out, numden($hz, $time));

	return @out;
}
301
# Compute the full conversion table (msec and usec, both
# directions) for one HZ value. Requires Math::BigInt; dies if it
# is not available, since the result cannot be computed exactly.
#
# Note: the original declared "my $s, $m, $a, $g;" here, which
# only lexically declares $s (the comma binds outside my) and none
# of the four variables were used — the dead declaration is gone.
sub compute_values($) {
	my ($hz) = @_;
	my @val = ();

	if (!$has_bigint) {
		die "$0: HZ == $hz not canned and ".
		    "Math::BigInt not available\n";
	}

	# MSEC conversions
	push(@val, conversions(1000, $hz));

	# USEC conversions
	push(@val, conversions(1000000, $hz));

	return @val;
}
320
# Emit the generated timeconst.h header for the given HZ to
# stdout. @val is consumed in exactly the order conversions()
# pushed it: (MUL32, ADJ32, SHR32, MUL64, ADJ64, SHR64, NUM, DEN)
# for each conversion prefix.
#
# Fix: the original "my $pfx, $bit, $suf, $s, $m, $a;" lexically
# declared only $pfx (the comma binds outside my), leaving the
# rest as globals, and $s/$m/$a were never used. The loop
# variables are now proper foreach lexicals.
sub output($@)
{
	my ($hz, @val) = @_;

	print "/* Automatically generated by kernel/timeconst.pl */\n";
	print "/* Conversion constants for HZ == $hz */\n";
	print "\n";
	print "#ifndef KERNEL_TIMECONST_H\n";
	print "#define KERNEL_TIMECONST_H\n";
	print "\n";

	print "#include <linux/param.h>\n";

	print "\n";
	print "#if HZ != $hz\n";
	print "#error \"kernel/timeconst.h has the wrong HZ value!\"\n";
	print "#endif\n";
	print "\n";

	foreach my $pfx ('HZ_TO_MSEC','MSEC_TO_HZ',
			 'USEC_TO_HZ','HZ_TO_USEC') {
		foreach my $bit (32, 64) {
			foreach my $suf ('MUL', 'ADJ', 'SHR') {
				printf "#define %-23s %s\n",
					"${pfx}_$suf$bit", shift(@val);
			}
		}
		foreach my $suf ('NUM', 'DEN') {
			printf "#define %-23s %s\n",
				"${pfx}_$suf", shift(@val);
		}
	}

	print "\n";
	print "#endif /* KERNEL_TIMECONST_H */\n";
}
358
($hz) = @ARGV;

# Use this to generate the %canned_values structure
if ($hz eq '--can') {
	shift(@ARGV);
	@hzlist = sort {$a <=> $b} (@ARGV);

	print "# Precomputed values for systems without Math::BigInt\n";
	print "# Generated by:\n";
	print "# timeconst.pl --can ", join(' ', @hzlist), "\n";
	print "\%canned_values = (\n";
	my $pf = "\t";
	foreach $hz (@hzlist) {
		my @values = compute_values($hz);
		print "$pf$hz => [\n";
		while (scalar(@values)) {
			my $bit;
			foreach $bit (32, 64) {
				my $m = shift(@values);
				my $a = shift(@values);
				my $s = shift(@values);
				print "\t\t\'",$m,"\',\'",$a,"\',",$s,",\n";
			}
			my $n = shift(@values);
			my $d = shift(@values);
			print "\t\t",$n,',',$d,",\n";
		}
		print "\t]";
		$pf = ', ';
	}
	print "\n);\n";
} else {
	$hz += 0;	# Force to number
	if ($hz < 1) {
		die "Usage: $0 HZ\n";
	}

	@val = @{$canned_values{$hz}};
	# defined(@array) is deprecated and a fatal error in Perl >= 5.22;
	# testing the array for emptiness is the correct way to detect
	# an HZ value with no canned entry.
	if (!@val) {
		@val = compute_values($hz);
	}
	output($hz, @val);
}
exit 0;
diff --git a/kernel/timer.c b/kernel/timer.c
index 70b29b59343f..99b00a25f88b 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -327,7 +327,7 @@ static void timer_stats_account_timer(struct timer_list *timer) {}
327 * init_timer() must be done to a timer prior calling *any* of the 327 * init_timer() must be done to a timer prior calling *any* of the
328 * other timer functions. 328 * other timer functions.
329 */ 329 */
330void fastcall init_timer(struct timer_list *timer) 330void init_timer(struct timer_list *timer)
331{ 331{
332 timer->entry.next = NULL; 332 timer->entry.next = NULL;
333 timer->base = __raw_get_cpu_var(tvec_bases); 333 timer->base = __raw_get_cpu_var(tvec_bases);
@@ -339,7 +339,7 @@ void fastcall init_timer(struct timer_list *timer)
339} 339}
340EXPORT_SYMBOL(init_timer); 340EXPORT_SYMBOL(init_timer);
341 341
342void fastcall init_timer_deferrable(struct timer_list *timer) 342void init_timer_deferrable(struct timer_list *timer)
343{ 343{
344 init_timer(timer); 344 init_timer(timer);
345 timer_set_deferrable(timer); 345 timer_set_deferrable(timer);
@@ -979,7 +979,7 @@ asmlinkage long sys_getppid(void)
979 int pid; 979 int pid;
980 980
981 rcu_read_lock(); 981 rcu_read_lock();
982 pid = task_tgid_nr_ns(current->real_parent, current->nsproxy->pid_ns); 982 pid = task_tgid_vnr(current->real_parent);
983 rcu_read_unlock(); 983 rcu_read_unlock();
984 984
985 return pid; 985 return pid;
@@ -1042,7 +1042,7 @@ static void process_timeout(unsigned long __data)
1042 * 1042 *
1043 * In all cases the return value is guaranteed to be non-negative. 1043 * In all cases the return value is guaranteed to be non-negative.
1044 */ 1044 */
1045fastcall signed long __sched schedule_timeout(signed long timeout) 1045signed long __sched schedule_timeout(signed long timeout)
1046{ 1046{
1047 struct timer_list timer; 1047 struct timer_list timer;
1048 unsigned long expire; 1048 unsigned long expire;
diff --git a/kernel/user.c b/kernel/user.c
index bc1c48d35cb3..7d7900c5a1fd 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -17,6 +17,14 @@
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/user_namespace.h> 18#include <linux/user_namespace.h>
19 19
20struct user_namespace init_user_ns = {
21 .kref = {
22 .refcount = ATOMIC_INIT(2),
23 },
24 .root_user = &root_user,
25};
26EXPORT_SYMBOL_GPL(init_user_ns);
27
20/* 28/*
21 * UID task count cache, to get fast user lookup in "alloc_uid" 29 * UID task count cache, to get fast user lookup in "alloc_uid"
22 * when changing user ID's (ie setuid() and friends). 30 * when changing user ID's (ie setuid() and friends).
@@ -427,6 +435,7 @@ void switch_uid(struct user_struct *new_user)
427 suid_keys(current); 435 suid_keys(current);
428} 436}
429 437
438#ifdef CONFIG_USER_NS
430void release_uids(struct user_namespace *ns) 439void release_uids(struct user_namespace *ns)
431{ 440{
432 int i; 441 int i;
@@ -451,6 +460,7 @@ void release_uids(struct user_namespace *ns)
451 460
452 free_uid(ns->root_user); 461 free_uid(ns->root_user);
453} 462}
463#endif
454 464
455static int __init uid_cache_init(void) 465static int __init uid_cache_init(void)
456{ 466{
diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
index 7af90fc4f0fd..4c9006275df7 100644
--- a/kernel/user_namespace.c
+++ b/kernel/user_namespace.c
@@ -10,17 +10,6 @@
10#include <linux/nsproxy.h> 10#include <linux/nsproxy.h>
11#include <linux/user_namespace.h> 11#include <linux/user_namespace.h>
12 12
13struct user_namespace init_user_ns = {
14 .kref = {
15 .refcount = ATOMIC_INIT(2),
16 },
17 .root_user = &root_user,
18};
19
20EXPORT_SYMBOL_GPL(init_user_ns);
21
22#ifdef CONFIG_USER_NS
23
24/* 13/*
25 * Clone a new ns copying an original user ns, setting refcount to 1 14 * Clone a new ns copying an original user ns, setting refcount to 1
26 * @old_ns: namespace to clone 15 * @old_ns: namespace to clone
@@ -84,5 +73,3 @@ void free_user_ns(struct kref *kref)
84 release_uids(ns); 73 release_uids(ns);
85 kfree(ns); 74 kfree(ns);
86} 75}
87
88#endif /* CONFIG_USER_NS */
diff --git a/kernel/wait.c b/kernel/wait.c
index f9876888a569..c275c56cf2d3 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -18,7 +18,7 @@ void init_waitqueue_head(wait_queue_head_t *q)
18 18
19EXPORT_SYMBOL(init_waitqueue_head); 19EXPORT_SYMBOL(init_waitqueue_head);
20 20
21void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) 21void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
22{ 22{
23 unsigned long flags; 23 unsigned long flags;
24 24
@@ -29,7 +29,7 @@ void fastcall add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
29} 29}
30EXPORT_SYMBOL(add_wait_queue); 30EXPORT_SYMBOL(add_wait_queue);
31 31
32void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait) 32void add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
33{ 33{
34 unsigned long flags; 34 unsigned long flags;
35 35
@@ -40,7 +40,7 @@ void fastcall add_wait_queue_exclusive(wait_queue_head_t *q, wait_queue_t *wait)
40} 40}
41EXPORT_SYMBOL(add_wait_queue_exclusive); 41EXPORT_SYMBOL(add_wait_queue_exclusive);
42 42
43void fastcall remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) 43void remove_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
44{ 44{
45 unsigned long flags; 45 unsigned long flags;
46 46
@@ -63,7 +63,7 @@ EXPORT_SYMBOL(remove_wait_queue);
63 * stops them from bleeding out - it would still allow subsequent 63 * stops them from bleeding out - it would still allow subsequent
64 * loads to move into the critical region). 64 * loads to move into the critical region).
65 */ 65 */
66void fastcall 66void
67prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state) 67prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
68{ 68{
69 unsigned long flags; 69 unsigned long flags;
@@ -82,7 +82,7 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
82} 82}
83EXPORT_SYMBOL(prepare_to_wait); 83EXPORT_SYMBOL(prepare_to_wait);
84 84
85void fastcall 85void
86prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state) 86prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
87{ 87{
88 unsigned long flags; 88 unsigned long flags;
@@ -101,7 +101,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
101} 101}
102EXPORT_SYMBOL(prepare_to_wait_exclusive); 102EXPORT_SYMBOL(prepare_to_wait_exclusive);
103 103
104void fastcall finish_wait(wait_queue_head_t *q, wait_queue_t *wait) 104void finish_wait(wait_queue_head_t *q, wait_queue_t *wait)
105{ 105{
106 unsigned long flags; 106 unsigned long flags;
107 107
@@ -157,7 +157,7 @@ EXPORT_SYMBOL(wake_bit_function);
157 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are 157 * waiting, the actions of __wait_on_bit() and __wait_on_bit_lock() are
158 * permitted return codes. Nonzero return codes halt waiting and return. 158 * permitted return codes. Nonzero return codes halt waiting and return.
159 */ 159 */
160int __sched fastcall 160int __sched
161__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q, 161__wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
162 int (*action)(void *), unsigned mode) 162 int (*action)(void *), unsigned mode)
163{ 163{
@@ -173,7 +173,7 @@ __wait_on_bit(wait_queue_head_t *wq, struct wait_bit_queue *q,
173} 173}
174EXPORT_SYMBOL(__wait_on_bit); 174EXPORT_SYMBOL(__wait_on_bit);
175 175
176int __sched fastcall out_of_line_wait_on_bit(void *word, int bit, 176int __sched out_of_line_wait_on_bit(void *word, int bit,
177 int (*action)(void *), unsigned mode) 177 int (*action)(void *), unsigned mode)
178{ 178{
179 wait_queue_head_t *wq = bit_waitqueue(word, bit); 179 wait_queue_head_t *wq = bit_waitqueue(word, bit);
@@ -183,7 +183,7 @@ int __sched fastcall out_of_line_wait_on_bit(void *word, int bit,
183} 183}
184EXPORT_SYMBOL(out_of_line_wait_on_bit); 184EXPORT_SYMBOL(out_of_line_wait_on_bit);
185 185
186int __sched fastcall 186int __sched
187__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q, 187__wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
188 int (*action)(void *), unsigned mode) 188 int (*action)(void *), unsigned mode)
189{ 189{
@@ -201,7 +201,7 @@ __wait_on_bit_lock(wait_queue_head_t *wq, struct wait_bit_queue *q,
201} 201}
202EXPORT_SYMBOL(__wait_on_bit_lock); 202EXPORT_SYMBOL(__wait_on_bit_lock);
203 203
204int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit, 204int __sched out_of_line_wait_on_bit_lock(void *word, int bit,
205 int (*action)(void *), unsigned mode) 205 int (*action)(void *), unsigned mode)
206{ 206{
207 wait_queue_head_t *wq = bit_waitqueue(word, bit); 207 wait_queue_head_t *wq = bit_waitqueue(word, bit);
@@ -211,7 +211,7 @@ int __sched fastcall out_of_line_wait_on_bit_lock(void *word, int bit,
211} 211}
212EXPORT_SYMBOL(out_of_line_wait_on_bit_lock); 212EXPORT_SYMBOL(out_of_line_wait_on_bit_lock);
213 213
214void fastcall __wake_up_bit(wait_queue_head_t *wq, void *word, int bit) 214void __wake_up_bit(wait_queue_head_t *wq, void *word, int bit)
215{ 215{
216 struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit); 216 struct wait_bit_key key = __WAIT_BIT_KEY_INITIALIZER(word, bit);
217 if (waitqueue_active(wq)) 217 if (waitqueue_active(wq))
@@ -236,13 +236,13 @@ EXPORT_SYMBOL(__wake_up_bit);
236 * may need to use a less regular barrier, such fs/inode.c's smp_mb(), 236 * may need to use a less regular barrier, such fs/inode.c's smp_mb(),
237 * because spin_unlock() does not guarantee a memory barrier. 237 * because spin_unlock() does not guarantee a memory barrier.
238 */ 238 */
239void fastcall wake_up_bit(void *word, int bit) 239void wake_up_bit(void *word, int bit)
240{ 240{
241 __wake_up_bit(bit_waitqueue(word, bit), word, bit); 241 __wake_up_bit(bit_waitqueue(word, bit), word, bit);
242} 242}
243EXPORT_SYMBOL(wake_up_bit); 243EXPORT_SYMBOL(wake_up_bit);
244 244
245fastcall wait_queue_head_t *bit_waitqueue(void *word, int bit) 245wait_queue_head_t *bit_waitqueue(void *word, int bit)
246{ 246{
247 const int shift = BITS_PER_LONG == 32 ? 5 : 6; 247 const int shift = BITS_PER_LONG == 32 ? 5 : 6;
248 const struct zone *zone = page_zone(virt_to_page(word)); 248 const struct zone *zone = page_zone(virt_to_page(word));
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 52db48e7f6e7..ff06611655af 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -161,7 +161,7 @@ static void __queue_work(struct cpu_workqueue_struct *cwq,
161 * We queue the work to the CPU it was submitted, but there is no 161 * We queue the work to the CPU it was submitted, but there is no
162 * guarantee that it will be processed by that CPU. 162 * guarantee that it will be processed by that CPU.
163 */ 163 */
164int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work) 164int queue_work(struct workqueue_struct *wq, struct work_struct *work)
165{ 165{
166 int ret = 0; 166 int ret = 0;
167 167
@@ -175,7 +175,7 @@ int fastcall queue_work(struct workqueue_struct *wq, struct work_struct *work)
175} 175}
176EXPORT_SYMBOL_GPL(queue_work); 176EXPORT_SYMBOL_GPL(queue_work);
177 177
178void delayed_work_timer_fn(unsigned long __data) 178static void delayed_work_timer_fn(unsigned long __data)
179{ 179{
180 struct delayed_work *dwork = (struct delayed_work *)__data; 180 struct delayed_work *dwork = (struct delayed_work *)__data;
181 struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work); 181 struct cpu_workqueue_struct *cwq = get_wq_data(&dwork->work);
@@ -192,7 +192,7 @@ void delayed_work_timer_fn(unsigned long __data)
192 * 192 *
193 * Returns 0 if @work was already on a queue, non-zero otherwise. 193 * Returns 0 if @work was already on a queue, non-zero otherwise.
194 */ 194 */
195int fastcall queue_delayed_work(struct workqueue_struct *wq, 195int queue_delayed_work(struct workqueue_struct *wq,
196 struct delayed_work *dwork, unsigned long delay) 196 struct delayed_work *dwork, unsigned long delay)
197{ 197{
198 timer_stats_timer_set_start_info(&dwork->timer); 198 timer_stats_timer_set_start_info(&dwork->timer);
@@ -388,7 +388,7 @@ static int flush_cpu_workqueue(struct cpu_workqueue_struct *cwq)
388 * This function used to run the workqueues itself. Now we just wait for the 388 * This function used to run the workqueues itself. Now we just wait for the
389 * helper threads to do it. 389 * helper threads to do it.
390 */ 390 */
391void fastcall flush_workqueue(struct workqueue_struct *wq) 391void flush_workqueue(struct workqueue_struct *wq)
392{ 392{
393 const cpumask_t *cpu_map = wq_cpu_map(wq); 393 const cpumask_t *cpu_map = wq_cpu_map(wq);
394 int cpu; 394 int cpu;
@@ -546,7 +546,7 @@ static struct workqueue_struct *keventd_wq __read_mostly;
546 * 546 *
547 * This puts a job in the kernel-global workqueue. 547 * This puts a job in the kernel-global workqueue.
548 */ 548 */
549int fastcall schedule_work(struct work_struct *work) 549int schedule_work(struct work_struct *work)
550{ 550{
551 return queue_work(keventd_wq, work); 551 return queue_work(keventd_wq, work);
552} 552}
@@ -560,7 +560,7 @@ EXPORT_SYMBOL(schedule_work);
560 * After waiting for a given time this puts a job in the kernel-global 560 * After waiting for a given time this puts a job in the kernel-global
561 * workqueue. 561 * workqueue.
562 */ 562 */
563int fastcall schedule_delayed_work(struct delayed_work *dwork, 563int schedule_delayed_work(struct delayed_work *dwork,
564 unsigned long delay) 564 unsigned long delay)
565{ 565{
566 timer_stats_timer_set_start_info(&dwork->timer); 566 timer_stats_timer_set_start_info(&dwork->timer);