Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile           |    2
-rw-r--r--  kernel/acct.c             |   66
-rw-r--r--  kernel/audit.c            |   14
-rw-r--r--  kernel/auditfilter.c      |    4
-rw-r--r--  kernel/auditsc.c          |   10
-rw-r--r--  kernel/capability.c       |  195
-rw-r--r--  kernel/compat.c           |   60
-rw-r--r--  kernel/cpu.c              |    2
-rw-r--r--  kernel/cpuset.c           |   12
-rw-r--r--  kernel/delayacct.c        |    6
-rw-r--r--  kernel/dma.c              |    8
-rw-r--r--  kernel/exec_domain.c      |    2
-rw-r--r--  kernel/fork.c             |   24
-rw-r--r--  kernel/futex.c            |    4
-rw-r--r--  kernel/itimer.c           |    4
-rw-r--r--  kernel/kexec.c            |    4
-rw-r--r--  kernel/module.c           |   38
-rw-r--r--  kernel/panic.c            |   10
-rw-r--r--  kernel/params.c           |    8
-rw-r--r--  kernel/power/Kconfig      |   11
-rw-r--r--  kernel/power/disk.c       |  156
-rw-r--r--  kernel/power/main.c       |   48
-rw-r--r--  kernel/power/power.h      |   21
-rw-r--r--  kernel/power/process.c    |  141
-rw-r--r--  kernel/power/snapshot.c   |   53
-rw-r--r--  kernel/power/swsusp.c     |   33
-rw-r--r--  kernel/power/user.c       |    4
-rw-r--r--  kernel/printk.c           |   16
-rw-r--r--  kernel/relay.c            |    6
-rw-r--r--  kernel/sched.c            |   94
-rw-r--r--  kernel/sched_debug.c      |    2
-rw-r--r--  kernel/sched_stats.h      |    8
-rw-r--r--  kernel/signal.c           |    1
-rw-r--r--  kernel/sys.c              |    1
-rw-r--r--  kernel/sysctl.c           |  276
-rw-r--r--  kernel/sysctl_check.c     | 1588
-rw-r--r--  kernel/time.c             |   14
-rw-r--r--  kernel/timer.c            |    9
-rw-r--r--  kernel/tsacct.c           |    4
39 files changed, 2395 insertions(+), 564 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 2a999836ca18..d63fbb18798a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -9,7 +9,7 @@ obj-y = sched.o fork.o exec_domain.o panic.o printk.o profile.o \
 	    rcupdate.o extable.o params.o posix-timers.o \
 	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
 	    hrtimer.o rwsem.o latency.o nsproxy.o srcu.o die_notifier.o \
-	    utsname.o
+	    utsname.o sysctl_check.o

 obj-$(CONFIG_STACKTRACE) += stacktrace.o
 obj-y += time/
diff --git a/kernel/acct.c b/kernel/acct.c
index 24f0f8b2ba72..fce53d8df8a7 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -329,16 +329,16 @@ static comp_t encode_comp_t(unsigned long value)
 	}

 	/*
 	 * If we need to round up, do it (and handle overflow correctly).
 	 */
 	if (rnd && (++value > MAXFRACT)) {
 		value >>= EXPSIZE;
 		exp++;
 	}

 	/*
 	 * Clean it up and polish it off.
 	 */
 	exp <<= MANTSIZE;	/* Shift the exponent into place */
 	exp += value;		/* and add on the mantissa. */
 	return exp;
@@ -361,30 +361,30 @@ static comp_t encode_comp_t(unsigned long value)
 
 static comp2_t encode_comp2_t(u64 value)
 {
 	int exp, rnd;

 	exp = (value > (MAXFRACT2>>1));
 	rnd = 0;
 	while (value > MAXFRACT2) {
 		rnd = value & 1;
 		value >>= 1;
 		exp++;
 	}

 	/*
 	 * If we need to round up, do it (and handle overflow correctly).
 	 */
 	if (rnd && (++value > MAXFRACT2)) {
 		value >>= 1;
 		exp++;
 	}

 	if (exp > MAXEXP2) {
 		/* Overflow. Return largest representable number instead. */
 		return (1ul << (MANTSIZE2+EXPSIZE2-1)) - 1;
 	} else {
 		return (value & (MAXFRACT2>>1)) | (exp << (MANTSIZE2-1));
 	}
 }
 #endif

@@ -501,14 +501,14 @@ static void do_acct_process(struct file *file)
 	ac.ac_swaps = encode_comp_t(0);

 	/*
 	 * Kernel segment override to datasegment and write it
 	 * to the accounting file.
 	 */
 	fs = get_fs();
 	set_fs(KERNEL_DS);
 	/*
 	 * Accounting records are not subject to resource limits.
 	 */
 	flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
 	current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
 	file->f_op->write(file, (char *)&ac,
diff --git a/kernel/audit.c b/kernel/audit.c
index 2924251a6547..6977ea57a7e2 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -664,11 +664,11 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 			if (sid) {
 				if (selinux_sid_to_string(
 						sid, &ctx, &len)) {
 					audit_log_format(ab,
 						" ssid=%u", sid);
 					/* Maybe call audit_panic? */
 				} else
 					audit_log_format(ab,
 						" subj=%s", ctx);
 				kfree(ctx);
 			}
@@ -769,7 +769,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
 		sig_data->pid = audit_sig_pid;
 		memcpy(sig_data->ctx, ctx, len);
 		kfree(ctx);
 		audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_SIGNAL_INFO,
 				0, 0, sig_data, sizeof(*sig_data) + len);
 		kfree(sig_data);
 		break;
@@ -1005,7 +1005,7 @@ unsigned int audit_serial(void)
 	return ret;
 }

 static inline void audit_get_stamp(struct audit_context *ctx,
 				    struct timespec *t, unsigned int *serial)
 {
 	if (ctx)
@@ -1056,7 +1056,7 @@ struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask,
 	if (gfp_mask & __GFP_WAIT)
 		reserve = 0;
 	else
 		reserve = 5; /* Allow atomic callers to go up to five
 				entries over the normal backlog limit */

 	while (audit_backlog_limit
@@ -1319,7 +1319,7 @@ void audit_log_d_path(struct audit_buffer *ab, const char *prefix,
 	if (IS_ERR(p)) { /* Should never happen since we send PATH_MAX */
 		/* FIXME: can we save some information here? */
 		audit_log_format(ab, "<too long>");
 	} else
 		audit_log_untrustedstring(ab, p);
 	kfree(path);
 }
@@ -1365,7 +1365,7 @@ void audit_log_end(struct audit_buffer *ab)
  * audit_log_vformat, and audit_log_end.  It may be called
  * in any context.
  */
 void audit_log(struct audit_context *ctx, gfp_t gfp_mask, int type,
 	       const char *fmt, ...)
 {
 	struct audit_buffer *ab;
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 359645cff5b2..df66a21fb360 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1498,7 +1498,7 @@ int audit_receive_filter(int type, int pid, int uid, int seq, void *data,
 	 * auditctl to read from it... which isn't ever going to
 	 * happen if we're actually running in the context of auditctl
 	 * trying to _send_ the stuff */

 	dest = kmalloc(sizeof(struct audit_netlink_list), GFP_KERNEL);
 	if (!dest)
 		return -ENOMEM;
@@ -1678,7 +1678,7 @@ int audit_filter_type(int type)
 {
 	struct audit_entry *e;
 	int result = 0;

 	rcu_read_lock();
 	if (list_empty(&audit_filter_list[AUDIT_FILTER_TYPE]))
 		goto unlock_and_return;
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 938e60a61882..e19b5a33aede 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -320,7 +320,7 @@ static int audit_filter_rules(struct task_struct *tsk,
 			result = audit_comparator(tsk->personality, f->op, f->val);
 			break;
 		case AUDIT_ARCH:
 			if (ctx)
 				result = audit_comparator(ctx->arch, f->op, f->val);
 			break;

@@ -898,7 +898,7 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 	if (context->personality != PER_LINUX)
 		audit_log_format(ab, " per=%lx", context->personality);
 	if (context->return_valid)
 		audit_log_format(ab, " success=%s exit=%ld",
 			(context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
 			context->return_code);

@@ -1135,8 +1135,8 @@ void audit_free(struct task_struct *tsk)
 		return;

 	/* Check for system calls that do not go through the exit
 	 * function (e.g., exit_group), then free context block.
 	 * We use GFP_ATOMIC here because we might be doing this
 	 * in the context of the idle thread */
 	/* that can happen only if we are called from do_exit() */
 	if (context->in_syscall && context->auditable)
@@ -1316,7 +1316,7 @@ void __audit_getname(const char *name)
 		context->pwdmnt = mntget(current->fs->pwdmnt);
 		read_unlock(&current->fs->lock);
 	}

 }

 /* audit_putname - intercept a putname request
diff --git a/kernel/capability.c b/kernel/capability.c
index 4e350a36ed6a..cbc5fd60c0f3 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -3,9 +3,9 @@
  *
  * Copyright (C) 1997  Andrew Main <zefram@fysh.org>
  *
- * Integrated into 2.1.97+,  Andrew G. Morgan <morgan@transmeta.com>
+ * Integrated into 2.1.97+,  Andrew G. Morgan <morgan@kernel.org>
  * 30 May 2002:	Cleanup, Robert M. Love <rml@tech9.net>
  */

 #include <linux/capability.h>
 #include <linux/mm.h>
@@ -14,9 +14,6 @@
 #include <linux/syscalls.h>
 #include <asm/uaccess.h>

-unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
-kernel_cap_t cap_bset = CAP_INIT_EFF_SET;
-
 /*
  * This lock protects task->cap_* for all tasks including current.
  * Locking rule: acquire this prior to tasklist_lock.
@@ -40,49 +37,49 @@ static DEFINE_SPINLOCK(task_capability_lock);
  */
 asmlinkage long sys_capget(cap_user_header_t header, cap_user_data_t dataptr)
 {
 	int ret = 0;
 	pid_t pid;
 	__u32 version;
 	struct task_struct *target;
 	struct __user_cap_data_struct data;

 	if (get_user(version, &header->version))
 		return -EFAULT;

 	if (version != _LINUX_CAPABILITY_VERSION) {
 		if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
 			return -EFAULT;
 		return -EINVAL;
 	}

 	if (get_user(pid, &header->pid))
 		return -EFAULT;

 	if (pid < 0)
 		return -EINVAL;

 	spin_lock(&task_capability_lock);
 	read_lock(&tasklist_lock);

 	if (pid && pid != current->pid) {
 		target = find_task_by_pid(pid);
 		if (!target) {
 			ret = -ESRCH;
 			goto out;
 		}
 	} else
 		target = current;

 	ret = security_capget(target, &data.effective, &data.inheritable, &data.permitted);

 out:
 	read_unlock(&tasklist_lock);
 	spin_unlock(&task_capability_lock);

 	if (!ret && copy_to_user(dataptr, &data, sizeof data))
 		return -EFAULT;

 	return ret;
 }

 /*
@@ -115,7 +112,7 @@ static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
 	} while_each_pid_task(pgrp, PIDTYPE_PGID, g);

 	if (!found)
 		ret = 0;
 	return ret;
 }

@@ -169,68 +166,68 @@ static inline int cap_set_all(kernel_cap_t *effective,
  */
 asmlinkage long sys_capset(cap_user_header_t header, const cap_user_data_t data)
 {
 	kernel_cap_t inheritable, permitted, effective;
 	__u32 version;
 	struct task_struct *target;
 	int ret;
 	pid_t pid;

 	if (get_user(version, &header->version))
 		return -EFAULT;

 	if (version != _LINUX_CAPABILITY_VERSION) {
 		if (put_user(_LINUX_CAPABILITY_VERSION, &header->version))
 			return -EFAULT;
 		return -EINVAL;
 	}

 	if (get_user(pid, &header->pid))
 		return -EFAULT;

 	if (pid && pid != current->pid && !capable(CAP_SETPCAP))
 		return -EPERM;

 	if (copy_from_user(&effective, &data->effective, sizeof(effective)) ||
 	    copy_from_user(&inheritable, &data->inheritable, sizeof(inheritable)) ||
 	    copy_from_user(&permitted, &data->permitted, sizeof(permitted)))
 		return -EFAULT;

 	spin_lock(&task_capability_lock);
 	read_lock(&tasklist_lock);

 	if (pid > 0 && pid != current->pid) {
 		target = find_task_by_pid(pid);
 		if (!target) {
 			ret = -ESRCH;
 			goto out;
 		}
 	} else
 		target = current;

 	ret = 0;

 	/* having verified that the proposed changes are legal,
 	   we now put them into effect. */
 	if (pid < 0) {
 		if (pid == -1)	/* all procs other than current and init */
 			ret = cap_set_all(&effective, &inheritable, &permitted);

 		else		/* all procs in process group */
 			ret = cap_set_pg(-pid, &effective, &inheritable,
 					 &permitted);
 	} else {
 		ret = security_capset_check(target, &effective, &inheritable,
 					    &permitted);
 		if (!ret)
 			security_capset_set(target, &effective, &inheritable,
 					    &permitted);
 	}

 out:
 	read_unlock(&tasklist_lock);
 	spin_unlock(&task_capability_lock);

 	return ret;
 }

 int __capable(struct task_struct *t, int cap)
diff --git a/kernel/compat.c b/kernel/compat.c
index 252a446fb0d0..42a1ed4b61b1 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -212,8 +212,8 @@ asmlinkage long compat_sys_setrlimit(unsigned int resource,
 	int ret;
 	mm_segment_t old_fs = get_fs ();

 	if (resource >= RLIM_NLIMITS)
 		return -EINVAL;

 	if (!access_ok(VERIFY_READ, rlim, sizeof(*rlim)) ||
 	    __get_user(r.rlim_cur, &rlim->rlim_cur) ||
@@ -442,21 +442,21 @@ asmlinkage long compat_sys_sched_getaffinity(compat_pid_t pid, unsigned int len,
 
 int get_compat_itimerspec(struct itimerspec *dst,
 			  const struct compat_itimerspec __user *src)
 {
 	if (get_compat_timespec(&dst->it_interval, &src->it_interval) ||
 	    get_compat_timespec(&dst->it_value, &src->it_value))
 		return -EFAULT;
 	return 0;
 }

 int put_compat_itimerspec(struct compat_itimerspec __user *dst,
 			  const struct itimerspec *src)
 {
 	if (put_compat_timespec(&src->it_interval, &dst->it_interval) ||
 	    put_compat_timespec(&src->it_value, &dst->it_value))
 		return -EFAULT;
 	return 0;
 }

 long compat_sys_timer_create(clockid_t which_clock,
 			struct compat_sigevent __user *timer_event_spec,
@@ -477,9 +477,9 @@ long compat_sys_timer_create(clockid_t which_clock,
 }

 long compat_sys_timer_settime(timer_t timer_id, int flags,
 			  struct compat_itimerspec __user *new,
 			  struct compat_itimerspec __user *old)
 {
 	long err;
 	mm_segment_t oldfs;
 	struct itimerspec newts, oldts;
@@ -487,58 +487,58 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
 	if (!new)
 		return -EINVAL;
 	if (get_compat_itimerspec(&newts, new))
 		return -EFAULT;
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = sys_timer_settime(timer_id, flags,
 				(struct itimerspec __user *) &newts,
 				(struct itimerspec __user *) &oldts);
 	set_fs(oldfs);
 	if (!err && old && put_compat_itimerspec(old, &oldts))
 		return -EFAULT;
 	return err;
 }

 long compat_sys_timer_gettime(timer_t timer_id,
 		struct compat_itimerspec __user *setting)
 {
 	long err;
 	mm_segment_t oldfs;
 	struct itimerspec ts;

 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = sys_timer_gettime(timer_id,
 				(struct itimerspec __user *) &ts);
 	set_fs(oldfs);
 	if (!err && put_compat_itimerspec(setting, &ts))
 		return -EFAULT;
 	return err;
 }

 long compat_sys_clock_settime(clockid_t which_clock,
 		struct compat_timespec __user *tp)
 {
 	long err;
 	mm_segment_t oldfs;
 	struct timespec ts;

 	if (get_compat_timespec(&ts, tp))
 		return -EFAULT;
 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
 	err = sys_clock_settime(which_clock,
 				(struct timespec __user *) &ts);
 	set_fs(oldfs);
 	return err;
 }

 long compat_sys_clock_gettime(clockid_t which_clock,
 		struct compat_timespec __user *tp)
 {
 	long err;
 	mm_segment_t oldfs;
 	struct timespec ts;

 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
@@ -546,16 +546,16 @@ long compat_sys_clock_gettime(clockid_t which_clock,
 				(struct timespec __user *) &ts);
 	set_fs(oldfs);
 	if (!err && put_compat_timespec(&ts, tp))
 		return -EFAULT;
 	return err;
 }

 long compat_sys_clock_getres(clockid_t which_clock,
 		struct compat_timespec __user *tp)
 {
 	long err;
 	mm_segment_t oldfs;
 	struct timespec ts;

 	oldfs = get_fs();
 	set_fs(KERNEL_DS);
@@ -563,9 +563,9 @@ long compat_sys_clock_getres(clockid_t which_clock,
 			       (struct timespec __user *) &ts);
 	set_fs(oldfs);
 	if (!err && tp && put_compat_timespec(&ts, tp))
 		return -EFAULT;
 	return err;
 }

 static long compat_clock_nanosleep_restart(struct restart_block *restart)
 {
@@ -597,10 +597,10 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
 {
 	long err;
 	mm_segment_t oldfs;
 	struct timespec in, out;
 	struct restart_block *restart;

 	if (get_compat_timespec(&in, rqtp))
 		return -EFAULT;

 	oldfs = get_fs();
@@ -619,8 +619,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
 		restart->fn = compat_clock_nanosleep_restart;
 		restart->arg1 = (unsigned long) rmtp;
 	}
 	return err;
 }

 /*
  * We currently only need the following fields from the sigevent
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 38033db8d8ec..a21f71af9d81 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -150,6 +150,7 @@ static int _cpu_down(unsigned int cpu, int tasks_frozen)
 	err = __raw_notifier_call_chain(&cpu_chain, CPU_DOWN_PREPARE | mod,
 					hcpu, -1, &nr_calls);
 	if (err == NOTIFY_BAD) {
+		nr_calls--;
 		__raw_notifier_call_chain(&cpu_chain, CPU_DOWN_FAILED | mod,
 					  hcpu, nr_calls, NULL);
 		printk("%s: attempt to take down CPU %u failed\n",
@@ -233,6 +234,7 @@ static int __cpuinit _cpu_up(unsigned int cpu, int tasks_frozen)
 	ret = __raw_notifier_call_chain(&cpu_chain, CPU_UP_PREPARE | mod, hcpu,
 					-1, &nr_calls);
 	if (ret == NOTIFY_BAD) {
+		nr_calls--;
 		printk("%s: attempt to bring up CPU %u failed\n",
 				__FUNCTION__, cpu);
 		ret = -EINVAL;
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 2eb2e50db0d6..64950fa5d321 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2431,12 +2431,12 @@ int __cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
 	node = zone_to_nid(z);
 	if (node_isset(node, current->mems_allowed))
 		return 1;
 	/*
 	 * Allow tasks that have access to memory reserves because they have
 	 * been OOM killed to get memory anywhere.
 	 */
 	if (unlikely(test_thread_flag(TIF_MEMDIE)))
 		return 1;
 	return 0;
 }

diff --git a/kernel/delayacct.c b/kernel/delayacct.c
index 09e9574eeb26..10e43fd8b721 100644
--- a/kernel/delayacct.c
+++ b/kernel/delayacct.c
@@ -115,6 +115,12 @@ int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
 	tmp += timespec_to_ns(&ts);
 	d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;

+	tmp = (s64)d->cpu_scaled_run_real_total;
+	cputime_to_timespec(tsk->utimescaled + tsk->stimescaled, &ts);
+	tmp += timespec_to_ns(&ts);
+	d->cpu_scaled_run_real_total =
+		(tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;
+
 	/*
 	 * No locking available for sched_info (and too expensive to add one)
 	 * Mitigate by taking snapshot of values
diff --git a/kernel/dma.c b/kernel/dma.c
index 937b13ca33ba..6a82bb716dac 100644
--- a/kernel/dma.c
+++ b/kernel/dma.c
@@ -20,7 +20,7 @@
 #include <asm/dma.h>
 #include <asm/system.h>



 /* A note on resource allocation:
  *
@@ -95,7 +95,7 @@ void free_dma(unsigned int dmanr)
 	if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
 		printk(KERN_WARNING "Trying to free free DMA%d\n", dmanr);
 		return;
 	}

 } /* free_dma */

@@ -121,8 +121,8 @@ static int proc_dma_show(struct seq_file *m, void *v)
 
 	for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) {
 		if (dma_chan_busy[i].lock) {
 			seq_printf(m, "%2d: %s\n", i,
 				   dma_chan_busy[i].device_id);
 		}
 	}
 	return 0;
diff --git a/kernel/exec_domain.c b/kernel/exec_domain.c
index 3c2eaea66b1e..a9e6bad9f706 100644
--- a/kernel/exec_domain.c
+++ b/kernel/exec_domain.c
@@ -57,7 +57,7 @@ lookup_exec_domain(u_long personality)
 {
 	struct exec_domain *ep;
 	u_long pers = personality(personality);

 	read_lock(&exec_domains_lock);
 	for (ep = exec_domains; ep; ep = ep->next) {
 		if (pers >= ep->pers_low && pers <= ep->pers_high)
diff --git a/kernel/fork.c b/kernel/fork.c
index 490495a39c7e..2ce28f165e31 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -268,7 +268,7 @@ static inline int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
 		get_file(file);
 		if (tmp->vm_flags & VM_DENYWRITE)
 			atomic_dec(&inode->i_writecount);

 		/* insert tmp into the share list, just after mpnt */
 		spin_lock(&file->f_mapping->i_mmap_lock);
 		tmp->vm_truncate_count = mpnt->vm_truncate_count;
@@ -331,7 +331,7 @@ static inline void mm_free_pgd(struct mm_struct * mm)
 #define mm_free_pgd(mm)
 #endif /* CONFIG_MMU */

- __cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);
+__cacheline_aligned_in_smp DEFINE_SPINLOCK(mmlist_lock);

 #define allocate_mm()	(kmem_cache_alloc(mm_cachep, GFP_KERNEL))
 #define free_mm(mm)	(kmem_cache_free(mm_cachep, (mm)))
@@ -738,8 +738,8 @@ static struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
 	/* compute the remainder to be cleared */
 	size = (new_fdt->max_fds - open_files) * sizeof(struct file *);

 	/* This is long word aligned thus could use a optimized version */
 	memset(new_fds, 0, size);

 	if (new_fdt->max_fds > open_files) {
 		int left = (new_fdt->max_fds-open_files)/8;
@@ -942,6 +942,7 @@ static inline void copy_flags(unsigned long clone_flags, struct task_struct *p)
 	if (!(clone_flags & CLONE_PTRACE))
 		p->ptrace = 0;
 	p->flags = new_flags;
+	clear_freeze_flag(p);
 }

 asmlinkage long sys_set_tid_address(int __user *tidptr)
@@ -1058,6 +1059,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->utime = cputime_zero;
 	p->stime = cputime_zero;
 	p->gtime = cputime_zero;
+	p->utimescaled = cputime_zero;
+	p->stimescaled = cputime_zero;

 #ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
@@ -1068,12 +1071,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	task_io_accounting_init(p);
 	acct_clear_integrals(p);

 	p->it_virt_expires = cputime_zero;
 	p->it_prof_expires = cputime_zero;
 	p->it_sched_expires = 0;
 	INIT_LIST_HEAD(&p->cpu_timers[0]);
 	INIT_LIST_HEAD(&p->cpu_timers[1]);
 	INIT_LIST_HEAD(&p->cpu_timers[2]);

 	p->lock_depth = -1;		/* -1 = no lock */
 	do_posix_clock_monotonic_gettime(&p->start_time);
@@ -1083,7 +1086,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->security = NULL;
 #endif
 	p->io_context = NULL;
-	p->io_wait = NULL;
 	p->audit_context = NULL;
 	cpuset_fork(p);
 #ifdef CONFIG_NUMA
@@ -1239,7 +1241,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 * A fatal signal pending means that current will exit, so the new
 	 * thread can't slip out of an OOM kill (or normal SIGKILL).
 	 */
 	recalc_sigpending();
 	if (signal_pending(current)) {
 		spin_unlock(&current->sighand->siglock);
 		write_unlock_irq(&tasklist_lock);
diff --git a/kernel/futex.c b/kernel/futex.c
index d725676d84f3..e45a65e41686 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -293,7 +293,7 @@ EXPORT_SYMBOL_GPL(get_futex_key_refs);
  */
 void drop_futex_key_refs(union futex_key *key)
 {
-	if (key->both.ptr == 0)
+	if (!key->both.ptr)
 		return;
 	switch (key->both.offset & (FUT_OFF_INODE|FUT_OFF_MMSHARED)) {
 	case FUT_OFF_INODE:
@@ -1046,7 +1046,7 @@ static int unqueue_me(struct futex_q *q)
 retry:
 	lock_ptr = q->lock_ptr;
 	barrier();
-	if (lock_ptr != 0) {
+	if (lock_ptr != NULL) {
 		spin_lock(lock_ptr);
 		/*
 		 * q->lock_ptr can change between reading it and
diff --git a/kernel/itimer.c b/kernel/itimer.c
index 3205e8e114fa..2fab344dbf56 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -130,7 +130,7 @@ asmlinkage long sys_getitimer(int which, struct itimerval __user *value)
 enum hrtimer_restart it_real_fn(struct hrtimer *timer)
 {
 	struct signal_struct *sig =
 	    container_of(timer, struct signal_struct, real_timer);

 	send_group_sig_info(SIGALRM, SEND_SIG_PRIV, sig->tsk);

@@ -291,6 +291,6 @@ asmlinkage long sys_setitimer(int which,
 		return error;

 	if (copy_to_user(ovalue, &get_buffer, sizeof(get_buffer)))
 		return -EFAULT;
 	return 0;
 }
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 7885269b0da2..e9f1b4ea504d 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -785,7 +785,7 @@ static int kimage_load_normal_segment(struct kimage *image,
 		size_t uchunk, mchunk;

 		page = kimage_alloc_page(image, GFP_HIGHUSER, maddr);
-		if (page == 0) {
+		if (!page) {
 			result  = -ENOMEM;
 			goto out;
 		}
@@ -844,7 +844,7 @@ static int kimage_load_crash_segment(struct kimage *image,
 		size_t uchunk, mchunk;

 		page = pfn_to_page(maddr >> PAGE_SHIFT);
-		if (page == 0) {
+		if (!page) {
 			result  = -ENOMEM;
 			goto out;
 		}
diff --git a/kernel/module.c b/kernel/module.c
index a389b423c279..7734595bd329 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -105,7 +105,7 @@ void __module_put_and_exit(struct module *mod, long code)
 	do_exit(code);
 }
 EXPORT_SYMBOL(__module_put_and_exit);

 /* Find a module section: 0 means not found. */
 static unsigned int find_sec(Elf_Ehdr *hdr,
 			     Elf_Shdr *sechdrs,
@@ -179,7 +179,7 @@ static unsigned long __find_symbol(const char *name,
 	struct module *mod;
 	const struct kernel_symbol *ks;

 	/* Core kernel first. */
 	*owner = NULL;
 	ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
 	if (ks) {
@@ -231,7 +231,7 @@ static unsigned long __find_symbol(const char *name,
 		return ks->value;
 	}

 	/* Now try modules. */
 	list_for_each_entry(mod, &modules, list) {
 		*owner = mod;
 		ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
@@ -285,7 +285,7 @@ static unsigned long __find_symbol(const char *name,
 		}
 	}
 	DEBUGP("Failed to find symbol %s\n", name);
 	return 0;
 }

 /* Search for module by name: must hold module_mutex. */
@@ -441,7 +441,7 @@ static int percpu_modinit(void)
 	}

 	return 0;
 }
 __initcall(percpu_modinit);
 #else /* ... !CONFIG_SMP */
 static inline void *percpu_modalloc(unsigned long size, unsigned long align,
@@ -483,8 +483,8 @@ static int modinfo_##field##_exists(struct module *mod) \
 }                                     \
 static void free_modinfo_##field(struct module *mod)                 \
 {                                     \
 	kfree(mod->field);                            \
 	mod->field = NULL;                            \
 }                                     \
 static struct module_attribute modinfo_##field = {                   \
 	.attr = { .name = __stringify(field), .mode = 0444 },        \
@@ -990,7 +990,7 @@ static void add_sect_attrs(struct module *mod, unsigned int nsect,
 	struct module_sect_attrs *sect_attrs;
 	struct module_sect_attr *sattr;
 	struct attribute **gattr;

 	/* Count loaded sections and allocate structures */
 	for (i = 0; i < nsect; i++)
 		if (sechdrs[i].sh_flags & SHF_ALLOC)
@@ -1348,14 +1348,14 @@ static int verify_export_symbols(struct module *mod)
 	const unsigned long *crc;

 	for (i = 0; i < mod->num_syms; i++)
 		if (__find_symbol(mod->syms[i].name, &owner, &crc, 1)) {
 			name = mod->syms[i].name;
 			ret = -ENOEXEC;
 			goto dup;
 		}

 	for (i = 0; i < mod->num_gpl_syms; i++)
 		if (__find_symbol(mod->gpl_syms[i].name, &owner, &crc, 1)) {
 			name = mod->gpl_syms[i].name;
 			ret = -ENOEXEC;
 			goto dup;
@@ -1929,7 +1929,7 @@ static struct module *load_module(void __user *umod,
 		mod->unused_crcs = (void *)sechdrs[unusedgplcrcindex].sh_addr;

 #ifdef CONFIG_MODVERSIONS
 	if ((mod->num_syms && !crcindex) ||
 	    (mod->num_gpl_syms && !gplcrcindex) ||
 	    (mod->num_gpl_future_syms && !gplfuturecrcindex) ||
 	    (mod->num_unused_syms && !unusedcrcindex) ||
@@ -2016,7 +2016,7 @@ static struct module *load_module(void __user *umod,
 	if (err < 0)
 		goto arch_cleanup;

 	err = mod_sysfs_setup(mod,
 			      (struct kernel_param *)
 			      sechdrs[setupindex].sh_addr,
 			      sechdrs[setupindex].sh_size
@@ -2028,8 +2028,8 @@ static struct module *load_module(void __user *umod,
 
 	/* Size of section 0 is 0, so this works well if no unwind info. */
 	mod->unwind_info = unwind_add_table(mod,
 					    (void *)sechdrs[unwindex].sh_addr,
 					    sechdrs[unwindex].sh_size);

 	/* Get rid of temporary copy */
 	vfree(hdr);
@@ -2146,7 +2146,7 @@ static inline int within(unsigned long addr, void *start, unsigned long size)
  */
 static inline int is_arm_mapping_symbol(const char *str)
 {
 	return str[0] == '$' && strchr("atd", str[1])
 	       && (str[2] == '\0' || str[2] == '.');
 }

@@ -2161,11 +2161,11 @@ static const char *get_ksymbol(struct module *mod,
 	/* At worse, next value is at end of module */
 	if (within(addr, mod->module_init, mod->init_size))
 		nextval = (unsigned long)mod->module_init+mod->init_text_size;
 	else
 		nextval = (unsigned long)mod->module_core+mod->core_text_size;

 	/* Scan for closest preceeding symbol, and next symbol. (ELF
 	   starts real symbols at 1). */
 	for (i = 1; i < mod->num_symtab; i++) {
 		if (mod->symtab[i].st_shndx == SHN_UNDEF)
 			continue;
@@ -2407,7 +2407,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
 	list_for_each_entry(mod, &modules, list) {
 		if (mod->num_exentries == 0)
 			continue;

 		e = search_extable(mod->extable,
 				   mod->extable + mod->num_exentries - 1,
 				   addr);
@@ -2417,7 +2417,7 @@ const struct exception_table_entry *search_module_extables(unsigned long addr)
 	preempt_enable();

 	/* Now, if we found one, we are running inside it now, hence
 	   we cannot unload the module, hence no refcnt needed. */
 	return e;
 }

diff --git a/kernel/panic.c b/kernel/panic.c
index f64f4c1ac11f..3886bd8230fe 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -56,14 +56,14 @@ EXPORT_SYMBOL(panic_blink);
  *
  *	This function never returns.
  */

 NORET_TYPE void panic(const char * fmt, ...)
 {
 	long i;
 	static char buf[1024];
 	va_list args;
 #if defined(CONFIG_S390)
 	unsigned long caller = (unsigned long) __builtin_return_address(0);
 #endif

 	/*
@@ -128,7 +128,7 @@ NORET_TYPE void panic(const char * fmt, ...)
 	}
 #endif
 #if defined(CONFIG_S390)
 	disabled_wait(caller);
 #endif
 	local_irq_enable();
 	for (i = 0;;) {
@@ -154,7 +154,7 @@ EXPORT_SYMBOL(panic);
  *
  *	The string is overwritten by the next call to print_taint().
  */

 const char *print_tainted(void)
 {
 	static char buf[20];
@@ -164,7 +164,7 @@ const char *print_tainted(void)
 			tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
 			tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
 			tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
 			tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
 			tainted & TAINT_BAD_PAGE ? 'B' : ' ',
 			tainted & TAINT_USER ? 'U' : ' ',
 			tainted & TAINT_DIE ? 'D' : ' ');
diff --git a/kernel/params.c b/kernel/params.c
index 1d6aca288cdc..16f269e9ddc9 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -592,11 +592,17 @@ static void __init param_sysfs_builtin(void)
 
 	for (i=0; i < __stop___param - __start___param; i++) {
 		char *dot;
+		size_t kplen;

 		kp = &__start___param[i];
+		kplen = strlen(kp->name);

 		/* We do not handle args without periods. */
-		dot = memchr(kp->name, '.', MAX_KBUILD_MODNAME);
+		if (kplen > MAX_KBUILD_MODNAME) {
+			DEBUGP("kernel parameter name is too long: %s\n", kp->name);
+			continue;
+		}
+		dot = memchr(kp->name, '.', kplen);
 		if (!dot) {
 			DEBUGP("couldn't find period in %s\n", kp->name);
 			continue;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index 14b0e10dc95c..8e186c678149 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -44,17 +44,6 @@ config PM_VERBOSE
 	---help---
 	This option enables verbose messages from the Power Management code.

-config DISABLE_CONSOLE_SUSPEND
-	bool "Keep console(s) enabled during suspend/resume (DANGEROUS)"
-	depends on PM_DEBUG && PM_SLEEP
-	default n
-	---help---
-	This option turns off the console suspend mechanism that prevents
-	debug messages from reaching the console during the suspend/resume
-	operations.  This may be helpful when debugging device drivers'
-	suspend/resume routines, but may itself lead to problems, for example
-	if netconsole is used.
-
 config PM_TRACE
 	bool "Suspend/resume event tracing"
 	depends on PM_DEBUG && X86 && PM_SLEEP && EXPERIMENTAL
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index eb72255b5c86..8b15f777010a 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -45,17 +45,18 @@ enum {
 
 static int hibernation_mode = HIBERNATION_SHUTDOWN;

-static struct hibernation_ops *hibernation_ops;
+static struct platform_hibernation_ops *hibernation_ops;

 /**
  *	hibernation_set_ops - set the global hibernate operations
  *	@ops: the hibernation operations to use in subsequent hibernation transitions
  */

-void hibernation_set_ops(struct hibernation_ops *ops)
+void hibernation_set_ops(struct platform_hibernation_ops *ops)
 {
-	if (ops && !(ops->prepare && ops->enter && ops->finish
-	    && ops->pre_restore && ops->restore_cleanup)) {
+	if (ops && !(ops->start && ops->pre_snapshot && ops->finish
+	    && ops->prepare && ops->enter && ops->pre_restore
+	    && ops->restore_cleanup)) {
 		WARN_ON(1);
 		return;
 	}
@@ -69,16 +70,37 @@ void hibernation_set_ops(struct hibernation_ops *ops)
 	mutex_unlock(&pm_mutex);
 }

+/**
+ *	platform_start - tell the platform driver that we're starting
+ *	hibernation
+ */
+
+static int platform_start(int platform_mode)
+{
+	return (platform_mode && hibernation_ops) ?
+		hibernation_ops->start() : 0;
+}

 /**
- *	platform_prepare - prepare the machine for hibernation using the
+ *	platform_pre_snapshot - prepare the machine for hibernation using the
  *	platform driver if so configured and return an error code if it fails
  */

-static int platform_prepare(int platform_mode)
+static int platform_pre_snapshot(int platform_mode)
 {
 	return (platform_mode && hibernation_ops) ?
-		hibernation_ops->prepare() : 0;
+		hibernation_ops->pre_snapshot() : 0;
+}
+
+/**
+ *	platform_leave - prepare the machine for switching to the normal mode
+ *	of operation using the platform driver (called with interrupts disabled)
+ */
+
+static void platform_leave(int platform_mode)
+{
+	if (platform_mode && hibernation_ops)
+		hibernation_ops->leave();
 }

 /**
@@ -118,6 +140,51 @@ static void platform_restore_cleanup(int platform_mode)
 }

 /**
+ *	create_image - freeze devices that need to be frozen with interrupts
+ *	off, create the hibernation image and thaw those devices.  Control
+ *	reappears in this routine after a restore.
+ */
+
+int create_image(int platform_mode)
+{
+	int error;
+
+	error = arch_prepare_suspend();
+	if (error)
+		return error;
+
+	local_irq_disable();
+	/* At this point, device_suspend() has been called, but *not*
+	 * device_power_down(). We *must* call device_power_down() now.
+	 * Otherwise, drivers for some devices (e.g. interrupt controllers)
+	 * become desynchronized with the actual state of the hardware
+	 * at resume time, and evil weirdness ensues.
+	 */
+	error = device_power_down(PMSG_FREEZE);
+	if (error) {
+		printk(KERN_ERR "Some devices failed to power down, "
+			KERN_ERR "aborting suspend\n");
+		goto Enable_irqs;
+	}
+
+	save_processor_state();
+	error = swsusp_arch_suspend();
+	if (error)
+		printk(KERN_ERR "Error %d while creating the image\n", error);
+	/* Restore control flow magically appears here */
+	restore_processor_state();
+	if (!in_suspend)
+		platform_leave(platform_mode);
+	/* NOTE:  device_power_up() is just a resume() for devices
+	 * that suspended with irqs off ... no overall powerup.
+	 */
+	device_power_up();
+ Enable_irqs:
+	local_irq_enable();
+	return error;
+}
+
+/**
  *	hibernation_snapshot - quiesce devices and create the hibernation
  *	snapshot image.
  *	@platform_mode - if set, use the platform driver, if available, to
@@ -135,12 +202,16 @@ int hibernation_snapshot(int platform_mode)
 	if (error)
 		return error;

+	error = platform_start(platform_mode);
+	if (error)
+		return error;
+
 	suspend_console();
 	error = device_suspend(PMSG_FREEZE);
 	if (error)
 		goto Resume_console;

-	error = platform_prepare(platform_mode);
+	error = platform_pre_snapshot(platform_mode);
 	if (error)
 		goto Resume_devices;

@@ -148,7 +219,7 @@ int hibernation_snapshot(int platform_mode)
148 if (!error) { 219 if (!error) {
149 if (hibernation_mode != HIBERNATION_TEST) { 220 if (hibernation_mode != HIBERNATION_TEST) {
150 in_suspend = 1; 221 in_suspend = 1;
151 error = swsusp_suspend(); 222 error = create_image(platform_mode);
152 /* Control returns here after successful restore */ 223 /* Control returns here after successful restore */
153 } else { 224 } else {
154 printk("swsusp debug: Waiting for 5 seconds.\n"); 225 printk("swsusp debug: Waiting for 5 seconds.\n");
@@ -207,21 +278,50 @@ int hibernation_platform_enter(void)
207{ 278{
208 int error; 279 int error;
209 280
210 if (hibernation_ops) { 281 if (!hibernation_ops)
211 kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK); 282 return -ENOSYS;
212 /* 283
213 * We have cancelled the power transition by running 284 /*
214 * hibernation_ops->finish() before saving the image, so we 285 * We have cancelled the power transition by running
215 * should let the firmware know that we're going to enter the 286 * hibernation_ops->finish() before saving the image, so we should let
216 * sleep state after all 287 * the firmware know that we're going to enter the sleep state after all
217 */ 288 */
218 error = hibernation_ops->prepare(); 289 error = hibernation_ops->start();
219 sysdev_shutdown(); 290 if (error)
220 if (!error) 291 return error;
221 error = hibernation_ops->enter(); 292
222 } else { 293 suspend_console();
223 error = -ENOSYS; 294 error = device_suspend(PMSG_SUSPEND);
295 if (error)
296 goto Resume_console;
297
298 error = hibernation_ops->prepare();
299 if (error)
300 goto Resume_devices;
301
302 error = disable_nonboot_cpus();
303 if (error)
304 goto Finish;
305
306 local_irq_disable();
307 error = device_power_down(PMSG_SUSPEND);
308 if (!error) {
309 hibernation_ops->enter();
310 /* We should never get here */
311 while (1);
224 } 312 }
313 local_irq_enable();
314
315 /*
316 * We don't need to reenable the nonboot CPUs or resume consoles, since
317 * the system is going to be halted anyway.
318 */
319 Finish:
320 hibernation_ops->finish();
321 Resume_devices:
322 device_resume();
323 Resume_console:
324 resume_console();
225 return error; 325 return error;
226} 326}
227 327
@@ -238,14 +338,14 @@ static void power_down(void)
238 case HIBERNATION_TEST: 338 case HIBERNATION_TEST:
239 case HIBERNATION_TESTPROC: 339 case HIBERNATION_TESTPROC:
240 break; 340 break;
241 case HIBERNATION_SHUTDOWN:
242 kernel_power_off();
243 break;
244 case HIBERNATION_REBOOT: 341 case HIBERNATION_REBOOT:
245 kernel_restart(NULL); 342 kernel_restart(NULL);
246 break; 343 break;
247 case HIBERNATION_PLATFORM: 344 case HIBERNATION_PLATFORM:
248 hibernation_platform_enter(); 345 hibernation_platform_enter();
346 case HIBERNATION_SHUTDOWN:
347 kernel_power_off();
348 break;
249 } 349 }
250 kernel_halt(); 350 kernel_halt();
251 /* 351 /*
@@ -298,6 +398,10 @@ int hibernate(void)
298 if (error) 398 if (error)
299 goto Exit; 399 goto Exit;
300 400
401 printk("Syncing filesystems ... ");
402 sys_sync();
403 printk("done.\n");
404
301 error = prepare_processes(); 405 error = prepare_processes();
302 if (error) 406 if (error)
303 goto Finish; 407 goto Finish;
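The new create_image() above, like the rewritten hibernation_platform_enter(), unwinds partial failures through capitalized goto labels in reverse order of setup. A minimal user-space sketch of that unwinding pattern (hypothetical step_*() and undo_*() helpers, not kernel code):

/*
 * Illustrative only: the reverse-order "goto label" unwinding used by
 * create_image() and hibernation_platform_enter(), reduced to a
 * self-contained program with made-up steps.
 */
#include <stdio.h>

static int step_a(void) { puts("a: ok"); return 0; }
static int step_b(void) { puts("b: ok"); return 0; }
static int step_c(void) { puts("c: fails"); return -1; }

static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int do_transition(void)
{
	int error;

	error = step_a();
	if (error)
		return error;

	error = step_b();
	if (error)
		goto Undo_a;

	error = step_c();
	if (error)
		goto Undo_b;

	return 0;

 Undo_b:
	undo_b();
 Undo_a:
	undo_a();
	return error;
}

int main(void)
{
	return do_transition() ? 1 : 0;
}

Each failure path only undoes the steps that actually completed, which is why device_power_up() and local_irq_enable() sit after the labels in create_image() rather than inside every error branch.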
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 350b485b3b60..3cdf95b1dc92 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -20,6 +20,7 @@
20#include <linux/resume-trace.h> 20#include <linux/resume-trace.h>
21#include <linux/freezer.h> 21#include <linux/freezer.h>
22#include <linux/vmstat.h> 22#include <linux/vmstat.h>
23#include <linux/syscalls.h>
23 24
24#include "power.h" 25#include "power.h"
25 26
@@ -32,39 +33,32 @@ DEFINE_MUTEX(pm_mutex);
32/* This is just an arbitrary number */ 33/* This is just an arbitrary number */
33#define FREE_PAGE_NUMBER (100) 34#define FREE_PAGE_NUMBER (100)
34 35
35struct pm_ops *pm_ops; 36static struct platform_suspend_ops *suspend_ops;
36 37
37/** 38/**
38 * pm_set_ops - Set the global power method table. 39 * suspend_set_ops - Set the global suspend method table.
39 * @ops: Pointer to ops structure. 40 * @ops: Pointer to ops structure.
40 */ 41 */
41 42
42void pm_set_ops(struct pm_ops * ops) 43void suspend_set_ops(struct platform_suspend_ops *ops)
43{ 44{
44 mutex_lock(&pm_mutex); 45 mutex_lock(&pm_mutex);
45 pm_ops = ops; 46 suspend_ops = ops;
46 mutex_unlock(&pm_mutex); 47 mutex_unlock(&pm_mutex);
47} 48}
48 49
49/** 50/**
50 * pm_valid_only_mem - generic memory-only valid callback 51 * suspend_valid_only_mem - generic memory-only valid callback
51 * 52 *
52 * pm_ops drivers that implement mem suspend only and only need 53 * Platform drivers that implement mem suspend only and only need
53 * to check for that in their .valid callback can use this instead 54 * to check for that in their .valid callback can use this instead
54 * of rolling their own .valid callback. 55 * of rolling their own .valid callback.
55 */ 56 */
56int pm_valid_only_mem(suspend_state_t state) 57int suspend_valid_only_mem(suspend_state_t state)
57{ 58{
58 return state == PM_SUSPEND_MEM; 59 return state == PM_SUSPEND_MEM;
59} 60}
60 61
61
62static inline void pm_finish(suspend_state_t state)
63{
64 if (pm_ops->finish)
65 pm_ops->finish(state);
66}
67
68/** 62/**
69 * suspend_prepare - Do prep work before entering low-power state. 63 * suspend_prepare - Do prep work before entering low-power state.
70 * 64 *
@@ -76,7 +70,7 @@ static int suspend_prepare(void)
76 int error; 70 int error;
77 unsigned int free_pages; 71 unsigned int free_pages;
78 72
79 if (!pm_ops || !pm_ops->enter) 73 if (!suspend_ops || !suspend_ops->enter)
80 return -EPERM; 74 return -EPERM;
81 75
82 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE); 76 error = pm_notifier_call_chain(PM_SUSPEND_PREPARE);
@@ -128,7 +122,7 @@ void __attribute__ ((weak)) arch_suspend_enable_irqs(void)
128 * 122 *
129 * This function should be called after devices have been suspended. 123 * This function should be called after devices have been suspended.
130 */ 124 */
131int suspend_enter(suspend_state_t state) 125static int suspend_enter(suspend_state_t state)
132{ 126{
133 int error = 0; 127 int error = 0;
134 128
@@ -139,7 +133,7 @@ int suspend_enter(suspend_state_t state)
139 printk(KERN_ERR "Some devices failed to power down\n"); 133 printk(KERN_ERR "Some devices failed to power down\n");
140 goto Done; 134 goto Done;
141 } 135 }
142 error = pm_ops->enter(state); 136 error = suspend_ops->enter(state);
143 device_power_up(); 137 device_power_up();
144 Done: 138 Done:
145 arch_suspend_enable_irqs(); 139 arch_suspend_enable_irqs();
@@ -156,11 +150,11 @@ int suspend_devices_and_enter(suspend_state_t state)
156{ 150{
157 int error; 151 int error;
158 152
159 if (!pm_ops) 153 if (!suspend_ops)
160 return -ENOSYS; 154 return -ENOSYS;
161 155
162 if (pm_ops->set_target) { 156 if (suspend_ops->set_target) {
163 error = pm_ops->set_target(state); 157 error = suspend_ops->set_target(state);
164 if (error) 158 if (error)
165 return error; 159 return error;
166 } 160 }
@@ -170,8 +164,8 @@ int suspend_devices_and_enter(suspend_state_t state)
170 printk(KERN_ERR "Some devices failed to suspend\n"); 164 printk(KERN_ERR "Some devices failed to suspend\n");
171 goto Resume_console; 165 goto Resume_console;
172 } 166 }
173 if (pm_ops->prepare) { 167 if (suspend_ops->prepare) {
174 error = pm_ops->prepare(state); 168 error = suspend_ops->prepare();
175 if (error) 169 if (error)
176 goto Resume_devices; 170 goto Resume_devices;
177 } 171 }
@@ -180,7 +174,8 @@ int suspend_devices_and_enter(suspend_state_t state)
180 suspend_enter(state); 174 suspend_enter(state);
181 175
182 enable_nonboot_cpus(); 176 enable_nonboot_cpus();
183 pm_finish(state); 177 if (suspend_ops->finish)
178 suspend_ops->finish();
184 Resume_devices: 179 Resume_devices:
185 device_resume(); 180 device_resume();
186 Resume_console: 181 Resume_console:
@@ -214,7 +209,7 @@ static inline int valid_state(suspend_state_t state)
214 /* All states need lowlevel support and need to be valid 209 /* All states need lowlevel support and need to be valid
215 * to the lowlevel implementation, no valid callback 210 * to the lowlevel implementation, no valid callback
216 * implies that none are valid. */ 211 * implies that none are valid. */
217 if (!pm_ops || !pm_ops->valid || !pm_ops->valid(state)) 212 if (!suspend_ops || !suspend_ops->valid || !suspend_ops->valid(state))
218 return 0; 213 return 0;
219 return 1; 214 return 1;
220} 215}
@@ -236,9 +231,14 @@ static int enter_state(suspend_state_t state)
236 231
237 if (!valid_state(state)) 232 if (!valid_state(state))
238 return -ENODEV; 233 return -ENODEV;
234
239 if (!mutex_trylock(&pm_mutex)) 235 if (!mutex_trylock(&pm_mutex))
240 return -EBUSY; 236 return -EBUSY;
241 237
238 printk("Syncing filesystems ... ");
239 sys_sync();
240 printk("done.\n");
241
242 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]); 242 pr_debug("PM: Preparing system for %s sleep\n", pm_states[state]);
243 if ((error = suspend_prepare())) 243 if ((error = suspend_prepare()))
244 goto Unlock; 244 goto Unlock;
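The main.c conversion above replaces the global pm_ops pointer with a static suspend_ops table set under pm_mutex, and optional callbacks are checked for NULL before every call. A self-contained sketch of that ops-table idiom (user-space C with made-up demo_ops names, not the kernel API itself):

/*
 * Sketch of the platform_suspend_ops style registration: a mutex
 * guards the pointer swap, required hooks are validated up front and
 * optional hooks are called only when present.
 */
#include <pthread.h>
#include <stdio.h>

struct demo_ops {
	int  (*prepare)(void);		/* optional */
	int  (*enter)(int state);	/* required */
	void (*finish)(void);		/* optional */
};

static pthread_mutex_t ops_lock = PTHREAD_MUTEX_INITIALIZER;
static struct demo_ops *cur_ops;

static void demo_set_ops(struct demo_ops *ops)
{
	pthread_mutex_lock(&ops_lock);
	cur_ops = ops;
	pthread_mutex_unlock(&ops_lock);
}

static int demo_enter(int state)
{
	int error;

	if (!cur_ops || !cur_ops->enter)
		return -1;			/* no backend registered */

	if (cur_ops->prepare) {			/* optional hook */
		error = cur_ops->prepare();
		if (error)
			return error;
	}
	error = cur_ops->enter(state);
	if (cur_ops->finish)
		cur_ops->finish();
	return error;
}

static int my_enter(int state) { printf("enter %d\n", state); return 0; }
static struct demo_ops my_ops = { .enter = my_enter };

int main(void)
{
	demo_set_ops(&my_ops);
	return demo_enter(3);
}

Dropping the pm_finish() wrapper in favour of an inline NULL check keeps the optional-callback handling next to the call site, which is the shape the hunk above adopts.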
diff --git a/kernel/power/power.h b/kernel/power/power.h
index 95fbf2dd3fe3..195dc4611764 100644
--- a/kernel/power/power.h
+++ b/kernel/power/power.h
@@ -11,14 +11,32 @@ struct swsusp_info {
11 unsigned long size; 11 unsigned long size;
12} __attribute__((aligned(PAGE_SIZE))); 12} __attribute__((aligned(PAGE_SIZE)));
13 13
14#ifdef CONFIG_HIBERNATION
15#ifdef CONFIG_ARCH_HIBERNATION_HEADER
16/* Maximum size of architecture specific data in a hibernation header */
17#define MAX_ARCH_HEADER_SIZE (sizeof(struct new_utsname) + 4)
14 18
19extern int arch_hibernation_header_save(void *addr, unsigned int max_size);
20extern int arch_hibernation_header_restore(void *addr);
21
22static inline int init_header_complete(struct swsusp_info *info)
23{
24 return arch_hibernation_header_save(info, MAX_ARCH_HEADER_SIZE);
25}
26
27static inline char *check_image_kernel(struct swsusp_info *info)
28{
29 return arch_hibernation_header_restore(info) ?
30 "architecture specific data" : NULL;
31}
32#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
15 33
16#ifdef CONFIG_HIBERNATION
17/* 34/*
18 * Keep some memory free so that I/O operations can succeed without paging 35 * Keep some memory free so that I/O operations can succeed without paging
19 * [Might this be more than 4 MB?] 36 * [Might this be more than 4 MB?]
20 */ 37 */
21#define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT) 38#define PAGES_FOR_IO ((4096 * 1024) >> PAGE_SHIFT)
39
22/* 40/*
23 * Keep 1 MB of memory free so that device drivers can allocate some pages in 41 * Keep 1 MB of memory free so that device drivers can allocate some pages in
24 * their .suspend() routines without breaking the suspend to disk. 42 * their .suspend() routines without breaking the suspend to disk.
@@ -165,7 +183,6 @@ extern int swsusp_swap_in_use(void);
165extern int swsusp_check(void); 183extern int swsusp_check(void);
166extern int swsusp_shrink_memory(void); 184extern int swsusp_shrink_memory(void);
167extern void swsusp_free(void); 185extern void swsusp_free(void);
168extern int swsusp_suspend(void);
169extern int swsusp_resume(void); 186extern int swsusp_resume(void);
170extern int swsusp_read(unsigned int *flags_p); 187extern int swsusp_read(unsigned int *flags_p);
171extern int swsusp_write(unsigned int flags); 188extern int swsusp_write(unsigned int flags);
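power.h now selects the header-handling helpers at compile time: architectures that define CONFIG_ARCH_HIBERNATION_HEADER get inline wrappers around their arch hooks, while everyone else keeps the generic versions added to snapshot.c below. A rough sketch of that compile-time selection (hypothetical DEMO_ARCH_HEADER macro and demo_* names, not kernel code):

/*
 * Illustrative only: pick between an arch-specific and a generic
 * implementation of the same inline helper with a single #ifdef.
 */
#include <stdio.h>

struct demo_info { char data[64]; };

#ifdef DEMO_ARCH_HEADER
static int arch_header_save(void *addr, unsigned int max_size)
{
	snprintf(addr, max_size, "arch-specific header");
	return 0;
}

static inline int demo_init_header(struct demo_info *info)
{
	return arch_header_save(info->data, sizeof(info->data));
}
#else
static inline int demo_init_header(struct demo_info *info)
{
	snprintf(info->data, sizeof(info->data), "generic header");
	return 0;
}
#endif

int main(void)
{
	struct demo_info info;

	if (demo_init_header(&info))
		return 1;
	puts(info.data);
	return 0;
}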
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 3434940a3df1..6533923e711b 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -75,21 +75,79 @@ void refrigerator(void)
75 __set_current_state(save); 75 __set_current_state(save);
76} 76}
77 77
78static void freeze_task(struct task_struct *p) 78static void fake_signal_wake_up(struct task_struct *p, int resume)
79{ 79{
80 unsigned long flags; 80 unsigned long flags;
81 81
82 if (!freezing(p)) { 82 spin_lock_irqsave(&p->sighand->siglock, flags);
83 signal_wake_up(p, resume);
84 spin_unlock_irqrestore(&p->sighand->siglock, flags);
85}
86
87static void send_fake_signal(struct task_struct *p)
88{
89 if (p->state == TASK_STOPPED)
90 force_sig_specific(SIGSTOP, p);
91 fake_signal_wake_up(p, p->state == TASK_STOPPED);
92}
93
94static int has_mm(struct task_struct *p)
95{
96 return (p->mm && !(p->flags & PF_BORROWED_MM));
97}
98
99/**
100 * freeze_task - send a freeze request to given task
101 * @p: task to send the request to
102 * @with_mm_only: if set, the request will only be sent if the task has its
103 * own mm
 104 * Return value: 0 if @with_mm_only is set and the task has no mm of its
 105 * own or the task is frozen; 1 otherwise
106 *
 107 * The freeze request is sent by setting the task's TIF_FREEZE flag and
108 * either sending a fake signal to it or waking it up, depending on whether
 109 * or not it has its own mm (i.e. it is a user-land task). If @with_mm_only
 110 * is set and the task has no mm of its own (i.e. it is a kernel thread),
111 * its TIF_FREEZE flag should not be set.
112 *
113 * The task_lock() is necessary to prevent races with exit_mm() or
 114 * use_mm()/unuse_mm() from occurring.
115 */
116static int freeze_task(struct task_struct *p, int with_mm_only)
117{
118 int ret = 1;
119
120 task_lock(p);
121 if (freezing(p)) {
122 if (has_mm(p)) {
123 if (!signal_pending(p))
124 fake_signal_wake_up(p, 0);
125 } else {
126 if (with_mm_only)
127 ret = 0;
128 else
129 wake_up_state(p, TASK_INTERRUPTIBLE);
130 }
131 } else {
83 rmb(); 132 rmb();
84 if (!frozen(p)) { 133 if (frozen(p)) {
85 set_freeze_flag(p); 134 ret = 0;
86 if (p->state == TASK_STOPPED) 135 } else {
87 force_sig_specific(SIGSTOP, p); 136 if (has_mm(p)) {
88 spin_lock_irqsave(&p->sighand->siglock, flags); 137 set_freeze_flag(p);
89 signal_wake_up(p, p->state == TASK_STOPPED); 138 send_fake_signal(p);
90 spin_unlock_irqrestore(&p->sighand->siglock, flags); 139 } else {
140 if (with_mm_only) {
141 ret = 0;
142 } else {
143 set_freeze_flag(p);
144 wake_up_state(p, TASK_INTERRUPTIBLE);
145 }
146 }
91 } 147 }
92 } 148 }
149 task_unlock(p);
150 return ret;
93} 151}
94 152
95static void cancel_freezing(struct task_struct *p) 153static void cancel_freezing(struct task_struct *p)
@@ -110,6 +168,11 @@ static int try_to_freeze_tasks(int freeze_user_space)
110 struct task_struct *g, *p; 168 struct task_struct *g, *p;
111 unsigned long end_time; 169 unsigned long end_time;
112 unsigned int todo; 170 unsigned int todo;
171 struct timeval start, end;
172 s64 elapsed_csecs64;
173 unsigned int elapsed_csecs;
174
175 do_gettimeofday(&start);
113 176
114 end_time = jiffies + TIMEOUT; 177 end_time = jiffies + TIMEOUT;
115 do { 178 do {
@@ -119,31 +182,14 @@ static int try_to_freeze_tasks(int freeze_user_space)
119 if (frozen(p) || !freezeable(p)) 182 if (frozen(p) || !freezeable(p))
120 continue; 183 continue;
121 184
122 if (freeze_user_space) { 185 if (p->state == TASK_TRACED && frozen(p->parent)) {
123 if (p->state == TASK_TRACED && 186 cancel_freezing(p);
124 frozen(p->parent)) { 187 continue;
125 cancel_freezing(p);
126 continue;
127 }
128 /*
129 * Kernel threads should not have TIF_FREEZE set
130 * at this point, so we must ensure that either
131 * p->mm is not NULL *and* PF_BORROWED_MM is
132 * unset, or TIF_FRREZE is left unset.
133 * The task_lock() is necessary to prevent races
134 * with exit_mm() or use_mm()/unuse_mm() from
135 * occuring.
136 */
137 task_lock(p);
138 if (!p->mm || (p->flags & PF_BORROWED_MM)) {
139 task_unlock(p);
140 continue;
141 }
142 freeze_task(p);
143 task_unlock(p);
144 } else {
145 freeze_task(p);
146 } 188 }
189
190 if (!freeze_task(p, freeze_user_space))
191 continue;
192
147 if (!freezer_should_skip(p)) 193 if (!freezer_should_skip(p))
148 todo++; 194 todo++;
149 } while_each_thread(g, p); 195 } while_each_thread(g, p);
@@ -153,6 +199,11 @@ static int try_to_freeze_tasks(int freeze_user_space)
153 break; 199 break;
154 } while (todo); 200 } while (todo);
155 201
202 do_gettimeofday(&end);
203 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
204 do_div(elapsed_csecs64, NSEC_PER_SEC / 100);
205 elapsed_csecs = elapsed_csecs64;
206
156 if (todo) { 207 if (todo) {
157 /* This does not unfreeze processes that are already frozen 208 /* This does not unfreeze processes that are already frozen
158 * (we have slightly ugly calling convention in that respect, 209 * (we have slightly ugly calling convention in that respect,
@@ -160,10 +211,9 @@ static int try_to_freeze_tasks(int freeze_user_space)
160 * but it cleans up leftover PF_FREEZE requests. 211 * but it cleans up leftover PF_FREEZE requests.
161 */ 212 */
162 printk("\n"); 213 printk("\n");
163 printk(KERN_ERR "Freezing of %s timed out after %d seconds " 214 printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
164 "(%d tasks refusing to freeze):\n", 215 "(%d tasks refusing to freeze):\n",
165 freeze_user_space ? "user space " : "tasks ", 216 elapsed_csecs / 100, elapsed_csecs % 100, todo);
166 TIMEOUT / HZ, todo);
167 show_state(); 217 show_state();
168 read_lock(&tasklist_lock); 218 read_lock(&tasklist_lock);
169 do_each_thread(g, p) { 219 do_each_thread(g, p) {
@@ -174,6 +224,9 @@ static int try_to_freeze_tasks(int freeze_user_space)
174 task_unlock(p); 224 task_unlock(p);
175 } while_each_thread(g, p); 225 } while_each_thread(g, p);
176 read_unlock(&tasklist_lock); 226 read_unlock(&tasklist_lock);
227 } else {
228 printk("(elapsed %d.%02d seconds) ", elapsed_csecs / 100,
229 elapsed_csecs % 100);
177 } 230 }
178 231
179 return todo ? -EBUSY : 0; 232 return todo ? -EBUSY : 0;
@@ -186,19 +239,21 @@ int freeze_processes(void)
186{ 239{
187 int error; 240 int error;
188 241
189 printk("Stopping tasks ... "); 242 printk("Freezing user space processes ... ");
190 error = try_to_freeze_tasks(FREEZER_USER_SPACE); 243 error = try_to_freeze_tasks(FREEZER_USER_SPACE);
191 if (error) 244 if (error)
192 return error; 245 goto Exit;
246 printk("done.\n");
193 247
194 sys_sync(); 248 printk("Freezing remaining freezable tasks ... ");
195 error = try_to_freeze_tasks(FREEZER_KERNEL_THREADS); 249 error = try_to_freeze_tasks(FREEZER_KERNEL_THREADS);
196 if (error) 250 if (error)
197 return error; 251 goto Exit;
198 252 printk("done.");
199 printk("done.\n"); 253 Exit:
200 BUG_ON(in_atomic()); 254 BUG_ON(in_atomic());
201 return 0; 255 printk("\n");
256 return error;
202} 257}
203 258
204static void thaw_tasks(int thaw_user_space) 259static void thaw_tasks(int thaw_user_space)
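try_to_freeze_tasks() above now reports how long freezing took by converting a start/end timestamp pair into centiseconds and printing them as seconds with two decimals. A user-space sketch of the same arithmetic (clock_gettime() standing in for do_gettimeofday() and do_div(); names are illustrative):

/*
 * Illustrative only: measure an interval, reduce it to hundredths of
 * a second, and print "seconds.centiseconds" like the freezer does.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <unistd.h>

#define NSEC_PER_SEC 1000000000LL

static int64_t ts_to_ns(const struct timespec *ts)
{
	return (int64_t)ts->tv_sec * NSEC_PER_SEC + ts->tv_nsec;
}

int main(void)
{
	struct timespec start, end;
	int64_t elapsed_csecs64;
	unsigned int elapsed_csecs;

	clock_gettime(CLOCK_MONOTONIC, &start);
	usleep(123000);				/* pretend to freeze tasks */
	clock_gettime(CLOCK_MONOTONIC, &end);

	elapsed_csecs64 = ts_to_ns(&end) - ts_to_ns(&start);
	elapsed_csecs64 /= NSEC_PER_SEC / 100;	/* ns -> centiseconds */
	elapsed_csecs = (unsigned int)elapsed_csecs64;

	printf("(elapsed %u.%02u seconds)\n",
	       elapsed_csecs / 100, elapsed_csecs % 100);
	return 0;
}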
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index a686590d88c1..ccc95ac07bed 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1239,17 +1239,39 @@ asmlinkage int swsusp_save(void)
1239 return 0; 1239 return 0;
1240} 1240}
1241 1241
1242static void init_header(struct swsusp_info *info) 1242#ifndef CONFIG_ARCH_HIBERNATION_HEADER
1243static int init_header_complete(struct swsusp_info *info)
1243{ 1244{
1244 memset(info, 0, sizeof(struct swsusp_info)); 1245 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1245 info->version_code = LINUX_VERSION_CODE; 1246 info->version_code = LINUX_VERSION_CODE;
1247 return 0;
1248}
1249
1250static char *check_image_kernel(struct swsusp_info *info)
1251{
1252 if (info->version_code != LINUX_VERSION_CODE)
1253 return "kernel version";
1254 if (strcmp(info->uts.sysname,init_utsname()->sysname))
1255 return "system type";
1256 if (strcmp(info->uts.release,init_utsname()->release))
1257 return "kernel release";
1258 if (strcmp(info->uts.version,init_utsname()->version))
1259 return "version";
1260 if (strcmp(info->uts.machine,init_utsname()->machine))
1261 return "machine";
1262 return NULL;
1263}
1264#endif /* CONFIG_ARCH_HIBERNATION_HEADER */
1265
1266static int init_header(struct swsusp_info *info)
1267{
1268 memset(info, 0, sizeof(struct swsusp_info));
1246 info->num_physpages = num_physpages; 1269 info->num_physpages = num_physpages;
1247 memcpy(&info->uts, init_utsname(), sizeof(struct new_utsname));
1248 info->cpus = num_online_cpus();
1249 info->image_pages = nr_copy_pages; 1270 info->image_pages = nr_copy_pages;
1250 info->pages = nr_copy_pages + nr_meta_pages + 1; 1271 info->pages = nr_copy_pages + nr_meta_pages + 1;
1251 info->size = info->pages; 1272 info->size = info->pages;
1252 info->size <<= PAGE_SHIFT; 1273 info->size <<= PAGE_SHIFT;
1274 return init_header_complete(info);
1253} 1275}
1254 1276
1255/** 1277/**
@@ -1303,7 +1325,11 @@ int snapshot_read_next(struct snapshot_handle *handle, size_t count)
1303 return -ENOMEM; 1325 return -ENOMEM;
1304 } 1326 }
1305 if (!handle->offset) { 1327 if (!handle->offset) {
1306 init_header((struct swsusp_info *)buffer); 1328 int error;
1329
1330 error = init_header((struct swsusp_info *)buffer);
1331 if (error)
1332 return error;
1307 handle->buffer = buffer; 1333 handle->buffer = buffer;
1308 memory_bm_position_reset(&orig_bm); 1334 memory_bm_position_reset(&orig_bm);
1309 memory_bm_position_reset(&copy_bm); 1335 memory_bm_position_reset(&copy_bm);
@@ -1394,22 +1420,13 @@ duplicate_memory_bitmap(struct memory_bitmap *dst, struct memory_bitmap *src)
1394 } 1420 }
1395} 1421}
1396 1422
1397static inline int check_header(struct swsusp_info *info) 1423static int check_header(struct swsusp_info *info)
1398{ 1424{
1399 char *reason = NULL; 1425 char *reason;
1400 1426
1401 if (info->version_code != LINUX_VERSION_CODE) 1427 reason = check_image_kernel(info);
1402 reason = "kernel version"; 1428 if (!reason && info->num_physpages != num_physpages)
1403 if (info->num_physpages != num_physpages)
1404 reason = "memory size"; 1429 reason = "memory size";
1405 if (strcmp(info->uts.sysname,init_utsname()->sysname))
1406 reason = "system type";
1407 if (strcmp(info->uts.release,init_utsname()->release))
1408 reason = "kernel release";
1409 if (strcmp(info->uts.version,init_utsname()->version))
1410 reason = "version";
1411 if (strcmp(info->uts.machine,init_utsname()->machine))
1412 reason = "machine";
1413 if (reason) { 1430 if (reason) {
1414 printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason); 1431 printk(KERN_ERR "swsusp: Resume mismatch: %s\n", reason);
1415 return -EPERM; 1432 return -EPERM;
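check_header() above switches to a "first mismatch wins" style: check_image_kernel() returns a reason string when something differs and NULL when everything matches, so the caller emits one specific diagnostic instead of the last of several overwritten ones. A small stand-alone sketch of that pattern (hypothetical demo_header fields, not the real swsusp_info layout):

/*
 * Illustrative only: validation helpers that hand back a reason
 * string on failure and NULL on success.
 */
#include <stdio.h>
#include <string.h>

struct demo_header {
	int   version;
	long  num_pages;
	char  machine[16];
};

static const char *check_demo_kernel(const struct demo_header *h)
{
	if (h->version != 42)
		return "kernel version";
	if (strcmp(h->machine, "x86_64"))
		return "machine";
	return NULL;				/* everything matches */
}

static int check_demo_header(const struct demo_header *h, long cur_pages)
{
	const char *reason = check_demo_kernel(h);

	if (!reason && h->num_pages != cur_pages)
		reason = "memory size";
	if (reason) {
		fprintf(stderr, "resume mismatch: %s\n", reason);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct demo_header h = { .version = 42, .num_pages = 1024,
				 .machine = "x86_64" };

	return check_demo_header(&h, 2048) ? 1 : 0;
}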
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 5da304c8f1f6..e1722d3155f1 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -270,39 +270,6 @@ int swsusp_shrink_memory(void)
270 return 0; 270 return 0;
271} 271}
272 272
273int swsusp_suspend(void)
274{
275 int error;
276
277 if ((error = arch_prepare_suspend()))
278 return error;
279
280 local_irq_disable();
281 /* At this point, device_suspend() has been called, but *not*
282 * device_power_down(). We *must* device_power_down() now.
283 * Otherwise, drivers for some devices (e.g. interrupt controllers)
284 * become desynchronized with the actual state of the hardware
285 * at resume time, and evil weirdness ensues.
286 */
287 if ((error = device_power_down(PMSG_FREEZE))) {
288 printk(KERN_ERR "Some devices failed to power down, aborting suspend\n");
289 goto Enable_irqs;
290 }
291
292 save_processor_state();
293 if ((error = swsusp_arch_suspend()))
294 printk(KERN_ERR "Error %d suspending\n", error);
295 /* Restore control flow magically appears here */
296 restore_processor_state();
297 /* NOTE: device_power_up() is just a resume() for devices
298 * that suspended with irqs off ... no overall powerup.
299 */
300 device_power_up();
301 Enable_irqs:
302 local_irq_enable();
303 return error;
304}
305
306int swsusp_resume(void) 273int swsusp_resume(void)
307{ 274{
308 int error; 275 int error;
diff --git a/kernel/power/user.c b/kernel/power/user.c
index bd0723a7df3f..5bd321bcbb75 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -153,6 +153,10 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
153 mutex_lock(&pm_mutex); 153 mutex_lock(&pm_mutex);
154 error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE); 154 error = pm_notifier_call_chain(PM_HIBERNATION_PREPARE);
155 if (!error) { 155 if (!error) {
156 printk("Syncing filesystems ... ");
157 sys_sync();
158 printk("done.\n");
159
156 error = freeze_processes(); 160 error = freeze_processes();
157 if (error) 161 if (error)
158 thaw_processes(); 162 thaw_processes();
diff --git a/kernel/printk.c b/kernel/printk.c
index 52493474f0ab..a30fe33de395 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -862,7 +862,16 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
862 return -1; 862 return -1;
863} 863}
864 864
865#ifndef CONFIG_DISABLE_CONSOLE_SUSPEND 865int console_suspend_enabled = 1;
866EXPORT_SYMBOL(console_suspend_enabled);
867
868static int __init console_suspend_disable(char *str)
869{
870 console_suspend_enabled = 0;
871 return 1;
872}
873__setup("no_console_suspend", console_suspend_disable);
874
866/** 875/**
867 * suspend_console - suspend the console subsystem 876 * suspend_console - suspend the console subsystem
868 * 877 *
@@ -870,6 +879,8 @@ int update_console_cmdline(char *name, int idx, char *name_new, int idx_new, cha
870 */ 879 */
871void suspend_console(void) 880void suspend_console(void)
872{ 881{
882 if (!console_suspend_enabled)
883 return;
873 printk("Suspending console(s)\n"); 884 printk("Suspending console(s)\n");
874 acquire_console_sem(); 885 acquire_console_sem();
875 console_suspended = 1; 886 console_suspended = 1;
@@ -877,10 +888,11 @@ void suspend_console(void)
877 888
878void resume_console(void) 889void resume_console(void)
879{ 890{
891 if (!console_suspend_enabled)
892 return;
880 console_suspended = 0; 893 console_suspended = 0;
881 release_console_sem(); 894 release_console_sem();
882} 895}
883#endif /* CONFIG_DISABLE_CONSOLE_SUSPEND */
884 896
885/** 897/**
886 * acquire_console_sem - lock the console system for exclusive use. 898 * acquire_console_sem - lock the console system for exclusive use.
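The printk.c change above replaces the compile-time CONFIG_DISABLE_CONSOLE_SUSPEND #ifdef with a runtime console_suspend_enabled flag that the no_console_suspend boot parameter clears and that suspend_console()/resume_console() check on entry. A user-space analogue of that run-time switch (a made-up --no-console-suspend flag standing in for the __setup() parameter):

/*
 * Illustrative only: a boolean toggled at startup guards both sides
 * of a suspend/resume pair instead of compiling them out.
 */
#include <stdio.h>
#include <string.h>

static int console_suspend_enabled = 1;

static void suspend_console_demo(void)
{
	if (!console_suspend_enabled)
		return;				/* early-out at run time */
	puts("Suspending console(s)");
}

static void resume_console_demo(void)
{
	if (!console_suspend_enabled)
		return;
	puts("Resuming console(s)");
}

int main(int argc, char **argv)
{
	int i;

	for (i = 1; i < argc; i++)
		if (!strcmp(argv[i], "--no-console-suspend"))
			console_suspend_enabled = 0;

	suspend_console_demo();
	resume_console_demo();
	return 0;
}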
diff --git a/kernel/relay.c b/kernel/relay.c
index ad855017bc59..61134eb7a0c8 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -370,7 +370,7 @@ void relay_reset(struct rchan *chan)
370 if (!chan) 370 if (!chan)
371 return; 371 return;
372 372
373 if (chan->is_global && chan->buf[0]) { 373 if (chan->is_global && chan->buf[0]) {
374 __relay_reset(chan->buf[0], 0); 374 __relay_reset(chan->buf[0], 0);
375 return; 375 return;
376 } 376 }
@@ -850,13 +850,13 @@ static int relay_file_read_avail(struct rchan_buf *buf, size_t read_pos)
850 buf->subbufs_consumed = consumed; 850 buf->subbufs_consumed = consumed;
851 buf->bytes_consumed = 0; 851 buf->bytes_consumed = 0;
852 } 852 }
853 853
854 produced = (produced % n_subbufs) * subbuf_size + buf->offset; 854 produced = (produced % n_subbufs) * subbuf_size + buf->offset;
855 consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed; 855 consumed = (consumed % n_subbufs) * subbuf_size + buf->bytes_consumed;
856 856
857 if (consumed > produced) 857 if (consumed > produced)
858 produced += n_subbufs * subbuf_size; 858 produced += n_subbufs * subbuf_size;
859 859
860 if (consumed == produced) 860 if (consumed == produced)
861 return 0; 861 return 0;
862 862
diff --git a/kernel/sched.c b/kernel/sched.c
index 92721d1534b8..ed90be46fb31 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -266,7 +266,8 @@ struct rt_rq {
266 * acquire operations must be ordered by ascending &runqueue. 266 * acquire operations must be ordered by ascending &runqueue.
267 */ 267 */
268struct rq { 268struct rq {
269 spinlock_t lock; /* runqueue lock */ 269 /* runqueue lock: */
270 spinlock_t lock;
270 271
271 /* 272 /*
272 * nr_running and cpu_load should be in the same cacheline because 273 * nr_running and cpu_load should be in the same cacheline because
@@ -279,13 +280,15 @@ struct rq {
279#ifdef CONFIG_NO_HZ 280#ifdef CONFIG_NO_HZ
280 unsigned char in_nohz_recently; 281 unsigned char in_nohz_recently;
281#endif 282#endif
282 struct load_weight load; /* capture load from *all* tasks on this cpu */ 283 /* capture load from *all* tasks on this cpu: */
284 struct load_weight load;
283 unsigned long nr_load_updates; 285 unsigned long nr_load_updates;
284 u64 nr_switches; 286 u64 nr_switches;
285 287
286 struct cfs_rq cfs; 288 struct cfs_rq cfs;
287#ifdef CONFIG_FAIR_GROUP_SCHED 289#ifdef CONFIG_FAIR_GROUP_SCHED
288 struct list_head leaf_cfs_rq_list; /* list of leaf cfs_rq on this cpu */ 290 /* list of leaf cfs_rq on this cpu: */
291 struct list_head leaf_cfs_rq_list;
289#endif 292#endif
290 struct rt_rq rt; 293 struct rt_rq rt;
291 294
@@ -317,7 +320,8 @@ struct rq {
317 /* For active balancing */ 320 /* For active balancing */
318 int active_balance; 321 int active_balance;
319 int push_cpu; 322 int push_cpu;
320 int cpu; /* cpu of this runqueue */ 323 /* cpu of this runqueue: */
324 int cpu;
321 325
322 struct task_struct *migration_thread; 326 struct task_struct *migration_thread;
323 struct list_head migration_queue; 327 struct list_head migration_queue;
@@ -328,22 +332,22 @@ struct rq {
328 struct sched_info rq_sched_info; 332 struct sched_info rq_sched_info;
329 333
330 /* sys_sched_yield() stats */ 334 /* sys_sched_yield() stats */
331 unsigned long yld_exp_empty; 335 unsigned int yld_exp_empty;
332 unsigned long yld_act_empty; 336 unsigned int yld_act_empty;
333 unsigned long yld_both_empty; 337 unsigned int yld_both_empty;
334 unsigned long yld_count; 338 unsigned int yld_count;
335 339
336 /* schedule() stats */ 340 /* schedule() stats */
337 unsigned long sched_switch; 341 unsigned int sched_switch;
338 unsigned long sched_count; 342 unsigned int sched_count;
339 unsigned long sched_goidle; 343 unsigned int sched_goidle;
340 344
341 /* try_to_wake_up() stats */ 345 /* try_to_wake_up() stats */
342 unsigned long ttwu_count; 346 unsigned int ttwu_count;
343 unsigned long ttwu_local; 347 unsigned int ttwu_local;
344 348
345 /* BKL stats */ 349 /* BKL stats */
346 unsigned long bkl_count; 350 unsigned int bkl_count;
347#endif 351#endif
348 struct lock_class_key rq_lock_key; 352 struct lock_class_key rq_lock_key;
349}; 353};
@@ -449,12 +453,12 @@ enum {
449}; 453};
450 454
451const_debug unsigned int sysctl_sched_features = 455const_debug unsigned int sysctl_sched_features =
452 SCHED_FEAT_NEW_FAIR_SLEEPERS *1 | 456 SCHED_FEAT_NEW_FAIR_SLEEPERS * 1 |
453 SCHED_FEAT_START_DEBIT *1 | 457 SCHED_FEAT_START_DEBIT * 1 |
454 SCHED_FEAT_TREE_AVG *0 | 458 SCHED_FEAT_TREE_AVG * 0 |
455 SCHED_FEAT_APPROX_AVG *0 | 459 SCHED_FEAT_APPROX_AVG * 0 |
456 SCHED_FEAT_WAKEUP_PREEMPT *1 | 460 SCHED_FEAT_WAKEUP_PREEMPT * 1 |
457 SCHED_FEAT_PREEMPT_RESTRICT *1; 461 SCHED_FEAT_PREEMPT_RESTRICT * 1;
458 462
459#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x) 463#define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)
460 464
@@ -3334,6 +3338,16 @@ void account_guest_time(struct task_struct *p, cputime_t cputime)
3334} 3338}
3335 3339
3336/* 3340/*
3341 * Account scaled user cpu time to a process.
3342 * @p: the process that the cpu time gets accounted to
3343 * @cputime: the cpu time spent in user space since the last update
3344 */
3345void account_user_time_scaled(struct task_struct *p, cputime_t cputime)
3346{
3347 p->utimescaled = cputime_add(p->utimescaled, cputime);
3348}
3349
3350/*
3337 * Account system cpu time to a process. 3351 * Account system cpu time to a process.
3338 * @p: the process that the cpu time gets accounted to 3352 * @p: the process that the cpu time gets accounted to
3339 * @hardirq_offset: the offset to subtract from hardirq_count() 3353 * @hardirq_offset: the offset to subtract from hardirq_count()
@@ -3371,6 +3385,17 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
3371} 3385}
3372 3386
3373/* 3387/*
3388 * Account scaled system cpu time to a process.
3389 * @p: the process that the cpu time gets accounted to
3390 * @hardirq_offset: the offset to subtract from hardirq_count()
3391 * @cputime: the cpu time spent in kernel space since the last update
3392 */
3393void account_system_time_scaled(struct task_struct *p, cputime_t cputime)
3394{
3395 p->stimescaled = cputime_add(p->stimescaled, cputime);
3396}
3397
3398/*
3374 * Account for involuntary wait time. 3399 * Account for involuntary wait time.
3375 * @p: the process from which the cpu time has been stolen 3400 * @p: the process from which the cpu time has been stolen
3376 * @steal: the cpu time spent in involuntary wait 3401 * @steal: the cpu time spent in involuntary wait
@@ -3859,7 +3884,10 @@ EXPORT_SYMBOL(wait_for_completion_timeout);
3859 3884
3860int __sched wait_for_completion_interruptible(struct completion *x) 3885int __sched wait_for_completion_interruptible(struct completion *x)
3861{ 3886{
3862 return wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); 3887 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
3888 if (t == -ERESTARTSYS)
3889 return t;
3890 return 0;
3863} 3891}
3864EXPORT_SYMBOL(wait_for_completion_interruptible); 3892EXPORT_SYMBOL(wait_for_completion_interruptible);
3865 3893
@@ -4794,18 +4822,18 @@ static void show_task(struct task_struct *p)
4794 unsigned state; 4822 unsigned state;
4795 4823
4796 state = p->state ? __ffs(p->state) + 1 : 0; 4824 state = p->state ? __ffs(p->state) + 1 : 0;
4797 printk("%-13.13s %c", p->comm, 4825 printk(KERN_INFO "%-13.13s %c", p->comm,
4798 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?'); 4826 state < sizeof(stat_nam) - 1 ? stat_nam[state] : '?');
4799#if BITS_PER_LONG == 32 4827#if BITS_PER_LONG == 32
4800 if (state == TASK_RUNNING) 4828 if (state == TASK_RUNNING)
4801 printk(" running "); 4829 printk(KERN_CONT " running ");
4802 else 4830 else
4803 printk(" %08lx ", thread_saved_pc(p)); 4831 printk(KERN_CONT " %08lx ", thread_saved_pc(p));
4804#else 4832#else
4805 if (state == TASK_RUNNING) 4833 if (state == TASK_RUNNING)
4806 printk(" running task "); 4834 printk(KERN_CONT " running task ");
4807 else 4835 else
4808 printk(" %016lx ", thread_saved_pc(p)); 4836 printk(KERN_CONT " %016lx ", thread_saved_pc(p));
4809#endif 4837#endif
4810#ifdef CONFIG_DEBUG_STACK_USAGE 4838#ifdef CONFIG_DEBUG_STACK_USAGE
4811 { 4839 {
@@ -4815,7 +4843,7 @@ static void show_task(struct task_struct *p)
4815 free = (unsigned long)n - (unsigned long)end_of_stack(p); 4843 free = (unsigned long)n - (unsigned long)end_of_stack(p);
4816 } 4844 }
4817#endif 4845#endif
4818 printk("%5lu %5d %6d\n", free, p->pid, p->parent->pid); 4846 printk(KERN_CONT "%5lu %5d %6d\n", free, p->pid, p->parent->pid);
4819 4847
4820 if (state != TASK_RUNNING) 4848 if (state != TASK_RUNNING)
4821 show_stack(p, NULL); 4849 show_stack(p, NULL);
@@ -5364,7 +5392,7 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
5364 return table; 5392 return table;
5365} 5393}
5366 5394
5367static ctl_table *sd_alloc_ctl_cpu_table(int cpu) 5395static ctl_table * sd_alloc_ctl_cpu_table(int cpu)
5368{ 5396{
5369 struct ctl_table *entry, *table; 5397 struct ctl_table *entry, *table;
5370 struct sched_domain *sd; 5398 struct sched_domain *sd;
@@ -5598,20 +5626,20 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
5598 } 5626 }
5599 5627
5600 if (!group->__cpu_power) { 5628 if (!group->__cpu_power) {
5601 printk("\n"); 5629 printk(KERN_CONT "\n");
5602 printk(KERN_ERR "ERROR: domain->cpu_power not " 5630 printk(KERN_ERR "ERROR: domain->cpu_power not "
5603 "set\n"); 5631 "set\n");
5604 break; 5632 break;
5605 } 5633 }
5606 5634
5607 if (!cpus_weight(group->cpumask)) { 5635 if (!cpus_weight(group->cpumask)) {
5608 printk("\n"); 5636 printk(KERN_CONT "\n");
5609 printk(KERN_ERR "ERROR: empty group\n"); 5637 printk(KERN_ERR "ERROR: empty group\n");
5610 break; 5638 break;
5611 } 5639 }
5612 5640
5613 if (cpus_intersects(groupmask, group->cpumask)) { 5641 if (cpus_intersects(groupmask, group->cpumask)) {
5614 printk("\n"); 5642 printk(KERN_CONT "\n");
5615 printk(KERN_ERR "ERROR: repeated CPUs\n"); 5643 printk(KERN_ERR "ERROR: repeated CPUs\n");
5616 break; 5644 break;
5617 } 5645 }
@@ -5619,11 +5647,11 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
5619 cpus_or(groupmask, groupmask, group->cpumask); 5647 cpus_or(groupmask, groupmask, group->cpumask);
5620 5648
5621 cpumask_scnprintf(str, NR_CPUS, group->cpumask); 5649 cpumask_scnprintf(str, NR_CPUS, group->cpumask);
5622 printk(" %s", str); 5650 printk(KERN_CONT " %s", str);
5623 5651
5624 group = group->next; 5652 group = group->next;
5625 } while (group != sd->groups); 5653 } while (group != sd->groups);
5626 printk("\n"); 5654 printk(KERN_CONT "\n");
5627 5655
5628 if (!cpus_equal(sd->span, groupmask)) 5656 if (!cpus_equal(sd->span, groupmask))
5629 printk(KERN_ERR "ERROR: groups don't span " 5657 printk(KERN_ERR "ERROR: groups don't span "
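One of the cleanups above reformats the sysctl_sched_features default as a column of "FEATURE * 1" and "FEATURE * 0" terms, so the mask reads like an on/off table while still folding to a compile-time constant. A minimal sketch of that idiom (hypothetical DEMO_FEAT_* bits, not the scheduler's real feature set):

/*
 * Illustrative only: build a default feature mask from per-feature
 * on/off literals and test bits with a token-pasting helper.
 */
#include <stdio.h>

enum {
	DEMO_FEAT_NEW_FAIR_SLEEPERS = 1,
	DEMO_FEAT_START_DEBIT       = 2,
	DEMO_FEAT_TREE_AVG          = 4,
	DEMO_FEAT_WAKEUP_PREEMPT    = 8,
};

static const unsigned int demo_features =
		DEMO_FEAT_NEW_FAIR_SLEEPERS * 1 |
		DEMO_FEAT_START_DEBIT       * 1 |
		DEMO_FEAT_TREE_AVG          * 0 |	/* off by default */
		DEMO_FEAT_WAKEUP_PREEMPT    * 1;

#define demo_feat(x) (demo_features & DEMO_FEAT_##x)

int main(void)
{
	printf("TREE_AVG enabled: %d\n", demo_feat(TREE_AVG) ? 1 : 0);
	printf("START_DEBIT enabled: %d\n", demo_feat(START_DEBIT) ? 1 : 0);
	return 0;
}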
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index a5e517ec07c3..e6fb392e5164 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -137,7 +137,7 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
137 SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running); 137 SEQ_printf(m, " .%-30s: %ld\n", "nr_running", cfs_rq->nr_running);
138 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight); 138 SEQ_printf(m, " .%-30s: %ld\n", "load", cfs_rq->load.weight);
139#ifdef CONFIG_SCHEDSTATS 139#ifdef CONFIG_SCHEDSTATS
140 SEQ_printf(m, " .%-30s: %ld\n", "bkl_count", 140 SEQ_printf(m, " .%-30s: %d\n", "bkl_count",
141 rq->bkl_count); 141 rq->bkl_count);
142#endif 142#endif
143 SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over", 143 SEQ_printf(m, " .%-30s: %ld\n", "nr_spread_over",
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 1c084842c3e7..ef1a7df80ea2 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -21,7 +21,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
21 21
22 /* runqueue-specific stats */ 22 /* runqueue-specific stats */
23 seq_printf(seq, 23 seq_printf(seq,
24 "cpu%d %lu %lu %lu %lu %lu %lu %lu %lu %lu %llu %llu %lu", 24 "cpu%d %u %u %u %u %u %u %u %u %u %llu %llu %lu",
25 cpu, rq->yld_both_empty, 25 cpu, rq->yld_both_empty,
26 rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count, 26 rq->yld_act_empty, rq->yld_exp_empty, rq->yld_count,
27 rq->sched_switch, rq->sched_count, rq->sched_goidle, 27 rq->sched_switch, rq->sched_count, rq->sched_goidle,
@@ -42,8 +42,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
42 seq_printf(seq, "domain%d %s", dcount++, mask_str); 42 seq_printf(seq, "domain%d %s", dcount++, mask_str);
43 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES; 43 for (itype = CPU_IDLE; itype < CPU_MAX_IDLE_TYPES;
44 itype++) { 44 itype++) {
45 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu " 45 seq_printf(seq, " %u %u %u %u %u %u %u %u",
46 "%lu",
47 sd->lb_count[itype], 46 sd->lb_count[itype],
48 sd->lb_balanced[itype], 47 sd->lb_balanced[itype],
49 sd->lb_failed[itype], 48 sd->lb_failed[itype],
@@ -53,8 +52,7 @@ static int show_schedstat(struct seq_file *seq, void *v)
53 sd->lb_nobusyq[itype], 52 sd->lb_nobusyq[itype],
54 sd->lb_nobusyg[itype]); 53 sd->lb_nobusyg[itype]);
55 } 54 }
56 seq_printf(seq, " %lu %lu %lu %lu %lu %lu %lu %lu %lu" 55 seq_printf(seq, " %u %u %u %u %u %u %u %u %u %u %u %u\n",
57 " %lu %lu %lu\n",
58 sd->alb_count, sd->alb_failed, sd->alb_pushed, 56 sd->alb_count, sd->alb_failed, sd->alb_pushed,
59 sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed, 57 sd->sbe_count, sd->sbe_balanced, sd->sbe_pushed,
60 sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed, 58 sd->sbf_count, sd->sbf_balanced, sd->sbf_pushed,
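The sched_stats.h hunks above follow from the counters in struct rq and struct sched_domain shrinking from unsigned long to unsigned int: every %lu in the seq_printf() format strings has to become %u so the conversions keep matching the varargs. A tiny stand-alone reminder of that pairing (made-up demo_stats fields):

/*
 * Illustrative only: printf-style specifiers must track field widths,
 * so a type change in the struct forces a format-string change too.
 */
#include <stdio.h>

struct demo_stats {
	unsigned int       yld_count;	/* was unsigned long before the patch */
	unsigned long long span_ns;	/* 64-bit fields keep %llu */
};

int main(void)
{
	struct demo_stats st = { .yld_count = 17, .span_ns = 123456789ULL };

	/* %u for unsigned int, %llu for unsigned long long */
	printf("cpu0 %u %llu\n", st.yld_count, st.span_ns);
	return 0;
}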
diff --git a/kernel/signal.c b/kernel/signal.c
index 2124ffadcfde..e4f059cd9867 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -99,7 +99,6 @@ static inline int has_pending_signals(sigset_t *signal, sigset_t *blocked)
99static int recalc_sigpending_tsk(struct task_struct *t) 99static int recalc_sigpending_tsk(struct task_struct *t)
100{ 100{
101 if (t->signal->group_stop_count > 0 || 101 if (t->signal->group_stop_count > 0 ||
102 (freezing(t)) ||
103 PENDING(&t->pending, &t->blocked) || 102 PENDING(&t->pending, &t->blocked) ||
104 PENDING(&t->signal->shared_pending, &t->blocked)) { 103 PENDING(&t->signal->shared_pending, &t->blocked)) {
105 set_tsk_thread_flag(t, TIF_SIGPENDING); 104 set_tsk_thread_flag(t, TIF_SIGPENDING);
diff --git a/kernel/sys.c b/kernel/sys.c
index 8ae2e636eb1b..bc8879c822a5 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -105,7 +105,6 @@ EXPORT_SYMBOL(cad_pid);
105 */ 105 */
106 106
107void (*pm_power_off_prepare)(void); 107void (*pm_power_off_prepare)(void);
108EXPORT_SYMBOL(pm_power_off_prepare);
109 108
110/* 109/*
111 * Notifier list for kernel code which wants to be called 110 * Notifier list for kernel code which wants to be called
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index dde3d53e8adc..067554bda8b7 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -24,7 +24,7 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/sysctl.h> 25#include <linux/sysctl.h>
26#include <linux/proc_fs.h> 26#include <linux/proc_fs.h>
27#include <linux/capability.h> 27#include <linux/security.h>
28#include <linux/ctype.h> 28#include <linux/ctype.h>
29#include <linux/utsname.h> 29#include <linux/utsname.h>
30#include <linux/smp_lock.h> 30#include <linux/smp_lock.h>
@@ -55,6 +55,8 @@
55#include <asm/stacktrace.h> 55#include <asm/stacktrace.h>
56#endif 56#endif
57 57
58static int deprecated_sysctl_warning(struct __sysctl_args *args);
59
58#if defined(CONFIG_SYSCTL) 60#if defined(CONFIG_SYSCTL)
59 61
60/* External variables not in a header file. */ 62/* External variables not in a header file. */
@@ -142,32 +144,29 @@ extern int max_lock_depth;
142 144
143#ifdef CONFIG_SYSCTL_SYSCALL 145#ifdef CONFIG_SYSCTL_SYSCALL
144static int parse_table(int __user *, int, void __user *, size_t __user *, 146static int parse_table(int __user *, int, void __user *, size_t __user *,
145 void __user *, size_t, ctl_table *); 147 void __user *, size_t, struct ctl_table *);
146#endif 148#endif
147 149
148 150
149#ifdef CONFIG_PROC_SYSCTL 151#ifdef CONFIG_PROC_SYSCTL
150static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, 152static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
151 void __user *buffer, size_t *lenp, loff_t *ppos); 153 void __user *buffer, size_t *lenp, loff_t *ppos);
152static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp, 154static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
153 void __user *buffer, size_t *lenp, loff_t *ppos); 155 void __user *buffer, size_t *lenp, loff_t *ppos);
154#endif 156#endif
155 157
156static ctl_table root_table[]; 158static struct ctl_table root_table[];
157static struct ctl_table_header root_table_header = 159static struct ctl_table_header root_table_header =
158 { root_table, LIST_HEAD_INIT(root_table_header.ctl_entry) }; 160 { root_table, LIST_HEAD_INIT(root_table_header.ctl_entry) };
159 161
160static ctl_table kern_table[]; 162static struct ctl_table kern_table[];
161static ctl_table vm_table[]; 163static struct ctl_table vm_table[];
162static ctl_table fs_table[]; 164static struct ctl_table fs_table[];
163static ctl_table debug_table[]; 165static struct ctl_table debug_table[];
164static ctl_table dev_table[]; 166static struct ctl_table dev_table[];
165extern ctl_table random_table[]; 167extern struct ctl_table random_table[];
166#ifdef CONFIG_UNIX98_PTYS
167extern ctl_table pty_table[];
168#endif
169#ifdef CONFIG_INOTIFY_USER 168#ifdef CONFIG_INOTIFY_USER
170extern ctl_table inotify_table[]; 169extern struct ctl_table inotify_table[];
171#endif 170#endif
172 171
173#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT 172#ifdef HAVE_ARCH_PICK_MMAP_LAYOUT
@@ -179,7 +178,7 @@ extern int lock_stat;
179 178
180/* The default sysctl tables: */ 179/* The default sysctl tables: */
181 180
182static ctl_table root_table[] = { 181static struct ctl_table root_table[] = {
183 { 182 {
184 .ctl_name = CTL_KERN, 183 .ctl_name = CTL_KERN,
185 .procname = "kernel", 184 .procname = "kernel",
@@ -232,7 +231,7 @@ static unsigned long min_wakeup_granularity_ns; /* 0 usecs */
232static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */ 231static unsigned long max_wakeup_granularity_ns = 1000000000; /* 1 second */
233#endif 232#endif
234 233
235static ctl_table kern_table[] = { 234static struct ctl_table kern_table[] = {
236#ifdef CONFIG_SCHED_DEBUG 235#ifdef CONFIG_SCHED_DEBUG
237 { 236 {
238 .ctl_name = CTL_UNNUMBERED, 237 .ctl_name = CTL_UNNUMBERED,
@@ -365,7 +364,6 @@ static ctl_table kern_table[] = {
365 }, 364 },
366#ifdef CONFIG_PROC_SYSCTL 365#ifdef CONFIG_PROC_SYSCTL
367 { 366 {
368 .ctl_name = KERN_TAINTED,
369 .procname = "tainted", 367 .procname = "tainted",
370 .data = &tainted, 368 .data = &tainted,
371 .maxlen = sizeof(int), 369 .maxlen = sizeof(int),
@@ -373,14 +371,15 @@ static ctl_table kern_table[] = {
373 .proc_handler = &proc_dointvec_taint, 371 .proc_handler = &proc_dointvec_taint,
374 }, 372 },
375#endif 373#endif
374#ifdef CONFIG_SECURITY_CAPABILITIES
376 { 375 {
377 .ctl_name = KERN_CAP_BSET,
378 .procname = "cap-bound", 376 .procname = "cap-bound",
379 .data = &cap_bset, 377 .data = &cap_bset,
380 .maxlen = sizeof(kernel_cap_t), 378 .maxlen = sizeof(kernel_cap_t),
381 .mode = 0600, 379 .mode = 0600,
382 .proc_handler = &proc_dointvec_bset, 380 .proc_handler = &proc_dointvec_bset,
383 }, 381 },
382#endif /* def CONFIG_SECURITY_CAPABILITIES */
384#ifdef CONFIG_BLK_DEV_INITRD 383#ifdef CONFIG_BLK_DEV_INITRD
385 { 384 {
386 .ctl_name = KERN_REALROOTDEV, 385 .ctl_name = KERN_REALROOTDEV,
@@ -514,7 +513,6 @@ static ctl_table kern_table[] = {
514#endif 513#endif
515#ifdef CONFIG_PROC_SYSCTL 514#ifdef CONFIG_PROC_SYSCTL
516 { 515 {
517 .ctl_name = KERN_CADPID,
518 .procname = "cad_pid", 516 .procname = "cad_pid",
519 .data = NULL, 517 .data = NULL,
520 .maxlen = sizeof (int), 518 .maxlen = sizeof (int),
@@ -536,14 +534,6 @@ static ctl_table kern_table[] = {
536 .mode = 0555, 534 .mode = 0555,
537 .child = random_table, 535 .child = random_table,
538 }, 536 },
539#ifdef CONFIG_UNIX98_PTYS
540 {
541 .ctl_name = KERN_PTY,
542 .procname = "pty",
543 .mode = 0555,
544 .child = pty_table,
545 },
546#endif
547 { 537 {
548 .ctl_name = KERN_OVERFLOWUID, 538 .ctl_name = KERN_OVERFLOWUID,
549 .procname = "overflowuid", 539 .procname = "overflowuid",
@@ -650,7 +640,6 @@ static ctl_table kern_table[] = {
650 .proc_handler = &proc_dointvec, 640 .proc_handler = &proc_dointvec,
651 }, 641 },
652 { 642 {
653 .ctl_name = KERN_NMI_WATCHDOG,
654 .procname = "nmi_watchdog", 643 .procname = "nmi_watchdog",
655 .data = &nmi_watchdog_enabled, 644 .data = &nmi_watchdog_enabled,
656 .maxlen = sizeof (int), 645 .maxlen = sizeof (int),
@@ -706,7 +695,6 @@ static ctl_table kern_table[] = {
706#endif 695#endif
707#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) 696#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86)
708 { 697 {
709 .ctl_name = KERN_ACPI_VIDEO_FLAGS,
710 .procname = "acpi_video_flags", 698 .procname = "acpi_video_flags",
711 .data = &acpi_realmode_flags, 699 .data = &acpi_realmode_flags,
712 .maxlen = sizeof (unsigned long), 700 .maxlen = sizeof (unsigned long),
@@ -783,7 +771,7 @@ static ctl_table kern_table[] = {
783 { .ctl_name = 0 } 771 { .ctl_name = 0 }
784}; 772};
785 773
786static ctl_table vm_table[] = { 774static struct ctl_table vm_table[] = {
787 { 775 {
788 .ctl_name = VM_OVERCOMMIT_MEMORY, 776 .ctl_name = VM_OVERCOMMIT_MEMORY,
789 .procname = "overcommit_memory", 777 .procname = "overcommit_memory",
@@ -847,7 +835,6 @@ static ctl_table vm_table[] = {
847 .extra2 = &one_hundred, 835 .extra2 = &one_hundred,
848 }, 836 },
849 { 837 {
850 .ctl_name = VM_DIRTY_WB_CS,
851 .procname = "dirty_writeback_centisecs", 838 .procname = "dirty_writeback_centisecs",
852 .data = &dirty_writeback_interval, 839 .data = &dirty_writeback_interval,
853 .maxlen = sizeof(dirty_writeback_interval), 840 .maxlen = sizeof(dirty_writeback_interval),
@@ -855,7 +842,6 @@ static ctl_table vm_table[] = {
855 .proc_handler = &dirty_writeback_centisecs_handler, 842 .proc_handler = &dirty_writeback_centisecs_handler,
856 }, 843 },
857 { 844 {
858 .ctl_name = VM_DIRTY_EXPIRE_CS,
859 .procname = "dirty_expire_centisecs", 845 .procname = "dirty_expire_centisecs",
860 .data = &dirty_expire_interval, 846 .data = &dirty_expire_interval,
861 .maxlen = sizeof(dirty_expire_interval), 847 .maxlen = sizeof(dirty_expire_interval),
@@ -883,7 +869,6 @@ static ctl_table vm_table[] = {
883 }, 869 },
884#ifdef CONFIG_HUGETLB_PAGE 870#ifdef CONFIG_HUGETLB_PAGE
885 { 871 {
886 .ctl_name = VM_HUGETLB_PAGES,
887 .procname = "nr_hugepages", 872 .procname = "nr_hugepages",
888 .data = &max_huge_pages, 873 .data = &max_huge_pages,
889 .maxlen = sizeof(unsigned long), 874 .maxlen = sizeof(unsigned long),
@@ -1093,12 +1078,12 @@ static ctl_table vm_table[] = {
1093}; 1078};
1094 1079
1095#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) 1080#if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
1096static ctl_table binfmt_misc_table[] = { 1081static struct ctl_table binfmt_misc_table[] = {
1097 { .ctl_name = 0 } 1082 { .ctl_name = 0 }
1098}; 1083};
1099#endif 1084#endif
1100 1085
1101static ctl_table fs_table[] = { 1086static struct ctl_table fs_table[] = {
1102 { 1087 {
1103 .ctl_name = FS_NRINODE, 1088 .ctl_name = FS_NRINODE,
1104 .procname = "inode-nr", 1089 .procname = "inode-nr",
@@ -1116,7 +1101,6 @@ static ctl_table fs_table[] = {
1116 .proc_handler = &proc_dointvec, 1101 .proc_handler = &proc_dointvec,
1117 }, 1102 },
1118 { 1103 {
1119 .ctl_name = FS_NRFILE,
1120 .procname = "file-nr", 1104 .procname = "file-nr",
1121 .data = &files_stat, 1105 .data = &files_stat,
1122 .maxlen = 3*sizeof(int), 1106 .maxlen = 3*sizeof(int),
@@ -1192,7 +1176,6 @@ static ctl_table fs_table[] = {
1192 .extra2 = &two, 1176 .extra2 = &two,
1193 }, 1177 },
1194 { 1178 {
1195 .ctl_name = FS_AIO_NR,
1196 .procname = "aio-nr", 1179 .procname = "aio-nr",
1197 .data = &aio_nr, 1180 .data = &aio_nr,
1198 .maxlen = sizeof(aio_nr), 1181 .maxlen = sizeof(aio_nr),
@@ -1200,7 +1183,6 @@ static ctl_table fs_table[] = {
1200 .proc_handler = &proc_doulongvec_minmax, 1183 .proc_handler = &proc_doulongvec_minmax,
1201 }, 1184 },
1202 { 1185 {
1203 .ctl_name = FS_AIO_MAX_NR,
1204 .procname = "aio-max-nr", 1186 .procname = "aio-max-nr",
1205 .data = &aio_max_nr, 1187 .data = &aio_max_nr,
1206 .maxlen = sizeof(aio_max_nr), 1188 .maxlen = sizeof(aio_max_nr),
@@ -1239,7 +1221,7 @@ static ctl_table fs_table[] = {
1239 { .ctl_name = 0 } 1221 { .ctl_name = 0 }
1240}; 1222};
1241 1223
1242static ctl_table debug_table[] = { 1224static struct ctl_table debug_table[] = {
1243#if defined(CONFIG_X86) || defined(CONFIG_PPC) 1225#if defined(CONFIG_X86) || defined(CONFIG_PPC)
1244 { 1226 {
1245 .ctl_name = CTL_UNNUMBERED, 1227 .ctl_name = CTL_UNNUMBERED,
@@ -1253,7 +1235,7 @@ static ctl_table debug_table[] = {
1253 { .ctl_name = 0 } 1235 { .ctl_name = 0 }
1254}; 1236};
1255 1237
1256static ctl_table dev_table[] = { 1238static struct ctl_table dev_table[] = {
1257 { .ctl_name = 0 } 1239 { .ctl_name = 0 }
1258}; 1240};
1259 1241
@@ -1369,10 +1351,15 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
1369 if (copy_from_user(&tmp, args, sizeof(tmp))) 1351 if (copy_from_user(&tmp, args, sizeof(tmp)))
1370 return -EFAULT; 1352 return -EFAULT;
1371 1353
1354 error = deprecated_sysctl_warning(&tmp);
1355 if (error)
1356 goto out;
1357
1372 lock_kernel(); 1358 lock_kernel();
1373 error = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, tmp.oldlenp, 1359 error = do_sysctl(tmp.name, tmp.nlen, tmp.oldval, tmp.oldlenp,
1374 tmp.newval, tmp.newlen); 1360 tmp.newval, tmp.newlen);
1375 unlock_kernel(); 1361 unlock_kernel();
1362out:
1376 return error; 1363 return error;
1377} 1364}
1378#endif /* CONFIG_SYSCTL_SYSCALL */ 1365#endif /* CONFIG_SYSCTL_SYSCALL */
@@ -1393,7 +1380,7 @@ static int test_perm(int mode, int op)
1393 return -EACCES; 1380 return -EACCES;
1394} 1381}
1395 1382
1396int sysctl_perm(ctl_table *table, int op) 1383int sysctl_perm(struct ctl_table *table, int op)
1397{ 1384{
1398 int error; 1385 int error;
1399 error = security_sysctl(table, op); 1386 error = security_sysctl(table, op);
@@ -1406,7 +1393,7 @@ int sysctl_perm(ctl_table *table, int op)
1406static int parse_table(int __user *name, int nlen, 1393static int parse_table(int __user *name, int nlen,
1407 void __user *oldval, size_t __user *oldlenp, 1394 void __user *oldval, size_t __user *oldlenp,
1408 void __user *newval, size_t newlen, 1395 void __user *newval, size_t newlen,
1409 ctl_table *table) 1396 struct ctl_table *table)
1410{ 1397{
1411 int n; 1398 int n;
1412repeat: 1399repeat:
@@ -1437,13 +1424,12 @@ repeat:
1437} 1424}
1438 1425
1439/* Perform the actual read/write of a sysctl table entry. */ 1426/* Perform the actual read/write of a sysctl table entry. */
1440int do_sysctl_strategy (ctl_table *table, 1427int do_sysctl_strategy (struct ctl_table *table,
1441 int __user *name, int nlen, 1428 int __user *name, int nlen,
1442 void __user *oldval, size_t __user *oldlenp, 1429 void __user *oldval, size_t __user *oldlenp,
1443 void __user *newval, size_t newlen) 1430 void __user *newval, size_t newlen)
1444{ 1431{
1445 int op = 0, rc; 1432 int op = 0, rc;
1446 size_t len;
1447 1433
1448 if (oldval) 1434 if (oldval)
1449 op |= 004; 1435 op |= 004;
@@ -1464,25 +1450,10 @@ int do_sysctl_strategy (ctl_table *table,
1464 /* If there is no strategy routine, or if the strategy returns 1450 /* If there is no strategy routine, or if the strategy returns
1465 * zero, proceed with automatic r/w */ 1451 * zero, proceed with automatic r/w */
1466 if (table->data && table->maxlen) { 1452 if (table->data && table->maxlen) {
1467 if (oldval && oldlenp) { 1453 rc = sysctl_data(table, name, nlen, oldval, oldlenp,
1468 if (get_user(len, oldlenp)) 1454 newval, newlen);
1469 return -EFAULT; 1455 if (rc < 0)
1470 if (len) { 1456 return rc;
1471 if (len > table->maxlen)
1472 len = table->maxlen;
1473 if(copy_to_user(oldval, table->data, len))
1474 return -EFAULT;
1475 if(put_user(len, oldlenp))
1476 return -EFAULT;
1477 }
1478 }
1479 if (newval && newlen) {
1480 len = newlen;
1481 if (len > table->maxlen)
1482 len = table->maxlen;
1483 if(copy_from_user(table->data, newval, len))
1484 return -EFAULT;
1485 }
1486 } 1457 }
1487 return 0; 1458 return 0;
1488} 1459}
@@ -1499,7 +1470,9 @@ static void sysctl_set_parent(struct ctl_table *parent, struct ctl_table *table)
1499 1470
1500static __init int sysctl_init(void) 1471static __init int sysctl_init(void)
1501{ 1472{
1473 int err;
1502 sysctl_set_parent(NULL, root_table); 1474 sysctl_set_parent(NULL, root_table);
1475 err = sysctl_check_table(root_table);
1503 return 0; 1476 return 0;
1504} 1477}
1505 1478
@@ -1512,7 +1485,7 @@ core_initcall(sysctl_init);
1512 * Register a sysctl table hierarchy. @table should be a filled in ctl_table 1485 * Register a sysctl table hierarchy. @table should be a filled in ctl_table
1513 * array. An entry with a ctl_name of 0 terminates the table. 1486 * array. An entry with a ctl_name of 0 terminates the table.
1514 * 1487 *
1515 * The members of the &ctl_table structure are used as follows: 1488 * The members of the &struct ctl_table structure are used as follows:
1516 * 1489 *
1517 * ctl_name - This is the numeric sysctl value used by sysctl(2). The number 1490 * ctl_name - This is the numeric sysctl value used by sysctl(2). The number
1518 * must be unique within that level of sysctl 1491 * must be unique within that level of sysctl
@@ -1573,7 +1546,7 @@ core_initcall(sysctl_init);
1573 * This routine returns %NULL on a failure to register, and a pointer 1546 * This routine returns %NULL on a failure to register, and a pointer
1574 * to the table header on success. 1547 * to the table header on success.
1575 */ 1548 */
1576struct ctl_table_header *register_sysctl_table(ctl_table * table) 1549struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
1577{ 1550{
1578 struct ctl_table_header *tmp; 1551 struct ctl_table_header *tmp;
1579 tmp = kmalloc(sizeof(struct ctl_table_header), GFP_KERNEL); 1552 tmp = kmalloc(sizeof(struct ctl_table_header), GFP_KERNEL);
@@ -1584,6 +1557,10 @@ struct ctl_table_header *register_sysctl_table(ctl_table * table)
1584 tmp->used = 0; 1557 tmp->used = 0;
1585 tmp->unregistering = NULL; 1558 tmp->unregistering = NULL;
1586 sysctl_set_parent(NULL, table); 1559 sysctl_set_parent(NULL, table);
1560 if (sysctl_check_table(tmp->ctl_table)) {
1561 kfree(tmp);
1562 return NULL;
1563 }
1587 spin_lock(&sysctl_lock); 1564 spin_lock(&sysctl_lock);
1588 list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry); 1565 list_add_tail(&tmp->ctl_entry, &root_table_header.ctl_entry);
1589 spin_unlock(&sysctl_lock); 1566 spin_unlock(&sysctl_lock);
@@ -1607,7 +1584,7 @@ void unregister_sysctl_table(struct ctl_table_header * header)
1607} 1584}
1608 1585
1609#else /* !CONFIG_SYSCTL */ 1586#else /* !CONFIG_SYSCTL */
1610struct ctl_table_header *register_sysctl_table(ctl_table * table) 1587struct ctl_table_header *register_sysctl_table(struct ctl_table * table)
1611{ 1588{
1612 return NULL; 1589 return NULL;
1613} 1590}
@@ -1700,7 +1677,7 @@ static int _proc_do_string(void* data, int maxlen, int write,
1700 * 1677 *
1701 * Returns 0 on success. 1678 * Returns 0 on success.
1702 */ 1679 */
1703int proc_dostring(ctl_table *table, int write, struct file *filp, 1680int proc_dostring(struct ctl_table *table, int write, struct file *filp,
1704 void __user *buffer, size_t *lenp, loff_t *ppos) 1681 void __user *buffer, size_t *lenp, loff_t *ppos)
1705{ 1682{
1706 return _proc_do_string(table->data, table->maxlen, write, filp, 1683 return _proc_do_string(table->data, table->maxlen, write, filp,
@@ -1727,7 +1704,7 @@ static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
1727 return 0; 1704 return 0;
1728} 1705}
1729 1706
1730static int __do_proc_dointvec(void *tbl_data, ctl_table *table, 1707static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table,
1731 int write, struct file *filp, void __user *buffer, 1708 int write, struct file *filp, void __user *buffer,
1732 size_t *lenp, loff_t *ppos, 1709 size_t *lenp, loff_t *ppos,
1733 int (*conv)(int *negp, unsigned long *lvalp, int *valp, 1710 int (*conv)(int *negp, unsigned long *lvalp, int *valp,
@@ -1837,7 +1814,7 @@ static int __do_proc_dointvec(void *tbl_data, ctl_table *table,
1837#undef TMPBUFLEN 1814#undef TMPBUFLEN
1838} 1815}
1839 1816
1840static int do_proc_dointvec(ctl_table *table, int write, struct file *filp, 1817static int do_proc_dointvec(struct ctl_table *table, int write, struct file *filp,
1841 void __user *buffer, size_t *lenp, loff_t *ppos, 1818 void __user *buffer, size_t *lenp, loff_t *ppos,
1842 int (*conv)(int *negp, unsigned long *lvalp, int *valp, 1819 int (*conv)(int *negp, unsigned long *lvalp, int *valp,
1843 int write, void *data), 1820 int write, void *data),
@@ -1861,7 +1838,7 @@ static int do_proc_dointvec(ctl_table *table, int write, struct file *filp,
1861 * 1838 *
1862 * Returns 0 on success. 1839 * Returns 0 on success.
1863 */ 1840 */
1864int proc_dointvec(ctl_table *table, int write, struct file *filp, 1841int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
1865 void __user *buffer, size_t *lenp, loff_t *ppos) 1842 void __user *buffer, size_t *lenp, loff_t *ppos)
1866{ 1843{
1867 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, 1844 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
@@ -1897,11 +1874,12 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
1897 return 0; 1874 return 0;
1898} 1875}
1899 1876
1877#ifdef CONFIG_SECURITY_CAPABILITIES
1900/* 1878/*
1901 * init may raise the set. 1879 * init may raise the set.
1902 */ 1880 */
1903 1881
1904int proc_dointvec_bset(ctl_table *table, int write, struct file *filp, 1882int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
1905 void __user *buffer, size_t *lenp, loff_t *ppos) 1883 void __user *buffer, size_t *lenp, loff_t *ppos)
1906{ 1884{
1907 int op; 1885 int op;
@@ -1914,11 +1892,12 @@ int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
1914 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, 1892 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
1915 do_proc_dointvec_bset_conv,&op); 1893 do_proc_dointvec_bset_conv,&op);
1916} 1894}
1895#endif /* def CONFIG_SECURITY_CAPABILITIES */
1917 1896
1918/* 1897/*
1919 * Taint values can only be increased 1898 * Taint values can only be increased
1920 */ 1899 */
1921static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp, 1900static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp,
1922 void __user *buffer, size_t *lenp, loff_t *ppos) 1901 void __user *buffer, size_t *lenp, loff_t *ppos)
1923{ 1902{
1924 int op; 1903 int op;
@@ -1977,7 +1956,7 @@ static int do_proc_dointvec_minmax_conv(int *negp, unsigned long *lvalp,
1977 * 1956 *
1978 * Returns 0 on success. 1957 * Returns 0 on success.
1979 */ 1958 */
1980int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp, 1959int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
1981 void __user *buffer, size_t *lenp, loff_t *ppos) 1960 void __user *buffer, size_t *lenp, loff_t *ppos)
1982{ 1961{
1983 struct do_proc_dointvec_minmax_conv_param param = { 1962 struct do_proc_dointvec_minmax_conv_param param = {
@@ -1988,7 +1967,7 @@ int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp,
1988 do_proc_dointvec_minmax_conv, &param); 1967 do_proc_dointvec_minmax_conv, &param);
1989} 1968}
1990 1969
1991static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write, 1970static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write,
1992 struct file *filp, 1971 struct file *filp,
1993 void __user *buffer, 1972 void __user *buffer,
1994 size_t *lenp, loff_t *ppos, 1973 size_t *lenp, loff_t *ppos,
@@ -2093,7 +2072,7 @@ static int __do_proc_doulongvec_minmax(void *data, ctl_table *table, int write,
2093#undef TMPBUFLEN 2072#undef TMPBUFLEN
2094} 2073}
2095 2074
2096static int do_proc_doulongvec_minmax(ctl_table *table, int write, 2075static int do_proc_doulongvec_minmax(struct ctl_table *table, int write,
2097 struct file *filp, 2076 struct file *filp,
2098 void __user *buffer, 2077 void __user *buffer,
2099 size_t *lenp, loff_t *ppos, 2078 size_t *lenp, loff_t *ppos,
@@ -2121,7 +2100,7 @@ static int do_proc_doulongvec_minmax(ctl_table *table, int write,
2121 * 2100 *
2122 * Returns 0 on success. 2101 * Returns 0 on success.
2123 */ 2102 */
2124int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp, 2103int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp,
2125 void __user *buffer, size_t *lenp, loff_t *ppos) 2104 void __user *buffer, size_t *lenp, loff_t *ppos)
2126{ 2105{
2127 return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l); 2106 return do_proc_doulongvec_minmax(table, write, filp, buffer, lenp, ppos, 1l, 1l);
@@ -2145,7 +2124,7 @@ int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp,
2145 * 2124 *
2146 * Returns 0 on success. 2125 * Returns 0 on success.
2147 */ 2126 */
2148int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write, 2127int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
2149 struct file *filp, 2128 struct file *filp,
2150 void __user *buffer, 2129 void __user *buffer,
2151 size_t *lenp, loff_t *ppos) 2130 size_t *lenp, loff_t *ppos)
@@ -2238,7 +2217,7 @@ static int do_proc_dointvec_ms_jiffies_conv(int *negp, unsigned long *lvalp,
2238 * 2217 *
2239 * Returns 0 on success. 2218 * Returns 0 on success.
2240 */ 2219 */
2241int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp, 2220int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
2242 void __user *buffer, size_t *lenp, loff_t *ppos) 2221 void __user *buffer, size_t *lenp, loff_t *ppos)
2243{ 2222{
2244 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, 2223 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
@@ -2261,7 +2240,7 @@ int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp,
2261 * 2240 *
2262 * Returns 0 on success. 2241 * Returns 0 on success.
2263 */ 2242 */
2264int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp, 2243int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp,
2265 void __user *buffer, size_t *lenp, loff_t *ppos) 2244 void __user *buffer, size_t *lenp, loff_t *ppos)
2266{ 2245{
2267 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, 2246 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
@@ -2285,14 +2264,14 @@ int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp,
2285 * 2264 *
2286 * Returns 0 on success. 2265 * Returns 0 on success.
2287 */ 2266 */
2288int proc_dointvec_ms_jiffies(ctl_table *table, int write, struct file *filp, 2267int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp,
2289 void __user *buffer, size_t *lenp, loff_t *ppos) 2268 void __user *buffer, size_t *lenp, loff_t *ppos)
2290{ 2269{
2291 return do_proc_dointvec(table, write, filp, buffer, lenp, ppos, 2270 return do_proc_dointvec(table, write, filp, buffer, lenp, ppos,
2292 do_proc_dointvec_ms_jiffies_conv, NULL); 2271 do_proc_dointvec_ms_jiffies_conv, NULL);
2293} 2272}
2294 2273
2295static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, 2274static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
2296 void __user *buffer, size_t *lenp, loff_t *ppos) 2275 void __user *buffer, size_t *lenp, loff_t *ppos)
2297{ 2276{
2298 struct pid *new_pid; 2277 struct pid *new_pid;
@@ -2316,55 +2295,55 @@ static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp,
2316 2295
2317#else /* CONFIG_PROC_FS */ 2296#else /* CONFIG_PROC_FS */
2318 2297
2319int proc_dostring(ctl_table *table, int write, struct file *filp, 2298int proc_dostring(struct ctl_table *table, int write, struct file *filp,
2320 void __user *buffer, size_t *lenp, loff_t *ppos) 2299 void __user *buffer, size_t *lenp, loff_t *ppos)
2321{ 2300{
2322 return -ENOSYS; 2301 return -ENOSYS;
2323} 2302}
2324 2303
2325int proc_dointvec(ctl_table *table, int write, struct file *filp, 2304int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
2326 void __user *buffer, size_t *lenp, loff_t *ppos) 2305 void __user *buffer, size_t *lenp, loff_t *ppos)
2327{ 2306{
2328 return -ENOSYS; 2307 return -ENOSYS;
2329} 2308}
2330 2309
2331int proc_dointvec_bset(ctl_table *table, int write, struct file *filp, 2310int proc_dointvec_bset(struct ctl_table *table, int write, struct file *filp,
2332 void __user *buffer, size_t *lenp, loff_t *ppos) 2311 void __user *buffer, size_t *lenp, loff_t *ppos)
2333{ 2312{
2334 return -ENOSYS; 2313 return -ENOSYS;
2335} 2314}
2336 2315
2337int proc_dointvec_minmax(ctl_table *table, int write, struct file *filp, 2316int proc_dointvec_minmax(struct ctl_table *table, int write, struct file *filp,
2338 void __user *buffer, size_t *lenp, loff_t *ppos) 2317 void __user *buffer, size_t *lenp, loff_t *ppos)
2339{ 2318{
2340 return -ENOSYS; 2319 return -ENOSYS;
2341} 2320}
2342 2321
2343int proc_dointvec_jiffies(ctl_table *table, int write, struct file *filp, 2322int proc_dointvec_jiffies(struct ctl_table *table, int write, struct file *filp,
2344 void __user *buffer, size_t *lenp, loff_t *ppos) 2323 void __user *buffer, size_t *lenp, loff_t *ppos)
2345{ 2324{
2346 return -ENOSYS; 2325 return -ENOSYS;
2347} 2326}
2348 2327
2349int proc_dointvec_userhz_jiffies(ctl_table *table, int write, struct file *filp, 2328int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, struct file *filp,
2350 void __user *buffer, size_t *lenp, loff_t *ppos) 2329 void __user *buffer, size_t *lenp, loff_t *ppos)
2351{ 2330{
2352 return -ENOSYS; 2331 return -ENOSYS;
2353} 2332}
2354 2333
2355int proc_dointvec_ms_jiffies(ctl_table *table, int write, struct file *filp, 2334int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, struct file *filp,
2356 void __user *buffer, size_t *lenp, loff_t *ppos) 2335 void __user *buffer, size_t *lenp, loff_t *ppos)
2357{ 2336{
2358 return -ENOSYS; 2337 return -ENOSYS;
2359} 2338}
2360 2339
2361int proc_doulongvec_minmax(ctl_table *table, int write, struct file *filp, 2340int proc_doulongvec_minmax(struct ctl_table *table, int write, struct file *filp,
2362 void __user *buffer, size_t *lenp, loff_t *ppos) 2341 void __user *buffer, size_t *lenp, loff_t *ppos)
2363{ 2342{
2364 return -ENOSYS; 2343 return -ENOSYS;
2365} 2344}
2366 2345
2367int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write, 2346int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
2368 struct file *filp, 2347 struct file *filp,
2369 void __user *buffer, 2348 void __user *buffer,
2370 size_t *lenp, loff_t *ppos) 2349 size_t *lenp, loff_t *ppos)
@@ -2381,8 +2360,42 @@ int proc_doulongvec_ms_jiffies_minmax(ctl_table *table, int write,
2381 * General sysctl support routines 2360 * General sysctl support routines
2382 */ 2361 */
2383 2362
2363/* The generic sysctl data routine (used if no strategy routine supplied) */
2364int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
2365 void __user *oldval, size_t __user *oldlenp,
2366 void __user *newval, size_t newlen)
2367{
2368 size_t len;
2369
2370 /* Get out of I don't have a variable */
2371 if (!table->data || !table->maxlen)
2372 return -ENOTDIR;
2373
2374 if (oldval && oldlenp) {
2375 if (get_user(len, oldlenp))
2376 return -EFAULT;
2377 if (len) {
2378 if (len > table->maxlen)
2379 len = table->maxlen;
2380 if (copy_to_user(oldval, table->data, len))
2381 return -EFAULT;
2382 if (put_user(len, oldlenp))
2383 return -EFAULT;
2384 }
2385 }
2386
2387 if (newval && newlen) {
2388 if (newlen > table->maxlen)
2389 newlen = table->maxlen;
2390
2391 if (copy_from_user(table->data, newval, newlen))
2392 return -EFAULT;
2393 }
2394 return 1;
2395}
2396
2384/* The generic string strategy routine: */ 2397/* The generic string strategy routine: */
2385int sysctl_string(ctl_table *table, int __user *name, int nlen, 2398int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
2386 void __user *oldval, size_t __user *oldlenp, 2399 void __user *oldval, size_t __user *oldlenp,
2387 void __user *newval, size_t newlen) 2400 void __user *newval, size_t newlen)
2388{ 2401{
@@ -2428,7 +2441,7 @@ int sysctl_string(ctl_table *table, int __user *name, int nlen,
2428 * are between the minimum and maximum values given in the arrays 2441 * are between the minimum and maximum values given in the arrays
2429 * table->extra1 and table->extra2, respectively. 2442 * table->extra1 and table->extra2, respectively.
2430 */ 2443 */
2431int sysctl_intvec(ctl_table *table, int __user *name, int nlen, 2444int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
2432 void __user *oldval, size_t __user *oldlenp, 2445 void __user *oldval, size_t __user *oldlenp,
2433 void __user *newval, size_t newlen) 2446 void __user *newval, size_t newlen)
2434{ 2447{
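As the comment above notes, sysctl_intvec bounds each value against table->extra1 and table->extra2 on the binary interface; proc_dointvec_minmax is the matching proc handler. A hedged sketch of an entry wired for both, with made-up names and limits that are not part of this patch:

static int example_val;		/* hypothetical */
static int example_min = 1;
static int example_max = 100;

static struct ctl_table example_minmax_entry[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example_val",
		.data		= &example_val,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,	/* range check for /proc/sys writes */
		.strategy	= &sysctl_intvec,		/* range check for sysctl(2) writes */
		.extra1		= &example_min,
		.extra2		= &example_max,
	},
	{ .ctl_name = 0 }
};

With CTL_UNNUMBERED the entry is reachable only through procfs, so the strategy line is shown here only to illustrate how both handlers share extra1/extra2; a real binary sysctl would carry a registered ctl_name instead.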
@@ -2464,7 +2477,7 @@ int sysctl_intvec(ctl_table *table, int __user *name, int nlen,
2464} 2477}
2465 2478
2466/* Strategy function to convert jiffies to seconds */ 2479/* Strategy function to convert jiffies to seconds */
2467int sysctl_jiffies(ctl_table *table, int __user *name, int nlen, 2480int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
2468 void __user *oldval, size_t __user *oldlenp, 2481 void __user *oldval, size_t __user *oldlenp,
2469 void __user *newval, size_t newlen) 2482 void __user *newval, size_t newlen)
2470{ 2483{
@@ -2498,7 +2511,7 @@ int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
2498} 2511}
2499 2512
2500/* Strategy function to convert jiffies to seconds */ 2513/* Strategy function to convert jiffies to seconds */
2501int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, 2514int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
2502 void __user *oldval, size_t __user *oldlenp, 2515 void __user *oldval, size_t __user *oldlenp,
2503 void __user *newval, size_t newlen) 2516 void __user *newval, size_t newlen)
2504{ 2517{
@@ -2538,59 +2551,50 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
2538 2551
2539asmlinkage long sys_sysctl(struct __sysctl_args __user *args) 2552asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
2540{ 2553{
2541 static int msg_count;
2542 struct __sysctl_args tmp; 2554 struct __sysctl_args tmp;
2543 int name[CTL_MAXNAME]; 2555 int error;
2544 int i;
2545 2556
2546 /* Read in the sysctl name for better debug message logging */
2547 if (copy_from_user(&tmp, args, sizeof(tmp))) 2557 if (copy_from_user(&tmp, args, sizeof(tmp)))
2548 return -EFAULT; 2558 return -EFAULT;
2549 if (tmp.nlen <= 0 || tmp.nlen >= CTL_MAXNAME)
2550 return -ENOTDIR;
2551 for (i = 0; i < tmp.nlen; i++)
2552 if (get_user(name[i], tmp.name + i))
2553 return -EFAULT;
2554 2559
2555 /* Ignore accesses to kernel.version */ 2560 error = deprecated_sysctl_warning(&tmp);
2556 if ((tmp.nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
2557 goto out;
2558 2561
2559 if (msg_count < 5) { 2562 /* If no error reading the parameters then just -ENOSYS ... */
2560 msg_count++; 2563 if (!error)
2561 printk(KERN_INFO 2564 error = -ENOSYS;
2562 "warning: process `%s' used the removed sysctl " 2565
2563 "system call with ", current->comm); 2566 return error;
2564 for (i = 0; i < tmp.nlen; i++) 2567}
2565 printk("%d.", name[i]); 2568
2566 printk("\n"); 2569int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
2567 } 2570 void __user *oldval, size_t __user *oldlenp,
2568out: 2571 void __user *newval, size_t newlen)
2572{
2569 return -ENOSYS; 2573 return -ENOSYS;
2570} 2574}
2571 2575
2572int sysctl_string(ctl_table *table, int __user *name, int nlen, 2576int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
2573 void __user *oldval, size_t __user *oldlenp, 2577 void __user *oldval, size_t __user *oldlenp,
2574 void __user *newval, size_t newlen) 2578 void __user *newval, size_t newlen)
2575{ 2579{
2576 return -ENOSYS; 2580 return -ENOSYS;
2577} 2581}
2578 2582
2579int sysctl_intvec(ctl_table *table, int __user *name, int nlen, 2583int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
2580 void __user *oldval, size_t __user *oldlenp, 2584 void __user *oldval, size_t __user *oldlenp,
2581 void __user *newval, size_t newlen) 2585 void __user *newval, size_t newlen)
2582{ 2586{
2583 return -ENOSYS; 2587 return -ENOSYS;
2584} 2588}
2585 2589
2586int sysctl_jiffies(ctl_table *table, int __user *name, int nlen, 2590int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
2587 void __user *oldval, size_t __user *oldlenp, 2591 void __user *oldval, size_t __user *oldlenp,
2588 void __user *newval, size_t newlen) 2592 void __user *newval, size_t newlen)
2589{ 2593{
2590 return -ENOSYS; 2594 return -ENOSYS;
2591} 2595}
2592 2596
2593int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen, 2597int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen,
2594 void __user *oldval, size_t __user *oldlenp, 2598 void __user *oldval, size_t __user *oldlenp,
2595 void __user *newval, size_t newlen) 2599 void __user *newval, size_t newlen)
2596{ 2600{
@@ -2599,6 +2603,33 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
2599 2603
2600#endif /* CONFIG_SYSCTL_SYSCALL */ 2604#endif /* CONFIG_SYSCTL_SYSCALL */
2601 2605
2606static int deprecated_sysctl_warning(struct __sysctl_args *args)
2607{
2608 static int msg_count;
2609 int name[CTL_MAXNAME];
2610 int i;
2611
2612 /* Read in the sysctl name for better debug message logging */
2613 for (i = 0; i < args->nlen; i++)
2614 if (get_user(name[i], args->name + i))
2615 return -EFAULT;
2616
2617 /* Ignore accesses to kernel.version */
2618 if ((args->nlen == 2) && (name[0] == CTL_KERN) && (name[1] == KERN_VERSION))
2619 return 0;
2620
2621 if (msg_count < 5) {
2622 msg_count++;
2623 printk(KERN_INFO
2624 "warning: process `%s' used the deprecated sysctl "
2625 "system call with ", current->comm);
2626 for (i = 0; i < args->nlen; i++)
2627 printk("%d.", name[i]);
2628 printk("\n");
2629 }
2630 return 0;
2631}
2632
2602/* 2633/*
2603 * No sense putting this after each symbol definition, twice, 2634 * No sense putting this after each symbol definition, twice,
2604 * exception granted :-) 2635 * exception granted :-)
@@ -2616,4 +2647,5 @@ EXPORT_SYMBOL(sysctl_intvec);
2616EXPORT_SYMBOL(sysctl_jiffies); 2647EXPORT_SYMBOL(sysctl_jiffies);
2617EXPORT_SYMBOL(sysctl_ms_jiffies); 2648EXPORT_SYMBOL(sysctl_ms_jiffies);
2618EXPORT_SYMBOL(sysctl_string); 2649EXPORT_SYMBOL(sysctl_string);
2650EXPORT_SYMBOL(sysctl_data);
2619EXPORT_SYMBOL(unregister_sysctl_table); 2651EXPORT_SYMBOL(unregister_sysctl_table);
diff --git a/kernel/sysctl_check.c b/kernel/sysctl_check.c
new file mode 100644
index 000000000000..3c9ef5a7d575
--- /dev/null
+++ b/kernel/sysctl_check.c
@@ -0,0 +1,1588 @@
1#include <linux/stat.h>
2#include <linux/sysctl.h>
3#include "../arch/s390/appldata/appldata.h"
4#include "../fs/xfs/linux-2.6/xfs_sysctl.h"
5#include <linux/sunrpc/debug.h>
6#include <linux/string.h>
7#include <net/ip_vs.h>
8
9struct trans_ctl_table {
10 int ctl_name;
11 const char *procname;
12 struct trans_ctl_table *child;
13};
14
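The checks later in this file resolve each level of a binary sysctl path against one of these translation tables; the following is only a rough sketch of that per-level lookup, and the helper name trans_lookup is invented here rather than taken from this file:

/* Sketch: find the entry for one binary ctl_name at one level, the way
 * a path such as { CTL_KERN, KERN_RANDOM, RANDOM_UUID } resolves level
 * by level into the procfs names "kernel" -> "random" -> "uuid". */
static struct trans_ctl_table *trans_lookup(struct trans_ctl_table *table,
					    int ctl_name)
{
	struct trans_ctl_table *t;

	for (t = table; t->ctl_name || t->procname || t->child; t++)
		if (t->ctl_name == ctl_name)
			return t;	/* t->procname names it, t->child is the next level */
	return NULL;			/* unknown binary number at this level */
}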
15static struct trans_ctl_table trans_random_table[] = {
16 { RANDOM_POOLSIZE, "poolsize" },
17 { RANDOM_ENTROPY_COUNT, "entropy_avail" },
18 { RANDOM_READ_THRESH, "read_wakeup_threshold" },
19 { RANDOM_WRITE_THRESH, "write_wakeup_threshold" },
20 { RANDOM_BOOT_ID, "boot_id" },
21 { RANDOM_UUID, "uuid" },
22 {}
23};
24
25static struct trans_ctl_table trans_pty_table[] = {
26 { PTY_MAX, "max" },
27 { PTY_NR, "nr" },
28 {}
29};
30
31static struct trans_ctl_table trans_kern_table[] = {
32 { KERN_OSTYPE, "ostype" },
33 { KERN_OSRELEASE, "osrelease" },
34 /* KERN_OSREV not used */
35 { KERN_VERSION, "version" },
36 /* KERN_SECUREMASK not used */
37 /* KERN_PROF not used */
38 { KERN_NODENAME, "hostname" },
39 { KERN_DOMAINNAME, "domainname" },
40
41#ifdef CONFIG_SECURITY_CAPABILITIES
42 { KERN_CAP_BSET, "cap-bound" },
43#endif /* def CONFIG_SECURITY_CAPABILITIES */
44
45 { KERN_PANIC, "panic" },
46 { KERN_REALROOTDEV, "real-root-dev" },
47
48 { KERN_SPARC_REBOOT, "reboot-cmd" },
49 { KERN_CTLALTDEL, "ctrl-alt-del" },
50 { KERN_PRINTK, "printk" },
51
52 /* KERN_NAMETRANS not used */
53 /* KERN_PPC_HTABRECLAIM not used */
54 /* KERN_PPC_ZEROPAGED not used */
55 { KERN_PPC_POWERSAVE_NAP, "powersave-nap" },
56
57 { KERN_MODPROBE, "modprobe" },
58 { KERN_SG_BIG_BUFF, "sg-big-buff" },
59 { KERN_ACCT, "acct" },
60 { KERN_PPC_L2CR, "l2cr" },
61
62 /* KERN_RTSIGNR not used */
63 /* KERN_RTSIGMAX not used */
64
65 { KERN_SHMMAX, "shmmax" },
66 { KERN_MSGMAX, "msgmax" },
67 { KERN_MSGMNB, "msgmnb" },
68 /* KERN_MSGPOOL not used*/
69 { KERN_SYSRQ, "sysrq" },
70 { KERN_MAX_THREADS, "threads-max" },
71 { KERN_RANDOM, "random", trans_random_table },
72 { KERN_SHMALL, "shmall" },
73 { KERN_MSGMNI, "msgmni" },
74 { KERN_SEM, "sem" },
75 { KERN_SPARC_STOP_A, "stop-a" },
76 { KERN_SHMMNI, "shmmni" },
77
78 { KERN_OVERFLOWUID, "overflowuid" },
79 { KERN_OVERFLOWGID, "overflowgid" },
80
81 { KERN_HOTPLUG, "hotplug", },
82 { KERN_IEEE_EMULATION_WARNINGS, "ieee_emulation_warnings" },
83
84 { KERN_S390_USER_DEBUG_LOGGING, "userprocess_debug" },
85 { KERN_CORE_USES_PID, "core_uses_pid" },
86 { KERN_TAINTED, "tainted" },
87 { KERN_CADPID, "cad_pid" },
88 { KERN_PIDMAX, "pid_max" },
89 { KERN_CORE_PATTERN, "core_pattern" },
90 { KERN_PANIC_ON_OOPS, "panic_on_oops" },
91 { KERN_HPPA_PWRSW, "soft-power" },
92 { KERN_HPPA_UNALIGNED, "unaligned-trap" },
93
94 { KERN_PRINTK_RATELIMIT, "printk_ratelimit" },
95 { KERN_PRINTK_RATELIMIT_BURST, "printk_ratelimit_burst" },
96
97 { KERN_PTY, "pty", trans_pty_table },
98 { KERN_NGROUPS_MAX, "ngroups_max" },
99 { KERN_SPARC_SCONS_PWROFF, "scons_poweroff" },
100 { KERN_HZ_TIMER, "hz_timer" },
101 { KERN_UNKNOWN_NMI_PANIC, "unknown_nmi_panic" },
102 { KERN_BOOTLOADER_TYPE, "bootloader_type" },
103 { KERN_RANDOMIZE, "randomize_va_space" },
104
105 { KERN_SPIN_RETRY, "spin_retry" },
106 { KERN_ACPI_VIDEO_FLAGS, "acpi_video_flags" },
107 { KERN_IA64_UNALIGNED, "ignore-unaligned-usertrap" },
108 { KERN_COMPAT_LOG, "compat-log" },
109 { KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
110 { KERN_NMI_WATCHDOG, "nmi_watchdog" },
111 { KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
112 {}
113};
114
115static struct trans_ctl_table trans_vm_table[] = {
116 { VM_OVERCOMMIT_MEMORY, "overcommit_memory" },
117 { VM_PAGE_CLUSTER, "page-cluster" },
118 { VM_DIRTY_BACKGROUND, "dirty_background_ratio" },
119 { VM_DIRTY_RATIO, "dirty_ratio" },
120 { VM_DIRTY_WB_CS, "dirty_writeback_centisecs" },
121 { VM_DIRTY_EXPIRE_CS, "dirty_expire_centisecs" },
122 { VM_NR_PDFLUSH_THREADS, "nr_pdflush_threads" },
123 { VM_OVERCOMMIT_RATIO, "overcommit_ratio" },
124 /* VM_PAGEBUF unused */
125 { VM_HUGETLB_PAGES, "nr_hugepages" },
126 { VM_SWAPPINESS, "swappiness" },
127 { VM_LOWMEM_RESERVE_RATIO, "lowmem_reserve_ratio" },
128 { VM_MIN_FREE_KBYTES, "min_free_kbytes" },
129 { VM_MAX_MAP_COUNT, "max_map_count" },
130 { VM_LAPTOP_MODE, "laptop_mode" },
131 { VM_BLOCK_DUMP, "block_dump" },
132 { VM_HUGETLB_GROUP, "hugetlb_shm_group" },
133 { VM_VFS_CACHE_PRESSURE, "vfs_cache_pressure" },
134 { VM_LEGACY_VA_LAYOUT, "legacy_va_layout" },
135 /* VM_SWAP_TOKEN_TIMEOUT unused */
136 { VM_DROP_PAGECACHE, "drop_caches" },
137 { VM_PERCPU_PAGELIST_FRACTION, "percpu_pagelist_fraction" },
138 { VM_ZONE_RECLAIM_MODE, "zone_reclaim_mode" },
139 { VM_MIN_UNMAPPED, "min_unmapped_ratio" },
140 { VM_PANIC_ON_OOM, "panic_on_oom" },
141 { VM_VDSO_ENABLED, "vdso_enabled" },
142 { VM_MIN_SLAB, "min_slab_ratio" },
143 { VM_CMM_PAGES, "cmm_pages" },
144 { VM_CMM_TIMED_PAGES, "cmm_timed_pages" },
145 { VM_CMM_TIMEOUT, "cmm_timeout" },
146
147 {}
148};
149
150static struct trans_ctl_table trans_net_core_table[] = {
151 { NET_CORE_WMEM_MAX, "wmem_max" },
152 { NET_CORE_RMEM_MAX, "rmem_max" },
153 { NET_CORE_WMEM_DEFAULT, "wmem_default" },
154 { NET_CORE_RMEM_DEFAULT, "rmem_default" },
155 /* NET_CORE_DESTROY_DELAY unused */
156 { NET_CORE_MAX_BACKLOG, "netdev_max_backlog" },
157 /* NET_CORE_FASTROUTE unused */
158 { NET_CORE_MSG_COST, "message_cost" },
159 { NET_CORE_MSG_BURST, "message_burst" },
160 { NET_CORE_OPTMEM_MAX, "optmem_max" },
161 /* NET_CORE_HOT_LIST_LENGTH unused */
162 /* NET_CORE_DIVERT_VERSION unused */
163 /* NET_CORE_NO_CONG_THRESH unused */
164 /* NET_CORE_NO_CONG unused */
165 /* NET_CORE_LO_CONG unused */
166 /* NET_CORE_MOD_CONG unused */
167 { NET_CORE_DEV_WEIGHT, "dev_weight" },
168 { NET_CORE_SOMAXCONN, "somaxconn" },
169 { NET_CORE_BUDGET, "netdev_budget" },
170 { NET_CORE_AEVENT_ETIME, "xfrm_aevent_etime" },
171 { NET_CORE_AEVENT_RSEQTH, "xfrm_aevent_rseqth" },
172 { NET_CORE_WARNINGS, "warnings" },
173 {},
174};
175
176static struct trans_ctl_table trans_net_unix_table[] = {
177 /* NET_UNIX_DESTROY_DELAY unused */
178 /* NET_UNIX_DELETE_DELAY unused */
179 { NET_UNIX_MAX_DGRAM_QLEN, "max_dgram_qlen" },
180 {}
181};
182
183static struct trans_ctl_table trans_net_ipv4_route_table[] = {
184 { NET_IPV4_ROUTE_FLUSH, "flush" },
185 { NET_IPV4_ROUTE_MIN_DELAY, "min_delay" },
186 { NET_IPV4_ROUTE_MAX_DELAY, "max_delay" },
187 { NET_IPV4_ROUTE_GC_THRESH, "gc_thresh" },
188 { NET_IPV4_ROUTE_MAX_SIZE, "max_size" },
189 { NET_IPV4_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
190 { NET_IPV4_ROUTE_GC_TIMEOUT, "gc_timeout" },
191 { NET_IPV4_ROUTE_GC_INTERVAL, "gc_interval" },
192 { NET_IPV4_ROUTE_REDIRECT_LOAD, "redirect_load" },
193 { NET_IPV4_ROUTE_REDIRECT_NUMBER, "redirect_number" },
194 { NET_IPV4_ROUTE_REDIRECT_SILENCE, "redirect_silence" },
195 { NET_IPV4_ROUTE_ERROR_COST, "error_cost" },
196 { NET_IPV4_ROUTE_ERROR_BURST, "error_burst" },
197 { NET_IPV4_ROUTE_GC_ELASTICITY, "gc_elasticity" },
198 { NET_IPV4_ROUTE_MTU_EXPIRES, "mtu_expires" },
199 { NET_IPV4_ROUTE_MIN_PMTU, "min_pmtu" },
200 { NET_IPV4_ROUTE_MIN_ADVMSS, "min_adv_mss" },
201 { NET_IPV4_ROUTE_SECRET_INTERVAL, "secret_interval" },
202 { NET_IPV4_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
203 {}
204};
205
206static struct trans_ctl_table trans_net_ipv4_conf_vars_table[] = {
207 { NET_IPV4_CONF_FORWARDING, "forwarding" },
208 { NET_IPV4_CONF_MC_FORWARDING, "mc_forwarding" },
209
210 { NET_IPV4_CONF_PROXY_ARP, "proxy_arp" },
211 { NET_IPV4_CONF_ACCEPT_REDIRECTS, "accept_redirects" },
212 { NET_IPV4_CONF_SECURE_REDIRECTS, "secure_redirects" },
213 { NET_IPV4_CONF_SEND_REDIRECTS, "send_redirects" },
214 { NET_IPV4_CONF_SHARED_MEDIA, "shared_media" },
215 { NET_IPV4_CONF_RP_FILTER, "rp_filter" },
216 { NET_IPV4_CONF_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
217 { NET_IPV4_CONF_BOOTP_RELAY, "bootp_relay" },
218 { NET_IPV4_CONF_LOG_MARTIANS, "log_martians" },
219 { NET_IPV4_CONF_TAG, "tag" },
220 { NET_IPV4_CONF_ARPFILTER, "arp_filter" },
221 { NET_IPV4_CONF_MEDIUM_ID, "medium_id" },
222 { NET_IPV4_CONF_NOXFRM, "disable_xfrm" },
223 { NET_IPV4_CONF_NOPOLICY, "disable_policy" },
224 { NET_IPV4_CONF_FORCE_IGMP_VERSION, "force_igmp_version" },
225
226 { NET_IPV4_CONF_ARP_ANNOUNCE, "arp_announce" },
227 { NET_IPV4_CONF_ARP_IGNORE, "arp_ignore" },
228 { NET_IPV4_CONF_PROMOTE_SECONDARIES, "promote_secondaries" },
229 { NET_IPV4_CONF_ARP_ACCEPT, "arp_accept" },
230 {}
231};
232
233static struct trans_ctl_table trans_net_ipv4_conf_table[] = {
234 { NET_PROTO_CONF_ALL, "all", trans_net_ipv4_conf_vars_table },
235 { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv4_conf_vars_table },
236 { 0, NULL, trans_net_ipv4_conf_vars_table },
237 {}
238};
239
240
241static struct trans_ctl_table trans_net_ipv4_vs_table[] = {
242 { NET_IPV4_VS_AMEMTHRESH, "amemthresh" },
243 { NET_IPV4_VS_DEBUG_LEVEL, "debug_level" },
244 { NET_IPV4_VS_AMDROPRATE, "am_droprate" },
245 { NET_IPV4_VS_DROP_ENTRY, "drop_entry" },
246 { NET_IPV4_VS_DROP_PACKET, "drop_packet" },
247 { NET_IPV4_VS_SECURE_TCP, "secure_tcp" },
248 { NET_IPV4_VS_TO_ES, "timeout_established" },
249 { NET_IPV4_VS_TO_SS, "timeout_synsent" },
250 { NET_IPV4_VS_TO_SR, "timeout_synrecv" },
251 { NET_IPV4_VS_TO_FW, "timeout_finwait" },
252 { NET_IPV4_VS_TO_TW, "timeout_timewait" },
253 { NET_IPV4_VS_TO_CL, "timeout_close" },
254 { NET_IPV4_VS_TO_CW, "timeout_closewait" },
255 { NET_IPV4_VS_TO_LA, "timeout_lastack" },
256 { NET_IPV4_VS_TO_LI, "timeout_listen" },
257 { NET_IPV4_VS_TO_SA, "timeout_synack" },
258 { NET_IPV4_VS_TO_UDP, "timeout_udp" },
259 { NET_IPV4_VS_TO_ICMP, "timeout_icmp" },
260 { NET_IPV4_VS_CACHE_BYPASS, "cache_bypass" },
261 { NET_IPV4_VS_EXPIRE_NODEST_CONN, "expire_nodest_conn" },
262 { NET_IPV4_VS_EXPIRE_QUIESCENT_TEMPLATE, "expire_quiescent_template" },
263 { NET_IPV4_VS_SYNC_THRESHOLD, "sync_threshold" },
264 { NET_IPV4_VS_NAT_ICMP_SEND, "nat_icmp_send" },
265 { NET_IPV4_VS_LBLC_EXPIRE, "lblc_expiration" },
266 { NET_IPV4_VS_LBLCR_EXPIRE, "lblcr_expiration" },
267 {}
268};
269
270static struct trans_ctl_table trans_net_neigh_vars_table[] = {
271 { NET_NEIGH_MCAST_SOLICIT, "mcast_solicit" },
272 { NET_NEIGH_UCAST_SOLICIT, "ucast_solicit" },
273 { NET_NEIGH_APP_SOLICIT, "app_solicit" },
274 { NET_NEIGH_RETRANS_TIME, "retrans_time" },
275 { NET_NEIGH_REACHABLE_TIME, "base_reachable_time" },
276 { NET_NEIGH_DELAY_PROBE_TIME, "delay_first_probe_time" },
277 { NET_NEIGH_GC_STALE_TIME, "gc_stale_time" },
278 { NET_NEIGH_UNRES_QLEN, "unres_qlen" },
279 { NET_NEIGH_PROXY_QLEN, "proxy_qlen" },
280 { NET_NEIGH_ANYCAST_DELAY, "anycast_delay" },
281 { NET_NEIGH_PROXY_DELAY, "proxy_delay" },
282 { NET_NEIGH_LOCKTIME, "locktime" },
283 { NET_NEIGH_GC_INTERVAL, "gc_interval" },
284 { NET_NEIGH_GC_THRESH1, "gc_thresh1" },
285 { NET_NEIGH_GC_THRESH2, "gc_thresh2" },
286 { NET_NEIGH_GC_THRESH3, "gc_thresh3" },
287 { NET_NEIGH_RETRANS_TIME_MS, "retrans_time_ms" },
288 { NET_NEIGH_REACHABLE_TIME_MS, "base_reachable_time_ms" },
289 {}
290};
291
292static struct trans_ctl_table trans_net_neigh_table[] = {
293 { NET_PROTO_CONF_DEFAULT, "default", trans_net_neigh_vars_table },
294 { 0, NULL, trans_net_neigh_vars_table },
295 {}
296};
297
298static struct trans_ctl_table trans_net_ipv4_netfilter_table[] = {
299 { NET_IPV4_NF_CONNTRACK_MAX, "ip_conntrack_max" },
300
301 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "ip_conntrack_tcp_timeout_syn_sent" },
302 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "ip_conntrack_tcp_timeout_syn_recv" },
303 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "ip_conntrack_tcp_timeout_established" },
304 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "ip_conntrack_tcp_timeout_fin_wait" },
305 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "ip_conntrack_tcp_timeout_close_wait" },
306 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "ip_conntrack_tcp_timeout_last_ack" },
307 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "ip_conntrack_tcp_timeout_time_wait" },
308 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "ip_conntrack_tcp_timeout_close" },
309
310 { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT, "ip_conntrack_udp_timeout" },
311 { NET_IPV4_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "ip_conntrack_udp_timeout_stream" },
312 { NET_IPV4_NF_CONNTRACK_ICMP_TIMEOUT, "ip_conntrack_icmp_timeout" },
313 { NET_IPV4_NF_CONNTRACK_GENERIC_TIMEOUT, "ip_conntrack_generic_timeout" },
314
315 { NET_IPV4_NF_CONNTRACK_BUCKETS, "ip_conntrack_buckets" },
316 { NET_IPV4_NF_CONNTRACK_LOG_INVALID, "ip_conntrack_log_invalid" },
317 { NET_IPV4_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "ip_conntrack_tcp_timeout_max_retrans" },
318 { NET_IPV4_NF_CONNTRACK_TCP_LOOSE, "ip_conntrack_tcp_loose" },
319 { NET_IPV4_NF_CONNTRACK_TCP_BE_LIBERAL, "ip_conntrack_tcp_be_liberal" },
320 { NET_IPV4_NF_CONNTRACK_TCP_MAX_RETRANS, "ip_conntrack_tcp_max_retrans" },
321
322 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "ip_conntrack_sctp_timeout_closed" },
323 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "ip_conntrack_sctp_timeout_cookie_wait" },
324 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "ip_conntrack_sctp_timeout_cookie_echoed" },
325 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "ip_conntrack_sctp_timeout_established" },
326 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "ip_conntrack_sctp_timeout_shutdown_sent" },
327 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "ip_conntrack_sctp_timeout_shutdown_recd" },
328 { NET_IPV4_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "ip_conntrack_sctp_timeout_shutdown_ack_sent" },
329
330 { NET_IPV4_NF_CONNTRACK_COUNT, "ip_conntrack_count" },
331 { NET_IPV4_NF_CONNTRACK_CHECKSUM, "ip_conntrack_checksum" },
332 {}
333};
334
335static struct trans_ctl_table trans_net_ipv4_table[] = {
336 { NET_IPV4_FORWARD, "ip_forward" },
337 { NET_IPV4_DYNADDR, "ip_dynaddr" },
338
339 { NET_IPV4_CONF, "conf", trans_net_ipv4_conf_table },
340 { NET_IPV4_NEIGH, "neigh", trans_net_neigh_table },
341 { NET_IPV4_ROUTE, "route", trans_net_ipv4_route_table },
342 /* NET_IPV4_FIB_HASH unused */
343 { NET_IPV4_NETFILTER, "netfilter", trans_net_ipv4_netfilter_table },
344 { NET_IPV4_VS, "vs", trans_net_ipv4_vs_table },
345
346 { NET_IPV4_TCP_TIMESTAMPS, "tcp_timestamps" },
347 { NET_IPV4_TCP_WINDOW_SCALING, "tcp_window_scaling" },
348 { NET_IPV4_TCP_SACK, "tcp_sack" },
349 { NET_IPV4_TCP_RETRANS_COLLAPSE, "tcp_retrans_collapse" },
350 { NET_IPV4_DEFAULT_TTL, "ip_default_ttl" },
351 /* NET_IPV4_AUTOCONFIG unused */
352 { NET_IPV4_NO_PMTU_DISC, "ip_no_pmtu_disc" },
353 { NET_IPV4_TCP_SYN_RETRIES, "tcp_syn_retries" },
354 { NET_IPV4_IPFRAG_HIGH_THRESH, "ipfrag_high_thresh" },
355 { NET_IPV4_IPFRAG_LOW_THRESH, "ipfrag_low_thresh" },
356 { NET_IPV4_IPFRAG_TIME, "ipfrag_time" },
357 /* NET_IPV4_TCP_MAX_KA_PROBES unused */
358 { NET_IPV4_TCP_KEEPALIVE_TIME, "tcp_keepalive_time" },
359 { NET_IPV4_TCP_KEEPALIVE_PROBES, "tcp_keepalive_probes" },
360 { NET_IPV4_TCP_RETRIES1, "tcp_retries1" },
361 { NET_IPV4_TCP_RETRIES2, "tcp_retries2" },
362 { NET_IPV4_TCP_FIN_TIMEOUT, "tcp_fin_timeout" },
363 /* NET_IPV4_IP_MASQ_DEBUG unused */
364 { NET_TCP_SYNCOOKIES, "tcp_syncookies" },
365 { NET_TCP_STDURG, "tcp_stdurg" },
366 { NET_TCP_RFC1337, "tcp_rfc1337" },
367 /* NET_TCP_SYN_TAILDROP unused */
368 { NET_TCP_MAX_SYN_BACKLOG, "tcp_max_syn_backlog" },
369 { NET_IPV4_LOCAL_PORT_RANGE, "ip_local_port_range" },
370 { NET_IPV4_ICMP_ECHO_IGNORE_ALL, "icmp_echo_ignore_all" },
371 { NET_IPV4_ICMP_ECHO_IGNORE_BROADCASTS, "icmp_echo_ignore_broadcasts" },
372 /* NET_IPV4_ICMP_SOURCEQUENCH_RATE unused */
373 /* NET_IPV4_ICMP_DESTUNREACH_RATE unused */
374 /* NET_IPV4_ICMP_TIMEEXCEED_RATE unused */
375 /* NET_IPV4_ICMP_PARAMPROB_RATE unused */
376 /* NET_IPV4_ICMP_ECHOREPLY_RATE unused */
377 { NET_IPV4_ICMP_IGNORE_BOGUS_ERROR_RESPONSES, "icmp_ignore_bogus_error_responses" },
378 { NET_IPV4_IGMP_MAX_MEMBERSHIPS, "igmp_max_memberships" },
379 { NET_TCP_TW_RECYCLE, "tcp_tw_recycle" },
380 /* NET_IPV4_ALWAYS_DEFRAG unused */
381 { NET_IPV4_TCP_KEEPALIVE_INTVL, "tcp_keepalive_intvl" },
382 { NET_IPV4_INET_PEER_THRESHOLD, "inet_peer_threshold" },
383 { NET_IPV4_INET_PEER_MINTTL, "inet_peer_minttl" },
384 { NET_IPV4_INET_PEER_MAXTTL, "inet_peer_maxttl" },
385 { NET_IPV4_INET_PEER_GC_MINTIME, "inet_peer_gc_mintime" },
386 { NET_IPV4_INET_PEER_GC_MAXTIME, "inet_peer_gc_maxtime" },
387 { NET_TCP_ORPHAN_RETRIES, "tcp_orphan_retries" },
388 { NET_TCP_ABORT_ON_OVERFLOW, "tcp_abort_on_overflow" },
389 { NET_TCP_SYNACK_RETRIES, "tcp_synack_retries" },
390 { NET_TCP_MAX_ORPHANS, "tcp_max_orphans" },
391 { NET_TCP_MAX_TW_BUCKETS, "tcp_max_tw_buckets" },
392 { NET_TCP_FACK, "tcp_fack" },
393 { NET_TCP_REORDERING, "tcp_reordering" },
394 { NET_TCP_ECN, "tcp_ecn" },
395 { NET_TCP_DSACK, "tcp_dsack" },
396 { NET_TCP_MEM, "tcp_mem" },
397 { NET_TCP_WMEM, "tcp_wmem" },
398 { NET_TCP_RMEM, "tcp_rmem" },
399 { NET_TCP_APP_WIN, "tcp_app_win" },
400 { NET_TCP_ADV_WIN_SCALE, "tcp_adv_win_scale" },
401 { NET_IPV4_NONLOCAL_BIND, "ip_nonlocal_bind" },
402 { NET_IPV4_ICMP_RATELIMIT, "icmp_ratelimit" },
403 { NET_IPV4_ICMP_RATEMASK, "icmp_ratemask" },
404 { NET_TCP_TW_REUSE, "tcp_tw_reuse" },
405 { NET_TCP_FRTO, "tcp_frto" },
406 { NET_TCP_LOW_LATENCY, "tcp_low_latency" },
407 { NET_IPV4_IPFRAG_SECRET_INTERVAL, "ipfrag_secret_interval" },
408 { NET_IPV4_IGMP_MAX_MSF, "igmp_max_msf" },
409 { NET_TCP_NO_METRICS_SAVE, "tcp_no_metrics_save" },
410 /* NET_TCP_DEFAULT_WIN_SCALE unused */
411 { NET_TCP_MODERATE_RCVBUF, "tcp_moderate_rcvbuf" },
412 { NET_TCP_TSO_WIN_DIVISOR, "tcp_tso_win_divisor" },
413 /* NET_TCP_BIC_BETA unused */
414 { NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR, "icmp_errors_use_inbound_ifaddr" },
415 { NET_TCP_CONG_CONTROL, "tcp_congestion_control" },
416 { NET_TCP_ABC, "tcp_abc" },
417 { NET_IPV4_IPFRAG_MAX_DIST, "ipfrag_max_dist" },
418 { NET_TCP_MTU_PROBING, "tcp_mtu_probing" },
419 { NET_TCP_BASE_MSS, "tcp_base_mss" },
420 { NET_IPV4_TCP_WORKAROUND_SIGNED_WINDOWS, "tcp_workaround_signed_windows" },
421 { NET_TCP_DMA_COPYBREAK, "tcp_dma_copybreak" },
422 { NET_TCP_SLOW_START_AFTER_IDLE, "tcp_slow_start_after_idle" },
423 { NET_CIPSOV4_CACHE_ENABLE, "cipso_cache_enable" },
424 { NET_CIPSOV4_CACHE_BUCKET_SIZE, "cipso_cache_bucket_size" },
425 { NET_CIPSOV4_RBM_OPTFMT, "cipso_rbm_optfmt" },
426 { NET_CIPSOV4_RBM_STRICTVALID, "cipso_rbm_strictvalid" },
427 { NET_TCP_AVAIL_CONG_CONTROL, "tcp_available_congestion_control" },
428 { NET_TCP_ALLOWED_CONG_CONTROL, "tcp_allowed_congestion_control" },
429 { NET_TCP_MAX_SSTHRESH, "tcp_max_ssthresh" },
430 { NET_TCP_FRTO_RESPONSE, "tcp_frto_response" },
431 { 2088 /* NET_IPQ_QMAX */, "ip_queue_maxlen" },
432 {}
433};
434
435static struct trans_ctl_table trans_net_ipx_table[] = {
436 { NET_IPX_PPROP_BROADCASTING, "ipx_pprop_broadcasting" },
437 /* NET_IPX_FORWARDING unused */
438 {}
439};
440
441static struct trans_ctl_table trans_net_atalk_table[] = {
442 { NET_ATALK_AARP_EXPIRY_TIME, "aarp-expiry-time" },
443 { NET_ATALK_AARP_TICK_TIME, "aarp-tick-time" },
444 { NET_ATALK_AARP_RETRANSMIT_LIMIT, "aarp-retransmit-limit" },
445 { NET_ATALK_AARP_RESOLVE_TIME, "aarp-resolve-time" },
446 {},
447};
448
449static struct trans_ctl_table trans_net_netrom_table[] = {
450 { NET_NETROM_DEFAULT_PATH_QUALITY, "default_path_quality" },
451 { NET_NETROM_OBSOLESCENCE_COUNT_INITIALISER, "obsolescence_count_initialiser" },
452 { NET_NETROM_NETWORK_TTL_INITIALISER, "network_ttl_initialiser" },
453 { NET_NETROM_TRANSPORT_TIMEOUT, "transport_timeout" },
454 { NET_NETROM_TRANSPORT_MAXIMUM_TRIES, "transport_maximum_tries" },
455 { NET_NETROM_TRANSPORT_ACKNOWLEDGE_DELAY, "transport_acknowledge_delay" },
456 { NET_NETROM_TRANSPORT_BUSY_DELAY, "transport_busy_delay" },
457 { NET_NETROM_TRANSPORT_REQUESTED_WINDOW_SIZE, "transport_requested_window_size" },
458 { NET_NETROM_TRANSPORT_NO_ACTIVITY_TIMEOUT, "transport_no_activity_timeout" },
459 { NET_NETROM_ROUTING_CONTROL, "routing_control" },
460 { NET_NETROM_LINK_FAILS_COUNT, "link_fails_count" },
461 { NET_NETROM_RESET, "reset" },
462 {}
463};
464
465static struct trans_ctl_table trans_net_ax25_table[] = {
466 { NET_AX25_IP_DEFAULT_MODE, "ip_default_mode" },
467 { NET_AX25_DEFAULT_MODE, "ax25_default_mode" },
468 { NET_AX25_BACKOFF_TYPE, "backoff_type" },
469 { NET_AX25_CONNECT_MODE, "connect_mode" },
470 { NET_AX25_STANDARD_WINDOW, "standard_window_size" },
471 { NET_AX25_EXTENDED_WINDOW, "extended_window_size" },
472 { NET_AX25_T1_TIMEOUT, "t1_timeout" },
473 { NET_AX25_T2_TIMEOUT, "t2_timeout" },
474 { NET_AX25_T3_TIMEOUT, "t3_timeout" },
475 { NET_AX25_IDLE_TIMEOUT, "idle_timeout" },
476 { NET_AX25_N2, "maximum_retry_count" },
477 { NET_AX25_PACLEN, "maximum_packet_length" },
478 { NET_AX25_PROTOCOL, "protocol" },
479 { NET_AX25_DAMA_SLAVE_TIMEOUT, "dama_slave_timeout" },
480 {}
481};
482
483static struct trans_ctl_table trans_net_bridge_table[] = {
484 { NET_BRIDGE_NF_CALL_ARPTABLES, "bridge-nf-call-arptables" },
485 { NET_BRIDGE_NF_CALL_IPTABLES, "bridge-nf-call-iptables" },
486 { NET_BRIDGE_NF_CALL_IP6TABLES, "bridge-nf-call-ip6tables" },
487 { NET_BRIDGE_NF_FILTER_VLAN_TAGGED, "bridge-nf-filter-vlan-tagged" },
488 { NET_BRIDGE_NF_FILTER_PPPOE_TAGGED, "bridge-nf-filter-pppoe-tagged" },
489 {}
490};
491
492static struct trans_ctl_table trans_net_rose_table[] = {
493 { NET_ROSE_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
494 { NET_ROSE_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
495 { NET_ROSE_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
496 { NET_ROSE_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
497 { NET_ROSE_ACK_HOLD_BACK_TIMEOUT, "acknowledge_hold_back_timeout" },
498 { NET_ROSE_ROUTING_CONTROL, "routing_control" },
499 { NET_ROSE_LINK_FAIL_TIMEOUT, "link_fail_timeout" },
500 { NET_ROSE_MAX_VCS, "maximum_virtual_circuits" },
501 { NET_ROSE_WINDOW_SIZE, "window_size" },
502 { NET_ROSE_NO_ACTIVITY_TIMEOUT, "no_activity_timeout" },
503 {}
504};
505
506static struct trans_ctl_table trans_net_ipv6_conf_var_table[] = {
507 { NET_IPV6_FORWARDING, "forwarding" },
508 { NET_IPV6_HOP_LIMIT, "hop_limit" },
509 { NET_IPV6_MTU, "mtu" },
510 { NET_IPV6_ACCEPT_RA, "accept_ra" },
511 { NET_IPV6_ACCEPT_REDIRECTS, "accept_redirects" },
512 { NET_IPV6_AUTOCONF, "autoconf" },
513 { NET_IPV6_DAD_TRANSMITS, "dad_transmits" },
514 { NET_IPV6_RTR_SOLICITS, "router_solicitations" },
515 { NET_IPV6_RTR_SOLICIT_INTERVAL, "router_solicitation_interval" },
516 { NET_IPV6_RTR_SOLICIT_DELAY, "router_solicitation_delay" },
517 { NET_IPV6_USE_TEMPADDR, "use_tempaddr" },
518 { NET_IPV6_TEMP_VALID_LFT, "temp_valid_lft" },
519 { NET_IPV6_TEMP_PREFERED_LFT, "temp_prefered_lft" },
520 { NET_IPV6_REGEN_MAX_RETRY, "regen_max_retry" },
521 { NET_IPV6_MAX_DESYNC_FACTOR, "max_desync_factor" },
522 { NET_IPV6_MAX_ADDRESSES, "max_addresses" },
523 { NET_IPV6_FORCE_MLD_VERSION, "force_mld_version" },
524 { NET_IPV6_ACCEPT_RA_DEFRTR, "accept_ra_defrtr" },
525 { NET_IPV6_ACCEPT_RA_PINFO, "accept_ra_pinfo" },
526 { NET_IPV6_ACCEPT_RA_RTR_PREF, "accept_ra_rtr_pref" },
527 { NET_IPV6_RTR_PROBE_INTERVAL, "router_probe_interval" },
528 { NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN, "accept_ra_rt_info_max_plen" },
529 { NET_IPV6_PROXY_NDP, "proxy_ndp" },
530 { NET_IPV6_ACCEPT_SOURCE_ROUTE, "accept_source_route" },
531 {}
532};
533
534static struct trans_ctl_table trans_net_ipv6_conf_table[] = {
535 { NET_PROTO_CONF_ALL, "all", trans_net_ipv6_conf_var_table },
536 { NET_PROTO_CONF_DEFAULT, "default", trans_net_ipv6_conf_var_table },
537 { 0, NULL, trans_net_ipv6_conf_var_table },
538 {}
539};
540
541static struct trans_ctl_table trans_net_ipv6_route_table[] = {
542 { NET_IPV6_ROUTE_FLUSH, "flush" },
543 { NET_IPV6_ROUTE_GC_THRESH, "gc_thresh" },
544 { NET_IPV6_ROUTE_MAX_SIZE, "max_size" },
545 { NET_IPV6_ROUTE_GC_MIN_INTERVAL, "gc_min_interval" },
546 { NET_IPV6_ROUTE_GC_TIMEOUT, "gc_timeout" },
547 { NET_IPV6_ROUTE_GC_INTERVAL, "gc_interval" },
548 { NET_IPV6_ROUTE_GC_ELASTICITY, "gc_elasticity" },
549 { NET_IPV6_ROUTE_MTU_EXPIRES, "mtu_expires" },
550 { NET_IPV6_ROUTE_MIN_ADVMSS, "min_adv_mss" },
551 { NET_IPV6_ROUTE_GC_MIN_INTERVAL_MS, "gc_min_interval_ms" },
552 {}
553};
554
555static struct trans_ctl_table trans_net_ipv6_icmp_table[] = {
556 { NET_IPV6_ICMP_RATELIMIT, "ratelimit" },
557 {}
558};
559
560static struct trans_ctl_table trans_net_ipv6_table[] = {
561 { NET_IPV6_CONF, "conf", trans_net_ipv6_conf_table },
562 { NET_IPV6_NEIGH, "neigh", trans_net_neigh_table },
563 { NET_IPV6_ROUTE, "route", trans_net_ipv6_route_table },
564 { NET_IPV6_ICMP, "icmp", trans_net_ipv6_icmp_table },
565 { NET_IPV6_BINDV6ONLY, "bindv6only" },
566 { NET_IPV6_IP6FRAG_HIGH_THRESH, "ip6frag_high_thresh" },
567 { NET_IPV6_IP6FRAG_LOW_THRESH, "ip6frag_low_thresh" },
568 { NET_IPV6_IP6FRAG_TIME, "ip6frag_time" },
569 { NET_IPV6_IP6FRAG_SECRET_INTERVAL, "ip6frag_secret_interval" },
570 { NET_IPV6_MLD_MAX_MSF, "mld_max_msf" },
571 { 2088 /* IPQ_QMAX */, "ip6_queue_maxlen" },
572 {}
573};
574
575static struct trans_ctl_table trans_net_x25_table[] = {
576 { NET_X25_RESTART_REQUEST_TIMEOUT, "restart_request_timeout" },
577 { NET_X25_CALL_REQUEST_TIMEOUT, "call_request_timeout" },
578 { NET_X25_RESET_REQUEST_TIMEOUT, "reset_request_timeout" },
579 { NET_X25_CLEAR_REQUEST_TIMEOUT, "clear_request_timeout" },
580 { NET_X25_ACK_HOLD_BACK_TIMEOUT, "acknowledgement_hold_back_timeout" },
581 { NET_X25_FORWARD, "x25_forward" },
582 {}
583};
584
585static struct trans_ctl_table trans_net_tr_table[] = {
586 { NET_TR_RIF_TIMEOUT, "rif_timeout" },
587 {}
588};
589
590
591static struct trans_ctl_table trans_net_decnet_conf_vars[] = {
592 { NET_DECNET_CONF_DEV_FORWARDING, "forwarding" },
593 { NET_DECNET_CONF_DEV_PRIORITY, "priority" },
594 { NET_DECNET_CONF_DEV_T2, "t2" },
595 { NET_DECNET_CONF_DEV_T3, "t3" },
596 {}
597};
598
599static struct trans_ctl_table trans_net_decnet_conf[] = {
600 { 0, NULL, trans_net_decnet_conf_vars },
601 {}
602};
603
604static struct trans_ctl_table trans_net_decnet_table[] = {
605 { NET_DECNET_CONF, "conf", trans_net_decnet_conf },
606 { NET_DECNET_NODE_ADDRESS, "node_address" },
607 { NET_DECNET_NODE_NAME, "node_name" },
608 { NET_DECNET_DEFAULT_DEVICE, "default_device" },
609 { NET_DECNET_TIME_WAIT, "time_wait" },
610 { NET_DECNET_DN_COUNT, "dn_count" },
611 { NET_DECNET_DI_COUNT, "di_count" },
612 { NET_DECNET_DR_COUNT, "dr_count" },
613 { NET_DECNET_DST_GC_INTERVAL, "dst_gc_interval" },
614 { NET_DECNET_NO_FC_MAX_CWND, "no_fc_max_cwnd" },
615 { NET_DECNET_MEM, "decnet_mem" },
616 { NET_DECNET_RMEM, "decnet_rmem" },
617 { NET_DECNET_WMEM, "decnet_wmem" },
618 { NET_DECNET_DEBUG_LEVEL, "debug" },
619 {}
620};
621
622static struct trans_ctl_table trans_net_sctp_table[] = {
623 { NET_SCTP_RTO_INITIAL, "rto_initial" },
624 { NET_SCTP_RTO_MIN, "rto_min" },
625 { NET_SCTP_RTO_MAX, "rto_max" },
626 { NET_SCTP_RTO_ALPHA, "rto_alpha_exp_divisor" },
627 { NET_SCTP_RTO_BETA, "rto_beta_exp_divisor" },
628 { NET_SCTP_VALID_COOKIE_LIFE, "valid_cookie_life" },
629 { NET_SCTP_ASSOCIATION_MAX_RETRANS, "association_max_retrans" },
630 { NET_SCTP_PATH_MAX_RETRANS, "path_max_retrans" },
631 { NET_SCTP_MAX_INIT_RETRANSMITS, "max_init_retransmits" },
632 { NET_SCTP_HB_INTERVAL, "hb_interval" },
633 { NET_SCTP_PRESERVE_ENABLE, "cookie_preserve_enable" },
634 { NET_SCTP_MAX_BURST, "max_burst" },
635 { NET_SCTP_ADDIP_ENABLE, "addip_enable" },
636 { NET_SCTP_PRSCTP_ENABLE, "prsctp_enable" },
637 { NET_SCTP_SNDBUF_POLICY, "sndbuf_policy" },
638 { NET_SCTP_SACK_TIMEOUT, "sack_timeout" },
639 { NET_SCTP_RCVBUF_POLICY, "rcvbuf_policy" },
640 {}
641};
642
643static struct trans_ctl_table trans_net_llc_llc2_timeout_table[] = {
644 { NET_LLC2_ACK_TIMEOUT, "ack" },
645 { NET_LLC2_P_TIMEOUT, "p" },
646 { NET_LLC2_REJ_TIMEOUT, "rej" },
647 { NET_LLC2_BUSY_TIMEOUT, "busy" },
648 {}
649};
650
651static struct trans_ctl_table trans_net_llc_station_table[] = {
652 { NET_LLC_STATION_ACK_TIMEOUT, "ack_timeout" },
653 {}
654};
655
656static struct trans_ctl_table trans_net_llc_llc2_table[] = {
657 { NET_LLC2, "timeout", trans_net_llc_llc2_timeout_table },
658 {}
659};
660
661static struct trans_ctl_table trans_net_llc_table[] = {
662 { NET_LLC2, "llc2", trans_net_llc_llc2_table },
663 { NET_LLC_STATION, "station", trans_net_llc_station_table },
664 {}
665};
666
667static struct trans_ctl_table trans_net_netfilter_table[] = {
668 { NET_NF_CONNTRACK_MAX, "nf_conntrack_max" },
669 { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT, "nf_conntrack_tcp_timeout_syn_sent" },
670 { NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV, "nf_conntrack_tcp_timeout_syn_recv" },
671 { NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED, "nf_conntrack_tcp_timeout_established" },
672 { NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT, "nf_conntrack_tcp_timeout_fin_wait" },
673 { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT, "nf_conntrack_tcp_timeout_close_wait" },
674 { NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK, "nf_conntrack_tcp_timeout_last_ack" },
675 { NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT, "nf_conntrack_tcp_timeout_time_wait" },
676 { NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE, "nf_conntrack_tcp_timeout_close" },
677 { NET_NF_CONNTRACK_UDP_TIMEOUT, "nf_conntrack_udp_timeout" },
678 { NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM, "nf_conntrack_udp_timeout_stream" },
679 { NET_NF_CONNTRACK_ICMP_TIMEOUT, "nf_conntrack_icmp_timeout" },
680 { NET_NF_CONNTRACK_GENERIC_TIMEOUT, "nf_conntrack_generic_timeout" },
681 { NET_NF_CONNTRACK_BUCKETS, "nf_conntrack_buckets" },
682 { NET_NF_CONNTRACK_LOG_INVALID, "nf_conntrack_log_invalid" },
683 { NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS, "nf_conntrack_tcp_timeout_max_retrans" },
684 { NET_NF_CONNTRACK_TCP_LOOSE, "nf_conntrack_tcp_loose" },
685 { NET_NF_CONNTRACK_TCP_BE_LIBERAL, "nf_conntrack_tcp_be_liberal" },
686 { NET_NF_CONNTRACK_TCP_MAX_RETRANS, "nf_conntrack_tcp_max_retrans" },
687 { NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED, "nf_conntrack_sctp_timeout_closed" },
688 { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT, "nf_conntrack_sctp_timeout_cookie_wait" },
689 { NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED, "nf_conntrack_sctp_timeout_cookie_echoed" },
690 { NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED, "nf_conntrack_sctp_timeout_established" },
691 { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT, "nf_conntrack_sctp_timeout_shutdown_sent" },
692 { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD, "nf_conntrack_sctp_timeout_shutdown_recd" },
693 { NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT, "nf_conntrack_sctp_timeout_shutdown_ack_sent" },
694 { NET_NF_CONNTRACK_COUNT, "nf_conntrack_count" },
695 { NET_NF_CONNTRACK_ICMPV6_TIMEOUT, "nf_conntrack_icmpv6_timeout" },
696 { NET_NF_CONNTRACK_FRAG6_TIMEOUT, "nf_conntrack_frag6_timeout" },
697 { NET_NF_CONNTRACK_FRAG6_LOW_THRESH, "nf_conntrack_frag6_low_thresh" },
698 { NET_NF_CONNTRACK_FRAG6_HIGH_THRESH, "nf_conntrack_frag6_high_thresh" },
699 { NET_NF_CONNTRACK_CHECKSUM, "nf_conntrack_checksum" },
700
701 {}
702};
703
704static struct trans_ctl_table trans_net_dccp_table[] = {
705 { NET_DCCP_DEFAULT, "default" },
706 {}
707};
708
709static struct trans_ctl_table trans_net_irda_table[] = {
710 { NET_IRDA_DISCOVERY, "discovery" },
711 { NET_IRDA_DEVNAME, "devname" },
712 { NET_IRDA_DEBUG, "debug" },
713 { NET_IRDA_FAST_POLL, "fast_poll_increase" },
714 { NET_IRDA_DISCOVERY_SLOTS, "discovery_slots" },
715 { NET_IRDA_DISCOVERY_TIMEOUT, "discovery_timeout" },
716 { NET_IRDA_SLOT_TIMEOUT, "slot_timeout" },
717 { NET_IRDA_MAX_BAUD_RATE, "max_baud_rate" },
718 { NET_IRDA_MIN_TX_TURN_TIME, "min_tx_turn_time" },
719 { NET_IRDA_MAX_TX_DATA_SIZE, "max_tx_data_size" },
720 { NET_IRDA_MAX_TX_WINDOW, "max_tx_window" },
721 { NET_IRDA_MAX_NOREPLY_TIME, "max_noreply_time" },
722 { NET_IRDA_WARN_NOREPLY_TIME, "warn_noreply_time" },
723 { NET_IRDA_LAP_KEEPALIVE_TIME, "lap_keepalive_time" },
724 {}
725};
726
727static struct trans_ctl_table trans_net_table[] = {
728 { NET_CORE, "core", trans_net_core_table },
729 /* NET_ETHER not used */
730 /* NET_802 not used */
731 { NET_UNIX, "unix", trans_net_unix_table },
732 { NET_IPV4, "ipv4", trans_net_ipv4_table },
733 { NET_IPX, "ipx", trans_net_ipx_table },
734 { NET_ATALK, "atalk", trans_net_atalk_table },
735 { NET_NETROM, "netrom", trans_net_netrom_table },
736 { NET_AX25, "ax25", trans_net_ax25_table },
737 { NET_BRIDGE, "bridge", trans_net_bridge_table },
738 { NET_ROSE, "rose", trans_net_rose_table },
739 { NET_IPV6, "ipv6", trans_net_ipv6_table },
740 { NET_X25, "x25", trans_net_x25_table },
741 { NET_TR, "tr", trans_net_tr_table },
742 { NET_DECNET, "decnet", trans_net_decnet_table },
743 /* NET_ECONET not used */
744 { NET_SCTP, "sctp", trans_net_sctp_table },
745 { NET_LLC, "llc", trans_net_llc_table },
746 { NET_NETFILTER, "netfilter", trans_net_netfilter_table },
747 { NET_DCCP, "dccp", trans_net_dccp_table },
748 { NET_IRDA, "irda", trans_net_irda_table },
749 { 2089, "nf_conntrack_max" },
750 {}
751};
752
753static struct trans_ctl_table trans_fs_quota_table[] = {
754 { FS_DQ_LOOKUPS, "lookups" },
755 { FS_DQ_DROPS, "drops" },
756 { FS_DQ_READS, "reads" },
757 { FS_DQ_WRITES, "writes" },
758 { FS_DQ_CACHE_HITS, "cache_hits" },
759 { FS_DQ_ALLOCATED, "allocated_dquots" },
760 { FS_DQ_FREE, "free_dquots" },
761 { FS_DQ_SYNCS, "syncs" },
762 { FS_DQ_WARNINGS, "warnings" },
763 {}
764};
765
766static struct trans_ctl_table trans_fs_xfs_table[] = {
767 { XFS_RESTRICT_CHOWN, "restrict_chown" },
768 { XFS_SGID_INHERIT, "irix_sgid_inherit" },
769 { XFS_SYMLINK_MODE, "irix_symlink_mode" },
770 { XFS_PANIC_MASK, "panic_mask" },
771
772 { XFS_ERRLEVEL, "error_level" },
773 { XFS_SYNCD_TIMER, "xfssyncd_centisecs" },
774 { XFS_INHERIT_SYNC, "inherit_sync" },
775 { XFS_INHERIT_NODUMP, "inherit_nodump" },
776 { XFS_INHERIT_NOATIME, "inherit_noatime" },
777 { XFS_BUF_TIMER, "xfsbufd_centisecs" },
778 { XFS_BUF_AGE, "age_buffer_centisecs" },
779 { XFS_INHERIT_NOSYM, "inherit_nosymlinks" },
780 { XFS_ROTORSTEP, "rotorstep" },
781 { XFS_INHERIT_NODFRG, "inherit_nodefrag" },
782 { XFS_FILESTREAM_TIMER, "filestream_centisecs" },
783 { XFS_STATS_CLEAR, "stats_clear" },
784 {}
785};
786
787static struct trans_ctl_table trans_fs_ocfs2_nm_table[] = {
788 { 1, "hb_ctl_path" },
789 {}
790};
791
792static struct trans_ctl_table trans_fs_ocfs2_table[] = {
793 { 1, "nm", trans_fs_ocfs2_nm_table },
794 {}
795};
796
797static struct trans_ctl_table trans_inotify_table[] = {
798 { INOTIFY_MAX_USER_INSTANCES, "max_user_instances" },
799 { INOTIFY_MAX_USER_WATCHES, "max_user_watches" },
800 { INOTIFY_MAX_QUEUED_EVENTS, "max_queued_events" },
801 {}
802};
803
804static struct trans_ctl_table trans_fs_table[] = {
805 { FS_NRINODE, "inode-nr" },
806 { FS_STATINODE, "inode-state" },
807 /* FS_MAXINODE unused */
808 /* FS_NRDQUOT unused */
809 /* FS_MAXDQUOT unused */
810 { FS_NRFILE, "file-nr" },
811 { FS_MAXFILE, "file-max" },
812 { FS_DENTRY, "dentry-state" },
813 /* FS_NRSUPER unused */
814 /* FS_MAXUPSER unused */
815 { FS_OVERFLOWUID, "overflowuid" },
816 { FS_OVERFLOWGID, "overflowgid" },
817 { FS_LEASES, "leases-enable" },
818 { FS_DIR_NOTIFY, "dir-notify-enable" },
819 { FS_LEASE_TIME, "lease-break-time" },
820 { FS_DQSTATS, "quota", trans_fs_quota_table },
821 { FS_XFS, "xfs", trans_fs_xfs_table },
822 { FS_AIO_NR, "aio-nr" },
823 { FS_AIO_MAX_NR, "aio-max-nr" },
824 { FS_INOTIFY, "inotify", trans_inotify_table },
825 { FS_OCFS2, "ocfs2", trans_fs_ocfs2_table },
826 { KERN_SETUID_DUMPABLE, "suid_dumpable" },
827 {}
828};
829
830static struct trans_ctl_table trans_debug_table[] = {
831 {}
832};
833
834static struct trans_ctl_table trans_cdrom_table[] = {
835 { DEV_CDROM_INFO, "info" },
836 { DEV_CDROM_AUTOCLOSE, "autoclose" },
837 { DEV_CDROM_AUTOEJECT, "autoeject" },
838 { DEV_CDROM_DEBUG, "debug" },
839 { DEV_CDROM_LOCK, "lock" },
840 { DEV_CDROM_CHECK_MEDIA, "check_media" },
841 {}
842};
843
844static struct trans_ctl_table trans_ipmi_table[] = {
845 { DEV_IPMI_POWEROFF_POWERCYCLE, "poweroff_powercycle" },
846 {}
847};
848
849static struct trans_ctl_table trans_mac_hid_files[] = {
850 /* DEV_MAC_HID_KEYBOARD_SENDS_LINUX_KEYCODES unused */
851 /* DEV_MAC_HID_KEYBOARD_LOCK_KEYCODES unused */
852 { DEV_MAC_HID_MOUSE_BUTTON_EMULATION, "mouse_button_emulation" },
853 { DEV_MAC_HID_MOUSE_BUTTON2_KEYCODE, "mouse_button2_keycode" },
854 { DEV_MAC_HID_MOUSE_BUTTON3_KEYCODE, "mouse_button3_keycode" },
855 /* DEV_MAC_HID_ADB_MOUSE_SENDS_KEYCODES unused */
856 {}
857};
858
859static struct trans_ctl_table trans_raid_table[] = {
860 { DEV_RAID_SPEED_LIMIT_MIN, "speed_limit_min" },
861 { DEV_RAID_SPEED_LIMIT_MAX, "speed_limit_max" },
862 {}
863};
864
865static struct trans_ctl_table trans_scsi_table[] = {
866 { DEV_SCSI_LOGGING_LEVEL, "logging_level" },
867 {}
868};
869
870static struct trans_ctl_table trans_parport_default_table[] = {
871 { DEV_PARPORT_DEFAULT_TIMESLICE, "timeslice" },
872 { DEV_PARPORT_DEFAULT_SPINTIME, "spintime" },
873 {}
874};
875
876static struct trans_ctl_table trans_parport_device_table[] = {
877 { DEV_PARPORT_DEVICE_TIMESLICE, "timeslice" },
878 {}
879};
880
881static struct trans_ctl_table trans_parport_devices_table[] = {
882 { DEV_PARPORT_DEVICES_ACTIVE, "active" },
883 { 0, NULL, trans_parport_device_table },
884 {}
885};
886
887static struct trans_ctl_table trans_parport_parport_table[] = {
888 { DEV_PARPORT_SPINTIME, "spintime" },
889 { DEV_PARPORT_BASE_ADDR, "base-addr" },
890 { DEV_PARPORT_IRQ, "irq" },
891 { DEV_PARPORT_DMA, "dma" },
892 { DEV_PARPORT_MODES, "modes" },
893 { DEV_PARPORT_DEVICES, "devices", trans_parport_devices_table },
894 { DEV_PARPORT_AUTOPROBE, "autoprobe" },
895 { DEV_PARPORT_AUTOPROBE + 1, "autoprobe0" },
896 { DEV_PARPORT_AUTOPROBE + 2, "autoprobe1" },
897 { DEV_PARPORT_AUTOPROBE + 3, "autoprobe2" },
898 { DEV_PARPORT_AUTOPROBE + 4, "autoprobe3" },
899 {}
900};
901static struct trans_ctl_table trans_parport_table[] = {
902 { DEV_PARPORT_DEFAULT, "default", trans_parport_default_table },
903 { 0, NULL, trans_parport_parport_table },
904 {}
905};
906
907static struct trans_ctl_table trans_dev_table[] = {
908 { DEV_CDROM, "cdrom", trans_cdrom_table },
909 /* DEV_HWMON unused */
910 { DEV_PARPORT, "parport", trans_parport_table },
911 { DEV_RAID, "raid", trans_raid_table },
912 { DEV_MAC_HID, "mac_hid", trans_mac_hid_files },
913 { DEV_SCSI, "scsi", trans_scsi_table },
914 { DEV_IPMI, "ipmi", trans_ipmi_table },
915 {}
916};
917
918static struct trans_ctl_table trans_bus_isa_table[] = {
919 { BUS_ISA_MEM_BASE, "membase" },
920 { BUS_ISA_PORT_BASE, "portbase" },
921 { BUS_ISA_PORT_SHIFT, "portshift" },
922 {}
923};
924
925static struct trans_ctl_table trans_bus_table[] = {
926 { CTL_BUS_ISA, "isa", trans_bus_isa_table },
927 {}
928};
929
930static struct trans_ctl_table trans_arlan_conf_table0[] = {
931 { 1, "spreadingCode" },
932 { 2, "channelNumber" },
933 { 3, "scramblingDisable" },
934 { 4, "txAttenuation" },
935 { 5, "systemId" },
936 { 6, "maxDatagramSize" },
937 { 7, "maxFrameSize" },
938 { 8, "maxRetries" },
939 { 9, "receiveMode" },
940 { 10, "priority" },
941 { 11, "rootOrRepeater" },
942 { 12, "SID" },
943 { 13, "registrationMode" },
944 { 14, "registrationFill" },
945 { 15, "localTalkAddress" },
946 { 16, "codeFormat" },
947 { 17, "numChannels" },
948 { 18, "channel1" },
949 { 19, "channel2" },
950 { 20, "channel3" },
951 { 21, "channel4" },
952 { 22, "txClear" },
953 { 23, "txRetries" },
954 { 24, "txRouting" },
955 { 25, "txScrambled" },
956 { 26, "rxParameter" },
957 { 27, "txTimeoutMs" },
958 { 28, "waitCardTimeout" },
959 { 29, "channelSet" },
960 { 30, "name" },
961 { 31, "waitTime" },
962 { 32, "lParameter" },
963 { 33, "_15" },
964 { 34, "headerSize" },
965 { 36, "tx_delay_ms" },
966 { 37, "retries" },
967 { 38, "ReTransmitPacketMaxSize" },
968 { 39, "waitReTransmitPacketMaxSize" },
969 { 40, "fastReTransCount" },
970 { 41, "driverRetransmissions" },
971 { 42, "txAckTimeoutMs" },
972 { 43, "registrationInterrupts" },
973 { 44, "hardwareType" },
974 { 45, "radioType" },
975 { 46, "writeEEPROM" },
976 { 47, "writeRadioType" },
977 { 48, "entry_exit_debug" },
978 { 49, "debug" },
979 { 50, "in_speed" },
980 { 51, "out_speed" },
981 { 52, "in_speed10" },
982 { 53, "out_speed10" },
983 { 54, "in_speed_max" },
984 { 55, "out_speed_max" },
985 { 56, "measure_rate" },
986 { 57, "pre_Command_Wait" },
987 { 58, "rx_tweak1" },
988 { 59, "rx_tweak2" },
989 { 60, "tx_queue_len" },
990
991 { 150, "arlan0-txRing" },
992 { 151, "arlan0-rxRing" },
993 { 152, "arlan0-18" },
994 { 153, "arlan0-ring" },
995 { 154, "arlan0-shm-cpy" },
996 { 155, "config0" },
997 { 156, "reset0" },
998 {}
999};
1000
1001static struct trans_ctl_table trans_arlan_conf_table1[] = {
1002 { 1, "spreadingCode" },
1003 { 2, "channelNumber" },
1004 { 3, "scramblingDisable" },
1005 { 4, "txAttenuation" },
1006 { 5, "systemId" },
1007 { 6, "maxDatagramSize" },
1008 { 7, "maxFrameSize" },
1009 { 8, "maxRetries" },
1010 { 9, "receiveMode" },
1011 { 10, "priority" },
1012 { 11, "rootOrRepeater" },
1013 { 12, "SID" },
1014 { 13, "registrationMode" },
1015 { 14, "registrationFill" },
1016 { 15, "localTalkAddress" },
1017 { 16, "codeFormat" },
1018 { 17, "numChannels" },
1019 { 18, "channel1" },
1020 { 19, "channel2" },
1021 { 20, "channel3" },
1022 { 21, "channel4" },
1023 { 22, "txClear" },
1024 { 23, "txRetries" },
1025 { 24, "txRouting" },
1026 { 25, "txScrambled" },
1027 { 26, "rxParameter" },
1028 { 27, "txTimeoutMs" },
1029 { 28, "waitCardTimeout" },
1030 { 29, "channelSet" },
1031 { 30, "name" },
1032 { 31, "waitTime" },
1033 { 32, "lParameter" },
1034 { 33, "_15" },
1035 { 34, "headerSize" },
1036 { 36, "tx_delay_ms" },
1037 { 37, "retries" },
1038 { 38, "ReTransmitPacketMaxSize" },
1039 { 39, "waitReTransmitPacketMaxSize" },
1040 { 40, "fastReTransCount" },
1041 { 41, "driverRetransmissions" },
1042 { 42, "txAckTimeoutMs" },
1043 { 43, "registrationInterrupts" },
1044 { 44, "hardwareType" },
1045 { 45, "radioType" },
1046 { 46, "writeEEPROM" },
1047 { 47, "writeRadioType" },
1048 { 48, "entry_exit_debug" },
1049 { 49, "debug" },
1050 { 50, "in_speed" },
1051 { 51, "out_speed" },
1052 { 52, "in_speed10" },
1053 { 53, "out_speed10" },
1054 { 54, "in_speed_max" },
1055 { 55, "out_speed_max" },
1056 { 56, "measure_rate" },
1057 { 57, "pre_Command_Wait" },
1058 { 58, "rx_tweak1" },
1059 { 59, "rx_tweak2" },
1060 { 60, "tx_queue_len" },
1061
1062 { 150, "arlan1-txRing" },
1063 { 151, "arlan1-rxRing" },
1064 { 152, "arlan1-18" },
1065 { 153, "arlan1-ring" },
1066 { 154, "arlan1-shm-cpy" },
1067 { 155, "config1" },
1068 { 156, "reset1" },
1069 {}
1070};
1071
1072static struct trans_ctl_table trans_arlan_conf_table2[] = {
1073 { 1, "spreadingCode" },
1074 { 2, "channelNumber" },
1075 { 3, "scramblingDisable" },
1076 { 4, "txAttenuation" },
1077 { 5, "systemId" },
1078 { 6, "maxDatagramSize" },
1079 { 7, "maxFrameSize" },
1080 { 8, "maxRetries" },
1081 { 9, "receiveMode" },
1082 { 10, "priority" },
1083 { 11, "rootOrRepeater" },
1084 { 12, "SID" },
1085 { 13, "registrationMode" },
1086 { 14, "registrationFill" },
1087 { 15, "localTalkAddress" },
1088 { 16, "codeFormat" },
1089 { 17, "numChannels" },
1090 { 18, "channel1" },
1091 { 19, "channel2" },
1092 { 20, "channel3" },
1093 { 21, "channel4" },
1094 { 22, "txClear" },
1095 { 23, "txRetries" },
1096 { 24, "txRouting" },
1097 { 25, "txScrambled" },
1098 { 26, "rxParameter" },
1099 { 27, "txTimeoutMs" },
1100 { 28, "waitCardTimeout" },
1101 { 29, "channelSet" },
1102 { 30, "name" },
1103 { 31, "waitTime" },
1104 { 32, "lParameter" },
1105 { 33, "_15" },
1106 { 34, "headerSize" },
1107 { 36, "tx_delay_ms" },
1108 { 37, "retries" },
1109 { 38, "ReTransmitPacketMaxSize" },
1110 { 39, "waitReTransmitPacketMaxSize" },
1111 { 40, "fastReTransCount" },
1112 { 41, "driverRetransmissions" },
1113 { 42, "txAckTimeoutMs" },
1114 { 43, "registrationInterrupts" },
1115 { 44, "hardwareType" },
1116 { 45, "radioType" },
1117 { 46, "writeEEPROM" },
1118 { 47, "writeRadioType" },
1119 { 48, "entry_exit_debug" },
1120 { 49, "debug" },
1121 { 50, "in_speed" },
1122 { 51, "out_speed" },
1123 { 52, "in_speed10" },
1124 { 53, "out_speed10" },
1125 { 54, "in_speed_max" },
1126 { 55, "out_speed_max" },
1127 { 56, "measure_rate" },
1128 { 57, "pre_Command_Wait" },
1129 { 58, "rx_tweak1" },
1130 { 59, "rx_tweak2" },
1131 { 60, "tx_queue_len" },
1132
1133 { 150, "arlan2-txRing" },
1134 { 151, "arlan2-rxRing" },
1135 { 152, "arlan2-18" },
1136 { 153, "arlan2-ring" },
1137 { 154, "arlan2-shm-cpy" },
1138 { 155, "config2" },
1139 { 156, "reset2" },
1140 {}
1141};
1142
1143static struct trans_ctl_table trans_arlan_conf_table3[] = {
1144 { 1, "spreadingCode" },
1145 { 2, "channelNumber" },
1146 { 3, "scramblingDisable" },
1147 { 4, "txAttenuation" },
1148 { 5, "systemId" },
1149 { 6, "maxDatagramSize" },
1150 { 7, "maxFrameSize" },
1151 { 8, "maxRetries" },
1152 { 9, "receiveMode" },
1153 { 10, "priority" },
1154 { 11, "rootOrRepeater" },
1155 { 12, "SID" },
1156 { 13, "registrationMode" },
1157 { 14, "registrationFill" },
1158 { 15, "localTalkAddress" },
1159 { 16, "codeFormat" },
1160 { 17, "numChannels" },
1161 { 18, "channel1" },
1162 { 19, "channel2" },
1163 { 20, "channel3" },
1164 { 21, "channel4" },
1165 { 22, "txClear" },
1166 { 23, "txRetries" },
1167 { 24, "txRouting" },
1168 { 25, "txScrambled" },
1169 { 26, "rxParameter" },
1170 { 27, "txTimeoutMs" },
1171 { 28, "waitCardTimeout" },
1172 { 29, "channelSet" },
1173 { 30, "name" },
1174 { 31, "waitTime" },
1175 { 32, "lParameter" },
1176 { 33, "_15" },
1177 { 34, "headerSize" },
1178 { 36, "tx_delay_ms" },
1179 { 37, "retries" },
1180 { 38, "ReTransmitPacketMaxSize" },
1181 { 39, "waitReTransmitPacketMaxSize" },
1182 { 40, "fastReTransCount" },
1183 { 41, "driverRetransmissions" },
1184 { 42, "txAckTimeoutMs" },
1185 { 43, "registrationInterrupts" },
1186 { 44, "hardwareType" },
1187 { 45, "radioType" },
1188 { 46, "writeEEPROM" },
1189 { 47, "writeRadioType" },
1190 { 48, "entry_exit_debug" },
1191 { 49, "debug" },
1192 { 50, "in_speed" },
1193 { 51, "out_speed" },
1194 { 52, "in_speed10" },
1195 { 53, "out_speed10" },
1196 { 54, "in_speed_max" },
1197 { 55, "out_speed_max" },
1198 { 56, "measure_rate" },
1199 { 57, "pre_Command_Wait" },
1200 { 58, "rx_tweak1" },
1201 { 59, "rx_tweak2" },
1202 { 60, "tx_queue_len" },
1203
1204 { 150, "arlan3-txRing" },
1205 { 151, "arlan3-rxRing" },
1206 { 152, "arlan3-18" },
1207 { 153, "arlan3-ring" },
1208 { 154, "arlan3-shm-cpy" },
1209 { 155, "config3" },
1210 { 156, "reset3" },
1211 {}
1212};
1213
1214static struct trans_ctl_table trans_arlan_table[] = {
1215 { 1, "arlan0", trans_arlan_conf_table0 },
1216 { 2, "arlan1", trans_arlan_conf_table1 },
1217 { 3, "arlan2", trans_arlan_conf_table2 },
1218 { 4, "arlan3", trans_arlan_conf_table3 },
1219 {}
1220};
1221
1222static struct trans_ctl_table trans_appldata_table[] = {
1223 { CTL_APPLDATA_TIMER, "timer" },
1224 { CTL_APPLDATA_INTERVAL, "interval" },
1225 { CTL_APPLDATA_OS, "os" },
1226 { CTL_APPLDATA_NET_SUM, "net_sum" },
1227 { CTL_APPLDATA_MEM, "mem" },
1228 {}
1229
1230};
1231
1232static struct trans_ctl_table trans_s390dbf_table[] = {
1233 { 5678 /* CTL_S390DBF_STOPPABLE */, "debug_stoppable" },
1234 { 5679 /* CTL_S390DBF_ACTIVE */, "debug_active" },
1235 {}
1236};
1237
1238static struct trans_ctl_table trans_sunrpc_table[] = {
1239 { CTL_RPCDEBUG, "rpc_debug" },
1240 { CTL_NFSDEBUG, "nfs_debug" },
1241 { CTL_NFSDDEBUG, "nfsd_debug" },
1242 { CTL_NLMDEBUG, "nlm_debug" },
1243 { CTL_SLOTTABLE_UDP, "udp_slot_table_entries" },
1244 { CTL_SLOTTABLE_TCP, "tcp_slot_table_entries" },
1245 { CTL_MIN_RESVPORT, "min_resvport" },
1246 { CTL_MAX_RESVPORT, "max_resvport" },
1247 {}
1248};
1249
1250static struct trans_ctl_table trans_pm_table[] = {
1251 { 1 /* CTL_PM_SUSPEND */, "suspend" },
1252 { 2 /* CTL_PM_CMODE */, "cmode" },
1253 { 3 /* CTL_PM_P0 */, "p0" },
1254 { 4 /* CTL_PM_CM */, "cm" },
1255 {}
1256};
1257
1258static struct trans_ctl_table trans_frv_table[] = {
1259 { 1, "cache-mode" },
1260 { 2, "pin-cxnr" },
1261 {}
1262};
1263
1264static struct trans_ctl_table trans_root_table[] = {
1265 { CTL_KERN, "kernel", trans_kern_table },
1266 { CTL_VM, "vm", trans_vm_table },
1267 { CTL_NET, "net", trans_net_table },
1268 /* CTL_PROC not used */
1269 { CTL_FS, "fs", trans_fs_table },
1270 { CTL_DEBUG, "debug", trans_debug_table },
1271 { CTL_DEV, "dev", trans_dev_table },
1272 { CTL_BUS, "bus", trans_bus_table },
1273 { CTL_ABI, "abi" },
1274 /* CTL_CPU not used */
1275 { CTL_ARLAN, "arlan", trans_arlan_table },
1276 { CTL_APPLDATA, "appldata", trans_appldata_table },
1277 { CTL_S390DBF, "s390dbf", trans_s390dbf_table },
1278 { CTL_SUNRPC, "sunrpc", trans_sunrpc_table },
1279 { CTL_PM, "pm", trans_pm_table },
1280 { CTL_FRV, "frv", trans_frv_table },
1281 {}
1282};
1283
1284
1285
1286
1287static int sysctl_depth(struct ctl_table *table)
1288{
1289 struct ctl_table *tmp;
1290 int depth;
1291
1292 depth = 0;
1293 for (tmp = table; tmp->parent; tmp = tmp->parent)
1294 depth++;
1295
1296 return depth;
1297}
1298
1299static struct ctl_table *sysctl_parent(struct ctl_table *table, int n)
1300{
1301 int i;
1302
1303 for (i = 0; table && i < n; i++)
1304 table = table->parent;
1305
1306 return table;
1307}
1308
1309static struct trans_ctl_table *sysctl_binary_lookup(struct ctl_table *table)
1310{
1311 struct ctl_table *test;
1312 struct trans_ctl_table *ref;
1313 int depth, cur_depth;
1314
1315 depth = sysctl_depth(table);
1316
1317 cur_depth = depth;
1318 ref = trans_root_table;
1319repeat:
1320 test = sysctl_parent(table, cur_depth);
1321 for (; ref->ctl_name || ref->procname || ref->child; ref++) {
1322 int match = 0;
1323
1324 if (cur_depth && !ref->child)
1325 continue;
1326
1327 if (test->procname && ref->procname &&
1328 (strcmp(test->procname, ref->procname) == 0))
1329 match++;
1330
1331 if (test->ctl_name && ref->ctl_name &&
1332 (test->ctl_name == ref->ctl_name))
1333 match++;
1334
1335 if (!ref->ctl_name && !ref->procname)
1336 match++;
1337
1338 if (match) {
1339 if (cur_depth != 0) {
1340 cur_depth--;
1341 ref = ref->child;
1342 goto repeat;
1343 }
1344 goto out;
1345 }
1346 }
1347 ref = NULL;
1348out:
1349 return ref;
1350}
1351
1352static void sysctl_print_path(struct ctl_table *table)
1353{
1354 struct ctl_table *tmp;
1355 int depth, i;
1356 depth = sysctl_depth(table);
1357 if (table->procname) {
1358 for (i = depth; i >= 0; i--) {
1359 tmp = sysctl_parent(table, i);
1360 printk("/%s", tmp->procname?tmp->procname:"");
1361 }
1362 }
1363 printk(" ");
1364 if (table->ctl_name) {
1365 for (i = depth; i >= 0; i--) {
1366 tmp = sysctl_parent(table, i);
1367 printk(".%d", tmp->ctl_name);
1368 }
1369 }
1370}
1371
1372static void sysctl_repair_table(struct ctl_table *table)
1373{
1374 /* Don't complain about the classic default
1375 * sysctl strategy routine. Maybe later we
1376 * can get the tables fixed and complain about
1377 * this.
1378 */
1379 if (table->ctl_name && table->procname &&
1380 (table->proc_handler == proc_dointvec) &&
1381 (!table->strategy)) {
1382 table->strategy = sysctl_data;
1383 }
1384}
1385
1386static struct ctl_table *sysctl_check_lookup(struct ctl_table *table)
1387{
1388 struct ctl_table_header *head;
1389 struct ctl_table *ref, *test;
1390 int depth, cur_depth;
1391
1392 depth = sysctl_depth(table);
1393
1394 for (head = sysctl_head_next(NULL); head;
1395 head = sysctl_head_next(head)) {
1396 cur_depth = depth;
1397 ref = head->ctl_table;
1398repeat:
1399 test = sysctl_parent(table, cur_depth);
1400 for (; ref->ctl_name || ref->procname; ref++) {
1401 int match = 0;
1402 if (cur_depth && !ref->child)
1403 continue;
1404
1405 if (test->procname && ref->procname &&
1406 (strcmp(test->procname, ref->procname) == 0))
1407 match++;
1408
1409 if (test->ctl_name && ref->ctl_name &&
1410 (test->ctl_name == ref->ctl_name))
1411 match++;
1412
1413 if (match) {
1414 if (cur_depth != 0) {
1415 cur_depth--;
1416 ref = ref->child;
1417 goto repeat;
1418 }
1419 goto out;
1420 }
1421 }
1422 }
1423 ref = NULL;
1424out:
1425 sysctl_head_finish(head);
1426 return ref;
1427}
1428
1429static void set_fail(const char **fail, struct ctl_table *table, const char *str)
1430{
1431 if (*fail) {
1432 printk(KERN_ERR "sysctl table check failed: ");
1433 sysctl_print_path(table);
1434 printk(" %s\n", *fail);
1435 }
1436 *fail = str;
1437}
1438
1439static int sysctl_check_dir(struct ctl_table *table)
1440{
1441 struct ctl_table *ref;
1442 int error;
1443
1444 error = 0;
1445 ref = sysctl_check_lookup(table);
1446 if (ref) {
1447 int match = 0;
1448 if ((!table->procname && !ref->procname) ||
1449 (table->procname && ref->procname &&
1450 (strcmp(table->procname, ref->procname) == 0)))
1451 match++;
1452
1453 if ((!table->ctl_name && !ref->ctl_name) ||
1454 (table->ctl_name && ref->ctl_name &&
1455 (table->ctl_name == ref->ctl_name)))
1456 match++;
1457
1458 if (match != 2) {
1459 printk(KERN_ERR "%s: failed: ", __func__);
1460 sysctl_print_path(table);
1461 printk(" ref: ");
1462 sysctl_print_path(ref);
1463 printk("\n");
1464 error = -EINVAL;
1465 }
1466 }
1467 return error;
1468}
1469
1470static void sysctl_check_leaf(struct ctl_table *table, const char **fail)
1471{
1472 struct ctl_table *ref;
1473
1474 ref = sysctl_check_lookup(table);
1475 if (ref && (ref != table))
1476 set_fail(fail, table, "Sysctl already exists");
1477}
1478
1479static void sysctl_check_bin_path(struct ctl_table *table, const char **fail)
1480{
1481 struct trans_ctl_table *ref;
1482
1483 ref = sysctl_binary_lookup(table);
1484 if (table->ctl_name && !ref)
1485 set_fail(fail, table, "Unknown sysctl binary path");
1486 if (ref) {
1487 if (ref->procname &&
1488 (!table->procname ||
1489 (strcmp(table->procname, ref->procname) != 0)))
1490 set_fail(fail, table, "procname does not match binary path procname");
1491
1492 if (ref->ctl_name && table->ctl_name &&
1493 (table->ctl_name != ref->ctl_name))
1494 set_fail(fail, table, "ctl_name does not match binary path ctl_name");
1495 }
1496}
1497
1498int sysctl_check_table(struct ctl_table *table)
1499{
1500 int error = 0;
1501 for (; table->ctl_name || table->procname; table++) {
1502 const char *fail = NULL;
1503
1504 sysctl_repair_table(table);
1505 if (table->parent) {
1506 if (table->procname && !table->parent->procname)
1507 set_fail(&fail, table, "Parent without procname");
1508 if (table->ctl_name && !table->parent->ctl_name)
1509 set_fail(&fail, table, "Parent without ctl_name");
1510 }
1511 if (!table->procname)
1512 set_fail(&fail, table, "No procname");
1513 if (table->child) {
1514 if (table->data)
1515 set_fail(&fail, table, "Directory with data?");
1516 if (table->maxlen)
1517 set_fail(&fail, table, "Directory with maxlen?");
1518 if ((table->mode & (S_IRUGO|S_IXUGO)) != table->mode)
1519 set_fail(&fail, table, "Writable sysctl directory");
1520 if (table->proc_handler)
1521 set_fail(&fail, table, "Directory with proc_handler");
1522 if (table->strategy)
1523 set_fail(&fail, table, "Directory with strategy");
1524 if (table->extra1)
1525 set_fail(&fail, table, "Directory with extra1");
1526 if (table->extra2)
1527 set_fail(&fail, table, "Directory with extra2");
1528 if (sysctl_check_dir(table))
1529 set_fail(&fail, table, "Inconsistent directory names");
1530 } else {
1531 if ((table->strategy == sysctl_data) ||
1532 (table->strategy == sysctl_string) ||
1533 (table->strategy == sysctl_intvec) ||
1534 (table->strategy == sysctl_jiffies) ||
1535 (table->strategy == sysctl_ms_jiffies) ||
1536 (table->proc_handler == proc_dostring) ||
1537 (table->proc_handler == proc_dointvec) ||
1538#ifdef CONFIG_SECURITY_CAPABILITIES
1539 (table->proc_handler == proc_dointvec_bset) ||
1540#endif /* def CONFIG_SECURITY_CAPABILITIES */
1541 (table->proc_handler == proc_dointvec_minmax) ||
1542 (table->proc_handler == proc_dointvec_jiffies) ||
1543 (table->proc_handler == proc_dointvec_userhz_jiffies) ||
1544 (table->proc_handler == proc_dointvec_ms_jiffies) ||
1545 (table->proc_handler == proc_doulongvec_minmax) ||
1546 (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
1547 if (!table->data)
1548 set_fail(&fail, table, "No data");
1549 if (!table->maxlen)
1550 set_fail(&fail, table, "No maxlen");
1551 }
1552 if ((table->proc_handler == proc_doulongvec_minmax) ||
1553 (table->proc_handler == proc_doulongvec_ms_jiffies_minmax)) {
1554 if (table->maxlen > sizeof (unsigned long)) {
1555 if (!table->extra1)
1556 set_fail(&fail, table, "No min");
1557 if (!table->extra2)
1558 set_fail(&fail, table, "No max");
1559 }
1560 }
1561#ifdef CONFIG_SYSCTL_SYSCALL
1562 if (table->ctl_name && !table->strategy)
1563 set_fail(&fail, table, "Missing strategy");
1564#endif
1565#if 0
1566 if (!table->ctl_name && table->strategy)
1567 set_fail(&fail, table, "Strategy without ctl_name");
1568#endif
1569#ifdef CONFIG_PROC_FS
1570 if (table->procname && !table->proc_handler)
1571 set_fail(&fail, table, "No proc_handler");
1572#endif
1573#if 0
1574 if (!table->procname && table->proc_handler)
1575 set_fail(&fail, table, "proc_handler without procname");
1576#endif
1577 sysctl_check_leaf(table, &fail);
1578 }
1579 sysctl_check_bin_path(table, &fail);
1580 if (fail) {
1581 set_fail(&fail, table, NULL);
1582 error = -EINVAL;
1583 }
1584 if (table->child)
1585 error |= sysctl_check_table(table->child);
1586 }
1587 return error;
1588}
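
For readers of the new checker above, a small illustrative table layout (hypothetical names, not part of this commit) that would pass sysctl_check_table(): the directory entry carries only a procname and a read/execute mode, while the leaf uses CTL_UNNUMBERED (0 in this kernel's sysctl.h) so no binary-path translation entry is required, and supplies data, maxlen and a proc_handler for /proc/sys. This is a sketch under those assumptions, not code from the patch.

static int example_value;

static struct ctl_table example_leaf[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{}
};

static struct ctl_table example_dir[] = {
	{
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example",
		.mode		= 0555,		/* read/execute only: directories must not be writable */
		.child		= example_leaf,
	},
	{}
};
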
diff --git a/kernel/time.c b/kernel/time.c
index 2d5b6a682138..09d3c45c4da7 100644
--- a/kernel/time.c
+++ b/kernel/time.c
@@ -9,9 +9,9 @@
9 */ 9 */
10/* 10/*
11 * Modification history kernel/time.c 11 * Modification history kernel/time.c
12 * 12 *
13 * 1993-09-02 Philip Gladstone 13 * 1993-09-02 Philip Gladstone
14 * Created file with time related functions from sched.c and adjtimex() 14 * Created file with time related functions from sched.c and adjtimex()
15 * 1993-10-08 Torsten Duwe 15 * 1993-10-08 Torsten Duwe
16 * adjtime interface update and CMOS clock write code 16 * adjtime interface update and CMOS clock write code
17 * 1995-08-13 Torsten Duwe 17 * 1995-08-13 Torsten Duwe
@@ -30,6 +30,7 @@
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/timex.h> 31#include <linux/timex.h>
32#include <linux/capability.h> 32#include <linux/capability.h>
33#include <linux/clocksource.h>
33#include <linux/errno.h> 34#include <linux/errno.h>
34#include <linux/syscalls.h> 35#include <linux/syscalls.h>
35#include <linux/security.h> 36#include <linux/security.h>
@@ -38,7 +39,7 @@
38#include <asm/uaccess.h> 39#include <asm/uaccess.h>
39#include <asm/unistd.h> 40#include <asm/unistd.h>
40 41
41/* 42/*
42 * The timezone where the local system is located. Used as a default by some 43 * The timezone where the local system is located. Used as a default by some
43 * programs who obtain this value by using gettimeofday. 44 * programs who obtain this value by using gettimeofday.
44 */ 45 */
@@ -71,7 +72,7 @@ asmlinkage long sys_time(time_t __user * tloc)
71 * why not move it into the appropriate arch directory (for those 72 * why not move it into the appropriate arch directory (for those
72 * architectures that need it). 73 * architectures that need it).
73 */ 74 */
74 75
75asmlinkage long sys_stime(time_t __user *tptr) 76asmlinkage long sys_stime(time_t __user *tptr)
76{ 77{
77 struct timespec tv; 78 struct timespec tv;
@@ -110,10 +111,10 @@ asmlinkage long sys_gettimeofday(struct timeval __user *tv, struct timezone __us
110/* 111/*
111 * Adjust the time obtained from the CMOS to be UTC time instead of 112 * Adjust the time obtained from the CMOS to be UTC time instead of
112 * local time. 113 * local time.
113 * 114 *
114 * This is ugly, but preferable to the alternatives. Otherwise we 115 * This is ugly, but preferable to the alternatives. Otherwise we
115 * would either need to write a program to do it in /etc/rc (and risk 116 * would either need to write a program to do it in /etc/rc (and risk
116 * confusion if the program gets run more than once; it would also be 117 * confusion if the program gets run more than once; it would also be
117 * hard to make the program warp the clock precisely n hours) or 118 * hard to make the program warp the clock precisely n hours) or
118 * compile in the timezone information into the kernel. Bad, bad.... 119 * compile in the timezone information into the kernel. Bad, bad....
119 * 120 *
@@ -158,6 +159,7 @@ int do_sys_settimeofday(struct timespec *tv, struct timezone *tz)
158 if (tz) { 159 if (tz) {
159 /* SMP safe, global irq locking makes it work. */ 160 /* SMP safe, global irq locking makes it work. */
160 sys_tz = *tz; 161 sys_tz = *tz;
162 update_vsyscall_tz();
161 if (firsttime) { 163 if (firsttime) {
162 firsttime = 0; 164 firsttime = 0;
163 if (!tv) 165 if (!tv)
diff --git a/kernel/timer.c b/kernel/timer.c
index 6ce1952eea7d..8521d10fbb27 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -817,7 +817,7 @@ unsigned long next_timer_interrupt(void)
817#endif 817#endif
818 818
819/* 819/*
820 * Called from the timer interrupt handler to charge one tick to the current 820 * Called from the timer interrupt handler to charge one tick to the current
821 * process. user_tick is 1 if the tick is user time, 0 for system. 821 * process. user_tick is 1 if the tick is user time, 0 for system.
822 */ 822 */
823void update_process_times(int user_tick) 823void update_process_times(int user_tick)
@@ -826,10 +826,13 @@ void update_process_times(int user_tick)
826 int cpu = smp_processor_id(); 826 int cpu = smp_processor_id();
827 827
828 /* Note: this timer irq context must be accounted for as well. */ 828 /* Note: this timer irq context must be accounted for as well. */
829 if (user_tick) 829 if (user_tick) {
830 account_user_time(p, jiffies_to_cputime(1)); 830 account_user_time(p, jiffies_to_cputime(1));
831 else 831 account_user_time_scaled(p, jiffies_to_cputime(1));
832 } else {
832 account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1)); 833 account_system_time(p, HARDIRQ_OFFSET, jiffies_to_cputime(1));
834 account_system_time_scaled(p, jiffies_to_cputime(1));
835 }
833 run_local_timers(); 836 run_local_timers();
834 if (rcu_pending(cpu)) 837 if (rcu_pending(cpu))
835 rcu_check_callbacks(cpu, user_tick); 838 rcu_check_callbacks(cpu, user_tick);
diff --git a/kernel/tsacct.c b/kernel/tsacct.c
index c122131a122f..4ab1b584961b 100644
--- a/kernel/tsacct.c
+++ b/kernel/tsacct.c
@@ -62,6 +62,10 @@ void bacct_add_tsk(struct taskstats *stats, struct task_struct *tsk)
62 rcu_read_unlock(); 62 rcu_read_unlock();
63 stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC; 63 stats->ac_utime = cputime_to_msecs(tsk->utime) * USEC_PER_MSEC;
64 stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC; 64 stats->ac_stime = cputime_to_msecs(tsk->stime) * USEC_PER_MSEC;
65 stats->ac_utimescaled =
66 cputime_to_msecs(tsk->utimescaled) * USEC_PER_MSEC;
67 stats->ac_stimescaled =
68 cputime_to_msecs(tsk->stimescaled) * USEC_PER_MSEC;
65 stats->ac_minflt = tsk->min_flt; 69 stats->ac_minflt = tsk->min_flt;
66 stats->ac_majflt = tsk->maj_flt; 70 stats->ac_majflt = tsk->maj_flt;
67 71
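
Note on units for the hunk above: the new ac_utimescaled/ac_stimescaled fields follow the same convention as ac_utime/ac_stime, i.e. cputime converted to milliseconds and then expressed in microseconds. A minimal sketch of that conversion (hypothetical helper name, not part of this commit):

static inline u64 scaled_cputime_to_usecs(cputime_t t)
{
	/* cputime -> milliseconds -> microseconds, as done in bacct_add_tsk() */
	return (u64)cputime_to_msecs(t) * USEC_PER_MSEC;
}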