author    Trond Myklebust <Trond.Myklebust@netapp.com>  2007-02-13 01:43:25 -0500
committer Trond Myklebust <Trond.Myklebust@netapp.com>  2007-02-13 01:43:25 -0500
commit    d9bc125caf592b7d081021f32ce5b717efdf70c8 (patch)
tree      263b7066ba22ddce21db610c0300f6eaac6f2064 /kernel
parent    43d78ef2ba5bec26d0315859e8324bfc0be23766 (diff)
parent    ec2f9d1331f658433411c58077871e1eef4ee1b4 (diff)
Merge branch 'master' of /home/trondmy/kernel/linux-2.6/

Conflicts:
	net/sunrpc/auth_gss/gss_krb5_crypto.c
	net/sunrpc/auth_gss/gss_spkm3_token.c
	net/sunrpc/clnt.c

Merge with mainline and fix conflicts.
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/auditfilter.c       |   2
-rw-r--r--  kernel/capability.c        |   8
-rw-r--r--  kernel/compat.c            |  66
-rw-r--r--  kernel/cpu.c               |   2
-rw-r--r--  kernel/cpuset.c            |   4
-rw-r--r--  kernel/exit.c              |  67
-rw-r--r--  kernel/fork.c              |   4
-rw-r--r--  kernel/hrtimer.c           |   6
-rw-r--r--  kernel/irq/Makefile        |   2
-rw-r--r--  kernel/irq/chip.c          |  28
-rw-r--r--  kernel/irq/devres.c        |  88
-rw-r--r--  kernel/irq/manage.c        |  35
-rw-r--r--  kernel/irq/proc.c          |   1
-rw-r--r--  kernel/kfifo.c             |  10
-rw-r--r--  kernel/kthread.c           |   6
-rw-r--r--  kernel/lockdep.c           |  42
-rw-r--r--  kernel/lockdep_proc.c      |  43
-rw-r--r--  kernel/module.c            |  76
-rw-r--r--  kernel/panic.c             |   6
-rw-r--r--  kernel/params.c            |  28
-rw-r--r--  kernel/posix-timers.c      |   3
-rw-r--r--  kernel/power/Kconfig       |  26
-rw-r--r--  kernel/power/disk.c        | 115
-rw-r--r--  kernel/power/main.c        |  38
-rw-r--r--  kernel/power/snapshot.c    |   4
-rw-r--r--  kernel/power/swsusp.c      |   5
-rw-r--r--  kernel/power/user.c        | 155
-rw-r--r--  kernel/printk.c            |  18
-rw-r--r--  kernel/profile.c           |   1
-rw-r--r--  kernel/relay.c             | 192
-rw-r--r--  kernel/resource.c          |  62
-rw-r--r--  kernel/sched.c             |  27
-rw-r--r--  kernel/signal.c            |  52
-rw-r--r--  kernel/sys.c               |  51
-rw-r--r--  kernel/sysctl.c            |  91
-rw-r--r--  kernel/time/clocksource.c  |   1
-rw-r--r--  kernel/timer.c             |  84
-rw-r--r--  kernel/workqueue.c         |   6
38 files changed, 1031 insertions(+), 424 deletions(-)
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index 9c8c23227c7f..87865f8b4ce3 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -1601,8 +1601,8 @@ static int audit_filter_user_rules(struct netlink_skb_parms *cb,
 
 int audit_filter_user(struct netlink_skb_parms *cb, int type)
 {
+	enum audit_state state = AUDIT_DISABLED;
 	struct audit_entry *e;
-	enum audit_state state;
 	int ret = 1;
 
 	rcu_read_lock();
diff --git a/kernel/capability.c b/kernel/capability.c
index edb845a6e84a..c8d3c7762034 100644
--- a/kernel/capability.c
+++ b/kernel/capability.c
@@ -92,15 +92,17 @@ out:
  * cap_set_pg - set capabilities for all processes in a given process
  * group.  We call this holding task_capability_lock and tasklist_lock.
  */
-static inline int cap_set_pg(int pgrp, kernel_cap_t *effective,
+static inline int cap_set_pg(int pgrp_nr, kernel_cap_t *effective,
 			     kernel_cap_t *inheritable,
 			     kernel_cap_t *permitted)
 {
 	struct task_struct *g, *target;
 	int ret = -EPERM;
 	int found = 0;
+	struct pid *pgrp;
 
-	do_each_task_pid(pgrp, PIDTYPE_PGID, g) {
+	pgrp = find_pid(pgrp_nr);
+	do_each_pid_task(pgrp, PIDTYPE_PGID, g) {
 		target = g;
 		while_each_thread(g, target) {
 			if (!security_capset_check(target, effective,
@@ -113,7 +115,7 @@ static inline int cap_set_pg(int pgrp, kernel_cap_t *effective,
 			}
 			found = 1;
 		}
-	} while_each_task_pid(pgrp, PIDTYPE_PGID, g);
+	} while_each_pid_task(pgrp, PIDTYPE_PGID, g);
 
 	if (!found)
 		ret = 0;
diff --git a/kernel/compat.c b/kernel/compat.c
index 6952dd057300..cebb4c28c039 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -1016,3 +1016,69 @@ asmlinkage long compat_sys_migrate_pages(compat_pid_t pid,
 	return sys_migrate_pages(pid, nr_bits + 1, old, new);
 }
 #endif
+
+struct compat_sysinfo {
+	s32 uptime;
+	u32 loads[3];
+	u32 totalram;
+	u32 freeram;
+	u32 sharedram;
+	u32 bufferram;
+	u32 totalswap;
+	u32 freeswap;
+	u16 procs;
+	u16 pad;
+	u32 totalhigh;
+	u32 freehigh;
+	u32 mem_unit;
+	char _f[20-2*sizeof(u32)-sizeof(int)];
+};
+
+asmlinkage long
+compat_sys_sysinfo(struct compat_sysinfo __user *info)
+{
+	struct sysinfo s;
+
+	do_sysinfo(&s);
+
+	/* Check to see if any memory value is too large for 32-bit and scale
+	 * down if needed
+	 */
+	if ((s.totalram >> 32) || (s.totalswap >> 32)) {
+		int bitcount = 0;
+
+		while (s.mem_unit < PAGE_SIZE) {
+			s.mem_unit <<= 1;
+			bitcount++;
+		}
+
+		s.totalram >>= bitcount;
+		s.freeram >>= bitcount;
+		s.sharedram >>= bitcount;
+		s.bufferram >>= bitcount;
+		s.totalswap >>= bitcount;
+		s.freeswap >>= bitcount;
+		s.totalhigh >>= bitcount;
+		s.freehigh >>= bitcount;
+	}
+
+	if (!access_ok(VERIFY_WRITE, info, sizeof(struct compat_sysinfo)) ||
+	    __put_user (s.uptime, &info->uptime) ||
+	    __put_user (s.loads[0], &info->loads[0]) ||
+	    __put_user (s.loads[1], &info->loads[1]) ||
+	    __put_user (s.loads[2], &info->loads[2]) ||
+	    __put_user (s.totalram, &info->totalram) ||
+	    __put_user (s.freeram, &info->freeram) ||
+	    __put_user (s.sharedram, &info->sharedram) ||
+	    __put_user (s.bufferram, &info->bufferram) ||
+	    __put_user (s.totalswap, &info->totalswap) ||
+	    __put_user (s.freeswap, &info->freeswap) ||
+	    __put_user (s.procs, &info->procs) ||
+	    __put_user (s.totalhigh, &info->totalhigh) ||
+	    __put_user (s.freehigh, &info->freehigh) ||
+	    __put_user (s.mem_unit, &info->mem_unit))
+		return -EFAULT;
+
+	return 0;
+}
+
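
The scaling loop above preserves the product totalram * mem_unit while shrinking each counter to fit the 32-bit compat fields. A minimal userspace sketch of that arithmetic, assuming 4 KiB pages and an 8 GiB machine (both figures illustrative, not taken from the patch):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t totalram = 1ULL << 33;		/* e.g. 8 GiB in 1-byte units */
	uint32_t mem_unit = 1;
	const uint32_t PAGE_SIZE_ = 4096;	/* assumed 4 KiB pages */
	int bitcount = 0;

	/* same loop as the patch: grow the unit until it reaches page size */
	while (mem_unit < PAGE_SIZE_) {
		mem_unit <<= 1;
		bitcount++;
	}
	totalram >>= bitcount;			/* 2^33 bytes -> 2^21 pages */

	assert(totalram <= UINT32_MAX);			/* fits the u32 field */
	assert(totalram * mem_unit == 1ULL << 33);	/* total bytes preserved */
	return 0;
}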
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 7406fe6966f9..3d4206ada5c9 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -309,6 +309,8 @@ void enable_nonboot_cpus(void)
 	mutex_lock(&cpu_add_remove_lock);
 	cpu_hotplug_disabled = 0;
 	mutex_unlock(&cpu_add_remove_lock);
+	if (cpus_empty(frozen_cpus))
+		return;
 
 	printk("Enabling non-boot CPUs ...\n");
 	for_each_cpu_mask(cpu, frozen_cpus) {
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 6b05dc69c959..f382b0f775e1 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1540,7 +1540,7 @@ static const struct file_operations cpuset_file_operations = {
 	.release = cpuset_file_release,
 };
 
-static struct inode_operations cpuset_dir_inode_operations = {
+static const struct inode_operations cpuset_dir_inode_operations = {
 	.lookup = simple_lookup,
 	.mkdir = cpuset_mkdir,
 	.rmdir = cpuset_rmdir,
@@ -2656,7 +2656,7 @@ static int cpuset_open(struct inode *inode, struct file *file)
 	return single_open(file, proc_cpuset_show, pid);
 }
 
-struct file_operations proc_cpuset_operations = {
+const struct file_operations proc_cpuset_operations = {
 	.open = cpuset_open,
 	.read = seq_read,
 	.llseek = seq_lseek,
diff --git a/kernel/exit.c b/kernel/exit.c
index fec12eb12471..f132349c0325 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -185,21 +185,19 @@ repeat:
  * This checks not only the pgrp, but falls back on the pid if no
  * satisfactory pgrp is found. I dunno - gdb doesn't work correctly
  * without this...
+ *
+ * The caller must hold rcu lock or the tasklist lock.
  */
-int session_of_pgrp(int pgrp)
+struct pid *session_of_pgrp(struct pid *pgrp)
 {
 	struct task_struct *p;
-	int sid = 0;
-
-	read_lock(&tasklist_lock);
+	struct pid *sid = NULL;
 
-	p = find_task_by_pid_type(PIDTYPE_PGID, pgrp);
+	p = pid_task(pgrp, PIDTYPE_PGID);
 	if (p == NULL)
-		p = find_task_by_pid(pgrp);
+		p = pid_task(pgrp, PIDTYPE_PID);
 	if (p != NULL)
-		sid = process_session(p);
-
-	read_unlock(&tasklist_lock);
+		sid = task_session(p);
 
 	return sid;
 }
@@ -212,53 +210,52 @@ int session_of_pgrp(int pgrp)
  *
  * "I ask you, have you ever known what it is to be an orphan?"
  */
-static int will_become_orphaned_pgrp(int pgrp, struct task_struct *ignored_task)
+static int will_become_orphaned_pgrp(struct pid *pgrp, struct task_struct *ignored_task)
 {
 	struct task_struct *p;
 	int ret = 1;
 
-	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
+	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
 		if (p == ignored_task
 				|| p->exit_state
 				|| is_init(p->real_parent))
 			continue;
-		if (process_group(p->real_parent) != pgrp &&
-		    process_session(p->real_parent) == process_session(p)) {
+		if (task_pgrp(p->real_parent) != pgrp &&
+		    task_session(p->real_parent) == task_session(p)) {
 			ret = 0;
 			break;
 		}
-	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
+	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
 	return ret;	/* (sighing) "Often!" */
 }
 
-int is_orphaned_pgrp(int pgrp)
+int is_current_pgrp_orphaned(void)
 {
 	int retval;
 
 	read_lock(&tasklist_lock);
-	retval = will_become_orphaned_pgrp(pgrp, NULL);
+	retval = will_become_orphaned_pgrp(task_pgrp(current), NULL);
 	read_unlock(&tasklist_lock);
 
 	return retval;
 }
 
-static int has_stopped_jobs(int pgrp)
+static int has_stopped_jobs(struct pid *pgrp)
 {
 	int retval = 0;
 	struct task_struct *p;
 
-	do_each_task_pid(pgrp, PIDTYPE_PGID, p) {
+	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
 		if (p->state != TASK_STOPPED)
 			continue;
 		retval = 1;
 		break;
-	} while_each_task_pid(pgrp, PIDTYPE_PGID, p);
+	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
 	return retval;
 }
 
 /**
- * reparent_to_init - Reparent the calling kernel thread to the init task
- * of the pid space that the thread belongs to.
+ * reparent_to_init - Reparent the calling kernel thread to the init task of the pid space that the thread belongs to.
  *
  * If a kernel thread is launched as a result of a system call, or if
  * it ever exits, it should generally reparent itself to init so that
@@ -431,8 +428,10 @@ static void close_files(struct files_struct * files)
 		while (set) {
 			if (set & 1) {
 				struct file * file = xchg(&fdt->fd[i], NULL);
-				if (file)
+				if (file) {
 					filp_close(file, files);
+					cond_resched();
+				}
 			}
 			i++;
 			set >>= 1;
@@ -649,14 +648,14 @@ reparent_thread(struct task_struct *p, struct task_struct *father, int traced)
 	 * than we are, and it was the only connection
 	 * outside, so the child pgrp is now orphaned.
 	 */
-	if ((process_group(p) != process_group(father)) &&
-	    (process_session(p) == process_session(father))) {
-		int pgrp = process_group(p);
+	if ((task_pgrp(p) != task_pgrp(father)) &&
+	    (task_session(p) == task_session(father))) {
+		struct pid *pgrp = task_pgrp(p);
 
 		if (will_become_orphaned_pgrp(pgrp, NULL) &&
 		    has_stopped_jobs(pgrp)) {
-			__kill_pg_info(SIGHUP, SEND_SIG_PRIV, pgrp);
-			__kill_pg_info(SIGCONT, SEND_SIG_PRIV, pgrp);
+			__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
+			__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
 		}
 	}
 }
@@ -736,6 +735,7 @@ static void exit_notify(struct task_struct *tsk)
 	int state;
 	struct task_struct *t;
 	struct list_head ptrace_dead, *_p, *_n;
+	struct pid *pgrp;
 
 	if (signal_pending(tsk) && !(tsk->signal->flags & SIGNAL_GROUP_EXIT)
 	    && !thread_group_empty(tsk)) {
@@ -788,12 +788,13 @@ static void exit_notify(struct task_struct *tsk)
 
 	t = tsk->real_parent;
 
-	if ((process_group(t) != process_group(tsk)) &&
-	    (process_session(t) == process_session(tsk)) &&
-	    will_become_orphaned_pgrp(process_group(tsk), tsk) &&
-	    has_stopped_jobs(process_group(tsk))) {
-		__kill_pg_info(SIGHUP, SEND_SIG_PRIV, process_group(tsk));
-		__kill_pg_info(SIGCONT, SEND_SIG_PRIV, process_group(tsk));
+	pgrp = task_pgrp(tsk);
+	if ((task_pgrp(t) != pgrp) &&
+	    (task_session(t) != task_session(tsk)) &&
+	    will_become_orphaned_pgrp(pgrp, tsk) &&
+	    has_stopped_jobs(pgrp)) {
+		__kill_pgrp_info(SIGHUP, SEND_SIG_PRIV, pgrp);
+		__kill_pgrp_info(SIGCONT, SEND_SIG_PRIV, pgrp);
 	}
 
 	/* Let father know we died
diff --git a/kernel/fork.c b/kernel/fork.c
index d57118da73ff..0b6293d94d96 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -869,7 +869,7 @@ static inline int copy_signal(unsigned long clone_flags, struct task_struct * ts
 	sig->it_prof_incr = cputime_zero;
 
 	sig->leader = 0;	/* session leadership doesn't inherit */
-	sig->tty_old_pgrp = 0;
+	sig->tty_old_pgrp = NULL;
 
 	sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero;
 	sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
@@ -1038,10 +1038,12 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	p->utime = cputime_zero;
 	p->stime = cputime_zero;
 	p->sched_time = 0;
+#ifdef CONFIG_TASK_XACCT
 	p->rchar = 0;		/* I/O counter: bytes read */
 	p->wchar = 0;		/* I/O counter: bytes written */
 	p->syscr = 0;		/* I/O counter: read syscalls */
 	p->syscw = 0;		/* I/O counter: write syscalls */
+#endif
 	task_io_accounting_init(p);
 	acct_clear_integrals(p);
 
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index d0ba190dfeb6..f44e499e8fca 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -102,7 +102,7 @@ static DEFINE_PER_CPU(struct hrtimer_base, hrtimer_bases[MAX_HRTIMER_BASES]) =
  *
  * The function calculates the monotonic clock from the realtime
  * clock and the wall_to_monotonic offset and stores the result
- * in normalized timespec format in the variable pointed to by ts.
+ * in normalized timespec format in the variable pointed to by @ts.
  */
 void ktime_get_ts(struct timespec *ts)
 {
@@ -583,8 +583,8 @@ EXPORT_SYMBOL_GPL(hrtimer_init);
  * @which_clock: which clock to query
  * @tp:		 pointer to timespec variable to store the resolution
  *
- * Store the resolution of the clock selected by which_clock in the
- * variable pointed to by tp.
+ * Store the resolution of the clock selected by @which_clock in the
+ * variable pointed to by @tp.
  */
 int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
 {
diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile
index 1dab0ac3f797..681c52dbfe22 100644
--- a/kernel/irq/Makefile
+++ b/kernel/irq/Makefile
@@ -1,5 +1,5 @@
 
-obj-y := handle.o manage.o spurious.o resend.o chip.o
+obj-y := handle.o manage.o spurious.o resend.o chip.o devres.o
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index d27b25855743..475e8a71bcdc 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -39,6 +39,7 @@ void dynamic_irq_init(unsigned int irq)
 	desc->chip = &no_irq_chip;
 	desc->handle_irq = handle_bad_irq;
 	desc->depth = 1;
+	desc->msi_desc = NULL;
 	desc->handler_data = NULL;
 	desc->chip_data = NULL;
 	desc->action = NULL;
@@ -74,6 +75,9 @@ void dynamic_irq_cleanup(unsigned int irq)
 		WARN_ON(1);
 		return;
 	}
+	desc->msi_desc = NULL;
+	desc->handler_data = NULL;
+	desc->chip_data = NULL;
 	desc->handle_irq = handle_bad_irq;
 	desc->chip = &no_irq_chip;
 	spin_unlock_irqrestore(&desc->lock, flags);
@@ -162,6 +166,30 @@ int set_irq_data(unsigned int irq, void *data)
 EXPORT_SYMBOL(set_irq_data);
 
 /**
+ *	set_irq_data - set irq type data for an irq
+ *	@irq:	Interrupt number
+ *	@data:	Pointer to interrupt specific data
+ *
+ *	Set the hardware irq controller data for an irq
+ */
+int set_irq_msi(unsigned int irq, struct msi_desc *entry)
+{
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	if (irq >= NR_IRQS) {
+		printk(KERN_ERR
+		       "Trying to install msi data for IRQ%d\n", irq);
+		return -EINVAL;
+	}
+	desc = irq_desc + irq;
+	spin_lock_irqsave(&desc->lock, flags);
+	desc->msi_desc = entry;
+	spin_unlock_irqrestore(&desc->lock, flags);
+	return 0;
+}
+
+/**
  *	set_irq_chip_data - set irq chip data for an irq
  *	@irq:	Interrupt number
  *	@data:	Pointer to chip specific data
diff --git a/kernel/irq/devres.c b/kernel/irq/devres.c
new file mode 100644
index 000000000000..85a430da0fb6
--- /dev/null
+++ b/kernel/irq/devres.c
@@ -0,0 +1,88 @@
+#include <linux/module.h>
+#include <linux/interrupt.h>
+
+/*
+ * Device resource management aware IRQ request/free implementation.
+ */
+struct irq_devres {
+	unsigned int irq;
+	void *dev_id;
+};
+
+static void devm_irq_release(struct device *dev, void *res)
+{
+	struct irq_devres *this = res;
+
+	free_irq(this->irq, this->dev_id);
+}
+
+static int devm_irq_match(struct device *dev, void *res, void *data)
+{
+	struct irq_devres *this = res, *match = data;
+
+	return this->irq == match->irq && this->dev_id == match->dev_id;
+}
+
+/**
+ *	devm_request_irq - allocate an interrupt line for a managed device
+ *	@dev: device to request interrupt for
+ *	@irq: Interrupt line to allocate
+ *	@handler: Function to be called when the IRQ occurs
+ *	@irqflags: Interrupt type flags
+ *	@devname: An ascii name for the claiming device
+ *	@dev_id: A cookie passed back to the handler function
+ *
+ *	Except for the extra @dev argument, this function takes the
+ *	same arguments and performs the same function as
+ *	request_irq().  IRQs requested with this function will be
+ *	automatically freed on driver detach.
+ *
+ *	If an IRQ allocated with this function needs to be freed
+ *	separately, dev_free_irq() must be used.
+ */
+int devm_request_irq(struct device *dev, unsigned int irq,
+		     irq_handler_t handler, unsigned long irqflags,
+		     const char *devname, void *dev_id)
+{
+	struct irq_devres *dr;
+	int rc;
+
+	dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
+			  GFP_KERNEL);
+	if (!dr)
+		return -ENOMEM;
+
+	rc = request_irq(irq, handler, irqflags, devname, dev_id);
+	if (rc) {
+		kfree(dr);
+		return rc;
+	}
+
+	dr->irq = irq;
+	dr->dev_id = dev_id;
+	devres_add(dev, dr);
+
+	return 0;
+}
+EXPORT_SYMBOL(devm_request_irq);
+
+/**
+ *	devm_free_irq - free an interrupt
+ *	@dev: device to free interrupt for
+ *	@irq: Interrupt line to free
+ *	@dev_id: Device identity to free
+ *
+ *	Except for the extra @dev argument, this function takes the
+ *	same arguments and performs the same function as free_irq().
+ *	This function instead of free_irq() should be used to manually
+ *	free IRQs allocated with dev_request_irq().
+ */
+void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
+{
+	struct irq_devres match_data = { irq, dev_id };
+
+	free_irq(irq, dev_id);
+	WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
+			       &match_data));
+}
+EXPORT_SYMBOL(devm_free_irq);
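
The kerneldoc above explains the managed API; for context, a hedged sketch of how a driver might use it. The devres core frees the IRQ automatically on driver detach, so the error and remove paths need no explicit free_irq(). The foo_* names are illustrative, not part of the patch:

#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	/* acknowledge the device and do the real work here */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	int irq = platform_get_irq(pdev, 0);
	int ret;

	if (irq < 0)
		return irq;

	/* freed automatically when the driver detaches from the device */
	ret = devm_request_irq(&pdev->dev, irq, foo_interrupt,
			       IRQF_SHARED, "foo", pdev);
	if (ret)
		return ret;	/* nothing to unwind by hand */

	return 0;
}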
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8b961adc3bd2..7c85d69188ef 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -328,12 +328,14 @@ int setup_irq(unsigned int irq, struct irqaction *new)
 	return 0;
 
 mismatch:
+#ifdef CONFIG_DEBUG_SHIRQ
 	if (!(new->flags & IRQF_PROBE_SHARED)) {
 		printk(KERN_ERR "IRQ handler type mismatch for IRQ %d\n", irq);
 		if (old_name)
 			printk(KERN_ERR "current handler: %s\n", old_name);
 		dump_stack();
 	}
+#endif
 	spin_unlock_irqrestore(&desc->lock, flags);
 	return -EBUSY;
 }
@@ -357,6 +359,7 @@ void free_irq(unsigned int irq, void *dev_id)
 	struct irq_desc *desc;
 	struct irqaction **p;
 	unsigned long flags;
+	irqreturn_t (*handler)(int, void *) = NULL;
 
 	WARN_ON(in_interrupt());
 	if (irq >= NR_IRQS)
@@ -396,6 +399,8 @@ void free_irq(unsigned int irq, void *dev_id)
 
 			/* Make sure it's not being used on another CPU */
 			synchronize_irq(irq);
+			if (action->flags & IRQF_SHARED)
+				handler = action->handler;
 			kfree(action);
 			return;
 		}
@@ -403,6 +408,17 @@ void free_irq(unsigned int irq, void *dev_id)
 		spin_unlock_irqrestore(&desc->lock, flags);
 		return;
 	}
+#ifdef CONFIG_DEBUG_SHIRQ
+	if (handler) {
+		/*
+		 * It's a shared IRQ -- the driver ought to be prepared for it
+		 * to happen even now it's being freed, so let's make sure....
+		 * We do this after actually deregistering it, to make sure that
+		 * a 'real' IRQ doesn't run in parallel with our fake
+		 */
+		handler(irq, dev_id);
+	}
+#endif
 }
 EXPORT_SYMBOL(free_irq);
 
@@ -475,6 +491,25 @@ int request_irq(unsigned int irq, irq_handler_t handler,
 
 	select_smp_affinity(irq);
 
+#ifdef CONFIG_DEBUG_SHIRQ
+	if (irqflags & IRQF_SHARED) {
+		/*
+		 * It's a shared IRQ -- the driver ought to be prepared for it
+		 * to happen immediately, so let's make sure....
+		 * We do this before actually registering it, to make sure that
+		 * a 'real' IRQ doesn't run in parallel with our fake
+		 */
+		if (irqflags & IRQF_DISABLED) {
+			unsigned long flags;
+
+			local_irq_save(flags);
+			handler(irq, dev_id);
+			local_irq_restore(flags);
+		} else
+			handler(irq, dev_id);
+	}
+#endif
+
 	retval = setup_irq(irq, action);
 	if (retval)
 		kfree(action);
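
In driver terms, what these injected calls exercise: a shared-IRQ handler must tolerate being invoked when its device has raised nothing, including the fake invocations above at request_irq() and free_irq() time. A hedged sketch of such a handler; the foo_* names and register offsets are illustrative assumptions:

#include <linux/interrupt.h>
#include <linux/io.h>

#define FOO_IRQ_STATUS	0x00	/* illustrative register offsets */
#define FOO_IRQ_ACK	0x04

struct foo_device {
	void __iomem *regs;
};

static irqreturn_t foo_shared_handler(int irq, void *dev_id)
{
	struct foo_device *foo = dev_id;
	u32 status = readl(foo->regs + FOO_IRQ_STATUS);

	if (!status)
		return IRQ_NONE;	/* not ours -- or a DEBUG_SHIRQ fake */

	writel(status, foo->regs + FOO_IRQ_ACK);	/* quiesce the device */
	return IRQ_HANDLED;
}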
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 61f5c717a8f5..6d3be06e8ce6 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -136,7 +136,6 @@ void register_irq_proc(unsigned int irq)
 	entry = create_proc_entry("smp_affinity", 0600, irq_desc[irq].dir);
 
 	if (entry) {
-		entry->nlink = 1;
 		entry->data = (void *)(long)irq;
 		entry->read_proc = irq_affinity_read_proc;
 		entry->write_proc = irq_affinity_write_proc;
diff --git a/kernel/kfifo.c b/kernel/kfifo.c
index 5d1d907378a2..cee419143fd4 100644
--- a/kernel/kfifo.c
+++ b/kernel/kfifo.c
@@ -32,8 +32,8 @@
  * @gfp_mask: get_free_pages mask, passed to kmalloc()
  * @lock: the lock to be used to protect the fifo buffer
  *
- * Do NOT pass the kfifo to kfifo_free() after use ! Simply free the
- * struct kfifo with kfree().
+ * Do NOT pass the kfifo to kfifo_free() after use! Simply free the
+ * &struct kfifo with kfree().
  */
 struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
 			 gfp_t gfp_mask, spinlock_t *lock)
@@ -108,7 +108,7 @@ EXPORT_SYMBOL(kfifo_free);
  * @buffer: the data to be added.
  * @len: the length of the data to be added.
  *
- * This function copies at most 'len' bytes from the 'buffer' into
+ * This function copies at most @len bytes from the @buffer into
  * the FIFO depending on the free space, and returns the number of
  * bytes copied.
  *
@@ -155,8 +155,8 @@ EXPORT_SYMBOL(__kfifo_put);
  * @buffer: where the data must be copied.
  * @len: the size of the destination buffer.
  *
- * This function copies at most 'len' bytes from the FIFO into the
- * 'buffer' and returns the number of copied bytes.
+ * This function copies at most @len bytes from the FIFO into the
+ * @buffer and returns the number of copied bytes.
  *
  * Note that with only one concurrent reader and one concurrent
  * writer, you don't need extra locking to use these functions.
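
As that last note says, a single reader plus a single writer can use the double-underscore variants without taking the spinlock. A hedged usage sketch against the 2.6.20-era kfifo API (the lock is still supplied to kfifo_alloc() for the locked wrappers); fifo_* names are illustrative:

#include <linux/err.h>
#include <linux/kfifo.h>
#include <linux/spinlock.h>

static DEFINE_SPINLOCK(fifo_lock);
static struct kfifo *fifo;

static int fifo_setup(void)
{
	fifo = kfifo_alloc(4096, GFP_KERNEL, &fifo_lock);
	return IS_ERR(fifo) ? PTR_ERR(fifo) : 0;
}

/* one producer: the lock-free put is safe with a single concurrent writer */
static void fifo_produce(unsigned char *buf, unsigned int len)
{
	__kfifo_put(fifo, buf, len);
}

/* one consumer: the lock-free get is safe with a single concurrent reader */
static unsigned int fifo_consume(unsigned char *buf, unsigned int len)
{
	return __kfifo_get(fifo, buf, len);
}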
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 1db8c72d0d38..87c50ccd1d4e 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -50,7 +50,7 @@ static struct kthread_stop_info kthread_stop_info;
 /**
  * kthread_should_stop - should this kthread return now?
  *
- * When someone calls kthread_stop on your kthread, it will be woken
+ * When someone calls kthread_stop() on your kthread, it will be woken
  * and this will return true.  You should then return, and your return
  * value will be passed through to kthread_stop().
  */
@@ -143,7 +143,7 @@ static void keventd_create_kthread(struct work_struct *work)
  * it.  See also kthread_run(), kthread_create_on_cpu().
  *
  * When woken, the thread will run @threadfn() with @data as its
- * argument. @threadfn can either call do_exit() directly if it is a
+ * argument. @threadfn() can either call do_exit() directly if it is a
  * standalone thread for which noone will call kthread_stop(), or
  * return when 'kthread_should_stop()' is true (which means
  * kthread_stop() has been called).  The return value should be zero
@@ -192,7 +192,7 @@ EXPORT_SYMBOL(kthread_create);
  *
  * Description: This function is equivalent to set_cpus_allowed(),
  * except that @cpu doesn't need to be online, and the thread must be
- * stopped (i.e., just returned from kthread_create().
+ * stopped (i.e., just returned from kthread_create()).
  */
 void kthread_bind(struct task_struct *k, unsigned int cpu)
 {
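
The stop protocol described in the comments above, as a hedged sketch: the thread polls kthread_should_stop() and its return value travels back through kthread_stop(). The worker_* names are illustrative:

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/kthread.h>

static struct task_struct *worker;

static int worker_fn(void *data)
{
	while (!kthread_should_stop()) {
		/* do one unit of work, then yield for a while */
		msleep(100);
	}
	return 0;	/* handed back to kthread_stop() */
}

static int worker_start(void)
{
	worker = kthread_run(worker_fn, NULL, "worker");
	return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void worker_stop(void)
{
	kthread_stop(worker);	/* wakes the thread and waits for it to exit */
}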
diff --git a/kernel/lockdep.c b/kernel/lockdep.c
index 509efd49540f..592c576d77a7 100644
--- a/kernel/lockdep.c
+++ b/kernel/lockdep.c
@@ -70,6 +70,9 @@ static int graph_lock(void)
 
 static inline int graph_unlock(void)
 {
+	if (debug_locks && !__raw_spin_is_locked(&lockdep_lock))
+		return DEBUG_LOCKS_WARN_ON(1);
+
 	__raw_spin_unlock(&lockdep_lock);
 	return 0;
 }
@@ -487,7 +490,7 @@ static void print_lock_dependencies(struct lock_class *class, int depth)
  * Add a new dependency to the head of the list:
  */
 static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
-			    struct list_head *head, unsigned long ip)
+			    struct list_head *head, unsigned long ip, int distance)
 {
 	struct lock_list *entry;
 	/*
@@ -499,6 +502,7 @@ static int add_lock_to_list(struct lock_class *class, struct lock_class *this,
 		return 0;
 
 	entry->class = this;
+	entry->distance = distance;
 	if (!save_trace(&entry->trace))
 		return 0;
 
@@ -712,6 +716,9 @@ find_usage_backwards(struct lock_class *source, unsigned int depth)
 	struct lock_list *entry;
 	int ret;
 
+	if (!__raw_spin_is_locked(&lockdep_lock))
+		return DEBUG_LOCKS_WARN_ON(1);
+
 	if (depth > max_recursion_depth)
 		max_recursion_depth = depth;
 	if (depth >= RECURSION_LIMIT)
@@ -900,7 +907,7 @@ check_deadlock(struct task_struct *curr, struct held_lock *next,
  */
 static int
 check_prev_add(struct task_struct *curr, struct held_lock *prev,
-	       struct held_lock *next)
+	       struct held_lock *next, int distance)
 {
 	struct lock_list *entry;
 	int ret;
@@ -978,8 +985,11 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * L2 added to its dependency list, due to the first chain.)
 	 */
 	list_for_each_entry(entry, &prev->class->locks_after, entry) {
-		if (entry->class == next->class)
+		if (entry->class == next->class) {
+			if (distance == 1)
+				entry->distance = 1;
 			return 2;
+		}
 	}
 
 	/*
@@ -987,12 +997,13 @@ check_prev_add(struct task_struct *curr, struct held_lock *prev,
 	 * to the previous lock's dependency list:
 	 */
 	ret = add_lock_to_list(prev->class, next->class,
-			       &prev->class->locks_after, next->acquire_ip);
+			       &prev->class->locks_after, next->acquire_ip, distance);
+
 	if (!ret)
 		return 0;
 
 	ret = add_lock_to_list(next->class, prev->class,
-			       &next->class->locks_before, next->acquire_ip);
+			       &next->class->locks_before, next->acquire_ip, distance);
 	if (!ret)
 		return 0;
 
@@ -1040,13 +1051,14 @@ check_prevs_add(struct task_struct *curr, struct held_lock *next)
 		goto out_bug;
 
 	for (;;) {
+		int distance = curr->lockdep_depth - depth + 1;
 		hlock = curr->held_locks + depth-1;
 		/*
 		 * Only non-recursive-read entries get new dependencies
 		 * added:
 		 */
 		if (hlock->read != 2) {
-			if (!check_prev_add(curr, hlock, next))
+			if (!check_prev_add(curr, hlock, next, distance))
 				return 0;
 			/*
 			 * Stop after the first non-trylock entry,
@@ -1293,7 +1305,8 @@ out_unlock_set:
 	if (!subclass || force)
 		lock->class_cache = class;
 
-	DEBUG_LOCKS_WARN_ON(class->subclass != subclass);
+	if (DEBUG_LOCKS_WARN_ON(class->subclass != subclass))
+		return NULL;
 
 	return class;
 }
@@ -1308,7 +1321,8 @@ static inline int lookup_chain_cache(u64 chain_key, struct lock_class *class)
 	struct list_head *hash_head = chainhashentry(chain_key);
 	struct lock_chain *chain;
 
-	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+		return 0;
 	/*
 	 * We can walk it lock-free, because entries only get added
 	 * to the hash:
@@ -1394,7 +1408,9 @@ static void check_chain_key(struct task_struct *curr)
 			return;
 		}
 		id = hlock->class - lock_classes;
-		DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS);
+		if (DEBUG_LOCKS_WARN_ON(id >= MAX_LOCKDEP_KEYS))
+			return;
+
 		if (prev_hlock && (prev_hlock->irq_context !=
 					hlock->irq_context))
 			chain_key = 0;
@@ -2205,7 +2221,11 @@ out_calc_hash:
 			if (!check_prevs_add(curr, hlock))
 				return 0;
 		graph_unlock();
-	}
+	} else
+		/* after lookup_chain_cache(): */
+		if (unlikely(!debug_locks))
+			return 0;
+
 	curr->lockdep_depth++;
 	check_chain_key(curr);
 	if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) {
@@ -2214,6 +2234,7 @@ out_calc_hash:
 		printk("turning off the locking correctness validator.\n");
 		return 0;
 	}
+
 	if (unlikely(curr->lockdep_depth > max_lockdep_depth))
 		max_lockdep_depth = curr->lockdep_depth;
 
@@ -2764,4 +2785,3 @@ void debug_show_held_locks(struct task_struct *task)
 }
 
 EXPORT_SYMBOL_GPL(debug_show_held_locks);
-
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index b554b40a4aa6..88fc611b3ae9 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -77,12 +77,29 @@ static unsigned long count_backward_deps(struct lock_class *class)
 	return ret;
 }
 
+static void print_name(struct seq_file *m, struct lock_class *class)
+{
+	char str[128];
+	const char *name = class->name;
+
+	if (!name) {
+		name = __get_key_name(class->key, str);
+		seq_printf(m, "%s", name);
+	} else{
+		seq_printf(m, "%s", name);
+		if (class->name_version > 1)
+			seq_printf(m, "#%d", class->name_version);
+		if (class->subclass)
+			seq_printf(m, "/%d", class->subclass);
+	}
+}
+
 static int l_show(struct seq_file *m, void *v)
 {
 	unsigned long nr_forward_deps, nr_backward_deps;
 	struct lock_class *class = m->private;
-	char str[128], c1, c2, c3, c4;
-	const char *name;
+	struct lock_list *entry;
+	char c1, c2, c3, c4;
 
 	seq_printf(m, "%p", class->key);
 #ifdef CONFIG_DEBUG_LOCKDEP
@@ -97,16 +114,16 @@ static int l_show(struct seq_file *m, void *v)
 	get_usage_chars(class, &c1, &c2, &c3, &c4);
 	seq_printf(m, " %c%c%c%c", c1, c2, c3, c4);
 
-	name = class->name;
-	if (!name) {
-		name = __get_key_name(class->key, str);
-		seq_printf(m, ": %s", name);
-	} else{
-		seq_printf(m, ": %s", name);
-		if (class->name_version > 1)
-			seq_printf(m, "#%d", class->name_version);
-		if (class->subclass)
-			seq_printf(m, "/%d", class->subclass);
-	}
+	seq_printf(m, ": ");
+	print_name(m, class);
+	seq_puts(m, "\n");
+
+	list_for_each_entry(entry, &class->locks_after, entry) {
+		if (entry->distance == 1) {
+			seq_printf(m, " -> [%p] ", entry->class);
+			print_name(m, entry->class);
+			seq_puts(m, "\n");
+		}
+	}
 	seq_puts(m, "\n");
 
@@ -227,7 +244,7 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 
 		sum_forward_deps += count_forward_deps(class);
 	}
-#ifdef CONFIG_LOCKDEP_DEBUG
+#ifdef CONFIG_DEBUG_LOCKDEP
 	DEBUG_LOCKS_WARN_ON(debug_atomic_read(&nr_unused_locks) != nr_unused);
 #endif
 	seq_printf(m, " lock-classes: %11lu [max: %lu]\n",
diff --git a/kernel/module.c b/kernel/module.c
index d0f2260a0210..8a94e054230c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -537,6 +537,8 @@ static int already_uses(struct module *a, struct module *b)
 static int use_module(struct module *a, struct module *b)
 {
 	struct module_use *use;
+	int no_warn;
+
 	if (b == NULL || already_uses(a, b)) return 1;
 
 	if (!strong_try_module_get(b))
@@ -552,6 +554,7 @@ static int use_module(struct module *a, struct module *b)
 
 	use->module_which_uses = a;
 	list_add(&use->list, &b->modules_which_use_me);
+	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
 	return 1;
 }
 
@@ -569,6 +572,7 @@ static void module_unload_free(struct module *mod)
 			module_put(i);
 			list_del(&use->list);
 			kfree(use);
+			sysfs_remove_link(i->holders_dir, mod->name);
 			/* There can be at most one match. */
 			break;
 		}
@@ -1106,9 +1110,7 @@ static void module_remove_modinfo_attrs(struct module *mod)
 	kfree(mod->modinfo_attrs);
 }
 
-static int mod_sysfs_setup(struct module *mod,
-			   struct kernel_param *kparam,
-			   unsigned int num_params)
+static int mod_sysfs_init(struct module *mod)
 {
 	int err;
 
@@ -1125,21 +1127,30 @@ static int mod_sysfs_setup(struct module *mod,
 	kobj_set_kset_s(&mod->mkobj, module_subsys);
 	mod->mkobj.mod = mod;
 
-	/* delay uevent until full sysfs population */
 	kobject_init(&mod->mkobj.kobj);
+
+out:
+	return err;
+}
+
+static int mod_sysfs_setup(struct module *mod,
+			   struct kernel_param *kparam,
+			   unsigned int num_params)
+{
+	int err;
+
+	/* delay uevent until full sysfs population */
 	err = kobject_add(&mod->mkobj.kobj);
 	if (err)
 		goto out;
 
-	mod->drivers_dir = kobject_add_dir(&mod->mkobj.kobj, "drivers");
-	if (!mod->drivers_dir) {
-		err = -ENOMEM;
+	mod->holders_dir = kobject_add_dir(&mod->mkobj.kobj, "holders");
+	if (!mod->holders_dir)
 		goto out_unreg;
-	}
 
 	err = module_param_sysfs_setup(mod, kparam, num_params);
 	if (err)
-		goto out_unreg_drivers;
+		goto out_unreg_holders;
 
 	err = module_add_modinfo_attrs(mod);
 	if (err)
@@ -1150,8 +1161,8 @@ static int mod_sysfs_setup(struct module *mod,
 
 out_unreg_param:
 	module_param_sysfs_remove(mod);
-out_unreg_drivers:
-	kobject_unregister(mod->drivers_dir);
+out_unreg_holders:
+	kobject_unregister(mod->holders_dir);
 out_unreg:
 	kobject_del(&mod->mkobj.kobj);
 	kobject_put(&mod->mkobj.kobj);
@@ -1163,7 +1174,10 @@ static void mod_kobject_remove(struct module *mod)
 {
 	module_remove_modinfo_attrs(mod);
 	module_param_sysfs_remove(mod);
-	kobject_unregister(mod->drivers_dir);
+	if (mod->mkobj.drivers_dir)
+		kobject_unregister(mod->mkobj.drivers_dir);
+	if (mod->holders_dir)
+		kobject_unregister(mod->holders_dir);
 
 	kobject_unregister(&mod->mkobj.kobj);
 }
@@ -1768,6 +1782,10 @@ static struct module *load_module(void __user *umod,
 	/* Now we've moved module, initialize linked lists, etc. */
 	module_unload_init(mod);
 
+	/* Initialize kobject, so we can reference it. */
+	if (mod_sysfs_init(mod) != 0)
+		goto cleanup;
+
 	/* Set up license info based on the info section */
 	set_license(mod, get_modinfo(sechdrs, infoindex, "license"));
 
@@ -2340,19 +2358,43 @@ static char *make_driver_name(struct device_driver *drv)
 	return driver_name;
 }
 
+static void module_create_drivers_dir(struct module_kobject *mk)
+{
+	if (!mk || mk->drivers_dir)
+		return;
+
+	mk->drivers_dir = kobject_add_dir(&mk->kobj, "drivers");
+}
+
 void module_add_driver(struct module *mod, struct device_driver *drv)
 {
 	char *driver_name;
 	int no_warn;
+	struct module_kobject *mk = NULL;
+
+	if (!drv)
+		return;
+
+	if (mod)
+		mk = &mod->mkobj;
+	else if (drv->mod_name) {
+		struct kobject *mkobj;
+
+		/* Lookup built-in module entry in /sys/modules */
+		mkobj = kset_find_obj(&module_subsys.kset, drv->mod_name);
+		if (mkobj)
+			mk = container_of(mkobj, struct module_kobject, kobj);
+	}
 
-	if (!mod || !drv)
+	if (!mk)
 		return;
 
 	/* Don't check return codes; these calls are idempotent */
-	no_warn = sysfs_create_link(&drv->kobj, &mod->mkobj.kobj, "module");
+	no_warn = sysfs_create_link(&drv->kobj, &mk->kobj, "module");
 	driver_name = make_driver_name(drv);
 	if (driver_name) {
-		no_warn = sysfs_create_link(mod->drivers_dir, &drv->kobj,
+		module_create_drivers_dir(mk);
+		no_warn = sysfs_create_link(mk->drivers_dir, &drv->kobj,
 					    driver_name);
 		kfree(driver_name);
 	}
@@ -2367,10 +2409,10 @@ void module_remove_driver(struct device_driver *drv)
 		return;
 
 	sysfs_remove_link(&drv->kobj, "module");
-	if (drv->owner && drv->owner->drivers_dir) {
+	if (drv->owner && drv->owner->mkobj.drivers_dir) {
 		driver_name = make_driver_name(drv);
 		if (driver_name) {
-			sysfs_remove_link(drv->owner->drivers_dir,
+			sysfs_remove_link(drv->owner->mkobj.drivers_dir,
 					  driver_name);
 			kfree(driver_name);
 		}
diff --git a/kernel/panic.c b/kernel/panic.c
index 525e365f7239..623d1828259a 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -150,6 +150,7 @@ EXPORT_SYMBOL(panic);
  *  'R' - User forced a module unload.
  *  'M' - Machine had a machine check experience.
  *  'B' - System has hit bad_page.
+ *  'U' - Userspace-defined naughtiness.
  *
  *	The string is overwritten by the next call to print_taint().
  */
@@ -158,13 +159,14 @@ const char *print_tainted(void)
 {
 	static char buf[20];
 	if (tainted) {
-		snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c",
+		snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c",
 			tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G',
 			tainted & TAINT_FORCED_MODULE ? 'F' : ' ',
 			tainted & TAINT_UNSAFE_SMP ? 'S' : ' ',
 			tainted & TAINT_FORCED_RMMOD ? 'R' : ' ',
 			tainted & TAINT_MACHINE_CHECK ? 'M' : ' ',
-			tainted & TAINT_BAD_PAGE ? 'B' : ' ');
+			tainted & TAINT_BAD_PAGE ? 'B' : ' ',
+			tainted & TAINT_USER ? 'U' : ' ');
 	}
 	else
 		snprintf(buf, sizeof(buf), "Not tainted");
diff --git a/kernel/params.c b/kernel/params.c
index 718945da8f58..553cf7d6a4be 100644
--- a/kernel/params.c
+++ b/kernel/params.c
@@ -30,6 +30,8 @@
 #define DEBUGP(fmt, a...)
 #endif
 
+static struct kobj_type module_ktype;
+
 static inline char dash2underscore(char c)
 {
 	if (c == '-')
@@ -561,14 +563,11 @@ static void __init kernel_param_sysfs_setup(const char *name,
 	mk->mod = THIS_MODULE;
 	kobj_set_kset_s(mk, module_subsys);
 	kobject_set_name(&mk->kobj, name);
-	ret = kobject_register(&mk->kobj);
+	kobject_init(&mk->kobj);
+	ret = kobject_add(&mk->kobj);
 	BUG_ON(ret < 0);
-
-	/* no need to keep the kobject if no parameter is exported */
-	if (!param_sysfs_setup(mk, kparam, num_params, name_skip)) {
-		kobject_unregister(&mk->kobj);
-		kfree(mk);
-	}
+	param_sysfs_setup(mk, kparam, num_params, name_skip);
+	kobject_uevent(&mk->kobj, KOBJ_ADD);
 }
 
 /*
@@ -674,6 +673,19 @@ static struct sysfs_ops module_sysfs_ops = {
 	.store = module_attr_store,
 };
 
+static int uevent_filter(struct kset *kset, struct kobject *kobj)
+{
+	struct kobj_type *ktype = get_ktype(kobj);
+
+	if (ktype == &module_ktype)
+		return 1;
+	return 0;
+}
+
+static struct kset_uevent_ops module_uevent_ops = {
+	.filter = uevent_filter,
+};
+
 #else
 static struct sysfs_ops module_sysfs_ops = {
 	.show = NULL,
@@ -685,7 +697,7 @@ static struct kobj_type module_ktype = {
 	.sysfs_ops = &module_sysfs_ops,
 };
 
-decl_subsys(module, &module_ktype, NULL);
+decl_subsys(module, &module_ktype, &module_uevent_ops);
 
 /*
  * param_sysfs_init - wrapper for built-in params support
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5fe87de10ff0..a1bf61617839 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -399,10 +399,9 @@ EXPORT_SYMBOL_GPL(register_posix_clock);
 static struct k_itimer * alloc_posix_timer(void)
 {
 	struct k_itimer *tmr;
-	tmr = kmem_cache_alloc(posix_timers_cache, GFP_KERNEL);
+	tmr = kmem_cache_zalloc(posix_timers_cache, GFP_KERNEL);
 	if (!tmr)
 		return tmr;
-	memset(tmr, 0, sizeof (struct k_itimer));
 	if (unlikely(!(tmr->sigq = sigqueue_alloc()))) {
 		kmem_cache_free(posix_timers_cache, tmr);
 		tmr = NULL;
diff --git a/kernel/power/Kconfig b/kernel/power/Kconfig
index ed296225dcd4..95f6657fff73 100644
--- a/kernel/power/Kconfig
+++ b/kernel/power/Kconfig
@@ -131,3 +131,29 @@ config SUSPEND_SMP
 	bool
 	depends on HOTPLUG_CPU && X86 && PM
 	default y
+
+config APM_EMULATION
+	tristate "Advanced Power Management Emulation"
+	depends on PM && SYS_SUPPORTS_APM_EMULATION
+	help
+	  APM is a BIOS specification for saving power using several different
+	  techniques. This is mostly useful for battery powered laptops with
+	  APM compliant BIOSes. If you say Y here, the system time will be
+	  reset after a RESUME operation, the /proc/apm device will provide
+	  battery status information, and user-space programs will receive
+	  notification of APM "events" (e.g. battery status change).
+
+	  In order to use APM, you will need supporting software. For location
+	  and more information, read <file:Documentation/pm.txt> and the
+	  Battery Powered Linux mini-HOWTO, available from
+	  <http://www.tldp.org/docs.html#howto>.
+
+	  This driver does not spin down disk drives (see the hdparm(8)
+	  manpage ("man 8 hdparm") for that), and it doesn't turn off
+	  VESA-compliant "green" monitors.
+
+	  Generally, if you don't have a battery in your machine, there isn't
+	  much point in using this driver and you should say N. If you get
+	  random kernel OOPSes or reboots that don't seem to be related to
+	  anything, try disabling/enabling this option (or disabling/enabling
+	  APM in your BIOS).
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index 88fc5d7ac737..406b20adb27a 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -87,52 +87,24 @@ static inline void platform_finish(void)
 	}
 }
 
+static void unprepare_processes(void)
+{
+	thaw_processes();
+	pm_restore_console();
+}
+
 static int prepare_processes(void)
 {
 	int error = 0;
 
 	pm_prepare_console();
-
-	error = disable_nonboot_cpus();
-	if (error)
-		goto enable_cpus;
-
 	if (freeze_processes()) {
 		error = -EBUSY;
-		goto thaw;
+		unprepare_processes();
 	}
-
-	if (pm_disk_mode == PM_DISK_TESTPROC) {
-		printk("swsusp debug: Waiting for 5 seconds.\n");
-		mdelay(5000);
-		goto thaw;
-	}
-
-	error = platform_prepare();
-	if (error)
-		goto thaw;
-
-	/* Free memory before shutting down devices. */
-	if (!(error = swsusp_shrink_memory()))
-		return 0;
-
-	platform_finish();
- thaw:
-	thaw_processes();
- enable_cpus:
-	enable_nonboot_cpus();
-	pm_restore_console();
 	return error;
 }
 
-static void unprepare_processes(void)
-{
-	platform_finish();
-	thaw_processes();
-	enable_nonboot_cpus();
-	pm_restore_console();
-}
-
 /**
  * pm_suspend_disk - The granpappy of hibernation power management.
  *
@@ -150,29 +122,45 @@ int pm_suspend_disk(void)
 	if (error)
 		return error;
 
-	if (pm_disk_mode == PM_DISK_TESTPROC)
-		return 0;
+	if (pm_disk_mode == PM_DISK_TESTPROC) {
+		printk("swsusp debug: Waiting for 5 seconds.\n");
+		mdelay(5000);
+		goto Thaw;
+	}
+	/* Free memory before shutting down devices. */
+	error = swsusp_shrink_memory();
+	if (error)
+		goto Thaw;
+
+	error = platform_prepare();
+	if (error)
+		goto Thaw;
 
 	suspend_console();
 	error = device_suspend(PMSG_FREEZE);
 	if (error) {
-		resume_console();
-		printk("Some devices failed to suspend\n");
-		goto Thaw;
+		printk(KERN_ERR "PM: Some devices failed to suspend\n");
+		goto Resume_devices;
 	}
+	error = disable_nonboot_cpus();
+	if (error)
+		goto Enable_cpus;
 
 	if (pm_disk_mode == PM_DISK_TEST) {
 		printk("swsusp debug: Waiting for 5 seconds.\n");
 		mdelay(5000);
-		goto Done;
+		goto Enable_cpus;
 	}
 
 	pr_debug("PM: snapshotting memory.\n");
 	in_suspend = 1;
-	if ((error = swsusp_suspend()))
-		goto Done;
+	error = swsusp_suspend();
+	if (error)
+		goto Enable_cpus;
 
 	if (in_suspend) {
+		enable_nonboot_cpus();
+		platform_finish();
 		device_resume();
 		resume_console();
 		pr_debug("PM: writing image.\n");
@@ -188,7 +176,10 @@ int pm_suspend_disk(void)
188 } 176 }
189 177
190 swsusp_free(); 178 swsusp_free();
191 Done: 179 Enable_cpus:
180 enable_nonboot_cpus();
181 Resume_devices:
182 platform_finish();
192 device_resume(); 183 device_resume();
193 resume_console(); 184 resume_console();
194 Thaw: 185 Thaw:
@@ -237,19 +228,28 @@ static int software_resume(void)
237 228
238 pr_debug("PM: Checking swsusp image.\n"); 229 pr_debug("PM: Checking swsusp image.\n");
239 230
240 if ((error = swsusp_check())) 231 error = swsusp_check();
232 if (error)
241 goto Done; 233 goto Done;
242 234
243 pr_debug("PM: Preparing processes for restore.\n"); 235 pr_debug("PM: Preparing processes for restore.\n");
244 236
245 if ((error = prepare_processes())) { 237 error = prepare_processes();
238 if (error) {
246 swsusp_close(); 239 swsusp_close();
247 goto Done; 240 goto Done;
248 } 241 }
249 242
243 error = platform_prepare();
244 if (error) {
245 swsusp_free();
246 goto Thaw;
247 }
248
250 pr_debug("PM: Reading swsusp image.\n"); 249 pr_debug("PM: Reading swsusp image.\n");
251 250
252 if ((error = swsusp_read())) { 251 error = swsusp_read();
252 if (error) {
253 swsusp_free(); 253 swsusp_free();
254 goto Thaw; 254 goto Thaw;
255 } 255 }
@@ -257,21 +257,22 @@ static int software_resume(void)
257 pr_debug("PM: Preparing devices for restore.\n"); 257 pr_debug("PM: Preparing devices for restore.\n");
258 258
259 suspend_console(); 259 suspend_console();
260 if ((error = device_suspend(PMSG_PRETHAW))) { 260 error = device_suspend(PMSG_PRETHAW);
261 resume_console(); 261 if (error)
262 printk("Some devices failed to suspend\n"); 262 goto Free;
263 swsusp_free();
264 goto Thaw;
265 }
266 263
267 mb(); 264 error = disable_nonboot_cpus();
265 if (!error)
266 swsusp_resume();
268 267
269 pr_debug("PM: Restoring saved image.\n"); 268 enable_nonboot_cpus();
270 swsusp_resume(); 269 Free:
271 pr_debug("PM: Restore failed, recovering.n"); 270 swsusp_free();
271 platform_finish();
272 device_resume(); 272 device_resume();
273 resume_console(); 273 resume_console();
274 Thaw: 274 Thaw:
275 printk(KERN_ERR "PM: Restore failed, recovering.\n");
275 unprepare_processes(); 276 unprepare_processes();
276 Done: 277 Done:
277 /* For success case, the suspend path will release the lock */ 278 /* For success case, the suspend path will release the lock */
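
A note on the disk.c rework above: devices are now suspended before the
nonboot CPUs are taken offline, and each error label unwinds exactly one
setup step, in reverse order. A condensed sketch of the resulting control
flow (function and label names are from the diff; the wrapper name and
comments are mine):

/* Condensed sketch of pm_suspend_disk() after this patch. */
static int suspend_disk_flow_sketch(void)
{
        int error;

        error = prepare_processes();            /* console + freezer */
        if (error)
                return error;
        error = swsusp_shrink_memory();         /* free memory first */
        if (error)
                goto Thaw;
        error = platform_prepare();             /* notify the firmware */
        if (error)
                goto Thaw;
        suspend_console();
        error = device_suspend(PMSG_FREEZE);    /* devices before CPUs */
        if (error)
                goto Resume_devices;
        error = disable_nonboot_cpus();         /* CPUs last */
        if (error)
                goto Enable_cpus;
        /* ... take the snapshot via swsusp_suspend() ... */
 Enable_cpus:
        enable_nonboot_cpus();
 Resume_devices:
        platform_finish();
        device_resume();
        resume_console();
 Thaw:
        unprepare_processes();
        return error;
}
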
diff --git a/kernel/power/main.c b/kernel/power/main.c
index ff3a6182f5f0..e1c413120469 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -20,6 +20,7 @@
20#include <linux/cpu.h> 20#include <linux/cpu.h>
21#include <linux/resume-trace.h> 21#include <linux/resume-trace.h>
22#include <linux/freezer.h> 22#include <linux/freezer.h>
23#include <linux/vmstat.h>
23 24
24#include "power.h" 25#include "power.h"
25 26
@@ -43,6 +44,11 @@ void pm_set_ops(struct pm_ops * ops)
43 mutex_unlock(&pm_mutex); 44 mutex_unlock(&pm_mutex);
44} 45}
45 46
47static inline void pm_finish(suspend_state_t state)
48{
49 if (pm_ops->finish)
50 pm_ops->finish(state);
51}
46 52
47/** 53/**
48 * suspend_prepare - Do prep work before entering low-power state. 54 * suspend_prepare - Do prep work before entering low-power state.
@@ -63,16 +69,13 @@ static int suspend_prepare(suspend_state_t state)
63 69
64 pm_prepare_console(); 70 pm_prepare_console();
65 71
66 error = disable_nonboot_cpus();
67 if (error)
68 goto Enable_cpu;
69
70 if (freeze_processes()) { 72 if (freeze_processes()) {
71 error = -EAGAIN; 73 error = -EAGAIN;
72 goto Thaw; 74 goto Thaw;
73 } 75 }
74 76
75 if ((free_pages = nr_free_pages()) < FREE_PAGE_NUMBER) { 77 if ((free_pages = global_page_state(NR_FREE_PAGES))
78 < FREE_PAGE_NUMBER) {
76 pr_debug("PM: free some memory\n"); 79 pr_debug("PM: free some memory\n");
77 shrink_all_memory(FREE_PAGE_NUMBER - free_pages); 80 shrink_all_memory(FREE_PAGE_NUMBER - free_pages);
78 if (nr_free_pages() < FREE_PAGE_NUMBER) { 81 if (nr_free_pages() < FREE_PAGE_NUMBER) {
@@ -88,18 +91,22 @@ static int suspend_prepare(suspend_state_t state)
88 } 91 }
89 92
90 suspend_console(); 93 suspend_console();
91 if ((error = device_suspend(PMSG_SUSPEND))) { 94 error = device_suspend(PMSG_SUSPEND);
95 if (error) {
92 printk(KERN_ERR "Some devices failed to suspend\n"); 96 printk(KERN_ERR "Some devices failed to suspend\n");
93 goto Finish; 97 goto Resume_devices;
94 } 98 }
95 return 0; 99 error = disable_nonboot_cpus();
96 Finish: 100 if (!error)
97 if (pm_ops->finish) 101 return 0;
98 pm_ops->finish(state); 102
103 enable_nonboot_cpus();
104 Resume_devices:
105 pm_finish(state);
106 device_resume();
107 resume_console();
99 Thaw: 108 Thaw:
100 thaw_processes(); 109 thaw_processes();
101 Enable_cpu:
102 enable_nonboot_cpus();
103 pm_restore_console(); 110 pm_restore_console();
104 return error; 111 return error;
105} 112}
@@ -134,12 +141,11 @@ int suspend_enter(suspend_state_t state)
134 141
135static void suspend_finish(suspend_state_t state) 142static void suspend_finish(suspend_state_t state)
136{ 143{
144 enable_nonboot_cpus();
145 pm_finish(state);
137 device_resume(); 146 device_resume();
138 resume_console(); 147 resume_console();
139 thaw_processes(); 148 thaw_processes();
140 enable_nonboot_cpus();
141 if (pm_ops && pm_ops->finish)
142 pm_ops->finish(state);
143 pm_restore_console(); 149 pm_restore_console();
144} 150}
145 151
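
main.c applies the same discipline to suspend-to-RAM: suspend_finish() now
releases everything in exactly the reverse order suspend_prepare() acquired
it, with the new pm_finish() helper wrapping the optional pm_ops->finish
hook. A condensed pairing, summarizing the diff:

/*
 *      suspend_prepare()                     suspend_finish()
 *        pm_prepare_console()         <->      pm_restore_console()
 *        freeze_processes()           <->      thaw_processes()
 *        suspend_console()            <->      resume_console()
 *        device_suspend(PMSG_SUSPEND) <->      device_resume() + pm_finish()
 *        disable_nonboot_cpus()       <->      enable_nonboot_cpus()
 */
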
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index c024606221c4..fc53ad068128 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -591,7 +591,7 @@ static unsigned int count_free_highmem_pages(void)
591 591
592 for_each_zone(zone) 592 for_each_zone(zone)
593 if (populated_zone(zone) && is_highmem(zone)) 593 if (populated_zone(zone) && is_highmem(zone))
594 cnt += zone->free_pages; 594 cnt += zone_page_state(zone, NR_FREE_PAGES);
595 595
596 return cnt; 596 return cnt;
597} 597}
@@ -869,7 +869,7 @@ static int enough_free_mem(unsigned int nr_pages, unsigned int nr_highmem)
869 for_each_zone(zone) { 869 for_each_zone(zone) {
870 meta += snapshot_additional_pages(zone); 870 meta += snapshot_additional_pages(zone);
871 if (!is_highmem(zone)) 871 if (!is_highmem(zone))
872 free += zone->free_pages; 872 free += zone_page_state(zone, NR_FREE_PAGES);
873 } 873 }
874 874
875 nr_pages += count_pages_for_highmem(nr_highmem); 875 nr_pages += count_pages_for_highmem(nr_highmem);
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 31aa0390c777..7fb834397a0d 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -230,9 +230,10 @@ int swsusp_shrink_memory(void)
230 for_each_zone (zone) 230 for_each_zone (zone)
231 if (populated_zone(zone)) { 231 if (populated_zone(zone)) {
232 if (is_highmem(zone)) { 232 if (is_highmem(zone)) {
233 highmem_size -= zone->free_pages; 233 highmem_size -=
234 zone_page_state(zone, NR_FREE_PAGES);
234 } else { 235 } else {
235 tmp -= zone->free_pages; 236 tmp -= zone_page_state(zone, NR_FREE_PAGES);
236 tmp += zone->lowmem_reserve[ZONE_NORMAL]; 237 tmp += zone->lowmem_reserve[ZONE_NORMAL];
237 tmp += snapshot_additional_pages(zone); 238 tmp += snapshot_additional_pages(zone);
238 } 239 }
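
The snapshot.c and swsusp.c hunks above are mechanical conversions from the
removed zone->free_pages field to the per-zone vmstat counters. The new
idiom for "how many free lowmem pages are there", as a sketch:

/* Sketch: sum free pages over populated non-highmem zones via
 * zone_page_state() instead of the removed zone->free_pages. */
unsigned long free = 0;
struct zone *zone;

for_each_zone(zone)
        if (populated_zone(zone) && !is_highmem(zone))
                free += zone_page_state(zone, NR_FREE_PAGES);
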
diff --git a/kernel/power/user.c b/kernel/power/user.c
index f7b7a785a5c6..dd09efe7df54 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -37,6 +37,7 @@ static struct snapshot_data {
37 int mode; 37 int mode;
38 char frozen; 38 char frozen;
39 char ready; 39 char ready;
40 char platform_suspend;
40} snapshot_state; 41} snapshot_state;
41 42
42static atomic_t device_available = ATOMIC_INIT(1); 43static atomic_t device_available = ATOMIC_INIT(1);
@@ -66,6 +67,7 @@ static int snapshot_open(struct inode *inode, struct file *filp)
66 data->bitmap = NULL; 67 data->bitmap = NULL;
67 data->frozen = 0; 68 data->frozen = 0;
68 data->ready = 0; 69 data->ready = 0;
70 data->platform_suspend = 0;
69 71
70 return 0; 72 return 0;
71} 73}
@@ -122,6 +124,92 @@ static ssize_t snapshot_write(struct file *filp, const char __user *buf,
122 return res; 124 return res;
123} 125}
124 126
127static inline int platform_prepare(void)
128{
129 int error = 0;
130
131 if (pm_ops && pm_ops->prepare)
132 error = pm_ops->prepare(PM_SUSPEND_DISK);
133
134 return error;
135}
136
137static inline void platform_finish(void)
138{
139 if (pm_ops && pm_ops->finish)
140 pm_ops->finish(PM_SUSPEND_DISK);
141}
142
143static inline int snapshot_suspend(int platform_suspend)
144{
145 int error;
146
147 mutex_lock(&pm_mutex);
148 /* Free memory before shutting down devices. */
149 error = swsusp_shrink_memory();
150 if (error)
151 goto Finish;
152
153 if (platform_suspend) {
154 error = platform_prepare();
155 if (error)
156 goto Finish;
157 }
158 suspend_console();
159 error = device_suspend(PMSG_FREEZE);
160 if (error)
161 goto Resume_devices;
162
163 error = disable_nonboot_cpus();
164 if (!error) {
165 in_suspend = 1;
166 error = swsusp_suspend();
167 }
168 enable_nonboot_cpus();
169 Resume_devices:
170 if (platform_suspend)
171 platform_finish();
172
173 device_resume();
174 resume_console();
175 Finish:
176 mutex_unlock(&pm_mutex);
177 return error;
178}
179
180static inline int snapshot_restore(int platform_suspend)
181{
182 int error;
183
184 mutex_lock(&pm_mutex);
185 pm_prepare_console();
186 if (platform_suspend) {
187 error = platform_prepare();
188 if (error)
189 goto Finish;
190 }
191 suspend_console();
192 error = device_suspend(PMSG_PRETHAW);
193 if (error)
194 goto Resume_devices;
195
196 error = disable_nonboot_cpus();
197 if (!error)
198 error = swsusp_resume();
199
200 enable_nonboot_cpus();
201 Resume_devices:
202 if (platform_suspend)
203 platform_finish();
204
205 device_resume();
206 resume_console();
207 Finish:
208 pm_restore_console();
209 mutex_unlock(&pm_mutex);
210 return error;
211}
212
125static int snapshot_ioctl(struct inode *inode, struct file *filp, 213static int snapshot_ioctl(struct inode *inode, struct file *filp,
126 unsigned int cmd, unsigned long arg) 214 unsigned int cmd, unsigned long arg)
127{ 215{
@@ -145,14 +233,9 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
145 if (data->frozen) 233 if (data->frozen)
146 break; 234 break;
147 mutex_lock(&pm_mutex); 235 mutex_lock(&pm_mutex);
148 error = disable_nonboot_cpus(); 236 if (freeze_processes()) {
149 if (!error) { 237 thaw_processes();
150 error = freeze_processes(); 238 error = -EBUSY;
151 if (error) {
152 thaw_processes();
153 enable_nonboot_cpus();
154 error = -EBUSY;
155 }
156 } 239 }
157 mutex_unlock(&pm_mutex); 240 mutex_unlock(&pm_mutex);
158 if (!error) 241 if (!error)
@@ -164,7 +247,6 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
164 break; 247 break;
165 mutex_lock(&pm_mutex); 248 mutex_lock(&pm_mutex);
166 thaw_processes(); 249 thaw_processes();
167 enable_nonboot_cpus();
168 mutex_unlock(&pm_mutex); 250 mutex_unlock(&pm_mutex);
169 data->frozen = 0; 251 data->frozen = 0;
170 break; 252 break;
@@ -174,20 +256,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
174 error = -EPERM; 256 error = -EPERM;
175 break; 257 break;
176 } 258 }
177 mutex_lock(&pm_mutex); 259 error = snapshot_suspend(data->platform_suspend);
178 /* Free memory before shutting down devices. */
179 error = swsusp_shrink_memory();
180 if (!error) {
181 suspend_console();
182 error = device_suspend(PMSG_FREEZE);
183 if (!error) {
184 in_suspend = 1;
185 error = swsusp_suspend();
186 device_resume();
187 }
188 resume_console();
189 }
190 mutex_unlock(&pm_mutex);
191 if (!error) 260 if (!error)
192 error = put_user(in_suspend, (unsigned int __user *)arg); 261 error = put_user(in_suspend, (unsigned int __user *)arg);
193 if (!error) 262 if (!error)
@@ -201,17 +270,7 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
201 error = -EPERM; 270 error = -EPERM;
202 break; 271 break;
203 } 272 }
204 mutex_lock(&pm_mutex); 273 error = snapshot_restore(data->platform_suspend);
205 pm_prepare_console();
206 suspend_console();
207 error = device_suspend(PMSG_PRETHAW);
208 if (!error) {
209 error = swsusp_resume();
210 device_resume();
211 }
212 resume_console();
213 pm_restore_console();
214 mutex_unlock(&pm_mutex);
215 break; 274 break;
216 275
217 case SNAPSHOT_FREE: 276 case SNAPSHOT_FREE:
@@ -282,6 +341,11 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
282 break; 341 break;
283 342
284 case SNAPSHOT_S2RAM: 343 case SNAPSHOT_S2RAM:
344 if (!pm_ops) {
345 error = -ENOSYS;
346 break;
347 }
348
285 if (!data->frozen) { 349 if (!data->frozen) {
286 error = -EPERM; 350 error = -EPERM;
287 break; 351 break;
@@ -319,28 +383,35 @@ static int snapshot_ioctl(struct inode *inode, struct file *filp,
319 break; 383 break;
320 384
321 case SNAPSHOT_PMOPS: 385 case SNAPSHOT_PMOPS:
386 error = -EINVAL;
387
322 switch (arg) { 388 switch (arg) {
323 389
324 case PMOPS_PREPARE: 390 case PMOPS_PREPARE:
325 if (pm_ops->prepare) { 391 if (pm_ops && pm_ops->enter) {
326 error = pm_ops->prepare(PM_SUSPEND_DISK); 392 data->platform_suspend = 1;
393 error = 0;
394 } else {
395 error = -ENOSYS;
327 } 396 }
328 break; 397 break;
329 398
330 case PMOPS_ENTER: 399 case PMOPS_ENTER:
331 kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK); 400 if (data->platform_suspend) {
332 error = pm_ops->enter(PM_SUSPEND_DISK); 401 kernel_shutdown_prepare(SYSTEM_SUSPEND_DISK);
402 error = pm_ops->enter(PM_SUSPEND_DISK);
403 error = 0;
404 }
333 break; 405 break;
334 406
335 case PMOPS_FINISH: 407 case PMOPS_FINISH:
336 if (pm_ops && pm_ops->finish) { 408 if (data->platform_suspend)
337 pm_ops->finish(PM_SUSPEND_DISK); 409 error = 0;
338 } 410
339 break; 411 break;
340 412
341 default: 413 default:
342 printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg); 414 printk(KERN_ERR "SNAPSHOT_PMOPS: invalid argument %ld\n", arg);
343 error = -EINVAL;
344 415
345 } 416 }
346 break; 417 break;
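
The user.c change turns PMOPS_PREPARE into a pure bookkeeping step: it only
records that platform mode was requested, and the real platform_prepare()/
platform_finish() calls happen inside snapshot_suspend() and
snapshot_restore(). A pseudocode sketch of the resulting user-space
handshake (ioctl names are from the kernel's snapshot device interface as I
read it; their numeric definitions are not repeated here):

/*
 *      fd = open("/dev/snapshot", O_RDONLY);
 *      ioctl(fd, SNAPSHOT_FREEZE, 0);            // freezer only, no CPU hotplug
 *      ioctl(fd, SNAPSHOT_PMOPS, PMOPS_PREPARE); // sets data->platform_suspend;
 *                                                // -ENOSYS without pm_ops->enter
 *      ioctl(fd, SNAPSHOT_ATOMIC_SNAPSHOT, &in_suspend);
 *                                                // snapshot_suspend() now runs
 *                                                // the platform hooks itself
 */
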
diff --git a/kernel/printk.c b/kernel/printk.c
index c770e1a4e882..0c151877ff71 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -483,7 +483,7 @@ static int have_callable_console(void)
483 * printk - print a kernel message 483 * printk - print a kernel message
484 * @fmt: format string 484 * @fmt: format string
485 * 485 *
486 * This is printk. It can be called from any context. We want it to work. 486 * This is printk(). It can be called from any context. We want it to work.
487 * 487 *
488 * We try to grab the console_sem. If we succeed, it's easy - we log the output and 488 * We try to grab the console_sem. If we succeed, it's easy - we log the output and
489 * call the console drivers. If we fail to get the semaphore we place the output 489 * call the console drivers. If we fail to get the semaphore we place the output
@@ -529,7 +529,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
529 zap_locks(); 529 zap_locks();
530 530
531 /* This stops the holder of console_sem just where we want him */ 531 /* This stops the holder of console_sem just where we want him */
532 local_irq_save(flags); 532 raw_local_irq_save(flags);
533 lockdep_off(); 533 lockdep_off();
534 spin_lock(&logbuf_lock); 534 spin_lock(&logbuf_lock);
535 printk_cpu = smp_processor_id(); 535 printk_cpu = smp_processor_id();
@@ -618,7 +618,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
618 up(&console_sem); 618 up(&console_sem);
619 } 619 }
620 lockdep_on(); 620 lockdep_on();
621 local_irq_restore(flags); 621 raw_local_irq_restore(flags);
622 } else { 622 } else {
623 /* 623 /*
624 * Someone else owns the drivers. We drop the spinlock, which 624 * Someone else owns the drivers. We drop the spinlock, which
@@ -628,7 +628,7 @@ asmlinkage int vprintk(const char *fmt, va_list args)
628 printk_cpu = UINT_MAX; 628 printk_cpu = UINT_MAX;
629 spin_unlock(&logbuf_lock); 629 spin_unlock(&logbuf_lock);
630 lockdep_on(); 630 lockdep_on();
631 local_irq_restore(flags); 631 raw_local_irq_restore(flags);
632 } 632 }
633 633
634 preempt_enable(); 634 preempt_enable();
@@ -783,6 +783,12 @@ int is_console_locked(void)
783 return console_locked; 783 return console_locked;
784} 784}
785 785
786void wake_up_klogd(void)
787{
788 if (!oops_in_progress && waitqueue_active(&log_wait))
789 wake_up_interruptible(&log_wait);
790}
791
786/** 792/**
787 * release_console_sem - unlock the console system 793 * release_console_sem - unlock the console system
788 * 794 *
@@ -825,8 +831,8 @@ void release_console_sem(void)
825 console_locked = 0; 831 console_locked = 0;
826 up(&console_sem); 832 up(&console_sem);
827 spin_unlock_irqrestore(&logbuf_lock, flags); 833 spin_unlock_irqrestore(&logbuf_lock, flags);
828 if (wake_klogd && !oops_in_progress && waitqueue_active(&log_wait)) 834 if (wake_klogd)
829 wake_up_interruptible(&log_wait); 835 wake_up_klogd();
830} 836}
831EXPORT_SYMBOL(release_console_sem); 837EXPORT_SYMBOL(release_console_sem);
832 838
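
Two things happen in printk.c above: the klogd wakeup test is factored into
wake_up_klogd(), and vprintk() moves to the raw_ irq-flag variants. The raw_
calls pair with lockdep_off(): while lockdep is switched off, the hardirq
state change has to stay invisible to its irq-state tracing too, or the
bookkeeping goes out of sync. The pattern as a sketch:

/* Sketch of the lockdep-quiet critical section in vprintk(). */
raw_local_irq_save(flags);      /* no irq-state tracing hooks */
lockdep_off();
spin_lock(&logbuf_lock);
/* ... emit into the log buffer ... */
spin_unlock(&logbuf_lock);
lockdep_on();
raw_local_irq_restore(flags);
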
diff --git a/kernel/profile.c b/kernel/profile.c
index d6579d511069..9bfadb248dd8 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -449,7 +449,6 @@ void create_prof_cpu_mask(struct proc_dir_entry *root_irq_dir)
449 /* create /proc/irq/prof_cpu_mask */ 449 /* create /proc/irq/prof_cpu_mask */
450 if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir))) 450 if (!(entry = create_proc_entry("prof_cpu_mask", 0600, root_irq_dir)))
451 return; 451 return;
452 entry->nlink = 1;
453 entry->data = (void *)&prof_cpu_mask; 452 entry->data = (void *)&prof_cpu_mask;
454 entry->read_proc = prof_cpu_mask_read_proc; 453 entry->read_proc = prof_cpu_mask_read_proc;
455 entry->write_proc = prof_cpu_mask_write_proc; 454 entry->write_proc = prof_cpu_mask_write_proc;
diff --git a/kernel/relay.c b/kernel/relay.c
index 284e2e8b4eed..ef8a935710a2 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -7,6 +7,8 @@
7 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com) 7 * Copyright (C) 1999-2005 - Karim Yaghmour (karim@opersys.com)
8 * 8 *
9 * Moved to kernel/relay.c by Paul Mundt, 2006. 9 * Moved to kernel/relay.c by Paul Mundt, 2006.
10 * November 2006 - CPU hotplug support by Mathieu Desnoyers
11 * (mathieu.desnoyers@polymtl.ca)
10 * 12 *
11 * This file is released under the GPL. 13 * This file is released under the GPL.
12 */ 14 */
@@ -18,6 +20,11 @@
18#include <linux/relay.h> 20#include <linux/relay.h>
19#include <linux/vmalloc.h> 21#include <linux/vmalloc.h>
20#include <linux/mm.h> 22#include <linux/mm.h>
23#include <linux/cpu.h>
24
25/* list of open channels, for cpu hotplug */
26static DEFINE_MUTEX(relay_channels_mutex);
27static LIST_HEAD(relay_channels);
21 28
22/* 29/*
23 * close() vm_op implementation for relay file mapping. 30 * close() vm_op implementation for relay file mapping.
@@ -187,6 +194,7 @@ void relay_destroy_buf(struct rchan_buf *buf)
187 __free_page(buf->page_array[i]); 194 __free_page(buf->page_array[i]);
188 kfree(buf->page_array); 195 kfree(buf->page_array);
189 } 196 }
197 chan->buf[buf->cpu] = NULL;
190 kfree(buf->padding); 198 kfree(buf->padding);
191 kfree(buf); 199 kfree(buf);
192 kref_put(&chan->kref, relay_destroy_channel); 200 kref_put(&chan->kref, relay_destroy_channel);
@@ -320,7 +328,7 @@ static void wakeup_readers(struct work_struct *work)
320 * @buf: the channel buffer 328 * @buf: the channel buffer
321 * @init: 1 if this is a first-time initialization 329 * @init: 1 if this is a first-time initialization
322 * 330 *
323 * See relay_reset for description of effect. 331 * See relay_reset() for description of effect.
324 */ 332 */
325static void __relay_reset(struct rchan_buf *buf, unsigned int init) 333static void __relay_reset(struct rchan_buf *buf, unsigned int init)
326{ 334{
@@ -356,57 +364,75 @@ static void __relay_reset(struct rchan_buf *buf, unsigned int init)
356 * and restarting the channel in its initial state. The buffers 364 * and restarting the channel in its initial state. The buffers
357 * are not freed, so any mappings are still in effect. 365 * are not freed, so any mappings are still in effect.
358 * 366 *
359 * NOTE: Care should be taken that the channel isn't actually 367 * NOTE. Care should be taken that the channel isn't actually
360 * being used by anything when this call is made. 368 * being used by anything when this call is made.
361 */ 369 */
362void relay_reset(struct rchan *chan) 370void relay_reset(struct rchan *chan)
363{ 371{
364 unsigned int i; 372 unsigned int i;
365 struct rchan_buf *prev = NULL;
366 373
367 if (!chan) 374 if (!chan)
368 return; 375 return;
369 376
370 for (i = 0; i < NR_CPUS; i++) { 377 if (chan->is_global && chan->buf[0]) {
371 if (!chan->buf[i] || chan->buf[i] == prev) 378 __relay_reset(chan->buf[0], 0);
372 break; 379 return;
373 __relay_reset(chan->buf[i], 0);
374 prev = chan->buf[i];
375 } 380 }
381
382 mutex_lock(&relay_channels_mutex);
383 for_each_online_cpu(i)
384 if (chan->buf[i])
385 __relay_reset(chan->buf[i], 0);
386 mutex_unlock(&relay_channels_mutex);
376} 387}
377EXPORT_SYMBOL_GPL(relay_reset); 388EXPORT_SYMBOL_GPL(relay_reset);
378 389
379/* 390/*
380 * relay_open_buf - create a new relay channel buffer 391 * relay_open_buf - create a new relay channel buffer
381 * 392 *
382 * Internal - used by relay_open(). 393 * used by relay_open() and CPU hotplug.
383 */ 394 */
384static struct rchan_buf *relay_open_buf(struct rchan *chan, 395static struct rchan_buf *relay_open_buf(struct rchan *chan, unsigned int cpu)
385 const char *filename,
386 struct dentry *parent,
387 int *is_global)
388{ 396{
389 struct rchan_buf *buf; 397 struct rchan_buf *buf = NULL;
390 struct dentry *dentry; 398 struct dentry *dentry;
399 char *tmpname;
391 400
392 if (*is_global) 401 if (chan->is_global)
393 return chan->buf[0]; 402 return chan->buf[0];
394 403
404 tmpname = kzalloc(NAME_MAX + 1, GFP_KERNEL);
405 if (!tmpname)
406 goto end;
407 snprintf(tmpname, NAME_MAX, "%s%d", chan->base_filename, cpu);
408
395 buf = relay_create_buf(chan); 409 buf = relay_create_buf(chan);
396 if (!buf) 410 if (!buf)
397 return NULL; 411 goto free_name;
412
413 buf->cpu = cpu;
414 __relay_reset(buf, 1);
398 415
399 /* Create file in fs */ 416 /* Create file in fs */
400 dentry = chan->cb->create_buf_file(filename, parent, S_IRUSR, 417 dentry = chan->cb->create_buf_file(tmpname, chan->parent, S_IRUSR,
401 buf, is_global); 418 buf, &chan->is_global);
402 if (!dentry) { 419 if (!dentry)
403 relay_destroy_buf(buf); 420 goto free_buf;
404 return NULL;
405 }
406 421
407 buf->dentry = dentry; 422 buf->dentry = dentry;
408 __relay_reset(buf, 1);
409 423
424 if(chan->is_global) {
425 chan->buf[0] = buf;
426 buf->cpu = 0;
427 }
428
429 goto free_name;
430
431free_buf:
432 relay_destroy_buf(buf);
433free_name:
434 kfree(tmpname);
435end:
410 return buf; 436 return buf;
411} 437}
412 438
@@ -448,31 +474,71 @@ static void setup_callbacks(struct rchan *chan,
448} 474}
449 475
450/** 476/**
477 *
478 * relay_hotcpu_callback - CPU hotplug callback
479 * @nb: notifier block
480 * @action: hotplug action to take
481 * @hcpu: CPU number
482 *
483 * Returns the success/failure of the operation. (NOTIFY_OK, NOTIFY_BAD)
484 */
485static int __cpuinit relay_hotcpu_callback(struct notifier_block *nb,
486 unsigned long action,
487 void *hcpu)
488{
489 unsigned int hotcpu = (unsigned long)hcpu;
490 struct rchan *chan;
491
492 switch(action) {
493 case CPU_UP_PREPARE:
494 mutex_lock(&relay_channels_mutex);
495 list_for_each_entry(chan, &relay_channels, list) {
496 if (chan->buf[hotcpu])
497 continue;
498 chan->buf[hotcpu] = relay_open_buf(chan, hotcpu);
499 if(!chan->buf[hotcpu]) {
500 printk(KERN_ERR
501 "relay_hotcpu_callback: cpu %d buffer "
502 "creation failed\n", hotcpu);
503 mutex_unlock(&relay_channels_mutex);
504 return NOTIFY_BAD;
505 }
506 }
507 mutex_unlock(&relay_channels_mutex);
508 break;
509 case CPU_DEAD:
510 /* No need to flush the cpu : will be flushed upon
511 * final relay_flush() call. */
512 break;
513 }
514 return NOTIFY_OK;
515}
516
517/**
451 * relay_open - create a new relay channel 518 * relay_open - create a new relay channel
452 * @base_filename: base name of files to create 519 * @base_filename: base name of files to create
453 * @parent: dentry of parent directory, %NULL for root directory 520 * @parent: dentry of parent directory, %NULL for root directory
454 * @subbuf_size: size of sub-buffers 521 * @subbuf_size: size of sub-buffers
455 * @n_subbufs: number of sub-buffers 522 * @n_subbufs: number of sub-buffers
456 * @cb: client callback functions 523 * @cb: client callback functions
524 * @private_data: user-defined data
457 * 525 *
458 * Returns channel pointer if successful, %NULL otherwise. 526 * Returns channel pointer if successful, %NULL otherwise.
459 * 527 *
460 * Creates a channel buffer for each cpu using the sizes and 528 * Creates a channel buffer for each cpu using the sizes and
461 * attributes specified. The created channel buffer files 529 * attributes specified. The created channel buffer files
462 * will be named base_filename0...base_filenameN-1. File 530 * will be named base_filename0...base_filenameN-1. File
463 * permissions will be S_IRUSR. 531 * permissions will be %S_IRUSR.
464 */ 532 */
465struct rchan *relay_open(const char *base_filename, 533struct rchan *relay_open(const char *base_filename,
466 struct dentry *parent, 534 struct dentry *parent,
467 size_t subbuf_size, 535 size_t subbuf_size,
468 size_t n_subbufs, 536 size_t n_subbufs,
469 struct rchan_callbacks *cb) 537 struct rchan_callbacks *cb,
538 void *private_data)
470{ 539{
471 unsigned int i; 540 unsigned int i;
472 struct rchan *chan; 541 struct rchan *chan;
473 char *tmpname;
474 int is_global = 0;
475
476 if (!base_filename) 542 if (!base_filename)
477 return NULL; 543 return NULL;
478 544
@@ -487,38 +553,32 @@ struct rchan *relay_open(const char *base_filename,
487 chan->n_subbufs = n_subbufs; 553 chan->n_subbufs = n_subbufs;
488 chan->subbuf_size = subbuf_size; 554 chan->subbuf_size = subbuf_size;
489 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs); 555 chan->alloc_size = FIX_SIZE(subbuf_size * n_subbufs);
556 chan->parent = parent;
557 chan->private_data = private_data;
558 strlcpy(chan->base_filename, base_filename, NAME_MAX);
490 setup_callbacks(chan, cb); 559 setup_callbacks(chan, cb);
491 kref_init(&chan->kref); 560 kref_init(&chan->kref);
492 561
493 tmpname = kmalloc(NAME_MAX + 1, GFP_KERNEL); 562 mutex_lock(&relay_channels_mutex);
494 if (!tmpname)
495 goto free_chan;
496
497 for_each_online_cpu(i) { 563 for_each_online_cpu(i) {
498 sprintf(tmpname, "%s%d", base_filename, i); 564 chan->buf[i] = relay_open_buf(chan, i);
499 chan->buf[i] = relay_open_buf(chan, tmpname, parent,
500 &is_global);
501 if (!chan->buf[i]) 565 if (!chan->buf[i])
502 goto free_bufs; 566 goto free_bufs;
503
504 chan->buf[i]->cpu = i;
505 } 567 }
568 list_add(&chan->list, &relay_channels);
569 mutex_unlock(&relay_channels_mutex);
506 570
507 kfree(tmpname);
508 return chan; 571 return chan;
509 572
510free_bufs: 573free_bufs:
511 for (i = 0; i < NR_CPUS; i++) { 574 for_each_online_cpu(i) {
512 if (!chan->buf[i]) 575 if (!chan->buf[i])
513 break; 576 break;
514 relay_close_buf(chan->buf[i]); 577 relay_close_buf(chan->buf[i]);
515 if (is_global)
516 break;
517 } 578 }
518 kfree(tmpname);
519 579
520free_chan:
521 kref_put(&chan->kref, relay_destroy_channel); 580 kref_put(&chan->kref, relay_destroy_channel);
581 mutex_unlock(&relay_channels_mutex);
522 return NULL; 582 return NULL;
523} 583}
524EXPORT_SYMBOL_GPL(relay_open); 584EXPORT_SYMBOL_GPL(relay_open);
@@ -588,7 +648,7 @@ EXPORT_SYMBOL_GPL(relay_switch_subbuf);
588 * subbufs_consumed should be the number of sub-buffers newly consumed, 648 * subbufs_consumed should be the number of sub-buffers newly consumed,
589 * not the total consumed. 649 * not the total consumed.
590 * 650 *
591 * NOTE: Kernel clients don't need to call this function if the channel 651 * NOTE. Kernel clients don't need to call this function if the channel
592 * mode is 'overwrite'. 652 * mode is 'overwrite'.
593 */ 653 */
594void relay_subbufs_consumed(struct rchan *chan, 654void relay_subbufs_consumed(struct rchan *chan,
@@ -619,24 +679,26 @@ EXPORT_SYMBOL_GPL(relay_subbufs_consumed);
619void relay_close(struct rchan *chan) 679void relay_close(struct rchan *chan)
620{ 680{
621 unsigned int i; 681 unsigned int i;
622 struct rchan_buf *prev = NULL;
623 682
624 if (!chan) 683 if (!chan)
625 return; 684 return;
626 685
627 for (i = 0; i < NR_CPUS; i++) { 686 mutex_lock(&relay_channels_mutex);
628 if (!chan->buf[i] || chan->buf[i] == prev) 687 if (chan->is_global && chan->buf[0])
629 break; 688 relay_close_buf(chan->buf[0]);
630 relay_close_buf(chan->buf[i]); 689 else
631 prev = chan->buf[i]; 690 for_each_possible_cpu(i)
632 } 691 if (chan->buf[i])
692 relay_close_buf(chan->buf[i]);
633 693
634 if (chan->last_toobig) 694 if (chan->last_toobig)
635 printk(KERN_WARNING "relay: one or more items not logged " 695 printk(KERN_WARNING "relay: one or more items not logged "
636 "[item size (%Zd) > sub-buffer size (%Zd)]\n", 696 "[item size (%Zd) > sub-buffer size (%Zd)]\n",
637 chan->last_toobig, chan->subbuf_size); 697 chan->last_toobig, chan->subbuf_size);
638 698
699 list_del(&chan->list);
639 kref_put(&chan->kref, relay_destroy_channel); 700 kref_put(&chan->kref, relay_destroy_channel);
701 mutex_unlock(&relay_channels_mutex);
640} 702}
641EXPORT_SYMBOL_GPL(relay_close); 703EXPORT_SYMBOL_GPL(relay_close);
642 704
@@ -649,17 +711,20 @@ EXPORT_SYMBOL_GPL(relay_close);
649void relay_flush(struct rchan *chan) 711void relay_flush(struct rchan *chan)
650{ 712{
651 unsigned int i; 713 unsigned int i;
652 struct rchan_buf *prev = NULL;
653 714
654 if (!chan) 715 if (!chan)
655 return; 716 return;
656 717
657 for (i = 0; i < NR_CPUS; i++) { 718 if (chan->is_global && chan->buf[0]) {
658 if (!chan->buf[i] || chan->buf[i] == prev) 719 relay_switch_subbuf(chan->buf[0], 0);
659 break; 720 return;
660 relay_switch_subbuf(chan->buf[i], 0);
661 prev = chan->buf[i];
662 } 721 }
722
723 mutex_lock(&relay_channels_mutex);
724 for_each_possible_cpu(i)
725 if (chan->buf[i])
726 relay_switch_subbuf(chan->buf[i], 0);
727 mutex_unlock(&relay_channels_mutex);
663} 728}
664EXPORT_SYMBOL_GPL(relay_flush); 729EXPORT_SYMBOL_GPL(relay_flush);
665 730
@@ -684,7 +749,7 @@ static int relay_file_open(struct inode *inode, struct file *filp)
684 * @filp: the file 749 * @filp: the file
685 * @vma: the vma describing what to map 750 * @vma: the vma describing what to map
686 * 751 *
687 * Calls upon relay_mmap_buf to map the file into user space. 752 * Calls upon relay_mmap_buf() to map the file into user space.
688 */ 753 */
689static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma) 754static int relay_file_mmap(struct file *filp, struct vm_area_struct *vma)
690{ 755{
@@ -826,7 +891,7 @@ static size_t relay_file_read_subbuf_avail(size_t read_pos,
826 * @read_pos: file read position 891 * @read_pos: file read position
827 * @buf: relay channel buffer 892 * @buf: relay channel buffer
828 * 893 *
829 * If the read_pos is in the middle of padding, return the 894 * If the @read_pos is in the middle of padding, return the
830 * position of the first actually available byte, otherwise 895 * position of the first actually available byte, otherwise
831 * return the original value. 896 * return the original value.
832 */ 897 */
@@ -1022,3 +1087,12 @@ const struct file_operations relay_file_operations = {
1022 .sendfile = relay_file_sendfile, 1087 .sendfile = relay_file_sendfile,
1023}; 1088};
1024EXPORT_SYMBOL_GPL(relay_file_operations); 1089EXPORT_SYMBOL_GPL(relay_file_operations);
1090
1091static __init int relay_init(void)
1092{
1093
1094 hotcpu_notifier(relay_hotcpu_callback, 0);
1095 return 0;
1096}
1097
1098module_init(relay_init);
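
With the relay.c changes, callers stop formatting per-cpu file names
themselves: relay_open() now takes the base name plus a trailing
private_data pointer, and names, creates and (on hotplug) re-creates the
per-cpu buffers internally. A hypothetical client updated for the new
signature (assumes a debugfs-backed create_buf_file callback, a common
choice but not mandated by this patch):

static struct dentry *my_create_buf_file(const char *filename,
                                         struct dentry *parent, int mode,
                                         struct rchan_buf *buf,
                                         int *is_global)
{
        /* one file per cpu buffer; filename is built by the kernel now */
        return debugfs_create_file(filename, mode, parent, buf,
                                   &relay_file_operations);
}

static int my_remove_buf_file(struct dentry *dentry)
{
        debugfs_remove(dentry);
        return 0;
}

static struct rchan_callbacks my_callbacks = {
        .create_buf_file        = my_create_buf_file,
        .remove_buf_file        = my_remove_buf_file,
};

/* in init code: 8 sub-buffers of 4 KiB each, no private data */
chan = relay_open("example", NULL, 4096, 8, &my_callbacks, NULL);
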
diff --git a/kernel/resource.c b/kernel/resource.c
index 7b9a497419d9..2a3f88636580 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -17,6 +17,7 @@
17#include <linux/fs.h> 17#include <linux/fs.h>
18#include <linux/proc_fs.h> 18#include <linux/proc_fs.h>
19#include <linux/seq_file.h> 19#include <linux/seq_file.h>
20#include <linux/device.h>
20#include <asm/io.h> 21#include <asm/io.h>
21 22
22 23
@@ -618,6 +619,67 @@ void __release_region(struct resource *parent, resource_size_t start,
618EXPORT_SYMBOL(__release_region); 619EXPORT_SYMBOL(__release_region);
619 620
620/* 621/*
622 * Managed region resource
623 */
624struct region_devres {
625 struct resource *parent;
626 resource_size_t start;
627 resource_size_t n;
628};
629
630static void devm_region_release(struct device *dev, void *res)
631{
632 struct region_devres *this = res;
633
634 __release_region(this->parent, this->start, this->n);
635}
636
637static int devm_region_match(struct device *dev, void *res, void *match_data)
638{
639 struct region_devres *this = res, *match = match_data;
640
641 return this->parent == match->parent &&
642 this->start == match->start && this->n == match->n;
643}
644
645struct resource * __devm_request_region(struct device *dev,
646 struct resource *parent, resource_size_t start,
647 resource_size_t n, const char *name)
648{
649 struct region_devres *dr = NULL;
650 struct resource *res;
651
652 dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
653 GFP_KERNEL);
654 if (!dr)
655 return NULL;
656
657 dr->parent = parent;
658 dr->start = start;
659 dr->n = n;
660
661 res = __request_region(parent, start, n, name);
662 if (res)
663 devres_add(dev, dr);
664 else
665 devres_free(dr);
666
667 return res;
668}
669EXPORT_SYMBOL(__devm_request_region);
670
671void __devm_release_region(struct device *dev, struct resource *parent,
672 resource_size_t start, resource_size_t n)
673{
674 struct region_devres match_data = { parent, start, n };
675
676 __release_region(parent, start, n);
677 WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
678 &match_data));
679}
680EXPORT_SYMBOL(__devm_release_region);
681
682/*
621 * Called from init/main.c to reserve IO ports. 683 * Called from init/main.c to reserve IO ports.
622 */ 684 */
623#define MAXRESERVE 4 685#define MAXRESERVE 4
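
The resource.c addition is the devres counterpart of request_region(): a
region requested through it is released automatically when the device
detaches. Drivers normally reach it through the devm_request_region()
wrapper (defined alongside this in ioport.h, passing &ioport_resource); a
hypothetical probe sketch, with MY_IO_BASE/MY_IO_LEN standing in for real
addresses:

static int my_probe(struct device *dev)
{
        /* released automatically on detach or probe failure;
         * no __release_region() needed in the error paths */
        if (!devm_request_region(dev, MY_IO_BASE, MY_IO_LEN, "mydrv"))
                return -EBUSY;
        /* ... port I/O ... */
        return 0;
}
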
diff --git a/kernel/sched.c b/kernel/sched.c
index cca93cc0dd7d..08f86178aa34 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -57,6 +57,16 @@
57#include <asm/unistd.h> 57#include <asm/unistd.h>
58 58
59/* 59/*
60 * Scheduler clock - returns current time in nanosec units.
61 * This is default implementation.
62 * Architectures and sub-architectures can override this.
63 */
64unsigned long long __attribute__((weak)) sched_clock(void)
65{
66 return (unsigned long long)jiffies * (1000000000 / HZ);
67}
68
69/*
60 * Convert user-nice values [ -20 ... 0 ... 19 ] 70 * Convert user-nice values [ -20 ... 0 ... 19 ]
61 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ], 71 * to static priority [ MAX_RT_PRIO..MAX_PRIO-1 ],
62 * and back. 72 * and back.
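
The weak sched_clock() default added above just rescales the jiffies counter
to nanoseconds, so its resolution is one tick. A worked example:

/* With HZ == 250: 1000000000 / 250 == 4000000 ns per jiffy, so
 * after 1000 ticks the default sched_clock() reports 4000000000 ns
 * (4 s), advancing in 4 ms steps until an architecture overrides
 * the weak symbol with a finer clock. */
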
@@ -2887,14 +2897,16 @@ static void active_load_balance(struct rq *busiest_rq, int busiest_cpu)
2887static void update_load(struct rq *this_rq) 2897static void update_load(struct rq *this_rq)
2888{ 2898{
2889 unsigned long this_load; 2899 unsigned long this_load;
2890 int i, scale; 2900 unsigned int i, scale;
2891 2901
2892 this_load = this_rq->raw_weighted_load; 2902 this_load = this_rq->raw_weighted_load;
2893 2903
2894 /* Update our load: */ 2904 /* Update our load: */
2895 for (i = 0, scale = 1; i < 3; i++, scale <<= 1) { 2905 for (i = 0, scale = 1; i < 3; i++, scale += scale) {
2896 unsigned long old_load, new_load; 2906 unsigned long old_load, new_load;
2897 2907
2908 /* scale is effectively 1 << i now, and >> i divides by scale */
2909
2898 old_load = this_rq->cpu_load[i]; 2910 old_load = this_rq->cpu_load[i];
2899 new_load = this_load; 2911 new_load = this_load;
2900 /* 2912 /*
@@ -2904,7 +2916,7 @@ static void update_load(struct rq *this_rq)
2904 */ 2916 */
2905 if (new_load > old_load) 2917 if (new_load > old_load)
2906 new_load += scale-1; 2918 new_load += scale-1;
2907 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) / scale; 2919 this_rq->cpu_load[i] = (old_load*(scale-1) + new_load) >> i;
2908 } 2920 }
2909} 2921}
2910 2922
@@ -4193,13 +4205,12 @@ static void __setscheduler(struct task_struct *p, int policy, int prio)
4193} 4205}
4194 4206
4195/** 4207/**
4196 * sched_setscheduler - change the scheduling policy and/or RT priority of 4208 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread.
4197 * a thread.
4198 * @p: the task in question. 4209 * @p: the task in question.
4199 * @policy: new policy. 4210 * @policy: new policy.
4200 * @param: structure containing the new RT priority. 4211 * @param: structure containing the new RT priority.
4201 * 4212 *
4202 * NOTE: the task may be already dead 4213 * NOTE that the task may be already dead.
4203 */ 4214 */
4204int sched_setscheduler(struct task_struct *p, int policy, 4215int sched_setscheduler(struct task_struct *p, int policy,
4205 struct sched_param *param) 4216 struct sched_param *param)
@@ -4567,7 +4578,7 @@ asmlinkage long sys_sched_getaffinity(pid_t pid, unsigned int len,
4567/** 4578/**
4568 * sys_sched_yield - yield the current processor to other threads. 4579 * sys_sched_yield - yield the current processor to other threads.
4569 * 4580 *
4570 * this function yields the current CPU by moving the calling thread 4581 * This function yields the current CPU by moving the calling thread
4571 * to the expired array. If there are no other threads running on this 4582 * to the expired array. If there are no other threads running on this
4572 * CPU then this function will return. 4583 * CPU then this function will return.
4573 */ 4584 */
@@ -4694,7 +4705,7 @@ EXPORT_SYMBOL(cond_resched_softirq);
4694/** 4705/**
4695 * yield - yield the current processor to other threads. 4706 * yield - yield the current processor to other threads.
4696 * 4707 *
4697 * this is a shortcut for kernel-space yielding - it marks the 4708 * This is a shortcut for kernel-space yielding - it marks the
4698 * thread runnable and calls sys_sched_yield(). 4709 * thread runnable and calls sys_sched_yield().
4699 */ 4710 */
4700void __sched yield(void) 4711void __sched yield(void)
diff --git a/kernel/signal.c b/kernel/signal.c
index 5630255d2e2a..8072e568bbe0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1096,42 +1096,21 @@ int kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp)
1096 return retval; 1096 return retval;
1097} 1097}
1098 1098
1099int __kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1100{
1101 if (pgrp <= 0)
1102 return -EINVAL;
1103
1104 return __kill_pgrp_info(sig, info, find_pid(pgrp));
1105}
1106
1107int
1108kill_pg_info(int sig, struct siginfo *info, pid_t pgrp)
1109{
1110 int retval;
1111
1112 read_lock(&tasklist_lock);
1113 retval = __kill_pg_info(sig, info, pgrp);
1114 read_unlock(&tasklist_lock);
1115
1116 return retval;
1117}
1118
1119int kill_pid_info(int sig, struct siginfo *info, struct pid *pid) 1099int kill_pid_info(int sig, struct siginfo *info, struct pid *pid)
1120{ 1100{
1121 int error; 1101 int error;
1122 int acquired_tasklist_lock = 0;
1123 struct task_struct *p; 1102 struct task_struct *p;
1124 1103
1125 rcu_read_lock(); 1104 rcu_read_lock();
1126 if (unlikely(sig_needs_tasklist(sig))) { 1105 if (unlikely(sig_needs_tasklist(sig)))
1127 read_lock(&tasklist_lock); 1106 read_lock(&tasklist_lock);
1128 acquired_tasklist_lock = 1; 1107
1129 }
1130 p = pid_task(pid, PIDTYPE_PID); 1108 p = pid_task(pid, PIDTYPE_PID);
1131 error = -ESRCH; 1109 error = -ESRCH;
1132 if (p) 1110 if (p)
1133 error = group_send_sig_info(sig, info, p); 1111 error = group_send_sig_info(sig, info, p);
1134 if (unlikely(acquired_tasklist_lock)) 1112
1113 if (unlikely(sig_needs_tasklist(sig)))
1135 read_unlock(&tasklist_lock); 1114 read_unlock(&tasklist_lock);
1136 rcu_read_unlock(); 1115 rcu_read_unlock();
1137 return error; 1116 return error;
@@ -1192,8 +1171,10 @@ EXPORT_SYMBOL_GPL(kill_pid_info_as_uid);
1192 1171
1193static int kill_something_info(int sig, struct siginfo *info, int pid) 1172static int kill_something_info(int sig, struct siginfo *info, int pid)
1194{ 1173{
1174 int ret;
1175 rcu_read_lock();
1195 if (!pid) { 1176 if (!pid) {
1196 return kill_pg_info(sig, info, process_group(current)); 1177 ret = kill_pgrp_info(sig, info, task_pgrp(current));
1197 } else if (pid == -1) { 1178 } else if (pid == -1) {
1198 int retval = 0, count = 0; 1179 int retval = 0, count = 0;
1199 struct task_struct * p; 1180 struct task_struct * p;
@@ -1208,12 +1189,14 @@ static int kill_something_info(int sig, struct siginfo *info, int pid)
1208 } 1189 }
1209 } 1190 }
1210 read_unlock(&tasklist_lock); 1191 read_unlock(&tasklist_lock);
1211 return count ? retval : -ESRCH; 1192 ret = count ? retval : -ESRCH;
1212 } else if (pid < 0) { 1193 } else if (pid < 0) {
1213 return kill_pg_info(sig, info, -pid); 1194 ret = kill_pgrp_info(sig, info, find_pid(-pid));
1214 } else { 1195 } else {
1215 return kill_proc_info(sig, info, pid); 1196 ret = kill_pid_info(sig, info, find_pid(pid));
1216 } 1197 }
1198 rcu_read_unlock();
1199 return ret;
1217} 1200}
1218 1201
1219/* 1202/*
@@ -1312,12 +1295,6 @@ int kill_pid(struct pid *pid, int sig, int priv)
1312EXPORT_SYMBOL(kill_pid); 1295EXPORT_SYMBOL(kill_pid);
1313 1296
1314int 1297int
1315kill_pg(pid_t pgrp, int sig, int priv)
1316{
1317 return kill_pg_info(sig, __si_special(priv), pgrp);
1318}
1319
1320int
1321kill_proc(pid_t pid, int sig, int priv) 1298kill_proc(pid_t pid, int sig, int priv)
1322{ 1299{
1323 return kill_proc_info(sig, __si_special(priv), pid); 1300 return kill_proc_info(sig, __si_special(priv), pid);
@@ -1906,7 +1883,7 @@ relock:
1906 1883
1907 /* signals can be posted during this window */ 1884 /* signals can be posted during this window */
1908 1885
1909 if (is_orphaned_pgrp(process_group(current))) 1886 if (is_current_pgrp_orphaned())
1910 goto relock; 1887 goto relock;
1911 1888
1912 spin_lock_irq(&current->sighand->siglock); 1889 spin_lock_irq(&current->sighand->siglock);
@@ -1956,7 +1933,6 @@ EXPORT_SYMBOL(recalc_sigpending);
1956EXPORT_SYMBOL_GPL(dequeue_signal); 1933EXPORT_SYMBOL_GPL(dequeue_signal);
1957EXPORT_SYMBOL(flush_signals); 1934EXPORT_SYMBOL(flush_signals);
1958EXPORT_SYMBOL(force_sig); 1935EXPORT_SYMBOL(force_sig);
1959EXPORT_SYMBOL(kill_pg);
1960EXPORT_SYMBOL(kill_proc); 1936EXPORT_SYMBOL(kill_proc);
1961EXPORT_SYMBOL(ptrace_notify); 1937EXPORT_SYMBOL(ptrace_notify);
1962EXPORT_SYMBOL(send_sig); 1938EXPORT_SYMBOL(send_sig);
@@ -2283,7 +2259,7 @@ static int do_tkill(int tgid, int pid, int sig)
2283 * @pid: the PID of the thread 2259 * @pid: the PID of the thread
2284 * @sig: signal to be sent 2260 * @sig: signal to be sent
2285 * 2261 *
2286 * This syscall also checks the tgid and returns -ESRCH even if the PID 2262 * This syscall also checks the @tgid and returns -ESRCH even if the PID
2287 * exists but it's not belonging to the target process anymore. This 2263 * exists but it's not belonging to the target process anymore. This
2288 * method solves the problem of threads exiting and PIDs getting reused. 2264 * method solves the problem of threads exiting and PIDs getting reused.
2289 */ 2265 */
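
With kill_pg() and kill_pg_info() gone from signal.c, any remaining
pid_t-based caller converts by resolving a struct pid under RCU first, as
kill_something_info() now does. A conversion sketch (hypothetical wrapper;
the callee names are from the diff):

static int kill_pgrp_nr_sketch(int sig, int priv, pid_t pgrp)
{
        int ret;

        rcu_read_lock();
        ret = kill_pgrp_info(sig, __si_special(priv), find_pid(pgrp));
        rcu_read_unlock();
        return ret;
}
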
diff --git a/kernel/sys.c b/kernel/sys.c
index 6e2101dec0fc..123b165080e6 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -215,7 +215,7 @@ EXPORT_SYMBOL_GPL(atomic_notifier_chain_unregister);
215 * This routine uses RCU to synchronize with changes to the chain. 215 * This routine uses RCU to synchronize with changes to the chain.
216 * 216 *
217 * If the return value of the notifier can be and'ed 217 * If the return value of the notifier can be and'ed
218 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain 218 * with %NOTIFY_STOP_MASK then atomic_notifier_call_chain()
219 * will return immediately, with the return value of 219 * will return immediately, with the return value of
220 * the notifier function which halted execution. 220 * the notifier function which halted execution.
221 * Otherwise the return value is the return value 221 * Otherwise the return value is the return value
@@ -313,7 +313,7 @@ EXPORT_SYMBOL_GPL(blocking_notifier_chain_unregister);
313 * run in a process context, so they are allowed to block. 313 * run in a process context, so they are allowed to block.
314 * 314 *
315 * If the return value of the notifier can be and'ed 315 * If the return value of the notifier can be and'ed
316 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain 316 * with %NOTIFY_STOP_MASK then blocking_notifier_call_chain()
317 * will return immediately, with the return value of 317 * will return immediately, with the return value of
318 * the notifier function which halted execution. 318 * the notifier function which halted execution.
319 * Otherwise the return value is the return value 319 * Otherwise the return value is the return value
@@ -393,7 +393,7 @@ EXPORT_SYMBOL_GPL(raw_notifier_chain_unregister);
393 * All locking must be provided by the caller. 393 * All locking must be provided by the caller.
394 * 394 *
395 * If the return value of the notifier can be and'ed 395 * If the return value of the notifier can be and'ed
396 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain 396 * with %NOTIFY_STOP_MASK then raw_notifier_call_chain()
397 * will return immediately, with the return value of 397 * will return immediately, with the return value of
398 * the notifier function which halted execution. 398 * the notifier function which halted execution.
399 * Otherwise the return value is the return value 399 * Otherwise the return value is the return value
@@ -487,7 +487,7 @@ EXPORT_SYMBOL_GPL(srcu_notifier_chain_unregister);
487 * run in a process context, so they are allowed to block. 487 * run in a process context, so they are allowed to block.
488 * 488 *
489 * If the return value of the notifier can be and'ed 489 * If the return value of the notifier can be and'ed
490 * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain 490 * with %NOTIFY_STOP_MASK then srcu_notifier_call_chain()
491 * will return immediately, with the return value of 491 * will return immediately, with the return value of
492 * the notifier function which halted execution. 492 * the notifier function which halted execution.
493 * Otherwise the return value is the return value 493 * Otherwise the return value is the return value
@@ -538,7 +538,7 @@ EXPORT_SYMBOL_GPL(srcu_init_notifier_head);
538 * Registers a function with the list of functions 538 * Registers a function with the list of functions
539 * to be called at reboot time. 539 * to be called at reboot time.
540 * 540 *
541 * Currently always returns zero, as blocking_notifier_chain_register 541 * Currently always returns zero, as blocking_notifier_chain_register()
542 * always returns zero. 542 * always returns zero.
543 */ 543 */
544 544
@@ -596,6 +596,7 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
596 struct task_struct *g, *p; 596 struct task_struct *g, *p;
597 struct user_struct *user; 597 struct user_struct *user;
598 int error = -EINVAL; 598 int error = -EINVAL;
599 struct pid *pgrp;
599 600
600 if (which > 2 || which < 0) 601 if (which > 2 || which < 0)
601 goto out; 602 goto out;
@@ -610,18 +611,21 @@ asmlinkage long sys_setpriority(int which, int who, int niceval)
610 read_lock(&tasklist_lock); 611 read_lock(&tasklist_lock);
611 switch (which) { 612 switch (which) {
612 case PRIO_PROCESS: 613 case PRIO_PROCESS:
613 if (!who) 614 if (who)
614 who = current->pid; 615 p = find_task_by_pid(who);
615 p = find_task_by_pid(who); 616 else
617 p = current;
616 if (p) 618 if (p)
617 error = set_one_prio(p, niceval, error); 619 error = set_one_prio(p, niceval, error);
618 break; 620 break;
619 case PRIO_PGRP: 621 case PRIO_PGRP:
620 if (!who) 622 if (who)
621 who = process_group(current); 623 pgrp = find_pid(who);
622 do_each_task_pid(who, PIDTYPE_PGID, p) { 624 else
625 pgrp = task_pgrp(current);
626 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
623 error = set_one_prio(p, niceval, error); 627 error = set_one_prio(p, niceval, error);
624 } while_each_task_pid(who, PIDTYPE_PGID, p); 628 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
625 break; 629 break;
626 case PRIO_USER: 630 case PRIO_USER:
627 user = current->user; 631 user = current->user;
@@ -656,6 +660,7 @@ asmlinkage long sys_getpriority(int which, int who)
656 struct task_struct *g, *p; 660 struct task_struct *g, *p;
657 struct user_struct *user; 661 struct user_struct *user;
658 long niceval, retval = -ESRCH; 662 long niceval, retval = -ESRCH;
663 struct pid *pgrp;
659 664
660 if (which > 2 || which < 0) 665 if (which > 2 || which < 0)
661 return -EINVAL; 666 return -EINVAL;
@@ -663,9 +668,10 @@ asmlinkage long sys_getpriority(int which, int who)
663 read_lock(&tasklist_lock); 668 read_lock(&tasklist_lock);
664 switch (which) { 669 switch (which) {
665 case PRIO_PROCESS: 670 case PRIO_PROCESS:
666 if (!who) 671 if (who)
667 who = current->pid; 672 p = find_task_by_pid(who);
668 p = find_task_by_pid(who); 673 else
674 p = current;
669 if (p) { 675 if (p) {
670 niceval = 20 - task_nice(p); 676 niceval = 20 - task_nice(p);
671 if (niceval > retval) 677 if (niceval > retval)
@@ -673,13 +679,15 @@ asmlinkage long sys_getpriority(int which, int who)
673 } 679 }
674 break; 680 break;
675 case PRIO_PGRP: 681 case PRIO_PGRP:
676 if (!who) 682 if (who)
677 who = process_group(current); 683 pgrp = find_pid(who);
678 do_each_task_pid(who, PIDTYPE_PGID, p) { 684 else
685 pgrp = task_pgrp(current);
686 do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
679 niceval = 20 - task_nice(p); 687 niceval = 20 - task_nice(p);
680 if (niceval > retval) 688 if (niceval > retval)
681 retval = niceval; 689 retval = niceval;
682 } while_each_task_pid(who, PIDTYPE_PGID, p); 690 } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
683 break; 691 break;
684 case PRIO_USER: 692 case PRIO_USER:
685 user = current->user; 693 user = current->user;
@@ -1388,7 +1396,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
1388 1396
1389 if (p->real_parent == group_leader) { 1397 if (p->real_parent == group_leader) {
1390 err = -EPERM; 1398 err = -EPERM;
1391 if (process_session(p) != process_session(group_leader)) 1399 if (task_session(p) != task_session(group_leader))
1392 goto out; 1400 goto out;
1393 err = -EACCES; 1401 err = -EACCES;
1394 if (p->did_exec) 1402 if (p->did_exec)
@@ -1407,7 +1415,7 @@ asmlinkage long sys_setpgid(pid_t pid, pid_t pgid)
1407 struct task_struct *g = 1415 struct task_struct *g =
1408 find_task_by_pid_type(PIDTYPE_PGID, pgid); 1416 find_task_by_pid_type(PIDTYPE_PGID, pgid);
1409 1417
1410 if (!g || process_session(g) != process_session(group_leader)) 1418 if (!g || task_session(g) != task_session(group_leader))
1411 goto out; 1419 goto out;
1412 } 1420 }
1413 1421
@@ -1510,7 +1518,6 @@ asmlinkage long sys_setsid(void)
1510 1518
1511 spin_lock(&group_leader->sighand->siglock); 1519 spin_lock(&group_leader->sighand->siglock);
1512 group_leader->signal->tty = NULL; 1520 group_leader->signal->tty = NULL;
1513 group_leader->signal->tty_old_pgrp = 0;
1514 spin_unlock(&group_leader->sighand->siglock); 1521 spin_unlock(&group_leader->sighand->siglock);
1515 1522
1516 err = process_group(group_leader); 1523 err = process_group(group_leader);
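
The sys.c hunks repeat the same conversion in both priority syscalls, so the
shape is worth isolating: resolve the target group to a struct pid once,
then walk it with the struct-pid iterators. A fragment sketch (names from
the diff; tasklist_lock is held by the caller, as in sys_setpriority()
above):

static int prio_pgrp_sketch(pid_t who, int niceval, int error)
{
        struct task_struct *p;
        struct pid *pgrp;

        pgrp = who ? find_pid(who) : task_pgrp(current);
        do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
                error = set_one_prio(p, niceval, error);
        } while_each_pid_task(pgrp, PIDTYPE_PGID, p);
        return error;
}
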
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 600b33358ded..e0ac6cd79fcf 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -151,6 +151,8 @@ static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen,
151#ifdef CONFIG_PROC_SYSCTL 151#ifdef CONFIG_PROC_SYSCTL
152static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp, 152static int proc_do_cad_pid(ctl_table *table, int write, struct file *filp,
153 void __user *buffer, size_t *lenp, loff_t *ppos); 153 void __user *buffer, size_t *lenp, loff_t *ppos);
154static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp,
155 void __user *buffer, size_t *lenp, loff_t *ppos);
154#endif 156#endif
155 157
156static ctl_table root_table[]; 158static ctl_table root_table[];
@@ -174,6 +176,7 @@ extern ctl_table inotify_table[];
174int sysctl_legacy_va_layout; 176int sysctl_legacy_va_layout;
175#endif 177#endif
176 178
179
177static void *get_uts(ctl_table *table, int write) 180static void *get_uts(ctl_table *table, int write)
178{ 181{
179 char *which = table->data; 182 char *which = table->data;
@@ -344,14 +347,16 @@ static ctl_table kern_table[] = {
344 .proc_handler = &proc_dostring, 347 .proc_handler = &proc_dostring,
345 .strategy = &sysctl_string, 348 .strategy = &sysctl_string,
346 }, 349 },
350#ifdef CONFIG_PROC_SYSCTL
347 { 351 {
348 .ctl_name = KERN_TAINTED, 352 .ctl_name = KERN_TAINTED,
349 .procname = "tainted", 353 .procname = "tainted",
350 .data = &tainted, 354 .data = &tainted,
351 .maxlen = sizeof(int), 355 .maxlen = sizeof(int),
352 .mode = 0444, 356 .mode = 0644,
353 .proc_handler = &proc_dointvec, 357 .proc_handler = &proc_dointvec_taint,
354 }, 358 },
359#endif
355 { 360 {
356 .ctl_name = KERN_CAP_BSET, 361 .ctl_name = KERN_CAP_BSET,
357 .procname = "cap-bound", 362 .procname = "cap-bound",
@@ -1681,13 +1686,12 @@ static int _proc_do_string(void* data, int maxlen, int write,
1681 size_t len; 1686 size_t len;
1682 char __user *p; 1687 char __user *p;
1683 char c; 1688 char c;
1684 1689
1685 if (!data || !maxlen || !*lenp || 1690 if (!data || !maxlen || !*lenp) {
1686 (*ppos && !write)) {
1687 *lenp = 0; 1691 *lenp = 0;
1688 return 0; 1692 return 0;
1689 } 1693 }
1690 1694
1691 if (write) { 1695 if (write) {
1692 len = 0; 1696 len = 0;
1693 p = buffer; 1697 p = buffer;
@@ -1708,6 +1712,15 @@ static int _proc_do_string(void* data, int maxlen, int write,
1708 len = strlen(data); 1712 len = strlen(data);
1709 if (len > maxlen) 1713 if (len > maxlen)
1710 len = maxlen; 1714 len = maxlen;
1715
1716 if (*ppos > len) {
1717 *lenp = 0;
1718 return 0;
1719 }
1720
1721 data += *ppos;
1722 len -= *ppos;
1723
1711 if (len > *lenp) 1724 if (len > *lenp)
1712 len = *lenp; 1725 len = *lenp;
1713 if (len) 1726 if (len)
@@ -1927,6 +1940,7 @@ int proc_dointvec(ctl_table *table, int write, struct file *filp,
1927 1940
1928#define OP_SET 0 1941#define OP_SET 0
1929#define OP_AND 1 1942#define OP_AND 1
1943#define OP_OR 2
1930 1944
1931static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp, 1945static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
1932 int *valp, 1946 int *valp,
@@ -1938,6 +1952,7 @@ static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
1938 switch(op) { 1952 switch(op) {
1939 case OP_SET: *valp = val; break; 1953 case OP_SET: *valp = val; break;
1940 case OP_AND: *valp &= val; break; 1954 case OP_AND: *valp &= val; break;
1955 case OP_OR: *valp |= val; break;
1941 } 1956 }
1942 } else { 1957 } else {
1943 int val = *valp; 1958 int val = *valp;
@@ -1961,7 +1976,7 @@ int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
1961{ 1976{
1962 int op; 1977 int op;
1963 1978
1964 if (!capable(CAP_SYS_MODULE)) { 1979 if (write && !capable(CAP_SYS_MODULE)) {
1965 return -EPERM; 1980 return -EPERM;
1966 } 1981 }
1967 1982
@@ -1970,6 +1985,22 @@ int proc_dointvec_bset(ctl_table *table, int write, struct file *filp,
1970 do_proc_dointvec_bset_conv,&op); 1985 do_proc_dointvec_bset_conv,&op);
1971} 1986}
1972 1987
1988/*
1989 * Taint values can only be increased
1990 */
1991static int proc_dointvec_taint(ctl_table *table, int write, struct file *filp,
1992 void __user *buffer, size_t *lenp, loff_t *ppos)
1993{
1994 int op;
1995
1996 if (!capable(CAP_SYS_ADMIN))
1997 return -EPERM;
1998
1999 op = OP_OR;
2000 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos,
2001 do_proc_dointvec_bset_conv,&op);
2002}
2003
1973struct do_proc_dointvec_minmax_conv_param { 2004struct do_proc_dointvec_minmax_conv_param {
1974 int *min; 2005 int *min;
1975 int *max; 2006 int *max;
@@ -2553,17 +2584,23 @@ int sysctl_jiffies(ctl_table *table, int __user *name, int nlen,
2553 void __user *oldval, size_t __user *oldlenp, 2584 void __user *oldval, size_t __user *oldlenp,
2554 void __user *newval, size_t newlen) 2585 void __user *newval, size_t newlen)
2555{ 2586{
2556 if (oldval) { 2587 if (oldval && oldlenp) {
2557 size_t olen; 2588 size_t olen;
2558 if (oldlenp) { 2589
2559 if (get_user(olen, oldlenp)) 2590 if (get_user(olen, oldlenp))
2591 return -EFAULT;
2592 if (olen) {
2593 int val;
2594
2595 if (olen < sizeof(int))
2596 return -EINVAL;
2597
2598 val = *(int *)(table->data) / HZ;
2599 if (put_user(val, (int __user *)oldval))
2600 return -EFAULT;
2601 if (put_user(sizeof(int), oldlenp))
2560 return -EFAULT; 2602 return -EFAULT;
2561 if (olen!=sizeof(int))
2562 return -EINVAL;
2563 } 2603 }
2564 if (put_user(*(int *)(table->data)/HZ, (int __user *)oldval) ||
2565 (oldlenp && put_user(sizeof(int),oldlenp)))
2566 return -EFAULT;
2567 } 2604 }
2568 if (newval && newlen) { 2605 if (newval && newlen) {
2569 int new; 2606 int new;
@@ -2581,17 +2618,23 @@ int sysctl_ms_jiffies(ctl_table *table, int __user *name, int nlen,
2581 void __user *oldval, size_t __user *oldlenp, 2618 void __user *oldval, size_t __user *oldlenp,
2582 void __user *newval, size_t newlen) 2619 void __user *newval, size_t newlen)
2583{ 2620{
2584 if (oldval) { 2621 if (oldval && oldlenp) {
2585 size_t olen; 2622 size_t olen;
2586 if (oldlenp) { 2623
2587 if (get_user(olen, oldlenp)) 2624 if (get_user(olen, oldlenp))
2625 return -EFAULT;
2626 if (olen) {
2627 int val;
2628
2629 if (olen < sizeof(int))
2630 return -EINVAL;
2631
2632 val = jiffies_to_msecs(*(int *)(table->data));
2633 if (put_user(val, (int __user *)oldval))
2634 return -EFAULT;
2635 if (put_user(sizeof(int), oldlenp))
2588 return -EFAULT; 2636 return -EFAULT;
2589 if (olen!=sizeof(int))
2590 return -EINVAL;
2591 } 2637 }
2592 if (put_user(jiffies_to_msecs(*(int *)(table->data)), (int __user *)oldval) ||
2593 (oldlenp && put_user(sizeof(int),oldlenp)))
2594 return -EFAULT;
2595 } 2638 }
2596 if (newval && newlen) { 2639 if (newval && newlen) {
2597 int new; 2640 int new;
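
Both sysctl_jiffies() and sysctl_ms_jiffies() get the same restructuring of the binary-sysctl read path: oldlenp is now required alongside oldval, a zero olen is treated as "no data wanted" rather than an error, any olen of at least sizeof(int) is accepted (previously it had to match exactly), and the length actually written is reported back. A sketch of the shared shape with the unit conversion factored out — read_converted_int() is not a kernel function, and error handling is simplified relative to the kernel's strategy-routine conventions:

static int read_converted_int(int converted, void __user *oldval,
                              size_t __user *oldlenp)
{
        size_t olen;

        if (!oldval || !oldlenp)        /* nothing to report */
                return 0;
        if (get_user(olen, oldlenp))
                return -EFAULT;
        if (!olen)                      /* zero-length read is a no-op */
                return 0;
        if (olen < sizeof(int))         /* was olen != sizeof(int) before */
                return -EINVAL;
        if (put_user(converted, (int __user *)oldval) ||
            put_user(sizeof(int), oldlenp))
                return -EFAULT;
        return 0;
}

sysctl_jiffies() feeds this path *(int *)table->data / HZ; sysctl_ms_jiffies() feeds it jiffies_to_msecs(*(int *)table->data).
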
@@ -2732,12 +2775,14 @@ static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
2732{ 2775{
2733 return -ENOSYS; 2776 return -ENOSYS;
2734} 2777}
2778#ifdef CONFIG_SYSVIPC
2735static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen, 2779static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen,
2736 void __user *oldval, size_t __user *oldlenp, 2780 void __user *oldval, size_t __user *oldlenp,
2737 void __user *newval, size_t newlen) 2781 void __user *newval, size_t newlen)
2738{ 2782{
2739 return -ENOSYS; 2783 return -ENOSYS;
2740} 2784}
2785#endif
2741#endif /* CONFIG_SYSCTL_SYSCALL */ 2786#endif /* CONFIG_SYSCTL_SYSCALL */
2742 2787
2743/* 2788/*
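
The CONFIG_SYSVIPC guard around the sysctl_ipc_data() stub keeps the !CONFIG_SYSCTL_SYSCALL fallback in step with the option that gates the code referencing it, presumably so that SysV-IPC-less builds do not see a defined-but-unused static function. The pattern, using the signature shown in the hunk:

#ifdef CONFIG_SYSVIPC
static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen,
                           void __user *oldval, size_t __user *oldlenp,
                           void __user *newval, size_t newlen)
{
        return -ENOSYS;         /* binary-sysctl path unimplemented */
}
#endif
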
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 22504afc0d34..d9ef176c4e09 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -28,6 +28,7 @@
28#include <linux/sysdev.h> 28#include <linux/sysdev.h>
29#include <linux/init.h> 29#include <linux/init.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/sched.h> /* for spin_unlock_irq() using preempt_count() m68k */
31 32
32/* XXX - Would like a better way for initializing curr_clocksource */ 33/* XXX - Would like a better way for initializing curr_clocksource */
33extern struct clocksource clocksource_jiffies; 34extern struct clocksource clocksource_jiffies;
diff --git a/kernel/timer.c b/kernel/timer.c
index c2a8ccfc2882..8533c3796082 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -85,7 +85,7 @@ static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
85 * @j: the time in (absolute) jiffies that should be rounded 85 * @j: the time in (absolute) jiffies that should be rounded
86 * @cpu: the processor number on which the timeout will happen 86 * @cpu: the processor number on which the timeout will happen
87 * 87 *
88 * __round_jiffies rounds an absolute time in the future (in jiffies) 88 * __round_jiffies() rounds an absolute time in the future (in jiffies)
89 * up or down to (approximately) full seconds. This is useful for timers 89 * up or down to (approximately) full seconds. This is useful for timers
90 * for which the exact time they fire does not matter too much, as long as 90 * for which the exact time they fire does not matter too much, as long as
91 * they fire approximately every X seconds. 91 * they fire approximately every X seconds.
@@ -98,7 +98,7 @@ static DEFINE_PER_CPU(tvec_base_t *, tvec_bases) = &boot_tvec_bases;
98 * processors firing at the exact same time, which could lead 98 * processors firing at the exact same time, which could lead
99 * to lock contention or spurious cache line bouncing. 99 * to lock contention or spurious cache line bouncing.
100 * 100 *
101 * The return value is the rounded version of the "j" parameter. 101 * The return value is the rounded version of the @j parameter.
102 */ 102 */
103unsigned long __round_jiffies(unsigned long j, int cpu) 103unsigned long __round_jiffies(unsigned long j, int cpu)
104{ 104{
@@ -142,7 +142,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
142 * @j: the time in (relative) jiffies that should be rounded 142 * @j: the time in (relative) jiffies that should be rounded
143 * @cpu: the processor number on which the timeout will happen 143 * @cpu: the processor number on which the timeout will happen
144 * 144 *
145 * __round_jiffies_relative rounds a time delta in the future (in jiffies) 145 * __round_jiffies_relative() rounds a time delta in the future (in jiffies)
146 * up or down to (approximately) full seconds. This is useful for timers 146 * up or down to (approximately) full seconds. This is useful for timers
147 * for which the exact time they fire does not matter too much, as long as 147 * for which the exact time they fire does not matter too much, as long as
148 * they fire approximately every X seconds. 148 * they fire approximately every X seconds.
@@ -155,7 +155,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies);
155 * processors firing at the exact same time, which could lead 155 * processors firing at the exact same time, which could lead
156 * to lock contention or spurious cache line bouncing. 156 * to lock contention or spurious cache line bouncing.
157 * 157 *
158 * The return value is the rounded version of the "j" parameter. 158 * The return value is the rounded version of the @j parameter.
159 */ 159 */
160unsigned long __round_jiffies_relative(unsigned long j, int cpu) 160unsigned long __round_jiffies_relative(unsigned long j, int cpu)
161{ 161{
@@ -173,7 +173,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
173 * round_jiffies - function to round jiffies to a full second 173 * round_jiffies - function to round jiffies to a full second
174 * @j: the time in (absolute) jiffies that should be rounded 174 * @j: the time in (absolute) jiffies that should be rounded
175 * 175 *
176 * round_jiffies rounds an absolute time in the future (in jiffies) 176 * round_jiffies() rounds an absolute time in the future (in jiffies)
177 * up or down to (approximately) full seconds. This is useful for timers 177 * up or down to (approximately) full seconds. This is useful for timers
178 * for which the exact time they fire does not matter too much, as long as 178 * for which the exact time they fire does not matter too much, as long as
179 * they fire approximately every X seconds. 179 * they fire approximately every X seconds.
@@ -182,7 +182,7 @@ EXPORT_SYMBOL_GPL(__round_jiffies_relative);
182 * at the same time, rather than at various times spread out. The goal 182 * at the same time, rather than at various times spread out. The goal
183 * of this is to have the CPU wake up less, which saves power. 183 * of this is to have the CPU wake up less, which saves power.
184 * 184 *
185 * The return value is the rounded version of the "j" parameter. 185 * The return value is the rounded version of the @j parameter.
186 */ 186 */
187unsigned long round_jiffies(unsigned long j) 187unsigned long round_jiffies(unsigned long j)
188{ 188{
@@ -194,7 +194,7 @@ EXPORT_SYMBOL_GPL(round_jiffies);
194 * round_jiffies_relative - function to round jiffies to a full second 194 * round_jiffies_relative - function to round jiffies to a full second
195 * @j: the time in (relative) jiffies that should be rounded 195 * @j: the time in (relative) jiffies that should be rounded
196 * 196 *
197 * round_jiffies_relative rounds a time delta in the future (in jiffies) 197 * round_jiffies_relative() rounds a time delta in the future (in jiffies)
198 * up or down to (approximately) full seconds. This is useful for timers 198 * up or down to (approximately) full seconds. This is useful for timers
199 * for which the exact time they fire does not matter too much, as long as 199 * for which the exact time they fire does not matter too much, as long as
200 * they fire approximately every X seconds. 200 * they fire approximately every X seconds.
@@ -203,7 +203,7 @@ EXPORT_SYMBOL_GPL(round_jiffies);
203 * at the same time, rather than at various times spread out. The goal 203 * at the same time, rather than at various times spread out. The goal
204 * of this is to have the CPU wake up less, which saves power. 204 * of this is to have the CPU wake up less, which saves power.
205 * 205 *
206 * The return value is the rounded version of the "j" parameter. 206 * The return value is the rounded version of the @j parameter.
207 */ 207 */
208unsigned long round_jiffies_relative(unsigned long j) 208unsigned long round_jiffies_relative(unsigned long j)
209{ 209{
@@ -387,7 +387,7 @@ void add_timer_on(struct timer_list *timer, int cpu)
387 * @timer: the timer to be modified 387 * @timer: the timer to be modified
388 * @expires: new timeout in jiffies 388 * @expires: new timeout in jiffies
389 * 389 *
390 * mod_timer is a more efficient way to update the expire field of an 390 * mod_timer() is a more efficient way to update the expire field of an
391 * active timer (if the timer is inactive it will be activated) 391 * active timer (if the timer is inactive it will be activated)
392 * 392 *
393 * mod_timer(timer, expires) is equivalent to: 393 * mod_timer(timer, expires) is equivalent to:
@@ -490,7 +490,7 @@ out:
490 * the timer it also makes sure the handler has finished executing on other 490 * the timer it also makes sure the handler has finished executing on other
491 * CPUs. 491 * CPUs.
492 * 492 *
493 * Synchronization rules: callers must prevent restarting of the timer, 493 * Synchronization rules: Callers must prevent restarting of the timer,
494 * otherwise this function is meaningless. It must not be called from 494 * otherwise this function is meaningless. It must not be called from
495 * interrupt contexts. The caller must not hold locks which would prevent 495 * interrupt contexts. The caller must not hold locks which would prevent
496 * completion of the timer's handler. The timer's handler must not call 496 * completion of the timer's handler. The timer's handler must not call
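
The timer.c hunks above are kernel-doc markup fixes rather than code changes: function names in running text get trailing parentheses and parameters are written @name, which is what scripts/kernel-doc keys on to generate cross-references. The convention condensed into one comment:

/**
 * round_jiffies - function to round jiffies to a full second
 * @j: the time in (absolute) jiffies that should be rounded
 *
 * round_jiffies() rounds @j up or down to (approximately) a full
 * second.  The trailing () and the leading @ are what kernel-doc
 * turns into markup; bare "round_jiffies" or "j" would be emitted
 * as plain text.
 */
unsigned long round_jiffies(unsigned long j);
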
@@ -1392,17 +1392,16 @@ asmlinkage long sys_gettid(void)
1392} 1392}
1393 1393
1394/** 1394/**
1395 * sys_sysinfo - fill in sysinfo struct 1395 * do_sysinfo - fill in sysinfo struct
1396 * @info: pointer to buffer to fill 1396 * @info: pointer to buffer to fill
1397 */ 1397 */
1398asmlinkage long sys_sysinfo(struct sysinfo __user *info) 1398int do_sysinfo(struct sysinfo *info)
1399{ 1399{
1400 struct sysinfo val;
1401 unsigned long mem_total, sav_total; 1400 unsigned long mem_total, sav_total;
1402 unsigned int mem_unit, bitcount; 1401 unsigned int mem_unit, bitcount;
1403 unsigned long seq; 1402 unsigned long seq;
1404 1403
1405 memset((char *)&val, 0, sizeof(struct sysinfo)); 1404 memset(info, 0, sizeof(struct sysinfo));
1406 1405
1407 do { 1406 do {
1408 struct timespec tp; 1407 struct timespec tp;
@@ -1422,17 +1421,17 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
1422 tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC; 1421 tp.tv_nsec = tp.tv_nsec - NSEC_PER_SEC;
1423 tp.tv_sec++; 1422 tp.tv_sec++;
1424 } 1423 }
1425 val.uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0); 1424 info->uptime = tp.tv_sec + (tp.tv_nsec ? 1 : 0);
1426 1425
1427 val.loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT); 1426 info->loads[0] = avenrun[0] << (SI_LOAD_SHIFT - FSHIFT);
1428 val.loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT); 1427 info->loads[1] = avenrun[1] << (SI_LOAD_SHIFT - FSHIFT);
1429 val.loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT); 1428 info->loads[2] = avenrun[2] << (SI_LOAD_SHIFT - FSHIFT);
1430 1429
1431 val.procs = nr_threads; 1430 info->procs = nr_threads;
1432 } while (read_seqretry(&xtime_lock, seq)); 1431 } while (read_seqretry(&xtime_lock, seq));
1433 1432
1434 si_meminfo(&val); 1433 si_meminfo(info);
1435 si_swapinfo(&val); 1434 si_swapinfo(info);
1436 1435
1437 /* 1436 /*
1438 * If the sum of all the available memory (i.e. ram + swap) 1437 * If the sum of all the available memory (i.e. ram + swap)
@@ -1443,11 +1442,11 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
1443 * -Erik Andersen <andersee@debian.org> 1442 * -Erik Andersen <andersee@debian.org>
1444 */ 1443 */
1445 1444
1446 mem_total = val.totalram + val.totalswap; 1445 mem_total = info->totalram + info->totalswap;
1447 if (mem_total < val.totalram || mem_total < val.totalswap) 1446 if (mem_total < info->totalram || mem_total < info->totalswap)
1448 goto out; 1447 goto out;
1449 bitcount = 0; 1448 bitcount = 0;
1450 mem_unit = val.mem_unit; 1449 mem_unit = info->mem_unit;
1451 while (mem_unit > 1) { 1450 while (mem_unit > 1) {
1452 bitcount++; 1451 bitcount++;
1453 mem_unit >>= 1; 1452 mem_unit >>= 1;
@@ -1459,22 +1458,31 @@ asmlinkage long sys_sysinfo(struct sysinfo __user *info)
1459 1458
1460 /* 1459 /*
1461 * If mem_total did not overflow, multiply all memory values by 1460 * If mem_total did not overflow, multiply all memory values by
1462 * val.mem_unit and set it to 1. This leaves things compatible 1461 * info->mem_unit and set it to 1. This leaves things compatible
1463 * with 2.2.x, and also retains compatibility with earlier 2.4.x 1462 * with 2.2.x, and also retains compatibility with earlier 2.4.x
1464 * kernels... 1463 * kernels...
1465 */ 1464 */
1466 1465
1467 val.mem_unit = 1; 1466 info->mem_unit = 1;
1468 val.totalram <<= bitcount; 1467 info->totalram <<= bitcount;
1469 val.freeram <<= bitcount; 1468 info->freeram <<= bitcount;
1470 val.sharedram <<= bitcount; 1469 info->sharedram <<= bitcount;
1471 val.bufferram <<= bitcount; 1470 info->bufferram <<= bitcount;
1472 val.totalswap <<= bitcount; 1471 info->totalswap <<= bitcount;
1473 val.freeswap <<= bitcount; 1472 info->freeswap <<= bitcount;
1474 val.totalhigh <<= bitcount; 1473 info->totalhigh <<= bitcount;
1475 val.freehigh <<= bitcount; 1474 info->freehigh <<= bitcount;
1475
1476out:
1477 return 0;
1478}
1479
1480asmlinkage long sys_sysinfo(struct sysinfo __user *info)
1481{
1482 struct sysinfo val;
1483
1484 do_sysinfo(&val);
1476 1485
1477 out:
1478 if (copy_to_user(info, &val, sizeof(struct sysinfo))) 1486 if (copy_to_user(info, &val, sizeof(struct sysinfo)))
1479 return -EFAULT; 1487 return -EFAULT;
1480 1488
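
Splitting sys_sysinfo() into do_sysinfo() plus a thin copy_to_user() wrapper means the fill logic now operates on a kernel-space struct and can be reused by in-kernel callers, a compat syscall being the natural beneficiary. A hypothetical reuse to show the point — kernel_uptime_seconds() is illustrative, not part of the patch:

/* Hypothetical in-kernel consumer of the new do_sysinfo(): no
 * __user buffer and no copy_to_user() needed. */
static unsigned long kernel_uptime_seconds(void)
{
        struct sysinfo si;

        do_sysinfo(&si);        /* fills si entirely in kernel space */
        return si.uptime;
}
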
@@ -1624,7 +1632,7 @@ struct time_interpolator *time_interpolator __read_mostly;
1624static struct time_interpolator *time_interpolator_list __read_mostly; 1632static struct time_interpolator *time_interpolator_list __read_mostly;
1625static DEFINE_SPINLOCK(time_interpolator_lock); 1633static DEFINE_SPINLOCK(time_interpolator_lock);
1626 1634
1627static inline u64 time_interpolator_get_cycles(unsigned int src) 1635static inline cycles_t time_interpolator_get_cycles(unsigned int src)
1628{ 1636{
1629 unsigned long (*x)(void); 1637 unsigned long (*x)(void);
1630 1638
@@ -1650,8 +1658,8 @@ static inline u64 time_interpolator_get_counter(int writelock)
1650 1658
1651 if (time_interpolator->jitter) 1659 if (time_interpolator->jitter)
1652 { 1660 {
1653 u64 lcycle; 1661 cycles_t lcycle;
1654 u64 now; 1662 cycles_t now;
1655 1663
1656 do { 1664 do {
1657 lcycle = time_interpolator->last_cycle; 1665 lcycle = time_interpolator->last_cycle;
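
The time-interpolator hunk swaps hard-coded u64 for cycles_t, the architecture-defined type that get_cycles() returns, so the cycle bookkeeping matches the counter's native width. The idea in isolation, as a sketch:

/* Store cycle counts in the type the architecture defines for them
 * rather than assuming 64 bits. */
#include <asm/timex.h>          /* cycles_t, get_cycles() */

static cycles_t snapshot;

static void take_snapshot(void)
{
        snapshot = get_cycles();        /* no implicit widening to u64 */
}
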
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index a3da07c5af28..020d1fff57dc 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -656,8 +656,7 @@ void flush_scheduled_work(void)
656EXPORT_SYMBOL(flush_scheduled_work); 656EXPORT_SYMBOL(flush_scheduled_work);
657 657
658/** 658/**
659 * cancel_rearming_delayed_workqueue - reliably kill off a delayed 659 * cancel_rearming_delayed_workqueue - reliably kill off a delayed work whose handler rearms the delayed work.
660 * work whose handler rearms the delayed work.
661 * @wq: the controlling workqueue structure 660 * @wq: the controlling workqueue structure
662 * @dwork: the delayed work struct 661 * @dwork: the delayed work struct
663 */ 662 */
@@ -670,8 +669,7 @@ void cancel_rearming_delayed_workqueue(struct workqueue_struct *wq,
670EXPORT_SYMBOL(cancel_rearming_delayed_workqueue); 669EXPORT_SYMBOL(cancel_rearming_delayed_workqueue);
671 670
672/** 671/**
673 * cancel_rearming_delayed_work - reliably kill off a delayed keventd 672 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
674 * work whose handler rearms the delayed work.
675 * @dwork: the delayed work struct 673 * @dwork: the delayed work struct
676 */ 674 */
677void cancel_rearming_delayed_work(struct delayed_work *dwork) 675void cancel_rearming_delayed_work(struct delayed_work *dwork)
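
The two workqueue.c hunks enforce another kernel-doc rule: the short description after the function name must stay on a single line, however long, or the parser treats the continuation as body text. The corrected shape:

/**
 * cancel_rearming_delayed_work - reliably kill off a delayed keventd work whose handler rearms the delayed work.
 * @dwork: the delayed work struct
 */
void cancel_rearming_delayed_work(struct delayed_work *dwork);
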