Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Kconfig.freezer | 2
-rw-r--r--  kernel/Makefile | 3
-rw-r--r--  kernel/acct.c | 2
-rw-r--r--  kernel/auditsc.c | 9
-rw-r--r--  kernel/cgroup.c | 269
-rw-r--r--  kernel/cgroup_debug.c | 4
-rw-r--r--  kernel/cgroup_freezer.c | 379
-rw-r--r--  kernel/compat.c | 111
-rw-r--r--  kernel/configs.c | 9
-rw-r--r--  kernel/cpu.c | 24
-rw-r--r--  kernel/cpuset.c | 19
-rw-r--r--  kernel/dma-coherent.c | 2
-rw-r--r--  kernel/dma.c | 2
-rw-r--r--  kernel/exit.c | 38
-rw-r--r--  kernel/fork.c | 100
-rw-r--r--  kernel/freezer.c | 154
-rw-r--r--  kernel/hrtimer.c | 15
-rw-r--r--  kernel/irq/autoprobe.c | 43
-rw-r--r--  kernel/irq/chip.c | 101
-rw-r--r--  kernel/irq/handle.c | 27
-rw-r--r--  kernel/irq/internals.h | 7
-rw-r--r--  kernel/irq/manage.c | 130
-rw-r--r--  kernel/irq/migration.c | 14
-rw-r--r--  kernel/irq/proc.c | 45
-rw-r--r--  kernel/irq/resend.c | 6
-rw-r--r--  kernel/irq/spurious.c | 162
-rw-r--r--  kernel/itimer.c | 33
-rw-r--r--  kernel/kallsyms.c | 1
-rw-r--r--  kernel/kexec.c | 3
-rw-r--r--  kernel/kmod.c | 67
-rw-r--r--  kernel/kprobes.c | 2
-rw-r--r--  kernel/ksysfs.c | 35
-rw-r--r--  kernel/kthread.c | 10
-rw-r--r--  kernel/marker.c | 36
-rw-r--r--  kernel/module.c | 137
-rw-r--r--  kernel/notifier.c | 2
-rw-r--r--  kernel/panic.c | 67
-rw-r--r--  kernel/posix-cpu-timers.c | 512
-rw-r--r--  kernel/posix-timers.c | 153
-rw-r--r--  kernel/power/disk.c | 11
-rw-r--r--  kernel/power/main.c | 7
-rw-r--r--  kernel/power/process.c | 119
-rw-r--r--  kernel/power/user.c | 10
-rw-r--r--  kernel/printk.c | 42
-rw-r--r--  kernel/profile.c | 41
-rw-r--r--  kernel/ptrace.c | 2
-rw-r--r--  kernel/rcuclassic.c | 337
-rw-r--r--  kernel/rcupreempt.c | 10
-rw-r--r--  kernel/rcupreempt_trace.c | 7
-rw-r--r--  kernel/rcutorture.c | 2
-rw-r--r--  kernel/resource.c | 150
-rw-r--r--  kernel/sched.c | 413
-rw-r--r--  kernel/sched_clock.c | 6
-rw-r--r--  kernel/sched_debug.c | 2
-rw-r--r--  kernel/sched_fair.c | 235
-rw-r--r--  kernel/sched_features.h | 1
-rw-r--r--  kernel/sched_idletask.c | 6
-rw-r--r--  kernel/sched_rt.c | 61
-rw-r--r--  kernel/sched_stats.h | 86
-rw-r--r--  kernel/signal.c | 11
-rw-r--r--  kernel/softirq.c | 152
-rw-r--r--  kernel/softlockup.c | 2
-rw-r--r--  kernel/sys.c | 113
-rw-r--r--  kernel/sys_ni.c | 6
-rw-r--r--  kernel/sysctl.c | 133
-rw-r--r--  kernel/time/Kconfig | 1
-rw-r--r--  kernel/time/clocksource.c | 3
-rw-r--r--  kernel/time/jiffies.c | 1
-rw-r--r--  kernel/time/ntp.c | 93
-rw-r--r--  kernel/time/tick-broadcast.c | 13
-rw-r--r--  kernel/time/tick-internal.h | 2
-rw-r--r--  kernel/time/tick-sched.c | 106
-rw-r--r--  kernel/time/timekeeping.c | 122
-rw-r--r--  kernel/time/timer_list.c | 20
-rw-r--r--  kernel/timer.c | 12
-rw-r--r--  kernel/trace/Kconfig | 64
-rw-r--r--  kernel/trace/Makefile | 4
-rw-r--r--  kernel/trace/ftrace.c | 275
-rw-r--r--  kernel/trace/ring_buffer.c | 2014
-rw-r--r--  kernel/trace/trace.c | 1845
-rw-r--r--  kernel/trace/trace.h | 211
-rw-r--r--  kernel/trace/trace_boot.c | 126
-rw-r--r--  kernel/trace/trace_functions.c | 2
-rw-r--r--  kernel/trace/trace_irqsoff.c | 19
-rw-r--r--  kernel/trace/trace_mmiotrace.c | 116
-rw-r--r--  kernel/trace/trace_nop.c | 64
-rw-r--r--  kernel/trace/trace_sched_switch.c | 137
-rw-r--r--  kernel/trace/trace_sched_wakeup.c | 148
-rw-r--r--  kernel/trace/trace_selftest.c | 83
-rw-r--r--  kernel/trace/trace_stack.c | 310
-rw-r--r--  kernel/trace/trace_sysprof.c | 2
-rw-r--r--  kernel/tracepoint.c | 477
-rw-r--r--  kernel/user.c | 4
-rw-r--r--  kernel/utsname_sysctl.c | 5
-rw-r--r--  kernel/wait.c | 14
-rw-r--r--  kernel/workqueue.c | 2
96 files changed, 7842 insertions(+), 3153 deletions(-)
diff --git a/kernel/Kconfig.freezer b/kernel/Kconfig.freezer
new file mode 100644
index 000000000000..a3bb4cb52539
--- /dev/null
+++ b/kernel/Kconfig.freezer
@@ -0,0 +1,2 @@
+config FREEZER
+	def_bool PM_SLEEP || CGROUP_FREEZER
diff --git a/kernel/Makefile b/kernel/Makefile
index 4e1d7df7c3e2..305f11dbef21 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -24,6 +24,7 @@ CFLAGS_REMOVE_sched_clock.o = -pg
 CFLAGS_REMOVE_sched.o = -mno-spe -pg
 endif
 
+obj-$(CONFIG_FREEZER) += freezer.o
 obj-$(CONFIG_PROFILING) += profile.o
 obj-$(CONFIG_SYSCTL_SYSCALL_CHECK) += sysctl_check.o
 obj-$(CONFIG_STACKTRACE) += stacktrace.o
@@ -55,6 +56,7 @@ obj-$(CONFIG_BACKTRACE_SELF_TEST) += backtracetest.o
 obj-$(CONFIG_COMPAT) += compat.o
 obj-$(CONFIG_CGROUPS) += cgroup.o
 obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o
+obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o
 obj-$(CONFIG_CPUSETS) += cpuset.o
 obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o
 obj-$(CONFIG_UTS_NS) += utsname.o
@@ -83,6 +85,7 @@ obj-$(CONFIG_SYSCTL) += utsname_sysctl.o
 obj-$(CONFIG_TASK_DELAY_ACCT) += delayacct.o
 obj-$(CONFIG_TASKSTATS) += taskstats.o tsacct.o
 obj-$(CONFIG_MARKERS) += marker.o
+obj-$(CONFIG_TRACEPOINTS) += tracepoint.o
 obj-$(CONFIG_LATENCYTOP) += latencytop.o
 obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
 obj-$(CONFIG_FTRACE) += trace/
diff --git a/kernel/acct.c b/kernel/acct.c
index dd68b9059418..f6006a60df5d 100644
--- a/kernel/acct.c
+++ b/kernel/acct.c
@@ -548,7 +548,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
 #endif
 
 	spin_lock_irq(&current->sighand->siglock);
-	tty = current->signal->tty;
+	tty = current->signal->tty;	/* Safe as we hold the siglock */
 	ac.ac_tty = tty ? old_encode_dev(tty_devnum(tty)) : 0;
 	ac.ac_utime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_utime)));
 	ac.ac_stime = encode_comp_t(jiffies_to_AHZ(cputime_to_jiffies(pacct->ac_stime)));
diff --git a/kernel/auditsc.c b/kernel/auditsc.c
index 59cedfb040e7..cf5bc2f5f9c3 100644
--- a/kernel/auditsc.c
+++ b/kernel/auditsc.c
@@ -246,8 +246,8 @@ static int audit_match_perm(struct audit_context *ctx, int mask)
 	unsigned n;
 	if (unlikely(!ctx))
 		return 0;
-
 	n = ctx->major;
+
 	switch (audit_classify_syscall(ctx->arch, n)) {
 	case 0: /* native */
 		if ((mask & AUDIT_PERM_WRITE) &&
@@ -1204,13 +1204,13 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 		 (context->return_valid==AUDITSC_SUCCESS)?"yes":"no",
 		 context->return_code);
 
-	mutex_lock(&tty_mutex);
-	read_lock(&tasklist_lock);
+	spin_lock_irq(&tsk->sighand->siglock);
 	if (tsk->signal && tsk->signal->tty && tsk->signal->tty->name)
 		tty = tsk->signal->tty->name;
 	else
 		tty = "(none)";
-	read_unlock(&tasklist_lock);
+	spin_unlock_irq(&tsk->sighand->siglock);
+
 	audit_log_format(ab,
 		  " a0=%lx a1=%lx a2=%lx a3=%lx items=%d"
 		  " ppid=%d pid=%d auid=%u uid=%u gid=%u"
@@ -1230,7 +1230,6 @@ static void audit_log_exit(struct audit_context *context, struct task_struct *ts
 		  context->egid, context->sgid, context->fsgid, tty,
 		  tsk->sessionid);
 
-	mutex_unlock(&tty_mutex);
 
 	audit_log_task_info(ab, tsk);
 	if (context->filterkey) {
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a0123d75ec9a..046c1609606b 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -241,7 +241,6 @@ static void unlink_css_set(struct css_set *cg)
241 struct cg_cgroup_link *link; 241 struct cg_cgroup_link *link;
242 struct cg_cgroup_link *saved_link; 242 struct cg_cgroup_link *saved_link;
243 243
244 write_lock(&css_set_lock);
245 hlist_del(&cg->hlist); 244 hlist_del(&cg->hlist);
246 css_set_count--; 245 css_set_count--;
247 246
@@ -251,16 +250,25 @@ static void unlink_css_set(struct css_set *cg)
251 list_del(&link->cgrp_link_list); 250 list_del(&link->cgrp_link_list);
252 kfree(link); 251 kfree(link);
253 } 252 }
254
255 write_unlock(&css_set_lock);
256} 253}
257 254
258static void __release_css_set(struct kref *k, int taskexit) 255static void __put_css_set(struct css_set *cg, int taskexit)
259{ 256{
260 int i; 257 int i;
261 struct css_set *cg = container_of(k, struct css_set, ref); 258 /*
262 259 * Ensure that the refcount doesn't hit zero while any readers
260 * can see it. Similar to atomic_dec_and_lock(), but for an
261 * rwlock
262 */
263 if (atomic_add_unless(&cg->refcount, -1, 1))
264 return;
265 write_lock(&css_set_lock);
266 if (!atomic_dec_and_test(&cg->refcount)) {
267 write_unlock(&css_set_lock);
268 return;
269 }
263 unlink_css_set(cg); 270 unlink_css_set(cg);
271 write_unlock(&css_set_lock);
264 272
265 rcu_read_lock(); 273 rcu_read_lock();
266 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) { 274 for (i = 0; i < CGROUP_SUBSYS_COUNT; i++) {
@@ -276,32 +284,22 @@ static void __release_css_set(struct kref *k, int taskexit)
276 kfree(cg); 284 kfree(cg);
277} 285}
278 286
279static void release_css_set(struct kref *k)
280{
281 __release_css_set(k, 0);
282}
283
284static void release_css_set_taskexit(struct kref *k)
285{
286 __release_css_set(k, 1);
287}
288
289/* 287/*
290 * refcounted get/put for css_set objects 288 * refcounted get/put for css_set objects
291 */ 289 */
292static inline void get_css_set(struct css_set *cg) 290static inline void get_css_set(struct css_set *cg)
293{ 291{
294 kref_get(&cg->ref); 292 atomic_inc(&cg->refcount);
295} 293}
296 294
297static inline void put_css_set(struct css_set *cg) 295static inline void put_css_set(struct css_set *cg)
298{ 296{
299 kref_put(&cg->ref, release_css_set); 297 __put_css_set(cg, 0);
300} 298}
301 299
302static inline void put_css_set_taskexit(struct css_set *cg) 300static inline void put_css_set_taskexit(struct css_set *cg)
303{ 301{
304 kref_put(&cg->ref, release_css_set_taskexit); 302 __put_css_set(cg, 1);
305} 303}
306 304
307/* 305/*
@@ -427,7 +425,7 @@ static struct css_set *find_css_set(
427 return NULL; 425 return NULL;
428 } 426 }
429 427
430 kref_init(&res->ref); 428 atomic_set(&res->refcount, 1);
431 INIT_LIST_HEAD(&res->cg_links); 429 INIT_LIST_HEAD(&res->cg_links);
432 INIT_LIST_HEAD(&res->tasks); 430 INIT_LIST_HEAD(&res->tasks);
433 INIT_HLIST_NODE(&res->hlist); 431 INIT_HLIST_NODE(&res->hlist);
@@ -870,6 +868,14 @@ static struct super_operations cgroup_ops = {
870 .remount_fs = cgroup_remount, 868 .remount_fs = cgroup_remount,
871}; 869};
872 870
871static void init_cgroup_housekeeping(struct cgroup *cgrp)
872{
873 INIT_LIST_HEAD(&cgrp->sibling);
874 INIT_LIST_HEAD(&cgrp->children);
875 INIT_LIST_HEAD(&cgrp->css_sets);
876 INIT_LIST_HEAD(&cgrp->release_list);
877 init_rwsem(&cgrp->pids_mutex);
878}
873static void init_cgroup_root(struct cgroupfs_root *root) 879static void init_cgroup_root(struct cgroupfs_root *root)
874{ 880{
875 struct cgroup *cgrp = &root->top_cgroup; 881 struct cgroup *cgrp = &root->top_cgroup;
@@ -878,10 +884,7 @@ static void init_cgroup_root(struct cgroupfs_root *root)
878 root->number_of_cgroups = 1; 884 root->number_of_cgroups = 1;
879 cgrp->root = root; 885 cgrp->root = root;
880 cgrp->top_cgroup = cgrp; 886 cgrp->top_cgroup = cgrp;
881 INIT_LIST_HEAD(&cgrp->sibling); 887 init_cgroup_housekeeping(cgrp);
882 INIT_LIST_HEAD(&cgrp->children);
883 INIT_LIST_HEAD(&cgrp->css_sets);
884 INIT_LIST_HEAD(&cgrp->release_list);
885} 888}
886 889
887static int cgroup_test_super(struct super_block *sb, void *data) 890static int cgroup_test_super(struct super_block *sb, void *data)
@@ -1728,7 +1731,7 @@ int cgroup_task_count(const struct cgroup *cgrp)
1728 1731
1729 read_lock(&css_set_lock); 1732 read_lock(&css_set_lock);
1730 list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) { 1733 list_for_each_entry(link, &cgrp->css_sets, cgrp_link_list) {
1731 count += atomic_read(&link->cg->ref.refcount); 1734 count += atomic_read(&link->cg->refcount);
1732 } 1735 }
1733 read_unlock(&css_set_lock); 1736 read_unlock(&css_set_lock);
1734 return count; 1737 return count;
@@ -1997,16 +2000,7 @@ int cgroup_scan_tasks(struct cgroup_scanner *scan)
1997 * but we cannot guarantee that the information we produce is correct 2000 * but we cannot guarantee that the information we produce is correct
1998 * unless we produce it entirely atomically. 2001 * unless we produce it entirely atomically.
1999 * 2002 *
2000 * Upon tasks file open(), a struct ctr_struct is allocated, that
2001 * will have a pointer to an array (also allocated here). The struct
2002 * ctr_struct * is stored in file->private_data. Its resources will
2003 * be freed by release() when the file is closed. The array is used
2004 * to sprintf the PIDs and then used by read().
2005 */ 2003 */
2006struct ctr_struct {
2007 char *buf;
2008 int bufsz;
2009};
2010 2004
2011/* 2005/*
2012 * Load into 'pidarray' up to 'npids' of the tasks using cgroup 2006 * Load into 'pidarray' up to 'npids' of the tasks using cgroup
@@ -2088,42 +2082,132 @@ static int cmppid(const void *a, const void *b)
2088 return *(pid_t *)a - *(pid_t *)b; 2082 return *(pid_t *)a - *(pid_t *)b;
2089} 2083}
2090 2084
2085
2091/* 2086/*
2092 * Convert array 'a' of 'npids' pid_t's to a string of newline separated 2087 * seq_file methods for the "tasks" file. The seq_file position is the
2093 * decimal pids in 'buf'. Don't write more than 'sz' chars, but return 2088 * next pid to display; the seq_file iterator is a pointer to the pid
2094 * count 'cnt' of how many chars would be written if buf were large enough. 2089 * in the cgroup->tasks_pids array.
2095 */ 2090 */
2096static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids) 2091
2092static void *cgroup_tasks_start(struct seq_file *s, loff_t *pos)
2097{ 2093{
2098 int cnt = 0; 2094 /*
2099 int i; 2095 * Initially we receive a position value that corresponds to
2096 * one more than the last pid shown (or 0 on the first call or
2097 * after a seek to the start). Use a binary-search to find the
2098 * next pid to display, if any
2099 */
2100 struct cgroup *cgrp = s->private;
2101 int index = 0, pid = *pos;
2102 int *iter;
2103
2104 down_read(&cgrp->pids_mutex);
2105 if (pid) {
2106 int end = cgrp->pids_length;
2107 int i;
2108 while (index < end) {
2109 int mid = (index + end) / 2;
2110 if (cgrp->tasks_pids[mid] == pid) {
2111 index = mid;
2112 break;
2113 } else if (cgrp->tasks_pids[mid] <= pid)
2114 index = mid + 1;
2115 else
2116 end = mid;
2117 }
2118 }
2119 /* If we're off the end of the array, we're done */
2120 if (index >= cgrp->pids_length)
2121 return NULL;
2122 /* Update the abstract position to be the actual pid that we found */
2123 iter = cgrp->tasks_pids + index;
2124 *pos = *iter;
2125 return iter;
2126}
2127
2128static void cgroup_tasks_stop(struct seq_file *s, void *v)
2129{
2130 struct cgroup *cgrp = s->private;
2131 up_read(&cgrp->pids_mutex);
2132}
2133
2134static void *cgroup_tasks_next(struct seq_file *s, void *v, loff_t *pos)
2135{
2136 struct cgroup *cgrp = s->private;
2137 int *p = v;
2138 int *end = cgrp->tasks_pids + cgrp->pids_length;
2139
2140 /*
2141 * Advance to the next pid in the array. If this goes off the
2142 * end, we're done
2143 */
2144 p++;
2145 if (p >= end) {
2146 return NULL;
2147 } else {
2148 *pos = *p;
2149 return p;
2150 }
2151}
2152
2153static int cgroup_tasks_show(struct seq_file *s, void *v)
2154{
2155 return seq_printf(s, "%d\n", *(int *)v);
2156}
2157
2158static struct seq_operations cgroup_tasks_seq_operations = {
2159 .start = cgroup_tasks_start,
2160 .stop = cgroup_tasks_stop,
2161 .next = cgroup_tasks_next,
2162 .show = cgroup_tasks_show,
2163};
2164
2165static void release_cgroup_pid_array(struct cgroup *cgrp)
2166{
2167 down_write(&cgrp->pids_mutex);
2168 BUG_ON(!cgrp->pids_use_count);
2169 if (!--cgrp->pids_use_count) {
2170 kfree(cgrp->tasks_pids);
2171 cgrp->tasks_pids = NULL;
2172 cgrp->pids_length = 0;
2173 }
2174 up_write(&cgrp->pids_mutex);
2175}
2176
2177static int cgroup_tasks_release(struct inode *inode, struct file *file)
2178{
2179 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2100 2180
2101 for (i = 0; i < npids; i++) 2181 if (!(file->f_mode & FMODE_READ))
2102 cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]); 2182 return 0;
2103 return cnt; 2183
2184 release_cgroup_pid_array(cgrp);
2185 return seq_release(inode, file);
2104} 2186}
2105 2187
2188static struct file_operations cgroup_tasks_operations = {
2189 .read = seq_read,
2190 .llseek = seq_lseek,
2191 .write = cgroup_file_write,
2192 .release = cgroup_tasks_release,
2193};
2194
2106/* 2195/*
2107 * Handle an open on 'tasks' file. Prepare a buffer listing the 2196 * Handle an open on 'tasks' file. Prepare an array containing the
2108 * process id's of tasks currently attached to the cgroup being opened. 2197 * process id's of tasks currently attached to the cgroup being opened.
2109 *
2110 * Does not require any specific cgroup mutexes, and does not take any.
2111 */ 2198 */
2199
2112static int cgroup_tasks_open(struct inode *unused, struct file *file) 2200static int cgroup_tasks_open(struct inode *unused, struct file *file)
2113{ 2201{
2114 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); 2202 struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent);
2115 struct ctr_struct *ctr;
2116 pid_t *pidarray; 2203 pid_t *pidarray;
2117 int npids; 2204 int npids;
2118 char c; 2205 int retval;
2119 2206
2207 /* Nothing to do for write-only files */
2120 if (!(file->f_mode & FMODE_READ)) 2208 if (!(file->f_mode & FMODE_READ))
2121 return 0; 2209 return 0;
2122 2210
2123 ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
2124 if (!ctr)
2125 goto err0;
2126
2127 /* 2211 /*
2128 * If cgroup gets more users after we read count, we won't have 2212 * If cgroup gets more users after we read count, we won't have
2129 * enough space - tough. This race is indistinguishable to the 2213 * enough space - tough. This race is indistinguishable to the
@@ -2131,57 +2215,31 @@ static int cgroup_tasks_open(struct inode *unused, struct file *file)
2131 * show up until sometime later on. 2215 * show up until sometime later on.
2132 */ 2216 */
2133 npids = cgroup_task_count(cgrp); 2217 npids = cgroup_task_count(cgrp);
2134 if (npids) { 2218 pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
2135 pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL); 2219 if (!pidarray)
2136 if (!pidarray) 2220 return -ENOMEM;
2137 goto err1; 2221 npids = pid_array_load(pidarray, npids, cgrp);
2138 2222 sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
2139 npids = pid_array_load(pidarray, npids, cgrp);
2140 sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
2141
2142 /* Call pid_array_to_buf() twice, first just to get bufsz */
2143 ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
2144 ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
2145 if (!ctr->buf)
2146 goto err2;
2147 ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
2148
2149 kfree(pidarray);
2150 } else {
2151 ctr->buf = NULL;
2152 ctr->bufsz = 0;
2153 }
2154 file->private_data = ctr;
2155 return 0;
2156
2157err2:
2158 kfree(pidarray);
2159err1:
2160 kfree(ctr);
2161err0:
2162 return -ENOMEM;
2163}
2164
2165static ssize_t cgroup_tasks_read(struct cgroup *cgrp,
2166 struct cftype *cft,
2167 struct file *file, char __user *buf,
2168 size_t nbytes, loff_t *ppos)
2169{
2170 struct ctr_struct *ctr = file->private_data;
2171 2223
2172 return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz); 2224 /*
2173} 2225 * Store the array in the cgroup, freeing the old
2226 * array if necessary
2227 */
2228 down_write(&cgrp->pids_mutex);
2229 kfree(cgrp->tasks_pids);
2230 cgrp->tasks_pids = pidarray;
2231 cgrp->pids_length = npids;
2232 cgrp->pids_use_count++;
2233 up_write(&cgrp->pids_mutex);
2174 2234
2175static int cgroup_tasks_release(struct inode *unused_inode, 2235 file->f_op = &cgroup_tasks_operations;
2176 struct file *file)
2177{
2178 struct ctr_struct *ctr;
2179 2236
2180 if (file->f_mode & FMODE_READ) { 2237 retval = seq_open(file, &cgroup_tasks_seq_operations);
2181 ctr = file->private_data; 2238 if (retval) {
2182 kfree(ctr->buf); 2239 release_cgroup_pid_array(cgrp);
2183 kfree(ctr); 2240 return retval;
2184 } 2241 }
2242 ((struct seq_file *)file->private_data)->private = cgrp;
2185 return 0; 2243 return 0;
2186} 2244}
2187 2245
@@ -2210,7 +2268,6 @@ static struct cftype files[] = {
2210 { 2268 {
2211 .name = "tasks", 2269 .name = "tasks",
2212 .open = cgroup_tasks_open, 2270 .open = cgroup_tasks_open,
2213 .read = cgroup_tasks_read,
2214 .write_u64 = cgroup_tasks_write, 2271 .write_u64 = cgroup_tasks_write,
2215 .release = cgroup_tasks_release, 2272 .release = cgroup_tasks_release,
2216 .private = FILE_TASKLIST, 2273 .private = FILE_TASKLIST,
@@ -2300,10 +2357,7 @@ static long cgroup_create(struct cgroup *parent, struct dentry *dentry,
2300 2357
2301 mutex_lock(&cgroup_mutex); 2358 mutex_lock(&cgroup_mutex);
2302 2359
2303 INIT_LIST_HEAD(&cgrp->sibling); 2360 init_cgroup_housekeeping(cgrp);
2304 INIT_LIST_HEAD(&cgrp->children);
2305 INIT_LIST_HEAD(&cgrp->css_sets);
2306 INIT_LIST_HEAD(&cgrp->release_list);
2307 2361
2308 cgrp->parent = parent; 2362 cgrp->parent = parent;
2309 cgrp->root = parent->root; 2363 cgrp->root = parent->root;
@@ -2495,8 +2549,7 @@ static void __init cgroup_init_subsys(struct cgroup_subsys *ss)
2495int __init cgroup_init_early(void) 2549int __init cgroup_init_early(void)
2496{ 2550{
2497 int i; 2551 int i;
2498 kref_init(&init_css_set.ref); 2552 atomic_set(&init_css_set.refcount, 1);
2499 kref_get(&init_css_set.ref);
2500 INIT_LIST_HEAD(&init_css_set.cg_links); 2553 INIT_LIST_HEAD(&init_css_set.cg_links);
2501 INIT_LIST_HEAD(&init_css_set.tasks); 2554 INIT_LIST_HEAD(&init_css_set.tasks);
2502 INIT_HLIST_NODE(&init_css_set.hlist); 2555 INIT_HLIST_NODE(&init_css_set.hlist);
@@ -2735,6 +2788,8 @@ void cgroup_fork_callbacks(struct task_struct *child)
2735 * Called on every change to mm->owner. mm_init_owner() does not 2788 * Called on every change to mm->owner. mm_init_owner() does not
2736 * invoke this routine, since it assigns the mm->owner the first time 2789 * invoke this routine, since it assigns the mm->owner the first time
2737 * and does not change it. 2790 * and does not change it.
2791 *
2792 * The callbacks are invoked with mmap_sem held in read mode.
2738 */ 2793 */
2739void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new) 2794void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
2740{ 2795{
@@ -2750,7 +2805,7 @@ void cgroup_mm_owner_callbacks(struct task_struct *old, struct task_struct *new)
2750 if (oldcgrp == newcgrp) 2805 if (oldcgrp == newcgrp)
2751 continue; 2806 continue;
2752 if (ss->mm_owner_changed) 2807 if (ss->mm_owner_changed)
2753 ss->mm_owner_changed(ss, oldcgrp, newcgrp); 2808 ss->mm_owner_changed(ss, oldcgrp, newcgrp, new);
2754 } 2809 }
2755 } 2810 }
2756} 2811}
diff --git a/kernel/cgroup_debug.c b/kernel/cgroup_debug.c
index c3dc3aba4c02..daca6209202d 100644
--- a/kernel/cgroup_debug.c
+++ b/kernel/cgroup_debug.c
@@ -57,7 +57,7 @@ static u64 current_css_set_refcount_read(struct cgroup *cont,
 	u64 count;
 
 	rcu_read_lock();
-	count = atomic_read(&current->cgroups->ref.refcount);
+	count = atomic_read(&current->cgroups->refcount);
 	rcu_read_unlock();
 	return count;
 }
@@ -90,7 +90,7 @@ static struct cftype files[] = {
 	{
 		.name = "releasable",
 		.read_u64 = releasable_read,
-	}
+	},
 };
 
 static int debug_populate(struct cgroup_subsys *ss, struct cgroup *cont)
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
new file mode 100644
index 000000000000..e95056954498
--- /dev/null
+++ b/kernel/cgroup_freezer.c
@@ -0,0 +1,379 @@
+/*
+ * cgroup_freezer.c -  control group freezer subsystem
+ *
+ * Copyright IBM Corporation, 2007
+ *
+ * Author : Cedric Le Goater <clg@fr.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of version 2.1 of the GNU Lesser General Public License
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it would be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#include <linux/module.h>
+#include <linux/cgroup.h>
+#include <linux/fs.h>
+#include <linux/uaccess.h>
+#include <linux/freezer.h>
+#include <linux/seq_file.h>
+
+enum freezer_state {
+	CGROUP_THAWED = 0,
+	CGROUP_FREEZING,
+	CGROUP_FROZEN,
+};
+
+struct freezer {
+	struct cgroup_subsys_state css;
+	enum freezer_state state;
+	spinlock_t lock; /* protects _writes_ to state */
+};
+
+static inline struct freezer *cgroup_freezer(
+		struct cgroup *cgroup)
+{
+	return container_of(
+		cgroup_subsys_state(cgroup, freezer_subsys_id),
+		struct freezer, css);
+}
+
+static inline struct freezer *task_freezer(struct task_struct *task)
+{
+	return container_of(task_subsys_state(task, freezer_subsys_id),
+			    struct freezer, css);
+}
+
+int cgroup_frozen(struct task_struct *task)
+{
+	struct freezer *freezer;
+	enum freezer_state state;
+
+	task_lock(task);
+	freezer = task_freezer(task);
+	state = freezer->state;
+	task_unlock(task);
+
+	return state == CGROUP_FROZEN;
+}
+
+/*
+ * cgroups_write_string() limits the size of freezer state strings to
+ * CGROUP_LOCAL_BUFFER_SIZE
+ */
+static const char *freezer_state_strs[] = {
+	"THAWED",
+	"FREEZING",
+	"FROZEN",
+};
+
+/*
+ * State diagram
+ * Transitions are caused by userspace writes to the freezer.state file.
+ * The values in parenthesis are state labels. The rest are edge labels.
+ *
+ * (THAWED) --FROZEN--> (FREEZING) --FROZEN--> (FROZEN)
+ *    ^ ^                    |                     |
+ *    | \_______THAWED_______/                     |
+ *    \__________________________THAWED____________/
+ */
+
+struct cgroup_subsys freezer_subsys;
+
+/* Locks taken and their ordering
+ * ------------------------------
+ * css_set_lock
+ * cgroup_mutex (AKA cgroup_lock)
+ * task->alloc_lock (AKA task_lock)
+ * freezer->lock
+ * task->sighand->siglock
+ *
+ * cgroup code forces css_set_lock to be taken before task->alloc_lock
+ *
+ * freezer_create(), freezer_destroy():
+ * cgroup_mutex [ by cgroup core ]
+ *
+ * can_attach():
+ * cgroup_mutex
+ *
+ * cgroup_frozen():
+ * task->alloc_lock (to get task's cgroup)
+ *
+ * freezer_fork() (preserving fork() performance means can't take cgroup_mutex):
+ * task->alloc_lock (to get task's cgroup)
+ *  freezer->lock
+ *   sighand->siglock (if the cgroup is freezing)
+ *
+ * freezer_read():
+ * cgroup_mutex
+ *  freezer->lock
+ *   read_lock css_set_lock (cgroup iterator start)
+ *
+ * freezer_write() (freeze):
+ * cgroup_mutex
+ *  freezer->lock
+ *   read_lock css_set_lock (cgroup iterator start)
+ *    sighand->siglock
+ *
+ * freezer_write() (unfreeze):
+ * cgroup_mutex
+ *  freezer->lock
+ *   read_lock css_set_lock (cgroup iterator start)
+ *    task->alloc_lock (to prevent races with freeze_task())
+ *     sighand->siglock
+ */
+static struct cgroup_subsys_state *freezer_create(struct cgroup_subsys *ss,
+						  struct cgroup *cgroup)
+{
+	struct freezer *freezer;
+
+	freezer = kzalloc(sizeof(struct freezer), GFP_KERNEL);
+	if (!freezer)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&freezer->lock);
+	freezer->state = CGROUP_THAWED;
+	return &freezer->css;
+}
+
+static void freezer_destroy(struct cgroup_subsys *ss,
+			    struct cgroup *cgroup)
+{
+	kfree(cgroup_freezer(cgroup));
+}
+
+/* Task is frozen or will freeze immediately when next it gets woken */
+static bool is_task_frozen_enough(struct task_struct *task)
+{
+	return frozen(task) ||
+		(task_is_stopped_or_traced(task) && freezing(task));
+}
+
+/*
+ * The call to cgroup_lock() in the freezer.state write method prevents
+ * a write to that file racing against an attach, and hence the
+ * can_attach() result will remain valid until the attach completes.
+ */
+static int freezer_can_attach(struct cgroup_subsys *ss,
+			      struct cgroup *new_cgroup,
+			      struct task_struct *task)
+{
+	struct freezer *freezer;
+	int retval;
+
+	/* Anything frozen can't move or be moved to/from */
+
+	if (is_task_frozen_enough(task))
+		return -EBUSY;
+
+	freezer = cgroup_freezer(new_cgroup);
+	if (freezer->state == CGROUP_FROZEN)
+		return -EBUSY;
+
+	retval = 0;
+	task_lock(task);
+	freezer = task_freezer(task);
+	if (freezer->state == CGROUP_FROZEN)
+		retval = -EBUSY;
+	task_unlock(task);
+	return retval;
+}
+
+static void freezer_fork(struct cgroup_subsys *ss, struct task_struct *task)
+{
+	struct freezer *freezer;
+
+	task_lock(task);
+	freezer = task_freezer(task);
+	task_unlock(task);
+
+	BUG_ON(freezer->state == CGROUP_FROZEN);
+	spin_lock_irq(&freezer->lock);
+	/* Locking avoids race with FREEZING -> THAWED transitions. */
+	if (freezer->state == CGROUP_FREEZING)
+		freeze_task(task, true);
+	spin_unlock_irq(&freezer->lock);
+}
+
+/*
+ * caller must hold freezer->lock
+ */
+static void update_freezer_state(struct cgroup *cgroup,
+				 struct freezer *freezer)
+{
+	struct cgroup_iter it;
+	struct task_struct *task;
+	unsigned int nfrozen = 0, ntotal = 0;
+
+	cgroup_iter_start(cgroup, &it);
+	while ((task = cgroup_iter_next(cgroup, &it))) {
+		ntotal++;
+		if (is_task_frozen_enough(task))
+			nfrozen++;
+	}
+
+	/*
+	 * Transition to FROZEN when no new tasks can be added ensures
+	 * that we never exist in the FROZEN state while there are unfrozen
+	 * tasks.
+	 */
+	if (nfrozen == ntotal)
+		freezer->state = CGROUP_FROZEN;
+	else if (nfrozen > 0)
+		freezer->state = CGROUP_FREEZING;
+	else
+		freezer->state = CGROUP_THAWED;
+	cgroup_iter_end(cgroup, &it);
+}
+
+static int freezer_read(struct cgroup *cgroup, struct cftype *cft,
+			struct seq_file *m)
+{
+	struct freezer *freezer;
+	enum freezer_state state;
+
+	if (!cgroup_lock_live_group(cgroup))
+		return -ENODEV;
+
+	freezer = cgroup_freezer(cgroup);
+	spin_lock_irq(&freezer->lock);
+	state = freezer->state;
+	if (state == CGROUP_FREEZING) {
+		/* We change from FREEZING to FROZEN lazily if the cgroup was
+		 * only partially frozen when we exitted write. */
+		update_freezer_state(cgroup, freezer);
+		state = freezer->state;
+	}
+	spin_unlock_irq(&freezer->lock);
+	cgroup_unlock();
+
+	seq_puts(m, freezer_state_strs[state]);
+	seq_putc(m, '\n');
+	return 0;
+}
+
+static int try_to_freeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+{
+	struct cgroup_iter it;
+	struct task_struct *task;
+	unsigned int num_cant_freeze_now = 0;
+
+	freezer->state = CGROUP_FREEZING;
+	cgroup_iter_start(cgroup, &it);
+	while ((task = cgroup_iter_next(cgroup, &it))) {
+		if (!freeze_task(task, true))
+			continue;
+		if (is_task_frozen_enough(task))
+			continue;
+		if (!freezing(task) && !freezer_should_skip(task))
+			num_cant_freeze_now++;
+	}
+	cgroup_iter_end(cgroup, &it);
+
+	return num_cant_freeze_now ? -EBUSY : 0;
+}
+
+static int unfreeze_cgroup(struct cgroup *cgroup, struct freezer *freezer)
+{
+	struct cgroup_iter it;
+	struct task_struct *task;
+
+	cgroup_iter_start(cgroup, &it);
+	while ((task = cgroup_iter_next(cgroup, &it))) {
+		int do_wake;
+
+		task_lock(task);
+		do_wake = __thaw_process(task);
+		task_unlock(task);
+		if (do_wake)
+			wake_up_process(task);
+	}
+	cgroup_iter_end(cgroup, &it);
+	freezer->state = CGROUP_THAWED;
+
+	return 0;
+}
+
+static int freezer_change_state(struct cgroup *cgroup,
+				enum freezer_state goal_state)
+{
+	struct freezer *freezer;
+	int retval = 0;
+
+	freezer = cgroup_freezer(cgroup);
+	spin_lock_irq(&freezer->lock);
+	update_freezer_state(cgroup, freezer);
+	if (goal_state == freezer->state)
+		goto out;
+	switch (freezer->state) {
+	case CGROUP_THAWED:
+		retval = try_to_freeze_cgroup(cgroup, freezer);
+		break;
+	case CGROUP_FREEZING:
+		if (goal_state == CGROUP_FROZEN) {
+			/* Userspace is retrying after
+			 * "/bin/echo FROZEN > freezer.state" returned -EBUSY */
+			retval = try_to_freeze_cgroup(cgroup, freezer);
+			break;
+		}
+		/* state == FREEZING and goal_state == THAWED, so unfreeze */
+	case CGROUP_FROZEN:
+		retval = unfreeze_cgroup(cgroup, freezer);
+		break;
+	default:
+		break;
+	}
+out:
+	spin_unlock_irq(&freezer->lock);
+
+	return retval;
+}
+
+static int freezer_write(struct cgroup *cgroup,
+			 struct cftype *cft,
+			 const char *buffer)
+{
+	int retval;
+	enum freezer_state goal_state;
+
+	if (strcmp(buffer, freezer_state_strs[CGROUP_THAWED]) == 0)
+		goal_state = CGROUP_THAWED;
+	else if (strcmp(buffer, freezer_state_strs[CGROUP_FROZEN]) == 0)
+		goal_state = CGROUP_FROZEN;
+	else
+		return -EIO;
+
+	if (!cgroup_lock_live_group(cgroup))
+		return -ENODEV;
+	retval = freezer_change_state(cgroup, goal_state);
+	cgroup_unlock();
+	return retval;
+}
+
+static struct cftype files[] = {
+	{
+		.name = "state",
+		.read_seq_string = freezer_read,
+		.write_string = freezer_write,
+	},
+};
+
+static int freezer_populate(struct cgroup_subsys *ss, struct cgroup *cgroup)
+{
+	return cgroup_add_files(cgroup, ss, files, ARRAY_SIZE(files));
+}
+
+struct cgroup_subsys freezer_subsys = {
+	.name		= "freezer",
+	.create		= freezer_create,
+	.destroy	= freezer_destroy,
+	.populate	= freezer_populate,
+	.subsys_id	= freezer_subsys_id,
+	.can_attach	= freezer_can_attach,
+	.attach		= NULL,
+	.fork		= freezer_fork,
+	.exit		= NULL,
+};
diff --git a/kernel/compat.c b/kernel/compat.c
index 32c254a8ab9a..8eafe3eb50d9 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -23,9 +23,68 @@
23#include <linux/timex.h> 23#include <linux/timex.h>
24#include <linux/migrate.h> 24#include <linux/migrate.h>
25#include <linux/posix-timers.h> 25#include <linux/posix-timers.h>
26#include <linux/times.h>
26 27
27#include <asm/uaccess.h> 28#include <asm/uaccess.h>
28 29
30/*
31 * Note that the native side is already converted to a timespec, because
32 * that's what we want anyway.
33 */
34static int compat_get_timeval(struct timespec *o,
35 struct compat_timeval __user *i)
36{
37 long usec;
38
39 if (get_user(o->tv_sec, &i->tv_sec) ||
40 get_user(usec, &i->tv_usec))
41 return -EFAULT;
42 o->tv_nsec = usec * 1000;
43 return 0;
44}
45
46static int compat_put_timeval(struct compat_timeval __user *o,
47 struct timeval *i)
48{
49 return (put_user(i->tv_sec, &o->tv_sec) ||
50 put_user(i->tv_usec, &o->tv_usec)) ? -EFAULT : 0;
51}
52
53asmlinkage long compat_sys_gettimeofday(struct compat_timeval __user *tv,
54 struct timezone __user *tz)
55{
56 if (tv) {
57 struct timeval ktv;
58 do_gettimeofday(&ktv);
59 if (compat_put_timeval(tv, &ktv))
60 return -EFAULT;
61 }
62 if (tz) {
63 if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
64 return -EFAULT;
65 }
66
67 return 0;
68}
69
70asmlinkage long compat_sys_settimeofday(struct compat_timeval __user *tv,
71 struct timezone __user *tz)
72{
73 struct timespec kts;
74 struct timezone ktz;
75
76 if (tv) {
77 if (compat_get_timeval(&kts, tv))
78 return -EFAULT;
79 }
80 if (tz) {
81 if (copy_from_user(&ktz, tz, sizeof(ktz)))
82 return -EFAULT;
83 }
84
85 return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
86}
87
29int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts) 88int get_compat_timespec(struct timespec *ts, const struct compat_timespec __user *cts)
30{ 89{
31 return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) || 90 return (!access_ok(VERIFY_READ, cts, sizeof(*cts)) ||
@@ -150,49 +209,23 @@ asmlinkage long compat_sys_setitimer(int which,
150 return 0; 209 return 0;
151} 210}
152 211
212static compat_clock_t clock_t_to_compat_clock_t(clock_t x)
213{
214 return compat_jiffies_to_clock_t(clock_t_to_jiffies(x));
215}
216
153asmlinkage long compat_sys_times(struct compat_tms __user *tbuf) 217asmlinkage long compat_sys_times(struct compat_tms __user *tbuf)
154{ 218{
155 /*
156 * In the SMP world we might just be unlucky and have one of
157 * the times increment as we use it. Since the value is an
158 * atomically safe type this is just fine. Conceptually its
159 * as if the syscall took an instant longer to occur.
160 */
161 if (tbuf) { 219 if (tbuf) {
220 struct tms tms;
162 struct compat_tms tmp; 221 struct compat_tms tmp;
163 struct task_struct *tsk = current; 222
164 struct task_struct *t; 223 do_sys_times(&tms);
165 cputime_t utime, stime, cutime, cstime; 224 /* Convert our struct tms to the compat version. */
166 225 tmp.tms_utime = clock_t_to_compat_clock_t(tms.tms_utime);
167 read_lock(&tasklist_lock); 226 tmp.tms_stime = clock_t_to_compat_clock_t(tms.tms_stime);
168 utime = tsk->signal->utime; 227 tmp.tms_cutime = clock_t_to_compat_clock_t(tms.tms_cutime);
169 stime = tsk->signal->stime; 228 tmp.tms_cstime = clock_t_to_compat_clock_t(tms.tms_cstime);
170 t = tsk;
171 do {
172 utime = cputime_add(utime, t->utime);
173 stime = cputime_add(stime, t->stime);
174 t = next_thread(t);
175 } while (t != tsk);
176
177 /*
178 * While we have tasklist_lock read-locked, no dying thread
179 * can be updating current->signal->[us]time. Instead,
180 * we got their counts included in the live thread loop.
181 * However, another thread can come in right now and
182 * do a wait call that updates current->signal->c[us]time.
183 * To make sure we always see that pair updated atomically,
184 * we take the siglock around fetching them.
185 */
186 spin_lock_irq(&tsk->sighand->siglock);
187 cutime = tsk->signal->cutime;
188 cstime = tsk->signal->cstime;
189 spin_unlock_irq(&tsk->sighand->siglock);
190 read_unlock(&tasklist_lock);
191
192 tmp.tms_utime = compat_jiffies_to_clock_t(cputime_to_jiffies(utime));
193 tmp.tms_stime = compat_jiffies_to_clock_t(cputime_to_jiffies(stime));
194 tmp.tms_cutime = compat_jiffies_to_clock_t(cputime_to_jiffies(cutime));
195 tmp.tms_cstime = compat_jiffies_to_clock_t(cputime_to_jiffies(cstime));
196 if (copy_to_user(tbuf, &tmp, sizeof(tmp))) 229 if (copy_to_user(tbuf, &tmp, sizeof(tmp)))
197 return -EFAULT; 230 return -EFAULT;
198 } 231 }
diff --git a/kernel/configs.c b/kernel/configs.c
index 4c345210ed8c..abaee684ecbf 100644
--- a/kernel/configs.c
+++ b/kernel/configs.c
@@ -54,9 +54,6 @@
 
 #ifdef CONFIG_IKCONFIG_PROC
 
-/**************************************************/
-/* globals and useful constants */
-
 static ssize_t
 ikconfig_read_current(struct file *file, char __user *buf,
 		      size_t len, loff_t * offset)
@@ -71,9 +68,6 @@ static const struct file_operations ikconfig_file_ops = {
 	.read = ikconfig_read_current,
 };
 
-/***************************************************/
-/* ikconfig_init: start up everything we need to */
-
 static int __init ikconfig_init(void)
 {
 	struct proc_dir_entry *entry;
@@ -89,9 +83,6 @@ static int __init ikconfig_init(void)
 	return 0;
 }
 
-/***************************************************/
-/* ikconfig_cleanup: clean up our mess */
-
 static void __exit ikconfig_cleanup(void)
 {
 	remove_proc_entry("config.gz", NULL);
diff --git a/kernel/cpu.c b/kernel/cpu.c
index f17e9854c246..86d49045daed 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -199,13 +199,14 @@ static int __ref take_cpu_down(void *_param)
199 struct take_cpu_down_param *param = _param; 199 struct take_cpu_down_param *param = _param;
200 int err; 200 int err;
201 201
202 raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
203 param->hcpu);
204 /* Ensure this CPU doesn't handle any more interrupts. */ 202 /* Ensure this CPU doesn't handle any more interrupts. */
205 err = __cpu_disable(); 203 err = __cpu_disable();
206 if (err < 0) 204 if (err < 0)
207 return err; 205 return err;
208 206
207 raw_notifier_call_chain(&cpu_chain, CPU_DYING | param->mod,
208 param->hcpu);
209
209 /* Force idle task to run as soon as we yield: it should 210 /* Force idle task to run as soon as we yield: it should
210 immediately notice cpu is offline and die quickly. */ 211 immediately notice cpu is offline and die quickly. */
211 sched_idle_next(); 212 sched_idle_next();
@@ -453,6 +454,25 @@ out:
453} 454}
454#endif /* CONFIG_PM_SLEEP_SMP */ 455#endif /* CONFIG_PM_SLEEP_SMP */
455 456
457/**
458 * notify_cpu_starting(cpu) - call the CPU_STARTING notifiers
459 * @cpu: cpu that just started
460 *
461 * This function calls the cpu_chain notifiers with CPU_STARTING.
462 * It must be called by the arch code on the new cpu, before the new cpu
463 * enables interrupts and before the "boot" cpu returns from __cpu_up().
464 */
465void notify_cpu_starting(unsigned int cpu)
466{
467 unsigned long val = CPU_STARTING;
468
469#ifdef CONFIG_PM_SLEEP_SMP
470 if (cpu_isset(cpu, frozen_cpus))
471 val = CPU_STARTING_FROZEN;
472#endif /* CONFIG_PM_SLEEP_SMP */
473 raw_notifier_call_chain(&cpu_chain, val, (void *)(long)cpu);
474}
475
456#endif /* CONFIG_SMP */ 476#endif /* CONFIG_SMP */
457 477
458/* 478/*
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 827cd9adccb2..3e00526f52ec 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1172,7 +1172,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1172{ 1172{
1173 struct cpuset trialcs; 1173 struct cpuset trialcs;
1174 int err; 1174 int err;
1175 int cpus_nonempty, balance_flag_changed; 1175 int balance_flag_changed;
1176 1176
1177 trialcs = *cs; 1177 trialcs = *cs;
1178 if (turning_on) 1178 if (turning_on)
@@ -1184,7 +1184,6 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1184 if (err < 0) 1184 if (err < 0)
1185 return err; 1185 return err;
1186 1186
1187 cpus_nonempty = !cpus_empty(trialcs.cpus_allowed);
1188 balance_flag_changed = (is_sched_load_balance(cs) != 1187 balance_flag_changed = (is_sched_load_balance(cs) !=
1189 is_sched_load_balance(&trialcs)); 1188 is_sched_load_balance(&trialcs));
1190 1189
@@ -1192,7 +1191,7 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1192 cs->flags = trialcs.flags; 1191 cs->flags = trialcs.flags;
1193 mutex_unlock(&callback_mutex); 1192 mutex_unlock(&callback_mutex);
1194 1193
1195 if (cpus_nonempty && balance_flag_changed) 1194 if (!cpus_empty(trialcs.cpus_allowed) && balance_flag_changed)
1196 async_rebuild_sched_domains(); 1195 async_rebuild_sched_domains();
1197 1196
1198 return 0; 1197 return 0;
@@ -1921,7 +1920,7 @@ static void remove_tasks_in_empty_cpuset(struct cpuset *cs)
1921 * that has tasks along with an empty 'mems'. But if we did see such 1920 * that has tasks along with an empty 'mems'. But if we did see such
1922 * a cpuset, we'd handle it just like we do if its 'cpus' was empty. 1921 * a cpuset, we'd handle it just like we do if its 'cpus' was empty.
1923 */ 1922 */
1924static void scan_for_empty_cpusets(const struct cpuset *root) 1923static void scan_for_empty_cpusets(struct cpuset *root)
1925{ 1924{
1926 LIST_HEAD(queue); 1925 LIST_HEAD(queue);
1927 struct cpuset *cp; /* scans cpusets being updated */ 1926 struct cpuset *cp; /* scans cpusets being updated */
@@ -2437,19 +2436,15 @@ const struct file_operations proc_cpuset_operations = {
2437void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task) 2436void cpuset_task_status_allowed(struct seq_file *m, struct task_struct *task)
2438{ 2437{
2439 seq_printf(m, "Cpus_allowed:\t"); 2438 seq_printf(m, "Cpus_allowed:\t");
2440 m->count += cpumask_scnprintf(m->buf + m->count, m->size - m->count, 2439 seq_cpumask(m, &task->cpus_allowed);
2441 task->cpus_allowed);
2442 seq_printf(m, "\n"); 2440 seq_printf(m, "\n");
2443 seq_printf(m, "Cpus_allowed_list:\t"); 2441 seq_printf(m, "Cpus_allowed_list:\t");
2444 m->count += cpulist_scnprintf(m->buf + m->count, m->size - m->count, 2442 seq_cpumask_list(m, &task->cpus_allowed);
2445 task->cpus_allowed);
2446 seq_printf(m, "\n"); 2443 seq_printf(m, "\n");
2447 seq_printf(m, "Mems_allowed:\t"); 2444 seq_printf(m, "Mems_allowed:\t");
2448 m->count += nodemask_scnprintf(m->buf + m->count, m->size - m->count, 2445 seq_nodemask(m, &task->mems_allowed);
2449 task->mems_allowed);
2450 seq_printf(m, "\n"); 2446 seq_printf(m, "\n");
2451 seq_printf(m, "Mems_allowed_list:\t"); 2447 seq_printf(m, "Mems_allowed_list:\t");
2452 m->count += nodelist_scnprintf(m->buf + m->count, m->size - m->count, 2448 seq_nodemask_list(m, &task->mems_allowed);
2453 task->mems_allowed);
2454 seq_printf(m, "\n"); 2449 seq_printf(m, "\n");
2455} 2450}
diff --git a/kernel/dma-coherent.c b/kernel/dma-coherent.c
index c1d4d5b4c61c..f013a0c2e111 100644
--- a/kernel/dma-coherent.c
+++ b/kernel/dma-coherent.c
@@ -124,6 +124,7 @@ int dma_alloc_from_coherent(struct device *dev, ssize_t size,
 	}
 	return (mem != NULL);
 }
+EXPORT_SYMBOL(dma_alloc_from_coherent);
 
 /**
  * dma_release_from_coherent() - try to free the memory allocated from per-device coherent memory pool
@@ -151,3 +152,4 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
 	}
 	return 0;
 }
+EXPORT_SYMBOL(dma_release_from_coherent);
diff --git a/kernel/dma.c b/kernel/dma.c
index d2c60a822790..f903189c5304 100644
--- a/kernel/dma.c
+++ b/kernel/dma.c
@@ -1,4 +1,4 @@
-/* $Id: dma.c,v 1.7 1994/12/28 03:35:33 root Exp root $
+/*
  * linux/kernel/dma.c: A DMA channel allocator. Inspired by linux/kernel/irq.c.
  *
  * Written by Hennus Bergman, 1992.
diff --git a/kernel/exit.c b/kernel/exit.c
index 85a83c831856..80137a5d9467 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -47,6 +47,7 @@
47#include <linux/blkdev.h> 47#include <linux/blkdev.h>
48#include <linux/task_io_accounting_ops.h> 48#include <linux/task_io_accounting_ops.h>
49#include <linux/tracehook.h> 49#include <linux/tracehook.h>
50#include <trace/sched.h>
50 51
51#include <asm/uaccess.h> 52#include <asm/uaccess.h>
52#include <asm/unistd.h> 53#include <asm/unistd.h>
@@ -112,8 +113,6 @@ static void __exit_signal(struct task_struct *tsk)
112 * We won't ever get here for the group leader, since it 113 * We won't ever get here for the group leader, since it
113 * will have been the last reference on the signal_struct. 114 * will have been the last reference on the signal_struct.
114 */ 115 */
115 sig->utime = cputime_add(sig->utime, task_utime(tsk));
116 sig->stime = cputime_add(sig->stime, task_stime(tsk));
117 sig->gtime = cputime_add(sig->gtime, task_gtime(tsk)); 116 sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
118 sig->min_flt += tsk->min_flt; 117 sig->min_flt += tsk->min_flt;
119 sig->maj_flt += tsk->maj_flt; 118 sig->maj_flt += tsk->maj_flt;
@@ -122,7 +121,6 @@ static void __exit_signal(struct task_struct *tsk)
122 sig->inblock += task_io_get_inblock(tsk); 121 sig->inblock += task_io_get_inblock(tsk);
123 sig->oublock += task_io_get_oublock(tsk); 122 sig->oublock += task_io_get_oublock(tsk);
124 task_io_accounting_add(&sig->ioac, &tsk->ioac); 123 task_io_accounting_add(&sig->ioac, &tsk->ioac);
125 sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
126 sig = NULL; /* Marker for below. */ 124 sig = NULL; /* Marker for below. */
127 } 125 }
128 126
@@ -149,7 +147,10 @@ static void __exit_signal(struct task_struct *tsk)
149 147
150static void delayed_put_task_struct(struct rcu_head *rhp) 148static void delayed_put_task_struct(struct rcu_head *rhp)
151{ 149{
152 put_task_struct(container_of(rhp, struct task_struct, rcu)); 150 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
151
152 trace_sched_process_free(tsk);
153 put_task_struct(tsk);
153} 154}
154 155
155 156
@@ -640,24 +641,23 @@ retry:
640assign_new_owner: 641assign_new_owner:
641 BUG_ON(c == p); 642 BUG_ON(c == p);
642 get_task_struct(c); 643 get_task_struct(c);
644 read_unlock(&tasklist_lock);
645 down_write(&mm->mmap_sem);
643 /* 646 /*
644 * The task_lock protects c->mm from changing. 647 * The task_lock protects c->mm from changing.
645 * We always want mm->owner->mm == mm 648 * We always want mm->owner->mm == mm
646 */ 649 */
647 task_lock(c); 650 task_lock(c);
648 /*
649 * Delay read_unlock() till we have the task_lock()
650 * to ensure that c does not slip away underneath us
651 */
652 read_unlock(&tasklist_lock);
653 if (c->mm != mm) { 651 if (c->mm != mm) {
654 task_unlock(c); 652 task_unlock(c);
653 up_write(&mm->mmap_sem);
655 put_task_struct(c); 654 put_task_struct(c);
656 goto retry; 655 goto retry;
657 } 656 }
658 cgroup_mm_owner_callbacks(mm->owner, c); 657 cgroup_mm_owner_callbacks(mm->owner, c);
659 mm->owner = c; 658 mm->owner = c;
660 task_unlock(c); 659 task_unlock(c);
660 up_write(&mm->mmap_sem);
661 put_task_struct(c); 661 put_task_struct(c);
662} 662}
663#endif /* CONFIG_MM_OWNER */ 663#endif /* CONFIG_MM_OWNER */
@@ -1074,6 +1074,8 @@ NORET_TYPE void do_exit(long code)
1074 1074
1075 if (group_dead) 1075 if (group_dead)
1076 acct_process(); 1076 acct_process();
1077 trace_sched_process_exit(tsk);
1078
1077 exit_sem(tsk); 1079 exit_sem(tsk);
1078 exit_files(tsk); 1080 exit_files(tsk);
1079 exit_fs(tsk); 1081 exit_fs(tsk);
@@ -1302,6 +1304,7 @@ static int wait_task_zombie(struct task_struct *p, int options,
1302 if (likely(!traced)) { 1304 if (likely(!traced)) {
1303 struct signal_struct *psig; 1305 struct signal_struct *psig;
1304 struct signal_struct *sig; 1306 struct signal_struct *sig;
1307 struct task_cputime cputime;
1305 1308
1306 /* 1309 /*
1307 * The resource counters for the group leader are in its 1310 * The resource counters for the group leader are in its
@@ -1317,20 +1320,23 @@ static int wait_task_zombie(struct task_struct *p, int options,
1317 * need to protect the access to p->parent->signal fields, 1320 * need to protect the access to p->parent->signal fields,
1318 * as other threads in the parent group can be right 1321 * as other threads in the parent group can be right
1319 * here reaping other children at the same time. 1322 * here reaping other children at the same time.
1323 *
1324 * We use thread_group_cputime() to get times for the thread
1325 * group, which consolidates times for all threads in the
1326 * group including the group leader.
1320 */ 1327 */
1321 spin_lock_irq(&p->parent->sighand->siglock); 1328 spin_lock_irq(&p->parent->sighand->siglock);
1322 psig = p->parent->signal; 1329 psig = p->parent->signal;
1323 sig = p->signal; 1330 sig = p->signal;
1331 thread_group_cputime(p, &cputime);
1324 psig->cutime = 1332 psig->cutime =
1325 cputime_add(psig->cutime, 1333 cputime_add(psig->cutime,
1326 cputime_add(p->utime, 1334 cputime_add(cputime.utime,
1327 cputime_add(sig->utime, 1335 sig->cutime));
1328 sig->cutime)));
1329 psig->cstime = 1336 psig->cstime =
1330 cputime_add(psig->cstime, 1337 cputime_add(psig->cstime,
1331 cputime_add(p->stime, 1338 cputime_add(cputime.stime,
1332 cputime_add(sig->stime, 1339 sig->cstime));
1333 sig->cstime)));
1334 psig->cgtime = 1340 psig->cgtime =
1335 cputime_add(psig->cgtime, 1341 cputime_add(psig->cgtime,
1336 cputime_add(p->gtime, 1342 cputime_add(p->gtime,
@@ -1675,6 +1681,8 @@ static long do_wait(enum pid_type type, struct pid *pid, int options,
1675 struct task_struct *tsk; 1681 struct task_struct *tsk;
1676 int retval; 1682 int retval;
1677 1683
1684 trace_sched_process_wait(pid);
1685
1678 add_wait_queue(&current->signal->wait_chldexit,&wait); 1686 add_wait_queue(&current->signal->wait_chldexit,&wait);
1679repeat: 1687repeat:
1680 /* 1688 /*
diff --git a/kernel/fork.c b/kernel/fork.c
index 7ce2ebe84796..4d093552dd6e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -58,6 +58,7 @@
58#include <linux/tty.h> 58#include <linux/tty.h>
59#include <linux/proc_fs.h> 59#include <linux/proc_fs.h>
60#include <linux/blkdev.h> 60#include <linux/blkdev.h>
61#include <trace/sched.h>
61 62
62#include <asm/pgtable.h> 63#include <asm/pgtable.h>
63#include <asm/pgalloc.h> 64#include <asm/pgalloc.h>
@@ -759,15 +760,44 @@ void __cleanup_sighand(struct sighand_struct *sighand)
759 kmem_cache_free(sighand_cachep, sighand); 760 kmem_cache_free(sighand_cachep, sighand);
760} 761}
761 762
763
764/*
765 * Initialize POSIX timer handling for a thread group.
766 */
767static void posix_cpu_timers_init_group(struct signal_struct *sig)
768{
769 /* Thread group counters. */
770 thread_group_cputime_init(sig);
771
772 /* Expiration times and increments. */
773 sig->it_virt_expires = cputime_zero;
774 sig->it_virt_incr = cputime_zero;
775 sig->it_prof_expires = cputime_zero;
776 sig->it_prof_incr = cputime_zero;
777
778 /* Cached expiration times. */
779 sig->cputime_expires.prof_exp = cputime_zero;
780 sig->cputime_expires.virt_exp = cputime_zero;
781 sig->cputime_expires.sched_exp = 0;
782
783 /* The timer lists. */
784 INIT_LIST_HEAD(&sig->cpu_timers[0]);
785 INIT_LIST_HEAD(&sig->cpu_timers[1]);
786 INIT_LIST_HEAD(&sig->cpu_timers[2]);
787}
788
762static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) 789static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
763{ 790{
764 struct signal_struct *sig; 791 struct signal_struct *sig;
765 int ret; 792 int ret;
766 793
767 if (clone_flags & CLONE_THREAD) { 794 if (clone_flags & CLONE_THREAD) {
768 atomic_inc(&current->signal->count); 795 ret = thread_group_cputime_clone_thread(current);
769 atomic_inc(&current->signal->live); 796 if (likely(!ret)) {
770 return 0; 797 atomic_inc(&current->signal->count);
798 atomic_inc(&current->signal->live);
799 }
800 return ret;
771 } 801 }
772 sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL); 802 sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
773 tsk->signal = sig; 803 tsk->signal = sig;
@@ -795,39 +825,25 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
795 sig->it_real_incr.tv64 = 0; 825 sig->it_real_incr.tv64 = 0;
796 sig->real_timer.function = it_real_fn; 826 sig->real_timer.function = it_real_fn;
797 827
798 sig->it_virt_expires = cputime_zero;
799 sig->it_virt_incr = cputime_zero;
800 sig->it_prof_expires = cputime_zero;
801 sig->it_prof_incr = cputime_zero;
802
803 sig->leader = 0; /* session leadership doesn't inherit */ 828 sig->leader = 0; /* session leadership doesn't inherit */
804 sig->tty_old_pgrp = NULL; 829 sig->tty_old_pgrp = NULL;
830 sig->tty = NULL;
805 831
806 sig->utime = sig->stime = sig->cutime = sig->cstime = cputime_zero; 832 sig->cutime = sig->cstime = cputime_zero;
807 sig->gtime = cputime_zero; 833 sig->gtime = cputime_zero;
808 sig->cgtime = cputime_zero; 834 sig->cgtime = cputime_zero;
809 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0; 835 sig->nvcsw = sig->nivcsw = sig->cnvcsw = sig->cnivcsw = 0;
810 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0; 836 sig->min_flt = sig->maj_flt = sig->cmin_flt = sig->cmaj_flt = 0;
811 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0; 837 sig->inblock = sig->oublock = sig->cinblock = sig->coublock = 0;
812 task_io_accounting_init(&sig->ioac); 838 task_io_accounting_init(&sig->ioac);
813 sig->sum_sched_runtime = 0;
814 INIT_LIST_HEAD(&sig->cpu_timers[0]);
815 INIT_LIST_HEAD(&sig->cpu_timers[1]);
816 INIT_LIST_HEAD(&sig->cpu_timers[2]);
817 taskstats_tgid_init(sig); 839 taskstats_tgid_init(sig);
818 840
819 task_lock(current->group_leader); 841 task_lock(current->group_leader);
820 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim); 842 memcpy(sig->rlim, current->signal->rlim, sizeof sig->rlim);
821 task_unlock(current->group_leader); 843 task_unlock(current->group_leader);
822 844
823 if (sig->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { 845 posix_cpu_timers_init_group(sig);
824 /* 846
825 * New sole thread in the process gets an expiry time
826 * of the whole CPU time limit.
827 */
828 tsk->it_prof_expires =
829 secs_to_cputime(sig->rlim[RLIMIT_CPU].rlim_cur);
830 }
831 acct_init_pacct(&sig->pacct); 847 acct_init_pacct(&sig->pacct);
832 848
833 tty_audit_fork(sig); 849 tty_audit_fork(sig);
@@ -837,7 +853,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
837 853
838void __cleanup_signal(struct signal_struct *sig) 854void __cleanup_signal(struct signal_struct *sig)
839{ 855{
856 thread_group_cputime_free(sig);
840 exit_thread_group_keys(sig); 857 exit_thread_group_keys(sig);
858 tty_kref_put(sig->tty);
841 kmem_cache_free(signal_cachep, sig); 859 kmem_cache_free(signal_cachep, sig);
842} 860}
843 861
@@ -886,6 +904,19 @@ void mm_init_owner(struct mm_struct *mm, struct task_struct *p)
886#endif /* CONFIG_MM_OWNER */ 904#endif /* CONFIG_MM_OWNER */
887 905
888/* 906/*
907 * Initialize POSIX timer handling for a single task.
908 */
909static void posix_cpu_timers_init(struct task_struct *tsk)
910{
911 tsk->cputime_expires.prof_exp = cputime_zero;
912 tsk->cputime_expires.virt_exp = cputime_zero;
913 tsk->cputime_expires.sched_exp = 0;
914 INIT_LIST_HEAD(&tsk->cpu_timers[0]);
915 INIT_LIST_HEAD(&tsk->cpu_timers[1]);
916 INIT_LIST_HEAD(&tsk->cpu_timers[2]);
917}
918
919/*
889 * This creates a new process as a copy of the old one, 920 * This creates a new process as a copy of the old one,
890 * but does not actually start it yet. 921 * but does not actually start it yet.
891 * 922 *
@@ -995,12 +1026,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
995 task_io_accounting_init(&p->ioac); 1026 task_io_accounting_init(&p->ioac);
996 acct_clear_integrals(p); 1027 acct_clear_integrals(p);
997 1028
998 p->it_virt_expires = cputime_zero; 1029 posix_cpu_timers_init(p);
999 p->it_prof_expires = cputime_zero;
1000 p->it_sched_expires = 0;
1001 INIT_LIST_HEAD(&p->cpu_timers[0]);
1002 INIT_LIST_HEAD(&p->cpu_timers[1]);
1003 INIT_LIST_HEAD(&p->cpu_timers[2]);
1004 1030
1005 p->lock_depth = -1; /* -1 = no lock */ 1031 p->lock_depth = -1; /* -1 = no lock */
1006 do_posix_clock_monotonic_gettime(&p->start_time); 1032 do_posix_clock_monotonic_gettime(&p->start_time);
@@ -1201,21 +1227,6 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1201 if (clone_flags & CLONE_THREAD) { 1227 if (clone_flags & CLONE_THREAD) {
1202 p->group_leader = current->group_leader; 1228 p->group_leader = current->group_leader;
1203 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group); 1229 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1204
1205 if (!cputime_eq(current->signal->it_virt_expires,
1206 cputime_zero) ||
1207 !cputime_eq(current->signal->it_prof_expires,
1208 cputime_zero) ||
1209 current->signal->rlim[RLIMIT_CPU].rlim_cur != RLIM_INFINITY ||
1210 !list_empty(&current->signal->cpu_timers[0]) ||
1211 !list_empty(&current->signal->cpu_timers[1]) ||
1212 !list_empty(&current->signal->cpu_timers[2])) {
1213 /*
1214 * Have child wake up on its first tick to check
1215 * for process CPU timers.
1216 */
1217 p->it_prof_expires = jiffies_to_cputime(1);
1218 }
1219 } 1230 }
1220 1231
1221 if (likely(p->pid)) { 1232 if (likely(p->pid)) {
@@ -1227,7 +1238,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1227 p->nsproxy->pid_ns->child_reaper = p; 1238 p->nsproxy->pid_ns->child_reaper = p;
1228 1239
1229 p->signal->leader_pid = pid; 1240 p->signal->leader_pid = pid;
1230 p->signal->tty = current->signal->tty; 1241 tty_kref_put(p->signal->tty);
1242 p->signal->tty = tty_kref_get(current->signal->tty);
1231 set_task_pgrp(p, task_pgrp_nr(current)); 1243 set_task_pgrp(p, task_pgrp_nr(current));
1232 set_task_session(p, task_session_nr(current)); 1244 set_task_session(p, task_session_nr(current));
1233 attach_pid(p, PIDTYPE_PGID, task_pgrp(current)); 1245 attach_pid(p, PIDTYPE_PGID, task_pgrp(current));
@@ -1361,6 +1373,8 @@ long do_fork(unsigned long clone_flags,
1361 if (!IS_ERR(p)) { 1373 if (!IS_ERR(p)) {
1362 struct completion vfork; 1374 struct completion vfork;
1363 1375
1376 trace_sched_process_fork(current, p);
1377
1364 nr = task_pid_vnr(p); 1378 nr = task_pid_vnr(p);
1365 1379
1366 if (clone_flags & CLONE_PARENT_SETTID) 1380 if (clone_flags & CLONE_PARENT_SETTID)
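
The copy_process() hunk above replaces the bare `p->signal->tty = current->signal->tty` copy with a tty_kref_put()/tty_kref_get() pair, so the signal struct always holds a counted reference that __cleanup_signal() can drop. A minimal user-space sketch of that put-then-get handover, using invented fake_tty helpers rather than the real tty_kref API:

/* Toy model of the counted-reference handover; names are invented. */
#include <stdio.h>
#include <stdlib.h>

struct fake_tty {
        int refcount;
        const char *name;
};

static struct fake_tty *tty_get(struct fake_tty *t)
{
        if (t)
                t->refcount++;
        return t;
}

static void tty_put(struct fake_tty *t)
{
        if (t && --t->refcount == 0) {
                printf("releasing %s\n", t->name);
                free(t);
        }
}

int main(void)
{
        struct fake_tty *parent_tty = malloc(sizeof(*parent_tty));
        struct fake_tty *child_tty = NULL;      /* copy_signal(): sig->tty = NULL */

        parent_tty->refcount = 1;
        parent_tty->name = "console";

        /* copy_process(): put whatever the child already holds (a no-op
         * for NULL), then take a counted reference on the parent's tty */
        tty_put(child_tty);
        child_tty = tty_get(parent_tty);

        tty_put(child_tty);     /* __cleanup_signal() on the child */
        tty_put(parent_tty);    /* original owner drops its reference last */
        return 0;
}

Putting before getting means the assignment is safe no matter what the child held, which is why the diff adds both calls instead of a plain pointer copy.
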
diff --git a/kernel/freezer.c b/kernel/freezer.c
new file mode 100644
index 000000000000..ba6248b323ef
--- /dev/null
+++ b/kernel/freezer.c
@@ -0,0 +1,154 @@
1/*
2 * kernel/freezer.c - Function to freeze a process
3 *
4 * Originally from kernel/power/process.c
5 */
6
7#include <linux/interrupt.h>
8#include <linux/suspend.h>
9#include <linux/module.h>
10#include <linux/syscalls.h>
11#include <linux/freezer.h>
12
13/*
14 * freezing is complete, mark current process as frozen
15 */
16static inline void frozen_process(void)
17{
18 if (!unlikely(current->flags & PF_NOFREEZE)) {
19 current->flags |= PF_FROZEN;
20 wmb();
21 }
22 clear_freeze_flag(current);
23}
24
 25/* Refrigerator is the place where frozen processes are stored :-). */
26void refrigerator(void)
27{
28 /* Hmm, should we be allowed to suspend when there are realtime
29 processes around? */
30 long save;
31
32 task_lock(current);
33 if (freezing(current)) {
34 frozen_process();
35 task_unlock(current);
36 } else {
37 task_unlock(current);
38 return;
39 }
40 save = current->state;
41 pr_debug("%s entered refrigerator\n", current->comm);
42
43 spin_lock_irq(&current->sighand->siglock);
 44 recalc_sigpending(); /* We sent a fake signal, clean it up */
45 spin_unlock_irq(&current->sighand->siglock);
46
47 for (;;) {
48 set_current_state(TASK_UNINTERRUPTIBLE);
49 if (!frozen(current))
50 break;
51 schedule();
52 }
53 pr_debug("%s left refrigerator\n", current->comm);
54 __set_current_state(save);
55}
56EXPORT_SYMBOL(refrigerator);
57
58static void fake_signal_wake_up(struct task_struct *p)
59{
60 unsigned long flags;
61
62 spin_lock_irqsave(&p->sighand->siglock, flags);
63 signal_wake_up(p, 0);
64 spin_unlock_irqrestore(&p->sighand->siglock, flags);
65}
66
67/**
68 * freeze_task - send a freeze request to given task
69 * @p: task to send the request to
70 * @sig_only: if set, the request will only be sent if the task has the
71 * PF_FREEZER_NOSIG flag unset
 72 * Return value: 'false' if @sig_only is set and the task has
 73 * PF_FREEZER_NOSIG set or the task is frozen; 'true' otherwise
74 *
 75 * The freeze request is sent by setting the task's TIF_FREEZE flag and
76 * either sending a fake signal to it or waking it up, depending on whether
77 * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task
 78 * has PF_FREEZER_NOSIG set (i.e. it is a typical kernel thread), its
79 * TIF_FREEZE flag will not be set.
80 */
81bool freeze_task(struct task_struct *p, bool sig_only)
82{
83 /*
84 * We first check if the task is freezing and next if it has already
85 * been frozen to avoid the race with frozen_process() which first marks
86 * the task as frozen and next clears its TIF_FREEZE.
87 */
88 if (!freezing(p)) {
89 rmb();
90 if (frozen(p))
91 return false;
92
93 if (!sig_only || should_send_signal(p))
94 set_freeze_flag(p);
95 else
96 return false;
97 }
98
99 if (should_send_signal(p)) {
100 if (!signal_pending(p))
101 fake_signal_wake_up(p);
102 } else if (sig_only) {
103 return false;
104 } else {
105 wake_up_state(p, TASK_INTERRUPTIBLE);
106 }
107
108 return true;
109}
110
111void cancel_freezing(struct task_struct *p)
112{
113 unsigned long flags;
114
115 if (freezing(p)) {
116 pr_debug(" clean up: %s\n", p->comm);
117 clear_freeze_flag(p);
118 spin_lock_irqsave(&p->sighand->siglock, flags);
119 recalc_sigpending_and_wake(p);
120 spin_unlock_irqrestore(&p->sighand->siglock, flags);
121 }
122}
123
124/*
125 * Wake up a frozen process
126 *
127 * task_lock() is needed to prevent the race with refrigerator() which may
128 * occur if the freezing of tasks fails. Namely, without the lock, if the
129 * freezing of tasks failed, thaw_tasks() might have run before a task in
130 * refrigerator() could call frozen_process(), in which case the task would be
131 * frozen and no one would thaw it.
132 */
133int __thaw_process(struct task_struct *p)
134{
135 if (frozen(p)) {
136 p->flags &= ~PF_FROZEN;
137 return 1;
138 }
139 clear_freeze_flag(p);
140 return 0;
141}
142
143int thaw_process(struct task_struct *p)
144{
145 task_lock(p);
146 if (__thaw_process(p) == 1) {
147 task_unlock(p);
148 wake_up_process(p);
149 return 1;
150 }
151 task_unlock(p);
152 return 0;
153}
154EXPORT_SYMBOL(thaw_process);
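
The new freezer.c centralizes a two-flag handshake: the freezer sets a freeze request on the task, the task acknowledges it in refrigerator() by marking itself frozen, and thawing either clears the frozen state or cancels a pending request. A standalone user-space model of just that state machine follows; the flag names and helpers are toys, and the real code additionally needs wmb()/rmb(), task_lock() and siglock handling to close the races described in the comments above:

#include <stdio.h>
#include <stdbool.h>

#define FREEZING 0x1    /* models the TIF_FREEZE request */
#define FROZEN   0x2    /* models the PF_FROZEN state */

struct toy_task {
        const char *comm;
        unsigned int flags;
};

static bool toy_freeze_task(struct toy_task *p)
{
        if (p->flags & FROZEN)
                return false;           /* already in the refrigerator */
        p->flags |= FREEZING;           /* request: please freeze */
        return true;
}

static void toy_enter_refrigerator(struct toy_task *p)
{
        if (!(p->flags & FREEZING))
                return;
        p->flags &= ~FREEZING;          /* frozen_process(): ack the request */
        p->flags |= FROZEN;
        printf("%s entered refrigerator\n", p->comm);
}

static int toy_thaw_process(struct toy_task *p)
{
        if (p->flags & FROZEN) {
                p->flags &= ~FROZEN;    /* __thaw_process() */
                printf("%s left refrigerator\n", p->comm);
                return 1;
        }
        p->flags &= ~FREEZING;          /* cancel_freezing(): drop a pending request */
        return 0;
}

int main(void)
{
        struct toy_task t = { .comm = "worker", .flags = 0 };

        toy_freeze_task(&t);            /* freezer side */
        toy_enter_refrigerator(&t);     /* task side, on its way to schedule() */
        toy_thaw_process(&t);           /* freezer side, on resume */
        return 0;
}
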
diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
index cdec83e722fa..95978f48e039 100644
--- a/kernel/hrtimer.c
+++ b/kernel/hrtimer.c
@@ -1403,9 +1403,7 @@ void hrtimer_run_queues(void)
1403 if (!base->first) 1403 if (!base->first)
1404 continue; 1404 continue;
1405 1405
1406 if (base->get_softirq_time) 1406 if (gettime) {
1407 base->softirq_time = base->get_softirq_time();
1408 else if (gettime) {
1409 hrtimer_get_softirq_time(cpu_base); 1407 hrtimer_get_softirq_time(cpu_base);
1410 gettime = 0; 1408 gettime = 0;
1411 } 1409 }
@@ -1688,9 +1686,11 @@ static void migrate_hrtimers(int cpu)
1688 new_base = &get_cpu_var(hrtimer_bases); 1686 new_base = &get_cpu_var(hrtimer_bases);
1689 1687
1690 tick_cancel_sched_timer(cpu); 1688 tick_cancel_sched_timer(cpu);
1691 1689 /*
1692 local_irq_disable(); 1690 * The caller is globally serialized and nobody else
1693 spin_lock(&new_base->lock); 1691 * takes two locks at once, deadlock is not possible.
1692 */
1693 spin_lock_irq(&new_base->lock);
1694 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 1694 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1695 1695
1696 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { 1696 for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
@@ -1703,8 +1703,7 @@ static void migrate_hrtimers(int cpu)
1703 raise = 1; 1703 raise = 1;
1704 1704
1705 spin_unlock(&old_base->lock); 1705 spin_unlock(&old_base->lock);
1706 spin_unlock(&new_base->lock); 1706 spin_unlock_irq(&new_base->lock);
1707 local_irq_enable();
1708 put_cpu_var(hrtimer_bases); 1707 put_cpu_var(hrtimer_bases);
1709 1708
1710 if (raise) 1709 if (raise)
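
migrate_hrtimers() now documents why taking the two base locks in a fixed order is safe (the hotplug caller is globally serialized) and folds the separate local_irq_disable()/spin_lock() pair into spin_lock_irq(). A pthreads analogue of the same ordering idea, with toy structures and no IRQ masking or lockdep annotations:

#include <pthread.h>
#include <stdio.h>

struct toy_base {
        pthread_mutex_t lock;
        int timers;
};

static void migrate(struct toy_base *old_base, struct toy_base *new_base)
{
        /* Caller is globally serialized, so this fixed lock ordering
         * cannot deadlock against a concurrent migrate(). */
        pthread_mutex_lock(&new_base->lock);
        pthread_mutex_lock(&old_base->lock);

        new_base->timers += old_base->timers;
        old_base->timers = 0;

        pthread_mutex_unlock(&old_base->lock);
        pthread_mutex_unlock(&new_base->lock);
}

int main(void)
{
        struct toy_base a = { PTHREAD_MUTEX_INITIALIZER, 3 };
        struct toy_base b = { PTHREAD_MUTEX_INITIALIZER, 0 };

        migrate(&a, &b);
        printf("migrated, new base now holds %d timers\n", b.timers);
        return 0;
}
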
diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c
index 533068cfb607..cc0f7321b8ce 100644
--- a/kernel/irq/autoprobe.c
+++ b/kernel/irq/autoprobe.c
@@ -30,17 +30,16 @@ static DEFINE_MUTEX(probing_active);
30unsigned long probe_irq_on(void) 30unsigned long probe_irq_on(void)
31{ 31{
32 struct irq_desc *desc; 32 struct irq_desc *desc;
33 unsigned long mask; 33 unsigned long mask = 0;
34 unsigned int i; 34 unsigned int status;
35 int i;
35 36
36 mutex_lock(&probing_active); 37 mutex_lock(&probing_active);
37 /* 38 /*
38 * something may have generated an irq long ago and we want to 39 * something may have generated an irq long ago and we want to
39 * flush such a longstanding irq before considering it as spurious. 40 * flush such a longstanding irq before considering it as spurious.
40 */ 41 */
41 for (i = NR_IRQS-1; i > 0; i--) { 42 for_each_irq_desc_reverse(i, desc) {
42 desc = irq_desc + i;
43
44 spin_lock_irq(&desc->lock); 43 spin_lock_irq(&desc->lock);
45 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 44 if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
46 /* 45 /*
@@ -68,9 +67,7 @@ unsigned long probe_irq_on(void)
68 * (we must startup again here because if a longstanding irq 67 * (we must startup again here because if a longstanding irq
69 * happened in the previous stage, it may have masked itself) 68 * happened in the previous stage, it may have masked itself)
70 */ 69 */
71 for (i = NR_IRQS-1; i > 0; i--) { 70 for_each_irq_desc_reverse(i, desc) {
72 desc = irq_desc + i;
73
74 spin_lock_irq(&desc->lock); 71 spin_lock_irq(&desc->lock);
75 if (!desc->action && !(desc->status & IRQ_NOPROBE)) { 72 if (!desc->action && !(desc->status & IRQ_NOPROBE)) {
76 desc->status |= IRQ_AUTODETECT | IRQ_WAITING; 73 desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
@@ -88,11 +85,7 @@ unsigned long probe_irq_on(void)
88 /* 85 /*
89 * Now filter out any obviously spurious interrupts 86 * Now filter out any obviously spurious interrupts
90 */ 87 */
91 mask = 0; 88 for_each_irq_desc(i, desc) {
92 for (i = 0; i < NR_IRQS; i++) {
93 unsigned int status;
94
95 desc = irq_desc + i;
96 spin_lock_irq(&desc->lock); 89 spin_lock_irq(&desc->lock);
97 status = desc->status; 90 status = desc->status;
98 91
@@ -126,14 +119,11 @@ EXPORT_SYMBOL(probe_irq_on);
126 */ 119 */
127unsigned int probe_irq_mask(unsigned long val) 120unsigned int probe_irq_mask(unsigned long val)
128{ 121{
129 unsigned int mask; 122 unsigned int status, mask = 0;
123 struct irq_desc *desc;
130 int i; 124 int i;
131 125
132 mask = 0; 126 for_each_irq_desc(i, desc) {
133 for (i = 0; i < NR_IRQS; i++) {
134 struct irq_desc *desc = irq_desc + i;
135 unsigned int status;
136
137 spin_lock_irq(&desc->lock); 127 spin_lock_irq(&desc->lock);
138 status = desc->status; 128 status = desc->status;
139 129
@@ -171,20 +161,19 @@ EXPORT_SYMBOL(probe_irq_mask);
171 */ 161 */
172int probe_irq_off(unsigned long val) 162int probe_irq_off(unsigned long val)
173{ 163{
174 int i, irq_found = 0, nr_irqs = 0; 164 int i, irq_found = 0, nr_of_irqs = 0;
175 165 struct irq_desc *desc;
176 for (i = 0; i < NR_IRQS; i++) { 166 unsigned int status;
177 struct irq_desc *desc = irq_desc + i;
178 unsigned int status;
179 167
168 for_each_irq_desc(i, desc) {
180 spin_lock_irq(&desc->lock); 169 spin_lock_irq(&desc->lock);
181 status = desc->status; 170 status = desc->status;
182 171
183 if (status & IRQ_AUTODETECT) { 172 if (status & IRQ_AUTODETECT) {
184 if (!(status & IRQ_WAITING)) { 173 if (!(status & IRQ_WAITING)) {
185 if (!nr_irqs) 174 if (!nr_of_irqs)
186 irq_found = i; 175 irq_found = i;
187 nr_irqs++; 176 nr_of_irqs++;
188 } 177 }
189 desc->status = status & ~IRQ_AUTODETECT; 178 desc->status = status & ~IRQ_AUTODETECT;
190 desc->chip->shutdown(i); 179 desc->chip->shutdown(i);
@@ -193,7 +182,7 @@ int probe_irq_off(unsigned long val)
193 } 182 }
194 mutex_unlock(&probing_active); 183 mutex_unlock(&probing_active);
195 184
196 if (nr_irqs > 1) 185 if (nr_of_irqs > 1)
197 irq_found = -irq_found; 186 irq_found = -irq_found;
198 187
199 return irq_found; 188 return irq_found;
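
The probe loops above stop indexing irq_desc[0..NR_IRQS-1] directly and use the for_each_irq_desc()/for_each_irq_desc_reverse() iterators, which hide how descriptors are stored. A user-space sketch of that iterator pattern over a toy table (macro and names invented, not the kernel's definition):

#include <stdio.h>

struct toy_desc {
        int status;
};

#define TOY_NR_DESC 4
static struct toy_desc toy_descs[TOY_NR_DESC];

/* hand back both index and pointer for every descriptor that exists;
 * a _reverse variant would simply run the index the other way */
#define for_each_toy_desc(i, d) \
        for ((i) = 0, (d) = &toy_descs[0]; (i) < TOY_NR_DESC; (i)++, (d)++)

int main(void)
{
        struct toy_desc *desc;
        int i;

        for_each_toy_desc(i, desc)
                desc->status = 10 + i;

        for_each_toy_desc(i, desc)
                printf("desc %d status %d\n", i, desc->status);
        return 0;
}
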
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 3cd441ebf5d2..4895fde4eb93 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -24,16 +24,15 @@
24 */ 24 */
25void dynamic_irq_init(unsigned int irq) 25void dynamic_irq_init(unsigned int irq)
26{ 26{
27 struct irq_desc *desc; 27 struct irq_desc *desc = irq_to_desc(irq);
28 unsigned long flags; 28 unsigned long flags;
29 29
30 if (irq >= NR_IRQS) { 30 if (!desc) {
31 WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq); 31 WARN(1, KERN_ERR "Trying to initialize invalid IRQ%d\n", irq);
32 return; 32 return;
33 } 33 }
34 34
35 /* Ensure we don't have left over values from a previous use of this irq */ 35 /* Ensure we don't have left over values from a previous use of this irq */
36 desc = irq_desc + irq;
37 spin_lock_irqsave(&desc->lock, flags); 36 spin_lock_irqsave(&desc->lock, flags);
38 desc->status = IRQ_DISABLED; 37 desc->status = IRQ_DISABLED;
39 desc->chip = &no_irq_chip; 38 desc->chip = &no_irq_chip;
@@ -57,15 +56,14 @@ void dynamic_irq_init(unsigned int irq)
57 */ 56 */
58void dynamic_irq_cleanup(unsigned int irq) 57void dynamic_irq_cleanup(unsigned int irq)
59{ 58{
60 struct irq_desc *desc; 59 struct irq_desc *desc = irq_to_desc(irq);
61 unsigned long flags; 60 unsigned long flags;
62 61
63 if (irq >= NR_IRQS) { 62 if (!desc) {
64 WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq); 63 WARN(1, KERN_ERR "Trying to cleanup invalid IRQ%d\n", irq);
65 return; 64 return;
66 } 65 }
67 66
68 desc = irq_desc + irq;
69 spin_lock_irqsave(&desc->lock, flags); 67 spin_lock_irqsave(&desc->lock, flags);
70 if (desc->action) { 68 if (desc->action) {
71 spin_unlock_irqrestore(&desc->lock, flags); 69 spin_unlock_irqrestore(&desc->lock, flags);
@@ -89,10 +87,10 @@ void dynamic_irq_cleanup(unsigned int irq)
89 */ 87 */
90int set_irq_chip(unsigned int irq, struct irq_chip *chip) 88int set_irq_chip(unsigned int irq, struct irq_chip *chip)
91{ 89{
92 struct irq_desc *desc; 90 struct irq_desc *desc = irq_to_desc(irq);
93 unsigned long flags; 91 unsigned long flags;
94 92
95 if (irq >= NR_IRQS) { 93 if (!desc) {
96 WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq); 94 WARN(1, KERN_ERR "Trying to install chip for IRQ%d\n", irq);
97 return -EINVAL; 95 return -EINVAL;
98 } 96 }
@@ -100,7 +98,6 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
100 if (!chip) 98 if (!chip)
101 chip = &no_irq_chip; 99 chip = &no_irq_chip;
102 100
103 desc = irq_desc + irq;
104 spin_lock_irqsave(&desc->lock, flags); 101 spin_lock_irqsave(&desc->lock, flags);
105 irq_chip_set_defaults(chip); 102 irq_chip_set_defaults(chip);
106 desc->chip = chip; 103 desc->chip = chip;
@@ -111,27 +108,27 @@ int set_irq_chip(unsigned int irq, struct irq_chip *chip)
111EXPORT_SYMBOL(set_irq_chip); 108EXPORT_SYMBOL(set_irq_chip);
112 109
113/** 110/**
114 * set_irq_type - set the irq type for an irq 111 * set_irq_type - set the irq trigger type for an irq
115 * @irq: irq number 112 * @irq: irq number
116 * @type: interrupt type - see include/linux/interrupt.h 113 * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h
117 */ 114 */
118int set_irq_type(unsigned int irq, unsigned int type) 115int set_irq_type(unsigned int irq, unsigned int type)
119{ 116{
120 struct irq_desc *desc; 117 struct irq_desc *desc = irq_to_desc(irq);
121 unsigned long flags; 118 unsigned long flags;
122 int ret = -ENXIO; 119 int ret = -ENXIO;
123 120
124 if (irq >= NR_IRQS) { 121 if (!desc) {
125 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq); 122 printk(KERN_ERR "Trying to set irq type for IRQ%d\n", irq);
126 return -ENODEV; 123 return -ENODEV;
127 } 124 }
128 125
129 desc = irq_desc + irq; 126 if (type == IRQ_TYPE_NONE)
130 if (desc->chip->set_type) { 127 return 0;
131 spin_lock_irqsave(&desc->lock, flags); 128
132 ret = desc->chip->set_type(irq, type); 129 spin_lock_irqsave(&desc->lock, flags);
133 spin_unlock_irqrestore(&desc->lock, flags); 130 ret = __irq_set_trigger(desc, irq, flags);
134 } 131 spin_unlock_irqrestore(&desc->lock, flags);
135 return ret; 132 return ret;
136} 133}
137EXPORT_SYMBOL(set_irq_type); 134EXPORT_SYMBOL(set_irq_type);
@@ -145,16 +142,15 @@ EXPORT_SYMBOL(set_irq_type);
145 */ 142 */
146int set_irq_data(unsigned int irq, void *data) 143int set_irq_data(unsigned int irq, void *data)
147{ 144{
148 struct irq_desc *desc; 145 struct irq_desc *desc = irq_to_desc(irq);
149 unsigned long flags; 146 unsigned long flags;
150 147
151 if (irq >= NR_IRQS) { 148 if (!desc) {
152 printk(KERN_ERR 149 printk(KERN_ERR
153 "Trying to install controller data for IRQ%d\n", irq); 150 "Trying to install controller data for IRQ%d\n", irq);
154 return -EINVAL; 151 return -EINVAL;
155 } 152 }
156 153
157 desc = irq_desc + irq;
158 spin_lock_irqsave(&desc->lock, flags); 154 spin_lock_irqsave(&desc->lock, flags);
159 desc->handler_data = data; 155 desc->handler_data = data;
160 spin_unlock_irqrestore(&desc->lock, flags); 156 spin_unlock_irqrestore(&desc->lock, flags);
@@ -171,15 +167,15 @@ EXPORT_SYMBOL(set_irq_data);
171 */ 167 */
172int set_irq_msi(unsigned int irq, struct msi_desc *entry) 168int set_irq_msi(unsigned int irq, struct msi_desc *entry)
173{ 169{
174 struct irq_desc *desc; 170 struct irq_desc *desc = irq_to_desc(irq);
175 unsigned long flags; 171 unsigned long flags;
176 172
177 if (irq >= NR_IRQS) { 173 if (!desc) {
178 printk(KERN_ERR 174 printk(KERN_ERR
179 "Trying to install msi data for IRQ%d\n", irq); 175 "Trying to install msi data for IRQ%d\n", irq);
180 return -EINVAL; 176 return -EINVAL;
181 } 177 }
182 desc = irq_desc + irq; 178
183 spin_lock_irqsave(&desc->lock, flags); 179 spin_lock_irqsave(&desc->lock, flags);
184 desc->msi_desc = entry; 180 desc->msi_desc = entry;
185 if (entry) 181 if (entry)
@@ -197,10 +193,16 @@ int set_irq_msi(unsigned int irq, struct msi_desc *entry)
197 */ 193 */
198int set_irq_chip_data(unsigned int irq, void *data) 194int set_irq_chip_data(unsigned int irq, void *data)
199{ 195{
200 struct irq_desc *desc = irq_desc + irq; 196 struct irq_desc *desc = irq_to_desc(irq);
201 unsigned long flags; 197 unsigned long flags;
202 198
203 if (irq >= NR_IRQS || !desc->chip) { 199 if (!desc) {
200 printk(KERN_ERR
201 "Trying to install chip data for IRQ%d\n", irq);
202 return -EINVAL;
203 }
204
205 if (!desc->chip) {
204 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq); 206 printk(KERN_ERR "BUG: bad set_irq_chip_data(IRQ#%d)\n", irq);
205 return -EINVAL; 207 return -EINVAL;
206 } 208 }
@@ -218,7 +220,7 @@ EXPORT_SYMBOL(set_irq_chip_data);
218 */ 220 */
219static void default_enable(unsigned int irq) 221static void default_enable(unsigned int irq)
220{ 222{
221 struct irq_desc *desc = irq_desc + irq; 223 struct irq_desc *desc = irq_to_desc(irq);
222 224
223 desc->chip->unmask(irq); 225 desc->chip->unmask(irq);
224 desc->status &= ~IRQ_MASKED; 226 desc->status &= ~IRQ_MASKED;
@@ -236,8 +238,9 @@ static void default_disable(unsigned int irq)
236 */ 238 */
237static unsigned int default_startup(unsigned int irq) 239static unsigned int default_startup(unsigned int irq)
238{ 240{
239 irq_desc[irq].chip->enable(irq); 241 struct irq_desc *desc = irq_to_desc(irq);
240 242
243 desc->chip->enable(irq);
241 return 0; 244 return 0;
242} 245}
243 246
@@ -246,7 +249,7 @@ static unsigned int default_startup(unsigned int irq)
246 */ 249 */
247static void default_shutdown(unsigned int irq) 250static void default_shutdown(unsigned int irq)
248{ 251{
249 struct irq_desc *desc = irq_desc + irq; 252 struct irq_desc *desc = irq_to_desc(irq);
250 253
251 desc->chip->mask(irq); 254 desc->chip->mask(irq);
252 desc->status |= IRQ_MASKED; 255 desc->status |= IRQ_MASKED;
@@ -305,14 +308,13 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
305{ 308{
306 struct irqaction *action; 309 struct irqaction *action;
307 irqreturn_t action_ret; 310 irqreturn_t action_ret;
308 const unsigned int cpu = smp_processor_id();
309 311
310 spin_lock(&desc->lock); 312 spin_lock(&desc->lock);
311 313
312 if (unlikely(desc->status & IRQ_INPROGRESS)) 314 if (unlikely(desc->status & IRQ_INPROGRESS))
313 goto out_unlock; 315 goto out_unlock;
314 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 316 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
315 kstat_cpu(cpu).irqs[irq]++; 317 kstat_incr_irqs_this_cpu(irq, desc);
316 318
317 action = desc->action; 319 action = desc->action;
318 if (unlikely(!action || (desc->status & IRQ_DISABLED))) 320 if (unlikely(!action || (desc->status & IRQ_DISABLED)))
@@ -344,7 +346,6 @@ out_unlock:
344void 346void
345handle_level_irq(unsigned int irq, struct irq_desc *desc) 347handle_level_irq(unsigned int irq, struct irq_desc *desc)
346{ 348{
347 unsigned int cpu = smp_processor_id();
348 struct irqaction *action; 349 struct irqaction *action;
349 irqreturn_t action_ret; 350 irqreturn_t action_ret;
350 351
@@ -354,7 +355,7 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
354 if (unlikely(desc->status & IRQ_INPROGRESS)) 355 if (unlikely(desc->status & IRQ_INPROGRESS))
355 goto out_unlock; 356 goto out_unlock;
356 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 357 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
357 kstat_cpu(cpu).irqs[irq]++; 358 kstat_incr_irqs_this_cpu(irq, desc);
358 359
359 /* 360 /*
 360 * If it's disabled or no action is available 361 * If it's disabled or no action is available
@@ -392,7 +393,6 @@ out_unlock:
392void 393void
393handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) 394handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
394{ 395{
395 unsigned int cpu = smp_processor_id();
396 struct irqaction *action; 396 struct irqaction *action;
397 irqreturn_t action_ret; 397 irqreturn_t action_ret;
398 398
@@ -402,7 +402,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
402 goto out; 402 goto out;
403 403
404 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 404 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
405 kstat_cpu(cpu).irqs[irq]++; 405 kstat_incr_irqs_this_cpu(irq, desc);
406 406
407 /* 407 /*
 408 * If it's disabled or no action is available 408 * If it's disabled or no action is available
@@ -451,8 +451,6 @@ out:
451void 451void
452handle_edge_irq(unsigned int irq, struct irq_desc *desc) 452handle_edge_irq(unsigned int irq, struct irq_desc *desc)
453{ 453{
454 const unsigned int cpu = smp_processor_id();
455
456 spin_lock(&desc->lock); 454 spin_lock(&desc->lock);
457 455
458 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); 456 desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
@@ -468,8 +466,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
468 mask_ack_irq(desc, irq); 466 mask_ack_irq(desc, irq);
469 goto out_unlock; 467 goto out_unlock;
470 } 468 }
471 469 kstat_incr_irqs_this_cpu(irq, desc);
472 kstat_cpu(cpu).irqs[irq]++;
473 470
474 /* Start handling the irq */ 471 /* Start handling the irq */
475 desc->chip->ack(irq); 472 desc->chip->ack(irq);
@@ -524,7 +521,7 @@ handle_percpu_irq(unsigned int irq, struct irq_desc *desc)
524{ 521{
525 irqreturn_t action_ret; 522 irqreturn_t action_ret;
526 523
527 kstat_this_cpu.irqs[irq]++; 524 kstat_incr_irqs_this_cpu(irq, desc);
528 525
529 if (desc->chip->ack) 526 if (desc->chip->ack)
530 desc->chip->ack(irq); 527 desc->chip->ack(irq);
@@ -541,17 +538,15 @@ void
541__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, 538__set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
542 const char *name) 539 const char *name)
543{ 540{
544 struct irq_desc *desc; 541 struct irq_desc *desc = irq_to_desc(irq);
545 unsigned long flags; 542 unsigned long flags;
546 543
547 if (irq >= NR_IRQS) { 544 if (!desc) {
548 printk(KERN_ERR 545 printk(KERN_ERR
549 "Trying to install type control for IRQ%d\n", irq); 546 "Trying to install type control for IRQ%d\n", irq);
550 return; 547 return;
551 } 548 }
552 549
553 desc = irq_desc + irq;
554
555 if (!handle) 550 if (!handle)
556 handle = handle_bad_irq; 551 handle = handle_bad_irq;
557 else if (desc->chip == &no_irq_chip) { 552 else if (desc->chip == &no_irq_chip) {
@@ -583,7 +578,7 @@ __set_irq_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained,
583 desc->status &= ~IRQ_DISABLED; 578 desc->status &= ~IRQ_DISABLED;
584 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE; 579 desc->status |= IRQ_NOREQUEST | IRQ_NOPROBE;
585 desc->depth = 0; 580 desc->depth = 0;
586 desc->chip->unmask(irq); 581 desc->chip->startup(irq);
587 } 582 }
588 spin_unlock_irqrestore(&desc->lock, flags); 583 spin_unlock_irqrestore(&desc->lock, flags);
589} 584}
@@ -606,17 +601,14 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
606 601
607void __init set_irq_noprobe(unsigned int irq) 602void __init set_irq_noprobe(unsigned int irq)
608{ 603{
609 struct irq_desc *desc; 604 struct irq_desc *desc = irq_to_desc(irq);
610 unsigned long flags; 605 unsigned long flags;
611 606
612 if (irq >= NR_IRQS) { 607 if (!desc) {
613 printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq); 608 printk(KERN_ERR "Trying to mark IRQ%d non-probeable\n", irq);
614
615 return; 609 return;
616 } 610 }
617 611
618 desc = irq_desc + irq;
619
620 spin_lock_irqsave(&desc->lock, flags); 612 spin_lock_irqsave(&desc->lock, flags);
621 desc->status |= IRQ_NOPROBE; 613 desc->status |= IRQ_NOPROBE;
622 spin_unlock_irqrestore(&desc->lock, flags); 614 spin_unlock_irqrestore(&desc->lock, flags);
@@ -624,17 +616,14 @@ void __init set_irq_noprobe(unsigned int irq)
624 616
625void __init set_irq_probe(unsigned int irq) 617void __init set_irq_probe(unsigned int irq)
626{ 618{
627 struct irq_desc *desc; 619 struct irq_desc *desc = irq_to_desc(irq);
628 unsigned long flags; 620 unsigned long flags;
629 621
630 if (irq >= NR_IRQS) { 622 if (!desc) {
631 printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq); 623 printk(KERN_ERR "Trying to mark IRQ%d probeable\n", irq);
632
633 return; 624 return;
634 } 625 }
635 626
636 desc = irq_desc + irq;
637
638 spin_lock_irqsave(&desc->lock, flags); 627 spin_lock_irqsave(&desc->lock, flags);
639 desc->status &= ~IRQ_NOPROBE; 628 desc->status &= ~IRQ_NOPROBE;
640 spin_unlock_irqrestore(&desc->lock, flags); 629 spin_unlock_irqrestore(&desc->lock, flags);
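
Throughout chip.c the open-coded `irq >= NR_IRQS` range checks become `desc = irq_to_desc(irq); if (!desc) ...`, i.e. validity is expressed as a lookup that can fail. A small stand-alone illustration of that calling convention, with invented names rather than the kernel helper:

#include <stdio.h>
#include <stddef.h>

struct toy_desc {
        int depth;
};

#define TOY_NR_DESC 4
static struct toy_desc toy_descs[TOY_NR_DESC];

static struct toy_desc *toy_irq_to_desc(unsigned int irq)
{
        return irq < TOY_NR_DESC ? &toy_descs[irq] : NULL;
}

static int toy_set_depth(unsigned int irq, int depth)
{
        struct toy_desc *desc = toy_irq_to_desc(irq);

        if (!desc) {
                fprintf(stderr, "invalid IRQ%u\n", irq);
                return -1;
        }
        desc->depth = depth;
        return 0;
}

int main(void)
{
        toy_set_depth(2, 1);    /* valid, descriptor exists */
        toy_set_depth(99, 1);   /* rejected via the NULL check */
        return 0;
}
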
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 5fa6198e9139..c815b42d0f5b 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -25,11 +25,10 @@
25 * 25 *
 26 * Handles spurious and unhandled IRQs. It also prints a debug message. 26 * Handles spurious and unhandled IRQs. It also prints a debug message.
27 */ 27 */
28void 28void handle_bad_irq(unsigned int irq, struct irq_desc *desc)
29handle_bad_irq(unsigned int irq, struct irq_desc *desc)
30{ 29{
31 print_irq_desc(irq, desc); 30 print_irq_desc(irq, desc);
32 kstat_this_cpu.irqs[irq]++; 31 kstat_incr_irqs_this_cpu(irq, desc);
33 ack_bad_irq(irq); 32 ack_bad_irq(irq);
34} 33}
35 34
@@ -47,6 +46,9 @@ handle_bad_irq(unsigned int irq, struct irq_desc *desc)
47 * 46 *
48 * Controller mappings for all interrupt sources: 47 * Controller mappings for all interrupt sources:
49 */ 48 */
49int nr_irqs = NR_IRQS;
50EXPORT_SYMBOL_GPL(nr_irqs);
51
50struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = { 52struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
51 [0 ... NR_IRQS-1] = { 53 [0 ... NR_IRQS-1] = {
52 .status = IRQ_DISABLED, 54 .status = IRQ_DISABLED,
@@ -66,7 +68,9 @@ struct irq_desc irq_desc[NR_IRQS] __cacheline_aligned_in_smp = {
66 */ 68 */
67static void ack_bad(unsigned int irq) 69static void ack_bad(unsigned int irq)
68{ 70{
69 print_irq_desc(irq, irq_desc + irq); 71 struct irq_desc *desc = irq_to_desc(irq);
72
73 print_irq_desc(irq, desc);
70 ack_bad_irq(irq); 74 ack_bad_irq(irq);
71} 75}
72 76
@@ -131,8 +135,6 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
131 irqreturn_t ret, retval = IRQ_NONE; 135 irqreturn_t ret, retval = IRQ_NONE;
132 unsigned int status = 0; 136 unsigned int status = 0;
133 137
134 handle_dynamic_tick(action);
135
136 if (!(action->flags & IRQF_DISABLED)) 138 if (!(action->flags & IRQF_DISABLED))
137 local_irq_enable_in_hardirq(); 139 local_irq_enable_in_hardirq();
138 140
@@ -165,11 +167,12 @@ irqreturn_t handle_IRQ_event(unsigned int irq, struct irqaction *action)
165 */ 167 */
166unsigned int __do_IRQ(unsigned int irq) 168unsigned int __do_IRQ(unsigned int irq)
167{ 169{
168 struct irq_desc *desc = irq_desc + irq; 170 struct irq_desc *desc = irq_to_desc(irq);
169 struct irqaction *action; 171 struct irqaction *action;
170 unsigned int status; 172 unsigned int status;
171 173
172 kstat_this_cpu.irqs[irq]++; 174 kstat_incr_irqs_this_cpu(irq, desc);
175
173 if (CHECK_IRQ_PER_CPU(desc->status)) { 176 if (CHECK_IRQ_PER_CPU(desc->status)) {
174 irqreturn_t action_ret; 177 irqreturn_t action_ret;
175 178
@@ -256,8 +259,8 @@ out:
256} 259}
257#endif 260#endif
258 261
259#ifdef CONFIG_TRACE_IRQFLAGS
260 262
263#ifdef CONFIG_TRACE_IRQFLAGS
261/* 264/*
262 * lockdep: we want to handle all irq_desc locks as a single lock-class: 265 * lockdep: we want to handle all irq_desc locks as a single lock-class:
263 */ 266 */
@@ -265,10 +268,10 @@ static struct lock_class_key irq_desc_lock_class;
265 268
266void early_init_irq_lock_class(void) 269void early_init_irq_lock_class(void)
267{ 270{
271 struct irq_desc *desc;
268 int i; 272 int i;
269 273
270 for (i = 0; i < NR_IRQS; i++) 274 for_each_irq_desc(i, desc)
271 lockdep_set_class(&irq_desc[i].lock, &irq_desc_lock_class); 275 lockdep_set_class(&desc->lock, &irq_desc_lock_class);
272} 276}
273
274#endif 277#endif
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 08a849a22447..c9767e641980 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -10,12 +10,15 @@ extern void irq_chip_set_defaults(struct irq_chip *chip);
10/* Set default handler: */ 10/* Set default handler: */
11extern void compat_irq_chip_set_default_handler(struct irq_desc *desc); 11extern void compat_irq_chip_set_default_handler(struct irq_desc *desc);
12 12
13extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
14 unsigned long flags);
15
13#ifdef CONFIG_PROC_FS 16#ifdef CONFIG_PROC_FS
14extern void register_irq_proc(unsigned int irq); 17extern void register_irq_proc(unsigned int irq, struct irq_desc *desc);
15extern void register_handler_proc(unsigned int irq, struct irqaction *action); 18extern void register_handler_proc(unsigned int irq, struct irqaction *action);
16extern void unregister_handler_proc(unsigned int irq, struct irqaction *action); 19extern void unregister_handler_proc(unsigned int irq, struct irqaction *action);
17#else 20#else
18static inline void register_irq_proc(unsigned int irq) { } 21static inline void register_irq_proc(unsigned int irq, struct irq_desc *desc) { }
19static inline void register_handler_proc(unsigned int irq, 22static inline void register_handler_proc(unsigned int irq,
20 struct irqaction *action) { } 23 struct irqaction *action) { }
21static inline void unregister_handler_proc(unsigned int irq, 24static inline void unregister_handler_proc(unsigned int irq,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 0314074fa232..c498a1b8c621 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -31,10 +31,10 @@ cpumask_t irq_default_affinity = CPU_MASK_ALL;
31 */ 31 */
32void synchronize_irq(unsigned int irq) 32void synchronize_irq(unsigned int irq)
33{ 33{
34 struct irq_desc *desc = irq_desc + irq; 34 struct irq_desc *desc = irq_to_desc(irq);
35 unsigned int status; 35 unsigned int status;
36 36
37 if (irq >= NR_IRQS) 37 if (!desc)
38 return; 38 return;
39 39
40 do { 40 do {
@@ -64,7 +64,7 @@ EXPORT_SYMBOL(synchronize_irq);
64 */ 64 */
65int irq_can_set_affinity(unsigned int irq) 65int irq_can_set_affinity(unsigned int irq)
66{ 66{
67 struct irq_desc *desc = irq_desc + irq; 67 struct irq_desc *desc = irq_to_desc(irq);
68 68
69 if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip || 69 if (CHECK_IRQ_PER_CPU(desc->status) || !desc->chip ||
70 !desc->chip->set_affinity) 70 !desc->chip->set_affinity)
@@ -81,15 +81,21 @@ int irq_can_set_affinity(unsigned int irq)
81 */ 81 */
82int irq_set_affinity(unsigned int irq, cpumask_t cpumask) 82int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
83{ 83{
84 struct irq_desc *desc = irq_desc + irq; 84 struct irq_desc *desc = irq_to_desc(irq);
85 85
86 if (!desc->chip->set_affinity) 86 if (!desc->chip->set_affinity)
87 return -EINVAL; 87 return -EINVAL;
88 88
89 set_balance_irq_affinity(irq, cpumask);
90
91#ifdef CONFIG_GENERIC_PENDING_IRQ 89#ifdef CONFIG_GENERIC_PENDING_IRQ
92 set_pending_irq(irq, cpumask); 90 if (desc->status & IRQ_MOVE_PCNTXT || desc->status & IRQ_DISABLED) {
91 unsigned long flags;
92
93 spin_lock_irqsave(&desc->lock, flags);
94 desc->affinity = cpumask;
95 desc->chip->set_affinity(irq, cpumask);
96 spin_unlock_irqrestore(&desc->lock, flags);
97 } else
98 set_pending_irq(irq, cpumask);
93#else 99#else
94 desc->affinity = cpumask; 100 desc->affinity = cpumask;
95 desc->chip->set_affinity(irq, cpumask); 101 desc->chip->set_affinity(irq, cpumask);
@@ -104,16 +110,17 @@ int irq_set_affinity(unsigned int irq, cpumask_t cpumask)
104int irq_select_affinity(unsigned int irq) 110int irq_select_affinity(unsigned int irq)
105{ 111{
106 cpumask_t mask; 112 cpumask_t mask;
113 struct irq_desc *desc;
107 114
108 if (!irq_can_set_affinity(irq)) 115 if (!irq_can_set_affinity(irq))
109 return 0; 116 return 0;
110 117
111 cpus_and(mask, cpu_online_map, irq_default_affinity); 118 cpus_and(mask, cpu_online_map, irq_default_affinity);
112 119
113 irq_desc[irq].affinity = mask; 120 desc = irq_to_desc(irq);
114 irq_desc[irq].chip->set_affinity(irq, mask); 121 desc->affinity = mask;
122 desc->chip->set_affinity(irq, mask);
115 123
116 set_balance_irq_affinity(irq, mask);
117 return 0; 124 return 0;
118} 125}
119#endif 126#endif
@@ -133,10 +140,10 @@ int irq_select_affinity(unsigned int irq)
133 */ 140 */
134void disable_irq_nosync(unsigned int irq) 141void disable_irq_nosync(unsigned int irq)
135{ 142{
136 struct irq_desc *desc = irq_desc + irq; 143 struct irq_desc *desc = irq_to_desc(irq);
137 unsigned long flags; 144 unsigned long flags;
138 145
139 if (irq >= NR_IRQS) 146 if (!desc)
140 return; 147 return;
141 148
142 spin_lock_irqsave(&desc->lock, flags); 149 spin_lock_irqsave(&desc->lock, flags);
@@ -162,9 +169,9 @@ EXPORT_SYMBOL(disable_irq_nosync);
162 */ 169 */
163void disable_irq(unsigned int irq) 170void disable_irq(unsigned int irq)
164{ 171{
165 struct irq_desc *desc = irq_desc + irq; 172 struct irq_desc *desc = irq_to_desc(irq);
166 173
167 if (irq >= NR_IRQS) 174 if (!desc)
168 return; 175 return;
169 176
170 disable_irq_nosync(irq); 177 disable_irq_nosync(irq);
@@ -204,10 +211,10 @@ static void __enable_irq(struct irq_desc *desc, unsigned int irq)
204 */ 211 */
205void enable_irq(unsigned int irq) 212void enable_irq(unsigned int irq)
206{ 213{
207 struct irq_desc *desc = irq_desc + irq; 214 struct irq_desc *desc = irq_to_desc(irq);
208 unsigned long flags; 215 unsigned long flags;
209 216
210 if (irq >= NR_IRQS) 217 if (!desc)
211 return; 218 return;
212 219
213 spin_lock_irqsave(&desc->lock, flags); 220 spin_lock_irqsave(&desc->lock, flags);
@@ -216,9 +223,9 @@ void enable_irq(unsigned int irq)
216} 223}
217EXPORT_SYMBOL(enable_irq); 224EXPORT_SYMBOL(enable_irq);
218 225
219int set_irq_wake_real(unsigned int irq, unsigned int on) 226static int set_irq_wake_real(unsigned int irq, unsigned int on)
220{ 227{
221 struct irq_desc *desc = irq_desc + irq; 228 struct irq_desc *desc = irq_to_desc(irq);
222 int ret = -ENXIO; 229 int ret = -ENXIO;
223 230
224 if (desc->chip->set_wake) 231 if (desc->chip->set_wake)
@@ -241,7 +248,7 @@ int set_irq_wake_real(unsigned int irq, unsigned int on)
241 */ 248 */
242int set_irq_wake(unsigned int irq, unsigned int on) 249int set_irq_wake(unsigned int irq, unsigned int on)
243{ 250{
244 struct irq_desc *desc = irq_desc + irq; 251 struct irq_desc *desc = irq_to_desc(irq);
245 unsigned long flags; 252 unsigned long flags;
246 int ret = 0; 253 int ret = 0;
247 254
@@ -281,12 +288,16 @@ EXPORT_SYMBOL(set_irq_wake);
281 */ 288 */
282int can_request_irq(unsigned int irq, unsigned long irqflags) 289int can_request_irq(unsigned int irq, unsigned long irqflags)
283{ 290{
291 struct irq_desc *desc = irq_to_desc(irq);
284 struct irqaction *action; 292 struct irqaction *action;
285 293
286 if (irq >= NR_IRQS || irq_desc[irq].status & IRQ_NOREQUEST) 294 if (!desc)
295 return 0;
296
297 if (desc->status & IRQ_NOREQUEST)
287 return 0; 298 return 0;
288 299
289 action = irq_desc[irq].action; 300 action = desc->action;
290 if (action) 301 if (action)
291 if (irqflags & action->flags & IRQF_SHARED) 302 if (irqflags & action->flags & IRQF_SHARED)
292 action = NULL; 303 action = NULL;
@@ -305,10 +316,11 @@ void compat_irq_chip_set_default_handler(struct irq_desc *desc)
305 desc->handle_irq = NULL; 316 desc->handle_irq = NULL;
306} 317}
307 318
308static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq, 319int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
309 unsigned long flags) 320 unsigned long flags)
310{ 321{
311 int ret; 322 int ret;
323 struct irq_chip *chip = desc->chip;
312 324
313 if (!chip || !chip->set_type) { 325 if (!chip || !chip->set_type) {
314 /* 326 /*
@@ -326,6 +338,11 @@ static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
326 pr_err("setting trigger mode %d for irq %u failed (%pF)\n", 338 pr_err("setting trigger mode %d for irq %u failed (%pF)\n",
327 (int)(flags & IRQF_TRIGGER_MASK), 339 (int)(flags & IRQF_TRIGGER_MASK),
328 irq, chip->set_type); 340 irq, chip->set_type);
341 else {
342 /* note that IRQF_TRIGGER_MASK == IRQ_TYPE_SENSE_MASK */
343 desc->status &= ~IRQ_TYPE_SENSE_MASK;
344 desc->status |= flags & IRQ_TYPE_SENSE_MASK;
345 }
329 346
330 return ret; 347 return ret;
331} 348}
@@ -334,16 +351,16 @@ static int __irq_set_trigger(struct irq_chip *chip, unsigned int irq,
334 * Internal function to register an irqaction - typically used to 351 * Internal function to register an irqaction - typically used to
335 * allocate special interrupts that are part of the architecture. 352 * allocate special interrupts that are part of the architecture.
336 */ 353 */
337int setup_irq(unsigned int irq, struct irqaction *new) 354static int
355__setup_irq(unsigned int irq, struct irq_desc * desc, struct irqaction *new)
338{ 356{
339 struct irq_desc *desc = irq_desc + irq;
340 struct irqaction *old, **p; 357 struct irqaction *old, **p;
341 const char *old_name = NULL; 358 const char *old_name = NULL;
342 unsigned long flags; 359 unsigned long flags;
343 int shared = 0; 360 int shared = 0;
344 int ret; 361 int ret;
345 362
346 if (irq >= NR_IRQS) 363 if (!desc)
347 return -EINVAL; 364 return -EINVAL;
348 365
349 if (desc->chip == &no_irq_chip) 366 if (desc->chip == &no_irq_chip)
@@ -404,7 +421,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
404 421
405 /* Setup the type (level, edge polarity) if configured: */ 422 /* Setup the type (level, edge polarity) if configured: */
406 if (new->flags & IRQF_TRIGGER_MASK) { 423 if (new->flags & IRQF_TRIGGER_MASK) {
407 ret = __irq_set_trigger(desc->chip, irq, new->flags); 424 ret = __irq_set_trigger(desc, irq, new->flags);
408 425
409 if (ret) { 426 if (ret) {
410 spin_unlock_irqrestore(&desc->lock, flags); 427 spin_unlock_irqrestore(&desc->lock, flags);
@@ -423,16 +440,21 @@ int setup_irq(unsigned int irq, struct irqaction *new)
423 if (!(desc->status & IRQ_NOAUTOEN)) { 440 if (!(desc->status & IRQ_NOAUTOEN)) {
424 desc->depth = 0; 441 desc->depth = 0;
425 desc->status &= ~IRQ_DISABLED; 442 desc->status &= ~IRQ_DISABLED;
426 if (desc->chip->startup) 443 desc->chip->startup(irq);
427 desc->chip->startup(irq);
428 else
429 desc->chip->enable(irq);
430 } else 444 } else
431 /* Undo nested disables: */ 445 /* Undo nested disables: */
432 desc->depth = 1; 446 desc->depth = 1;
433 447
434 /* Set default affinity mask once everything is setup */ 448 /* Set default affinity mask once everything is setup */
435 irq_select_affinity(irq); 449 irq_select_affinity(irq);
450
451 } else if ((new->flags & IRQF_TRIGGER_MASK)
452 && (new->flags & IRQF_TRIGGER_MASK)
453 != (desc->status & IRQ_TYPE_SENSE_MASK)) {
454 /* hope the handler works with the actual trigger mode... */
455 pr_warning("IRQ %d uses trigger mode %d; requested %d\n",
456 irq, (int)(desc->status & IRQ_TYPE_SENSE_MASK),
457 (int)(new->flags & IRQF_TRIGGER_MASK));
436 } 458 }
437 459
438 *p = new; 460 *p = new;
@@ -457,7 +479,7 @@ int setup_irq(unsigned int irq, struct irqaction *new)
457 spin_unlock_irqrestore(&desc->lock, flags); 479 spin_unlock_irqrestore(&desc->lock, flags);
458 480
459 new->irq = irq; 481 new->irq = irq;
460 register_irq_proc(irq); 482 register_irq_proc(irq, desc);
461 new->dir = NULL; 483 new->dir = NULL;
462 register_handler_proc(irq, new); 484 register_handler_proc(irq, new);
463 485
@@ -477,6 +499,20 @@ mismatch:
477} 499}
478 500
479/** 501/**
502 * setup_irq - setup an interrupt
503 * @irq: Interrupt line to setup
504 * @act: irqaction for the interrupt
505 *
506 * Used to statically setup interrupts in the early boot process.
507 */
508int setup_irq(unsigned int irq, struct irqaction *act)
509{
510 struct irq_desc *desc = irq_to_desc(irq);
511
512 return __setup_irq(irq, desc, act);
513}
514
515/**
480 * free_irq - free an interrupt 516 * free_irq - free an interrupt
481 * @irq: Interrupt line to free 517 * @irq: Interrupt line to free
482 * @dev_id: Device identity to free 518 * @dev_id: Device identity to free
@@ -492,15 +528,15 @@ mismatch:
492 */ 528 */
493void free_irq(unsigned int irq, void *dev_id) 529void free_irq(unsigned int irq, void *dev_id)
494{ 530{
495 struct irq_desc *desc; 531 struct irq_desc *desc = irq_to_desc(irq);
496 struct irqaction **p; 532 struct irqaction **p;
497 unsigned long flags; 533 unsigned long flags;
498 534
499 WARN_ON(in_interrupt()); 535 WARN_ON(in_interrupt());
500 if (irq >= NR_IRQS) 536
537 if (!desc)
501 return; 538 return;
502 539
503 desc = irq_desc + irq;
504 spin_lock_irqsave(&desc->lock, flags); 540 spin_lock_irqsave(&desc->lock, flags);
505 p = &desc->action; 541 p = &desc->action;
506 for (;;) { 542 for (;;) {
@@ -589,12 +625,14 @@ EXPORT_SYMBOL(free_irq);
589 * IRQF_SHARED Interrupt is shared 625 * IRQF_SHARED Interrupt is shared
590 * IRQF_DISABLED Disable local interrupts while processing 626 * IRQF_DISABLED Disable local interrupts while processing
591 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy 627 * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
628 * IRQF_TRIGGER_* Specify active edge(s) or level
592 * 629 *
593 */ 630 */
594int request_irq(unsigned int irq, irq_handler_t handler, 631int request_irq(unsigned int irq, irq_handler_t handler,
595 unsigned long irqflags, const char *devname, void *dev_id) 632 unsigned long irqflags, const char *devname, void *dev_id)
596{ 633{
597 struct irqaction *action; 634 struct irqaction *action;
635 struct irq_desc *desc;
598 int retval; 636 int retval;
599 637
600#ifdef CONFIG_LOCKDEP 638#ifdef CONFIG_LOCKDEP
@@ -611,9 +649,12 @@ int request_irq(unsigned int irq, irq_handler_t handler,
611 */ 649 */
612 if ((irqflags & IRQF_SHARED) && !dev_id) 650 if ((irqflags & IRQF_SHARED) && !dev_id)
613 return -EINVAL; 651 return -EINVAL;
614 if (irq >= NR_IRQS) 652
653 desc = irq_to_desc(irq);
654 if (!desc)
615 return -EINVAL; 655 return -EINVAL;
616 if (irq_desc[irq].status & IRQ_NOREQUEST) 656
657 if (desc->status & IRQ_NOREQUEST)
617 return -EINVAL; 658 return -EINVAL;
618 if (!handler) 659 if (!handler)
619 return -EINVAL; 660 return -EINVAL;
@@ -629,26 +670,29 @@ int request_irq(unsigned int irq, irq_handler_t handler,
629 action->next = NULL; 670 action->next = NULL;
630 action->dev_id = dev_id; 671 action->dev_id = dev_id;
631 672
673 retval = __setup_irq(irq, desc, action);
674 if (retval)
675 kfree(action);
676
632#ifdef CONFIG_DEBUG_SHIRQ 677#ifdef CONFIG_DEBUG_SHIRQ
633 if (irqflags & IRQF_SHARED) { 678 if (irqflags & IRQF_SHARED) {
634 /* 679 /*
635 * It's a shared IRQ -- the driver ought to be prepared for it 680 * It's a shared IRQ -- the driver ought to be prepared for it
636 * to happen immediately, so let's make sure.... 681 * to happen immediately, so let's make sure....
637 * We do this before actually registering it, to make sure that 682 * We disable the irq to make sure that a 'real' IRQ doesn't
638 * a 'real' IRQ doesn't run in parallel with our fake 683 * run in parallel with our fake.
639 */ 684 */
640 unsigned long flags; 685 unsigned long flags;
641 686
687 disable_irq(irq);
642 local_irq_save(flags); 688 local_irq_save(flags);
689
643 handler(irq, dev_id); 690 handler(irq, dev_id);
691
644 local_irq_restore(flags); 692 local_irq_restore(flags);
693 enable_irq(irq);
645 } 694 }
646#endif 695#endif
647
648 retval = setup_irq(irq, action);
649 if (retval)
650 kfree(action);
651
652 return retval; 696 return retval;
653} 697}
654EXPORT_SYMBOL(request_irq); 698EXPORT_SYMBOL(request_irq);
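
request_irq() now registers the action via __setup_irq() first and only then, under CONFIG_DEBUG_SHIRQ, pokes the handler once with the line disabled, so a real interrupt cannot race the fake invocation. A user-space illustration of that reordered self-test, using a function-pointer toy with invented names:

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

typedef int (*toy_handler_t)(int irq, void *dev_id);

struct toy_line {
        toy_handler_t handler;
        void *dev_id;
        bool disabled;
};

static int sample_handler(int irq, void *dev_id)
{
        printf("handler ran for irq %d (dev %s)\n", irq, (char *)dev_id);
        return 1;
}

static int toy_request_irq(struct toy_line *line, int irq,
                           toy_handler_t handler, void *dev_id)
{
        line->handler = handler;        /* register first (__setup_irq) */
        line->dev_id = dev_id;

        /* shared-IRQ sanity check: invoke the handler once by hand while
         * the line is "disabled", so it must cope with an immediate call */
        line->disabled = true;
        printf("line disabled, firing fake invocation\n");
        handler(irq, dev_id);
        line->disabled = false;
        printf("line enabled again\n");
        return 0;
}

int main(void)
{
        struct toy_line line = { .handler = NULL };

        return toy_request_irq(&line, 5, sample_handler, "eth0");
}
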
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index 77b7acc875c5..90b920d3f52b 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -3,18 +3,18 @@
3 3
4void set_pending_irq(unsigned int irq, cpumask_t mask) 4void set_pending_irq(unsigned int irq, cpumask_t mask)
5{ 5{
6 struct irq_desc *desc = irq_desc + irq; 6 struct irq_desc *desc = irq_to_desc(irq);
7 unsigned long flags; 7 unsigned long flags;
8 8
9 spin_lock_irqsave(&desc->lock, flags); 9 spin_lock_irqsave(&desc->lock, flags);
10 desc->status |= IRQ_MOVE_PENDING; 10 desc->status |= IRQ_MOVE_PENDING;
11 irq_desc[irq].pending_mask = mask; 11 desc->pending_mask = mask;
12 spin_unlock_irqrestore(&desc->lock, flags); 12 spin_unlock_irqrestore(&desc->lock, flags);
13} 13}
14 14
15void move_masked_irq(int irq) 15void move_masked_irq(int irq)
16{ 16{
17 struct irq_desc *desc = irq_desc + irq; 17 struct irq_desc *desc = irq_to_desc(irq);
18 cpumask_t tmp; 18 cpumask_t tmp;
19 19
20 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 20 if (likely(!(desc->status & IRQ_MOVE_PENDING)))
@@ -30,7 +30,7 @@ void move_masked_irq(int irq)
30 30
31 desc->status &= ~IRQ_MOVE_PENDING; 31 desc->status &= ~IRQ_MOVE_PENDING;
32 32
33 if (unlikely(cpus_empty(irq_desc[irq].pending_mask))) 33 if (unlikely(cpus_empty(desc->pending_mask)))
34 return; 34 return;
35 35
36 if (!desc->chip->set_affinity) 36 if (!desc->chip->set_affinity)
@@ -38,7 +38,7 @@ void move_masked_irq(int irq)
38 38
39 assert_spin_locked(&desc->lock); 39 assert_spin_locked(&desc->lock);
40 40
41 cpus_and(tmp, irq_desc[irq].pending_mask, cpu_online_map); 41 cpus_and(tmp, desc->pending_mask, cpu_online_map);
42 42
43 /* 43 /*
44 * If there was a valid mask to work with, please 44 * If there was a valid mask to work with, please
@@ -55,12 +55,12 @@ void move_masked_irq(int irq)
55 if (likely(!cpus_empty(tmp))) { 55 if (likely(!cpus_empty(tmp))) {
56 desc->chip->set_affinity(irq,tmp); 56 desc->chip->set_affinity(irq,tmp);
57 } 57 }
58 cpus_clear(irq_desc[irq].pending_mask); 58 cpus_clear(desc->pending_mask);
59} 59}
60 60
61void move_native_irq(int irq) 61void move_native_irq(int irq)
62{ 62{
63 struct irq_desc *desc = irq_desc + irq; 63 struct irq_desc *desc = irq_to_desc(irq);
64 64
65 if (likely(!(desc->status & IRQ_MOVE_PENDING))) 65 if (likely(!(desc->status & IRQ_MOVE_PENDING)))
66 return; 66 return;
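
migration.c keeps operating on the descriptor's own pending_mask: set_pending_irq() records the requested mask, and move_masked_irq() later applies only the part that intersects the online CPUs. A plain-C bitmask sketch of that deferred-affinity flow (toy fields; the real code calls desc->chip->set_affinity() instead of storing a value):

#include <stdio.h>

struct toy_desc {
        unsigned int hw_mask;           /* what the "hardware" was last told */
        unsigned int pending_mask;
        int move_pending;
};

static void toy_set_pending(struct toy_desc *desc, unsigned int mask)
{
        desc->pending_mask = mask;
        desc->move_pending = 1;
}

static void toy_move_masked(struct toy_desc *desc, unsigned int online)
{
        unsigned int tmp;

        if (!desc->move_pending)
                return;
        desc->move_pending = 0;

        tmp = desc->pending_mask & online;      /* cpus_and() equivalent */
        if (tmp)
                desc->hw_mask = tmp;            /* stands in for set_affinity() */
        desc->pending_mask = 0;
}

int main(void)
{
        struct toy_desc d = { .hw_mask = 0x1 };

        toy_set_pending(&d, 0xc);       /* ask for CPUs 2-3 */
        toy_move_masked(&d, 0x7);       /* only CPUs 0-2 online */
        printf("hardware mask now %#x\n", d.hw_mask);   /* prints 0x4 */
        return 0;
}
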
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index a09dd29c2fd7..fac014a81b24 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -19,7 +19,7 @@ static struct proc_dir_entry *root_irq_dir;
19 19
20static int irq_affinity_proc_show(struct seq_file *m, void *v) 20static int irq_affinity_proc_show(struct seq_file *m, void *v)
21{ 21{
22 struct irq_desc *desc = irq_desc + (long)m->private; 22 struct irq_desc *desc = irq_to_desc((long)m->private);
23 cpumask_t *mask = &desc->affinity; 23 cpumask_t *mask = &desc->affinity;
24 24
25#ifdef CONFIG_GENERIC_PENDING_IRQ 25#ifdef CONFIG_GENERIC_PENDING_IRQ
@@ -43,7 +43,7 @@ static ssize_t irq_affinity_proc_write(struct file *file,
43 cpumask_t new_value; 43 cpumask_t new_value;
44 int err; 44 int err;
45 45
46 if (!irq_desc[irq].chip->set_affinity || no_irq_affinity || 46 if (!irq_to_desc(irq)->chip->set_affinity || no_irq_affinity ||
47 irq_balancing_disabled(irq)) 47 irq_balancing_disabled(irq))
48 return -EIO; 48 return -EIO;
49 49
@@ -132,20 +132,20 @@ static const struct file_operations default_affinity_proc_fops = {
132static int irq_spurious_read(char *page, char **start, off_t off, 132static int irq_spurious_read(char *page, char **start, off_t off,
133 int count, int *eof, void *data) 133 int count, int *eof, void *data)
134{ 134{
135 struct irq_desc *d = &irq_desc[(long) data]; 135 struct irq_desc *desc = irq_to_desc((long) data);
136 return sprintf(page, "count %u\n" 136 return sprintf(page, "count %u\n"
137 "unhandled %u\n" 137 "unhandled %u\n"
138 "last_unhandled %u ms\n", 138 "last_unhandled %u ms\n",
139 d->irq_count, 139 desc->irq_count,
140 d->irqs_unhandled, 140 desc->irqs_unhandled,
141 jiffies_to_msecs(d->last_unhandled)); 141 jiffies_to_msecs(desc->last_unhandled));
142} 142}
143 143
144#define MAX_NAMELEN 128 144#define MAX_NAMELEN 128
145 145
146static int name_unique(unsigned int irq, struct irqaction *new_action) 146static int name_unique(unsigned int irq, struct irqaction *new_action)
147{ 147{
148 struct irq_desc *desc = irq_desc + irq; 148 struct irq_desc *desc = irq_to_desc(irq);
149 struct irqaction *action; 149 struct irqaction *action;
150 unsigned long flags; 150 unsigned long flags;
151 int ret = 1; 151 int ret = 1;
@@ -165,8 +165,9 @@ static int name_unique(unsigned int irq, struct irqaction *new_action)
165void register_handler_proc(unsigned int irq, struct irqaction *action) 165void register_handler_proc(unsigned int irq, struct irqaction *action)
166{ 166{
167 char name [MAX_NAMELEN]; 167 char name [MAX_NAMELEN];
168 struct irq_desc *desc = irq_to_desc(irq);
168 169
169 if (!irq_desc[irq].dir || action->dir || !action->name || 170 if (!desc->dir || action->dir || !action->name ||
170 !name_unique(irq, action)) 171 !name_unique(irq, action))
171 return; 172 return;
172 173
@@ -174,36 +175,34 @@ void register_handler_proc(unsigned int irq, struct irqaction *action)
174 snprintf(name, MAX_NAMELEN, "%s", action->name); 175 snprintf(name, MAX_NAMELEN, "%s", action->name);
175 176
176 /* create /proc/irq/1234/handler/ */ 177 /* create /proc/irq/1234/handler/ */
177 action->dir = proc_mkdir(name, irq_desc[irq].dir); 178 action->dir = proc_mkdir(name, desc->dir);
178} 179}
179 180
180#undef MAX_NAMELEN 181#undef MAX_NAMELEN
181 182
182#define MAX_NAMELEN 10 183#define MAX_NAMELEN 10
183 184
184void register_irq_proc(unsigned int irq) 185void register_irq_proc(unsigned int irq, struct irq_desc *desc)
185{ 186{
186 char name [MAX_NAMELEN]; 187 char name [MAX_NAMELEN];
187 struct proc_dir_entry *entry; 188 struct proc_dir_entry *entry;
188 189
189 if (!root_irq_dir || 190 if (!root_irq_dir || (desc->chip == &no_irq_chip) || desc->dir)
190 (irq_desc[irq].chip == &no_irq_chip) ||
191 irq_desc[irq].dir)
192 return; 191 return;
193 192
194 memset(name, 0, MAX_NAMELEN); 193 memset(name, 0, MAX_NAMELEN);
195 sprintf(name, "%d", irq); 194 sprintf(name, "%d", irq);
196 195
197 /* create /proc/irq/1234 */ 196 /* create /proc/irq/1234 */
198 irq_desc[irq].dir = proc_mkdir(name, root_irq_dir); 197 desc->dir = proc_mkdir(name, root_irq_dir);
199 198
200#ifdef CONFIG_SMP 199#ifdef CONFIG_SMP
201 /* create /proc/irq/<irq>/smp_affinity */ 200 /* create /proc/irq/<irq>/smp_affinity */
202 proc_create_data("smp_affinity", 0600, irq_desc[irq].dir, 201 proc_create_data("smp_affinity", 0600, desc->dir,
203 &irq_affinity_proc_fops, (void *)(long)irq); 202 &irq_affinity_proc_fops, (void *)(long)irq);
204#endif 203#endif
205 204
206 entry = create_proc_entry("spurious", 0444, irq_desc[irq].dir); 205 entry = create_proc_entry("spurious", 0444, desc->dir);
207 if (entry) { 206 if (entry) {
208 entry->data = (void *)(long)irq; 207 entry->data = (void *)(long)irq;
209 entry->read_proc = irq_spurious_read; 208 entry->read_proc = irq_spurious_read;
@@ -214,8 +213,11 @@ void register_irq_proc(unsigned int irq)
214 213
215void unregister_handler_proc(unsigned int irq, struct irqaction *action) 214void unregister_handler_proc(unsigned int irq, struct irqaction *action)
216{ 215{
217 if (action->dir) 216 if (action->dir) {
218 remove_proc_entry(action->dir->name, irq_desc[irq].dir); 217 struct irq_desc *desc = irq_to_desc(irq);
218
219 remove_proc_entry(action->dir->name, desc->dir);
220 }
219} 221}
220 222
221void register_default_affinity_proc(void) 223void register_default_affinity_proc(void)
@@ -228,7 +230,8 @@ void register_default_affinity_proc(void)
228 230
229void init_irq_proc(void) 231void init_irq_proc(void)
230{ 232{
231 int i; 233 unsigned int irq;
234 struct irq_desc *desc;
232 235
233 /* create /proc/irq */ 236 /* create /proc/irq */
234 root_irq_dir = proc_mkdir("irq", NULL); 237 root_irq_dir = proc_mkdir("irq", NULL);
@@ -240,7 +243,7 @@ void init_irq_proc(void)
240 /* 243 /*
241 * Create entries for all existing IRQs. 244 * Create entries for all existing IRQs.
242 */ 245 */
243 for (i = 0; i < NR_IRQS; i++) 246 for_each_irq_desc(irq, desc)
244 register_irq_proc(i); 247 register_irq_proc(irq, desc);
245} 248}
246 249
diff --git a/kernel/irq/resend.c b/kernel/irq/resend.c
index a8046791ba2d..89c7117acf2b 100644
--- a/kernel/irq/resend.c
+++ b/kernel/irq/resend.c
@@ -33,10 +33,10 @@ static void resend_irqs(unsigned long arg)
33 struct irq_desc *desc; 33 struct irq_desc *desc;
34 int irq; 34 int irq;
35 35
36 while (!bitmap_empty(irqs_resend, NR_IRQS)) { 36 while (!bitmap_empty(irqs_resend, nr_irqs)) {
37 irq = find_first_bit(irqs_resend, NR_IRQS); 37 irq = find_first_bit(irqs_resend, nr_irqs);
38 clear_bit(irq, irqs_resend); 38 clear_bit(irq, irqs_resend);
39 desc = irq_desc + irq; 39 desc = irq_to_desc(irq);
40 local_irq_disable(); 40 local_irq_disable();
41 desc->handle_irq(irq, desc); 41 desc->handle_irq(irq, desc);
42 local_irq_enable(); 42 local_irq_enable();
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index c66d3f10e853..dd364c11e56e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -12,83 +12,122 @@
12#include <linux/kallsyms.h> 12#include <linux/kallsyms.h>
13#include <linux/interrupt.h> 13#include <linux/interrupt.h>
14#include <linux/moduleparam.h> 14#include <linux/moduleparam.h>
15#include <linux/timer.h>
15 16
16static int irqfixup __read_mostly; 17static int irqfixup __read_mostly;
17 18
19#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
20static void poll_spurious_irqs(unsigned long dummy);
21static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
22
18/* 23/*
19 * Recovery handler for misrouted interrupts. 24 * Recovery handler for misrouted interrupts.
20 */ 25 */
21static int misrouted_irq(int irq) 26static int try_one_irq(int irq, struct irq_desc *desc)
22{ 27{
23 int i; 28 struct irqaction *action;
24 int ok = 0; 29 int ok = 0, work = 0;
25 int work = 0; /* Did we do work for a real IRQ */
26
27 for (i = 1; i < NR_IRQS; i++) {
28 struct irq_desc *desc = irq_desc + i;
29 struct irqaction *action;
30
31 if (i == irq) /* Already tried */
32 continue;
33 30
34 spin_lock(&desc->lock); 31 spin_lock(&desc->lock);
35 /* Already running on another processor */ 32 /* Already running on another processor */
36 if (desc->status & IRQ_INPROGRESS) { 33 if (desc->status & IRQ_INPROGRESS) {
37 /* 34 /*
38 * Already running: If it is shared get the other 35 * Already running: If it is shared get the other
39 * CPU to go looking for our mystery interrupt too 36 * CPU to go looking for our mystery interrupt too
40 */ 37 */
41 if (desc->action && (desc->action->flags & IRQF_SHARED)) 38 if (desc->action && (desc->action->flags & IRQF_SHARED))
42 desc->status |= IRQ_PENDING; 39 desc->status |= IRQ_PENDING;
43 spin_unlock(&desc->lock);
44 continue;
45 }
46 /* Honour the normal IRQ locking */
47 desc->status |= IRQ_INPROGRESS;
48 action = desc->action;
49 spin_unlock(&desc->lock); 40 spin_unlock(&desc->lock);
41 return ok;
42 }
43 /* Honour the normal IRQ locking */
44 desc->status |= IRQ_INPROGRESS;
45 action = desc->action;
46 spin_unlock(&desc->lock);
50 47
51 while (action) { 48 while (action) {
52 /* Only shared IRQ handlers are safe to call */ 49 /* Only shared IRQ handlers are safe to call */
53 if (action->flags & IRQF_SHARED) { 50 if (action->flags & IRQF_SHARED) {
54 if (action->handler(i, action->dev_id) == 51 if (action->handler(irq, action->dev_id) ==
55 IRQ_HANDLED) 52 IRQ_HANDLED)
56 ok = 1; 53 ok = 1;
57 }
58 action = action->next;
59 } 54 }
60 local_irq_disable(); 55 action = action->next;
61 /* Now clean up the flags */ 56 }
62 spin_lock(&desc->lock); 57 local_irq_disable();
63 action = desc->action; 58 /* Now clean up the flags */
59 spin_lock(&desc->lock);
60 action = desc->action;
64 61
62 /*
63 * While we were looking for a fixup someone queued a real
64 * IRQ clashing with our walk:
65 */
66 while ((desc->status & IRQ_PENDING) && action) {
65 /* 67 /*
66 * While we were looking for a fixup someone queued a real 68 * Perform real IRQ processing for the IRQ we deferred
67 * IRQ clashing with our walk:
68 */
69 while ((desc->status & IRQ_PENDING) && action) {
70 /*
71 * Perform real IRQ processing for the IRQ we deferred
72 */
73 work = 1;
74 spin_unlock(&desc->lock);
75 handle_IRQ_event(i, action);
76 spin_lock(&desc->lock);
77 desc->status &= ~IRQ_PENDING;
78 }
79 desc->status &= ~IRQ_INPROGRESS;
80 /*
81 * If we did actual work for the real IRQ line we must let the
82 * IRQ controller clean up too
83 */ 69 */
84 if (work && desc->chip && desc->chip->end) 70 work = 1;
85 desc->chip->end(i);
86 spin_unlock(&desc->lock); 71 spin_unlock(&desc->lock);
72 handle_IRQ_event(irq, action);
73 spin_lock(&desc->lock);
74 desc->status &= ~IRQ_PENDING;
75 }
76 desc->status &= ~IRQ_INPROGRESS;
77 /*
78 * If we did actual work for the real IRQ line we must let the
79 * IRQ controller clean up too
80 */
81 if (work && desc->chip && desc->chip->end)
82 desc->chip->end(irq);
83 spin_unlock(&desc->lock);
84
85 return ok;
86}
87
88static int misrouted_irq(int irq)
89{
90 struct irq_desc *desc;
91 int i, ok = 0;
92
93 for_each_irq_desc(i, desc) {
94 if (!i)
95 continue;
96
97 if (i == irq) /* Already tried */
98 continue;
99
100 if (try_one_irq(i, desc))
101 ok = 1;
87 } 102 }
88 /* So the caller can adjust the irq error counts */ 103 /* So the caller can adjust the irq error counts */
89 return ok; 104 return ok;
90} 105}
91 106
107static void poll_spurious_irqs(unsigned long dummy)
108{
109 struct irq_desc *desc;
110 int i;
111
112 for_each_irq_desc(i, desc) {
113 unsigned int status;
114
115 if (!i)
116 continue;
117
118 /* Racy but it doesn't matter */
119 status = desc->status;
120 barrier();
121 if (!(status & IRQ_SPURIOUS_DISABLED))
122 continue;
123
124 try_one_irq(i, desc);
125 }
126
127 mod_timer(&poll_spurious_irq_timer,
128 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
129}
130
92/* 131/*
93 * If 99,900 of the previous 100,000 interrupts have not been handled 132 * If 99,900 of the previous 100,000 interrupts have not been handled
94 * then assume that the IRQ is stuck in some manner. Drop a diagnostic 133 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -137,7 +176,9 @@ report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
137 } 176 }
138} 177}
139 178
140static inline int try_misrouted_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret) 179static inline int
180try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
181 irqreturn_t action_ret)
141{ 182{
142 struct irqaction *action; 183 struct irqaction *action;
143 184
@@ -212,6 +253,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
212 desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED; 253 desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
213 desc->depth++; 254 desc->depth++;
214 desc->chip->disable(irq); 255 desc->chip->disable(irq);
256
257 mod_timer(&poll_spurious_irq_timer,
258 jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
215 } 259 }
216 desc->irqs_unhandled = 0; 260 desc->irqs_unhandled = 0;
217} 261}
@@ -241,7 +285,7 @@ static int __init irqfixup_setup(char *str)
241 285
242__setup("irqfixup", irqfixup_setup); 286__setup("irqfixup", irqfixup_setup);
243module_param(irqfixup, int, 0644); 287module_param(irqfixup, int, 0644);
244MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode 2: irqpoll mode"); 288MODULE_PARM_DESC("irqfixup", "0: No fixup, 1: irqfixup mode, 2: irqpoll mode");
245 289
246static int __init irqpoll_setup(char *str) 290static int __init irqpoll_setup(char *str)
247{ 291{
diff --git a/kernel/itimer.c b/kernel/itimer.c
index ab982747d9bd..db7c358b9a02 100644
--- a/kernel/itimer.c
+++ b/kernel/itimer.c
@@ -55,17 +55,15 @@ int do_getitimer(int which, struct itimerval *value)
55 spin_unlock_irq(&tsk->sighand->siglock); 55 spin_unlock_irq(&tsk->sighand->siglock);
56 break; 56 break;
57 case ITIMER_VIRTUAL: 57 case ITIMER_VIRTUAL:
58 read_lock(&tasklist_lock);
59 spin_lock_irq(&tsk->sighand->siglock); 58 spin_lock_irq(&tsk->sighand->siglock);
60 cval = tsk->signal->it_virt_expires; 59 cval = tsk->signal->it_virt_expires;
61 cinterval = tsk->signal->it_virt_incr; 60 cinterval = tsk->signal->it_virt_incr;
62 if (!cputime_eq(cval, cputime_zero)) { 61 if (!cputime_eq(cval, cputime_zero)) {
63 struct task_struct *t = tsk; 62 struct task_cputime cputime;
64 cputime_t utime = tsk->signal->utime; 63 cputime_t utime;
65 do { 64
66 utime = cputime_add(utime, t->utime); 65 thread_group_cputime(tsk, &cputime);
67 t = next_thread(t); 66 utime = cputime.utime;
68 } while (t != tsk);
69 if (cputime_le(cval, utime)) { /* about to fire */ 67 if (cputime_le(cval, utime)) { /* about to fire */
70 cval = jiffies_to_cputime(1); 68 cval = jiffies_to_cputime(1);
71 } else { 69 } else {
@@ -73,25 +71,19 @@ int do_getitimer(int which, struct itimerval *value)
73 } 71 }
74 } 72 }
75 spin_unlock_irq(&tsk->sighand->siglock); 73 spin_unlock_irq(&tsk->sighand->siglock);
76 read_unlock(&tasklist_lock);
77 cputime_to_timeval(cval, &value->it_value); 74 cputime_to_timeval(cval, &value->it_value);
78 cputime_to_timeval(cinterval, &value->it_interval); 75 cputime_to_timeval(cinterval, &value->it_interval);
79 break; 76 break;
80 case ITIMER_PROF: 77 case ITIMER_PROF:
81 read_lock(&tasklist_lock);
82 spin_lock_irq(&tsk->sighand->siglock); 78 spin_lock_irq(&tsk->sighand->siglock);
83 cval = tsk->signal->it_prof_expires; 79 cval = tsk->signal->it_prof_expires;
84 cinterval = tsk->signal->it_prof_incr; 80 cinterval = tsk->signal->it_prof_incr;
85 if (!cputime_eq(cval, cputime_zero)) { 81 if (!cputime_eq(cval, cputime_zero)) {
86 struct task_struct *t = tsk; 82 struct task_cputime times;
87 cputime_t ptime = cputime_add(tsk->signal->utime, 83 cputime_t ptime;
88 tsk->signal->stime); 84
89 do { 85 thread_group_cputime(tsk, &times);
90 ptime = cputime_add(ptime, 86 ptime = cputime_add(times.utime, times.stime);
91 cputime_add(t->utime,
92 t->stime));
93 t = next_thread(t);
94 } while (t != tsk);
95 if (cputime_le(cval, ptime)) { /* about to fire */ 87 if (cputime_le(cval, ptime)) { /* about to fire */
96 cval = jiffies_to_cputime(1); 88 cval = jiffies_to_cputime(1);
97 } else { 89 } else {
@@ -99,7 +91,6 @@ int do_getitimer(int which, struct itimerval *value)
99 } 91 }
100 } 92 }
101 spin_unlock_irq(&tsk->sighand->siglock); 93 spin_unlock_irq(&tsk->sighand->siglock);
102 read_unlock(&tasklist_lock);
103 cputime_to_timeval(cval, &value->it_value); 94 cputime_to_timeval(cval, &value->it_value);
104 cputime_to_timeval(cinterval, &value->it_interval); 95 cputime_to_timeval(cinterval, &value->it_interval);
105 break; 96 break;
@@ -185,7 +176,6 @@ again:
185 case ITIMER_VIRTUAL: 176 case ITIMER_VIRTUAL:
186 nval = timeval_to_cputime(&value->it_value); 177 nval = timeval_to_cputime(&value->it_value);
187 ninterval = timeval_to_cputime(&value->it_interval); 178 ninterval = timeval_to_cputime(&value->it_interval);
188 read_lock(&tasklist_lock);
189 spin_lock_irq(&tsk->sighand->siglock); 179 spin_lock_irq(&tsk->sighand->siglock);
190 cval = tsk->signal->it_virt_expires; 180 cval = tsk->signal->it_virt_expires;
191 cinterval = tsk->signal->it_virt_incr; 181 cinterval = tsk->signal->it_virt_incr;
@@ -200,7 +190,6 @@ again:
200 tsk->signal->it_virt_expires = nval; 190 tsk->signal->it_virt_expires = nval;
201 tsk->signal->it_virt_incr = ninterval; 191 tsk->signal->it_virt_incr = ninterval;
202 spin_unlock_irq(&tsk->sighand->siglock); 192 spin_unlock_irq(&tsk->sighand->siglock);
203 read_unlock(&tasklist_lock);
204 if (ovalue) { 193 if (ovalue) {
205 cputime_to_timeval(cval, &ovalue->it_value); 194 cputime_to_timeval(cval, &ovalue->it_value);
206 cputime_to_timeval(cinterval, &ovalue->it_interval); 195 cputime_to_timeval(cinterval, &ovalue->it_interval);
@@ -209,7 +198,6 @@ again:
209 case ITIMER_PROF: 198 case ITIMER_PROF:
210 nval = timeval_to_cputime(&value->it_value); 199 nval = timeval_to_cputime(&value->it_value);
211 ninterval = timeval_to_cputime(&value->it_interval); 200 ninterval = timeval_to_cputime(&value->it_interval);
212 read_lock(&tasklist_lock);
213 spin_lock_irq(&tsk->sighand->siglock); 201 spin_lock_irq(&tsk->sighand->siglock);
214 cval = tsk->signal->it_prof_expires; 202 cval = tsk->signal->it_prof_expires;
215 cinterval = tsk->signal->it_prof_incr; 203 cinterval = tsk->signal->it_prof_incr;
@@ -224,7 +212,6 @@ again:
224 tsk->signal->it_prof_expires = nval; 212 tsk->signal->it_prof_expires = nval;
225 tsk->signal->it_prof_incr = ninterval; 213 tsk->signal->it_prof_incr = ninterval;
226 spin_unlock_irq(&tsk->sighand->siglock); 214 spin_unlock_irq(&tsk->sighand->siglock);
227 read_unlock(&tasklist_lock);
228 if (ovalue) { 215 if (ovalue) {
229 cputime_to_timeval(cval, &ovalue->it_value); 216 cputime_to_timeval(cval, &ovalue->it_value);
230 cputime_to_timeval(cinterval, &ovalue->it_interval); 217 cputime_to_timeval(cinterval, &ovalue->it_interval);
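The itimer.c hunks drop the tasklist_lock-protected walk over every thread and read group times from thread_group_cputime(), which sums per-CPU accumulators. A userspace sketch of that summation, with per-CPU storage modeled as a plain array and field names borrowed from task_cputime (this is a model, not the kernel code):

/*
 * Sketch of the thread_group_cputime() idea: instead of iterating every
 * thread in the group, keep per-CPU running totals and sum them on
 * demand.
 */
#include <stdio.h>

#define NCPUS 4

struct cputime_totals {
	unsigned long long utime;
	unsigned long long stime;
	unsigned long long sum_exec_runtime;
};

/* One accumulator per CPU, updated locally as each thread runs. */
static struct cputime_totals per_cpu[NCPUS] = {
	{ 10, 2, 1200 }, { 7, 1, 800 }, { 0, 0, 0 }, { 3, 4, 500 },
};

static void group_cputime(struct cputime_totals *out)
{
	int cpu;

	out->utime = out->stime = out->sum_exec_runtime = 0;
	for (cpu = 0; cpu < NCPUS; cpu++) {
		out->utime += per_cpu[cpu].utime;
		out->stime += per_cpu[cpu].stime;
		out->sum_exec_runtime += per_cpu[cpu].sum_exec_runtime;
	}
}

int main(void)
{
	struct cputime_totals t;

	group_cputime(&t);
	printf("utime=%llu stime=%llu runtime=%llu\n",
	       t.utime, t.stime, t.sum_exec_runtime);
	return 0;
}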
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 38fc10ac7541..5072cf1685a2 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -260,7 +260,6 @@ const char *kallsyms_lookup(unsigned long addr,
260 /* see if it's in a module */ 260 /* see if it's in a module */
261 return module_address_lookup(addr, symbolsize, offset, modname, 261 return module_address_lookup(addr, symbolsize, offset, modname,
262 namebuf); 262 namebuf);
263 return NULL;
264} 263}
265 264
266int lookup_symbol_name(unsigned long addr, char *symname) 265int lookup_symbol_name(unsigned long addr, char *symname)
diff --git a/kernel/kexec.c b/kernel/kexec.c
index aef265325cd3..ac0fde7b54d0 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -30,6 +30,7 @@
30#include <linux/pm.h> 30#include <linux/pm.h>
31#include <linux/cpu.h> 31#include <linux/cpu.h>
32#include <linux/console.h> 32#include <linux/console.h>
33#include <linux/vmalloc.h>
33 34
34#include <asm/page.h> 35#include <asm/page.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
@@ -1371,6 +1372,7 @@ static int __init crash_save_vmcoreinfo_init(void)
1371 VMCOREINFO_SYMBOL(node_online_map); 1372 VMCOREINFO_SYMBOL(node_online_map);
1372 VMCOREINFO_SYMBOL(swapper_pg_dir); 1373 VMCOREINFO_SYMBOL(swapper_pg_dir);
1373 VMCOREINFO_SYMBOL(_stext); 1374 VMCOREINFO_SYMBOL(_stext);
1375 VMCOREINFO_SYMBOL(vmlist);
1374 1376
1375#ifndef CONFIG_NEED_MULTIPLE_NODES 1377#ifndef CONFIG_NEED_MULTIPLE_NODES
1376 VMCOREINFO_SYMBOL(mem_map); 1378 VMCOREINFO_SYMBOL(mem_map);
@@ -1406,6 +1408,7 @@ static int __init crash_save_vmcoreinfo_init(void)
1406 VMCOREINFO_OFFSET(free_area, free_list); 1408 VMCOREINFO_OFFSET(free_area, free_list);
1407 VMCOREINFO_OFFSET(list_head, next); 1409 VMCOREINFO_OFFSET(list_head, next);
1408 VMCOREINFO_OFFSET(list_head, prev); 1410 VMCOREINFO_OFFSET(list_head, prev);
1411 VMCOREINFO_OFFSET(vm_struct, addr);
1409 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER); 1412 VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
1410 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES); 1413 VMCOREINFO_LENGTH(free_area.free_list, MIGRATE_TYPES);
1411 VMCOREINFO_NUMBER(NR_FREE_PAGES); 1414 VMCOREINFO_NUMBER(NR_FREE_PAGES);
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 2456d1a0befb..3d3c3ea3a023 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -113,7 +113,7 @@ int request_module(const char *fmt, ...)
113 return ret; 113 return ret;
114} 114}
115EXPORT_SYMBOL(request_module); 115EXPORT_SYMBOL(request_module);
116#endif /* CONFIG_KMOD */ 116#endif /* CONFIG_MODULES */
117 117
118struct subprocess_info { 118struct subprocess_info {
119 struct work_struct work; 119 struct work_struct work;
@@ -265,7 +265,7 @@ static void __call_usermodehelper(struct work_struct *work)
265 } 265 }
266} 266}
267 267
268#ifdef CONFIG_PM 268#ifdef CONFIG_PM_SLEEP
269/* 269/*
270 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY 270 * If set, call_usermodehelper_exec() will exit immediately returning -EBUSY
271 * (used for preventing user land processes from being created after the user 271 * (used for preventing user land processes from being created after the user
@@ -288,39 +288,37 @@ static DECLARE_WAIT_QUEUE_HEAD(running_helpers_waitq);
288 */ 288 */
289#define RUNNING_HELPERS_TIMEOUT (5 * HZ) 289#define RUNNING_HELPERS_TIMEOUT (5 * HZ)
290 290
291static int usermodehelper_pm_callback(struct notifier_block *nfb, 291/**
292 unsigned long action, 292 * usermodehelper_disable - prevent new helpers from being started
293 void *ignored) 293 */
294int usermodehelper_disable(void)
294{ 295{
295 long retval; 296 long retval;
296 297
297 switch (action) { 298 usermodehelper_disabled = 1;
298 case PM_HIBERNATION_PREPARE: 299 smp_mb();
299 case PM_SUSPEND_PREPARE: 300 /*
300 usermodehelper_disabled = 1; 301 * From now on call_usermodehelper_exec() won't start any new
301 smp_mb(); 302 * helpers, so it is sufficient if running_helpers turns out to
302 /* 303 * be zero at one point (it may be increased later, but that
303 * From now on call_usermodehelper_exec() won't start any new 304 * doesn't matter).
304 * helpers, so it is sufficient if running_helpers turns out to 305 */
305 * be zero at one point (it may be increased later, but that 306 retval = wait_event_timeout(running_helpers_waitq,
306 * doesn't matter).
307 */
308 retval = wait_event_timeout(running_helpers_waitq,
309 atomic_read(&running_helpers) == 0, 307 atomic_read(&running_helpers) == 0,
310 RUNNING_HELPERS_TIMEOUT); 308 RUNNING_HELPERS_TIMEOUT);
311 if (retval) { 309 if (retval)
312 return NOTIFY_OK; 310 return 0;
313 } else {
314 usermodehelper_disabled = 0;
315 return NOTIFY_BAD;
316 }
317 case PM_POST_HIBERNATION:
318 case PM_POST_SUSPEND:
319 usermodehelper_disabled = 0;
320 return NOTIFY_OK;
321 }
322 311
323 return NOTIFY_DONE; 312 usermodehelper_disabled = 0;
313 return -EAGAIN;
314}
315
316/**
317 * usermodehelper_enable - allow new helpers to be started again
318 */
319void usermodehelper_enable(void)
320{
321 usermodehelper_disabled = 0;
324} 322}
325 323
326static void helper_lock(void) 324static void helper_lock(void)
@@ -334,18 +332,12 @@ static void helper_unlock(void)
334 if (atomic_dec_and_test(&running_helpers)) 332 if (atomic_dec_and_test(&running_helpers))
335 wake_up(&running_helpers_waitq); 333 wake_up(&running_helpers_waitq);
336} 334}
337 335#else /* CONFIG_PM_SLEEP */
338static void register_pm_notifier_callback(void)
339{
340 pm_notifier(usermodehelper_pm_callback, 0);
341}
342#else /* CONFIG_PM */
343#define usermodehelper_disabled 0 336#define usermodehelper_disabled 0
344 337
345static inline void helper_lock(void) {} 338static inline void helper_lock(void) {}
346static inline void helper_unlock(void) {} 339static inline void helper_unlock(void) {}
347static inline void register_pm_notifier_callback(void) {} 340#endif /* CONFIG_PM_SLEEP */
348#endif /* CONFIG_PM */
349 341
350/** 342/**
351 * call_usermodehelper_setup - prepare to call a usermode helper 343 * call_usermodehelper_setup - prepare to call a usermode helper
@@ -515,5 +507,4 @@ void __init usermodehelper_init(void)
515{ 507{
516 khelper_wq = create_singlethread_workqueue("khelper"); 508 khelper_wq = create_singlethread_workqueue("khelper");
517 BUG_ON(!khelper_wq); 509 BUG_ON(!khelper_wq);
518 register_pm_notifier_callback();
519} 510}
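kmod.c turns the old PM-notifier callback into an explicit usermodehelper_disable()/usermodehelper_enable() pair: set the flag, publish it, wait a bounded time for running helpers to drain, and roll back with -EAGAIN on timeout. The following userspace model shows only that handshake, using C11 atomics and a crude polling wait in place of the kernel's wait queue; it is a sketch, not the kernel implementation.

/*
 * Model of the disable/enable handshake: new helpers are refused once
 * "disabled" is set, and disable only succeeds if in-flight helpers
 * reach zero within the timeout; otherwise the flag is rolled back.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>
#include <errno.h>

static atomic_int disabled;
static atomic_int running_helpers;

static int helper_disable(void)
{
	int tries;

	atomic_store(&disabled, 1);		/* no new helpers start */
	for (tries = 0; tries < 50; tries++) {	/* ~5s at 100ms steps */
		if (atomic_load(&running_helpers) == 0)
			return 0;
		usleep(100 * 1000);
	}
	atomic_store(&disabled, 0);		/* roll back on timeout */
	return -EAGAIN;
}

static void helper_enable(void)
{
	atomic_store(&disabled, 0);
}

int main(void)
{
	atomic_store(&running_helpers, 0);
	printf("disable -> %d\n", helper_disable());
	helper_enable();
	return 0;
}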
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 75bc2cd9ebc6..8b57a2597f21 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -404,7 +404,7 @@ void kretprobe_hash_lock(struct task_struct *tsk,
404 spin_lock_irqsave(hlist_lock, *flags); 404 spin_lock_irqsave(hlist_lock, *flags);
405} 405}
406 406
407void kretprobe_table_lock(unsigned long hash, unsigned long *flags) 407static void kretprobe_table_lock(unsigned long hash, unsigned long *flags)
408{ 408{
409 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash); 409 spinlock_t *hlist_lock = kretprobe_table_lock_ptr(hash);
410 spin_lock_irqsave(hlist_lock, *flags); 410 spin_lock_irqsave(hlist_lock, *flags);
diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
index e53bc30e9ba5..08dd8ed86c77 100644
--- a/kernel/ksysfs.c
+++ b/kernel/ksysfs.c
@@ -14,6 +14,7 @@
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kexec.h> 16#include <linux/kexec.h>
17#include <linux/profile.h>
17#include <linux/sched.h> 18#include <linux/sched.h>
18 19
19#define KERNEL_ATTR_RO(_name) \ 20#define KERNEL_ATTR_RO(_name) \
@@ -53,6 +54,37 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
53KERNEL_ATTR_RW(uevent_helper); 54KERNEL_ATTR_RW(uevent_helper);
54#endif 55#endif
55 56
57#ifdef CONFIG_PROFILING
58static ssize_t profiling_show(struct kobject *kobj,
59 struct kobj_attribute *attr, char *buf)
60{
61 return sprintf(buf, "%d\n", prof_on);
62}
63static ssize_t profiling_store(struct kobject *kobj,
64 struct kobj_attribute *attr,
65 const char *buf, size_t count)
66{
67 int ret;
68
69 if (prof_on)
70 return -EEXIST;
71 /*
72 * This eventually calls into get_option() which
73 * has a ton of callers and is not const. It is
74 * easiest to cast it away here.
75 */
76 profile_setup((char *)buf);
77 ret = profile_init();
78 if (ret)
79 return ret;
80 ret = create_proc_profile();
81 if (ret)
82 return ret;
83 return count;
84}
85KERNEL_ATTR_RW(profiling);
86#endif
87
56#ifdef CONFIG_KEXEC 88#ifdef CONFIG_KEXEC
57static ssize_t kexec_loaded_show(struct kobject *kobj, 89static ssize_t kexec_loaded_show(struct kobject *kobj,
58 struct kobj_attribute *attr, char *buf) 90 struct kobj_attribute *attr, char *buf)
@@ -109,6 +141,9 @@ static struct attribute * kernel_attrs[] = {
109 &uevent_seqnum_attr.attr, 141 &uevent_seqnum_attr.attr,
110 &uevent_helper_attr.attr, 142 &uevent_helper_attr.attr,
111#endif 143#endif
144#ifdef CONFIG_PROFILING
145 &profiling_attr.attr,
146#endif
112#ifdef CONFIG_KEXEC 147#ifdef CONFIG_KEXEC
113 &kexec_loaded_attr.attr, 148 &kexec_loaded_attr.attr,
114 &kexec_crash_loaded_attr.attr, 149 &kexec_crash_loaded_attr.attr,
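The ksysfs.c addition exposes /sys/kernel/profiling: reading it reports prof_on, and writing it enables profiling exactly once, returning -EEXIST on later writes and propagating any setup error. A userspace model of that write-once store logic is sketched below; fake_profile_init() stands in for the profile_setup()/profile_init()/create_proc_profile() chain and is purely illustrative.

/*
 * Write-once "store" semantics: the first successful write enables the
 * feature, later writes fail with -EEXIST, and setup errors are
 * returned instead of the byte count.
 */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int prof_on;

static int fake_profile_init(const char *mode)
{
	return strlen(mode) ? 0 : -EINVAL;
}

static long profiling_store(const char *buf, size_t count)
{
	int ret;

	if (prof_on)
		return -EEXIST;		/* already enabled: refuse */
	ret = fake_profile_init(buf);
	if (ret)
		return ret;		/* propagate setup failure */
	prof_on = 1;
	return (long)count;		/* success: consume the whole write */
}

int main(void)
{
	printf("first write  -> %ld\n", profiling_store("schedule", 9));
	printf("second write -> %ld\n", profiling_store("schedule", 9));
	return 0;
}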
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 96cff2f8710b..8e7a7ce3ed0a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -13,6 +13,7 @@
13#include <linux/file.h> 13#include <linux/file.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/mutex.h> 15#include <linux/mutex.h>
16#include <trace/sched.h>
16 17
17#define KTHREAD_NICE_LEVEL (-5) 18#define KTHREAD_NICE_LEVEL (-5)
18 19
@@ -171,12 +172,11 @@ EXPORT_SYMBOL(kthread_create);
171 */ 172 */
172void kthread_bind(struct task_struct *k, unsigned int cpu) 173void kthread_bind(struct task_struct *k, unsigned int cpu)
173{ 174{
174 if (k->state != TASK_UNINTERRUPTIBLE) { 175 /* Must have done schedule() in kthread() before we set_task_cpu */
176 if (!wait_task_inactive(k, TASK_UNINTERRUPTIBLE)) {
175 WARN_ON(1); 177 WARN_ON(1);
176 return; 178 return;
177 } 179 }
178 /* Must have done schedule() in kthread() before we set_task_cpu */
179 wait_task_inactive(k, 0);
180 set_task_cpu(k, cpu); 180 set_task_cpu(k, cpu);
181 k->cpus_allowed = cpumask_of_cpu(cpu); 181 k->cpus_allowed = cpumask_of_cpu(cpu);
182 k->rt.nr_cpus_allowed = 1; 182 k->rt.nr_cpus_allowed = 1;
@@ -206,6 +206,8 @@ int kthread_stop(struct task_struct *k)
206 /* It could exit after stop_info.k set, but before wake_up_process. */ 206 /* It could exit after stop_info.k set, but before wake_up_process. */
207 get_task_struct(k); 207 get_task_struct(k);
208 208
209 trace_sched_kthread_stop(k);
210
209 /* Must init completion *before* thread sees kthread_stop_info.k */ 211 /* Must init completion *before* thread sees kthread_stop_info.k */
210 init_completion(&kthread_stop_info.done); 212 init_completion(&kthread_stop_info.done);
211 smp_wmb(); 213 smp_wmb();
@@ -221,6 +223,8 @@ int kthread_stop(struct task_struct *k)
221 ret = kthread_stop_info.err; 223 ret = kthread_stop_info.err;
222 mutex_unlock(&kthread_stop_lock); 224 mutex_unlock(&kthread_stop_lock);
223 225
226 trace_sched_kthread_stop_ret(ret);
227
224 return ret; 228 return ret;
225} 229}
226EXPORT_SYMBOL(kthread_stop); 230EXPORT_SYMBOL(kthread_stop);
diff --git a/kernel/marker.c b/kernel/marker.c
index 7d1faecd7a51..e9c6b2bc9400 100644
--- a/kernel/marker.c
+++ b/kernel/marker.c
@@ -62,7 +62,7 @@ struct marker_entry {
62 int refcount; /* Number of times armed. 0 if disarmed. */ 62 int refcount; /* Number of times armed. 0 if disarmed. */
63 struct rcu_head rcu; 63 struct rcu_head rcu;
64 void *oldptr; 64 void *oldptr;
65 unsigned char rcu_pending:1; 65 int rcu_pending;
66 unsigned char ptype:1; 66 unsigned char ptype:1;
67 char name[0]; /* Contains name'\0'format'\0' */ 67 char name[0]; /* Contains name'\0'format'\0' */
68}; 68};
@@ -103,11 +103,11 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
103 char ptype; 103 char ptype;
104 104
105 /* 105 /*
106 * preempt_disable does two things : disabling preemption to make sure 106 * rcu_read_lock_sched does two things : disabling preemption to make
107 * the teardown of the callbacks can be done correctly when they are in 107 * sure the teardown of the callbacks can be done correctly when they
108 * modules and they insure RCU read coherency. 108 * are in modules and they insure RCU read coherency.
109 */ 109 */
110 preempt_disable(); 110 rcu_read_lock_sched();
111 ptype = mdata->ptype; 111 ptype = mdata->ptype;
112 if (likely(!ptype)) { 112 if (likely(!ptype)) {
113 marker_probe_func *func; 113 marker_probe_func *func;
@@ -145,7 +145,7 @@ void marker_probe_cb(const struct marker *mdata, void *call_private, ...)
145 va_end(args); 145 va_end(args);
146 } 146 }
147 } 147 }
148 preempt_enable(); 148 rcu_read_unlock_sched();
149} 149}
150EXPORT_SYMBOL_GPL(marker_probe_cb); 150EXPORT_SYMBOL_GPL(marker_probe_cb);
151 151
@@ -162,7 +162,7 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
162 va_list args; /* not initialized */ 162 va_list args; /* not initialized */
163 char ptype; 163 char ptype;
164 164
165 preempt_disable(); 165 rcu_read_lock_sched();
166 ptype = mdata->ptype; 166 ptype = mdata->ptype;
167 if (likely(!ptype)) { 167 if (likely(!ptype)) {
168 marker_probe_func *func; 168 marker_probe_func *func;
@@ -195,7 +195,7 @@ void marker_probe_cb_noarg(const struct marker *mdata, void *call_private, ...)
195 multi[i].func(multi[i].probe_private, call_private, 195 multi[i].func(multi[i].probe_private, call_private,
196 mdata->format, &args); 196 mdata->format, &args);
197 } 197 }
198 preempt_enable(); 198 rcu_read_unlock_sched();
199} 199}
200EXPORT_SYMBOL_GPL(marker_probe_cb_noarg); 200EXPORT_SYMBOL_GPL(marker_probe_cb_noarg);
201 201
@@ -560,7 +560,7 @@ static int set_marker(struct marker_entry **entry, struct marker *elem,
560 * Disable a marker and its probe callback. 560 * Disable a marker and its probe callback.
561 * Note: only waiting an RCU period after setting elem->call to the empty 561 * Note: only waiting an RCU period after setting elem->call to the empty
562 * function insures that the original callback is not used anymore. This insured 562 * function insures that the original callback is not used anymore. This insured
563 * by preempt_disable around the call site. 563 * by rcu_read_lock_sched around the call site.
564 */ 564 */
565static void disable_marker(struct marker *elem) 565static void disable_marker(struct marker *elem)
566{ 566{
@@ -653,11 +653,17 @@ int marker_probe_register(const char *name, const char *format,
653 entry = get_marker(name); 653 entry = get_marker(name);
654 if (!entry) { 654 if (!entry) {
655 entry = add_marker(name, format); 655 entry = add_marker(name, format);
656 if (IS_ERR(entry)) { 656 if (IS_ERR(entry))
657 ret = PTR_ERR(entry); 657 ret = PTR_ERR(entry);
658 goto end; 658 } else if (format) {
659 } 659 if (!entry->format)
660 ret = marker_set_format(&entry, format);
661 else if (strcmp(entry->format, format))
662 ret = -EPERM;
660 } 663 }
664 if (ret)
665 goto end;
666
661 /* 667 /*
662 * If we detect that a call_rcu is pending for this marker, 668 * If we detect that a call_rcu is pending for this marker,
663 * make sure it's executed now. 669 * make sure it's executed now.
@@ -674,6 +680,8 @@ int marker_probe_register(const char *name, const char *format,
674 mutex_lock(&markers_mutex); 680 mutex_lock(&markers_mutex);
675 entry = get_marker(name); 681 entry = get_marker(name);
676 WARN_ON(!entry); 682 WARN_ON(!entry);
683 if (entry->rcu_pending)
684 rcu_barrier_sched();
677 entry->oldptr = old; 685 entry->oldptr = old;
678 entry->rcu_pending = 1; 686 entry->rcu_pending = 1;
679 /* write rcu_pending before calling the RCU callback */ 687 /* write rcu_pending before calling the RCU callback */
@@ -717,6 +725,8 @@ int marker_probe_unregister(const char *name,
717 entry = get_marker(name); 725 entry = get_marker(name);
718 if (!entry) 726 if (!entry)
719 goto end; 727 goto end;
728 if (entry->rcu_pending)
729 rcu_barrier_sched();
720 entry->oldptr = old; 730 entry->oldptr = old;
721 entry->rcu_pending = 1; 731 entry->rcu_pending = 1;
722 /* write rcu_pending before calling the RCU callback */ 732 /* write rcu_pending before calling the RCU callback */
@@ -795,6 +805,8 @@ int marker_probe_unregister_private_data(marker_probe_func *probe,
795 mutex_lock(&markers_mutex); 805 mutex_lock(&markers_mutex);
796 entry = get_marker_from_private_data(probe, probe_private); 806 entry = get_marker_from_private_data(probe, probe_private);
797 WARN_ON(!entry); 807 WARN_ON(!entry);
808 if (entry->rcu_pending)
809 rcu_barrier_sched();
798 entry->oldptr = old; 810 entry->oldptr = old;
799 entry->rcu_pending = 1; 811 entry->rcu_pending = 1;
800 /* write rcu_pending before calling the RCU callback */ 812 /* write rcu_pending before calling the RCU callback */
diff --git a/kernel/module.c b/kernel/module.c
index 9db11911e04b..0d8d21ee792c 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -46,6 +46,8 @@
46#include <asm/cacheflush.h> 46#include <asm/cacheflush.h>
47#include <linux/license.h> 47#include <linux/license.h>
48#include <asm/sections.h> 48#include <asm/sections.h>
49#include <linux/tracepoint.h>
50#include <linux/ftrace.h>
49 51
50#if 0 52#if 0
51#define DEBUGP printk 53#define DEBUGP printk
@@ -100,7 +102,7 @@ static inline int strong_try_module_get(struct module *mod)
100static inline void add_taint_module(struct module *mod, unsigned flag) 102static inline void add_taint_module(struct module *mod, unsigned flag)
101{ 103{
102 add_taint(flag); 104 add_taint(flag);
103 mod->taints |= flag; 105 mod->taints |= (1U << flag);
104} 106}
105 107
106/* 108/*
@@ -784,6 +786,7 @@ sys_delete_module(const char __user *name_user, unsigned int flags)
784 mutex_lock(&module_mutex); 786 mutex_lock(&module_mutex);
785 /* Store the name of the last unloaded module for diagnostic purposes */ 787 /* Store the name of the last unloaded module for diagnostic purposes */
786 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module)); 788 strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));
789 unregister_dynamic_debug_module(mod->name);
787 free_module(mod); 790 free_module(mod);
788 791
789 out: 792 out:
@@ -923,7 +926,7 @@ static const char vermagic[] = VERMAGIC_STRING;
923static int try_to_force_load(struct module *mod, const char *symname) 926static int try_to_force_load(struct module *mod, const char *symname)
924{ 927{
925#ifdef CONFIG_MODULE_FORCE_LOAD 928#ifdef CONFIG_MODULE_FORCE_LOAD
926 if (!(tainted & TAINT_FORCED_MODULE)) 929 if (!test_taint(TAINT_FORCED_MODULE))
927 printk("%s: no version for \"%s\" found: kernel tainted.\n", 930 printk("%s: no version for \"%s\" found: kernel tainted.\n",
928 mod->name, symname); 931 mod->name, symname);
929 add_taint_module(mod, TAINT_FORCED_MODULE); 932 add_taint_module(mod, TAINT_FORCED_MODULE);
@@ -1033,7 +1036,7 @@ static unsigned long resolve_symbol(Elf_Shdr *sechdrs,
1033 const unsigned long *crc; 1036 const unsigned long *crc;
1034 1037
1035 ret = find_symbol(name, &owner, &crc, 1038 ret = find_symbol(name, &owner, &crc,
1036 !(mod->taints & TAINT_PROPRIETARY_MODULE), true); 1039 !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
1037 if (!IS_ERR_VALUE(ret)) { 1040 if (!IS_ERR_VALUE(ret)) {
1038 /* use_module can fail due to OOM, 1041 /* use_module can fail due to OOM,
1039 or module initialization or unloading */ 1042 or module initialization or unloading */
@@ -1173,7 +1176,7 @@ static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
1173 while (i-- > 0) 1176 while (i-- > 0)
1174 sysfs_remove_bin_file(notes_attrs->dir, 1177 sysfs_remove_bin_file(notes_attrs->dir,
1175 &notes_attrs->attrs[i]); 1178 &notes_attrs->attrs[i]);
1176 kobject_del(notes_attrs->dir); 1179 kobject_put(notes_attrs->dir);
1177 } 1180 }
1178 kfree(notes_attrs); 1181 kfree(notes_attrs);
1179} 1182}
@@ -1429,6 +1432,9 @@ static void free_module(struct module *mod)
1429 /* Module unload stuff */ 1432 /* Module unload stuff */
1430 module_unload_free(mod); 1433 module_unload_free(mod);
1431 1434
1435 /* release any pointers to mcount in this module */
1436 ftrace_release(mod->module_core, mod->core_size);
1437
1432 /* This may be NULL, but that's OK */ 1438 /* This may be NULL, but that's OK */
1433 module_free(mod, mod->module_init); 1439 module_free(mod, mod->module_init);
1434 kfree(mod->args); 1440 kfree(mod->args);
@@ -1634,7 +1640,7 @@ static void set_license(struct module *mod, const char *license)
1634 license = "unspecified"; 1640 license = "unspecified";
1635 1641
1636 if (!license_is_gpl_compatible(license)) { 1642 if (!license_is_gpl_compatible(license)) {
1637 if (!(tainted & TAINT_PROPRIETARY_MODULE)) 1643 if (!test_taint(TAINT_PROPRIETARY_MODULE))
1638 printk(KERN_WARNING "%s: module license '%s' taints " 1644 printk(KERN_WARNING "%s: module license '%s' taints "
1639 "kernel.\n", mod->name, license); 1645 "kernel.\n", mod->name, license);
1640 add_taint_module(mod, TAINT_PROPRIETARY_MODULE); 1646 add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
@@ -1783,6 +1789,33 @@ static inline void add_kallsyms(struct module *mod,
1783} 1789}
1784#endif /* CONFIG_KALLSYMS */ 1790#endif /* CONFIG_KALLSYMS */
1785 1791
1792#ifdef CONFIG_DYNAMIC_PRINTK_DEBUG
1793static void dynamic_printk_setup(Elf_Shdr *sechdrs, unsigned int verboseindex)
1794{
1795 struct mod_debug *debug_info;
1796 unsigned long pos, end;
1797 unsigned int num_verbose;
1798
1799 pos = sechdrs[verboseindex].sh_addr;
1800 num_verbose = sechdrs[verboseindex].sh_size /
1801 sizeof(struct mod_debug);
1802 end = pos + (num_verbose * sizeof(struct mod_debug));
1803
1804 for (; pos < end; pos += sizeof(struct mod_debug)) {
1805 debug_info = (struct mod_debug *)pos;
1806 register_dynamic_debug_module(debug_info->modname,
1807 debug_info->type, debug_info->logical_modname,
1808 debug_info->flag_names, debug_info->hash,
1809 debug_info->hash2);
1810 }
1811}
1812#else
1813static inline void dynamic_printk_setup(Elf_Shdr *sechdrs,
1814 unsigned int verboseindex)
1815{
1816}
1817#endif /* CONFIG_DYNAMIC_PRINTK_DEBUG */
1818
1786static void *module_alloc_update_bounds(unsigned long size) 1819static void *module_alloc_update_bounds(unsigned long size)
1787{ 1820{
1788 void *ret = module_alloc(size); 1821 void *ret = module_alloc(size);
@@ -1806,6 +1839,7 @@ static noinline struct module *load_module(void __user *umod,
1806 Elf_Ehdr *hdr; 1839 Elf_Ehdr *hdr;
1807 Elf_Shdr *sechdrs; 1840 Elf_Shdr *sechdrs;
1808 char *secstrings, *args, *modmagic, *strtab = NULL; 1841 char *secstrings, *args, *modmagic, *strtab = NULL;
1842 char *staging;
1809 unsigned int i; 1843 unsigned int i;
1810 unsigned int symindex = 0; 1844 unsigned int symindex = 0;
1811 unsigned int strindex = 0; 1845 unsigned int strindex = 0;
@@ -1831,9 +1865,14 @@ static noinline struct module *load_module(void __user *umod,
1831#endif 1865#endif
1832 unsigned int markersindex; 1866 unsigned int markersindex;
1833 unsigned int markersstringsindex; 1867 unsigned int markersstringsindex;
1868 unsigned int verboseindex;
1869 unsigned int tracepointsindex;
1870 unsigned int tracepointsstringsindex;
1871 unsigned int mcountindex;
1834 struct module *mod; 1872 struct module *mod;
1835 long err = 0; 1873 long err = 0;
1836 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */ 1874 void *percpu = NULL, *ptr = NULL; /* Stops spurious gcc warning */
1875 void *mseg;
1837 struct exception_table_entry *extable; 1876 struct exception_table_entry *extable;
1838 mm_segment_t old_fs; 1877 mm_segment_t old_fs;
1839 1878
@@ -1960,6 +1999,14 @@ static noinline struct module *load_module(void __user *umod,
1960 goto free_hdr; 1999 goto free_hdr;
1961 } 2000 }
1962 2001
2002 staging = get_modinfo(sechdrs, infoindex, "staging");
2003 if (staging) {
2004 add_taint_module(mod, TAINT_CRAP);
2005 printk(KERN_WARNING "%s: module is from the staging directory,"
2006 " the quality is unknown, you have been warned.\n",
2007 mod->name);
2008 }
2009
1963 /* Now copy in args */ 2010 /* Now copy in args */
1964 args = strndup_user(uargs, ~0UL >> 1); 2011 args = strndup_user(uargs, ~0UL >> 1);
1965 if (IS_ERR(args)) { 2012 if (IS_ERR(args)) {
@@ -2117,6 +2164,13 @@ static noinline struct module *load_module(void __user *umod,
2117 markersindex = find_sec(hdr, sechdrs, secstrings, "__markers"); 2164 markersindex = find_sec(hdr, sechdrs, secstrings, "__markers");
2118 markersstringsindex = find_sec(hdr, sechdrs, secstrings, 2165 markersstringsindex = find_sec(hdr, sechdrs, secstrings,
2119 "__markers_strings"); 2166 "__markers_strings");
2167 verboseindex = find_sec(hdr, sechdrs, secstrings, "__verbose");
2168 tracepointsindex = find_sec(hdr, sechdrs, secstrings, "__tracepoints");
2169 tracepointsstringsindex = find_sec(hdr, sechdrs, secstrings,
2170 "__tracepoints_strings");
2171
2172 mcountindex = find_sec(hdr, sechdrs, secstrings,
2173 "__mcount_loc");
2120 2174
2121 /* Now do relocations. */ 2175 /* Now do relocations. */
2122 for (i = 1; i < hdr->e_shnum; i++) { 2176 for (i = 1; i < hdr->e_shnum; i++) {
@@ -2144,6 +2198,12 @@ static noinline struct module *load_module(void __user *umod,
2144 mod->num_markers = 2198 mod->num_markers =
2145 sechdrs[markersindex].sh_size / sizeof(*mod->markers); 2199 sechdrs[markersindex].sh_size / sizeof(*mod->markers);
2146#endif 2200#endif
2201#ifdef CONFIG_TRACEPOINTS
2202 mod->tracepoints = (void *)sechdrs[tracepointsindex].sh_addr;
2203 mod->num_tracepoints =
2204 sechdrs[tracepointsindex].sh_size / sizeof(*mod->tracepoints);
2205#endif
2206
2147 2207
2148 /* Find duplicate symbols */ 2208 /* Find duplicate symbols */
2149 err = verify_export_symbols(mod); 2209 err = verify_export_symbols(mod);
@@ -2162,11 +2222,22 @@ static noinline struct module *load_module(void __user *umod,
2162 2222
2163 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings); 2223 add_kallsyms(mod, sechdrs, symindex, strindex, secstrings);
2164 2224
2225 if (!mod->taints) {
2165#ifdef CONFIG_MARKERS 2226#ifdef CONFIG_MARKERS
2166 if (!mod->taints)
2167 marker_update_probe_range(mod->markers, 2227 marker_update_probe_range(mod->markers,
2168 mod->markers + mod->num_markers); 2228 mod->markers + mod->num_markers);
2169#endif 2229#endif
2230 dynamic_printk_setup(sechdrs, verboseindex);
2231#ifdef CONFIG_TRACEPOINTS
2232 tracepoint_update_probe_range(mod->tracepoints,
2233 mod->tracepoints + mod->num_tracepoints);
2234#endif
2235 }
2236
2237 /* sechdrs[0].sh_size is always zero */
2238 mseg = (void *)sechdrs[mcountindex].sh_addr;
2239 ftrace_init_module(mseg, mseg + sechdrs[mcountindex].sh_size);
2240
2170 err = module_finalize(hdr, sechdrs, mod); 2241 err = module_finalize(hdr, sechdrs, mod);
2171 if (err < 0) 2242 if (err < 0)
2172 goto cleanup; 2243 goto cleanup;
@@ -2236,6 +2307,7 @@ static noinline struct module *load_module(void __user *umod,
2236 cleanup: 2307 cleanup:
2237 kobject_del(&mod->mkobj.kobj); 2308 kobject_del(&mod->mkobj.kobj);
2238 kobject_put(&mod->mkobj.kobj); 2309 kobject_put(&mod->mkobj.kobj);
2310 ftrace_release(mod->module_core, mod->core_size);
2239 free_unload: 2311 free_unload:
2240 module_unload_free(mod); 2312 module_unload_free(mod);
2241 module_free(mod, mod->module_init); 2313 module_free(mod, mod->module_init);
@@ -2552,10 +2624,12 @@ static char *module_flags(struct module *mod, char *buf)
2552 mod->state == MODULE_STATE_GOING || 2624 mod->state == MODULE_STATE_GOING ||
2553 mod->state == MODULE_STATE_COMING) { 2625 mod->state == MODULE_STATE_COMING) {
2554 buf[bx++] = '('; 2626 buf[bx++] = '(';
2555 if (mod->taints & TAINT_PROPRIETARY_MODULE) 2627 if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
2556 buf[bx++] = 'P'; 2628 buf[bx++] = 'P';
2557 if (mod->taints & TAINT_FORCED_MODULE) 2629 if (mod->taints & (1 << TAINT_FORCED_MODULE))
2558 buf[bx++] = 'F'; 2630 buf[bx++] = 'F';
2631 if (mod->taints & (1 << TAINT_CRAP))
2632 buf[bx++] = 'C';
2559 /* 2633 /*
2560 * TAINT_FORCED_RMMOD: could be added. 2634 * TAINT_FORCED_RMMOD: could be added.
2561 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't 2635 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
@@ -2717,3 +2791,50 @@ void module_update_markers(void)
2717 mutex_unlock(&module_mutex); 2791 mutex_unlock(&module_mutex);
2718} 2792}
2719#endif 2793#endif
2794
2795#ifdef CONFIG_TRACEPOINTS
2796void module_update_tracepoints(void)
2797{
2798 struct module *mod;
2799
2800 mutex_lock(&module_mutex);
2801 list_for_each_entry(mod, &modules, list)
2802 if (!mod->taints)
2803 tracepoint_update_probe_range(mod->tracepoints,
2804 mod->tracepoints + mod->num_tracepoints);
2805 mutex_unlock(&module_mutex);
2806}
2807
2808/*
2809 * Returns 0 if current not found.
2810 * Returns 1 if current found.
2811 */
2812int module_get_iter_tracepoints(struct tracepoint_iter *iter)
2813{
2814 struct module *iter_mod;
2815 int found = 0;
2816
2817 mutex_lock(&module_mutex);
2818 list_for_each_entry(iter_mod, &modules, list) {
2819 if (!iter_mod->taints) {
2820 /*
2821 * Sorted module list
2822 */
2823 if (iter_mod < iter->module)
2824 continue;
2825 else if (iter_mod > iter->module)
2826 iter->tracepoint = NULL;
2827 found = tracepoint_get_iter_range(&iter->tracepoint,
2828 iter_mod->tracepoints,
2829 iter_mod->tracepoints
2830 + iter_mod->num_tracepoints);
2831 if (found) {
2832 iter->module = iter_mod;
2833 break;
2834 }
2835 }
2836 }
2837 mutex_unlock(&module_mutex);
2838 return found;
2839}
2840#endif
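A detail worth calling out in the module.c hunks: TAINT_* constants are now bit numbers, so mod->taints is tested with (1 << TAINT_X) (or test_taint()) rather than by AND-ing the constant directly. The userspace snippet below shows why the old mask-style test silently stops working once the constants are bit positions; the enum values here are for illustration only.

/*
 * Taint flags as bit numbers: set with (1U << flag), test by shifting
 * the bit number into a mask first.
 */
#include <stdio.h>

enum { TAINT_PROPRIETARY_MODULE = 0, TAINT_FORCED_MODULE = 1, TAINT_CRAP = 10 };

int main(void)
{
	unsigned int taints = 0;

	taints |= 1U << TAINT_CRAP;		/* add_taint_module() style */

	/* Wrong with bit numbers: "taints & TAINT_CRAP" tests bits 1 and 3. */
	printf("mask-style test : %u\n", taints & TAINT_CRAP);
	/* Correct: shift the bit number into a mask first. */
	printf("bit-style test  : %u\n", !!(taints & (1U << TAINT_CRAP)));
	return 0;
}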
diff --git a/kernel/notifier.c b/kernel/notifier.c
index 823be11584ef..4282c0a40a57 100644
--- a/kernel/notifier.c
+++ b/kernel/notifier.c
@@ -550,7 +550,7 @@ EXPORT_SYMBOL(unregister_reboot_notifier);
550 550
551static ATOMIC_NOTIFIER_HEAD(die_chain); 551static ATOMIC_NOTIFIER_HEAD(die_chain);
552 552
553int notify_die(enum die_val val, const char *str, 553int notrace notify_die(enum die_val val, const char *str,
554 struct pt_regs *regs, long err, int trap, int sig) 554 struct pt_regs *regs, long err, int trap, int sig)
555{ 555{
556 struct die_args args = { 556 struct die_args args = {
diff --git a/kernel/panic.c b/kernel/panic.c
index 12c5a0a6c89b..bda561ef3cdf 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -23,7 +23,7 @@
23#include <linux/kallsyms.h> 23#include <linux/kallsyms.h>
24 24
25int panic_on_oops; 25int panic_on_oops;
26int tainted; 26static unsigned long tainted_mask;
27static int pause_on_oops; 27static int pause_on_oops;
28static int pause_on_oops_flag; 28static int pause_on_oops_flag;
29static DEFINE_SPINLOCK(pause_on_oops_lock); 29static DEFINE_SPINLOCK(pause_on_oops_lock);
@@ -143,6 +143,27 @@ NORET_TYPE void panic(const char * fmt, ...)
143 143
144EXPORT_SYMBOL(panic); 144EXPORT_SYMBOL(panic);
145 145
146
147struct tnt {
148 u8 bit;
149 char true;
150 char false;
151};
152
153static const struct tnt tnts[] = {
154 { TAINT_PROPRIETARY_MODULE, 'P', 'G' },
155 { TAINT_FORCED_MODULE, 'F', ' ' },
156 { TAINT_UNSAFE_SMP, 'S', ' ' },
157 { TAINT_FORCED_RMMOD, 'R', ' ' },
158 { TAINT_MACHINE_CHECK, 'M', ' ' },
159 { TAINT_BAD_PAGE, 'B', ' ' },
160 { TAINT_USER, 'U', ' ' },
161 { TAINT_DIE, 'D', ' ' },
162 { TAINT_OVERRIDDEN_ACPI_TABLE, 'A', ' ' },
163 { TAINT_WARN, 'W', ' ' },
164 { TAINT_CRAP, 'C', ' ' },
165};
166
146/** 167/**
147 * print_tainted - return a string to represent the kernel taint state. 168 * print_tainted - return a string to represent the kernel taint state.
148 * 169 *
@@ -155,35 +176,45 @@ EXPORT_SYMBOL(panic);
155 * 'U' - Userspace-defined naughtiness. 176 * 'U' - Userspace-defined naughtiness.
156 * 'A' - ACPI table overridden. 177 * 'A' - ACPI table overridden.
157 * 'W' - Taint on warning. 178 * 'W' - Taint on warning.
179 * 'C' - modules from drivers/staging are loaded.
158 * 180 *
159 * The string is overwritten by the next call to print_taint(). 181 * The string is overwritten by the next call to print_taint().
160 */ 182 */
161
162const char *print_tainted(void) 183const char *print_tainted(void)
163{ 184{
164 static char buf[20]; 185 static char buf[ARRAY_SIZE(tnts) + sizeof("Tainted: ") + 1];
165 if (tainted) { 186
166 snprintf(buf, sizeof(buf), "Tainted: %c%c%c%c%c%c%c%c%c%c", 187 if (tainted_mask) {
167 tainted & TAINT_PROPRIETARY_MODULE ? 'P' : 'G', 188 char *s;
168 tainted & TAINT_FORCED_MODULE ? 'F' : ' ', 189 int i;
169 tainted & TAINT_UNSAFE_SMP ? 'S' : ' ', 190
170 tainted & TAINT_FORCED_RMMOD ? 'R' : ' ', 191 s = buf + sprintf(buf, "Tainted: ");
171 tainted & TAINT_MACHINE_CHECK ? 'M' : ' ', 192 for (i = 0; i < ARRAY_SIZE(tnts); i++) {
172 tainted & TAINT_BAD_PAGE ? 'B' : ' ', 193 const struct tnt *t = &tnts[i];
173 tainted & TAINT_USER ? 'U' : ' ', 194 *s++ = test_bit(t->bit, &tainted_mask) ?
174 tainted & TAINT_DIE ? 'D' : ' ', 195 t->true : t->false;
175 tainted & TAINT_OVERRIDDEN_ACPI_TABLE ? 'A' : ' ', 196 }
176 tainted & TAINT_WARN ? 'W' : ' '); 197 *s = 0;
177 } 198 } else
178 else
179 snprintf(buf, sizeof(buf), "Not tainted"); 199 snprintf(buf, sizeof(buf), "Not tainted");
180 return(buf); 200 return(buf);
181} 201}
182 202
203int test_taint(unsigned flag)
204{
205 return test_bit(flag, &tainted_mask);
206}
207EXPORT_SYMBOL(test_taint);
208
209unsigned long get_taint(void)
210{
211 return tainted_mask;
212}
213
183void add_taint(unsigned flag) 214void add_taint(unsigned flag)
184{ 215{
185 debug_locks = 0; /* can't trust the integrity of the kernel anymore */ 216 debug_locks = 0; /* can't trust the integrity of the kernel anymore */
186 tainted |= flag; 217 set_bit(flag, &tainted_mask);
187} 218}
188EXPORT_SYMBOL(add_taint); 219EXPORT_SYMBOL(add_taint);
189 220
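panic.c rebuilds print_tainted() as a walk over the tnts[] table against a taint bitmask, so adding a flag means adding one table entry instead of editing a format string. A self-contained userspace version of that table-driven rendering (test_bit() modeled with a shift and mask, and only a few representative bits) follows:

/*
 * Table-driven taint string: one entry per bit with the character to
 * print when the bit is set and when it is clear.
 */
#include <stdio.h>

struct tnt { unsigned bit; char t; char f; };

static const struct tnt tnts[] = {
	{ 0, 'P', 'G' },	/* proprietary module */
	{ 1, 'F', ' ' },	/* forced load */
	{ 9, 'W', ' ' },	/* warning issued */
	{ 10, 'C', ' ' },	/* staging module */
};

static const char *print_tainted(unsigned long mask)
{
	static char buf[sizeof("Tainted: ") + sizeof(tnts) / sizeof(tnts[0]) + 1];
	size_t i;
	char *s;

	if (!mask)
		return "Not tainted";
	s = buf + sprintf(buf, "Tainted: ");
	for (i = 0; i < sizeof(tnts) / sizeof(tnts[0]); i++)
		*s++ = (mask >> tnts[i].bit) & 1 ? tnts[i].t : tnts[i].f;
	*s = '\0';
	return buf;
}

int main(void)
{
	printf("%s\n", print_tainted(0));
	printf("%s\n", print_tainted((1UL << 0) | (1UL << 10)));
	return 0;
}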
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index c42a03aef36f..153dcb2639c3 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -7,6 +7,93 @@
7#include <linux/errno.h> 7#include <linux/errno.h>
8#include <linux/math64.h> 8#include <linux/math64.h>
9#include <asm/uaccess.h> 9#include <asm/uaccess.h>
10#include <linux/kernel_stat.h>
11
12/*
13 * Allocate the thread_group_cputime structure appropriately and fill in the
14 * current values of the fields. Called from copy_signal() via
15 * thread_group_cputime_clone_thread() when adding a second or subsequent
16 * thread to a thread group. Assumes interrupts are enabled when called.
17 */
18int thread_group_cputime_alloc(struct task_struct *tsk)
19{
20 struct signal_struct *sig = tsk->signal;
21 struct task_cputime *cputime;
22
23 /*
24 * If we have multiple threads and we don't already have a
25 * per-CPU task_cputime struct (checked in the caller), allocate
26 * one and fill it in with the times accumulated so far. We may
27 * race with another thread so recheck after we pick up the sighand
28 * lock.
29 */
30 cputime = alloc_percpu(struct task_cputime);
31 if (cputime == NULL)
32 return -ENOMEM;
33 spin_lock_irq(&tsk->sighand->siglock);
34 if (sig->cputime.totals) {
35 spin_unlock_irq(&tsk->sighand->siglock);
36 free_percpu(cputime);
37 return 0;
38 }
39 sig->cputime.totals = cputime;
40 cputime = per_cpu_ptr(sig->cputime.totals, smp_processor_id());
41 cputime->utime = tsk->utime;
42 cputime->stime = tsk->stime;
43 cputime->sum_exec_runtime = tsk->se.sum_exec_runtime;
44 spin_unlock_irq(&tsk->sighand->siglock);
45 return 0;
46}
47
48/**
49 * thread_group_cputime - Sum the thread group time fields across all CPUs.
50 *
51 * @tsk: The task we use to identify the thread group.
52 * @times: task_cputime structure in which we return the summed fields.
53 *
54 * Walk the list of CPUs to sum the per-CPU time fields in the thread group
55 * time structure.
56 */
57void thread_group_cputime(
58 struct task_struct *tsk,
59 struct task_cputime *times)
60{
61 struct signal_struct *sig;
62 int i;
63 struct task_cputime *tot;
64
65 sig = tsk->signal;
66 if (unlikely(!sig) || !sig->cputime.totals) {
67 times->utime = tsk->utime;
68 times->stime = tsk->stime;
69 times->sum_exec_runtime = tsk->se.sum_exec_runtime;
70 return;
71 }
72 times->stime = times->utime = cputime_zero;
73 times->sum_exec_runtime = 0;
74 for_each_possible_cpu(i) {
75 tot = per_cpu_ptr(tsk->signal->cputime.totals, i);
76 times->utime = cputime_add(times->utime, tot->utime);
77 times->stime = cputime_add(times->stime, tot->stime);
78 times->sum_exec_runtime += tot->sum_exec_runtime;
79 }
80}
81
82/*
83 * Called after updating RLIMIT_CPU to set timer expiration if necessary.
84 */
85void update_rlimit_cpu(unsigned long rlim_new)
86{
87 cputime_t cputime;
88
89 cputime = secs_to_cputime(rlim_new);
90 if (cputime_eq(current->signal->it_prof_expires, cputime_zero) ||
91 cputime_lt(current->signal->it_prof_expires, cputime)) {
92 spin_lock_irq(&current->sighand->siglock);
93 set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
94 spin_unlock_irq(&current->sighand->siglock);
95 }
96}
10 97
11static int check_clock(const clockid_t which_clock) 98static int check_clock(const clockid_t which_clock)
12{ 99{
@@ -158,10 +245,6 @@ static inline cputime_t virt_ticks(struct task_struct *p)
158{ 245{
159 return p->utime; 246 return p->utime;
160} 247}
161static inline unsigned long long sched_ns(struct task_struct *p)
162{
163 return task_sched_runtime(p);
164}
165 248
166int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp) 249int posix_cpu_clock_getres(const clockid_t which_clock, struct timespec *tp)
167{ 250{
@@ -211,7 +294,7 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
211 cpu->cpu = virt_ticks(p); 294 cpu->cpu = virt_ticks(p);
212 break; 295 break;
213 case CPUCLOCK_SCHED: 296 case CPUCLOCK_SCHED:
214 cpu->sched = sched_ns(p); 297 cpu->sched = p->se.sum_exec_runtime + task_delta_exec(p);
215 break; 298 break;
216 } 299 }
217 return 0; 300 return 0;
@@ -220,59 +303,30 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
220/* 303/*
221 * Sample a process (thread group) clock for the given group_leader task. 304 * Sample a process (thread group) clock for the given group_leader task.
222 * Must be called with tasklist_lock held for reading. 305 * Must be called with tasklist_lock held for reading.
223 * Must be called with tasklist_lock held for reading, and p->sighand->siglock.
224 */ 306 */
225static int cpu_clock_sample_group_locked(unsigned int clock_idx, 307static int cpu_clock_sample_group(const clockid_t which_clock,
226 struct task_struct *p, 308 struct task_struct *p,
227 union cpu_time_count *cpu) 309 union cpu_time_count *cpu)
228{ 310{
229 struct task_struct *t = p; 311 struct task_cputime cputime;
230 switch (clock_idx) { 312
313 thread_group_cputime(p, &cputime);
314 switch (which_clock) {
231 default: 315 default:
232 return -EINVAL; 316 return -EINVAL;
233 case CPUCLOCK_PROF: 317 case CPUCLOCK_PROF:
234 cpu->cpu = cputime_add(p->signal->utime, p->signal->stime); 318 cpu->cpu = cputime_add(cputime.utime, cputime.stime);
235 do {
236 cpu->cpu = cputime_add(cpu->cpu, prof_ticks(t));
237 t = next_thread(t);
238 } while (t != p);
239 break; 319 break;
240 case CPUCLOCK_VIRT: 320 case CPUCLOCK_VIRT:
241 cpu->cpu = p->signal->utime; 321 cpu->cpu = cputime.utime;
242 do {
243 cpu->cpu = cputime_add(cpu->cpu, virt_ticks(t));
244 t = next_thread(t);
245 } while (t != p);
246 break; 322 break;
247 case CPUCLOCK_SCHED: 323 case CPUCLOCK_SCHED:
248 cpu->sched = p->signal->sum_sched_runtime; 324 cpu->sched = cputime.sum_exec_runtime + task_delta_exec(p);
249 /* Add in each other live thread. */
250 while ((t = next_thread(t)) != p) {
251 cpu->sched += t->se.sum_exec_runtime;
252 }
253 cpu->sched += sched_ns(p);
254 break; 325 break;
255 } 326 }
256 return 0; 327 return 0;
257} 328}
258 329
259/*
260 * Sample a process (thread group) clock for the given group_leader task.
261 * Must be called with tasklist_lock held for reading.
262 */
263static int cpu_clock_sample_group(const clockid_t which_clock,
264 struct task_struct *p,
265 union cpu_time_count *cpu)
266{
267 int ret;
268 unsigned long flags;
269 spin_lock_irqsave(&p->sighand->siglock, flags);
270 ret = cpu_clock_sample_group_locked(CPUCLOCK_WHICH(which_clock), p,
271 cpu);
272 spin_unlock_irqrestore(&p->sighand->siglock, flags);
273 return ret;
274}
275
276 330
277int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp) 331int posix_cpu_clock_get(const clockid_t which_clock, struct timespec *tp)
278{ 332{
@@ -471,80 +525,11 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
471} 525}
472void posix_cpu_timers_exit_group(struct task_struct *tsk) 526void posix_cpu_timers_exit_group(struct task_struct *tsk)
473{ 527{
474 cleanup_timers(tsk->signal->cpu_timers, 528 struct task_cputime cputime;
475 cputime_add(tsk->utime, tsk->signal->utime),
476 cputime_add(tsk->stime, tsk->signal->stime),
477 tsk->se.sum_exec_runtime + tsk->signal->sum_sched_runtime);
478}
479 529
480 530 thread_group_cputime(tsk, &cputime);
481/* 531 cleanup_timers(tsk->signal->cpu_timers,
482 * Set the expiry times of all the threads in the process so one of them 532 cputime.utime, cputime.stime, cputime.sum_exec_runtime);
483 * will go off before the process cumulative expiry total is reached.
484 */
485static void process_timer_rebalance(struct task_struct *p,
486 unsigned int clock_idx,
487 union cpu_time_count expires,
488 union cpu_time_count val)
489{
490 cputime_t ticks, left;
491 unsigned long long ns, nsleft;
492 struct task_struct *t = p;
493 unsigned int nthreads = atomic_read(&p->signal->live);
494
495 if (!nthreads)
496 return;
497
498 switch (clock_idx) {
499 default:
500 BUG();
501 break;
502 case CPUCLOCK_PROF:
503 left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
504 nthreads);
505 do {
506 if (likely(!(t->flags & PF_EXITING))) {
507 ticks = cputime_add(prof_ticks(t), left);
508 if (cputime_eq(t->it_prof_expires,
509 cputime_zero) ||
510 cputime_gt(t->it_prof_expires, ticks)) {
511 t->it_prof_expires = ticks;
512 }
513 }
514 t = next_thread(t);
515 } while (t != p);
516 break;
517 case CPUCLOCK_VIRT:
518 left = cputime_div_non_zero(cputime_sub(expires.cpu, val.cpu),
519 nthreads);
520 do {
521 if (likely(!(t->flags & PF_EXITING))) {
522 ticks = cputime_add(virt_ticks(t), left);
523 if (cputime_eq(t->it_virt_expires,
524 cputime_zero) ||
525 cputime_gt(t->it_virt_expires, ticks)) {
526 t->it_virt_expires = ticks;
527 }
528 }
529 t = next_thread(t);
530 } while (t != p);
531 break;
532 case CPUCLOCK_SCHED:
533 nsleft = expires.sched - val.sched;
534 do_div(nsleft, nthreads);
535 nsleft = max_t(unsigned long long, nsleft, 1);
536 do {
537 if (likely(!(t->flags & PF_EXITING))) {
538 ns = t->se.sum_exec_runtime + nsleft;
539 if (t->it_sched_expires == 0 ||
540 t->it_sched_expires > ns) {
541 t->it_sched_expires = ns;
542 }
543 }
544 t = next_thread(t);
545 } while (t != p);
546 break;
547 }
548} 533}
549 534
550static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now) 535static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
@@ -608,29 +593,32 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
608 default: 593 default:
609 BUG(); 594 BUG();
610 case CPUCLOCK_PROF: 595 case CPUCLOCK_PROF:
611 if (cputime_eq(p->it_prof_expires, 596 if (cputime_eq(p->cputime_expires.prof_exp,
612 cputime_zero) || 597 cputime_zero) ||
613 cputime_gt(p->it_prof_expires, 598 cputime_gt(p->cputime_expires.prof_exp,
614 nt->expires.cpu)) 599 nt->expires.cpu))
615 p->it_prof_expires = nt->expires.cpu; 600 p->cputime_expires.prof_exp =
601 nt->expires.cpu;
616 break; 602 break;
617 case CPUCLOCK_VIRT: 603 case CPUCLOCK_VIRT:
618 if (cputime_eq(p->it_virt_expires, 604 if (cputime_eq(p->cputime_expires.virt_exp,
619 cputime_zero) || 605 cputime_zero) ||
620 cputime_gt(p->it_virt_expires, 606 cputime_gt(p->cputime_expires.virt_exp,
621 nt->expires.cpu)) 607 nt->expires.cpu))
622 p->it_virt_expires = nt->expires.cpu; 608 p->cputime_expires.virt_exp =
609 nt->expires.cpu;
623 break; 610 break;
624 case CPUCLOCK_SCHED: 611 case CPUCLOCK_SCHED:
625 if (p->it_sched_expires == 0 || 612 if (p->cputime_expires.sched_exp == 0 ||
626 p->it_sched_expires > nt->expires.sched) 613 p->cputime_expires.sched_exp >
627 p->it_sched_expires = nt->expires.sched; 614 nt->expires.sched)
615 p->cputime_expires.sched_exp =
616 nt->expires.sched;
628 break; 617 break;
629 } 618 }
630 } else { 619 } else {
631 /* 620 /*
632 * For a process timer, we must balance 621 * For a process timer, set the cached expiration time.
633 * all the live threads' expirations.
634 */ 622 */
635 switch (CPUCLOCK_WHICH(timer->it_clock)) { 623 switch (CPUCLOCK_WHICH(timer->it_clock)) {
636 default: 624 default:
@@ -641,7 +629,9 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
641 cputime_lt(p->signal->it_virt_expires, 629 cputime_lt(p->signal->it_virt_expires,
642 timer->it.cpu.expires.cpu)) 630 timer->it.cpu.expires.cpu))
643 break; 631 break;
644 goto rebalance; 632 p->signal->cputime_expires.virt_exp =
633 timer->it.cpu.expires.cpu;
634 break;
645 case CPUCLOCK_PROF: 635 case CPUCLOCK_PROF:
646 if (!cputime_eq(p->signal->it_prof_expires, 636 if (!cputime_eq(p->signal->it_prof_expires,
647 cputime_zero) && 637 cputime_zero) &&
@@ -652,13 +642,12 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
652 if (i != RLIM_INFINITY && 642 if (i != RLIM_INFINITY &&
653 i <= cputime_to_secs(timer->it.cpu.expires.cpu)) 643 i <= cputime_to_secs(timer->it.cpu.expires.cpu))
654 break; 644 break;
655 goto rebalance; 645 p->signal->cputime_expires.prof_exp =
646 timer->it.cpu.expires.cpu;
647 break;
656 case CPUCLOCK_SCHED: 648 case CPUCLOCK_SCHED:
657 rebalance: 649 p->signal->cputime_expires.sched_exp =
658 process_timer_rebalance( 650 timer->it.cpu.expires.sched;
659 timer->it.cpu.task,
660 CPUCLOCK_WHICH(timer->it_clock),
661 timer->it.cpu.expires, now);
662 break; 651 break;
663 } 652 }
664 } 653 }
@@ -969,13 +958,13 @@ static void check_thread_timers(struct task_struct *tsk,
969 struct signal_struct *const sig = tsk->signal; 958 struct signal_struct *const sig = tsk->signal;
970 959
971 maxfire = 20; 960 maxfire = 20;
972 tsk->it_prof_expires = cputime_zero; 961 tsk->cputime_expires.prof_exp = cputime_zero;
973 while (!list_empty(timers)) { 962 while (!list_empty(timers)) {
974 struct cpu_timer_list *t = list_first_entry(timers, 963 struct cpu_timer_list *t = list_first_entry(timers,
975 struct cpu_timer_list, 964 struct cpu_timer_list,
976 entry); 965 entry);
977 if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) { 966 if (!--maxfire || cputime_lt(prof_ticks(tsk), t->expires.cpu)) {
978 tsk->it_prof_expires = t->expires.cpu; 967 tsk->cputime_expires.prof_exp = t->expires.cpu;
979 break; 968 break;
980 } 969 }
981 t->firing = 1; 970 t->firing = 1;
@@ -984,13 +973,13 @@ static void check_thread_timers(struct task_struct *tsk,
984 973
985 ++timers; 974 ++timers;
986 maxfire = 20; 975 maxfire = 20;
987 tsk->it_virt_expires = cputime_zero; 976 tsk->cputime_expires.virt_exp = cputime_zero;
988 while (!list_empty(timers)) { 977 while (!list_empty(timers)) {
989 struct cpu_timer_list *t = list_first_entry(timers, 978 struct cpu_timer_list *t = list_first_entry(timers,
990 struct cpu_timer_list, 979 struct cpu_timer_list,
991 entry); 980 entry);
992 if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) { 981 if (!--maxfire || cputime_lt(virt_ticks(tsk), t->expires.cpu)) {
993 tsk->it_virt_expires = t->expires.cpu; 982 tsk->cputime_expires.virt_exp = t->expires.cpu;
994 break; 983 break;
995 } 984 }
996 t->firing = 1; 985 t->firing = 1;
@@ -999,13 +988,13 @@ static void check_thread_timers(struct task_struct *tsk,
999 988
1000 ++timers; 989 ++timers;
1001 maxfire = 20; 990 maxfire = 20;
1002 tsk->it_sched_expires = 0; 991 tsk->cputime_expires.sched_exp = 0;
1003 while (!list_empty(timers)) { 992 while (!list_empty(timers)) {
1004 struct cpu_timer_list *t = list_first_entry(timers, 993 struct cpu_timer_list *t = list_first_entry(timers,
1005 struct cpu_timer_list, 994 struct cpu_timer_list,
1006 entry); 995 entry);
1007 if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) { 996 if (!--maxfire || tsk->se.sum_exec_runtime < t->expires.sched) {
1008 tsk->it_sched_expires = t->expires.sched; 997 tsk->cputime_expires.sched_exp = t->expires.sched;
1009 break; 998 break;
1010 } 999 }
1011 t->firing = 1; 1000 t->firing = 1;
@@ -1055,10 +1044,10 @@ static void check_process_timers(struct task_struct *tsk,
1055{ 1044{
1056 int maxfire; 1045 int maxfire;
1057 struct signal_struct *const sig = tsk->signal; 1046 struct signal_struct *const sig = tsk->signal;
1058 cputime_t utime, stime, ptime, virt_expires, prof_expires; 1047 cputime_t utime, ptime, virt_expires, prof_expires;
1059 unsigned long long sum_sched_runtime, sched_expires; 1048 unsigned long long sum_sched_runtime, sched_expires;
1060 struct task_struct *t;
1061 struct list_head *timers = sig->cpu_timers; 1049 struct list_head *timers = sig->cpu_timers;
1050 struct task_cputime cputime;
1062 1051
1063 /* 1052 /*
1064 * Don't sample the current process CPU clocks if there are no timers. 1053 * Don't sample the current process CPU clocks if there are no timers.
@@ -1074,18 +1063,10 @@ static void check_process_timers(struct task_struct *tsk,
1074 /* 1063 /*
1075 * Collect the current process totals. 1064 * Collect the current process totals.
1076 */ 1065 */
1077 utime = sig->utime; 1066 thread_group_cputime(tsk, &cputime);
1078 stime = sig->stime; 1067 utime = cputime.utime;
1079 sum_sched_runtime = sig->sum_sched_runtime; 1068 ptime = cputime_add(utime, cputime.stime);
1080 t = tsk; 1069 sum_sched_runtime = cputime.sum_exec_runtime;
1081 do {
1082 utime = cputime_add(utime, t->utime);
1083 stime = cputime_add(stime, t->stime);
1084 sum_sched_runtime += t->se.sum_exec_runtime;
1085 t = next_thread(t);
1086 } while (t != tsk);
1087 ptime = cputime_add(utime, stime);
1088
1089 maxfire = 20; 1070 maxfire = 20;
1090 prof_expires = cputime_zero; 1071 prof_expires = cputime_zero;
1091 while (!list_empty(timers)) { 1072 while (!list_empty(timers)) {
@@ -1193,60 +1174,18 @@ static void check_process_timers(struct task_struct *tsk,
1193 } 1174 }
1194 } 1175 }
1195 1176
1196 if (!cputime_eq(prof_expires, cputime_zero) || 1177 if (!cputime_eq(prof_expires, cputime_zero) &&
1197 !cputime_eq(virt_expires, cputime_zero) || 1178 (cputime_eq(sig->cputime_expires.prof_exp, cputime_zero) ||
1198 sched_expires != 0) { 1179 cputime_gt(sig->cputime_expires.prof_exp, prof_expires)))
1199 /* 1180 sig->cputime_expires.prof_exp = prof_expires;
1200 * Rebalance the threads' expiry times for the remaining 1181 if (!cputime_eq(virt_expires, cputime_zero) &&
1201 * process CPU timers. 1182 (cputime_eq(sig->cputime_expires.virt_exp, cputime_zero) ||
1202 */ 1183 cputime_gt(sig->cputime_expires.virt_exp, virt_expires)))
1203 1184 sig->cputime_expires.virt_exp = virt_expires;
1204 cputime_t prof_left, virt_left, ticks; 1185 if (sched_expires != 0 &&
1205 unsigned long long sched_left, sched; 1186 (sig->cputime_expires.sched_exp == 0 ||
1206 const unsigned int nthreads = atomic_read(&sig->live); 1187 sig->cputime_expires.sched_exp > sched_expires))
1207 1188 sig->cputime_expires.sched_exp = sched_expires;
1208 if (!nthreads)
1209 return;
1210
1211 prof_left = cputime_sub(prof_expires, utime);
1212 prof_left = cputime_sub(prof_left, stime);
1213 prof_left = cputime_div_non_zero(prof_left, nthreads);
1214 virt_left = cputime_sub(virt_expires, utime);
1215 virt_left = cputime_div_non_zero(virt_left, nthreads);
1216 if (sched_expires) {
1217 sched_left = sched_expires - sum_sched_runtime;
1218 do_div(sched_left, nthreads);
1219 sched_left = max_t(unsigned long long, sched_left, 1);
1220 } else {
1221 sched_left = 0;
1222 }
1223 t = tsk;
1224 do {
1225 if (unlikely(t->flags & PF_EXITING))
1226 continue;
1227
1228 ticks = cputime_add(cputime_add(t->utime, t->stime),
1229 prof_left);
1230 if (!cputime_eq(prof_expires, cputime_zero) &&
1231 (cputime_eq(t->it_prof_expires, cputime_zero) ||
1232 cputime_gt(t->it_prof_expires, ticks))) {
1233 t->it_prof_expires = ticks;
1234 }
1235
1236 ticks = cputime_add(t->utime, virt_left);
1237 if (!cputime_eq(virt_expires, cputime_zero) &&
1238 (cputime_eq(t->it_virt_expires, cputime_zero) ||
1239 cputime_gt(t->it_virt_expires, ticks))) {
1240 t->it_virt_expires = ticks;
1241 }
1242
1243 sched = t->se.sum_exec_runtime + sched_left;
1244 if (sched_expires && (t->it_sched_expires == 0 ||
1245 t->it_sched_expires > sched)) {
1246 t->it_sched_expires = sched;
1247 }
1248 } while ((t = next_thread(t)) != tsk);
1249 }
1250} 1189}
1251 1190
1252/* 1191/*
@@ -1314,6 +1253,86 @@ out:
1314 ++timer->it_requeue_pending; 1253 ++timer->it_requeue_pending;
1315} 1254}
1316 1255
1256/**
1257 * task_cputime_zero - Check a task_cputime struct for all zero fields.
1258 *
1259 * @cputime: The struct to compare.
1260 *
1261 * Checks @cputime to see if all fields are zero. Returns true if all fields
1262 * are zero, false if any field is nonzero.
1263 */
1264static inline int task_cputime_zero(const struct task_cputime *cputime)
1265{
1266 if (cputime_eq(cputime->utime, cputime_zero) &&
1267 cputime_eq(cputime->stime, cputime_zero) &&
1268 cputime->sum_exec_runtime == 0)
1269 return 1;
1270 return 0;
1271}
1272
1273/**
1274 * task_cputime_expired - Compare two task_cputime entities.
1275 *
1276 * @sample: The task_cputime structure to be checked for expiration.
1277 * @expires: Expiration times, against which @sample will be checked.
1278 *
1279 * Checks @sample against @expires to see if any field of @sample has expired.
1280 * Returns true if any field of @sample is greater than or equal to the
1281 * corresponding non-zero field of @expires; otherwise returns false.
1282 */
1283static inline int task_cputime_expired(const struct task_cputime *sample,
1284 const struct task_cputime *expires)
1285{
1286 if (!cputime_eq(expires->utime, cputime_zero) &&
1287 cputime_ge(sample->utime, expires->utime))
1288 return 1;
1289 if (!cputime_eq(expires->stime, cputime_zero) &&
1290 cputime_ge(cputime_add(sample->utime, sample->stime),
1291 expires->stime))
1292 return 1;
1293 if (expires->sum_exec_runtime != 0 &&
1294 sample->sum_exec_runtime >= expires->sum_exec_runtime)
1295 return 1;
1296 return 0;
1297}
1298
1299/**
1300 * fastpath_timer_check - POSIX CPU timers fast path.
1301 *
1302 * @tsk: The task (thread) being checked.
1303 *
1304 * Check the task and thread group timers. If both are zero (there are no
1305 * timers set) return false. Otherwise snapshot the task and thread group
1306 * timers and compare them with the corresponding expiration times. Return
1307 * true if a timer has expired, else return false.
1308 */
1309static inline int fastpath_timer_check(struct task_struct *tsk)
1310{
1311 struct signal_struct *sig = tsk->signal;
1312
1313 if (unlikely(!sig))
1314 return 0;
1315
1316 if (!task_cputime_zero(&tsk->cputime_expires)) {
1317 struct task_cputime task_sample = {
1318 .utime = tsk->utime,
1319 .stime = tsk->stime,
1320 .sum_exec_runtime = tsk->se.sum_exec_runtime
1321 };
1322
1323 if (task_cputime_expired(&task_sample, &tsk->cputime_expires))
1324 return 1;
1325 }
1326 if (!task_cputime_zero(&sig->cputime_expires)) {
1327 struct task_cputime group_sample;
1328
1329 thread_group_cputime(tsk, &group_sample);
1330 if (task_cputime_expired(&group_sample, &sig->cputime_expires))
1331 return 1;
1332 }
1333 return 0;
1334}
1335
1317/* 1336/*
1318 * This is called from the timer interrupt handler. The irq handler has 1337 * This is called from the timer interrupt handler. The irq handler has
1319 * already updated our counts. We need to check if any timers fire now. 1338 * already updated our counts. We need to check if any timers fire now.
@@ -1326,42 +1345,31 @@ void run_posix_cpu_timers(struct task_struct *tsk)
1326 1345
1327 BUG_ON(!irqs_disabled()); 1346 BUG_ON(!irqs_disabled());
1328 1347
1329#define UNEXPIRED(clock) \ 1348 /*
1330 (cputime_eq(tsk->it_##clock##_expires, cputime_zero) || \ 1349 * The fast path checks that there are no expired thread or thread
1331 cputime_lt(clock##_ticks(tsk), tsk->it_##clock##_expires)) 1350 * group timers. If that's so, just return.
1332 1351 */
1333 if (UNEXPIRED(prof) && UNEXPIRED(virt) && 1352 if (!fastpath_timer_check(tsk))
1334 (tsk->it_sched_expires == 0 ||
1335 tsk->se.sum_exec_runtime < tsk->it_sched_expires))
1336 return; 1353 return;
1337 1354
1338#undef UNEXPIRED 1355 spin_lock(&tsk->sighand->siglock);
1339
1340 /* 1356 /*
1341 * Double-check with locks held. 1357 * Here we take off tsk->signal->cpu_timers[N] and
1358 * tsk->cpu_timers[N] all the timers that are firing, and
1359 * put them on the firing list.
1342 */ 1360 */
1343 read_lock(&tasklist_lock); 1361 check_thread_timers(tsk, &firing);
1344 if (likely(tsk->signal != NULL)) { 1362 check_process_timers(tsk, &firing);
1345 spin_lock(&tsk->sighand->siglock);
1346 1363
1347 /* 1364 /*
1348 * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N] 1365 * We must release these locks before taking any timer's lock.
1349 * all the timers that are firing, and put them on the firing list. 1366 * There is a potential race with timer deletion here, as the
1350 */ 1367 * siglock now protects our private firing list. We have set
1351 check_thread_timers(tsk, &firing); 1368 * the firing flag in each timer, so that a deletion attempt
1352 check_process_timers(tsk, &firing); 1369 * that gets the timer lock before we do will give it up and
1353 1370 * spin until we've taken care of that timer below.
1354 /* 1371 */
1355 * We must release these locks before taking any timer's lock. 1372 spin_unlock(&tsk->sighand->siglock);
1356 * There is a potential race with timer deletion here, as the
1357 * siglock now protects our private firing list. We have set
1358 * the firing flag in each timer, so that a deletion attempt
1359 * that gets the timer lock before we do will give it up and
1360 * spin until we've taken care of that timer below.
1361 */
1362 spin_unlock(&tsk->sighand->siglock);
1363 }
1364 read_unlock(&tasklist_lock);
1365 1373
1366 /* 1374 /*
1367 * Now that all the timers on our list have the firing flag, 1375 * Now that all the timers on our list have the firing flag,
@@ -1389,10 +1397,9 @@ void run_posix_cpu_timers(struct task_struct *tsk)
1389 1397
1390/* 1398/*
1391 * Set one of the process-wide special case CPU timers. 1399 * Set one of the process-wide special case CPU timers.
1392 * The tasklist_lock and tsk->sighand->siglock must be held by the caller. 1400 * The tsk->sighand->siglock must be held by the caller.
1393 * The oldval argument is null for the RLIMIT_CPU timer, where *newval is 1401 * The *newval argument is relative and we update it to be absolute, *oldval
1394 * absolute; non-null for ITIMER_*, where *newval is relative and we update 1402 * is absolute and we update it to be relative.
1395 * it to be absolute, *oldval is absolute and we update it to be relative.
1396 */ 1403 */
1397void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx, 1404void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1398 cputime_t *newval, cputime_t *oldval) 1405 cputime_t *newval, cputime_t *oldval)
@@ -1401,7 +1408,7 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1401 struct list_head *head; 1408 struct list_head *head;
1402 1409
1403 BUG_ON(clock_idx == CPUCLOCK_SCHED); 1410 BUG_ON(clock_idx == CPUCLOCK_SCHED);
1404 cpu_clock_sample_group_locked(clock_idx, tsk, &now); 1411 cpu_clock_sample_group(clock_idx, tsk, &now);
1405 1412
1406 if (oldval) { 1413 if (oldval) {
1407 if (!cputime_eq(*oldval, cputime_zero)) { 1414 if (!cputime_eq(*oldval, cputime_zero)) {
@@ -1435,13 +1442,14 @@ void set_process_cpu_timer(struct task_struct *tsk, unsigned int clock_idx,
1435 cputime_ge(list_first_entry(head, 1442 cputime_ge(list_first_entry(head,
1436 struct cpu_timer_list, entry)->expires.cpu, 1443 struct cpu_timer_list, entry)->expires.cpu,
1437 *newval)) { 1444 *newval)) {
1438 /* 1445 switch (clock_idx) {
1439 * Rejigger each thread's expiry time so that one will 1446 case CPUCLOCK_PROF:
1440 * notice before we hit the process-cumulative expiry time. 1447 tsk->signal->cputime_expires.prof_exp = *newval;
1441 */ 1448 break;
1442 union cpu_time_count expires = { .sched = 0 }; 1449 case CPUCLOCK_VIRT:
1443 expires.cpu = *newval; 1450 tsk->signal->cputime_expires.virt_exp = *newval;
1444 process_timer_rebalance(tsk, clock_idx, expires, now); 1451 break;
1452 }
1445 } 1453 }
1446} 1454}
1447 1455
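
The hunks above drop the per-thread "rebalance" logic in favour of cached per-task and per-signal expiration times, which the new fastpath_timer_check() compares against a snapshot of the accumulated CPU time. The following standalone C sketch models only that comparison rule (a zero limit means "no timer armed"; a sample expires once it reaches a non-zero limit); the struct and function names here are invented for illustration and are not the kernel's.

#include <stdio.h>

struct cputime_sample {
    unsigned long long utime;            /* user time consumed */
    unsigned long long stime;            /* system time consumed */
    unsigned long long sum_exec_runtime; /* scheduler runtime */
};

/* mirrors the task_cputime_expired() rule: a zero limit is "unarmed" */
static int sample_expired(const struct cputime_sample *sample,
                          const struct cputime_sample *limits)
{
    if (limits->utime && sample->utime >= limits->utime)
        return 1;
    if (limits->stime &&
        sample->utime + sample->stime >= limits->stime)
        return 1;
    if (limits->sum_exec_runtime &&
        sample->sum_exec_runtime >= limits->sum_exec_runtime)
        return 1;
    return 0;
}

int main(void)
{
    struct cputime_sample limits = { .stime = 100 };             /* prof-style limit */
    struct cputime_sample sample = { .utime = 60, .stime = 50 };

    /* 60 + 50 >= 100, so the profiling-style limit has expired */
    printf("expired: %d\n", sample_expired(&sample, &limits));
    return 0;
}
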
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 5131e5471169..b931d7cedbfa 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -223,6 +223,15 @@ static int posix_ktime_get_ts(clockid_t which_clock, struct timespec *tp)
223} 223}
224 224
225/* 225/*
226 * Get monotonic time for posix timers
227 */
228static int posix_get_monotonic_raw(clockid_t which_clock, struct timespec *tp)
229{
230 getrawmonotonic(tp);
231 return 0;
232}
233
234/*
226 * Initialize everything, well, just everything in Posix clocks/timers ;) 235 * Initialize everything, well, just everything in Posix clocks/timers ;)
227 */ 236 */
228static __init int init_posix_timers(void) 237static __init int init_posix_timers(void)
@@ -235,9 +244,15 @@ static __init int init_posix_timers(void)
235 .clock_get = posix_ktime_get_ts, 244 .clock_get = posix_ktime_get_ts,
236 .clock_set = do_posix_clock_nosettime, 245 .clock_set = do_posix_clock_nosettime,
237 }; 246 };
247 struct k_clock clock_monotonic_raw = {
248 .clock_getres = hrtimer_get_res,
249 .clock_get = posix_get_monotonic_raw,
250 .clock_set = do_posix_clock_nosettime,
251 };
238 252
239 register_posix_clock(CLOCK_REALTIME, &clock_realtime); 253 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
240 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic); 254 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
255 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
241 256
242 posix_timers_cache = kmem_cache_create("posix_timers_cache", 257 posix_timers_cache = kmem_cache_create("posix_timers_cache",
243 sizeof (struct k_itimer), 0, SLAB_PANIC, 258 sizeof (struct k_itimer), 0, SLAB_PANIC,
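
With CLOCK_MONOTONIC_RAW registered above, user space can read the unslewed monotonic clock directly. A minimal example, assuming a kernel and C library that expose the CLOCK_MONOTONIC_RAW constant (older libcs may need -lrt and a manual definition of the constant):

#include <stdio.h>
#include <time.h>

int main(void)
{
    struct timespec ts;

    /* CLOCK_MONOTONIC_RAW ticks like CLOCK_MONOTONIC but is not
     * adjusted by NTP frequency corrections. */
    if (clock_gettime(CLOCK_MONOTONIC_RAW, &ts) != 0) {
        perror("clock_gettime");
        return 1;
    }
    printf("raw monotonic: %lld.%09ld\n",
           (long long)ts.tv_sec, ts.tv_nsec);
    return 0;
}
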
@@ -298,6 +313,7 @@ void do_schedule_next_timer(struct siginfo *info)
298 313
299int posix_timer_event(struct k_itimer *timr, int si_private) 314int posix_timer_event(struct k_itimer *timr, int si_private)
300{ 315{
316 int shared, ret;
301 /* 317 /*
302 * FIXME: if ->sigq is queued we can race with 318 * FIXME: if ->sigq is queued we can race with
303 * dequeue_signal()->do_schedule_next_timer(). 319 * dequeue_signal()->do_schedule_next_timer().
@@ -311,25 +327,10 @@ int posix_timer_event(struct k_itimer *timr, int si_private)
311 */ 327 */
312 timr->sigq->info.si_sys_private = si_private; 328 timr->sigq->info.si_sys_private = si_private;
313 329
314 timr->sigq->info.si_signo = timr->it_sigev_signo; 330 shared = !(timr->it_sigev_notify & SIGEV_THREAD_ID);
315 timr->sigq->info.si_code = SI_TIMER; 331 ret = send_sigqueue(timr->sigq, timr->it_process, shared);
316 timr->sigq->info.si_tid = timr->it_id; 332 /* If we failed to send the signal the timer stops. */
317 timr->sigq->info.si_value = timr->it_sigev_value; 333 return ret > 0;
318
319 if (timr->it_sigev_notify & SIGEV_THREAD_ID) {
320 struct task_struct *leader;
321 int ret = send_sigqueue(timr->sigq, timr->it_process, 0);
322
323 if (likely(ret >= 0))
324 return ret;
325
326 timr->it_sigev_notify = SIGEV_SIGNAL;
327 leader = timr->it_process->group_leader;
328 put_task_struct(timr->it_process);
329 timr->it_process = leader;
330 }
331
332 return send_sigqueue(timr->sigq, timr->it_process, 1);
333} 334}
334EXPORT_SYMBOL_GPL(posix_timer_event); 335EXPORT_SYMBOL_GPL(posix_timer_event);
335 336
@@ -468,11 +469,9 @@ sys_timer_create(const clockid_t which_clock,
468 struct sigevent __user *timer_event_spec, 469 struct sigevent __user *timer_event_spec,
469 timer_t __user * created_timer_id) 470 timer_t __user * created_timer_id)
470{ 471{
471 int error = 0; 472 struct k_itimer *new_timer;
472 struct k_itimer *new_timer = NULL; 473 int error, new_timer_id;
473 int new_timer_id; 474 struct task_struct *process;
474 struct task_struct *process = NULL;
475 unsigned long flags;
476 sigevent_t event; 475 sigevent_t event;
477 int it_id_set = IT_ID_NOT_SET; 476 int it_id_set = IT_ID_NOT_SET;
478 477
@@ -490,12 +489,11 @@ sys_timer_create(const clockid_t which_clock,
490 goto out; 489 goto out;
491 } 490 }
492 spin_lock_irq(&idr_lock); 491 spin_lock_irq(&idr_lock);
493 error = idr_get_new(&posix_timers_id, (void *) new_timer, 492 error = idr_get_new(&posix_timers_id, new_timer, &new_timer_id);
494 &new_timer_id);
495 spin_unlock_irq(&idr_lock); 493 spin_unlock_irq(&idr_lock);
496 if (error == -EAGAIN) 494 if (error) {
497 goto retry; 495 if (error == -EAGAIN)
498 else if (error) { 496 goto retry;
499 /* 497 /*
500 * Weird looking, but we return EAGAIN if the IDR is 498 * Weird looking, but we return EAGAIN if the IDR is
501 * full (proper POSIX return value for this) 499 * full (proper POSIX return value for this)
@@ -526,67 +524,43 @@ sys_timer_create(const clockid_t which_clock,
526 error = -EFAULT; 524 error = -EFAULT;
527 goto out; 525 goto out;
528 } 526 }
529 new_timer->it_sigev_notify = event.sigev_notify; 527 rcu_read_lock();
530 new_timer->it_sigev_signo = event.sigev_signo; 528 process = good_sigevent(&event);
531 new_timer->it_sigev_value = event.sigev_value; 529 if (process)
532 530 get_task_struct(process);
533 read_lock(&tasklist_lock); 531 rcu_read_unlock();
534 if ((process = good_sigevent(&event))) {
535 /*
536 * We may be setting up this process for another
537 * thread. It may be exiting. To catch this
538 * case the we check the PF_EXITING flag. If
539 * the flag is not set, the siglock will catch
540 * him before it is too late (in exit_itimers).
541 *
542 * The exec case is a bit more involved but easy
543 * to code. If the process is in our thread
544 * group (and it must be or we would not allow
545 * it here) and is doing an exec, it will cause
546 * us to be killed. In this case it will wait
547 * for us to die which means we can finish this
548 * linkage with our last gasp. I.e. no code :)
549 */
550 spin_lock_irqsave(&process->sighand->siglock, flags);
551 if (!(process->flags & PF_EXITING)) {
552 new_timer->it_process = process;
553 list_add(&new_timer->list,
554 &process->signal->posix_timers);
555 if (new_timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID))
556 get_task_struct(process);
557 spin_unlock_irqrestore(&process->sighand->siglock, flags);
558 } else {
559 spin_unlock_irqrestore(&process->sighand->siglock, flags);
560 process = NULL;
561 }
562 }
563 read_unlock(&tasklist_lock);
564 if (!process) { 532 if (!process) {
565 error = -EINVAL; 533 error = -EINVAL;
566 goto out; 534 goto out;
567 } 535 }
568 } else { 536 } else {
569 new_timer->it_sigev_notify = SIGEV_SIGNAL; 537 event.sigev_notify = SIGEV_SIGNAL;
570 new_timer->it_sigev_signo = SIGALRM; 538 event.sigev_signo = SIGALRM;
571 new_timer->it_sigev_value.sival_int = new_timer->it_id; 539 event.sigev_value.sival_int = new_timer->it_id;
572 process = current->group_leader; 540 process = current->group_leader;
573 spin_lock_irqsave(&process->sighand->siglock, flags); 541 get_task_struct(process);
574 new_timer->it_process = process;
575 list_add(&new_timer->list, &process->signal->posix_timers);
576 spin_unlock_irqrestore(&process->sighand->siglock, flags);
577 } 542 }
578 543
544 new_timer->it_sigev_notify = event.sigev_notify;
545 new_timer->sigq->info.si_signo = event.sigev_signo;
546 new_timer->sigq->info.si_value = event.sigev_value;
547 new_timer->sigq->info.si_tid = new_timer->it_id;
548 new_timer->sigq->info.si_code = SI_TIMER;
549
550 spin_lock_irq(&current->sighand->siglock);
551 new_timer->it_process = process;
552 list_add(&new_timer->list, &current->signal->posix_timers);
553 spin_unlock_irq(&current->sighand->siglock);
554
555 return 0;
579 /* 556 /*
580 * In the case of the timer belonging to another task, after 557 * In the case of the timer belonging to another task, after
581 * the task is unlocked, the timer is owned by the other task 558 * the task is unlocked, the timer is owned by the other task
582 * and may cease to exist at any time. Don't use or modify 559 * and may cease to exist at any time. Don't use or modify
583 * new_timer after the unlock call. 560 * new_timer after the unlock call.
584 */ 561 */
585
586out: 562out:
587 if (error) 563 release_posix_timer(new_timer, it_id_set);
588 release_posix_timer(new_timer, it_id_set);
589
590 return error; 564 return error;
591} 565}
592 566
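
The rewritten sys_timer_create() above keeps the user-visible behaviour: with a NULL sigevent the timer defaults to SIGEV_SIGNAL with SIGALRM aimed at the group leader. A small userspace check of that default path (link with -lrt on older glibc):

#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t fired;

static void on_alarm(int sig)
{
    (void)sig;
    fired = 1;
}

int main(void)
{
    timer_t timerid;
    struct itimerspec its = {
        .it_value = { .tv_sec = 0, .tv_nsec = 100 * 1000 * 1000 }
    };

    signal(SIGALRM, on_alarm);

    /* NULL sigevent: the kernel falls back to SIGEV_SIGNAL + SIGALRM */
    if (timer_create(CLOCK_REALTIME, NULL, &timerid) != 0) {
        perror("timer_create");
        return 1;
    }
    if (timer_settime(timerid, 0, &its, NULL) != 0) {
        perror("timer_settime");
        return 1;
    }
    while (!fired)
        usleep(10 * 1000);   /* simple poll; fine for a demo */
    puts("SIGALRM delivered via the default sigevent path");
    timer_delete(timerid);
    return 0;
}
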
@@ -597,7 +571,7 @@ out:
597 * the find to the timer lock. To avoid a dead lock, the timer id MUST 571 * the find to the timer lock. To avoid a dead lock, the timer id MUST
598 * be release with out holding the timer lock. 572 * be release with out holding the timer lock.
599 */ 573 */
600static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags) 574static struct k_itimer *lock_timer(timer_t timer_id, unsigned long *flags)
601{ 575{
602 struct k_itimer *timr; 576 struct k_itimer *timr;
603 /* 577 /*
@@ -605,23 +579,20 @@ static struct k_itimer * lock_timer(timer_t timer_id, unsigned long *flags)
605 * flags part over to the timer lock. Must not let interrupts in 579 * flags part over to the timer lock. Must not let interrupts in
606 * while we are moving the lock. 580 * while we are moving the lock.
607 */ 581 */
608
609 spin_lock_irqsave(&idr_lock, *flags); 582 spin_lock_irqsave(&idr_lock, *flags);
610 timr = (struct k_itimer *) idr_find(&posix_timers_id, (int) timer_id); 583 timr = idr_find(&posix_timers_id, (int)timer_id);
611 if (timr) { 584 if (timr) {
612 spin_lock(&timr->it_lock); 585 spin_lock(&timr->it_lock);
613 586 if (timr->it_process &&
614 if ((timr->it_id != timer_id) || !(timr->it_process) || 587 same_thread_group(timr->it_process, current)) {
615 !same_thread_group(timr->it_process, current)) {
616 spin_unlock(&timr->it_lock);
617 spin_unlock_irqrestore(&idr_lock, *flags);
618 timr = NULL;
619 } else
620 spin_unlock(&idr_lock); 588 spin_unlock(&idr_lock);
621 } else 589 return timr;
622 spin_unlock_irqrestore(&idr_lock, *flags); 590 }
591 spin_unlock(&timr->it_lock);
592 }
593 spin_unlock_irqrestore(&idr_lock, *flags);
623 594
624 return timr; 595 return NULL;
625} 596}
626 597
627/* 598/*
@@ -862,8 +833,7 @@ retry_delete:
862 * This keeps any tasks waiting on the spin lock from thinking 833 * This keeps any tasks waiting on the spin lock from thinking
863 * they got something (see the lock code above). 834 * they got something (see the lock code above).
864 */ 835 */
865 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) 836 put_task_struct(timer->it_process);
866 put_task_struct(timer->it_process);
867 timer->it_process = NULL; 837 timer->it_process = NULL;
868 838
869 unlock_timer(timer, flags); 839 unlock_timer(timer, flags);
@@ -890,8 +860,7 @@ retry_delete:
890 * This keeps any tasks waiting on the spin lock from thinking 860 * This keeps any tasks waiting on the spin lock from thinking
891 * they got something (see the lock code above). 861 * they got something (see the lock code above).
892 */ 862 */
893 if (timer->it_sigev_notify == (SIGEV_SIGNAL|SIGEV_THREAD_ID)) 863 put_task_struct(timer->it_process);
894 put_task_struct(timer->it_process);
895 timer->it_process = NULL; 864 timer->it_process = NULL;
896 865
897 unlock_timer(timer, flags); 866 unlock_timer(timer, flags);
diff --git a/kernel/power/disk.c b/kernel/power/disk.c
index bbd85c60f741..331f9836383f 100644
--- a/kernel/power/disk.c
+++ b/kernel/power/disk.c
@@ -14,6 +14,7 @@
14#include <linux/reboot.h> 14#include <linux/reboot.h>
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/device.h> 16#include <linux/device.h>
17#include <linux/kmod.h>
17#include <linux/delay.h> 18#include <linux/delay.h>
18#include <linux/fs.h> 19#include <linux/fs.h>
19#include <linux/mount.h> 20#include <linux/mount.h>
@@ -520,6 +521,10 @@ int hibernate(void)
520 if (error) 521 if (error)
521 goto Exit; 522 goto Exit;
522 523
524 error = usermodehelper_disable();
525 if (error)
526 goto Exit;
527
523 /* Allocate memory management structures */ 528 /* Allocate memory management structures */
524 error = create_basic_memory_bitmaps(); 529 error = create_basic_memory_bitmaps();
525 if (error) 530 if (error)
@@ -558,6 +563,7 @@ int hibernate(void)
558 thaw_processes(); 563 thaw_processes();
559 Finish: 564 Finish:
560 free_basic_memory_bitmaps(); 565 free_basic_memory_bitmaps();
566 usermodehelper_enable();
561 Exit: 567 Exit:
562 pm_notifier_call_chain(PM_POST_HIBERNATION); 568 pm_notifier_call_chain(PM_POST_HIBERNATION);
563 pm_restore_console(); 569 pm_restore_console();
@@ -634,6 +640,10 @@ static int software_resume(void)
634 if (error) 640 if (error)
635 goto Finish; 641 goto Finish;
636 642
643 error = usermodehelper_disable();
644 if (error)
645 goto Finish;
646
637 error = create_basic_memory_bitmaps(); 647 error = create_basic_memory_bitmaps();
638 if (error) 648 if (error)
639 goto Finish; 649 goto Finish;
@@ -656,6 +666,7 @@ static int software_resume(void)
656 thaw_processes(); 666 thaw_processes();
657 Done: 667 Done:
658 free_basic_memory_bitmaps(); 668 free_basic_memory_bitmaps();
669 usermodehelper_enable();
659 Finish: 670 Finish:
660 pm_notifier_call_chain(PM_POST_RESTORE); 671 pm_notifier_call_chain(PM_POST_RESTORE);
661 pm_restore_console(); 672 pm_restore_console();
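
The hibernate()/software_resume() changes above follow one pattern: disable the usermode helper before freezing tasks and re-enable it on every exit path, using the file's existing goto-label unwinding. A standalone sketch of that bracketing style, with invented step names standing in for usermodehelper_disable()/freeze_processes() and friends, shows why each failure jumps to the label that undoes only what already succeeded:

#include <stdio.h>

/* invented stand-ins for the real helpers */
static int  disable_helpers(void) { puts("helpers disabled"); return 0; }
static void enable_helpers(void)  { puts("helpers enabled");  }
static int  freeze_tasks(void)    { puts("tasks frozen");     return 0; }
static void thaw_tasks(void)      { puts("tasks thawed");     }

static int do_transition(void)
{
    int error;

    error = disable_helpers();
    if (error)
        goto Exit;                 /* nothing to undo yet */

    error = freeze_tasks();
    if (error)
        goto Enable;               /* undo only the disable */

    puts("... enter low-power state ...");

    thaw_tasks();
 Enable:
    enable_helpers();
 Exit:
    return error;
}

int main(void)
{
    return do_transition();
}
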
diff --git a/kernel/power/main.c b/kernel/power/main.c
index 540b16b68565..19122cf6d827 100644
--- a/kernel/power/main.c
+++ b/kernel/power/main.c
@@ -14,6 +14,7 @@
14#include <linux/string.h> 14#include <linux/string.h>
15#include <linux/delay.h> 15#include <linux/delay.h>
16#include <linux/errno.h> 16#include <linux/errno.h>
17#include <linux/kmod.h>
17#include <linux/init.h> 18#include <linux/init.h>
18#include <linux/console.h> 19#include <linux/console.h>
19#include <linux/cpu.h> 20#include <linux/cpu.h>
@@ -237,6 +238,10 @@ static int suspend_prepare(void)
237 if (error) 238 if (error)
238 goto Finish; 239 goto Finish;
239 240
241 error = usermodehelper_disable();
242 if (error)
243 goto Finish;
244
240 if (suspend_freeze_processes()) { 245 if (suspend_freeze_processes()) {
241 error = -EAGAIN; 246 error = -EAGAIN;
242 goto Thaw; 247 goto Thaw;
@@ -256,6 +261,7 @@ static int suspend_prepare(void)
256 261
257 Thaw: 262 Thaw:
258 suspend_thaw_processes(); 263 suspend_thaw_processes();
264 usermodehelper_enable();
259 Finish: 265 Finish:
260 pm_notifier_call_chain(PM_POST_SUSPEND); 266 pm_notifier_call_chain(PM_POST_SUSPEND);
261 pm_restore_console(); 267 pm_restore_console();
@@ -376,6 +382,7 @@ int suspend_devices_and_enter(suspend_state_t state)
376static void suspend_finish(void) 382static void suspend_finish(void)
377{ 383{
378 suspend_thaw_processes(); 384 suspend_thaw_processes();
385 usermodehelper_enable();
379 pm_notifier_call_chain(PM_POST_SUSPEND); 386 pm_notifier_call_chain(PM_POST_SUSPEND);
380 pm_restore_console(); 387 pm_restore_console();
381} 388}
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 278946aecaf0..ca634019497a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -28,121 +28,6 @@ static inline int freezeable(struct task_struct * p)
28 return 1; 28 return 1;
29} 29}
30 30
31/*
32 * freezing is complete, mark current process as frozen
33 */
34static inline void frozen_process(void)
35{
36 if (!unlikely(current->flags & PF_NOFREEZE)) {
37 current->flags |= PF_FROZEN;
38 wmb();
39 }
40 clear_freeze_flag(current);
41}
42
43/* Refrigerator is place where frozen processes are stored :-). */
44void refrigerator(void)
45{
46 /* Hmm, should we be allowed to suspend when there are realtime
47 processes around? */
48 long save;
49
50 task_lock(current);
51 if (freezing(current)) {
52 frozen_process();
53 task_unlock(current);
54 } else {
55 task_unlock(current);
56 return;
57 }
58 save = current->state;
59 pr_debug("%s entered refrigerator\n", current->comm);
60
61 spin_lock_irq(&current->sighand->siglock);
62 recalc_sigpending(); /* We sent fake signal, clean it up */
63 spin_unlock_irq(&current->sighand->siglock);
64
65 for (;;) {
66 set_current_state(TASK_UNINTERRUPTIBLE);
67 if (!frozen(current))
68 break;
69 schedule();
70 }
71 pr_debug("%s left refrigerator\n", current->comm);
72 __set_current_state(save);
73}
74
75static void fake_signal_wake_up(struct task_struct *p)
76{
77 unsigned long flags;
78
79 spin_lock_irqsave(&p->sighand->siglock, flags);
80 signal_wake_up(p, 0);
81 spin_unlock_irqrestore(&p->sighand->siglock, flags);
82}
83
84static inline bool should_send_signal(struct task_struct *p)
85{
86 return !(p->flags & PF_FREEZER_NOSIG);
87}
88
89/**
90 * freeze_task - send a freeze request to given task
91 * @p: task to send the request to
92 * @sig_only: if set, the request will only be sent if the task has the
93 * PF_FREEZER_NOSIG flag unset
94 * Return value: 'false', if @sig_only is set and the task has
95 * PF_FREEZER_NOSIG set or the task is frozen, 'true', otherwise
96 *
97 * The freeze request is sent by setting the tasks's TIF_FREEZE flag and
98 * either sending a fake signal to it or waking it up, depending on whether
99 * or not it has PF_FREEZER_NOSIG set. If @sig_only is set and the task
100 * has PF_FREEZER_NOSIG set (ie. it is a typical kernel thread), its
101 * TIF_FREEZE flag will not be set.
102 */
103static bool freeze_task(struct task_struct *p, bool sig_only)
104{
105 /*
106 * We first check if the task is freezing and next if it has already
107 * been frozen to avoid the race with frozen_process() which first marks
108 * the task as frozen and next clears its TIF_FREEZE.
109 */
110 if (!freezing(p)) {
111 rmb();
112 if (frozen(p))
113 return false;
114
115 if (!sig_only || should_send_signal(p))
116 set_freeze_flag(p);
117 else
118 return false;
119 }
120
121 if (should_send_signal(p)) {
122 if (!signal_pending(p))
123 fake_signal_wake_up(p);
124 } else if (sig_only) {
125 return false;
126 } else {
127 wake_up_state(p, TASK_INTERRUPTIBLE);
128 }
129
130 return true;
131}
132
133static void cancel_freezing(struct task_struct *p)
134{
135 unsigned long flags;
136
137 if (freezing(p)) {
138 pr_debug(" clean up: %s\n", p->comm);
139 clear_freeze_flag(p);
140 spin_lock_irqsave(&p->sighand->siglock, flags);
141 recalc_sigpending_and_wake(p);
142 spin_unlock_irqrestore(&p->sighand->siglock, flags);
143 }
144}
145
146static int try_to_freeze_tasks(bool sig_only) 31static int try_to_freeze_tasks(bool sig_only)
147{ 32{
148 struct task_struct *g, *p; 33 struct task_struct *g, *p;
@@ -250,6 +135,9 @@ static void thaw_tasks(bool nosig_only)
250 if (nosig_only && should_send_signal(p)) 135 if (nosig_only && should_send_signal(p))
251 continue; 136 continue;
252 137
138 if (cgroup_frozen(p))
139 continue;
140
253 thaw_process(p); 141 thaw_process(p);
254 } while_each_thread(g, p); 142 } while_each_thread(g, p);
255 read_unlock(&tasklist_lock); 143 read_unlock(&tasklist_lock);
@@ -264,4 +152,3 @@ void thaw_processes(void)
264 printk("done.\n"); 152 printk("done.\n");
265} 153}
266 154
267EXPORT_SYMBOL(refrigerator);
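
The refrigerator()/freeze_task() code removed here (the freezer now lives in shared code selected by the new FREEZER option) is essentially a two-flag handshake: the freezer raises a "please freeze" request and wakes the task, and the task acknowledges by marking itself frozen and parking until the request is cleared. A toy userspace model of that handshake, with pthread condition waits standing in for the kernel's schedule() loop and all names invented here (compile with -pthread):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static bool freeze_requested;   /* plays the role of TIF_FREEZE */
static bool frozen;             /* plays the role of PF_FROZEN  */

static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    while (!freeze_requested)          /* "work" until asked to freeze */
        pthread_cond_wait(&cond, &lock);
    frozen = true;                     /* enter the refrigerator */
    pthread_cond_broadcast(&cond);
    while (freeze_requested)           /* park until thawed */
        pthread_cond_wait(&cond, &lock);
    frozen = false;
    pthread_mutex_unlock(&lock);
    puts("worker left the refrigerator");
    return NULL;
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, worker, NULL);

    pthread_mutex_lock(&lock);
    freeze_requested = true;           /* the freeze request */
    pthread_cond_broadcast(&cond);
    while (!frozen)                    /* wait for acknowledgement */
        pthread_cond_wait(&cond, &lock);
    puts("worker is frozen");
    freeze_requested = false;          /* thaw */
    pthread_cond_broadcast(&cond);
    pthread_mutex_unlock(&lock);

    pthread_join(t, NULL);
    return 0;
}
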
diff --git a/kernel/power/user.c b/kernel/power/user.c
index a6332a313262..005b93d839ba 100644
--- a/kernel/power/user.c
+++ b/kernel/power/user.c
@@ -212,13 +212,20 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
212 case SNAPSHOT_FREEZE: 212 case SNAPSHOT_FREEZE:
213 if (data->frozen) 213 if (data->frozen)
214 break; 214 break;
215
215 printk("Syncing filesystems ... "); 216 printk("Syncing filesystems ... ");
216 sys_sync(); 217 sys_sync();
217 printk("done.\n"); 218 printk("done.\n");
218 219
219 error = freeze_processes(); 220 error = usermodehelper_disable();
220 if (error) 221 if (error)
222 break;
223
224 error = freeze_processes();
225 if (error) {
221 thaw_processes(); 226 thaw_processes();
227 usermodehelper_enable();
228 }
222 if (!error) 229 if (!error)
223 data->frozen = 1; 230 data->frozen = 1;
224 break; 231 break;
@@ -227,6 +234,7 @@ static long snapshot_ioctl(struct file *filp, unsigned int cmd,
227 if (!data->frozen || data->ready) 234 if (!data->frozen || data->ready)
228 break; 235 break;
229 thaw_processes(); 236 thaw_processes();
237 usermodehelper_enable();
230 data->frozen = 0; 238 data->frozen = 0;
231 break; 239 break;
232 240
diff --git a/kernel/printk.c b/kernel/printk.c
index b51b1567bb55..6341af77eb65 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -13,7 +13,7 @@
13 * Fixed SMP synchronization, 08/08/99, Manfred Spraul 13 * Fixed SMP synchronization, 08/08/99, Manfred Spraul
14 * manfred@colorfullife.com 14 * manfred@colorfullife.com
15 * Rewrote bits to get rid of console_lock 15 * Rewrote bits to get rid of console_lock
16 * 01Mar01 Andrew Morton <andrewm@uow.edu.au> 16 * 01Mar01 Andrew Morton
17 */ 17 */
18 18
19#include <linux/kernel.h> 19#include <linux/kernel.h>
@@ -577,9 +577,6 @@ static int have_callable_console(void)
577 * @fmt: format string 577 * @fmt: format string
578 * 578 *
579 * This is printk(). It can be called from any context. We want it to work. 579 * This is printk(). It can be called from any context. We want it to work.
580 * Be aware of the fact that if oops_in_progress is not set, we might try to
581 * wake klogd up which could deadlock on runqueue lock if printk() is called
582 * from scheduler code.
583 * 580 *
584 * We try to grab the console_sem. If we succeed, it's easy - we log the output and 581 * We try to grab the console_sem. If we succeed, it's easy - we log the output and
585 * call the console drivers. If we fail to get the semaphore we place the output 582 * call the console drivers. If we fail to get the semaphore we place the output
@@ -593,6 +590,8 @@ static int have_callable_console(void)
593 * 590 *
594 * See also: 591 * See also:
595 * printf(3) 592 * printf(3)
593 *
594 * See the vsnprintf() documentation for format string extensions over C99.
596 */ 595 */
597 596
598asmlinkage int printk(const char *fmt, ...) 597asmlinkage int printk(const char *fmt, ...)
@@ -982,10 +981,25 @@ int is_console_locked(void)
982 return console_locked; 981 return console_locked;
983} 982}
984 983
985void wake_up_klogd(void) 984static DEFINE_PER_CPU(int, printk_pending);
985
986void printk_tick(void)
986{ 987{
987 if (!oops_in_progress && waitqueue_active(&log_wait)) 988 if (__get_cpu_var(printk_pending)) {
989 __get_cpu_var(printk_pending) = 0;
988 wake_up_interruptible(&log_wait); 990 wake_up_interruptible(&log_wait);
991 }
992}
993
994int printk_needs_cpu(int cpu)
995{
996 return per_cpu(printk_pending, cpu);
997}
998
999void wake_up_klogd(void)
1000{
1001 if (waitqueue_active(&log_wait))
1002 __raw_get_cpu_var(printk_pending) = 1;
989} 1003}
990 1004
991/** 1005/**
@@ -1291,22 +1305,6 @@ static int __init disable_boot_consoles(void)
1291} 1305}
1292late_initcall(disable_boot_consoles); 1306late_initcall(disable_boot_consoles);
1293 1307
1294/**
1295 * tty_write_message - write a message to a certain tty, not just the console.
1296 * @tty: the destination tty_struct
1297 * @msg: the message to write
1298 *
1299 * This is used for messages that need to be redirected to a specific tty.
1300 * We don't put it into the syslog queue right now maybe in the future if
1301 * really needed.
1302 */
1303void tty_write_message(struct tty_struct *tty, char *msg)
1304{
1305 if (tty && tty->ops->write)
1306 tty->ops->write(tty, msg, strlen(msg));
1307 return;
1308}
1309
1310#if defined CONFIG_PRINTK 1308#if defined CONFIG_PRINTK
1311 1309
1312/* 1310/*
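
The printk() change above stops waking klogd directly: printk() only marks a per-CPU printk_pending flag, and printk_tick(), run from the timer tick, performs the actual wake-up, which keeps printk() safe to call from scheduler code without risking a runqueue-lock deadlock. A toy single-CPU model of that deferral, with names invented here:

#include <stdbool.h>
#include <stdio.h>

static bool pending;                     /* models the per-CPU printk_pending */

static void toy_wake_up_klogd(void)      /* called from printk() */
{
    pending = true;                      /* just record the request */
}

static void toy_printk_tick(void)        /* called from the timer interrupt */
{
    if (pending) {
        pending = false;
        puts("waking log readers");      /* wake_up_interruptible() in the kernel */
    }
}

int main(void)
{
    toy_wake_up_klogd();
    toy_printk_tick();                   /* performs the deferred wake-up */
    toy_printk_tick();                   /* nothing pending, does nothing */
    return 0;
}
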
diff --git a/kernel/profile.c b/kernel/profile.c
index cd26bed4cc26..a9e422df6bf6 100644
--- a/kernel/profile.c
+++ b/kernel/profile.c
@@ -22,6 +22,8 @@
22#include <linux/cpu.h> 22#include <linux/cpu.h>
23#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <linux/mutex.h> 24#include <linux/mutex.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
25#include <asm/sections.h> 27#include <asm/sections.h>
26#include <asm/irq_regs.h> 28#include <asm/irq_regs.h>
27#include <asm/ptrace.h> 29#include <asm/ptrace.h>
@@ -50,11 +52,11 @@ static DEFINE_PER_CPU(int, cpu_profile_flip);
50static DEFINE_MUTEX(profile_flip_mutex); 52static DEFINE_MUTEX(profile_flip_mutex);
51#endif /* CONFIG_SMP */ 53#endif /* CONFIG_SMP */
52 54
53static int __init profile_setup(char *str) 55int profile_setup(char *str)
54{ 56{
55 static char __initdata schedstr[] = "schedule"; 57 static char schedstr[] = "schedule";
56 static char __initdata sleepstr[] = "sleep"; 58 static char sleepstr[] = "sleep";
57 static char __initdata kvmstr[] = "kvm"; 59 static char kvmstr[] = "kvm";
58 int par; 60 int par;
59 61
60 if (!strncmp(str, sleepstr, strlen(sleepstr))) { 62 if (!strncmp(str, sleepstr, strlen(sleepstr))) {
@@ -100,14 +102,33 @@ static int __init profile_setup(char *str)
100__setup("profile=", profile_setup); 102__setup("profile=", profile_setup);
101 103
102 104
103void __init profile_init(void) 105int profile_init(void)
104{ 106{
107 int buffer_bytes;
105 if (!prof_on) 108 if (!prof_on)
106 return; 109 return 0;
107 110
108 /* only text is profiled */ 111 /* only text is profiled */
109 prof_len = (_etext - _stext) >> prof_shift; 112 prof_len = (_etext - _stext) >> prof_shift;
110 prof_buffer = alloc_bootmem(prof_len*sizeof(atomic_t)); 113 buffer_bytes = prof_len*sizeof(atomic_t);
114 if (!slab_is_available()) {
115 prof_buffer = alloc_bootmem(buffer_bytes);
116 return 0;
117 }
118
119 prof_buffer = kzalloc(buffer_bytes, GFP_KERNEL);
120 if (prof_buffer)
121 return 0;
122
123 prof_buffer = alloc_pages_exact(buffer_bytes, GFP_KERNEL|__GFP_ZERO);
124 if (prof_buffer)
125 return 0;
126
127 prof_buffer = vmalloc(buffer_bytes);
128 if (prof_buffer)
129 return 0;
130
131 return -ENOMEM;
111} 132}
112 133
113/* Profile event notifications */ 134/* Profile event notifications */
@@ -527,7 +548,7 @@ static void __init profile_nop(void *unused)
527{ 548{
528} 549}
529 550
530static int __init create_hash_tables(void) 551static int create_hash_tables(void)
531{ 552{
532 int cpu; 553 int cpu;
533 554
@@ -575,14 +596,14 @@ out_cleanup:
575#define create_hash_tables() ({ 0; }) 596#define create_hash_tables() ({ 0; })
576#endif 597#endif
577 598
578static int __init create_proc_profile(void) 599int create_proc_profile(void)
579{ 600{
580 struct proc_dir_entry *entry; 601 struct proc_dir_entry *entry;
581 602
582 if (!prof_on) 603 if (!prof_on)
583 return 0; 604 return 0;
584 if (create_hash_tables()) 605 if (create_hash_tables())
585 return -1; 606 return -ENOMEM;
586 entry = proc_create("profile", S_IWUSR | S_IRUGO, 607 entry = proc_create("profile", S_IWUSR | S_IRUGO,
587 NULL, &proc_profile_operations); 608 NULL, &proc_profile_operations);
588 if (!entry) 609 if (!entry)
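
profile_init() above now returns an error code and tries allocators in order of increasing generality: bootmem before the slab is up, then kzalloc(), alloc_pages_exact() and finally vmalloc(). A userspace sketch of the same fallback shape, where the three try_* helpers are invented stand-ins for those kernel allocators:

#include <stdio.h>
#include <stdlib.h>

/* invented stand-ins for kzalloc(), alloc_pages_exact() and vmalloc() */
static void *try_slab(size_t n)    { return n <= 128 * 1024 ? calloc(1, n) : NULL; }
static void *try_pages(size_t n)   { return calloc(1, n); }
static void *try_vmalloc(size_t n) { return calloc(1, n); }

static void *alloc_profile_buffer(size_t n)
{
    void *p;

    if ((p = try_slab(n)))
        return p;
    if ((p = try_pages(n)))
        return p;
    return try_vmalloc(n);               /* NULL here maps to -ENOMEM */
}

int main(void)
{
    void *buf = alloc_profile_buffer(1 << 20);

    if (!buf) {
        fprintf(stderr, "profile buffer: out of memory\n");
        return 1;
    }
    puts("profile buffer allocated via fallback chain");
    free(buf);
    return 0;
}
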
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 356699a96d56..1e68e4c39e2c 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -45,7 +45,7 @@ void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
45 * TASK_TRACED, resume it now. 45 * TASK_TRACED, resume it now.
46 * Requires that irqs be disabled. 46 * Requires that irqs be disabled.
47 */ 47 */
48void ptrace_untrace(struct task_struct *child) 48static void ptrace_untrace(struct task_struct *child)
49{ 49{
50 spin_lock(&child->sighand->siglock); 50 spin_lock(&child->sighand->siglock);
51 if (task_is_traced(child)) { 51 if (task_is_traced(child)) {
diff --git a/kernel/rcuclassic.c b/kernel/rcuclassic.c
index aad93cdc9f68..37f72e551542 100644
--- a/kernel/rcuclassic.c
+++ b/kernel/rcuclassic.c
@@ -47,6 +47,7 @@
47#include <linux/notifier.h> 47#include <linux/notifier.h>
48#include <linux/cpu.h> 48#include <linux/cpu.h>
49#include <linux/mutex.h> 49#include <linux/mutex.h>
50#include <linux/time.h>
50 51
51#ifdef CONFIG_DEBUG_LOCK_ALLOC 52#ifdef CONFIG_DEBUG_LOCK_ALLOC
52static struct lock_class_key rcu_lock_key; 53static struct lock_class_key rcu_lock_key;
@@ -60,12 +61,14 @@ EXPORT_SYMBOL_GPL(rcu_lock_map);
60static struct rcu_ctrlblk rcu_ctrlblk = { 61static struct rcu_ctrlblk rcu_ctrlblk = {
61 .cur = -300, 62 .cur = -300,
62 .completed = -300, 63 .completed = -300,
64 .pending = -300,
63 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock), 65 .lock = __SPIN_LOCK_UNLOCKED(&rcu_ctrlblk.lock),
64 .cpumask = CPU_MASK_NONE, 66 .cpumask = CPU_MASK_NONE,
65}; 67};
66static struct rcu_ctrlblk rcu_bh_ctrlblk = { 68static struct rcu_ctrlblk rcu_bh_ctrlblk = {
67 .cur = -300, 69 .cur = -300,
68 .completed = -300, 70 .completed = -300,
71 .pending = -300,
69 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock), 72 .lock = __SPIN_LOCK_UNLOCKED(&rcu_bh_ctrlblk.lock),
70 .cpumask = CPU_MASK_NONE, 73 .cpumask = CPU_MASK_NONE,
71}; 74};
@@ -83,7 +86,10 @@ static void force_quiescent_state(struct rcu_data *rdp,
83{ 86{
84 int cpu; 87 int cpu;
85 cpumask_t cpumask; 88 cpumask_t cpumask;
89 unsigned long flags;
90
86 set_need_resched(); 91 set_need_resched();
92 spin_lock_irqsave(&rcp->lock, flags);
87 if (unlikely(!rcp->signaled)) { 93 if (unlikely(!rcp->signaled)) {
88 rcp->signaled = 1; 94 rcp->signaled = 1;
89 /* 95 /*
@@ -109,6 +115,7 @@ static void force_quiescent_state(struct rcu_data *rdp,
109 for_each_cpu_mask_nr(cpu, cpumask) 115 for_each_cpu_mask_nr(cpu, cpumask)
110 smp_send_reschedule(cpu); 116 smp_send_reschedule(cpu);
111 } 117 }
118 spin_unlock_irqrestore(&rcp->lock, flags);
112} 119}
113#else 120#else
114static inline void force_quiescent_state(struct rcu_data *rdp, 121static inline void force_quiescent_state(struct rcu_data *rdp,
@@ -118,6 +125,126 @@ static inline void force_quiescent_state(struct rcu_data *rdp,
118} 125}
119#endif 126#endif
120 127
128static void __call_rcu(struct rcu_head *head, struct rcu_ctrlblk *rcp,
129 struct rcu_data *rdp)
130{
131 long batch;
132
133 head->next = NULL;
134 smp_mb(); /* Read of rcu->cur must happen after any change by caller. */
135
136 /*
137 * Determine the batch number of this callback.
138 *
139 * Using ACCESS_ONCE to avoid the following error when gcc eliminates
 140 * local variable "batch" and emits code like this:
141 * 1) rdp->batch = rcp->cur + 1 # gets old value
142 * ......
 143 * 2) rcu_batch_after(rcp->cur + 1, rdp->batch) # gets new value
144 * then [*nxttail[0], *nxttail[1]) may contain callbacks
145 * that batch# = rdp->batch, see the comment of struct rcu_data.
146 */
147 batch = ACCESS_ONCE(rcp->cur) + 1;
148
149 if (rdp->nxtlist && rcu_batch_after(batch, rdp->batch)) {
150 /* process callbacks */
151 rdp->nxttail[0] = rdp->nxttail[1];
152 rdp->nxttail[1] = rdp->nxttail[2];
153 if (rcu_batch_after(batch - 1, rdp->batch))
154 rdp->nxttail[0] = rdp->nxttail[2];
155 }
156
157 rdp->batch = batch;
158 *rdp->nxttail[2] = head;
159 rdp->nxttail[2] = &head->next;
160
161 if (unlikely(++rdp->qlen > qhimark)) {
162 rdp->blimit = INT_MAX;
163 force_quiescent_state(rdp, &rcu_ctrlblk);
164 }
165}
166
167#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
168
169static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
170{
171 rcp->gp_start = jiffies;
172 rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_CHECK;
173}
174
175static void print_other_cpu_stall(struct rcu_ctrlblk *rcp)
176{
177 int cpu;
178 long delta;
179 unsigned long flags;
180
181 /* Only let one CPU complain about others per time interval. */
182
183 spin_lock_irqsave(&rcp->lock, flags);
184 delta = jiffies - rcp->jiffies_stall;
185 if (delta < 2 || rcp->cur != rcp->completed) {
186 spin_unlock_irqrestore(&rcp->lock, flags);
187 return;
188 }
189 rcp->jiffies_stall = jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
190 spin_unlock_irqrestore(&rcp->lock, flags);
191
192 /* OK, time to rat on our buddy... */
193
194 printk(KERN_ERR "RCU detected CPU stalls:");
195 for_each_possible_cpu(cpu) {
196 if (cpu_isset(cpu, rcp->cpumask))
197 printk(" %d", cpu);
198 }
199 printk(" (detected by %d, t=%ld jiffies)\n",
200 smp_processor_id(), (long)(jiffies - rcp->gp_start));
201}
202
203static void print_cpu_stall(struct rcu_ctrlblk *rcp)
204{
205 unsigned long flags;
206
207 printk(KERN_ERR "RCU detected CPU %d stall (t=%lu/%lu jiffies)\n",
208 smp_processor_id(), jiffies,
209 jiffies - rcp->gp_start);
210 dump_stack();
211 spin_lock_irqsave(&rcp->lock, flags);
212 if ((long)(jiffies - rcp->jiffies_stall) >= 0)
213 rcp->jiffies_stall =
214 jiffies + RCU_SECONDS_TILL_STALL_RECHECK;
215 spin_unlock_irqrestore(&rcp->lock, flags);
216 set_need_resched(); /* kick ourselves to get things going. */
217}
218
219static void check_cpu_stall(struct rcu_ctrlblk *rcp)
220{
221 long delta;
222
223 delta = jiffies - rcp->jiffies_stall;
224 if (cpu_isset(smp_processor_id(), rcp->cpumask) && delta >= 0) {
225
226 /* We haven't checked in, so go dump stack. */
227 print_cpu_stall(rcp);
228
229 } else if (rcp->cur != rcp->completed && delta >= 2) {
230
231 /* They had two seconds to dump stack, so complain. */
232 print_other_cpu_stall(rcp);
233 }
234}
235
236#else /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
237
238static void record_gp_stall_check_time(struct rcu_ctrlblk *rcp)
239{
240}
241
242static inline void check_cpu_stall(struct rcu_ctrlblk *rcp)
243{
244}
245
246#endif /* #else #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
247
121/** 248/**
122 * call_rcu - Queue an RCU callback for invocation after a grace period. 249 * call_rcu - Queue an RCU callback for invocation after a grace period.
123 * @head: structure to be used for queueing the RCU updates. 250 * @head: structure to be used for queueing the RCU updates.
@@ -133,18 +260,10 @@ void call_rcu(struct rcu_head *head,
133 void (*func)(struct rcu_head *rcu)) 260 void (*func)(struct rcu_head *rcu))
134{ 261{
135 unsigned long flags; 262 unsigned long flags;
136 struct rcu_data *rdp;
137 263
138 head->func = func; 264 head->func = func;
139 head->next = NULL;
140 local_irq_save(flags); 265 local_irq_save(flags);
141 rdp = &__get_cpu_var(rcu_data); 266 __call_rcu(head, &rcu_ctrlblk, &__get_cpu_var(rcu_data));
142 *rdp->nxttail = head;
143 rdp->nxttail = &head->next;
144 if (unlikely(++rdp->qlen > qhimark)) {
145 rdp->blimit = INT_MAX;
146 force_quiescent_state(rdp, &rcu_ctrlblk);
147 }
148 local_irq_restore(flags); 267 local_irq_restore(flags);
149} 268}
150EXPORT_SYMBOL_GPL(call_rcu); 269EXPORT_SYMBOL_GPL(call_rcu);
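
call_rcu() and call_rcu_bh() now funnel into __call_rcu(), which computes the callback's batch number from a single ACCESS_ONCE() read of rcp->cur so the compiler cannot reload the variable and observe two different values, as the comment in the hunk above explains. A standalone illustration of that idiom (the macro below follows the kernel's usual definition and relies on GCC's typeof extension; the "cur" variable is invented to model rcp->cur):

#include <stdio.h>

#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))

static long cur = -300;                  /* models rcp->cur */

int main(void)
{
    /* one volatile load: "batch" cannot silently change later */
    long batch = ACCESS_ONCE(cur) + 1;

    printf("queue callback for batch %ld\n", batch);
    return 0;
}
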
@@ -169,20 +288,10 @@ void call_rcu_bh(struct rcu_head *head,
169 void (*func)(struct rcu_head *rcu)) 288 void (*func)(struct rcu_head *rcu))
170{ 289{
171 unsigned long flags; 290 unsigned long flags;
172 struct rcu_data *rdp;
173 291
174 head->func = func; 292 head->func = func;
175 head->next = NULL;
176 local_irq_save(flags); 293 local_irq_save(flags);
177 rdp = &__get_cpu_var(rcu_bh_data); 294 __call_rcu(head, &rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
178 *rdp->nxttail = head;
179 rdp->nxttail = &head->next;
180
181 if (unlikely(++rdp->qlen > qhimark)) {
182 rdp->blimit = INT_MAX;
183 force_quiescent_state(rdp, &rcu_bh_ctrlblk);
184 }
185
186 local_irq_restore(flags); 295 local_irq_restore(flags);
187} 296}
188EXPORT_SYMBOL_GPL(call_rcu_bh); 297EXPORT_SYMBOL_GPL(call_rcu_bh);
@@ -211,12 +320,6 @@ EXPORT_SYMBOL_GPL(rcu_batches_completed_bh);
211static inline void raise_rcu_softirq(void) 320static inline void raise_rcu_softirq(void)
212{ 321{
213 raise_softirq(RCU_SOFTIRQ); 322 raise_softirq(RCU_SOFTIRQ);
214 /*
215 * The smp_mb() here is required to ensure that this cpu's
216 * __rcu_process_callbacks() reads the most recently updated
217 * value of rcu->cur.
218 */
219 smp_mb();
220} 323}
221 324
222/* 325/*
@@ -225,6 +328,7 @@ static inline void raise_rcu_softirq(void)
225 */ 328 */
226static void rcu_do_batch(struct rcu_data *rdp) 329static void rcu_do_batch(struct rcu_data *rdp)
227{ 330{
331 unsigned long flags;
228 struct rcu_head *next, *list; 332 struct rcu_head *next, *list;
229 int count = 0; 333 int count = 0;
230 334
@@ -239,9 +343,9 @@ static void rcu_do_batch(struct rcu_data *rdp)
239 } 343 }
240 rdp->donelist = list; 344 rdp->donelist = list;
241 345
242 local_irq_disable(); 346 local_irq_save(flags);
243 rdp->qlen -= count; 347 rdp->qlen -= count;
244 local_irq_enable(); 348 local_irq_restore(flags);
245 if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark) 349 if (rdp->blimit == INT_MAX && rdp->qlen <= qlowmark)
246 rdp->blimit = blimit; 350 rdp->blimit = blimit;
247 351
@@ -269,6 +373,7 @@ static void rcu_do_batch(struct rcu_data *rdp)
269 * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace 373 * rcu_check_quiescent_state calls rcu_start_batch(0) to start the next grace
270 * period (if necessary). 374 * period (if necessary).
271 */ 375 */
376
272/* 377/*
273 * Register a new batch of callbacks, and start it up if there is currently no 378 * Register a new batch of callbacks, and start it up if there is currently no
274 * active batch and the batch to be registered has not already occurred. 379 * active batch and the batch to be registered has not already occurred.
@@ -276,15 +381,10 @@ static void rcu_do_batch(struct rcu_data *rdp)
276 */ 381 */
277static void rcu_start_batch(struct rcu_ctrlblk *rcp) 382static void rcu_start_batch(struct rcu_ctrlblk *rcp)
278{ 383{
279 if (rcp->next_pending && 384 if (rcp->cur != rcp->pending &&
280 rcp->completed == rcp->cur) { 385 rcp->completed == rcp->cur) {
281 rcp->next_pending = 0;
282 /*
283 * next_pending == 0 must be visible in
284 * __rcu_process_callbacks() before it can see new value of cur.
285 */
286 smp_wmb();
287 rcp->cur++; 386 rcp->cur++;
387 record_gp_stall_check_time(rcp);
288 388
289 /* 389 /*
290 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a 390 * Accessing nohz_cpu_mask before incrementing rcp->cur needs a
@@ -322,6 +422,8 @@ static void cpu_quiet(int cpu, struct rcu_ctrlblk *rcp)
322static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp, 422static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
323 struct rcu_data *rdp) 423 struct rcu_data *rdp)
324{ 424{
425 unsigned long flags;
426
325 if (rdp->quiescbatch != rcp->cur) { 427 if (rdp->quiescbatch != rcp->cur) {
326 /* start new grace period: */ 428 /* start new grace period: */
327 rdp->qs_pending = 1; 429 rdp->qs_pending = 1;
@@ -345,7 +447,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
345 return; 447 return;
346 rdp->qs_pending = 0; 448 rdp->qs_pending = 0;
347 449
348 spin_lock(&rcp->lock); 450 spin_lock_irqsave(&rcp->lock, flags);
349 /* 451 /*
350 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync 452 * rdp->quiescbatch/rcp->cur and the cpu bitmap can come out of sync
351 * during cpu startup. Ignore the quiescent state. 453 * during cpu startup. Ignore the quiescent state.
@@ -353,7 +455,7 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
353 if (likely(rdp->quiescbatch == rcp->cur)) 455 if (likely(rdp->quiescbatch == rcp->cur))
354 cpu_quiet(rdp->cpu, rcp); 456 cpu_quiet(rdp->cpu, rcp);
355 457
356 spin_unlock(&rcp->lock); 458 spin_unlock_irqrestore(&rcp->lock, flags);
357} 459}
358 460
359 461
@@ -364,33 +466,38 @@ static void rcu_check_quiescent_state(struct rcu_ctrlblk *rcp,
364 * which is dead and hence not processing interrupts. 466 * which is dead and hence not processing interrupts.
365 */ 467 */
366static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list, 468static void rcu_move_batch(struct rcu_data *this_rdp, struct rcu_head *list,
367 struct rcu_head **tail) 469 struct rcu_head **tail, long batch)
368{ 470{
369 local_irq_disable(); 471 unsigned long flags;
370 *this_rdp->nxttail = list; 472
371 if (list) 473 if (list) {
372 this_rdp->nxttail = tail; 474 local_irq_save(flags);
373 local_irq_enable(); 475 this_rdp->batch = batch;
476 *this_rdp->nxttail[2] = list;
477 this_rdp->nxttail[2] = tail;
478 local_irq_restore(flags);
479 }
374} 480}
375 481
376static void __rcu_offline_cpu(struct rcu_data *this_rdp, 482static void __rcu_offline_cpu(struct rcu_data *this_rdp,
377 struct rcu_ctrlblk *rcp, struct rcu_data *rdp) 483 struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
378{ 484{
379 /* if the cpu going offline owns the grace period 485 unsigned long flags;
486
487 /*
488 * if the cpu going offline owns the grace period
380 * we can block indefinitely waiting for it, so flush 489 * we can block indefinitely waiting for it, so flush
381 * it here 490 * it here
382 */ 491 */
383 spin_lock_bh(&rcp->lock); 492 spin_lock_irqsave(&rcp->lock, flags);
384 if (rcp->cur != rcp->completed) 493 if (rcp->cur != rcp->completed)
385 cpu_quiet(rdp->cpu, rcp); 494 cpu_quiet(rdp->cpu, rcp);
386 spin_unlock_bh(&rcp->lock); 495 rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail, rcp->cur + 1);
387 rcu_move_batch(this_rdp, rdp->donelist, rdp->donetail); 496 rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail[2], rcp->cur + 1);
388 rcu_move_batch(this_rdp, rdp->curlist, rdp->curtail); 497 spin_unlock(&rcp->lock);
389 rcu_move_batch(this_rdp, rdp->nxtlist, rdp->nxttail);
390 498
391 local_irq_disable();
392 this_rdp->qlen += rdp->qlen; 499 this_rdp->qlen += rdp->qlen;
393 local_irq_enable(); 500 local_irq_restore(flags);
394} 501}
395 502
396static void rcu_offline_cpu(int cpu) 503static void rcu_offline_cpu(int cpu)
@@ -420,38 +527,52 @@ static void rcu_offline_cpu(int cpu)
420static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp, 527static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
421 struct rcu_data *rdp) 528 struct rcu_data *rdp)
422{ 529{
423 if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch)) { 530 unsigned long flags;
424 *rdp->donetail = rdp->curlist; 531 long completed_snap;
425 rdp->donetail = rdp->curtail;
426 rdp->curlist = NULL;
427 rdp->curtail = &rdp->curlist;
428 }
429 532
430 if (rdp->nxtlist && !rdp->curlist) { 533 if (rdp->nxtlist) {
431 local_irq_disable(); 534 local_irq_save(flags);
432 rdp->curlist = rdp->nxtlist; 535 completed_snap = ACCESS_ONCE(rcp->completed);
433 rdp->curtail = rdp->nxttail;
434 rdp->nxtlist = NULL;
435 rdp->nxttail = &rdp->nxtlist;
436 local_irq_enable();
437 536
438 /* 537 /*
439 * start the next batch of callbacks 538 * move the other grace-period-completed entries to
539 * [rdp->nxtlist, *rdp->nxttail[0]) temporarily
440 */ 540 */
541 if (!rcu_batch_before(completed_snap, rdp->batch))
542 rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2];
543 else if (!rcu_batch_before(completed_snap, rdp->batch - 1))
544 rdp->nxttail[0] = rdp->nxttail[1];
441 545
442 /* determine batch number */ 546 /*
443 rdp->batch = rcp->cur + 1; 547 * the grace period for entries in
444 /* see the comment and corresponding wmb() in 548 * [rdp->nxtlist, *rdp->nxttail[0]) has completed and
445 * the rcu_start_batch() 549 * move these entries to donelist
446 */ 550 */
447 smp_rmb(); 551 if (rdp->nxttail[0] != &rdp->nxtlist) {
552 *rdp->donetail = rdp->nxtlist;
553 rdp->donetail = rdp->nxttail[0];
554 rdp->nxtlist = *rdp->nxttail[0];
555 *rdp->donetail = NULL;
556
557 if (rdp->nxttail[1] == rdp->nxttail[0])
558 rdp->nxttail[1] = &rdp->nxtlist;
559 if (rdp->nxttail[2] == rdp->nxttail[0])
560 rdp->nxttail[2] = &rdp->nxtlist;
561 rdp->nxttail[0] = &rdp->nxtlist;
562 }
563
564 local_irq_restore(flags);
565
566 if (rcu_batch_after(rdp->batch, rcp->pending)) {
567 unsigned long flags2;
448 568
449 if (!rcp->next_pending) {
450 /* and start it/schedule start if it's a new batch */ 569 /* and start it/schedule start if it's a new batch */
451 spin_lock(&rcp->lock); 570 spin_lock_irqsave(&rcp->lock, flags2);
452 rcp->next_pending = 1; 571 if (rcu_batch_after(rdp->batch, rcp->pending)) {
453 rcu_start_batch(rcp); 572 rcp->pending = rdp->batch;
454 spin_unlock(&rcp->lock); 573 rcu_start_batch(rcp);
574 }
575 spin_unlock_irqrestore(&rcp->lock, flags2);
455 } 576 }
456 } 577 }
457 578
@@ -462,21 +583,53 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp,
462 583
463static void rcu_process_callbacks(struct softirq_action *unused) 584static void rcu_process_callbacks(struct softirq_action *unused)
464{ 585{
586 /*
587 * Memory references from any prior RCU read-side critical sections
588 * executed by the interrupted code must be seen before any RCU
589 * grace-period manipulations below.
590 */
591
592 smp_mb(); /* See above block comment. */
593
465 __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data)); 594 __rcu_process_callbacks(&rcu_ctrlblk, &__get_cpu_var(rcu_data));
466 __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data)); 595 __rcu_process_callbacks(&rcu_bh_ctrlblk, &__get_cpu_var(rcu_bh_data));
596
597 /*
598 * Memory references from any later RCU read-side critical sections
599 * executed by the interrupted code must be seen after any RCU
600 * grace-period manipulations above.
601 */
602
603 smp_mb(); /* See above block comment. */
467} 604}
468 605
469static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp) 606static int __rcu_pending(struct rcu_ctrlblk *rcp, struct rcu_data *rdp)
470{ 607{
471 /* This cpu has pending rcu entries and the grace period 608 /* Check for CPU stalls, if enabled. */
472 * for them has completed. 609 check_cpu_stall(rcp);
473 */
474 if (rdp->curlist && !rcu_batch_before(rcp->completed, rdp->batch))
475 return 1;
476 610
477 /* This cpu has no pending entries, but there are new entries */ 611 if (rdp->nxtlist) {
478 if (!rdp->curlist && rdp->nxtlist) 612 long completed_snap = ACCESS_ONCE(rcp->completed);
479 return 1; 613
614 /*
615 * This cpu has pending rcu entries and the grace period
616 * for them has completed.
617 */
618 if (!rcu_batch_before(completed_snap, rdp->batch))
619 return 1;
620 if (!rcu_batch_before(completed_snap, rdp->batch - 1) &&
621 rdp->nxttail[0] != rdp->nxttail[1])
622 return 1;
623 if (rdp->nxttail[0] != &rdp->nxtlist)
624 return 1;
625
626 /*
627 * This cpu has pending rcu entries and the new batch
628 * for them hasn't been started, nor has its start been scheduled
629 */
630 if (rcu_batch_after(rdp->batch, rcp->pending))
631 return 1;
632 }
480 633
481 /* This cpu has finished callbacks to invoke */ 634 /* This cpu has finished callbacks to invoke */
482 if (rdp->donelist) 635 if (rdp->donelist)
@@ -512,9 +665,15 @@ int rcu_needs_cpu(int cpu)
512 struct rcu_data *rdp = &per_cpu(rcu_data, cpu); 665 struct rcu_data *rdp = &per_cpu(rcu_data, cpu);
513 struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu); 666 struct rcu_data *rdp_bh = &per_cpu(rcu_bh_data, cpu);
514 667
515 return (!!rdp->curlist || !!rdp_bh->curlist || rcu_pending(cpu)); 668 return !!rdp->nxtlist || !!rdp_bh->nxtlist || rcu_pending(cpu);
516} 669}
517 670
671/*
672 * Top-level function driving RCU grace-period detection, normally
673 * invoked from the scheduler-clock interrupt. This function simply
674 * increments counters that are read only from softirq by this same
675 * CPU, so there are no memory barriers required.
676 */
518void rcu_check_callbacks(int cpu, int user) 677void rcu_check_callbacks(int cpu, int user)
519{ 678{
520 if (user || 679 if (user ||
@@ -558,14 +717,17 @@ void rcu_check_callbacks(int cpu, int user)
558static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp, 717static void rcu_init_percpu_data(int cpu, struct rcu_ctrlblk *rcp,
559 struct rcu_data *rdp) 718 struct rcu_data *rdp)
560{ 719{
720 unsigned long flags;
721
722 spin_lock_irqsave(&rcp->lock, flags);
561 memset(rdp, 0, sizeof(*rdp)); 723 memset(rdp, 0, sizeof(*rdp));
562 rdp->curtail = &rdp->curlist; 724 rdp->nxttail[0] = rdp->nxttail[1] = rdp->nxttail[2] = &rdp->nxtlist;
563 rdp->nxttail = &rdp->nxtlist;
564 rdp->donetail = &rdp->donelist; 725 rdp->donetail = &rdp->donelist;
565 rdp->quiescbatch = rcp->completed; 726 rdp->quiescbatch = rcp->completed;
566 rdp->qs_pending = 0; 727 rdp->qs_pending = 0;
567 rdp->cpu = cpu; 728 rdp->cpu = cpu;
568 rdp->blimit = blimit; 729 rdp->blimit = blimit;
730 spin_unlock_irqrestore(&rcp->lock, flags);
569} 731}
570 732
571static void __cpuinit rcu_online_cpu(int cpu) 733static void __cpuinit rcu_online_cpu(int cpu)
@@ -610,6 +772,9 @@ static struct notifier_block __cpuinitdata rcu_nb = {
610 */ 772 */
611void __init __rcu_init(void) 773void __init __rcu_init(void)
612{ 774{
775#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
776 printk(KERN_INFO "RCU-based detection of stalled CPUs is enabled.\n");
777#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
613 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE, 778 rcu_cpu_notify(&rcu_nb, CPU_UP_PREPARE,
614 (void *)(long)smp_processor_id()); 779 (void *)(long)smp_processor_id());
615 /* Register notifier for non-boot CPUs */ 780 /* Register notifier for non-boot CPUs */
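The rcuclassic.c changes above replace the old curlist/nxtlist pair with a single nxtlist carved into segments by three tail pointers (nxttail[0..2]); __rcu_process_callbacks() advances the segment boundaries as grace periods complete and splices the finished segment onto donelist. The userspace sketch below mirrors that splice so the pointer manipulation can be compiled and stepped through in isolation; the struct layout and the enqueue()/advance() helpers are illustrative stand-ins, not the kernel API.

#include <stdio.h>

struct cb {
	struct cb *next;
	int id;
};

struct rdp {
	struct cb *nxtlist;		/* pending callbacks, oldest first */
	struct cb **nxttail[3];		/* segment boundaries within nxtlist */
	struct cb *donelist;		/* callbacks whose grace period is over */
	struct cb **donetail;
};

static void rdp_init(struct rdp *r)
{
	r->nxtlist = NULL;
	r->nxttail[0] = r->nxttail[1] = r->nxttail[2] = &r->nxtlist;
	r->donelist = NULL;
	r->donetail = &r->donelist;
}

/* queue a new callback behind the last segment, as call_rcu() does */
static void enqueue(struct rdp *r, struct cb *c)
{
	c->next = NULL;
	*r->nxttail[2] = c;
	r->nxttail[2] = &c->next;
}

/* splice the completed segment [nxtlist, *nxttail[0]) onto donelist */
static void advance(struct rdp *r)
{
	if (r->nxttail[0] == &r->nxtlist)
		return;
	*r->donetail = r->nxtlist;
	r->donetail = r->nxttail[0];
	r->nxtlist = *r->nxttail[0];
	*r->donetail = NULL;
	if (r->nxttail[1] == r->nxttail[0])
		r->nxttail[1] = &r->nxtlist;
	if (r->nxttail[2] == r->nxttail[0])
		r->nxttail[2] = &r->nxtlist;
	r->nxttail[0] = &r->nxtlist;
}

int main(void)
{
	struct rdp r;
	struct cb a = { .id = 1 }, b = { .id = 2 };

	rdp_init(&r);
	enqueue(&r, &a);
	/* pretend a grace period elapsed for everything queued so far */
	r.nxttail[0] = r.nxttail[1] = r.nxttail[2];
	enqueue(&r, &b);
	advance(&r);
	for (struct cb *c = r.donelist; c; c = c->next)
		printf("done: %d\n", c->id);
	return 0;
}

Built with any C99 compiler this prints "done: 1": the callback queued before the simulated grace period moves to donelist, while the later one stays pending in nxtlist.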
diff --git a/kernel/rcupreempt.c b/kernel/rcupreempt.c
index 27827931ca0d..59236e8b9daa 100644
--- a/kernel/rcupreempt.c
+++ b/kernel/rcupreempt.c
@@ -54,17 +54,9 @@
54#include <linux/cpu.h> 54#include <linux/cpu.h>
55#include <linux/random.h> 55#include <linux/random.h>
56#include <linux/delay.h> 56#include <linux/delay.h>
57#include <linux/byteorder/swabb.h>
58#include <linux/cpumask.h> 57#include <linux/cpumask.h>
59#include <linux/rcupreempt_trace.h> 58#include <linux/rcupreempt_trace.h>
60 59#include <asm/byteorder.h>
61/*
62 * Macro that prevents the compiler from reordering accesses, but does
63 * absolutely -nothing- to prevent CPUs from reordering. This is used
64 * only to mediate communication between mainline code and hardware
65 * interrupt and NMI handlers.
66 */
67#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
68 60
69/* 61/*
70 * PREEMPT_RCU data structures. 62 * PREEMPT_RCU data structures.
diff --git a/kernel/rcupreempt_trace.c b/kernel/rcupreempt_trace.c
index 5edf82c34bbc..35c2d3360ecf 100644
--- a/kernel/rcupreempt_trace.c
+++ b/kernel/rcupreempt_trace.c
@@ -308,11 +308,16 @@ out:
308 308
309static int __init rcupreempt_trace_init(void) 309static int __init rcupreempt_trace_init(void)
310{ 310{
311 int ret;
312
311 mutex_init(&rcupreempt_trace_mutex); 313 mutex_init(&rcupreempt_trace_mutex);
312 rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL); 314 rcupreempt_trace_buf = kmalloc(RCUPREEMPT_TRACE_BUF_SIZE, GFP_KERNEL);
313 if (!rcupreempt_trace_buf) 315 if (!rcupreempt_trace_buf)
314 return 1; 316 return 1;
315 return rcupreempt_debugfs_init(); 317 ret = rcupreempt_debugfs_init();
318 if (ret)
319 kfree(rcupreempt_trace_buf);
320 return ret;
316} 321}
317 322
318static void __exit rcupreempt_trace_cleanup(void) 323static void __exit rcupreempt_trace_cleanup(void)
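The rcupreempt_trace_init() change above is a plain init-error unwind: when the second setup step (rcupreempt_debugfs_init()) fails, the trace buffer allocated just before it is freed instead of leaked. A minimal standalone sketch of the same pattern, with a stub standing in for the debugfs step:

#include <stdio.h>
#include <stdlib.h>

#define TRACE_BUF_SIZE 4096

static char *trace_buf;

static int debugfs_init_stub(void)
{
	return -1;			/* pretend the second step fails */
}

static int trace_init(void)
{
	int ret;

	trace_buf = malloc(TRACE_BUF_SIZE);
	if (!trace_buf)
		return 1;
	ret = debugfs_init_stub();
	if (ret) {
		free(trace_buf);	/* unwind the earlier allocation */
		trace_buf = NULL;
	}
	return ret;
}

int main(void)
{
	printf("trace_init() = %d, buffer leaked: %s\n",
	       trace_init(), trace_buf ? "yes" : "no");
	return 0;
}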
diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
index 90b5b123f7a1..85cb90588a55 100644
--- a/kernel/rcutorture.c
+++ b/kernel/rcutorture.c
@@ -42,10 +42,10 @@
42#include <linux/freezer.h> 42#include <linux/freezer.h>
43#include <linux/cpu.h> 43#include <linux/cpu.h>
44#include <linux/delay.h> 44#include <linux/delay.h>
45#include <linux/byteorder/swabb.h>
46#include <linux/stat.h> 45#include <linux/stat.h>
47#include <linux/srcu.h> 46#include <linux/srcu.h>
48#include <linux/slab.h> 47#include <linux/slab.h>
48#include <asm/byteorder.h>
49 49
50MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and " 51MODULE_AUTHOR("Paul E. McKenney <paulmck@us.ibm.com> and "
diff --git a/kernel/resource.c b/kernel/resource.c
index 03d796c1b2e9..4089d12af6e0 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -38,10 +38,6 @@ EXPORT_SYMBOL(iomem_resource);
38 38
39static DEFINE_RWLOCK(resource_lock); 39static DEFINE_RWLOCK(resource_lock);
40 40
41#ifdef CONFIG_PROC_FS
42
43enum { MAX_IORES_LEVEL = 5 };
44
45static void *r_next(struct seq_file *m, void *v, loff_t *pos) 41static void *r_next(struct seq_file *m, void *v, loff_t *pos)
46{ 42{
47 struct resource *p = v; 43 struct resource *p = v;
@@ -53,6 +49,10 @@ static void *r_next(struct seq_file *m, void *v, loff_t *pos)
53 return p->sibling; 49 return p->sibling;
54} 50}
55 51
52#ifdef CONFIG_PROC_FS
53
54enum { MAX_IORES_LEVEL = 5 };
55
56static void *r_start(struct seq_file *m, loff_t *pos) 56static void *r_start(struct seq_file *m, loff_t *pos)
57 __acquires(resource_lock) 57 __acquires(resource_lock)
58{ 58{
@@ -516,6 +516,70 @@ int adjust_resource(struct resource *res, resource_size_t start, resource_size_t
516 return result; 516 return result;
517} 517}
518 518
519static void __init __reserve_region_with_split(struct resource *root,
520 resource_size_t start, resource_size_t end,
521 const char *name)
522{
523 struct resource *parent = root;
524 struct resource *conflict;
525 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
526
527 if (!res)
528 return;
529
530 res->name = name;
531 res->start = start;
532 res->end = end;
533 res->flags = IORESOURCE_BUSY;
534
535 for (;;) {
536 conflict = __request_resource(parent, res);
537 if (!conflict)
538 break;
539 if (conflict != parent) {
540 parent = conflict;
541 if (!(conflict->flags & IORESOURCE_BUSY))
542 continue;
543 }
544
545 /* Uhhuh, that didn't work out.. */
546 kfree(res);
547 res = NULL;
548 break;
549 }
550
551 if (!res) {
552 /* failed, split and try again */
553
554 /* conflict covered whole area */
555 if (conflict->start <= start && conflict->end >= end)
556 return;
557
558 if (conflict->start > start)
559 __reserve_region_with_split(root, start, conflict->start-1, name);
560 if (!(conflict->flags & IORESOURCE_BUSY)) {
561 resource_size_t common_start, common_end;
562
563 common_start = max(conflict->start, start);
564 common_end = min(conflict->end, end);
565 if (common_start < common_end)
566 __reserve_region_with_split(root, common_start, common_end, name);
567 }
568 if (conflict->end < end)
569 __reserve_region_with_split(root, conflict->end+1, end, name);
570 }
571
572}
573
574void reserve_region_with_split(struct resource *root,
575 resource_size_t start, resource_size_t end,
576 const char *name)
577{
578 write_lock(&resource_lock);
579 __reserve_region_with_split(root, start, end, name);
580 write_unlock(&resource_lock);
581}
582
519EXPORT_SYMBOL(adjust_resource); 583EXPORT_SYMBOL(adjust_resource);
520 584
521/** 585/**
@@ -562,33 +626,34 @@ struct resource * __request_region(struct resource *parent,
562{ 626{
563 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL); 627 struct resource *res = kzalloc(sizeof(*res), GFP_KERNEL);
564 628
565 if (res) { 629 if (!res)
566 res->name = name; 630 return NULL;
567 res->start = start;
568 res->end = start + n - 1;
569 res->flags = IORESOURCE_BUSY;
570 631
571 write_lock(&resource_lock); 632 res->name = name;
633 res->start = start;
634 res->end = start + n - 1;
635 res->flags = IORESOURCE_BUSY;
572 636
573 for (;;) { 637 write_lock(&resource_lock);
574 struct resource *conflict;
575 638
576 conflict = __request_resource(parent, res); 639 for (;;) {
577 if (!conflict) 640 struct resource *conflict;
578 break;
579 if (conflict != parent) {
580 parent = conflict;
581 if (!(conflict->flags & IORESOURCE_BUSY))
582 continue;
583 }
584 641
585 /* Uhhuh, that didn't work out.. */ 642 conflict = __request_resource(parent, res);
586 kfree(res); 643 if (!conflict)
587 res = NULL;
588 break; 644 break;
645 if (conflict != parent) {
646 parent = conflict;
647 if (!(conflict->flags & IORESOURCE_BUSY))
648 continue;
589 } 649 }
590 write_unlock(&resource_lock); 650
651 /* Uhhuh, that didn't work out.. */
652 kfree(res);
653 res = NULL;
654 break;
591 } 655 }
656 write_unlock(&resource_lock);
592 return res; 657 return res;
593} 658}
594EXPORT_SYMBOL(__request_region); 659EXPORT_SYMBOL(__request_region);
@@ -763,3 +828,40 @@ static int __init reserve_setup(char *str)
763} 828}
764 829
765__setup("reserve=", reserve_setup); 830__setup("reserve=", reserve_setup);
831
832/*
833 * Check if the requested addr and size span more than one slot in the
834 * iomem resource tree.
835 */
836int iomem_map_sanity_check(resource_size_t addr, unsigned long size)
837{
838 struct resource *p = &iomem_resource;
839 int err = 0;
840 loff_t l;
841
842 read_lock(&resource_lock);
843 for (p = p->child; p ; p = r_next(NULL, p, &l)) {
844 /*
845 * We can probably skip the resources without
846 * IORESOURCE_IO attribute?
847 */
848 if (p->start >= addr + size)
849 continue;
850 if (p->end < addr)
851 continue;
852 if (p->start <= addr && (p->end >= addr + size - 1))
853 continue;
854 printk(KERN_WARNING "resource map sanity check conflict: "
855 "0x%llx 0x%llx 0x%llx 0x%llx %s\n",
856 (unsigned long long)addr,
857 (unsigned long long)(addr + size - 1),
858 (unsigned long long)p->start,
859 (unsigned long long)p->end,
860 p->name);
861 err = -1;
862 break;
863 }
864 read_unlock(&resource_lock);
865
866 return err;
867}
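iomem_map_sanity_check(), added at the end of resource.c above, accepts a mapping only when every overlapping resource fully contains it; a request that straddles a slot boundary is reported and rejected. The sketch below reproduces that containment test over a flat array of ranges; the kernel instead walks the top level of the iomem tree via r_next(), and the range values here are made up for illustration.

#include <stdio.h>

struct range {
	unsigned long long start, end;
	const char *name;
};

static int map_sanity_check(const struct range *res, int nr,
			    unsigned long long addr, unsigned long size)
{
	for (int i = 0; i < nr; i++) {
		const struct range *p = &res[i];

		if (p->start >= addr + size)	/* resource entirely above */
			continue;
		if (p->end < addr)		/* resource entirely below */
			continue;
		if (p->start <= addr && p->end >= addr + size - 1)
			continue;		/* fully contained: fine */
		fprintf(stderr, "conflict with %s [0x%llx-0x%llx]\n",
			p->name, p->start, p->end);
		return -1;
	}
	return 0;
}

int main(void)
{
	struct range res[] = {
		{ 0x1000, 0x1fff, "devA" },
		{ 0x2000, 0x2fff, "devB" },
	};

	/* spans devA and devB: rejected */
	printf("%d\n", map_sanity_check(res, 2, 0x1800, 0x1000));
	/* sits entirely inside devA: accepted */
	printf("%d\n", map_sanity_check(res, 2, 0x1100, 0x100));
	return 0;
}

The first call prints -1 because the request crosses from devA into devB; the second prints 0.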
diff --git a/kernel/sched.c b/kernel/sched.c
index ad1962dc0aa2..d906f72b42d2 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -71,6 +71,7 @@
71#include <linux/debugfs.h> 71#include <linux/debugfs.h>
72#include <linux/ctype.h> 72#include <linux/ctype.h>
73#include <linux/ftrace.h> 73#include <linux/ftrace.h>
74#include <trace/sched.h>
74 75
75#include <asm/tlb.h> 76#include <asm/tlb.h>
76#include <asm/irq_regs.h> 77#include <asm/irq_regs.h>
@@ -204,11 +205,16 @@ void init_rt_bandwidth(struct rt_bandwidth *rt_b, u64 period, u64 runtime)
204 rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED; 205 rt_b->rt_period_timer.cb_mode = HRTIMER_CB_IRQSAFE_UNLOCKED;
205} 206}
206 207
208static inline int rt_bandwidth_enabled(void)
209{
210 return sysctl_sched_rt_runtime >= 0;
211}
212
207static void start_rt_bandwidth(struct rt_bandwidth *rt_b) 213static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
208{ 214{
209 ktime_t now; 215 ktime_t now;
210 216
211 if (rt_b->rt_runtime == RUNTIME_INF) 217 if (rt_bandwidth_enabled() && rt_b->rt_runtime == RUNTIME_INF)
212 return; 218 return;
213 219
214 if (hrtimer_active(&rt_b->rt_period_timer)) 220 if (hrtimer_active(&rt_b->rt_period_timer))
@@ -298,9 +304,9 @@ static DEFINE_PER_CPU(struct cfs_rq, init_cfs_rq) ____cacheline_aligned_in_smp;
298static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity); 304static DEFINE_PER_CPU(struct sched_rt_entity, init_sched_rt_entity);
299static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp; 305static DEFINE_PER_CPU(struct rt_rq, init_rt_rq) ____cacheline_aligned_in_smp;
300#endif /* CONFIG_RT_GROUP_SCHED */ 306#endif /* CONFIG_RT_GROUP_SCHED */
301#else /* !CONFIG_FAIR_GROUP_SCHED */ 307#else /* !CONFIG_USER_SCHED */
302#define root_task_group init_task_group 308#define root_task_group init_task_group
303#endif /* CONFIG_FAIR_GROUP_SCHED */ 309#endif /* CONFIG_USER_SCHED */
304 310
305/* task_group_lock serializes add/remove of task groups and also changes to 311/* task_group_lock serializes add/remove of task groups and also changes to
306 * a task group's cpu shares. 312 * a task group's cpu shares.
@@ -604,9 +610,9 @@ struct rq {
604 610
605static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues); 611static DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
606 612
607static inline void check_preempt_curr(struct rq *rq, struct task_struct *p) 613static inline void check_preempt_curr(struct rq *rq, struct task_struct *p, int sync)
608{ 614{
609 rq->curr->sched_class->check_preempt_curr(rq, p); 615 rq->curr->sched_class->check_preempt_curr(rq, p, sync);
610} 616}
611 617
612static inline int cpu_of(struct rq *rq) 618static inline int cpu_of(struct rq *rq)
@@ -1102,7 +1108,7 @@ static void hrtick_start(struct rq *rq, u64 delay)
1102 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL); 1108 hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay), HRTIMER_MODE_REL);
1103} 1109}
1104 1110
1105static void init_hrtick(void) 1111static inline void init_hrtick(void)
1106{ 1112{
1107} 1113}
1108#endif /* CONFIG_SMP */ 1114#endif /* CONFIG_SMP */
@@ -1121,7 +1127,7 @@ static void init_rq_hrtick(struct rq *rq)
1121 rq->hrtick_timer.function = hrtick; 1127 rq->hrtick_timer.function = hrtick;
1122 rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU; 1128 rq->hrtick_timer.cb_mode = HRTIMER_CB_IRQSAFE_PERCPU;
1123} 1129}
1124#else 1130#else /* CONFIG_SCHED_HRTICK */
1125static inline void hrtick_clear(struct rq *rq) 1131static inline void hrtick_clear(struct rq *rq)
1126{ 1132{
1127} 1133}
@@ -1133,7 +1139,7 @@ static inline void init_rq_hrtick(struct rq *rq)
1133static inline void init_hrtick(void) 1139static inline void init_hrtick(void)
1134{ 1140{
1135} 1141}
1136#endif 1142#endif /* CONFIG_SCHED_HRTICK */
1137 1143
1138/* 1144/*
1139 * resched_task - mark a task 'to be rescheduled now'. 1145 * resched_task - mark a task 'to be rescheduled now'.
@@ -1380,38 +1386,24 @@ static inline void dec_cpu_load(struct rq *rq, unsigned long load)
1380 update_load_sub(&rq->load, load); 1386 update_load_sub(&rq->load, load);
1381} 1387}
1382 1388
1383#ifdef CONFIG_SMP 1389#if (defined(CONFIG_SMP) && defined(CONFIG_FAIR_GROUP_SCHED)) || defined(CONFIG_RT_GROUP_SCHED)
1384static unsigned long source_load(int cpu, int type); 1390typedef int (*tg_visitor)(struct task_group *, void *);
1385static unsigned long target_load(int cpu, int type);
1386static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1387
1388static unsigned long cpu_avg_load_per_task(int cpu)
1389{
1390 struct rq *rq = cpu_rq(cpu);
1391
1392 if (rq->nr_running)
1393 rq->avg_load_per_task = rq->load.weight / rq->nr_running;
1394
1395 return rq->avg_load_per_task;
1396}
1397
1398#ifdef CONFIG_FAIR_GROUP_SCHED
1399
1400typedef void (*tg_visitor)(struct task_group *, int, struct sched_domain *);
1401 1391
1402/* 1392/*
1403 * Iterate the full tree, calling @down when first entering a node and @up when 1393 * Iterate the full tree, calling @down when first entering a node and @up when
1404 * leaving it for the final time. 1394 * leaving it for the final time.
1405 */ 1395 */
1406static void 1396static int walk_tg_tree(tg_visitor down, tg_visitor up, void *data)
1407walk_tg_tree(tg_visitor down, tg_visitor up, int cpu, struct sched_domain *sd)
1408{ 1397{
1409 struct task_group *parent, *child; 1398 struct task_group *parent, *child;
1399 int ret;
1410 1400
1411 rcu_read_lock(); 1401 rcu_read_lock();
1412 parent = &root_task_group; 1402 parent = &root_task_group;
1413down: 1403down:
1414 (*down)(parent, cpu, sd); 1404 ret = (*down)(parent, data);
1405 if (ret)
1406 goto out_unlock;
1415 list_for_each_entry_rcu(child, &parent->children, siblings) { 1407 list_for_each_entry_rcu(child, &parent->children, siblings) {
1416 parent = child; 1408 parent = child;
1417 goto down; 1409 goto down;
@@ -1419,14 +1411,42 @@ down:
1419up: 1411up:
1420 continue; 1412 continue;
1421 } 1413 }
1422 (*up)(parent, cpu, sd); 1414 ret = (*up)(parent, data);
1415 if (ret)
1416 goto out_unlock;
1423 1417
1424 child = parent; 1418 child = parent;
1425 parent = parent->parent; 1419 parent = parent->parent;
1426 if (parent) 1420 if (parent)
1427 goto up; 1421 goto up;
1422out_unlock:
1428 rcu_read_unlock(); 1423 rcu_read_unlock();
1424
1425 return ret;
1426}
1427
1428static int tg_nop(struct task_group *tg, void *data)
1429{
1430 return 0;
1429} 1431}
1432#endif
1433
1434#ifdef CONFIG_SMP
1435static unsigned long source_load(int cpu, int type);
1436static unsigned long target_load(int cpu, int type);
1437static int task_hot(struct task_struct *p, u64 now, struct sched_domain *sd);
1438
1439static unsigned long cpu_avg_load_per_task(int cpu)
1440{
1441 struct rq *rq = cpu_rq(cpu);
1442
1443 if (rq->nr_running)
1444 rq->avg_load_per_task = rq->load.weight / rq->nr_running;
1445
1446 return rq->avg_load_per_task;
1447}
1448
1449#ifdef CONFIG_FAIR_GROUP_SCHED
1430 1450
1431static void __set_se_shares(struct sched_entity *se, unsigned long shares); 1451static void __set_se_shares(struct sched_entity *se, unsigned long shares);
1432 1452
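walk_tg_tree() now takes int-returning tg_visitor callbacks plus an opaque void *data, so a visitor can abort the walk and hand an error code back to the caller (tg_schedulable() relies on this to propagate -EBUSY and -EINVAL). The standalone sketch below mirrors that contract (down on entry, up on final exit, stop at the first non-zero return) using a hypothetical NULL-terminated child array and plain recursion instead of the kernel's RCU-protected sibling lists and goto-based iteration.

#include <stdio.h>

struct tg {
	const char *name;
	struct tg **children;		/* NULL-terminated array */
};

typedef int (*tg_visitor)(struct tg *, void *);

static int walk_tg_tree(struct tg *tg, tg_visitor down, tg_visitor up, void *data)
{
	int ret = down(tg, data);

	if (ret)
		return ret;
	for (struct tg **c = tg->children; c && *c; c++) {
		ret = walk_tg_tree(*c, down, up, data);
		if (ret)
			return ret;
	}
	return up(tg, data);
}

static int tg_nop(struct tg *tg, void *data)
{
	(void)tg;
	(void)data;
	return 0;
}

static int print_down(struct tg *tg, void *data)
{
	(void)data;
	printf("down %s\n", tg->name);
	return 0;	/* a non-zero return here would stop the whole walk */
}

int main(void)
{
	struct tg leaf1 = { "leaf1", NULL };
	struct tg leaf2 = { "leaf2", NULL };
	struct tg *kids[] = { &leaf1, &leaf2, NULL };
	struct tg root = { "root", kids };

	return walk_tg_tree(&root, print_down, tg_nop, NULL);
}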
@@ -1486,11 +1506,11 @@ __update_group_shares_cpu(struct task_group *tg, int cpu,
1486 * This needs to be done in a bottom-up fashion because the rq weight of a 1506 * This needs to be done in a bottom-up fashion because the rq weight of a
1487 * parent group depends on the shares of its child groups. 1507 * parent group depends on the shares of its child groups.
1488 */ 1508 */
1489static void 1509static int tg_shares_up(struct task_group *tg, void *data)
1490tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
1491{ 1510{
1492 unsigned long rq_weight = 0; 1511 unsigned long rq_weight = 0;
1493 unsigned long shares = 0; 1512 unsigned long shares = 0;
1513 struct sched_domain *sd = data;
1494 int i; 1514 int i;
1495 1515
1496 for_each_cpu_mask(i, sd->span) { 1516 for_each_cpu_mask(i, sd->span) {
@@ -1515,6 +1535,8 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
1515 __update_group_shares_cpu(tg, i, shares, rq_weight); 1535 __update_group_shares_cpu(tg, i, shares, rq_weight);
1516 spin_unlock_irqrestore(&rq->lock, flags); 1536 spin_unlock_irqrestore(&rq->lock, flags);
1517 } 1537 }
1538
1539 return 0;
1518} 1540}
1519 1541
1520/* 1542/*
@@ -1522,10 +1544,10 @@ tg_shares_up(struct task_group *tg, int cpu, struct sched_domain *sd)
1522 * This needs to be done in a top-down fashion because the load of a child 1544 * This needs to be done in a top-down fashion because the load of a child
1523 * group is a fraction of its parents load. 1545 * group is a fraction of its parents load.
1524 */ 1546 */
1525static void 1547static int tg_load_down(struct task_group *tg, void *data)
1526tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
1527{ 1548{
1528 unsigned long load; 1549 unsigned long load;
1550 long cpu = (long)data;
1529 1551
1530 if (!tg->parent) { 1552 if (!tg->parent) {
1531 load = cpu_rq(cpu)->load.weight; 1553 load = cpu_rq(cpu)->load.weight;
@@ -1536,11 +1558,8 @@ tg_load_down(struct task_group *tg, int cpu, struct sched_domain *sd)
1536 } 1558 }
1537 1559
1538 tg->cfs_rq[cpu]->h_load = load; 1560 tg->cfs_rq[cpu]->h_load = load;
1539}
1540 1561
1541static void 1562 return 0;
1542tg_nop(struct task_group *tg, int cpu, struct sched_domain *sd)
1543{
1544} 1563}
1545 1564
1546static void update_shares(struct sched_domain *sd) 1565static void update_shares(struct sched_domain *sd)
@@ -1550,7 +1569,7 @@ static void update_shares(struct sched_domain *sd)
1550 1569
1551 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) { 1570 if (elapsed >= (s64)(u64)sysctl_sched_shares_ratelimit) {
1552 sd->last_update = now; 1571 sd->last_update = now;
1553 walk_tg_tree(tg_nop, tg_shares_up, 0, sd); 1572 walk_tg_tree(tg_nop, tg_shares_up, sd);
1554 } 1573 }
1555} 1574}
1556 1575
@@ -1561,9 +1580,9 @@ static void update_shares_locked(struct rq *rq, struct sched_domain *sd)
1561 spin_lock(&rq->lock); 1580 spin_lock(&rq->lock);
1562} 1581}
1563 1582
1564static void update_h_load(int cpu) 1583static void update_h_load(long cpu)
1565{ 1584{
1566 walk_tg_tree(tg_load_down, tg_nop, cpu, NULL); 1585 walk_tg_tree(tg_load_down, tg_nop, (void *)cpu);
1567} 1586}
1568 1587
1569#else 1588#else
@@ -1918,14 +1937,12 @@ unsigned long wait_task_inactive(struct task_struct *p, long match_state)
1918 * just go back and repeat. 1937 * just go back and repeat.
1919 */ 1938 */
1920 rq = task_rq_lock(p, &flags); 1939 rq = task_rq_lock(p, &flags);
1940 trace_sched_wait_task(rq, p);
1921 running = task_running(rq, p); 1941 running = task_running(rq, p);
1922 on_rq = p->se.on_rq; 1942 on_rq = p->se.on_rq;
1923 ncsw = 0; 1943 ncsw = 0;
1924 if (!match_state || p->state == match_state) { 1944 if (!match_state || p->state == match_state)
1925 ncsw = p->nivcsw + p->nvcsw; 1945 ncsw = p->nvcsw | LONG_MIN; /* sets MSB */
1926 if (unlikely(!ncsw))
1927 ncsw = 1;
1928 }
1929 task_rq_unlock(rq, &flags); 1946 task_rq_unlock(rq, &flags);
1930 1947
1931 /* 1948 /*
@@ -2282,10 +2299,8 @@ out_activate:
2282 success = 1; 2299 success = 1;
2283 2300
2284out_running: 2301out_running:
2285 trace_mark(kernel_sched_wakeup, 2302 trace_sched_wakeup(rq, p);
2286 "pid %d state %ld ## rq %p task %p rq->curr %p", 2303 check_preempt_curr(rq, p, sync);
2287 p->pid, p->state, rq, p, rq->curr);
2288 check_preempt_curr(rq, p);
2289 2304
2290 p->state = TASK_RUNNING; 2305 p->state = TASK_RUNNING;
2291#ifdef CONFIG_SMP 2306#ifdef CONFIG_SMP
@@ -2417,10 +2432,8 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
2417 p->sched_class->task_new(rq, p); 2432 p->sched_class->task_new(rq, p);
2418 inc_nr_running(rq); 2433 inc_nr_running(rq);
2419 } 2434 }
2420 trace_mark(kernel_sched_wakeup_new, 2435 trace_sched_wakeup_new(rq, p);
2421 "pid %d state %ld ## rq %p task %p rq->curr %p", 2436 check_preempt_curr(rq, p, 0);
2422 p->pid, p->state, rq, p, rq->curr);
2423 check_preempt_curr(rq, p);
2424#ifdef CONFIG_SMP 2437#ifdef CONFIG_SMP
2425 if (p->sched_class->task_wake_up) 2438 if (p->sched_class->task_wake_up)
2426 p->sched_class->task_wake_up(rq, p); 2439 p->sched_class->task_wake_up(rq, p);
@@ -2592,11 +2605,7 @@ context_switch(struct rq *rq, struct task_struct *prev,
2592 struct mm_struct *mm, *oldmm; 2605 struct mm_struct *mm, *oldmm;
2593 2606
2594 prepare_task_switch(rq, prev, next); 2607 prepare_task_switch(rq, prev, next);
2595 trace_mark(kernel_sched_schedule, 2608 trace_sched_switch(rq, prev, next);
2596 "prev_pid %d next_pid %d prev_state %ld "
2597 "## rq %p prev %p next %p",
2598 prev->pid, next->pid, prev->state,
2599 rq, prev, next);
2600 mm = next->mm; 2609 mm = next->mm;
2601 oldmm = prev->active_mm; 2610 oldmm = prev->active_mm;
2602 /* 2611 /*
@@ -2836,6 +2845,7 @@ static void sched_migrate_task(struct task_struct *p, int dest_cpu)
2836 || unlikely(!cpu_active(dest_cpu))) 2845 || unlikely(!cpu_active(dest_cpu)))
2837 goto out; 2846 goto out;
2838 2847
2848 trace_sched_migrate_task(rq, p, dest_cpu);
2839 /* force the process onto the specified CPU */ 2849 /* force the process onto the specified CPU */
2840 if (migrate_task(p, dest_cpu, &req)) { 2850 if (migrate_task(p, dest_cpu, &req)) {
2841 /* Need to wait for migration thread (might exit: take ref). */ 2851 /* Need to wait for migration thread (might exit: take ref). */
@@ -2880,7 +2890,7 @@ static void pull_task(struct rq *src_rq, struct task_struct *p,
2880 * Note that idle threads have a prio of MAX_PRIO, for this test 2890 * Note that idle threads have a prio of MAX_PRIO, for this test
2881 * to be always true for them. 2891 * to be always true for them.
2882 */ 2892 */
2883 check_preempt_curr(this_rq, p); 2893 check_preempt_curr(this_rq, p, 0);
2884} 2894}
2885 2895
2886/* 2896/*
@@ -4037,23 +4047,26 @@ DEFINE_PER_CPU(struct kernel_stat, kstat);
4037EXPORT_PER_CPU_SYMBOL(kstat); 4047EXPORT_PER_CPU_SYMBOL(kstat);
4038 4048
4039/* 4049/*
4040 * Return p->sum_exec_runtime plus any more ns on the sched_clock 4050 * Return any ns on the sched_clock that have not yet been banked in
4041 * that have not yet been banked in case the task is currently running. 4051 * @p in case that task is currently running.
4042 */ 4052 */
4043unsigned long long task_sched_runtime(struct task_struct *p) 4053unsigned long long task_delta_exec(struct task_struct *p)
4044{ 4054{
4045 unsigned long flags; 4055 unsigned long flags;
4046 u64 ns, delta_exec;
4047 struct rq *rq; 4056 struct rq *rq;
4057 u64 ns = 0;
4048 4058
4049 rq = task_rq_lock(p, &flags); 4059 rq = task_rq_lock(p, &flags);
4050 ns = p->se.sum_exec_runtime; 4060
4051 if (task_current(rq, p)) { 4061 if (task_current(rq, p)) {
4062 u64 delta_exec;
4063
4052 update_rq_clock(rq); 4064 update_rq_clock(rq);
4053 delta_exec = rq->clock - p->se.exec_start; 4065 delta_exec = rq->clock - p->se.exec_start;
4054 if ((s64)delta_exec > 0) 4066 if ((s64)delta_exec > 0)
4055 ns += delta_exec; 4067 ns = delta_exec;
4056 } 4068 }
4069
4057 task_rq_unlock(rq, &flags); 4070 task_rq_unlock(rq, &flags);
4058 4071
4059 return ns; 4072 return ns;
@@ -4070,6 +4083,7 @@ void account_user_time(struct task_struct *p, cputime_t cputime)
4070 cputime64_t tmp; 4083 cputime64_t tmp;
4071 4084
4072 p->utime = cputime_add(p->utime, cputime); 4085 p->utime = cputime_add(p->utime, cputime);
4086 account_group_user_time(p, cputime);
4073 4087
4074 /* Add user time to cpustat. */ 4088 /* Add user time to cpustat. */
4075 tmp = cputime_to_cputime64(cputime); 4089 tmp = cputime_to_cputime64(cputime);
@@ -4094,6 +4108,7 @@ static void account_guest_time(struct task_struct *p, cputime_t cputime)
4094 tmp = cputime_to_cputime64(cputime); 4108 tmp = cputime_to_cputime64(cputime);
4095 4109
4096 p->utime = cputime_add(p->utime, cputime); 4110 p->utime = cputime_add(p->utime, cputime);
4111 account_group_user_time(p, cputime);
4097 p->gtime = cputime_add(p->gtime, cputime); 4112 p->gtime = cputime_add(p->gtime, cputime);
4098 4113
4099 cpustat->user = cputime64_add(cpustat->user, tmp); 4114 cpustat->user = cputime64_add(cpustat->user, tmp);
@@ -4129,6 +4144,7 @@ void account_system_time(struct task_struct *p, int hardirq_offset,
4129 } 4144 }
4130 4145
4131 p->stime = cputime_add(p->stime, cputime); 4146 p->stime = cputime_add(p->stime, cputime);
4147 account_group_system_time(p, cputime);
4132 4148
4133 /* Add system time to cpustat. */ 4149 /* Add system time to cpustat. */
4134 tmp = cputime_to_cputime64(cputime); 4150 tmp = cputime_to_cputime64(cputime);
@@ -4170,6 +4186,7 @@ void account_steal_time(struct task_struct *p, cputime_t steal)
4170 4186
4171 if (p == rq->idle) { 4187 if (p == rq->idle) {
4172 p->stime = cputime_add(p->stime, steal); 4188 p->stime = cputime_add(p->stime, steal);
4189 account_group_system_time(p, steal);
4173 if (atomic_read(&rq->nr_iowait) > 0) 4190 if (atomic_read(&rq->nr_iowait) > 0)
4174 cpustat->iowait = cputime64_add(cpustat->iowait, tmp); 4191 cpustat->iowait = cputime64_add(cpustat->iowait, tmp);
4175 else 4192 else
@@ -4627,6 +4644,15 @@ __wake_up_sync(wait_queue_head_t *q, unsigned int mode, int nr_exclusive)
4627} 4644}
4628EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */ 4645EXPORT_SYMBOL_GPL(__wake_up_sync); /* For internal use only */
4629 4646
4647/**
4648 * complete: - signals a single thread waiting on this completion
4649 * @x: holds the state of this particular completion
4650 *
4651 * This will wake up a single thread waiting on this completion. Threads will be
4652 * awakened in the same order in which they were queued.
4653 *
4654 * See also complete_all(), wait_for_completion() and related routines.
4655 */
4630void complete(struct completion *x) 4656void complete(struct completion *x)
4631{ 4657{
4632 unsigned long flags; 4658 unsigned long flags;
@@ -4638,6 +4664,12 @@ void complete(struct completion *x)
4638} 4664}
4639EXPORT_SYMBOL(complete); 4665EXPORT_SYMBOL(complete);
4640 4666
4667/**
4668 * complete_all: - signals all threads waiting on this completion
4669 * @x: holds the state of this particular completion
4670 *
4671 * This will wake up all threads waiting on this particular completion event.
4672 */
4641void complete_all(struct completion *x) 4673void complete_all(struct completion *x)
4642{ 4674{
4643 unsigned long flags; 4675 unsigned long flags;
@@ -4658,10 +4690,7 @@ do_wait_for_common(struct completion *x, long timeout, int state)
4658 wait.flags |= WQ_FLAG_EXCLUSIVE; 4690 wait.flags |= WQ_FLAG_EXCLUSIVE;
4659 __add_wait_queue_tail(&x->wait, &wait); 4691 __add_wait_queue_tail(&x->wait, &wait);
4660 do { 4692 do {
4661 if ((state == TASK_INTERRUPTIBLE && 4693 if (signal_pending_state(state, current)) {
4662 signal_pending(current)) ||
4663 (state == TASK_KILLABLE &&
4664 fatal_signal_pending(current))) {
4665 timeout = -ERESTARTSYS; 4694 timeout = -ERESTARTSYS;
4666 break; 4695 break;
4667 } 4696 }
@@ -4689,12 +4718,31 @@ wait_for_common(struct completion *x, long timeout, int state)
4689 return timeout; 4718 return timeout;
4690} 4719}
4691 4720
4721/**
4722 * wait_for_completion: - waits for completion of a task
4723 * @x: holds the state of this particular completion
4724 *
4725 * This waits to be signaled for completion of a specific task. It is NOT
4726 * interruptible and there is no timeout.
4727 *
4728 * See also similar routines (i.e. wait_for_completion_timeout()) with timeout
4729 * and interrupt capability. Also see complete().
4730 */
4692void __sched wait_for_completion(struct completion *x) 4731void __sched wait_for_completion(struct completion *x)
4693{ 4732{
4694 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE); 4733 wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_UNINTERRUPTIBLE);
4695} 4734}
4696EXPORT_SYMBOL(wait_for_completion); 4735EXPORT_SYMBOL(wait_for_completion);
4697 4736
4737/**
4738 * wait_for_completion_timeout: - waits for completion of a task (w/timeout)
4739 * @x: holds the state of this particular completion
4740 * @timeout: timeout value in jiffies
4741 *
4742 * This waits for either a completion of a specific task to be signaled or for a
4743 * specified timeout to expire. The timeout is in jiffies. It is not
4744 * interruptible.
4745 */
4698unsigned long __sched 4746unsigned long __sched
4699wait_for_completion_timeout(struct completion *x, unsigned long timeout) 4747wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4700{ 4748{
@@ -4702,6 +4750,13 @@ wait_for_completion_timeout(struct completion *x, unsigned long timeout)
4702} 4750}
4703EXPORT_SYMBOL(wait_for_completion_timeout); 4751EXPORT_SYMBOL(wait_for_completion_timeout);
4704 4752
4753/**
4754 * wait_for_completion_interruptible: - waits for completion of a task (w/intr)
4755 * @x: holds the state of this particular completion
4756 *
4757 * This waits for completion of a specific task to be signaled. It is
4758 * interruptible.
4759 */
4705int __sched wait_for_completion_interruptible(struct completion *x) 4760int __sched wait_for_completion_interruptible(struct completion *x)
4706{ 4761{
4707 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE); 4762 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_INTERRUPTIBLE);
@@ -4711,6 +4766,14 @@ int __sched wait_for_completion_interruptible(struct completion *x)
4711} 4766}
4712EXPORT_SYMBOL(wait_for_completion_interruptible); 4767EXPORT_SYMBOL(wait_for_completion_interruptible);
4713 4768
4769/**
4770 * wait_for_completion_interruptible_timeout: - waits for completion (w/(to,intr))
4771 * @x: holds the state of this particular completion
4772 * @timeout: timeout value in jiffies
4773 *
4774 * This waits for either a completion of a specific task to be signaled or for a
4775 * specified timeout to expire. It is interruptible. The timeout is in jiffies.
4776 */
4714unsigned long __sched 4777unsigned long __sched
4715wait_for_completion_interruptible_timeout(struct completion *x, 4778wait_for_completion_interruptible_timeout(struct completion *x,
4716 unsigned long timeout) 4779 unsigned long timeout)
@@ -4719,6 +4782,13 @@ wait_for_completion_interruptible_timeout(struct completion *x,
4719} 4782}
4720EXPORT_SYMBOL(wait_for_completion_interruptible_timeout); 4783EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
4721 4784
4785/**
4786 * wait_for_completion_killable: - waits for completion of a task (killable)
4787 * @x: holds the state of this particular completion
4788 *
4789 * This waits to be signaled for completion of a specific task. It can be
4790 * interrupted by a kill signal.
4791 */
4722int __sched wait_for_completion_killable(struct completion *x) 4792int __sched wait_for_completion_killable(struct completion *x)
4723{ 4793{
4724 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE); 4794 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
@@ -5121,7 +5191,8 @@ recheck:
5121 * Do not allow realtime tasks into groups that have no runtime 5191 * Do not allow realtime tasks into groups that have no runtime
5122 * assigned. 5192 * assigned.
5123 */ 5193 */
5124 if (rt_policy(policy) && task_group(p)->rt_bandwidth.rt_runtime == 0) 5194 if (rt_bandwidth_enabled() && rt_policy(policy) &&
5195 task_group(p)->rt_bandwidth.rt_runtime == 0)
5125 return -EPERM; 5196 return -EPERM;
5126#endif 5197#endif
5127 5198
@@ -5957,7 +6028,7 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
5957 set_task_cpu(p, dest_cpu); 6028 set_task_cpu(p, dest_cpu);
5958 if (on_rq) { 6029 if (on_rq) {
5959 activate_task(rq_dest, p, 0); 6030 activate_task(rq_dest, p, 0);
5960 check_preempt_curr(rq_dest, p); 6031 check_preempt_curr(rq_dest, p, 0);
5961 } 6032 }
5962done: 6033done:
5963 ret = 1; 6034 ret = 1;
@@ -6282,7 +6353,7 @@ set_table_entry(struct ctl_table *entry,
6282static struct ctl_table * 6353static struct ctl_table *
6283sd_alloc_ctl_domain_table(struct sched_domain *sd) 6354sd_alloc_ctl_domain_table(struct sched_domain *sd)
6284{ 6355{
6285 struct ctl_table *table = sd_alloc_ctl_entry(12); 6356 struct ctl_table *table = sd_alloc_ctl_entry(13);
6286 6357
6287 if (table == NULL) 6358 if (table == NULL)
6288 return NULL; 6359 return NULL;
@@ -6310,7 +6381,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
6310 sizeof(int), 0644, proc_dointvec_minmax); 6381 sizeof(int), 0644, proc_dointvec_minmax);
6311 set_table_entry(&table[10], "flags", &sd->flags, 6382 set_table_entry(&table[10], "flags", &sd->flags,
6312 sizeof(int), 0644, proc_dointvec_minmax); 6383 sizeof(int), 0644, proc_dointvec_minmax);
6313 /* &table[11] is terminator */ 6384 set_table_entry(&table[11], "name", sd->name,
6385 CORENAME_MAX_SIZE, 0444, proc_dostring);
6386 /* &table[12] is terminator */
6314 6387
6315 return table; 6388 return table;
6316} 6389}
@@ -7194,13 +7267,21 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
7194 * Non-inlined to reduce accumulated stack pressure in build_sched_domains() 7267 * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
7195 */ 7268 */
7196 7269
7270#ifdef CONFIG_SCHED_DEBUG
7271# define SD_INIT_NAME(sd, type) sd->name = #type
7272#else
7273# define SD_INIT_NAME(sd, type) do { } while (0)
7274#endif
7275
7197#define SD_INIT(sd, type) sd_init_##type(sd) 7276#define SD_INIT(sd, type) sd_init_##type(sd)
7277
7198#define SD_INIT_FUNC(type) \ 7278#define SD_INIT_FUNC(type) \
7199static noinline void sd_init_##type(struct sched_domain *sd) \ 7279static noinline void sd_init_##type(struct sched_domain *sd) \
7200{ \ 7280{ \
7201 memset(sd, 0, sizeof(*sd)); \ 7281 memset(sd, 0, sizeof(*sd)); \
7202 *sd = SD_##type##_INIT; \ 7282 *sd = SD_##type##_INIT; \
7203 sd->level = SD_LV_##type; \ 7283 sd->level = SD_LV_##type; \
7284 SD_INIT_NAME(sd, type); \
7204} 7285}
7205 7286
7206SD_INIT_FUNC(CPU) 7287SD_INIT_FUNC(CPU)
@@ -8242,20 +8323,25 @@ void __might_sleep(char *file, int line)
8242#ifdef in_atomic 8323#ifdef in_atomic
8243 static unsigned long prev_jiffy; /* ratelimiting */ 8324 static unsigned long prev_jiffy; /* ratelimiting */
8244 8325
8245 if ((in_atomic() || irqs_disabled()) && 8326 if ((!in_atomic() && !irqs_disabled()) ||
8246 system_state == SYSTEM_RUNNING && !oops_in_progress) { 8327 system_state != SYSTEM_RUNNING || oops_in_progress)
8247 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy) 8328 return;
8248 return; 8329 if (time_before(jiffies, prev_jiffy + HZ) && prev_jiffy)
8249 prev_jiffy = jiffies; 8330 return;
8250 printk(KERN_ERR "BUG: sleeping function called from invalid" 8331 prev_jiffy = jiffies;
8251 " context at %s:%d\n", file, line); 8332
8252 printk("in_atomic():%d, irqs_disabled():%d\n", 8333 printk(KERN_ERR
8253 in_atomic(), irqs_disabled()); 8334 "BUG: sleeping function called from invalid context at %s:%d\n",
8254 debug_show_held_locks(current); 8335 file, line);
8255 if (irqs_disabled()) 8336 printk(KERN_ERR
8256 print_irqtrace_events(current); 8337 "in_atomic(): %d, irqs_disabled(): %d, pid: %d, name: %s\n",
8257 dump_stack(); 8338 in_atomic(), irqs_disabled(),
8258 } 8339 current->pid, current->comm);
8340
8341 debug_show_held_locks(current);
8342 if (irqs_disabled())
8343 print_irqtrace_events(current);
8344 dump_stack();
8259#endif 8345#endif
8260} 8346}
8261EXPORT_SYMBOL(__might_sleep); 8347EXPORT_SYMBOL(__might_sleep);
@@ -8753,73 +8839,95 @@ static DEFINE_MUTEX(rt_constraints_mutex);
8753static unsigned long to_ratio(u64 period, u64 runtime) 8839static unsigned long to_ratio(u64 period, u64 runtime)
8754{ 8840{
8755 if (runtime == RUNTIME_INF) 8841 if (runtime == RUNTIME_INF)
8756 return 1ULL << 16; 8842 return 1ULL << 20;
8757 8843
8758 return div64_u64(runtime << 16, period); 8844 return div64_u64(runtime << 20, period);
8759} 8845}
8760 8846
8761#ifdef CONFIG_CGROUP_SCHED 8847/* Must be called with tasklist_lock held */
8762static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime) 8848static inline int tg_has_rt_tasks(struct task_group *tg)
8763{ 8849{
8764 struct task_group *tgi, *parent = tg->parent; 8850 struct task_struct *g, *p;
8765 unsigned long total = 0;
8766 8851
8767 if (!parent) { 8852 do_each_thread(g, p) {
8768 if (global_rt_period() < period) 8853 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg)
8769 return 0; 8854 return 1;
8855 } while_each_thread(g, p);
8770 8856
8771 return to_ratio(period, runtime) < 8857 return 0;
8772 to_ratio(global_rt_period(), global_rt_runtime()); 8858}
8773 }
8774 8859
8775 if (ktime_to_ns(parent->rt_bandwidth.rt_period) < period) 8860struct rt_schedulable_data {
8776 return 0; 8861 struct task_group *tg;
8862 u64 rt_period;
8863 u64 rt_runtime;
8864};
8777 8865
8778 rcu_read_lock(); 8866static int tg_schedulable(struct task_group *tg, void *data)
8779 list_for_each_entry_rcu(tgi, &parent->children, siblings) { 8867{
8780 if (tgi == tg) 8868 struct rt_schedulable_data *d = data;
8781 continue; 8869 struct task_group *child;
8870 unsigned long total, sum = 0;
8871 u64 period, runtime;
8872
8873 period = ktime_to_ns(tg->rt_bandwidth.rt_period);
8874 runtime = tg->rt_bandwidth.rt_runtime;
8782 8875
8783 total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), 8876 if (tg == d->tg) {
8784 tgi->rt_bandwidth.rt_runtime); 8877 period = d->rt_period;
8878 runtime = d->rt_runtime;
8785 } 8879 }
8786 rcu_read_unlock();
8787 8880
8788 return total + to_ratio(period, runtime) <= 8881 /*
8789 to_ratio(ktime_to_ns(parent->rt_bandwidth.rt_period), 8882 * Cannot have more runtime than the period.
8790 parent->rt_bandwidth.rt_runtime); 8883 */
8791} 8884 if (runtime > period && runtime != RUNTIME_INF)
8792#elif defined CONFIG_USER_SCHED 8885 return -EINVAL;
8793static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8794{
8795 struct task_group *tgi;
8796 unsigned long total = 0;
8797 unsigned long global_ratio =
8798 to_ratio(global_rt_period(), global_rt_runtime());
8799 8886
8800 rcu_read_lock(); 8887 /*
8801 list_for_each_entry_rcu(tgi, &task_groups, list) { 8888 * Ensure we don't starve existing RT tasks.
8802 if (tgi == tg) 8889 */
8803 continue; 8890 if (rt_bandwidth_enabled() && !runtime && tg_has_rt_tasks(tg))
8891 return -EBUSY;
8892
8893 total = to_ratio(period, runtime);
8894
8895 /*
8896 * Nobody can have more than the global setting allows.
8897 */
8898 if (total > to_ratio(global_rt_period(), global_rt_runtime()))
8899 return -EINVAL;
8900
8901 /*
8902 * The sum of our children's runtime should not exceed our own.
8903 */
8904 list_for_each_entry_rcu(child, &tg->children, siblings) {
8905 period = ktime_to_ns(child->rt_bandwidth.rt_period);
8906 runtime = child->rt_bandwidth.rt_runtime;
8804 8907
8805 total += to_ratio(ktime_to_ns(tgi->rt_bandwidth.rt_period), 8908 if (child == d->tg) {
8806 tgi->rt_bandwidth.rt_runtime); 8909 period = d->rt_period;
8910 runtime = d->rt_runtime;
8911 }
8912
8913 sum += to_ratio(period, runtime);
8807 } 8914 }
8808 rcu_read_unlock();
8809 8915
8810 return total + to_ratio(period, runtime) < global_ratio; 8916 if (sum > total)
8917 return -EINVAL;
8918
8919 return 0;
8811} 8920}
8812#endif
8813 8921
8814/* Must be called with tasklist_lock held */ 8922static int __rt_schedulable(struct task_group *tg, u64 period, u64 runtime)
8815static inline int tg_has_rt_tasks(struct task_group *tg)
8816{ 8923{
8817 struct task_struct *g, *p; 8924 struct rt_schedulable_data data = {
8818 do_each_thread(g, p) { 8925 .tg = tg,
8819 if (rt_task(p) && rt_rq_of_se(&p->rt)->tg == tg) 8926 .rt_period = period,
8820 return 1; 8927 .rt_runtime = runtime,
8821 } while_each_thread(g, p); 8928 };
8822 return 0; 8929
8930 return walk_tg_tree(tg_schedulable, tg_nop, &data);
8823} 8931}
8824 8932
8825static int tg_set_bandwidth(struct task_group *tg, 8933static int tg_set_bandwidth(struct task_group *tg,
@@ -8829,14 +8937,9 @@ static int tg_set_bandwidth(struct task_group *tg,
8829 8937
8830 mutex_lock(&rt_constraints_mutex); 8938 mutex_lock(&rt_constraints_mutex);
8831 read_lock(&tasklist_lock); 8939 read_lock(&tasklist_lock);
8832 if (rt_runtime == 0 && tg_has_rt_tasks(tg)) { 8940 err = __rt_schedulable(tg, rt_period, rt_runtime);
8833 err = -EBUSY; 8941 if (err)
8834 goto unlock; 8942 goto unlock;
8835 }
8836 if (!__rt_schedulable(tg, rt_period, rt_runtime)) {
8837 err = -EINVAL;
8838 goto unlock;
8839 }
8840 8943
8841 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock); 8944 spin_lock_irq(&tg->rt_bandwidth.rt_runtime_lock);
8842 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period); 8945 tg->rt_bandwidth.rt_period = ns_to_ktime(rt_period);
@@ -8905,19 +9008,25 @@ long sched_group_rt_period(struct task_group *tg)
8905 9008
8906static int sched_rt_global_constraints(void) 9009static int sched_rt_global_constraints(void)
8907{ 9010{
8908 struct task_group *tg = &root_task_group; 9011 u64 runtime, period;
8909 u64 rt_runtime, rt_period;
8910 int ret = 0; 9012 int ret = 0;
8911 9013
8912 if (sysctl_sched_rt_period <= 0) 9014 if (sysctl_sched_rt_period <= 0)
8913 return -EINVAL; 9015 return -EINVAL;
8914 9016
8915 rt_period = ktime_to_ns(tg->rt_bandwidth.rt_period); 9017 runtime = global_rt_runtime();
8916 rt_runtime = tg->rt_bandwidth.rt_runtime; 9018 period = global_rt_period();
9019
9020 /*
9021 * Sanity check on the sysctl variables.
9022 */
9023 if (runtime > period && runtime != RUNTIME_INF)
9024 return -EINVAL;
8917 9025
8918 mutex_lock(&rt_constraints_mutex); 9026 mutex_lock(&rt_constraints_mutex);
8919 if (!__rt_schedulable(tg, rt_period, rt_runtime)) 9027 read_lock(&tasklist_lock);
8920 ret = -EINVAL; 9028 ret = __rt_schedulable(NULL, 0, 0);
9029 read_unlock(&tasklist_lock);
8921 mutex_unlock(&rt_constraints_mutex); 9030 mutex_unlock(&rt_constraints_mutex);
8922 9031
8923 return ret; 9032 return ret;
@@ -8991,7 +9100,6 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
8991 9100
8992 if (!cgrp->parent) { 9101 if (!cgrp->parent) {
8993 /* This is early initialization for the top cgroup */ 9102 /* This is early initialization for the top cgroup */
8994 init_task_group.css.cgroup = cgrp;
8995 return &init_task_group.css; 9103 return &init_task_group.css;
8996 } 9104 }
8997 9105
@@ -9000,9 +9108,6 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
9000 if (IS_ERR(tg)) 9108 if (IS_ERR(tg))
9001 return ERR_PTR(-ENOMEM); 9109 return ERR_PTR(-ENOMEM);
9002 9110
9003 /* Bind the cgroup to task_group object we just created */
9004 tg->css.cgroup = cgrp;
9005
9006 return &tg->css; 9111 return &tg->css;
9007} 9112}
9008 9113
diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c
index e8ab096ddfe3..81787248b60f 100644
--- a/kernel/sched_clock.c
+++ b/kernel/sched_clock.c
@@ -118,13 +118,13 @@ static u64 __update_sched_clock(struct sched_clock_data *scd, u64 now)
118 118
119 /* 119 /*
120 * scd->clock = clamp(scd->tick_gtod + delta, 120 * scd->clock = clamp(scd->tick_gtod + delta,
121 * max(scd->tick_gtod, scd->clock), 121 * max(scd->tick_gtod, scd->clock),
122 * scd->tick_gtod + TICK_NSEC); 122 * max(scd->clock, scd->tick_gtod + TICK_NSEC));
123 */ 123 */
124 124
125 clock = scd->tick_gtod + delta; 125 clock = scd->tick_gtod + delta;
126 min_clock = wrap_max(scd->tick_gtod, scd->clock); 126 min_clock = wrap_max(scd->tick_gtod, scd->clock);
127 max_clock = scd->tick_gtod + TICK_NSEC; 127 max_clock = wrap_max(scd->clock, scd->tick_gtod + TICK_NSEC);
128 128
129 clock = wrap_max(clock, min_clock); 129 clock = wrap_max(clock, min_clock);
130 clock = wrap_min(clock, max_clock); 130 clock = wrap_min(clock, max_clock);
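The sched_clock.c hunk above relaxes the upper clamp from tick_gtod + TICK_NSEC to max(scd->clock, tick_gtod + TICK_NSEC), so a per-CPU clock that is already ahead of the current tick window is left alone rather than stepped backwards. The helpers below are assumed to behave like the kernel's wrap_max()/wrap_min() (wraparound-tolerant comparisons via signed subtraction); the numbers are made up to show the effect.

#include <stdio.h>
#include <stdint.h>

static uint64_t wrap_max(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) > 0 ? x : y;
}

static uint64_t wrap_min(uint64_t x, uint64_t y)
{
	return (int64_t)(x - y) < 0 ? x : y;
}

int main(void)
{
	uint64_t tick_gtod = 1000, old_clock = 1500, tick_nsec = 100;
	uint64_t delta = 20;

	uint64_t clock = tick_gtod + delta;
	uint64_t min_clock = wrap_max(tick_gtod, old_clock);
	uint64_t max_clock = wrap_max(old_clock, tick_gtod + tick_nsec);

	clock = wrap_max(clock, min_clock);
	clock = wrap_min(clock, max_clock);
	printf("clock = %llu\n", (unsigned long long)clock);
	return 0;
}

With the old bound of tick_gtod + TICK_NSEC (1100) the result would have been clamped below the previously returned 1500; the new bound keeps the sketch's clock at 1500 and thus monotonic.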
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index bbe6b31c3c56..ad958c1ec708 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -333,12 +333,10 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
333 unsigned long flags; 333 unsigned long flags;
334 int num_threads = 1; 334 int num_threads = 1;
335 335
336 rcu_read_lock();
337 if (lock_task_sighand(p, &flags)) { 336 if (lock_task_sighand(p, &flags)) {
338 num_threads = atomic_read(&p->signal->count); 337 num_threads = atomic_read(&p->signal->count);
339 unlock_task_sighand(p, &flags); 338 unlock_task_sighand(p, &flags);
340 } 339 }
341 rcu_read_unlock();
342 340
343 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads); 341 SEQ_printf(m, "%s (%d, #threads: %d)\n", p->comm, p->pid, num_threads);
344 SEQ_printf(m, 342 SEQ_printf(m,
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index fb8994c6d4bb..f604dae71316 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -409,64 +409,6 @@ static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
409} 409}
410 410
411/* 411/*
412 * The goal of calc_delta_asym() is to be asymmetrically around NICE_0_LOAD, in
413 * that it favours >=0 over <0.
414 *
415 * -20 |
416 * |
417 * 0 --------+-------
418 * .'
419 * 19 .'
420 *
421 */
422static unsigned long
423calc_delta_asym(unsigned long delta, struct sched_entity *se)
424{
425 struct load_weight lw = {
426 .weight = NICE_0_LOAD,
427 .inv_weight = 1UL << (WMULT_SHIFT-NICE_0_SHIFT)
428 };
429
430 for_each_sched_entity(se) {
431 struct load_weight *se_lw = &se->load;
432 unsigned long rw = cfs_rq_of(se)->load.weight;
433
434#ifdef CONFIG_FAIR_SCHED_GROUP
435 struct cfs_rq *cfs_rq = se->my_q;
436 struct task_group *tg = NULL
437
438 if (cfs_rq)
439 tg = cfs_rq->tg;
440
441 if (tg && tg->shares < NICE_0_LOAD) {
442 /*
443 * scale shares to what it would have been had
444 * tg->weight been NICE_0_LOAD:
445 *
446 * weight = 1024 * shares / tg->weight
447 */
448 lw.weight *= se->load.weight;
449 lw.weight /= tg->shares;
450
451 lw.inv_weight = 0;
452
453 se_lw = &lw;
454 rw += lw.weight - se->load.weight;
455 } else
456#endif
457
458 if (se->load.weight < NICE_0_LOAD) {
459 se_lw = &lw;
460 rw += NICE_0_LOAD - se->load.weight;
461 }
462
463 delta = calc_delta_mine(delta, rw, se_lw);
464 }
465
466 return delta;
467}
468
469/*
470 * Update the current task's runtime statistics. Skip current tasks that 412 * Update the current task's runtime statistics. Skip current tasks that
471 * are not in our scheduling class. 413 * are not in our scheduling class.
472 */ 414 */
@@ -507,6 +449,7 @@ static void update_curr(struct cfs_rq *cfs_rq)
507 struct task_struct *curtask = task_of(curr); 449 struct task_struct *curtask = task_of(curr);
508 450
509 cpuacct_charge(curtask, delta_exec); 451 cpuacct_charge(curtask, delta_exec);
452 account_group_exec_runtime(curtask, delta_exec);
510 } 453 }
511} 454}
512 455
@@ -586,11 +529,12 @@ account_entity_enqueue(struct cfs_rq *cfs_rq, struct sched_entity *se)
586 update_load_add(&cfs_rq->load, se->load.weight); 529 update_load_add(&cfs_rq->load, se->load.weight);
587 if (!parent_entity(se)) 530 if (!parent_entity(se))
588 inc_cpu_load(rq_of(cfs_rq), se->load.weight); 531 inc_cpu_load(rq_of(cfs_rq), se->load.weight);
589 if (entity_is_task(se)) 532 if (entity_is_task(se)) {
590 add_cfs_task_weight(cfs_rq, se->load.weight); 533 add_cfs_task_weight(cfs_rq, se->load.weight);
534 list_add(&se->group_node, &cfs_rq->tasks);
535 }
591 cfs_rq->nr_running++; 536 cfs_rq->nr_running++;
592 se->on_rq = 1; 537 se->on_rq = 1;
593 list_add(&se->group_node, &cfs_rq->tasks);
594} 538}
595 539
596static void 540static void
@@ -599,11 +543,12 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
599 update_load_sub(&cfs_rq->load, se->load.weight); 543 update_load_sub(&cfs_rq->load, se->load.weight);
600 if (!parent_entity(se)) 544 if (!parent_entity(se))
601 dec_cpu_load(rq_of(cfs_rq), se->load.weight); 545 dec_cpu_load(rq_of(cfs_rq), se->load.weight);
602 if (entity_is_task(se)) 546 if (entity_is_task(se)) {
603 add_cfs_task_weight(cfs_rq, -se->load.weight); 547 add_cfs_task_weight(cfs_rq, -se->load.weight);
548 list_del_init(&se->group_node);
549 }
604 cfs_rq->nr_running--; 550 cfs_rq->nr_running--;
605 se->on_rq = 0; 551 se->on_rq = 0;
606 list_del_init(&se->group_node);
607} 552}
608 553
609static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se) 554static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
@@ -1085,7 +1030,6 @@ static long effective_load(struct task_group *tg, int cpu,
1085 long wl, long wg) 1030 long wl, long wg)
1086{ 1031{
1087 struct sched_entity *se = tg->se[cpu]; 1032 struct sched_entity *se = tg->se[cpu];
1088 long more_w;
1089 1033
1090 if (!tg->parent) 1034 if (!tg->parent)
1091 return wl; 1035 return wl;
@@ -1097,18 +1041,17 @@ static long effective_load(struct task_group *tg, int cpu,
1097 if (!wl && sched_feat(ASYM_EFF_LOAD)) 1041 if (!wl && sched_feat(ASYM_EFF_LOAD))
1098 return wl; 1042 return wl;
1099 1043
1100 /*
1101 * Instead of using this increment, also add the difference
1102 * between when the shares were last updated and now.
1103 */
1104 more_w = se->my_q->load.weight - se->my_q->rq_weight;
1105 wl += more_w;
1106 wg += more_w;
1107
1108 for_each_sched_entity(se) { 1044 for_each_sched_entity(se) {
1109#define D(n) (likely(n) ? (n) : 1)
1110
1111 long S, rw, s, a, b; 1045 long S, rw, s, a, b;
1046 long more_w;
1047
1048 /*
1049 * Instead of using this increment, also add the difference
1050 * between when the shares were last updated and now.
1051 */
1052 more_w = se->my_q->load.weight - se->my_q->rq_weight;
1053 wl += more_w;
1054 wg += more_w;
1112 1055
1113 S = se->my_q->tg->shares; 1056 S = se->my_q->tg->shares;
1114 s = se->my_q->shares; 1057 s = se->my_q->shares;
@@ -1117,7 +1060,11 @@ static long effective_load(struct task_group *tg, int cpu,
1117 a = S*(rw + wl); 1060 a = S*(rw + wl);
1118 b = S*rw + s*wg; 1061 b = S*rw + s*wg;
1119 1062
1120 wl = s*(a-b)/D(b); 1063 wl = s*(a-b);
1064
1065 if (likely(b))
1066 wl /= b;
1067
1121 /* 1068 /*
1122 * Assume the group is already running and will 1069 * Assume the group is already running and will
1123 * thus already be accounted for in the weight. 1070 * thus already be accounted for in the weight.
@@ -1126,7 +1073,6 @@ static long effective_load(struct task_group *tg, int cpu,
1126 * alter the group weight. 1073 * alter the group weight.
1127 */ 1074 */
1128 wg = 0; 1075 wg = 0;
1129#undef D
1130 } 1076 }
1131 1077
1132 return wl; 1078 return wl;
@@ -1143,7 +1089,7 @@ static inline unsigned long effective_load(struct task_group *tg, int cpu,
1143#endif 1089#endif
1144 1090
1145static int 1091static int
1146wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq, 1092wake_affine(struct sched_domain *this_sd, struct rq *this_rq,
1147 struct task_struct *p, int prev_cpu, int this_cpu, int sync, 1093 struct task_struct *p, int prev_cpu, int this_cpu, int sync,
1148 int idx, unsigned long load, unsigned long this_load, 1094 int idx, unsigned long load, unsigned long this_load,
1149 unsigned int imbalance) 1095 unsigned int imbalance)
@@ -1158,6 +1104,11 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
1158 if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS)) 1104 if (!(this_sd->flags & SD_WAKE_AFFINE) || !sched_feat(AFFINE_WAKEUPS))
1159 return 0; 1105 return 0;
1160 1106
1107 if (!sync && sched_feat(SYNC_WAKEUPS) &&
1108 curr->se.avg_overlap < sysctl_sched_migration_cost &&
1109 p->se.avg_overlap < sysctl_sched_migration_cost)
1110 sync = 1;
1111
1161 /* 1112 /*
1162 * If sync wakeup then subtract the (maximum possible) 1113 * If sync wakeup then subtract the (maximum possible)
1163 * effect of the currently running task from the load 1114 * effect of the currently running task from the load
@@ -1182,17 +1133,14 @@ wake_affine(struct rq *rq, struct sched_domain *this_sd, struct rq *this_rq,
1182 * a reasonable amount of time then attract this newly 1133 * a reasonable amount of time then attract this newly
1183 * woken task: 1134 * woken task:
1184 */ 1135 */
1185 if (sync && balanced) { 1136 if (sync && balanced)
1186 if (curr->se.avg_overlap < sysctl_sched_migration_cost && 1137 return 1;
1187 p->se.avg_overlap < sysctl_sched_migration_cost)
1188 return 1;
1189 }
1190 1138
1191 schedstat_inc(p, se.nr_wakeups_affine_attempts); 1139 schedstat_inc(p, se.nr_wakeups_affine_attempts);
1192 tl_per_task = cpu_avg_load_per_task(this_cpu); 1140 tl_per_task = cpu_avg_load_per_task(this_cpu);
1193 1141
1194 if ((tl <= load && tl + target_load(prev_cpu, idx) <= tl_per_task) || 1142 if (balanced || (tl <= load && tl + target_load(prev_cpu, idx) <=
1195 balanced) { 1143 tl_per_task)) {
1196 /* 1144 /*
1197 * This domain has SD_WAKE_AFFINE and 1145 * This domain has SD_WAKE_AFFINE and
1198 * p is cache cold in this domain, and 1146 * p is cache cold in this domain, and
@@ -1211,16 +1159,17 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
1211 struct sched_domain *sd, *this_sd = NULL; 1159 struct sched_domain *sd, *this_sd = NULL;
1212 int prev_cpu, this_cpu, new_cpu; 1160 int prev_cpu, this_cpu, new_cpu;
1213 unsigned long load, this_load; 1161 unsigned long load, this_load;
1214 struct rq *rq, *this_rq; 1162 struct rq *this_rq;
1215 unsigned int imbalance; 1163 unsigned int imbalance;
1216 int idx; 1164 int idx;
1217 1165
1218 prev_cpu = task_cpu(p); 1166 prev_cpu = task_cpu(p);
1219 rq = task_rq(p);
1220 this_cpu = smp_processor_id(); 1167 this_cpu = smp_processor_id();
1221 this_rq = cpu_rq(this_cpu); 1168 this_rq = cpu_rq(this_cpu);
1222 new_cpu = prev_cpu; 1169 new_cpu = prev_cpu;
1223 1170
1171 if (prev_cpu == this_cpu)
1172 goto out;
1224 /* 1173 /*
1225 * 'this_sd' is the first domain that both 1174 * 'this_sd' is the first domain that both
1226 * this_cpu and prev_cpu are present in: 1175 * this_cpu and prev_cpu are present in:
@@ -1248,13 +1197,10 @@ static int select_task_rq_fair(struct task_struct *p, int sync)
1248 load = source_load(prev_cpu, idx); 1197 load = source_load(prev_cpu, idx);
1249 this_load = target_load(this_cpu, idx); 1198 this_load = target_load(this_cpu, idx);
1250 1199
1251 if (wake_affine(rq, this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx, 1200 if (wake_affine(this_sd, this_rq, p, prev_cpu, this_cpu, sync, idx,
1252 load, this_load, imbalance)) 1201 load, this_load, imbalance))
1253 return this_cpu; 1202 return this_cpu;
1254 1203
1255 if (prev_cpu == this_cpu)
1256 goto out;
1257
1258 /* 1204 /*
1259 * Start passive balancing when half the imbalance_pct 1205 * Start passive balancing when half the imbalance_pct
1260 * limit is reached. 1206 * limit is reached.
@@ -1281,62 +1227,20 @@ static unsigned long wakeup_gran(struct sched_entity *se)
1281 * + nice tasks. 1227 * + nice tasks.
1282 */ 1228 */
1283 if (sched_feat(ASYM_GRAN)) 1229 if (sched_feat(ASYM_GRAN))
1284 gran = calc_delta_asym(sysctl_sched_wakeup_granularity, se); 1230 gran = calc_delta_mine(gran, NICE_0_LOAD, &se->load);
1285 else
1286 gran = calc_delta_fair(sysctl_sched_wakeup_granularity, se);
1287 1231
1288 return gran; 1232 return gran;
1289} 1233}
1290 1234
1291/* 1235/*
1292 * Should 'se' preempt 'curr'.
1293 *
1294 * |s1
1295 * |s2
1296 * |s3
1297 * g
1298 * |<--->|c
1299 *
1300 * w(c, s1) = -1
1301 * w(c, s2) = 0
1302 * w(c, s3) = 1
1303 *
1304 */
1305static int
1306wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se)
1307{
1308 s64 gran, vdiff = curr->vruntime - se->vruntime;
1309
1310 if (vdiff < 0)
1311 return -1;
1312
1313 gran = wakeup_gran(curr);
1314 if (vdiff > gran)
1315 return 1;
1316
1317 return 0;
1318}
1319
1320/* return depth at which a sched entity is present in the hierarchy */
1321static inline int depth_se(struct sched_entity *se)
1322{
1323 int depth = 0;
1324
1325 for_each_sched_entity(se)
1326 depth++;
1327
1328 return depth;
1329}
1330
1331/*
1332 * Preempt the current task with a newly woken task if needed: 1236 * Preempt the current task with a newly woken task if needed:
1333 */ 1237 */
1334static void check_preempt_wakeup(struct rq *rq, struct task_struct *p) 1238static void check_preempt_wakeup(struct rq *rq, struct task_struct *p, int sync)
1335{ 1239{
1336 struct task_struct *curr = rq->curr; 1240 struct task_struct *curr = rq->curr;
1337 struct cfs_rq *cfs_rq = task_cfs_rq(curr); 1241 struct cfs_rq *cfs_rq = task_cfs_rq(curr);
1338 struct sched_entity *se = &curr->se, *pse = &p->se; 1242 struct sched_entity *se = &curr->se, *pse = &p->se;
1339 int se_depth, pse_depth; 1243 s64 delta_exec;
1340 1244
1341 if (unlikely(rt_prio(p->prio))) { 1245 if (unlikely(rt_prio(p->prio))) {
1342 update_rq_clock(rq); 1246 update_rq_clock(rq);
@@ -1351,6 +1255,13 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
1351 cfs_rq_of(pse)->next = pse; 1255 cfs_rq_of(pse)->next = pse;
1352 1256
1353 /* 1257 /*
1258 * We can come here with TIF_NEED_RESCHED already set from new task
1259 * wake up path.
1260 */
1261 if (test_tsk_need_resched(curr))
1262 return;
1263
1264 /*
1354 * Batch tasks do not preempt (their preemption is driven by 1265 * Batch tasks do not preempt (their preemption is driven by
1355 * the tick): 1266 * the tick):
1356 */ 1267 */
@@ -1360,33 +1271,15 @@ static void check_preempt_wakeup(struct rq *rq, struct task_struct *p)
1360 if (!sched_feat(WAKEUP_PREEMPT)) 1271 if (!sched_feat(WAKEUP_PREEMPT))
1361 return; 1272 return;
1362 1273
1363 /* 1274 if (sched_feat(WAKEUP_OVERLAP) && (sync ||
1364 * preemption test can be made between sibling entities who are in the 1275 (se->avg_overlap < sysctl_sched_migration_cost &&
1365 * same cfs_rq i.e who have a common parent. Walk up the hierarchy of 1276 pse->avg_overlap < sysctl_sched_migration_cost))) {
1366 * both tasks until we find their ancestors who are siblings of common 1277 resched_task(curr);
1367 * parent. 1278 return;
1368 */
1369
1370 /* First walk up until both entities are at same depth */
1371 se_depth = depth_se(se);
1372 pse_depth = depth_se(pse);
1373
1374 while (se_depth > pse_depth) {
1375 se_depth--;
1376 se = parent_entity(se);
1377 }
1378
1379 while (pse_depth > se_depth) {
1380 pse_depth--;
1381 pse = parent_entity(pse);
1382 }
1383
1384 while (!is_same_group(se, pse)) {
1385 se = parent_entity(se);
1386 pse = parent_entity(pse);
1387 } 1279 }
1388 1280
1389 if (wakeup_preempt_entity(se, pse) == 1) 1281 delta_exec = se->sum_exec_runtime - se->prev_sum_exec_runtime;
1282 if (delta_exec > wakeup_gran(pse))
1390 resched_task(curr); 1283 resched_task(curr);
1391} 1284}
1392 1285
@@ -1445,19 +1338,9 @@ __load_balance_iterator(struct cfs_rq *cfs_rq, struct list_head *next)
1445 if (next == &cfs_rq->tasks) 1338 if (next == &cfs_rq->tasks)
1446 return NULL; 1339 return NULL;
1447 1340
1448 /* Skip over entities that are not tasks */ 1341 se = list_entry(next, struct sched_entity, group_node);
1449 do { 1342 p = task_of(se);
1450 se = list_entry(next, struct sched_entity, group_node); 1343 cfs_rq->balance_iterator = next->next;
1451 next = next->next;
1452 } while (next != &cfs_rq->tasks && !entity_is_task(se));
1453
1454 if (next == &cfs_rq->tasks)
1455 return NULL;
1456
1457 cfs_rq->balance_iterator = next;
1458
1459 if (entity_is_task(se))
1460 p = task_of(se);
1461 1344
1462 return p; 1345 return p;
1463} 1346}
@@ -1507,7 +1390,7 @@ load_balance_fair(struct rq *this_rq, int this_cpu, struct rq *busiest,
1507 rcu_read_lock(); 1390 rcu_read_lock();
1508 update_h_load(busiest_cpu); 1391 update_h_load(busiest_cpu);
1509 1392
1510 list_for_each_entry(tg, &task_groups, list) { 1393 list_for_each_entry_rcu(tg, &task_groups, list) {
1511 struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu]; 1394 struct cfs_rq *busiest_cfs_rq = tg->cfs_rq[busiest_cpu];
1512 unsigned long busiest_h_load = busiest_cfs_rq->h_load; 1395 unsigned long busiest_h_load = busiest_cfs_rq->h_load;
1513 unsigned long busiest_weight = busiest_cfs_rq->load.weight; 1396 unsigned long busiest_weight = busiest_cfs_rq->load.weight;
@@ -1620,10 +1503,10 @@ static void task_new_fair(struct rq *rq, struct task_struct *p)
1620 * 'current' within the tree based on its new key value. 1503 * 'current' within the tree based on its new key value.
1621 */ 1504 */
1622 swap(curr->vruntime, se->vruntime); 1505 swap(curr->vruntime, se->vruntime);
1506 resched_task(rq->curr);
1623 } 1507 }
1624 1508
1625 enqueue_task_fair(rq, p, 0); 1509 enqueue_task_fair(rq, p, 0);
1626 resched_task(rq->curr);
1627} 1510}
1628 1511
1629/* 1512/*
@@ -1642,7 +1525,7 @@ static void prio_changed_fair(struct rq *rq, struct task_struct *p,
1642 if (p->prio > oldprio) 1525 if (p->prio > oldprio)
1643 resched_task(rq->curr); 1526 resched_task(rq->curr);
1644 } else 1527 } else
1645 check_preempt_curr(rq, p); 1528 check_preempt_curr(rq, p, 0);
1646} 1529}
1647 1530
1648/* 1531/*
@@ -1659,7 +1542,7 @@ static void switched_to_fair(struct rq *rq, struct task_struct *p,
1659 if (running) 1542 if (running)
1660 resched_task(rq->curr); 1543 resched_task(rq->curr);
1661 else 1544 else
1662 check_preempt_curr(rq, p); 1545 check_preempt_curr(rq, p, 0);
1663} 1546}
1664 1547
1665/* Account for a task changing its policy or group. 1548/* Account for a task changing its policy or group.
diff --git a/kernel/sched_features.h b/kernel/sched_features.h
index 9353ca78154e..7c9e8f4a049f 100644
--- a/kernel/sched_features.h
+++ b/kernel/sched_features.h
@@ -11,3 +11,4 @@ SCHED_FEAT(ASYM_GRAN, 1)
11SCHED_FEAT(LB_BIAS, 1) 11SCHED_FEAT(LB_BIAS, 1)
12SCHED_FEAT(LB_WAKEUP_UPDATE, 1) 12SCHED_FEAT(LB_WAKEUP_UPDATE, 1)
13SCHED_FEAT(ASYM_EFF_LOAD, 1) 13SCHED_FEAT(ASYM_EFF_LOAD, 1)
14SCHED_FEAT(WAKEUP_OVERLAP, 0)
diff --git a/kernel/sched_idletask.c b/kernel/sched_idletask.c
index 3a4f92dbbe66..dec4ccabe2f5 100644
--- a/kernel/sched_idletask.c
+++ b/kernel/sched_idletask.c
@@ -14,7 +14,7 @@ static int select_task_rq_idle(struct task_struct *p, int sync)
14/* 14/*
15 * Idle tasks are unconditionally rescheduled: 15 * Idle tasks are unconditionally rescheduled:
16 */ 16 */
17static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p) 17static void check_preempt_curr_idle(struct rq *rq, struct task_struct *p, int sync)
18{ 18{
19 resched_task(rq->idle); 19 resched_task(rq->idle);
20} 20}
@@ -76,7 +76,7 @@ static void switched_to_idle(struct rq *rq, struct task_struct *p,
76 if (running) 76 if (running)
77 resched_task(rq->curr); 77 resched_task(rq->curr);
78 else 78 else
79 check_preempt_curr(rq, p); 79 check_preempt_curr(rq, p, 0);
80} 80}
81 81
82static void prio_changed_idle(struct rq *rq, struct task_struct *p, 82static void prio_changed_idle(struct rq *rq, struct task_struct *p,
@@ -93,7 +93,7 @@ static void prio_changed_idle(struct rq *rq, struct task_struct *p,
93 if (p->prio > oldprio) 93 if (p->prio > oldprio)
94 resched_task(rq->curr); 94 resched_task(rq->curr);
95 } else 95 } else
96 check_preempt_curr(rq, p); 96 check_preempt_curr(rq, p, 0);
97} 97}
98 98
99/* 99/*
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 1113157b2058..b446dc87494f 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -102,12 +102,12 @@ static void dequeue_rt_entity(struct sched_rt_entity *rt_se);
102 102
103static void sched_rt_rq_enqueue(struct rt_rq *rt_rq) 103static void sched_rt_rq_enqueue(struct rt_rq *rt_rq)
104{ 104{
105 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr;
105 struct sched_rt_entity *rt_se = rt_rq->rt_se; 106 struct sched_rt_entity *rt_se = rt_rq->rt_se;
106 107
107 if (rt_se && !on_rt_rq(rt_se) && rt_rq->rt_nr_running) { 108 if (rt_rq->rt_nr_running) {
108 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; 109 if (rt_se && !on_rt_rq(rt_se))
109 110 enqueue_rt_entity(rt_se);
110 enqueue_rt_entity(rt_se);
111 if (rt_rq->highest_prio < curr->prio) 111 if (rt_rq->highest_prio < curr->prio)
112 resched_task(curr); 112 resched_task(curr);
113 } 113 }
@@ -231,6 +231,9 @@ static inline struct rt_bandwidth *sched_rt_bandwidth(struct rt_rq *rt_rq)
231#endif /* CONFIG_RT_GROUP_SCHED */ 231#endif /* CONFIG_RT_GROUP_SCHED */
232 232
233#ifdef CONFIG_SMP 233#ifdef CONFIG_SMP
234/*
 235 * We ran out of runtime; see if we can borrow some from our neighbours.
236 */
234static int do_balance_runtime(struct rt_rq *rt_rq) 237static int do_balance_runtime(struct rt_rq *rt_rq)
235{ 238{
236 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 239 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
@@ -250,9 +253,18 @@ static int do_balance_runtime(struct rt_rq *rt_rq)
250 continue; 253 continue;
251 254
252 spin_lock(&iter->rt_runtime_lock); 255 spin_lock(&iter->rt_runtime_lock);
256 /*
257 * Either all rqs have inf runtime and there's nothing to steal
258 * or __disable_runtime() below sets a specific rq to inf to
 259 * indicate it's been disabled and disallow stealing.
260 */
253 if (iter->rt_runtime == RUNTIME_INF) 261 if (iter->rt_runtime == RUNTIME_INF)
254 goto next; 262 goto next;
255 263
264 /*
265 * From runqueues with spare time, take 1/n part of their
266 * spare time, but no more than our period.
267 */
256 diff = iter->rt_runtime - iter->rt_time; 268 diff = iter->rt_runtime - iter->rt_time;
257 if (diff > 0) { 269 if (diff > 0) {
258 diff = div_u64((u64)diff, weight); 270 diff = div_u64((u64)diff, weight);
@@ -274,6 +286,9 @@ next:
274 return more; 286 return more;
275} 287}
276 288
289/*
 290 * Ensure this RQ takes back all the runtime it lent to its neighbours.
291 */
277static void __disable_runtime(struct rq *rq) 292static void __disable_runtime(struct rq *rq)
278{ 293{
279 struct root_domain *rd = rq->rd; 294 struct root_domain *rd = rq->rd;
@@ -289,17 +304,33 @@ static void __disable_runtime(struct rq *rq)
289 304
290 spin_lock(&rt_b->rt_runtime_lock); 305 spin_lock(&rt_b->rt_runtime_lock);
291 spin_lock(&rt_rq->rt_runtime_lock); 306 spin_lock(&rt_rq->rt_runtime_lock);
307 /*
308 * Either we're all inf and nobody needs to borrow, or we're
309 * already disabled and thus have nothing to do, or we have
310 * exactly the right amount of runtime to take out.
311 */
292 if (rt_rq->rt_runtime == RUNTIME_INF || 312 if (rt_rq->rt_runtime == RUNTIME_INF ||
293 rt_rq->rt_runtime == rt_b->rt_runtime) 313 rt_rq->rt_runtime == rt_b->rt_runtime)
294 goto balanced; 314 goto balanced;
295 spin_unlock(&rt_rq->rt_runtime_lock); 315 spin_unlock(&rt_rq->rt_runtime_lock);
296 316
317 /*
318 * Calculate the difference between what we started out with
 319 * and what we currently have; that's the amount of runtime
 320 * we lent and now have to reclaim.
321 */
297 want = rt_b->rt_runtime - rt_rq->rt_runtime; 322 want = rt_b->rt_runtime - rt_rq->rt_runtime;
298 323
324 /*
325 * Greedy reclaim, take back as much as we can.
326 */
299 for_each_cpu_mask(i, rd->span) { 327 for_each_cpu_mask(i, rd->span) {
300 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i); 328 struct rt_rq *iter = sched_rt_period_rt_rq(rt_b, i);
301 s64 diff; 329 s64 diff;
302 330
331 /*
332 * Can't reclaim from ourselves or disabled runqueues.
333 */
303 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF) 334 if (iter == rt_rq || iter->rt_runtime == RUNTIME_INF)
304 continue; 335 continue;
305 336
@@ -319,8 +350,16 @@ static void __disable_runtime(struct rq *rq)
319 } 350 }
320 351
321 spin_lock(&rt_rq->rt_runtime_lock); 352 spin_lock(&rt_rq->rt_runtime_lock);
353 /*
354 * We cannot be left wanting - that would mean some runtime
355 * leaked out of the system.
356 */
322 BUG_ON(want); 357 BUG_ON(want);
323balanced: 358balanced:
359 /*
360 * Disable all the borrow logic by pretending we have inf
361 * runtime - in which case borrowing doesn't make sense.
362 */
324 rt_rq->rt_runtime = RUNTIME_INF; 363 rt_rq->rt_runtime = RUNTIME_INF;
325 spin_unlock(&rt_rq->rt_runtime_lock); 364 spin_unlock(&rt_rq->rt_runtime_lock);
326 spin_unlock(&rt_b->rt_runtime_lock); 365 spin_unlock(&rt_b->rt_runtime_lock);
@@ -343,6 +382,9 @@ static void __enable_runtime(struct rq *rq)
343 if (unlikely(!scheduler_running)) 382 if (unlikely(!scheduler_running))
344 return; 383 return;
345 384
385 /*
386 * Reset each runqueue's bandwidth settings
387 */
346 for_each_leaf_rt_rq(rt_rq, rq) { 388 for_each_leaf_rt_rq(rt_rq, rq) {
347 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq); 389 struct rt_bandwidth *rt_b = sched_rt_bandwidth(rt_rq);
348 390
@@ -389,7 +431,7 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun)
389 int i, idle = 1; 431 int i, idle = 1;
390 cpumask_t span; 432 cpumask_t span;
391 433
392 if (rt_b->rt_runtime == RUNTIME_INF) 434 if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
393 return 1; 435 return 1;
394 436
395 span = sched_rt_period_mask(); 437 span = sched_rt_period_mask();
@@ -484,9 +526,14 @@ static void update_curr_rt(struct rq *rq)
484 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec)); 526 schedstat_set(curr->se.exec_max, max(curr->se.exec_max, delta_exec));
485 527
486 curr->se.sum_exec_runtime += delta_exec; 528 curr->se.sum_exec_runtime += delta_exec;
529 account_group_exec_runtime(curr, delta_exec);
530
487 curr->se.exec_start = rq->clock; 531 curr->se.exec_start = rq->clock;
488 cpuacct_charge(curr, delta_exec); 532 cpuacct_charge(curr, delta_exec);
489 533
534 if (!rt_bandwidth_enabled())
535 return;
536
490 for_each_sched_rt_entity(rt_se) { 537 for_each_sched_rt_entity(rt_se) {
491 rt_rq = rt_rq_of_se(rt_se); 538 rt_rq = rt_rq_of_se(rt_se);
492 539
@@ -784,7 +831,7 @@ static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
784/* 831/*
785 * Preempt the current task with a newly woken task if needed: 832 * Preempt the current task with a newly woken task if needed:
786 */ 833 */
787static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p) 834static void check_preempt_curr_rt(struct rq *rq, struct task_struct *p, int sync)
788{ 835{
789 if (p->prio < rq->curr->prio) { 836 if (p->prio < rq->curr->prio) {
790 resched_task(rq->curr); 837 resched_task(rq->curr);
@@ -1413,7 +1460,7 @@ static void watchdog(struct rq *rq, struct task_struct *p)
1413 p->rt.timeout++; 1460 p->rt.timeout++;
1414 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); 1461 next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ);
1415 if (p->rt.timeout > next) 1462 if (p->rt.timeout > next)
1416 p->it_sched_expires = p->se.sum_exec_runtime; 1463 p->cputime_expires.sched_exp = p->se.sum_exec_runtime;
1417 } 1464 }
1418} 1465}
1419 1466
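
The comments above describe do_balance_runtime()'s rule: from every neighbouring runqueue with spare runtime, borrow a 1/weight share of that spare, capped so the borrower never exceeds its full period. A plain-C sketch with invented numbers follows; it is not the kernel implementation, which also takes the per-rq locks and debits the lender.

#include <stdio.h>

int main(void)
{
	long long period = 950000000;           /* rt_period, ns */
	long long my_runtime = 400000000;       /* what this rt_rq currently has */
	long long spare[] = { 300000000, 0, 150000000 }; /* iter->rt_runtime - iter->rt_time */
	int weight = 4;                          /* CPUs in the root domain */

	for (int i = 0; i < 3; i++) {
		long long diff = spare[i];

		if (diff <= 0)
			continue;
		diff /= weight;                  /* take 1/n of the spare time... */
		if (my_runtime + diff > period)  /* ...but no more than our period */
			diff = period - my_runtime;
		my_runtime += diff;
	}
	printf("runtime after borrowing: %lld ns\n", my_runtime);
	return 0;
}
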
diff --git a/kernel/sched_stats.h b/kernel/sched_stats.h
index 8385d43987e2..b8c156979cf2 100644
--- a/kernel/sched_stats.h
+++ b/kernel/sched_stats.h
@@ -270,3 +270,89 @@ sched_info_switch(struct task_struct *prev, struct task_struct *next)
270#define sched_info_switch(t, next) do { } while (0) 270#define sched_info_switch(t, next) do { } while (0)
271#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */ 271#endif /* CONFIG_SCHEDSTATS || CONFIG_TASK_DELAY_ACCT */
272 272
273/*
274 * The following are functions that support scheduler-internal time accounting.
275 * These functions are generally called at the timer tick. None of this depends
276 * on CONFIG_SCHEDSTATS.
277 */
278
279/**
280 * account_group_user_time - Maintain utime for a thread group.
281 *
282 * @tsk: Pointer to task structure.
283 * @cputime: Time value by which to increment the utime field of the
284 * thread_group_cputime structure.
285 *
286 * If thread group time is being maintained, get the structure for the
287 * running CPU and update the utime field there.
288 */
289static inline void account_group_user_time(struct task_struct *tsk,
290 cputime_t cputime)
291{
292 struct signal_struct *sig;
293
294 sig = tsk->signal;
295 if (unlikely(!sig))
296 return;
297 if (sig->cputime.totals) {
298 struct task_cputime *times;
299
300 times = per_cpu_ptr(sig->cputime.totals, get_cpu());
301 times->utime = cputime_add(times->utime, cputime);
302 put_cpu_no_resched();
303 }
304}
305
306/**
307 * account_group_system_time - Maintain stime for a thread group.
308 *
309 * @tsk: Pointer to task structure.
310 * @cputime: Time value by which to increment the stime field of the
311 * thread_group_cputime structure.
312 *
313 * If thread group time is being maintained, get the structure for the
314 * running CPU and update the stime field there.
315 */
316static inline void account_group_system_time(struct task_struct *tsk,
317 cputime_t cputime)
318{
319 struct signal_struct *sig;
320
321 sig = tsk->signal;
322 if (unlikely(!sig))
323 return;
324 if (sig->cputime.totals) {
325 struct task_cputime *times;
326
327 times = per_cpu_ptr(sig->cputime.totals, get_cpu());
328 times->stime = cputime_add(times->stime, cputime);
329 put_cpu_no_resched();
330 }
331}
332
333/**
334 * account_group_exec_runtime - Maintain exec runtime for a thread group.
335 *
336 * @tsk: Pointer to task structure.
337 * @ns: Time value by which to increment the sum_exec_runtime field
338 * of the thread_group_cputime structure.
339 *
340 * If thread group time is being maintained, get the structure for the
341 * running CPU and update the sum_exec_runtime field there.
342 */
343static inline void account_group_exec_runtime(struct task_struct *tsk,
344 unsigned long long ns)
345{
346 struct signal_struct *sig;
347
348 sig = tsk->signal;
349 if (unlikely(!sig))
350 return;
351 if (sig->cputime.totals) {
352 struct task_cputime *times;
353
354 times = per_cpu_ptr(sig->cputime.totals, get_cpu());
355 times->sum_exec_runtime += ns;
356 put_cpu_no_resched();
357 }
358}
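
All three helpers above follow one pattern: the tick path adds the sampled time into the per-CPU slot of the thread group's cputime totals, so no shared lock is taken on the hot path, and readers such as thread_group_cputime() sum the slots later. A userspace sketch of that split, with a fixed array standing in for the per-CPU allocation; the names here are illustrative only.

#include <stdio.h>

#define NR_CPUS 4

/* Per-CPU accumulation slots, standing in for sig->cputime.totals. */
struct group_times {
	unsigned long long utime[NR_CPUS];
};

/* Hot path: the tick on 'cpu' only touches its own slot (no locking). */
static void account_user_tick(struct group_times *gt, int cpu,
			      unsigned long long delta)
{
	gt->utime[cpu] += delta;
}

/* Slow path: readers such as sys_times()/getrusage() sum all slots. */
static unsigned long long group_utime(const struct group_times *gt)
{
	unsigned long long sum = 0;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		sum += gt->utime[cpu];
	return sum;
}

int main(void)
{
	struct group_times gt = { { 0 } };

	account_user_tick(&gt, 0, 1000);
	account_user_tick(&gt, 2, 3000);
	printf("group utime: %llu\n", group_utime(&gt));
	return 0;
}
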
diff --git a/kernel/signal.c b/kernel/signal.c
index e661b01d340f..105217da5c82 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -27,6 +27,7 @@
27#include <linux/freezer.h> 27#include <linux/freezer.h>
28#include <linux/pid_namespace.h> 28#include <linux/pid_namespace.h>
29#include <linux/nsproxy.h> 29#include <linux/nsproxy.h>
30#include <trace/sched.h>
30 31
31#include <asm/param.h> 32#include <asm/param.h>
32#include <asm/uaccess.h> 33#include <asm/uaccess.h>
@@ -803,6 +804,8 @@ static int send_signal(int sig, struct siginfo *info, struct task_struct *t,
803 struct sigpending *pending; 804 struct sigpending *pending;
804 struct sigqueue *q; 805 struct sigqueue *q;
805 806
807 trace_sched_signal_send(sig, t);
808
806 assert_spin_locked(&t->sighand->siglock); 809 assert_spin_locked(&t->sighand->siglock);
807 if (!prepare_signal(sig, t)) 810 if (!prepare_signal(sig, t))
808 return 0; 811 return 0;
@@ -1338,6 +1341,7 @@ int do_notify_parent(struct task_struct *tsk, int sig)
1338 struct siginfo info; 1341 struct siginfo info;
1339 unsigned long flags; 1342 unsigned long flags;
1340 struct sighand_struct *psig; 1343 struct sighand_struct *psig;
1344 struct task_cputime cputime;
1341 int ret = sig; 1345 int ret = sig;
1342 1346
1343 BUG_ON(sig == -1); 1347 BUG_ON(sig == -1);
@@ -1368,10 +1372,9 @@ int do_notify_parent(struct task_struct *tsk, int sig)
1368 1372
1369 info.si_uid = tsk->uid; 1373 info.si_uid = tsk->uid;
1370 1374
1371 info.si_utime = cputime_to_clock_t(cputime_add(tsk->utime, 1375 thread_group_cputime(tsk, &cputime);
1372 tsk->signal->utime)); 1376 info.si_utime = cputime_to_jiffies(cputime.utime);
1373 info.si_stime = cputime_to_clock_t(cputime_add(tsk->stime, 1377 info.si_stime = cputime_to_jiffies(cputime.stime);
1374 tsk->signal->stime));
1375 1378
1376 info.si_status = tsk->exit_code & 0x7f; 1379 info.si_status = tsk->exit_code & 0x7f;
1377 if (tsk->exit_code & 0x80) 1380 if (tsk->exit_code & 0x80)
diff --git a/kernel/softirq.c b/kernel/softirq.c
index c506f266a6b9..7110daeb9a90 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -6,6 +6,8 @@
6 * Distribute under GPLv2. 6 * Distribute under GPLv2.
7 * 7 *
8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903) 8 * Rewritten. Old one was good in 2.2, but in 2.3 it was immoral. --ANK (990903)
9 *
10 * Remote softirq infrastructure is by Jens Axboe.
9 */ 11 */
10 12
11#include <linux/module.h> 13#include <linux/module.h>
@@ -46,7 +48,7 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
46EXPORT_SYMBOL(irq_stat); 48EXPORT_SYMBOL(irq_stat);
47#endif 49#endif
48 50
49static struct softirq_action softirq_vec[32] __cacheline_aligned_in_smp; 51static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
50 52
51static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); 53static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
52 54
@@ -205,7 +207,18 @@ restart:
205 207
206 do { 208 do {
207 if (pending & 1) { 209 if (pending & 1) {
210 int prev_count = preempt_count();
211
208 h->action(h); 212 h->action(h);
213
214 if (unlikely(prev_count != preempt_count())) {
215 printk(KERN_ERR "huh, entered softirq %td %p"
216 "with preempt_count %08x,"
217 " exited with %08x?\n", h - softirq_vec,
218 h->action, prev_count, preempt_count());
219 preempt_count() = prev_count;
220 }
221
209 rcu_bh_qsctr_inc(cpu); 222 rcu_bh_qsctr_inc(cpu);
210 } 223 }
211 h++; 224 h++;
@@ -254,16 +267,12 @@ asmlinkage void do_softirq(void)
254 */ 267 */
255void irq_enter(void) 268void irq_enter(void)
256{ 269{
257#ifdef CONFIG_NO_HZ
258 int cpu = smp_processor_id(); 270 int cpu = smp_processor_id();
271
259 if (idle_cpu(cpu) && !in_interrupt()) 272 if (idle_cpu(cpu) && !in_interrupt())
260 tick_nohz_stop_idle(cpu); 273 tick_check_idle(cpu);
261#endif 274
262 __irq_enter(); 275 __irq_enter();
263#ifdef CONFIG_NO_HZ
264 if (idle_cpu(cpu))
265 tick_nohz_update_jiffies();
266#endif
267} 276}
268 277
269#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED 278#ifdef __ARCH_IRQ_EXIT_IRQS_DISABLED
@@ -463,17 +472,144 @@ void tasklet_kill(struct tasklet_struct *t)
463 472
464EXPORT_SYMBOL(tasklet_kill); 473EXPORT_SYMBOL(tasklet_kill);
465 474
475DEFINE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);
476EXPORT_PER_CPU_SYMBOL(softirq_work_list);
477
478static void __local_trigger(struct call_single_data *cp, int softirq)
479{
480 struct list_head *head = &__get_cpu_var(softirq_work_list[softirq]);
481
482 list_add_tail(&cp->list, head);
483
484 /* Trigger the softirq only if the list was previously empty. */
485 if (head->next == &cp->list)
486 raise_softirq_irqoff(softirq);
487}
488
489#ifdef CONFIG_USE_GENERIC_SMP_HELPERS
490static void remote_softirq_receive(void *data)
491{
492 struct call_single_data *cp = data;
493 unsigned long flags;
494 int softirq;
495
496 softirq = cp->priv;
497
498 local_irq_save(flags);
499 __local_trigger(cp, softirq);
500 local_irq_restore(flags);
501}
502
503static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
504{
505 if (cpu_online(cpu)) {
506 cp->func = remote_softirq_receive;
507 cp->info = cp;
508 cp->flags = 0;
509 cp->priv = softirq;
510
511 __smp_call_function_single(cpu, cp);
512 return 0;
513 }
514 return 1;
515}
516#else /* CONFIG_USE_GENERIC_SMP_HELPERS */
517static int __try_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
518{
519 return 1;
520}
521#endif
522
523/**
524 * __send_remote_softirq - try to schedule softirq work on a remote cpu
525 * @cp: private SMP call function data area
526 * @cpu: the remote cpu
527 * @this_cpu: the currently executing cpu
528 * @softirq: the softirq for the work
529 *
530 * Attempt to schedule softirq work on a remote cpu. If this cannot be
531 * done, the work is instead queued up on the local cpu.
532 *
533 * Interrupts must be disabled.
534 */
535void __send_remote_softirq(struct call_single_data *cp, int cpu, int this_cpu, int softirq)
536{
537 if (cpu == this_cpu || __try_remote_softirq(cp, cpu, softirq))
538 __local_trigger(cp, softirq);
539}
540EXPORT_SYMBOL(__send_remote_softirq);
541
542/**
543 * send_remote_softirq - try to schedule softirq work on a remote cpu
544 * @cp: private SMP call function data area
545 * @cpu: the remote cpu
546 * @softirq: the softirq for the work
547 *
548 * Like __send_remote_softirq except that disabling interrupts and
549 * computing the current cpu is done for the caller.
550 */
551void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq)
552{
553 unsigned long flags;
554 int this_cpu;
555
556 local_irq_save(flags);
557 this_cpu = smp_processor_id();
558 __send_remote_softirq(cp, cpu, this_cpu, softirq);
559 local_irq_restore(flags);
560}
561EXPORT_SYMBOL(send_remote_softirq);
562
563static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
564 unsigned long action, void *hcpu)
565{
566 /*
567 * If a CPU goes away, splice its entries to the current CPU
568 * and trigger a run of the softirq
569 */
570 if (action == CPU_DEAD || action == CPU_DEAD_FROZEN) {
571 int cpu = (unsigned long) hcpu;
572 int i;
573
574 local_irq_disable();
575 for (i = 0; i < NR_SOFTIRQS; i++) {
576 struct list_head *head = &per_cpu(softirq_work_list[i], cpu);
577 struct list_head *local_head;
578
579 if (list_empty(head))
580 continue;
581
582 local_head = &__get_cpu_var(softirq_work_list[i]);
583 list_splice_init(head, local_head);
584 raise_softirq_irqoff(i);
585 }
586 local_irq_enable();
587 }
588
589 return NOTIFY_OK;
590}
591
592static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
593 .notifier_call = remote_softirq_cpu_notify,
594};
595
466void __init softirq_init(void) 596void __init softirq_init(void)
467{ 597{
468 int cpu; 598 int cpu;
469 599
470 for_each_possible_cpu(cpu) { 600 for_each_possible_cpu(cpu) {
601 int i;
602
471 per_cpu(tasklet_vec, cpu).tail = 603 per_cpu(tasklet_vec, cpu).tail =
472 &per_cpu(tasklet_vec, cpu).head; 604 &per_cpu(tasklet_vec, cpu).head;
473 per_cpu(tasklet_hi_vec, cpu).tail = 605 per_cpu(tasklet_hi_vec, cpu).tail =
474 &per_cpu(tasklet_hi_vec, cpu).head; 606 &per_cpu(tasklet_hi_vec, cpu).head;
607 for (i = 0; i < NR_SOFTIRQS; i++)
608 INIT_LIST_HEAD(&per_cpu(softirq_work_list[i], cpu));
475 } 609 }
476 610
611 register_hotcpu_notifier(&remote_softirq_cpu_notifier);
612
477 open_softirq(TASKLET_SOFTIRQ, tasklet_action); 613 open_softirq(TASKLET_SOFTIRQ, tasklet_action);
478 open_softirq(HI_SOFTIRQ, tasklet_hi_action); 614 open_softirq(HI_SOFTIRQ, tasklet_hi_action);
479} 615}
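
The kernel-doc above defines the new remote-softirq entry points. A hypothetical caller might look like the sketch below: a driver stashes a call_single_data in each request and, on completion, pushes the softirq work back to the submitting CPU. Everything except send_remote_softirq() itself, i.e. the request struct, its field names and the choice of BLOCK_SOFTIRQ, is an illustrative assumption; the handler registered for that softirq is still expected to drain its per-CPU softirq_work_list entries.

#include <linux/interrupt.h>
#include <linux/smp.h>

/* Hypothetical per-request state; only send_remote_softirq() comes from
 * this patch, the rest of this sketch is invented for illustration. */
struct my_request {
	struct call_single_data csd;
	int submit_cpu;		/* CPU that issued the request */
};

/* Completion path, callable with interrupts enabled: hand the work back
 * to the submitting CPU, or queue it locally if that CPU is offline. */
static void my_complete_request(struct my_request *rq)
{
	send_remote_softirq(&rq->csd, rq->submit_cpu, BLOCK_SOFTIRQ);
}
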
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index cb838ee93a82..3953e4aed733 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -226,7 +226,7 @@ static void check_hung_uninterruptible_tasks(int this_cpu)
226 * If the system crashed already then all bets are off, 226 * If the system crashed already then all bets are off,
227 * do not report extra hung tasks: 227 * do not report extra hung tasks:
228 */ 228 */
229 if ((tainted & TAINT_DIE) || did_panic) 229 if (test_taint(TAINT_DIE) || did_panic)
230 return; 230 return;
231 231
232 read_lock(&tasklist_lock); 232 read_lock(&tasklist_lock);
diff --git a/kernel/sys.c b/kernel/sys.c
index 038a7bc0901d..53879cdae483 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -853,38 +853,28 @@ asmlinkage long sys_setfsgid(gid_t gid)
853 return old_fsgid; 853 return old_fsgid;
854} 854}
855 855
856void do_sys_times(struct tms *tms)
857{
858 struct task_cputime cputime;
859 cputime_t cutime, cstime;
860
861 spin_lock_irq(&current->sighand->siglock);
862 thread_group_cputime(current, &cputime);
863 cutime = current->signal->cutime;
864 cstime = current->signal->cstime;
865 spin_unlock_irq(&current->sighand->siglock);
866 tms->tms_utime = cputime_to_clock_t(cputime.utime);
867 tms->tms_stime = cputime_to_clock_t(cputime.stime);
868 tms->tms_cutime = cputime_to_clock_t(cutime);
869 tms->tms_cstime = cputime_to_clock_t(cstime);
870}
871
856asmlinkage long sys_times(struct tms __user * tbuf) 872asmlinkage long sys_times(struct tms __user * tbuf)
857{ 873{
858 /*
859 * In the SMP world we might just be unlucky and have one of
860 * the times increment as we use it. Since the value is an
861 * atomically safe type this is just fine. Conceptually its
862 * as if the syscall took an instant longer to occur.
863 */
864 if (tbuf) { 874 if (tbuf) {
865 struct tms tmp; 875 struct tms tmp;
866 struct task_struct *tsk = current; 876
867 struct task_struct *t; 877 do_sys_times(&tmp);
868 cputime_t utime, stime, cutime, cstime;
869
870 spin_lock_irq(&tsk->sighand->siglock);
871 utime = tsk->signal->utime;
872 stime = tsk->signal->stime;
873 t = tsk;
874 do {
875 utime = cputime_add(utime, t->utime);
876 stime = cputime_add(stime, t->stime);
877 t = next_thread(t);
878 } while (t != tsk);
879
880 cutime = tsk->signal->cutime;
881 cstime = tsk->signal->cstime;
882 spin_unlock_irq(&tsk->sighand->siglock);
883
884 tmp.tms_utime = cputime_to_clock_t(utime);
885 tmp.tms_stime = cputime_to_clock_t(stime);
886 tmp.tms_cutime = cputime_to_clock_t(cutime);
887 tmp.tms_cstime = cputime_to_clock_t(cstime);
888 if (copy_to_user(tbuf, &tmp, sizeof(struct tms))) 878 if (copy_to_user(tbuf, &tmp, sizeof(struct tms)))
889 return -EFAULT; 879 return -EFAULT;
890 } 880 }
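
After the rework above, the utime/stime reported by sys_times() are whole-thread-group totals taken from thread_group_cputime() rather than signal->utime/stime plus a walk over every thread. A small userspace check of what the syscall now returns; standard libc only, and the printed values depend on the running system.

#include <stdio.h>
#include <sys/times.h>
#include <unistd.h>

int main(void)
{
	struct tms t;
	long hz = sysconf(_SC_CLK_TCK);

	if (times(&t) == (clock_t)-1)
		return 1;
	/* tms_utime/tms_stime cover every thread in the calling group. */
	printf("utime=%.2fs stime=%.2fs\n",
	       (double)t.tms_utime / hz, (double)t.tms_stime / hz);
	return 0;
}
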
@@ -1060,9 +1050,7 @@ asmlinkage long sys_setsid(void)
1060 group_leader->signal->leader = 1; 1050 group_leader->signal->leader = 1;
1061 __set_special_pids(sid); 1051 __set_special_pids(sid);
1062 1052
1063 spin_lock(&group_leader->sighand->siglock); 1053 proc_clear_tty(group_leader);
1064 group_leader->signal->tty = NULL;
1065 spin_unlock(&group_leader->sighand->siglock);
1066 1054
1067 err = session; 1055 err = session;
1068out: 1056out:
@@ -1351,8 +1339,10 @@ asmlinkage long sys_sethostname(char __user *name, int len)
1351 down_write(&uts_sem); 1339 down_write(&uts_sem);
1352 errno = -EFAULT; 1340 errno = -EFAULT;
1353 if (!copy_from_user(tmp, name, len)) { 1341 if (!copy_from_user(tmp, name, len)) {
1354 memcpy(utsname()->nodename, tmp, len); 1342 struct new_utsname *u = utsname();
1355 utsname()->nodename[len] = 0; 1343
1344 memcpy(u->nodename, tmp, len);
1345 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1356 errno = 0; 1346 errno = 0;
1357 } 1347 }
1358 up_write(&uts_sem); 1348 up_write(&uts_sem);
@@ -1364,15 +1354,17 @@ asmlinkage long sys_sethostname(char __user *name, int len)
1364asmlinkage long sys_gethostname(char __user *name, int len) 1354asmlinkage long sys_gethostname(char __user *name, int len)
1365{ 1355{
1366 int i, errno; 1356 int i, errno;
1357 struct new_utsname *u;
1367 1358
1368 if (len < 0) 1359 if (len < 0)
1369 return -EINVAL; 1360 return -EINVAL;
1370 down_read(&uts_sem); 1361 down_read(&uts_sem);
1371 i = 1 + strlen(utsname()->nodename); 1362 u = utsname();
1363 i = 1 + strlen(u->nodename);
1372 if (i > len) 1364 if (i > len)
1373 i = len; 1365 i = len;
1374 errno = 0; 1366 errno = 0;
1375 if (copy_to_user(name, utsname()->nodename, i)) 1367 if (copy_to_user(name, u->nodename, i))
1376 errno = -EFAULT; 1368 errno = -EFAULT;
1377 up_read(&uts_sem); 1369 up_read(&uts_sem);
1378 return errno; 1370 return errno;
@@ -1397,8 +1389,10 @@ asmlinkage long sys_setdomainname(char __user *name, int len)
1397 down_write(&uts_sem); 1389 down_write(&uts_sem);
1398 errno = -EFAULT; 1390 errno = -EFAULT;
1399 if (!copy_from_user(tmp, name, len)) { 1391 if (!copy_from_user(tmp, name, len)) {
1400 memcpy(utsname()->domainname, tmp, len); 1392 struct new_utsname *u = utsname();
1401 utsname()->domainname[len] = 0; 1393
1394 memcpy(u->domainname, tmp, len);
1395 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1402 errno = 0; 1396 errno = 0;
1403 } 1397 }
1404 up_write(&uts_sem); 1398 up_write(&uts_sem);
@@ -1445,21 +1439,28 @@ asmlinkage long sys_old_getrlimit(unsigned int resource, struct rlimit __user *r
1445asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim) 1439asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
1446{ 1440{
1447 struct rlimit new_rlim, *old_rlim; 1441 struct rlimit new_rlim, *old_rlim;
1448 unsigned long it_prof_secs;
1449 int retval; 1442 int retval;
1450 1443
1451 if (resource >= RLIM_NLIMITS) 1444 if (resource >= RLIM_NLIMITS)
1452 return -EINVAL; 1445 return -EINVAL;
1453 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim))) 1446 if (copy_from_user(&new_rlim, rlim, sizeof(*rlim)))
1454 return -EFAULT; 1447 return -EFAULT;
1455 if (new_rlim.rlim_cur > new_rlim.rlim_max)
1456 return -EINVAL;
1457 old_rlim = current->signal->rlim + resource; 1448 old_rlim = current->signal->rlim + resource;
1458 if ((new_rlim.rlim_max > old_rlim->rlim_max) && 1449 if ((new_rlim.rlim_max > old_rlim->rlim_max) &&
1459 !capable(CAP_SYS_RESOURCE)) 1450 !capable(CAP_SYS_RESOURCE))
1460 return -EPERM; 1451 return -EPERM;
1461 if (resource == RLIMIT_NOFILE && new_rlim.rlim_max > sysctl_nr_open) 1452
1462 return -EPERM; 1453 if (resource == RLIMIT_NOFILE) {
1454 if (new_rlim.rlim_max == RLIM_INFINITY)
1455 new_rlim.rlim_max = sysctl_nr_open;
1456 if (new_rlim.rlim_cur == RLIM_INFINITY)
1457 new_rlim.rlim_cur = sysctl_nr_open;
1458 if (new_rlim.rlim_max > sysctl_nr_open)
1459 return -EPERM;
1460 }
1461
1462 if (new_rlim.rlim_cur > new_rlim.rlim_max)
1463 return -EINVAL;
1463 1464
1464 retval = security_task_setrlimit(resource, &new_rlim); 1465 retval = security_task_setrlimit(resource, &new_rlim);
1465 if (retval) 1466 if (retval)
@@ -1491,18 +1492,7 @@ asmlinkage long sys_setrlimit(unsigned int resource, struct rlimit __user *rlim)
1491 if (new_rlim.rlim_cur == RLIM_INFINITY) 1492 if (new_rlim.rlim_cur == RLIM_INFINITY)
1492 goto out; 1493 goto out;
1493 1494
1494 it_prof_secs = cputime_to_secs(current->signal->it_prof_expires); 1495 update_rlimit_cpu(new_rlim.rlim_cur);
1495 if (it_prof_secs == 0 || new_rlim.rlim_cur <= it_prof_secs) {
1496 unsigned long rlim_cur = new_rlim.rlim_cur;
1497 cputime_t cputime;
1498
1499 cputime = secs_to_cputime(rlim_cur);
1500 read_lock(&tasklist_lock);
1501 spin_lock_irq(&current->sighand->siglock);
1502 set_process_cpu_timer(current, CPUCLOCK_PROF, &cputime, NULL);
1503 spin_unlock_irq(&current->sighand->siglock);
1504 read_unlock(&tasklist_lock);
1505 }
1506out: 1496out:
1507 return 0; 1497 return 0;
1508} 1498}
@@ -1540,11 +1530,8 @@ out:
1540 * 1530 *
1541 */ 1531 */
1542 1532
1543static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r, 1533static void accumulate_thread_rusage(struct task_struct *t, struct rusage *r)
1544 cputime_t *utimep, cputime_t *stimep)
1545{ 1534{
1546 *utimep = cputime_add(*utimep, t->utime);
1547 *stimep = cputime_add(*stimep, t->stime);
1548 r->ru_nvcsw += t->nvcsw; 1535 r->ru_nvcsw += t->nvcsw;
1549 r->ru_nivcsw += t->nivcsw; 1536 r->ru_nivcsw += t->nivcsw;
1550 r->ru_minflt += t->min_flt; 1537 r->ru_minflt += t->min_flt;
@@ -1558,12 +1545,13 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1558 struct task_struct *t; 1545 struct task_struct *t;
1559 unsigned long flags; 1546 unsigned long flags;
1560 cputime_t utime, stime; 1547 cputime_t utime, stime;
1548 struct task_cputime cputime;
1561 1549
1562 memset((char *) r, 0, sizeof *r); 1550 memset((char *) r, 0, sizeof *r);
1563 utime = stime = cputime_zero; 1551 utime = stime = cputime_zero;
1564 1552
1565 if (who == RUSAGE_THREAD) { 1553 if (who == RUSAGE_THREAD) {
1566 accumulate_thread_rusage(p, r, &utime, &stime); 1554 accumulate_thread_rusage(p, r);
1567 goto out; 1555 goto out;
1568 } 1556 }
1569 1557
@@ -1586,8 +1574,9 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1586 break; 1574 break;
1587 1575
1588 case RUSAGE_SELF: 1576 case RUSAGE_SELF:
1589 utime = cputime_add(utime, p->signal->utime); 1577 thread_group_cputime(p, &cputime);
1590 stime = cputime_add(stime, p->signal->stime); 1578 utime = cputime_add(utime, cputime.utime);
1579 stime = cputime_add(stime, cputime.stime);
1591 r->ru_nvcsw += p->signal->nvcsw; 1580 r->ru_nvcsw += p->signal->nvcsw;
1592 r->ru_nivcsw += p->signal->nivcsw; 1581 r->ru_nivcsw += p->signal->nivcsw;
1593 r->ru_minflt += p->signal->min_flt; 1582 r->ru_minflt += p->signal->min_flt;
@@ -1596,7 +1585,7 @@ static void k_getrusage(struct task_struct *p, int who, struct rusage *r)
1596 r->ru_oublock += p->signal->oublock; 1585 r->ru_oublock += p->signal->oublock;
1597 t = p; 1586 t = p;
1598 do { 1587 do {
1599 accumulate_thread_rusage(t, r, &utime, &stime); 1588 accumulate_thread_rusage(t, r);
1600 t = next_thread(t); 1589 t = next_thread(t);
1601 } while (t != p); 1590 } while (t != p);
1602 break; 1591 break;
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 08d6e1bb99ac..a77b27b11b04 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -125,6 +125,12 @@ cond_syscall(sys_vm86old);
125cond_syscall(sys_vm86); 125cond_syscall(sys_vm86);
126cond_syscall(compat_sys_ipc); 126cond_syscall(compat_sys_ipc);
127cond_syscall(compat_sys_sysctl); 127cond_syscall(compat_sys_sysctl);
128cond_syscall(sys_flock);
129cond_syscall(sys_io_setup);
130cond_syscall(sys_io_destroy);
131cond_syscall(sys_io_submit);
132cond_syscall(sys_io_cancel);
133cond_syscall(sys_io_getevents);
128 134
129/* arch-specific weak syscall entries */ 135/* arch-specific weak syscall entries */
130cond_syscall(sys_pciconfig_read); 136cond_syscall(sys_pciconfig_read);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 50ec0886fa3d..b3cc73931d1f 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -80,7 +80,6 @@ extern int pid_max_min, pid_max_max;
80extern int sysctl_drop_caches; 80extern int sysctl_drop_caches;
81extern int percpu_pagelist_fraction; 81extern int percpu_pagelist_fraction;
82extern int compat_log; 82extern int compat_log;
83extern int maps_protect;
84extern int latencytop_enabled; 83extern int latencytop_enabled;
85extern int sysctl_nr_open_min, sysctl_nr_open_max; 84extern int sysctl_nr_open_min, sysctl_nr_open_max;
86#ifdef CONFIG_RCU_TORTURE_TEST 85#ifdef CONFIG_RCU_TORTURE_TEST
@@ -97,7 +96,7 @@ static int sixty = 60;
97static int neg_one = -1; 96static int neg_one = -1;
98#endif 97#endif
99 98
100#ifdef CONFIG_MMU 99#if defined(CONFIG_MMU) && defined(CONFIG_FILE_LOCKING)
101static int two = 2; 100static int two = 2;
102#endif 101#endif
103 102
@@ -118,10 +117,8 @@ extern char modprobe_path[];
118extern int sg_big_buff; 117extern int sg_big_buff;
119#endif 118#endif
120 119
121#ifdef __sparc__ 120#ifdef CONFIG_SPARC
122extern char reboot_command []; 121#include <asm/system.h>
123extern int stop_a_enabled;
124extern int scons_pwroff;
125#endif 122#endif
126 123
127#ifdef __hppa__ 124#ifdef __hppa__
@@ -152,7 +149,7 @@ extern int max_lock_depth;
152#ifdef CONFIG_PROC_SYSCTL 149#ifdef CONFIG_PROC_SYSCTL
153static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp, 150static int proc_do_cad_pid(struct ctl_table *table, int write, struct file *filp,
154 void __user *buffer, size_t *lenp, loff_t *ppos); 151 void __user *buffer, size_t *lenp, loff_t *ppos);
155static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp, 152static int proc_taint(struct ctl_table *table, int write, struct file *filp,
156 void __user *buffer, size_t *lenp, loff_t *ppos); 153 void __user *buffer, size_t *lenp, loff_t *ppos);
157#endif 154#endif
158 155
@@ -382,10 +379,9 @@ static struct ctl_table kern_table[] = {
382#ifdef CONFIG_PROC_SYSCTL 379#ifdef CONFIG_PROC_SYSCTL
383 { 380 {
384 .procname = "tainted", 381 .procname = "tainted",
385 .data = &tainted, 382 .maxlen = sizeof(long),
386 .maxlen = sizeof(int),
387 .mode = 0644, 383 .mode = 0644,
388 .proc_handler = &proc_dointvec_taint, 384 .proc_handler = &proc_taint,
389 }, 385 },
390#endif 386#endif
391#ifdef CONFIG_LATENCYTOP 387#ifdef CONFIG_LATENCYTOP
@@ -415,7 +411,7 @@ static struct ctl_table kern_table[] = {
415 .mode = 0644, 411 .mode = 0644,
416 .proc_handler = &proc_dointvec, 412 .proc_handler = &proc_dointvec,
417 }, 413 },
418#ifdef __sparc__ 414#ifdef CONFIG_SPARC
419 { 415 {
420 .ctl_name = KERN_SPARC_REBOOT, 416 .ctl_name = KERN_SPARC_REBOOT,
421 .procname = "reboot-cmd", 417 .procname = "reboot-cmd",
@@ -810,16 +806,6 @@ static struct ctl_table kern_table[] = {
810 .proc_handler = &proc_dointvec, 806 .proc_handler = &proc_dointvec,
811 }, 807 },
812#endif 808#endif
813#ifdef CONFIG_PROC_FS
814 {
815 .ctl_name = CTL_UNNUMBERED,
816 .procname = "maps_protect",
817 .data = &maps_protect,
818 .maxlen = sizeof(int),
819 .mode = 0644,
820 .proc_handler = &proc_dointvec,
821 },
822#endif
823 { 809 {
824 .ctl_name = CTL_UNNUMBERED, 810 .ctl_name = CTL_UNNUMBERED,
825 .procname = "poweroff_cmd", 811 .procname = "poweroff_cmd",
@@ -847,6 +833,16 @@ static struct ctl_table kern_table[] = {
847 .proc_handler = &proc_dointvec, 833 .proc_handler = &proc_dointvec,
848 }, 834 },
849#endif 835#endif
836#ifdef CONFIG_UNEVICTABLE_LRU
837 {
838 .ctl_name = CTL_UNNUMBERED,
839 .procname = "scan_unevictable_pages",
840 .data = &scan_unevictable_pages,
841 .maxlen = sizeof(scan_unevictable_pages),
842 .mode = 0644,
843 .proc_handler = &scan_unevictable_handler,
844 },
845#endif
850/* 846/*
851 * NOTE: do not add new entries to this table unless you have read 847 * NOTE: do not add new entries to this table unless you have read
852 * Documentation/sysctl/ctl_unnumbered.txt 848 * Documentation/sysctl/ctl_unnumbered.txt
@@ -1261,6 +1257,7 @@ static struct ctl_table fs_table[] = {
1261 .extra1 = &minolduid, 1257 .extra1 = &minolduid,
1262 .extra2 = &maxolduid, 1258 .extra2 = &maxolduid,
1263 }, 1259 },
1260#ifdef CONFIG_FILE_LOCKING
1264 { 1261 {
1265 .ctl_name = FS_LEASES, 1262 .ctl_name = FS_LEASES,
1266 .procname = "leases-enable", 1263 .procname = "leases-enable",
@@ -1269,6 +1266,7 @@ static struct ctl_table fs_table[] = {
1269 .mode = 0644, 1266 .mode = 0644,
1270 .proc_handler = &proc_dointvec, 1267 .proc_handler = &proc_dointvec,
1271 }, 1268 },
1269#endif
1272#ifdef CONFIG_DNOTIFY 1270#ifdef CONFIG_DNOTIFY
1273 { 1271 {
1274 .ctl_name = FS_DIR_NOTIFY, 1272 .ctl_name = FS_DIR_NOTIFY,
@@ -1280,6 +1278,7 @@ static struct ctl_table fs_table[] = {
1280 }, 1278 },
1281#endif 1279#endif
1282#ifdef CONFIG_MMU 1280#ifdef CONFIG_MMU
1281#ifdef CONFIG_FILE_LOCKING
1283 { 1282 {
1284 .ctl_name = FS_LEASE_TIME, 1283 .ctl_name = FS_LEASE_TIME,
1285 .procname = "lease-break-time", 1284 .procname = "lease-break-time",
@@ -1291,6 +1290,8 @@ static struct ctl_table fs_table[] = {
1291 .extra1 = &zero, 1290 .extra1 = &zero,
1292 .extra2 = &two, 1291 .extra2 = &two,
1293 }, 1292 },
1293#endif
1294#ifdef CONFIG_AIO
1294 { 1295 {
1295 .procname = "aio-nr", 1296 .procname = "aio-nr",
1296 .data = &aio_nr, 1297 .data = &aio_nr,
@@ -1305,6 +1306,7 @@ static struct ctl_table fs_table[] = {
1305 .mode = 0644, 1306 .mode = 0644,
1306 .proc_handler = &proc_doulongvec_minmax, 1307 .proc_handler = &proc_doulongvec_minmax,
1307 }, 1308 },
1309#endif /* CONFIG_AIO */
1308#ifdef CONFIG_INOTIFY_USER 1310#ifdef CONFIG_INOTIFY_USER
1309 { 1311 {
1310 .ctl_name = FS_INOTIFY, 1312 .ctl_name = FS_INOTIFY,
@@ -1510,7 +1512,6 @@ void register_sysctl_root(struct ctl_table_root *root)
1510/* Perform the actual read/write of a sysctl table entry. */ 1512/* Perform the actual read/write of a sysctl table entry. */
1511static int do_sysctl_strategy(struct ctl_table_root *root, 1513static int do_sysctl_strategy(struct ctl_table_root *root,
1512 struct ctl_table *table, 1514 struct ctl_table *table,
1513 int __user *name, int nlen,
1514 void __user *oldval, size_t __user *oldlenp, 1515 void __user *oldval, size_t __user *oldlenp,
1515 void __user *newval, size_t newlen) 1516 void __user *newval, size_t newlen)
1516{ 1517{
@@ -1524,8 +1525,7 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
1524 return -EPERM; 1525 return -EPERM;
1525 1526
1526 if (table->strategy) { 1527 if (table->strategy) {
1527 rc = table->strategy(table, name, nlen, oldval, oldlenp, 1528 rc = table->strategy(table, oldval, oldlenp, newval, newlen);
1528 newval, newlen);
1529 if (rc < 0) 1529 if (rc < 0)
1530 return rc; 1530 return rc;
1531 if (rc > 0) 1531 if (rc > 0)
@@ -1535,8 +1535,7 @@ static int do_sysctl_strategy(struct ctl_table_root *root,
1535 /* If there is no strategy routine, or if the strategy returns 1535 /* If there is no strategy routine, or if the strategy returns
1536 * zero, proceed with automatic r/w */ 1536 * zero, proceed with automatic r/w */
1537 if (table->data && table->maxlen) { 1537 if (table->data && table->maxlen) {
1538 rc = sysctl_data(table, name, nlen, oldval, oldlenp, 1538 rc = sysctl_data(table, oldval, oldlenp, newval, newlen);
1539 newval, newlen);
1540 if (rc < 0) 1539 if (rc < 0)
1541 return rc; 1540 return rc;
1542 } 1541 }
@@ -1568,7 +1567,7 @@ repeat:
1568 table = table->child; 1567 table = table->child;
1569 goto repeat; 1568 goto repeat;
1570 } 1569 }
1571 error = do_sysctl_strategy(root, table, name, nlen, 1570 error = do_sysctl_strategy(root, table,
1572 oldval, oldlenp, 1571 oldval, oldlenp,
1573 newval, newlen); 1572 newval, newlen);
1574 return error; 1573 return error;
@@ -2237,49 +2236,39 @@ int proc_dointvec(struct ctl_table *table, int write, struct file *filp,
2237 NULL,NULL); 2236 NULL,NULL);
2238} 2237}
2239 2238
2240#define OP_SET 0
2241#define OP_AND 1
2242#define OP_OR 2
2243
2244static int do_proc_dointvec_bset_conv(int *negp, unsigned long *lvalp,
2245 int *valp,
2246 int write, void *data)
2247{
2248 int op = *(int *)data;
2249 if (write) {
2250 int val = *negp ? -*lvalp : *lvalp;
2251 switch(op) {
2252 case OP_SET: *valp = val; break;
2253 case OP_AND: *valp &= val; break;
2254 case OP_OR: *valp |= val; break;
2255 }
2256 } else {
2257 int val = *valp;
2258 if (val < 0) {
2259 *negp = -1;
2260 *lvalp = (unsigned long)-val;
2261 } else {
2262 *negp = 0;
2263 *lvalp = (unsigned long)val;
2264 }
2265 }
2266 return 0;
2267}
2268
2269/* 2239/*
2270 * Taint values can only be increased 2240 * Taint values can only be increased
2241 * This means we can safely use a temporary.
2271 */ 2242 */
2272static int proc_dointvec_taint(struct ctl_table *table, int write, struct file *filp, 2243static int proc_taint(struct ctl_table *table, int write, struct file *filp,
2273 void __user *buffer, size_t *lenp, loff_t *ppos) 2244 void __user *buffer, size_t *lenp, loff_t *ppos)
2274{ 2245{
2275 int op; 2246 struct ctl_table t;
2247 unsigned long tmptaint = get_taint();
2248 int err;
2276 2249
2277 if (write && !capable(CAP_SYS_ADMIN)) 2250 if (write && !capable(CAP_SYS_ADMIN))
2278 return -EPERM; 2251 return -EPERM;
2279 2252
2280 op = OP_OR; 2253 t = *table;
2281 return do_proc_dointvec(table,write,filp,buffer,lenp,ppos, 2254 t.data = &tmptaint;
2282 do_proc_dointvec_bset_conv,&op); 2255 err = proc_doulongvec_minmax(&t, write, filp, buffer, lenp, ppos);
2256 if (err < 0)
2257 return err;
2258
2259 if (write) {
2260 /*
2261 * Poor man's atomic or. Not worth adding a primitive
2262 * to everyone's atomic.h for this
2263 */
2264 int i;
2265 for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) {
2266 if ((tmptaint >> i) & 1)
2267 add_taint(i);
2268 }
2269 }
2270
2271 return err;
2283} 2272}
2284 2273
2285struct do_proc_dointvec_minmax_conv_param { 2274struct do_proc_dointvec_minmax_conv_param {
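
The rewritten handler keeps the guarantee spelled out in the comment above: taint bits can only be added. The user-supplied long is read into a temporary and each set bit is replayed through add_taint(), one bit at a time. A userspace sketch of that bit loop, where add_taint_bit() stands in for the kernel's add_taint().

#include <stdio.h>

static unsigned long taint_mask;

/* Stand-in for add_taint(): setting a flag is one-way. */
static void add_taint_bit(unsigned int flag)
{
	taint_mask |= 1UL << flag;
}

/* Mirror of the proc_taint() write path: walk the set bits of the value
 * the user wrote and apply each one individually. */
static void apply_tmptaint(unsigned long tmptaint)
{
	for (unsigned int i = 0; i < sizeof(long) * 8 && (tmptaint >> i); i++) {
		if ((tmptaint >> i) & 1)
			add_taint_bit(i);
	}
}

int main(void)
{
	apply_tmptaint(0x5);	/* user wrote bits 0 and 2 */
	apply_tmptaint(0x2);	/* a later write can only add bits */
	printf("taint mask: %#lx\n", taint_mask);
	return 0;
}
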
@@ -2727,7 +2716,7 @@ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write,
2727 */ 2716 */
2728 2717
2729/* The generic sysctl data routine (used if no strategy routine supplied) */ 2718/* The generic sysctl data routine (used if no strategy routine supplied) */
2730int sysctl_data(struct ctl_table *table, int __user *name, int nlen, 2719int sysctl_data(struct ctl_table *table,
2731 void __user *oldval, size_t __user *oldlenp, 2720 void __user *oldval, size_t __user *oldlenp,
2732 void __user *newval, size_t newlen) 2721 void __user *newval, size_t newlen)
2733{ 2722{
@@ -2761,7 +2750,7 @@ int sysctl_data(struct ctl_table *table, int __user *name, int nlen,
2761} 2750}
2762 2751
2763/* The generic string strategy routine: */ 2752/* The generic string strategy routine: */
2764int sysctl_string(struct ctl_table *table, int __user *name, int nlen, 2753int sysctl_string(struct ctl_table *table,
2765 void __user *oldval, size_t __user *oldlenp, 2754 void __user *oldval, size_t __user *oldlenp,
2766 void __user *newval, size_t newlen) 2755 void __user *newval, size_t newlen)
2767{ 2756{
@@ -2807,7 +2796,7 @@ int sysctl_string(struct ctl_table *table, int __user *name, int nlen,
2807 * are between the minimum and maximum values given in the arrays 2796 * are between the minimum and maximum values given in the arrays
2808 * table->extra1 and table->extra2, respectively. 2797 * table->extra1 and table->extra2, respectively.
2809 */ 2798 */
2810int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen, 2799int sysctl_intvec(struct ctl_table *table,
2811 void __user *oldval, size_t __user *oldlenp, 2800 void __user *oldval, size_t __user *oldlenp,
2812 void __user *newval, size_t newlen) 2801 void __user *newval, size_t newlen)
2813{ 2802{
@@ -2843,7 +2832,7 @@ int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen,
2843} 2832}
2844 2833
2845/* Strategy function to convert jiffies to seconds */ 2834/* Strategy function to convert jiffies to seconds */
2846int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen, 2835int sysctl_jiffies(struct ctl_table *table,
2847 void __user *oldval, size_t __user *oldlenp, 2836 void __user *oldval, size_t __user *oldlenp,
2848 void __user *newval, size_t newlen) 2837 void __user *newval, size_t newlen)
2849{ 2838{
@@ -2877,7 +2866,7 @@ int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen,
2877} 2866}
2878 2867
2879/* Strategy function to convert jiffies to seconds */ 2868/* Strategy function to convert jiffies to seconds */
2880int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen, 2869int sysctl_ms_jiffies(struct ctl_table *table,
2881 void __user *oldval, size_t __user *oldlenp, 2870 void __user *oldval, size_t __user *oldlenp,
2882 void __user *newval, size_t newlen) 2871 void __user *newval, size_t newlen)
2883{ 2872{
@@ -2932,35 +2921,35 @@ asmlinkage long sys_sysctl(struct __sysctl_args __user *args)
2932 return error; 2921 return error;
2933} 2922}
2934 2923
2935int sysctl_data(struct ctl_table *table, int __user *name, int nlen, 2924int sysctl_data(struct ctl_table *table,
2936 void __user *oldval, size_t __user *oldlenp, 2925 void __user *oldval, size_t __user *oldlenp,
2937 void __user *newval, size_t newlen) 2926 void __user *newval, size_t newlen)
2938{ 2927{
2939 return -ENOSYS; 2928 return -ENOSYS;
2940} 2929}
2941 2930
2942int sysctl_string(struct ctl_table *table, int __user *name, int nlen, 2931int sysctl_string(struct ctl_table *table,
2943 void __user *oldval, size_t __user *oldlenp, 2932 void __user *oldval, size_t __user *oldlenp,
2944 void __user *newval, size_t newlen) 2933 void __user *newval, size_t newlen)
2945{ 2934{
2946 return -ENOSYS; 2935 return -ENOSYS;
2947} 2936}
2948 2937
2949int sysctl_intvec(struct ctl_table *table, int __user *name, int nlen, 2938int sysctl_intvec(struct ctl_table *table,
2950 void __user *oldval, size_t __user *oldlenp, 2939 void __user *oldval, size_t __user *oldlenp,
2951 void __user *newval, size_t newlen) 2940 void __user *newval, size_t newlen)
2952{ 2941{
2953 return -ENOSYS; 2942 return -ENOSYS;
2954} 2943}
2955 2944
2956int sysctl_jiffies(struct ctl_table *table, int __user *name, int nlen, 2945int sysctl_jiffies(struct ctl_table *table,
2957 void __user *oldval, size_t __user *oldlenp, 2946 void __user *oldval, size_t __user *oldlenp,
2958 void __user *newval, size_t newlen) 2947 void __user *newval, size_t newlen)
2959{ 2948{
2960 return -ENOSYS; 2949 return -ENOSYS;
2961} 2950}
2962 2951
2963int sysctl_ms_jiffies(struct ctl_table *table, int __user *name, int nlen, 2952int sysctl_ms_jiffies(struct ctl_table *table,
2964 void __user *oldval, size_t __user *oldlenp, 2953 void __user *oldval, size_t __user *oldlenp,
2965 void __user *newval, size_t newlen) 2954 void __user *newval, size_t newlen)
2966{ 2955{
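The hunks above trim the unused name/nlen arguments from the binary-sysctl strategy callbacks and keep the -ENOSYS stubs for the !CONFIG_SYSCTL_SYSCALL case. For orientation only, a hypothetical ctl_table entry that wires up one of these callbacks with the reduced prototype, including the table->extra1/extra2 bounds mentioned in the sysctl_intvec() comment at the top, might look as follows; the example_* names and values are invented and are not part of this series.

/* Hypothetical kernel-side fragment, not taken from this patch. */
#include <linux/sysctl.h>

static int example_value = 10;
static int example_min;			/* 0 */
static int example_max = 100;

static struct ctl_table example_table[] = {
	{
		/* a real binary-interface user would have a fixed ctl_name */
		.ctl_name	= CTL_UNNUMBERED,
		.procname	= "example_value",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec_minmax,
		/* strategy callbacks now take only table/oldval/newval */
		.strategy	= &sysctl_intvec,
		.extra1		= &example_min,
		.extra2		= &example_max,
	},
	{ .ctl_name = 0 }
};
/* The table would be hooked up elsewhere via register_sysctl_table(). */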
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 8d53106a0a92..95ed42951e0a 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -3,7 +3,6 @@
3# 3#
4config TICK_ONESHOT 4config TICK_ONESHOT
5 bool 5 bool
6 default n
7 6
8config NO_HZ 7config NO_HZ
9 bool "Tickless System (Dynamic Ticks)" 8 bool "Tickless System (Dynamic Ticks)"
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 093d4acf993b..9ed2eec97526 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -325,6 +325,9 @@ int clocksource_register(struct clocksource *c)
325 unsigned long flags; 325 unsigned long flags;
326 int ret; 326 int ret;
327 327
328 /* save mult_orig on registration */
329 c->mult_orig = c->mult;
330
328 spin_lock_irqsave(&clocksource_lock, flags); 331 spin_lock_irqsave(&clocksource_lock, flags);
329 ret = clocksource_enqueue(c); 332 ret = clocksource_enqueue(c);
330 if (!ret) 333 if (!ret)
diff --git a/kernel/time/jiffies.c b/kernel/time/jiffies.c
index 4c256fdb8875..1ca99557e929 100644
--- a/kernel/time/jiffies.c
+++ b/kernel/time/jiffies.c
@@ -61,6 +61,7 @@ struct clocksource clocksource_jiffies = {
61 .read = jiffies_read, 61 .read = jiffies_read,
62 .mask = 0xffffffff, /*32bits*/ 62 .mask = 0xffffffff, /*32bits*/
63 .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */ 63 .mult = NSEC_PER_JIFFY << JIFFIES_SHIFT, /* details above */
64 .mult_orig = NSEC_PER_JIFFY << JIFFIES_SHIFT,
64 .shift = JIFFIES_SHIFT, 65 .shift = JIFFIES_SHIFT,
65}; 66};
66 67
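As a quick sanity check of the mult/shift arithmetic behind the new mult_orig field: the jiffies clocksource picks mult = NSEC_PER_JIFFY << JIFFIES_SHIFT so that one "cycle" (one jiffy) converts back to exactly NSEC_PER_JIFFY nanoseconds, and mult_orig simply records that same starting ratio before NTP is allowed to nudge mult. The throwaway userspace check below assumes HZ=250 (about 4,000,000 ns per jiffy) and a shift of 8; neither number comes from this hunk.

#include <stdio.h>

int main(void)
{
	unsigned long long nsec_per_jiffy = 4000000ULL;	/* assumes HZ=250 */
	unsigned int shift = 8;				/* assumed JIFFIES_SHIFT */
	unsigned long long mult = nsec_per_jiffy << shift;

	/* cyc2ns(one jiffy) = (1 * mult) >> shift */
	printf("one jiffy -> %llu ns\n", (1ULL * mult) >> shift);
	return 0;
}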
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index 1ad46f3df6e7..1a20715bfd6e 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -10,13 +10,13 @@
10 10
11#include <linux/mm.h> 11#include <linux/mm.h>
12#include <linux/time.h> 12#include <linux/time.h>
13#include <linux/timer.h>
14#include <linux/timex.h> 13#include <linux/timex.h>
15#include <linux/jiffies.h> 14#include <linux/jiffies.h>
16#include <linux/hrtimer.h> 15#include <linux/hrtimer.h>
17#include <linux/capability.h> 16#include <linux/capability.h>
18#include <linux/math64.h> 17#include <linux/math64.h>
19#include <linux/clocksource.h> 18#include <linux/clocksource.h>
19#include <linux/workqueue.h>
20#include <asm/timex.h> 20#include <asm/timex.h>
21 21
22/* 22/*
@@ -218,11 +218,11 @@ void second_overflow(void)
218/* Disable the cmos update - used by virtualization and embedded */ 218/* Disable the cmos update - used by virtualization and embedded */
219int no_sync_cmos_clock __read_mostly; 219int no_sync_cmos_clock __read_mostly;
220 220
221static void sync_cmos_clock(unsigned long dummy); 221static void sync_cmos_clock(struct work_struct *work);
222 222
223static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0); 223static DECLARE_DELAYED_WORK(sync_cmos_work, sync_cmos_clock);
224 224
225static void sync_cmos_clock(unsigned long dummy) 225static void sync_cmos_clock(struct work_struct *work)
226{ 226{
227 struct timespec now, next; 227 struct timespec now, next;
228 int fail = 1; 228 int fail = 1;
@@ -258,13 +258,13 @@ static void sync_cmos_clock(unsigned long dummy)
258 next.tv_sec++; 258 next.tv_sec++;
259 next.tv_nsec -= NSEC_PER_SEC; 259 next.tv_nsec -= NSEC_PER_SEC;
260 } 260 }
261 mod_timer(&sync_cmos_timer, jiffies + timespec_to_jiffies(&next)); 261 schedule_delayed_work(&sync_cmos_work, timespec_to_jiffies(&next));
262} 262}
263 263
264static void notify_cmos_timer(void) 264static void notify_cmos_timer(void)
265{ 265{
266 if (!no_sync_cmos_clock) 266 if (!no_sync_cmos_clock)
267 mod_timer(&sync_cmos_timer, jiffies + 1); 267 schedule_delayed_work(&sync_cmos_work, 0);
268} 268}
269 269
270#else 270#else
@@ -277,38 +277,50 @@ static inline void notify_cmos_timer(void) { }
277int do_adjtimex(struct timex *txc) 277int do_adjtimex(struct timex *txc)
278{ 278{
279 struct timespec ts; 279 struct timespec ts;
280 long save_adjust, sec;
281 int result; 280 int result;
282 281
283 /* In order to modify anything, you gotta be super-user! */ 282 /* Validate the data before disabling interrupts */
284 if (txc->modes && !capable(CAP_SYS_TIME)) 283 if (txc->modes & ADJ_ADJTIME) {
285 return -EPERM;
286
287 /* Now we validate the data before disabling interrupts */
288
289 if ((txc->modes & ADJ_OFFSET_SINGLESHOT) == ADJ_OFFSET_SINGLESHOT) {
290 /* singleshot must not be used with any other mode bits */ 284 /* singleshot must not be used with any other mode bits */
291 if (txc->modes & ~ADJ_OFFSET_SS_READ) 285 if (!(txc->modes & ADJ_OFFSET_SINGLESHOT))
292 return -EINVAL; 286 return -EINVAL;
287 if (!(txc->modes & ADJ_OFFSET_READONLY) &&
288 !capable(CAP_SYS_TIME))
289 return -EPERM;
290 } else {
291 /* In order to modify anything, you gotta be super-user! */
292 if (txc->modes && !capable(CAP_SYS_TIME))
293 return -EPERM;
294
295 /* if the quartz is off by more than 10% something is VERY wrong! */
296 if (txc->modes & ADJ_TICK &&
297 (txc->tick < 900000/USER_HZ ||
298 txc->tick > 1100000/USER_HZ))
299 return -EINVAL;
300
301 if (txc->modes & ADJ_STATUS && time_state != TIME_OK)
302 hrtimer_cancel(&leap_timer);
293 } 303 }
294 304
295 /* if the quartz is off by more than 10% something is VERY wrong ! */
296 if (txc->modes & ADJ_TICK)
297 if (txc->tick < 900000/USER_HZ ||
298 txc->tick > 1100000/USER_HZ)
299 return -EINVAL;
300
301 if (time_state != TIME_OK && txc->modes & ADJ_STATUS)
302 hrtimer_cancel(&leap_timer);
303 getnstimeofday(&ts); 305 getnstimeofday(&ts);
304 306
305 write_seqlock_irq(&xtime_lock); 307 write_seqlock_irq(&xtime_lock);
306 308
307 /* Save for later - semantics of adjtime is to return old value */
308 save_adjust = time_adjust;
309
310 /* If there are input parameters, then process them */ 309 /* If there are input parameters, then process them */
310 if (txc->modes & ADJ_ADJTIME) {
311 long save_adjust = time_adjust;
312
313 if (!(txc->modes & ADJ_OFFSET_READONLY)) {
314 /* adjtime() is independent from ntp_adjtime() */
315 time_adjust = txc->offset;
316 ntp_update_frequency();
317 }
318 txc->offset = save_adjust;
319 goto adj_done;
320 }
311 if (txc->modes) { 321 if (txc->modes) {
322 long sec;
323
312 if (txc->modes & ADJ_STATUS) { 324 if (txc->modes & ADJ_STATUS) {
313 if ((time_status & STA_PLL) && 325 if ((time_status & STA_PLL) &&
314 !(txc->status & STA_PLL)) { 326 !(txc->status & STA_PLL)) {
@@ -375,13 +387,8 @@ int do_adjtimex(struct timex *txc)
375 if (txc->modes & ADJ_TAI && txc->constant > 0) 387 if (txc->modes & ADJ_TAI && txc->constant > 0)
376 time_tai = txc->constant; 388 time_tai = txc->constant;
377 389
378 if (txc->modes & ADJ_OFFSET) { 390 if (txc->modes & ADJ_OFFSET)
379 if (txc->modes == ADJ_OFFSET_SINGLESHOT) 391 ntp_update_offset(txc->offset);
380 /* adjtime() is independent from ntp_adjtime() */
381 time_adjust = txc->offset;
382 else
383 ntp_update_offset(txc->offset);
384 }
385 if (txc->modes & ADJ_TICK) 392 if (txc->modes & ADJ_TICK)
386 tick_usec = txc->tick; 393 tick_usec = txc->tick;
387 394
@@ -389,22 +396,18 @@ int do_adjtimex(struct timex *txc)
389 ntp_update_frequency(); 396 ntp_update_frequency();
390 } 397 }
391 398
399 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
400 NTP_SCALE_SHIFT);
401 if (!(time_status & STA_NANO))
402 txc->offset /= NSEC_PER_USEC;
403
404adj_done:
392 result = time_state; /* mostly `TIME_OK' */ 405 result = time_state; /* mostly `TIME_OK' */
393 if (time_status & (STA_UNSYNC|STA_CLOCKERR)) 406 if (time_status & (STA_UNSYNC|STA_CLOCKERR))
394 result = TIME_ERROR; 407 result = TIME_ERROR;
395 408
396 if ((txc->modes == ADJ_OFFSET_SINGLESHOT) || 409 txc->freq = shift_right((time_freq >> PPM_SCALE_INV_SHIFT) *
397 (txc->modes == ADJ_OFFSET_SS_READ)) 410 (s64)PPM_SCALE_INV, NTP_SCALE_SHIFT);
398 txc->offset = save_adjust;
399 else {
400 txc->offset = shift_right(time_offset * NTP_INTERVAL_FREQ,
401 NTP_SCALE_SHIFT);
402 if (!(time_status & STA_NANO))
403 txc->offset /= NSEC_PER_USEC;
404 }
405 txc->freq = shift_right((s32)(time_freq >> PPM_SCALE_INV_SHIFT) *
406 (s64)PPM_SCALE_INV,
407 NTP_SCALE_SHIFT);
408 txc->maxerror = time_maxerror; 411 txc->maxerror = time_maxerror;
409 txc->esterror = time_esterror; 412 txc->esterror = time_esterror;
410 txc->status = time_status; 413 txc->status = time_status;
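The restructured do_adjtimex() splits the adjtime()-style ADJ_OFFSET_SINGLESHOT handling out behind ADJ_ADJTIME and only requires CAP_SYS_TIME when something is actually being modified, so a read-only query (modes = 0) needs no privilege at all. The small userspace program below exercises that path through glibc's adjtimex(3) wrapper; it is an illustration, not part of the patch.

/* Unprivileged read-only query of the state do_adjtimex() maintains. */
#include <stdio.h>
#include <sys/timex.h>

int main(void)
{
	struct timex tx = { .modes = 0 };	/* nothing is being modified */
	int state = adjtimex(&tx);

	if (state == -1) {
		perror("adjtimex");
		return 1;
	}
	/* offset is in microseconds unless STA_NANO is set in tx.status */
	printf("clock state %d, offset %ld, freq %ld, status %#x\n",
	       state, tx.offset, tx.freq, (unsigned int)tx.status);
	return 0;
}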
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index cb01cd8f919b..f98a1b7b16e9 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -384,6 +384,19 @@ int tick_resume_broadcast_oneshot(struct clock_event_device *bc)
384} 384}
385 385
386/* 386/*
387 * Called from irq_enter() when idle was interrupted to reenable the
388 * per cpu device.
389 */
390void tick_check_oneshot_broadcast(int cpu)
391{
392 if (cpu_isset(cpu, tick_broadcast_oneshot_mask)) {
393 struct tick_device *td = &per_cpu(tick_cpu_device, cpu);
394
395 clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT);
396 }
397}
398
399/*
387 * Handle oneshot mode broadcasting 400 * Handle oneshot mode broadcasting
388 */ 401 */
389static void tick_handle_oneshot_broadcast(struct clock_event_device *dev) 402static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
diff --git a/kernel/time/tick-internal.h b/kernel/time/tick-internal.h
index 469248782c23..b1c05bf75ee0 100644
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -36,6 +36,7 @@ extern void tick_broadcast_switch_to_oneshot(void);
36extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup); 36extern void tick_shutdown_broadcast_oneshot(unsigned int *cpup);
37extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc); 37extern int tick_resume_broadcast_oneshot(struct clock_event_device *bc);
38extern int tick_broadcast_oneshot_active(void); 38extern int tick_broadcast_oneshot_active(void);
39extern void tick_check_oneshot_broadcast(int cpu);
39# else /* BROADCAST */ 40# else /* BROADCAST */
40static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc) 41static inline void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
41{ 42{
@@ -45,6 +46,7 @@ static inline void tick_broadcast_oneshot_control(unsigned long reason) { }
45static inline void tick_broadcast_switch_to_oneshot(void) { } 46static inline void tick_broadcast_switch_to_oneshot(void) { }
46static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { } 47static inline void tick_shutdown_broadcast_oneshot(unsigned int *cpup) { }
47static inline int tick_broadcast_oneshot_active(void) { return 0; } 48static inline int tick_broadcast_oneshot_active(void) { return 0; }
49static inline void tick_check_oneshot_broadcast(int cpu) { }
48# endif /* !BROADCAST */ 50# endif /* !BROADCAST */
49 51
50#else /* !ONESHOT */ 52#else /* !ONESHOT */
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index cb02324bdb88..0581c11fe6c6 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -20,6 +20,7 @@
20#include <linux/profile.h> 20#include <linux/profile.h>
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/tick.h> 22#include <linux/tick.h>
23#include <linux/module.h>
23 24
24#include <asm/irq_regs.h> 25#include <asm/irq_regs.h>
25 26
@@ -154,7 +155,7 @@ void tick_nohz_update_jiffies(void)
154 touch_softlockup_watchdog(); 155 touch_softlockup_watchdog();
155} 156}
156 157
157void tick_nohz_stop_idle(int cpu) 158static void tick_nohz_stop_idle(int cpu)
158{ 159{
159 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 160 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
160 161
@@ -190,9 +191,17 @@ u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time)
190{ 191{
191 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu); 192 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
192 193
193 *last_update_time = ktime_to_us(ts->idle_lastupdate); 194 if (!tick_nohz_enabled)
195 return -1;
196
197 if (ts->idle_active)
198 *last_update_time = ktime_to_us(ts->idle_lastupdate);
199 else
200 *last_update_time = ktime_to_us(ktime_get());
201
194 return ktime_to_us(ts->idle_sleeptime); 202 return ktime_to_us(ts->idle_sleeptime);
195} 203}
204EXPORT_SYMBOL_GPL(get_cpu_idle_time_us);
196 205
197/** 206/**
198 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task 207 * tick_nohz_stop_sched_tick - stop the idle tick from the idle task
@@ -261,7 +270,7 @@ void tick_nohz_stop_sched_tick(int inidle)
261 next_jiffies = get_next_timer_interrupt(last_jiffies); 270 next_jiffies = get_next_timer_interrupt(last_jiffies);
262 delta_jiffies = next_jiffies - last_jiffies; 271 delta_jiffies = next_jiffies - last_jiffies;
263 272
264 if (rcu_needs_cpu(cpu)) 273 if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu))
265 delta_jiffies = 1; 274 delta_jiffies = 1;
266 /* 275 /*
267 * Do not stop the tick, if we are only one off 276 * Do not stop the tick, if we are only one off
@@ -368,6 +377,32 @@ ktime_t tick_nohz_get_sleep_length(void)
368 return ts->sleep_length; 377 return ts->sleep_length;
369} 378}
370 379
380static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
381{
382 hrtimer_cancel(&ts->sched_timer);
383 ts->sched_timer.expires = ts->idle_tick;
384
385 while (1) {
386 /* Forward the time to expire in the future */
387 hrtimer_forward(&ts->sched_timer, now, tick_period);
388
389 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
390 hrtimer_start(&ts->sched_timer,
391 ts->sched_timer.expires,
392 HRTIMER_MODE_ABS);
393 /* Check, if the timer was already in the past */
394 if (hrtimer_active(&ts->sched_timer))
395 break;
396 } else {
397 if (!tick_program_event(ts->sched_timer.expires, 0))
398 break;
399 }
400 /* Update jiffies and reread time */
401 tick_do_update_jiffies64(now);
402 now = ktime_get();
403 }
404}
405
371/** 406/**
372 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task 407 * tick_nohz_restart_sched_tick - restart the idle tick from the idle task
373 * 408 *
@@ -421,28 +456,7 @@ void tick_nohz_restart_sched_tick(void)
421 */ 456 */
422 ts->tick_stopped = 0; 457 ts->tick_stopped = 0;
423 ts->idle_exittime = now; 458 ts->idle_exittime = now;
424 hrtimer_cancel(&ts->sched_timer); 459 tick_nohz_restart(ts, now);
425 ts->sched_timer.expires = ts->idle_tick;
426
427 while (1) {
428 /* Forward the time to expire in the future */
429 hrtimer_forward(&ts->sched_timer, now, tick_period);
430
431 if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
432 hrtimer_start(&ts->sched_timer,
433 ts->sched_timer.expires,
434 HRTIMER_MODE_ABS);
435 /* Check, if the timer was already in the past */
436 if (hrtimer_active(&ts->sched_timer))
437 break;
438 } else {
439 if (!tick_program_event(ts->sched_timer.expires, 0))
440 break;
441 }
442 /* Update jiffies and reread time */
443 tick_do_update_jiffies64(now);
444 now = ktime_get();
445 }
446 local_irq_enable(); 460 local_irq_enable();
447} 461}
448 462
@@ -494,10 +508,6 @@ static void tick_nohz_handler(struct clock_event_device *dev)
494 update_process_times(user_mode(regs)); 508 update_process_times(user_mode(regs));
495 profile_tick(CPU_PROFILING); 509 profile_tick(CPU_PROFILING);
496 510
497 /* Do not restart, when we are in the idle loop */
498 if (ts->tick_stopped)
499 return;
500
501 while (tick_nohz_reprogram(ts, now)) { 511 while (tick_nohz_reprogram(ts, now)) {
502 now = ktime_get(); 512 now = ktime_get();
503 tick_do_update_jiffies64(now); 513 tick_do_update_jiffies64(now);
@@ -543,6 +553,27 @@ static void tick_nohz_switch_to_nohz(void)
543 smp_processor_id()); 553 smp_processor_id());
544} 554}
545 555
556/*
557 * When NOHZ is enabled and the tick is stopped, we need to kick the
558 * tick timer from irq_enter() so that the jiffies update is kept
559 * alive during long running softirqs. That's ugly as hell, but
560 * correctness is key even if we need to fix the offending softirq in
561 * the first place.
562 *
563 * Note, this is different to tick_nohz_restart. We just kick the
564 * timer and do not touch the other magic bits which need to be done
565 * when idle is left.
566 */
567static void tick_nohz_kick_tick(int cpu)
568{
569 struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
570
571 if (!ts->tick_stopped)
572 return;
573
574 tick_nohz_restart(ts, ktime_get());
575}
576
546#else 577#else
547 578
548static inline void tick_nohz_switch_to_nohz(void) { } 579static inline void tick_nohz_switch_to_nohz(void) { }
@@ -550,6 +581,19 @@ static inline void tick_nohz_switch_to_nohz(void) { }
550#endif /* NO_HZ */ 581#endif /* NO_HZ */
551 582
552/* 583/*
584 * Called from irq_enter to notify about the possible interruption of idle()
585 */
586void tick_check_idle(int cpu)
587{
588 tick_check_oneshot_broadcast(cpu);
589#ifdef CONFIG_NO_HZ
590 tick_nohz_stop_idle(cpu);
591 tick_nohz_update_jiffies();
592 tick_nohz_kick_tick(cpu);
593#endif
594}
595
596/*
553 * High resolution timer specific code 597 * High resolution timer specific code
554 */ 598 */
555#ifdef CONFIG_HIGH_RES_TIMERS 599#ifdef CONFIG_HIGH_RES_TIMERS
@@ -602,10 +646,6 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
602 profile_tick(CPU_PROFILING); 646 profile_tick(CPU_PROFILING);
603 } 647 }
604 648
605 /* Do not restart, when we are in the idle loop */
606 if (ts->tick_stopped)
607 return HRTIMER_NORESTART;
608
609 hrtimer_forward(timer, now, tick_period); 649 hrtimer_forward(timer, now, tick_period);
610 650
611 return HRTIMER_RESTART; 651 return HRTIMER_RESTART;
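The new tick_nohz_restart() helper, which tick_nohz_kick_tick() reuses from the irq_enter() path, boils down to pushing the sched timer forward by whole tick periods until the expiry lands in the future, reprogramming as it goes. The fragment below is a plain userspace analog of that forwarding arithmetic, not the kernel code itself; the nanosecond values are arbitrary.

#include <stdint.h>
#include <stdio.h>

/*
 * Advance *expires by whole 'period' steps until it is past 'now' and
 * return how many periods were skipped, mirroring what hrtimer_forward()
 * does for the restarted sched timer.
 */
static uint64_t forward_deadline(uint64_t *expires, uint64_t now, uint64_t period)
{
	uint64_t overruns;

	if (*expires > now)
		return 0;

	overruns = (now - *expires) / period + 1;
	*expires += overruns * period;
	return overruns;
}

int main(void)
{
	uint64_t expires = 1000, now = 4600, period = 1000;	/* ns */
	uint64_t skipped = forward_deadline(&expires, now, period);

	printf("skipped %llu periods, next expiry at %llu ns\n",
	       (unsigned long long)skipped, (unsigned long long)expires);
	return 0;
}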
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index e91c29f961c9..e7acfb482a68 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -58,27 +58,26 @@ struct clocksource *clock;
58 58
59#ifdef CONFIG_GENERIC_TIME 59#ifdef CONFIG_GENERIC_TIME
60/** 60/**
61 * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook 61 * clocksource_forward_now - update clock to the current time
62 * 62 *
63 * private function, must hold xtime_lock lock when being 63 * Forward the current clock to update its state since the last call to
64 * called. Returns the number of nanoseconds since the 64 * update_wall_time(). This is useful before significant clock changes,
65 * last call to update_wall_time() (adjusted by NTP scaling) 65 * as it avoids having to deal with this time offset explicitly.
66 */ 66 */
67static inline s64 __get_nsec_offset(void) 67static void clocksource_forward_now(void)
68{ 68{
69 cycle_t cycle_now, cycle_delta; 69 cycle_t cycle_now, cycle_delta;
70 s64 ns_offset; 70 s64 nsec;
71 71
72 /* read clocksource: */
73 cycle_now = clocksource_read(clock); 72 cycle_now = clocksource_read(clock);
74
75 /* calculate the delta since the last update_wall_time: */
76 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask; 73 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
74 clock->cycle_last = cycle_now;
77 75
78 /* convert to nanoseconds: */ 76 nsec = cyc2ns(clock, cycle_delta);
79 ns_offset = cyc2ns(clock, cycle_delta); 77 timespec_add_ns(&xtime, nsec);
80 78
81 return ns_offset; 79 nsec = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
80 clock->raw_time.tv_nsec += nsec;
82} 81}
83 82
84/** 83/**
@@ -89,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
89 */ 88 */
90void getnstimeofday(struct timespec *ts) 89void getnstimeofday(struct timespec *ts)
91{ 90{
91 cycle_t cycle_now, cycle_delta;
92 unsigned long seq; 92 unsigned long seq;
93 s64 nsecs; 93 s64 nsecs;
94 94
@@ -96,7 +96,15 @@ void getnstimeofday(struct timespec *ts)
96 seq = read_seqbegin(&xtime_lock); 96 seq = read_seqbegin(&xtime_lock);
97 97
98 *ts = xtime; 98 *ts = xtime;
99 nsecs = __get_nsec_offset(); 99
100 /* read clocksource: */
101 cycle_now = clocksource_read(clock);
102
103 /* calculate the delta since the last update_wall_time: */
104 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
105
106 /* convert to nanoseconds: */
107 nsecs = cyc2ns(clock, cycle_delta);
100 108
101 } while (read_seqretry(&xtime_lock, seq)); 109 } while (read_seqretry(&xtime_lock, seq));
102 110
@@ -129,22 +137,22 @@ EXPORT_SYMBOL(do_gettimeofday);
129 */ 137 */
130int do_settimeofday(struct timespec *tv) 138int do_settimeofday(struct timespec *tv)
131{ 139{
140 struct timespec ts_delta;
132 unsigned long flags; 141 unsigned long flags;
133 time_t wtm_sec, sec = tv->tv_sec;
134 long wtm_nsec, nsec = tv->tv_nsec;
135 142
136 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC) 143 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
137 return -EINVAL; 144 return -EINVAL;
138 145
139 write_seqlock_irqsave(&xtime_lock, flags); 146 write_seqlock_irqsave(&xtime_lock, flags);
140 147
141 nsec -= __get_nsec_offset(); 148 clocksource_forward_now();
149
150 ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
151 ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
152 wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);
142 153
143 wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec); 154 xtime = *tv;
144 wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
145 155
146 set_normalized_timespec(&xtime, sec, nsec);
147 set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
148 update_xtime_cache(0); 156 update_xtime_cache(0);
149 157
150 clock->error = 0; 158 clock->error = 0;
@@ -170,22 +178,19 @@ EXPORT_SYMBOL(do_settimeofday);
170static void change_clocksource(void) 178static void change_clocksource(void)
171{ 179{
172 struct clocksource *new; 180 struct clocksource *new;
173 cycle_t now;
174 u64 nsec;
175 181
176 new = clocksource_get_next(); 182 new = clocksource_get_next();
177 183
178 if (clock == new) 184 if (clock == new)
179 return; 185 return;
180 186
181 new->cycle_last = 0; 187 clocksource_forward_now();
182 now = clocksource_read(new);
183 nsec = __get_nsec_offset();
184 timespec_add_ns(&xtime, nsec);
185 188
186 clock = new; 189 new->raw_time = clock->raw_time;
187 clock->cycle_last = now;
188 190
191 clock = new;
192 clock->cycle_last = 0;
193 clock->cycle_last = clocksource_read(new);
189 clock->error = 0; 194 clock->error = 0;
190 clock->xtime_nsec = 0; 195 clock->xtime_nsec = 0;
191 clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH); 196 clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -200,11 +205,44 @@ static void change_clocksource(void)
200 */ 205 */
201} 206}
202#else 207#else
208static inline void clocksource_forward_now(void) { }
203static inline void change_clocksource(void) { } 209static inline void change_clocksource(void) { }
204static inline s64 __get_nsec_offset(void) { return 0; }
205#endif 210#endif
206 211
207/** 212/**
213 * getrawmonotonic - Returns the raw monotonic time in a timespec
214 * @ts: pointer to the timespec to be set
215 *
216 * Returns the raw monotonic time (completely un-modified by ntp)
217 */
218void getrawmonotonic(struct timespec *ts)
219{
220 unsigned long seq;
221 s64 nsecs;
222 cycle_t cycle_now, cycle_delta;
223
224 do {
225 seq = read_seqbegin(&xtime_lock);
226
227 /* read clocksource: */
228 cycle_now = clocksource_read(clock);
229
230 /* calculate the delta since the last update_wall_time: */
231 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
232
233 /* convert to nanoseconds: */
234 nsecs = ((s64)cycle_delta * clock->mult_orig) >> clock->shift;
235
236 *ts = clock->raw_time;
237
238 } while (read_seqretry(&xtime_lock, seq));
239
240 timespec_add_ns(ts, nsecs);
241}
242EXPORT_SYMBOL(getrawmonotonic);
243
244
245/**
208 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres 246 * timekeeping_valid_for_hres - Check if timekeeping is suitable for hres
209 */ 247 */
210int timekeeping_valid_for_hres(void) 248int timekeeping_valid_for_hres(void)
@@ -265,8 +303,6 @@ void __init timekeeping_init(void)
265static int timekeeping_suspended; 303static int timekeeping_suspended;
266/* time in seconds when suspend began */ 304/* time in seconds when suspend began */
267static unsigned long timekeeping_suspend_time; 305static unsigned long timekeeping_suspend_time;
268/* xtime offset when we went into suspend */
269static s64 timekeeping_suspend_nsecs;
270 306
271/** 307/**
272 * timekeeping_resume - Resumes the generic timekeeping subsystem. 308 * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -292,8 +328,6 @@ static int timekeeping_resume(struct sys_device *dev)
292 wall_to_monotonic.tv_sec -= sleep_length; 328 wall_to_monotonic.tv_sec -= sleep_length;
293 total_sleep_time += sleep_length; 329 total_sleep_time += sleep_length;
294 } 330 }
295 /* Make sure that we have the correct xtime reference */
296 timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
297 update_xtime_cache(0); 331 update_xtime_cache(0);
298 /* re-base the last cycle value */ 332 /* re-base the last cycle value */
299 clock->cycle_last = 0; 333 clock->cycle_last = 0;
@@ -319,8 +353,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
319 timekeeping_suspend_time = read_persistent_clock(); 353 timekeeping_suspend_time = read_persistent_clock();
320 354
321 write_seqlock_irqsave(&xtime_lock, flags); 355 write_seqlock_irqsave(&xtime_lock, flags);
322 /* Get the current xtime offset */ 356 clocksource_forward_now();
323 timekeeping_suspend_nsecs = __get_nsec_offset();
324 timekeeping_suspended = 1; 357 timekeeping_suspended = 1;
325 write_sequnlock_irqrestore(&xtime_lock, flags); 358 write_sequnlock_irqrestore(&xtime_lock, flags);
326 359
@@ -454,23 +487,29 @@ void update_wall_time(void)
454#else 487#else
455 offset = clock->cycle_interval; 488 offset = clock->cycle_interval;
456#endif 489#endif
457 clock->xtime_nsec += (s64)xtime.tv_nsec << clock->shift; 490 clock->xtime_nsec = (s64)xtime.tv_nsec << clock->shift;
458 491
459 /* normally this loop will run just once, however in the 492 /* normally this loop will run just once, however in the
460 * case of lost or late ticks, it will accumulate correctly. 493 * case of lost or late ticks, it will accumulate correctly.
461 */ 494 */
462 while (offset >= clock->cycle_interval) { 495 while (offset >= clock->cycle_interval) {
463 /* accumulate one interval */ 496 /* accumulate one interval */
464 clock->xtime_nsec += clock->xtime_interval;
465 clock->cycle_last += clock->cycle_interval;
466 offset -= clock->cycle_interval; 497 offset -= clock->cycle_interval;
498 clock->cycle_last += clock->cycle_interval;
467 499
500 clock->xtime_nsec += clock->xtime_interval;
468 if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) { 501 if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
469 clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift; 502 clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
470 xtime.tv_sec++; 503 xtime.tv_sec++;
471 second_overflow(); 504 second_overflow();
472 } 505 }
473 506
507 clock->raw_time.tv_nsec += clock->raw_interval;
508 if (clock->raw_time.tv_nsec >= NSEC_PER_SEC) {
509 clock->raw_time.tv_nsec -= NSEC_PER_SEC;
510 clock->raw_time.tv_sec++;
511 }
512
474 /* accumulate error between NTP and clock interval */ 513 /* accumulate error between NTP and clock interval */
475 clock->error += tick_length; 514 clock->error += tick_length;
476 clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift); 515 clock->error -= clock->xtime_interval << (NTP_SCALE_SHIFT - clock->shift);
@@ -479,9 +518,12 @@ void update_wall_time(void)
479 /* correct the clock when NTP error is too big */ 518 /* correct the clock when NTP error is too big */
480 clocksource_adjust(offset); 519 clocksource_adjust(offset);
481 520
482 /* store full nanoseconds into xtime */ 521 /* store full nanoseconds into xtime after rounding it up and
483 xtime.tv_nsec = (s64)clock->xtime_nsec >> clock->shift; 522 * add the remainder to the error difference.
523 */
524 xtime.tv_nsec = ((s64)clock->xtime_nsec >> clock->shift) + 1;
484 clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift; 525 clock->xtime_nsec -= (s64)xtime.tv_nsec << clock->shift;
526 clock->error += clock->xtime_nsec << (NTP_SCALE_SHIFT - clock->shift);
485 527
486 update_xtime_cache(cyc2ns(clock, offset)); 528 update_xtime_cache(cyc2ns(clock, offset));
487 529
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a40e20fd0001..f6426911e35a 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -47,13 +47,14 @@ static void print_name_offset(struct seq_file *m, void *sym)
47} 47}
48 48
49static void 49static void
50print_timer(struct seq_file *m, struct hrtimer *timer, int idx, u64 now) 50print_timer(struct seq_file *m, struct hrtimer *taddr, struct hrtimer *timer,
51 int idx, u64 now)
51{ 52{
52#ifdef CONFIG_TIMER_STATS 53#ifdef CONFIG_TIMER_STATS
53 char tmp[TASK_COMM_LEN + 1]; 54 char tmp[TASK_COMM_LEN + 1];
54#endif 55#endif
55 SEQ_printf(m, " #%d: ", idx); 56 SEQ_printf(m, " #%d: ", idx);
56 print_name_offset(m, timer); 57 print_name_offset(m, taddr);
57 SEQ_printf(m, ", "); 58 SEQ_printf(m, ", ");
58 print_name_offset(m, timer->function); 59 print_name_offset(m, timer->function);
59 SEQ_printf(m, ", S:%02lx", timer->state); 60 SEQ_printf(m, ", S:%02lx", timer->state);
@@ -99,7 +100,7 @@ next_one:
99 tmp = *timer; 100 tmp = *timer;
100 spin_unlock_irqrestore(&base->cpu_base->lock, flags); 101 spin_unlock_irqrestore(&base->cpu_base->lock, flags);
101 102
102 print_timer(m, &tmp, i, now); 103 print_timer(m, timer, &tmp, i, now);
103 next++; 104 next++;
104 goto next_one; 105 goto next_one;
105 } 106 }
@@ -109,6 +110,7 @@ next_one:
109static void 110static void
110print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) 111print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
111{ 112{
113 SEQ_printf(m, " .base: %p\n", base);
112 SEQ_printf(m, " .index: %d\n", 114 SEQ_printf(m, " .index: %d\n",
113 base->index); 115 base->index);
114 SEQ_printf(m, " .resolution: %Lu nsecs\n", 116 SEQ_printf(m, " .resolution: %Lu nsecs\n",
@@ -183,12 +185,16 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
183 185
184#ifdef CONFIG_GENERIC_CLOCKEVENTS 186#ifdef CONFIG_GENERIC_CLOCKEVENTS
185static void 187static void
186print_tickdevice(struct seq_file *m, struct tick_device *td) 188print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
187{ 189{
188 struct clock_event_device *dev = td->evtdev; 190 struct clock_event_device *dev = td->evtdev;
189 191
190 SEQ_printf(m, "\n"); 192 SEQ_printf(m, "\n");
191 SEQ_printf(m, "Tick Device: mode: %d\n", td->mode); 193 SEQ_printf(m, "Tick Device: mode: %d\n", td->mode);
194 if (cpu < 0)
195 SEQ_printf(m, "Broadcast device\n");
196 else
197 SEQ_printf(m, "Per CPU device: %d\n", cpu);
192 198
193 SEQ_printf(m, "Clock Event Device: "); 199 SEQ_printf(m, "Clock Event Device: ");
194 if (!dev) { 200 if (!dev) {
@@ -222,7 +228,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
222 int cpu; 228 int cpu;
223 229
224#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST 230#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
225 print_tickdevice(m, tick_get_broadcast_device()); 231 print_tickdevice(m, tick_get_broadcast_device(), -1);
226 SEQ_printf(m, "tick_broadcast_mask: %08lx\n", 232 SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
227 tick_get_broadcast_mask()->bits[0]); 233 tick_get_broadcast_mask()->bits[0]);
228#ifdef CONFIG_TICK_ONESHOT 234#ifdef CONFIG_TICK_ONESHOT
@@ -232,7 +238,7 @@ static void timer_list_show_tickdevices(struct seq_file *m)
232 SEQ_printf(m, "\n"); 238 SEQ_printf(m, "\n");
233#endif 239#endif
234 for_each_online_cpu(cpu) 240 for_each_online_cpu(cpu)
235 print_tickdevice(m, tick_get_device(cpu)); 241 print_tickdevice(m, tick_get_device(cpu), cpu);
236 SEQ_printf(m, "\n"); 242 SEQ_printf(m, "\n");
237} 243}
238#else 244#else
@@ -244,7 +250,7 @@ static int timer_list_show(struct seq_file *m, void *v)
244 u64 now = ktime_to_ns(ktime_get()); 250 u64 now = ktime_to_ns(ktime_get());
245 int cpu; 251 int cpu;
246 252
247 SEQ_printf(m, "Timer List Version: v0.3\n"); 253 SEQ_printf(m, "Timer List Version: v0.4\n");
248 SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES); 254 SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
249 SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now); 255 SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
250 256
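With the print_tickdevice() and print_base() changes above, /proc/timer_list (now v0.4) labels each tick device as the broadcast device or a per-CPU device and prints the clock base pointer. A trivial userspace dump of the file, included only to show where the new fields appear:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/timer_list", "r");
	char line[512];

	if (!f) {
		perror("/proc/timer_list");
		return 1;
	}
	/* look for ".base:", "Broadcast device" and "Per CPU device:" lines */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}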
diff --git a/kernel/timer.c b/kernel/timer.c
index 03bc7f1f1593..56becf373c58 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -978,6 +978,7 @@ void update_process_times(int user_tick)
978 run_local_timers(); 978 run_local_timers();
979 if (rcu_pending(cpu)) 979 if (rcu_pending(cpu))
980 rcu_check_callbacks(cpu, user_tick); 980 rcu_check_callbacks(cpu, user_tick);
981 printk_tick();
981 scheduler_tick(); 982 scheduler_tick();
982 run_posix_cpu_timers(p); 983 run_posix_cpu_timers(p);
983} 984}
@@ -1435,9 +1436,11 @@ static void __cpuinit migrate_timers(int cpu)
1435 BUG_ON(cpu_online(cpu)); 1436 BUG_ON(cpu_online(cpu));
1436 old_base = per_cpu(tvec_bases, cpu); 1437 old_base = per_cpu(tvec_bases, cpu);
1437 new_base = get_cpu_var(tvec_bases); 1438 new_base = get_cpu_var(tvec_bases);
1438 1439 /*
1439 local_irq_disable(); 1440 * The caller is globally serialized and nobody else
1440 spin_lock(&new_base->lock); 1441 * takes two locks at once, so deadlock is not possible.
1442 */
1443 spin_lock_irq(&new_base->lock);
1441 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING); 1444 spin_lock_nested(&old_base->lock, SINGLE_DEPTH_NESTING);
1442 1445
1443 BUG_ON(old_base->running_timer); 1446 BUG_ON(old_base->running_timer);
@@ -1452,8 +1455,7 @@ static void __cpuinit migrate_timers(int cpu)
1452 } 1455 }
1453 1456
1454 spin_unlock(&old_base->lock); 1457 spin_unlock(&old_base->lock);
1455 spin_unlock(&new_base->lock); 1458 spin_unlock_irq(&new_base->lock);
1456 local_irq_enable();
1457 put_cpu_var(tvec_bases); 1459 put_cpu_var(tvec_bases);
1458} 1460}
1459#endif /* CONFIG_HOTPLUG_CPU */ 1461#endif /* CONFIG_HOTPLUG_CPU */
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 263e9e6bbd60..1cb3e1f616af 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -1,23 +1,37 @@
1# 1#
2# Architectures that offer an FTRACE implementation should select HAVE_FTRACE: 2# Architectures that offer an FTRACE implementation should select HAVE_FTRACE:
3# 3#
4
5config NOP_TRACER
6 bool
7
4config HAVE_FTRACE 8config HAVE_FTRACE
5 bool 9 bool
10 select NOP_TRACER
6 11
7config HAVE_DYNAMIC_FTRACE 12config HAVE_DYNAMIC_FTRACE
8 bool 13 bool
9 14
15config HAVE_FTRACE_MCOUNT_RECORD
16 bool
17
10config TRACER_MAX_TRACE 18config TRACER_MAX_TRACE
11 bool 19 bool
12 20
21config RING_BUFFER
22 bool
23
13config TRACING 24config TRACING
14 bool 25 bool
15 select DEBUG_FS 26 select DEBUG_FS
27 select RING_BUFFER
16 select STACKTRACE 28 select STACKTRACE
29 select TRACEPOINTS
17 30
18config FTRACE 31config FTRACE
19 bool "Kernel Function Tracer" 32 bool "Kernel Function Tracer"
20 depends on HAVE_FTRACE 33 depends on HAVE_FTRACE
34 depends on DEBUG_KERNEL
21 select FRAME_POINTER 35 select FRAME_POINTER
22 select TRACING 36 select TRACING
23 select CONTEXT_SWITCH_TRACER 37 select CONTEXT_SWITCH_TRACER
@@ -36,6 +50,7 @@ config IRQSOFF_TRACER
36 depends on TRACE_IRQFLAGS_SUPPORT 50 depends on TRACE_IRQFLAGS_SUPPORT
37 depends on GENERIC_TIME 51 depends on GENERIC_TIME
38 depends on HAVE_FTRACE 52 depends on HAVE_FTRACE
53 depends on DEBUG_KERNEL
39 select TRACE_IRQFLAGS 54 select TRACE_IRQFLAGS
40 select TRACING 55 select TRACING
41 select TRACER_MAX_TRACE 56 select TRACER_MAX_TRACE
@@ -59,6 +74,7 @@ config PREEMPT_TRACER
59 depends on GENERIC_TIME 74 depends on GENERIC_TIME
60 depends on PREEMPT 75 depends on PREEMPT
61 depends on HAVE_FTRACE 76 depends on HAVE_FTRACE
77 depends on DEBUG_KERNEL
62 select TRACING 78 select TRACING
63 select TRACER_MAX_TRACE 79 select TRACER_MAX_TRACE
64 help 80 help
@@ -86,6 +102,7 @@ config SYSPROF_TRACER
86config SCHED_TRACER 102config SCHED_TRACER
87 bool "Scheduling Latency Tracer" 103 bool "Scheduling Latency Tracer"
88 depends on HAVE_FTRACE 104 depends on HAVE_FTRACE
105 depends on DEBUG_KERNEL
89 select TRACING 106 select TRACING
90 select CONTEXT_SWITCH_TRACER 107 select CONTEXT_SWITCH_TRACER
91 select TRACER_MAX_TRACE 108 select TRACER_MAX_TRACE
@@ -96,16 +113,56 @@ config SCHED_TRACER
96config CONTEXT_SWITCH_TRACER 113config CONTEXT_SWITCH_TRACER
97 bool "Trace process context switches" 114 bool "Trace process context switches"
98 depends on HAVE_FTRACE 115 depends on HAVE_FTRACE
116 depends on DEBUG_KERNEL
99 select TRACING 117 select TRACING
100 select MARKERS 118 select MARKERS
101 help 119 help
102 This tracer gets called from the context switch and records 120 This tracer gets called from the context switch and records
103 all switching of tasks. 121 all switching of tasks.
104 122
123config BOOT_TRACER
124 bool "Trace boot initcalls"
125 depends on HAVE_FTRACE
126 depends on DEBUG_KERNEL
127 select TRACING
128 help
129 This tracer helps developers to optimize boot times: it records
130 the timings of the initcalls and traces key events and the identity
131 of tasks that can cause boot delays, such as context-switches.
132
133 Its aim is to be parsed by the /scripts/bootgraph.pl tool to
134 produce pretty graphics about boot inefficiencies, giving a visual
135 representation of the delays during initcalls - but the raw
136 /debug/tracing/trace text output is readable too.
137
138 ( Note that tracing self tests can't be enabled if this tracer is
139 selected, because the self-tests are an initcall as well and that
140 would invalidate the boot trace. )
141
142config STACK_TRACER
143 bool "Trace max stack"
144 depends on HAVE_FTRACE
145 depends on DEBUG_KERNEL
146 select FTRACE
147 select STACKTRACE
148 help
149 This special tracer records the maximum stack footprint of the
150 kernel and displays it in debugfs/tracing/stack_trace.
151
152 This tracer works by hooking into every function call that the
153 kernel executes, and keeping a maximum stack depth value and
154 stack-trace saved. Because this logic has to execute in every
155 kernel function, all the time, this option can slow down the
156 kernel measurably and is generally intended for kernel
157 developers only.
158
159 Say N if unsure.
160
105config DYNAMIC_FTRACE 161config DYNAMIC_FTRACE
106 bool "enable/disable ftrace tracepoints dynamically" 162 bool "enable/disable ftrace tracepoints dynamically"
107 depends on FTRACE 163 depends on FTRACE
108 depends on HAVE_DYNAMIC_FTRACE 164 depends on HAVE_DYNAMIC_FTRACE
165 depends on DEBUG_KERNEL
109 default y 166 default y
110 help 167 help
111 This option will modify all the calls to ftrace dynamically 168 This option will modify all the calls to ftrace dynamically
@@ -121,12 +178,17 @@ config DYNAMIC_FTRACE
121 were made. If so, it runs stop_machine (stops all CPUS) 178 were made. If so, it runs stop_machine (stops all CPUS)
122 and modifies the code to jump over the call to ftrace. 179 and modifies the code to jump over the call to ftrace.
123 180
181config FTRACE_MCOUNT_RECORD
182 def_bool y
183 depends on DYNAMIC_FTRACE
184 depends on HAVE_FTRACE_MCOUNT_RECORD
185
124config FTRACE_SELFTEST 186config FTRACE_SELFTEST
125 bool 187 bool
126 188
127config FTRACE_STARTUP_TEST 189config FTRACE_STARTUP_TEST
128 bool "Perform a startup test on ftrace" 190 bool "Perform a startup test on ftrace"
129 depends on TRACING 191 depends on TRACING && DEBUG_KERNEL && !BOOT_TRACER
130 select FTRACE_SELFTEST 192 select FTRACE_SELFTEST
131 help 193 help
132 This option performs a series of startup tests on ftrace. On bootup 194 This option performs a series of startup tests on ftrace. On bootup
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 71d17de17288..a85dfba88ba0 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -11,6 +11,7 @@ obj-y += trace_selftest_dynamic.o
11endif 11endif
12 12
13obj-$(CONFIG_FTRACE) += libftrace.o 13obj-$(CONFIG_FTRACE) += libftrace.o
14obj-$(CONFIG_RING_BUFFER) += ring_buffer.o
14 15
15obj-$(CONFIG_TRACING) += trace.o 16obj-$(CONFIG_TRACING) += trace.o
16obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o 17obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
@@ -19,6 +20,9 @@ obj-$(CONFIG_FTRACE) += trace_functions.o
19obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o 20obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
20obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o 21obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
21obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o 22obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
23obj-$(CONFIG_NOP_TRACER) += trace_nop.o
24obj-$(CONFIG_STACK_TRACER) += trace_stack.o
22obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o 25obj-$(CONFIG_MMIOTRACE) += trace_mmiotrace.o
26obj-$(CONFIG_BOOT_TRACER) += trace_boot.o
23 27
24libftrace-y := ftrace.o 28libftrace-y := ftrace.o
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index f6e3af31b403..4dda4f60a2a9 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -81,7 +81,7 @@ void clear_ftrace_function(void)
81 81
82static int __register_ftrace_function(struct ftrace_ops *ops) 82static int __register_ftrace_function(struct ftrace_ops *ops)
83{ 83{
84 /* Should never be called by interrupts */ 84 /* should not be called from interrupt context */
85 spin_lock(&ftrace_lock); 85 spin_lock(&ftrace_lock);
86 86
87 ops->next = ftrace_list; 87 ops->next = ftrace_list;
@@ -115,6 +115,7 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
115 struct ftrace_ops **p; 115 struct ftrace_ops **p;
116 int ret = 0; 116 int ret = 0;
117 117
118 /* should not be called from interrupt context */
118 spin_lock(&ftrace_lock); 119 spin_lock(&ftrace_lock);
119 120
120 /* 121 /*
@@ -153,6 +154,30 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
153 154
154#ifdef CONFIG_DYNAMIC_FTRACE 155#ifdef CONFIG_DYNAMIC_FTRACE
155 156
157#ifndef CONFIG_FTRACE_MCOUNT_RECORD
158/*
 159 * The hash lock is only needed when the recording of the mcount
 160 * callers is dynamic, i.e. done by the callers themselves rather
 161 * than recorded at compile time.
162 */
163static DEFINE_SPINLOCK(ftrace_hash_lock);
164#define ftrace_hash_lock(flags) spin_lock_irqsave(&ftrace_hash_lock, flags)
165#define ftrace_hash_unlock(flags) \
166 spin_unlock_irqrestore(&ftrace_hash_lock, flags)
167#else
168/* This is protected via the ftrace_lock with MCOUNT_RECORD. */
169#define ftrace_hash_lock(flags) do { (void)(flags); } while (0)
170#define ftrace_hash_unlock(flags) do { } while(0)
171#endif
172
173/*
174 * Since MCOUNT_ADDR may point to mcount itself, we do not want
175 * to get it confused by reading a reference in the code as we
176 * are parsing on objcopy output of text. Use a variable for
177 * it instead.
178 */
179static unsigned long mcount_addr = MCOUNT_ADDR;
180
156static struct task_struct *ftraced_task; 181static struct task_struct *ftraced_task;
157 182
158enum { 183enum {
@@ -171,7 +196,6 @@ static struct hlist_head ftrace_hash[FTRACE_HASHSIZE];
171 196
172static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu); 197static DEFINE_PER_CPU(int, ftrace_shutdown_disable_cpu);
173 198
174static DEFINE_SPINLOCK(ftrace_shutdown_lock);
175static DEFINE_MUTEX(ftraced_lock); 199static DEFINE_MUTEX(ftraced_lock);
176static DEFINE_MUTEX(ftrace_regex_lock); 200static DEFINE_MUTEX(ftrace_regex_lock);
177 201
@@ -294,13 +318,37 @@ static inline void ftrace_del_hash(struct dyn_ftrace *node)
294 318
295static void ftrace_free_rec(struct dyn_ftrace *rec) 319static void ftrace_free_rec(struct dyn_ftrace *rec)
296{ 320{
297 /* no locking, only called from kstop_machine */
298
299 rec->ip = (unsigned long)ftrace_free_records; 321 rec->ip = (unsigned long)ftrace_free_records;
300 ftrace_free_records = rec; 322 ftrace_free_records = rec;
301 rec->flags |= FTRACE_FL_FREE; 323 rec->flags |= FTRACE_FL_FREE;
302} 324}
303 325
326void ftrace_release(void *start, unsigned long size)
327{
328 struct dyn_ftrace *rec;
329 struct ftrace_page *pg;
330 unsigned long s = (unsigned long)start;
331 unsigned long e = s + size;
332 int i;
333
334 if (ftrace_disabled || !start)
335 return;
336
337 /* should not be called from interrupt context */
338 spin_lock(&ftrace_lock);
339
340 for (pg = ftrace_pages_start; pg; pg = pg->next) {
341 for (i = 0; i < pg->index; i++) {
342 rec = &pg->records[i];
343
344 if ((rec->ip >= s) && (rec->ip < e))
345 ftrace_free_rec(rec);
346 }
347 }
348 spin_unlock(&ftrace_lock);
349
350}
351
304static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip) 352static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
305{ 353{
306 struct dyn_ftrace *rec; 354 struct dyn_ftrace *rec;
@@ -338,7 +386,6 @@ ftrace_record_ip(unsigned long ip)
338 unsigned long flags; 386 unsigned long flags;
339 unsigned long key; 387 unsigned long key;
340 int resched; 388 int resched;
341 int atomic;
342 int cpu; 389 int cpu;
343 390
344 if (!ftrace_enabled || ftrace_disabled) 391 if (!ftrace_enabled || ftrace_disabled)
@@ -368,9 +415,7 @@ ftrace_record_ip(unsigned long ip)
368 if (ftrace_ip_in_hash(ip, key)) 415 if (ftrace_ip_in_hash(ip, key))
369 goto out; 416 goto out;
370 417
371 atomic = irqs_disabled(); 418 ftrace_hash_lock(flags);
372
373 spin_lock_irqsave(&ftrace_shutdown_lock, flags);
374 419
375 /* This ip may have hit the hash before the lock */ 420 /* This ip may have hit the hash before the lock */
376 if (ftrace_ip_in_hash(ip, key)) 421 if (ftrace_ip_in_hash(ip, key))
@@ -387,7 +432,7 @@ ftrace_record_ip(unsigned long ip)
387 ftraced_trigger = 1; 432 ftraced_trigger = 1;
388 433
389 out_unlock: 434 out_unlock:
390 spin_unlock_irqrestore(&ftrace_shutdown_lock, flags); 435 ftrace_hash_unlock(flags);
391 out: 436 out:
392 per_cpu(ftrace_shutdown_disable_cpu, cpu)--; 437 per_cpu(ftrace_shutdown_disable_cpu, cpu)--;
393 438
@@ -531,6 +576,16 @@ static void ftrace_shutdown_replenish(void)
531 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL); 576 ftrace_pages->next = (void *)get_zeroed_page(GFP_KERNEL);
532} 577}
533 578
579static void print_ip_ins(const char *fmt, unsigned char *p)
580{
581 int i;
582
583 printk(KERN_CONT "%s", fmt);
584
585 for (i = 0; i < MCOUNT_INSN_SIZE; i++)
586 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
587}
588
534static int 589static int
535ftrace_code_disable(struct dyn_ftrace *rec) 590ftrace_code_disable(struct dyn_ftrace *rec)
536{ 591{
@@ -541,10 +596,27 @@ ftrace_code_disable(struct dyn_ftrace *rec)
541 ip = rec->ip; 596 ip = rec->ip;
542 597
543 nop = ftrace_nop_replace(); 598 nop = ftrace_nop_replace();
544 call = ftrace_call_replace(ip, MCOUNT_ADDR); 599 call = ftrace_call_replace(ip, mcount_addr);
545 600
546 failed = ftrace_modify_code(ip, call, nop); 601 failed = ftrace_modify_code(ip, call, nop);
547 if (failed) { 602 if (failed) {
603 switch (failed) {
604 case 1:
605 WARN_ON_ONCE(1);
606 pr_info("ftrace faulted on modifying ");
607 print_ip_sym(ip);
608 break;
609 case 2:
610 WARN_ON_ONCE(1);
611 pr_info("ftrace failed to modify ");
612 print_ip_sym(ip);
613 print_ip_ins(" expected: ", call);
614 print_ip_ins(" actual: ", (unsigned char *)ip);
615 print_ip_ins(" replace: ", nop);
616 printk(KERN_CONT "\n");
617 break;
618 }
619
548 rec->flags |= FTRACE_FL_FAILED; 620 rec->flags |= FTRACE_FL_FAILED;
549 return 0; 621 return 0;
550 } 622 }
@@ -792,47 +864,7 @@ static int ftrace_update_code(void)
792 return 1; 864 return 1;
793} 865}
794 866
795static int ftraced(void *ignore) 867static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
796{
797 unsigned long usecs;
798
799 while (!kthread_should_stop()) {
800
801 set_current_state(TASK_INTERRUPTIBLE);
802
803 /* check once a second */
804 schedule_timeout(HZ);
805
806 if (unlikely(ftrace_disabled))
807 continue;
808
809 mutex_lock(&ftrace_sysctl_lock);
810 mutex_lock(&ftraced_lock);
811 if (!ftraced_suspend && !ftraced_stop &&
812 ftrace_update_code()) {
813 usecs = nsecs_to_usecs(ftrace_update_time);
814 if (ftrace_update_tot_cnt > 100000) {
815 ftrace_update_tot_cnt = 0;
816 pr_info("hm, dftrace overflow: %lu change%s"
817 " (%lu total) in %lu usec%s\n",
818 ftrace_update_cnt,
819 ftrace_update_cnt != 1 ? "s" : "",
820 ftrace_update_tot_cnt,
821 usecs, usecs != 1 ? "s" : "");
822 ftrace_disabled = 1;
823 WARN_ON_ONCE(1);
824 }
825 }
826 mutex_unlock(&ftraced_lock);
827 mutex_unlock(&ftrace_sysctl_lock);
828
829 ftrace_shutdown_replenish();
830 }
831 __set_current_state(TASK_RUNNING);
832 return 0;
833}
834
835static int __init ftrace_dyn_table_alloc(void)
836{ 868{
837 struct ftrace_page *pg; 869 struct ftrace_page *pg;
838 int cnt; 870 int cnt;
@@ -859,7 +891,9 @@ static int __init ftrace_dyn_table_alloc(void)
859 891
860 pg = ftrace_pages = ftrace_pages_start; 892 pg = ftrace_pages = ftrace_pages_start;
861 893
862 cnt = NR_TO_INIT / ENTRIES_PER_PAGE; 894 cnt = num_to_init / ENTRIES_PER_PAGE;
895 pr_info("ftrace: allocating %ld hash entries in %d pages\n",
896 num_to_init, cnt);
863 897
864 for (i = 0; i < cnt; i++) { 898 for (i = 0; i < cnt; i++) {
865 pg->next = (void *)get_zeroed_page(GFP_KERNEL); 899 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
@@ -901,6 +935,8 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
901 935
902 (*pos)++; 936 (*pos)++;
903 937
938 /* should not be called from interrupt context */
939 spin_lock(&ftrace_lock);
904 retry: 940 retry:
905 if (iter->idx >= iter->pg->index) { 941 if (iter->idx >= iter->pg->index) {
906 if (iter->pg->next) { 942 if (iter->pg->next) {
@@ -910,15 +946,13 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
910 } 946 }
911 } else { 947 } else {
912 rec = &iter->pg->records[iter->idx++]; 948 rec = &iter->pg->records[iter->idx++];
913 if ((!(iter->flags & FTRACE_ITER_FAILURES) && 949 if ((rec->flags & FTRACE_FL_FREE) ||
950
951 (!(iter->flags & FTRACE_ITER_FAILURES) &&
914 (rec->flags & FTRACE_FL_FAILED)) || 952 (rec->flags & FTRACE_FL_FAILED)) ||
915 953
916 ((iter->flags & FTRACE_ITER_FAILURES) && 954 ((iter->flags & FTRACE_ITER_FAILURES) &&
917 (!(rec->flags & FTRACE_FL_FAILED) || 955 !(rec->flags & FTRACE_FL_FAILED)) ||
918 (rec->flags & FTRACE_FL_FREE))) ||
919
920 ((iter->flags & FTRACE_ITER_FILTER) &&
921 !(rec->flags & FTRACE_FL_FILTER)) ||
922 956
923 ((iter->flags & FTRACE_ITER_NOTRACE) && 957 ((iter->flags & FTRACE_ITER_NOTRACE) &&
924 !(rec->flags & FTRACE_FL_NOTRACE))) { 958 !(rec->flags & FTRACE_FL_NOTRACE))) {
@@ -926,6 +960,7 @@ t_next(struct seq_file *m, void *v, loff_t *pos)
926 goto retry; 960 goto retry;
927 } 961 }
928 } 962 }
963 spin_unlock(&ftrace_lock);
929 964
930 iter->pos = *pos; 965 iter->pos = *pos;
931 966
@@ -1039,8 +1074,8 @@ static void ftrace_filter_reset(int enable)
1039 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE; 1074 unsigned long type = enable ? FTRACE_FL_FILTER : FTRACE_FL_NOTRACE;
1040 unsigned i; 1075 unsigned i;
1041 1076
1042 /* keep kstop machine from running */ 1077 /* should not be called from interrupt context */
1043 preempt_disable(); 1078 spin_lock(&ftrace_lock);
1044 if (enable) 1079 if (enable)
1045 ftrace_filtered = 0; 1080 ftrace_filtered = 0;
1046 pg = ftrace_pages_start; 1081 pg = ftrace_pages_start;
@@ -1053,7 +1088,7 @@ static void ftrace_filter_reset(int enable)
1053 } 1088 }
1054 pg = pg->next; 1089 pg = pg->next;
1055 } 1090 }
1056 preempt_enable(); 1091 spin_unlock(&ftrace_lock);
1057} 1092}
1058 1093
1059static int 1094static int
@@ -1165,8 +1200,8 @@ ftrace_match(unsigned char *buff, int len, int enable)
1165 } 1200 }
1166 } 1201 }
1167 1202
1168 /* keep kstop machine from running */ 1203 /* should not be called from interrupt context */
1169 preempt_disable(); 1204 spin_lock(&ftrace_lock);
1170 if (enable) 1205 if (enable)
1171 ftrace_filtered = 1; 1206 ftrace_filtered = 1;
1172 pg = ftrace_pages_start; 1207 pg = ftrace_pages_start;
@@ -1203,7 +1238,7 @@ ftrace_match(unsigned char *buff, int len, int enable)
1203 } 1238 }
1204 pg = pg->next; 1239 pg = pg->next;
1205 } 1240 }
1206 preempt_enable(); 1241 spin_unlock(&ftrace_lock);
1207} 1242}
1208 1243
1209static ssize_t 1244static ssize_t
@@ -1556,6 +1591,114 @@ static __init int ftrace_init_debugfs(void)
1556 1591
1557fs_initcall(ftrace_init_debugfs); 1592fs_initcall(ftrace_init_debugfs);
1558 1593
1594#ifdef CONFIG_FTRACE_MCOUNT_RECORD
1595static int ftrace_convert_nops(unsigned long *start,
1596 unsigned long *end)
1597{
1598 unsigned long *p;
1599 unsigned long addr;
1600 unsigned long flags;
1601
1602 p = start;
1603 while (p < end) {
1604 addr = ftrace_call_adjust(*p++);
1605 /* should not be called from interrupt context */
1606 spin_lock(&ftrace_lock);
1607 ftrace_record_ip(addr);
1608 spin_unlock(&ftrace_lock);
1609 ftrace_shutdown_replenish();
1610 }
1611
1612 /* p is ignored */
1613 local_irq_save(flags);
1614 __ftrace_update_code(p);
1615 local_irq_restore(flags);
1616
1617 return 0;
1618}
1619
1620void ftrace_init_module(unsigned long *start, unsigned long *end)
1621{
1622 if (ftrace_disabled || start == end)
1623 return;
1624 ftrace_convert_nops(start, end);
1625}
1626
1627extern unsigned long __start_mcount_loc[];
1628extern unsigned long __stop_mcount_loc[];
1629
1630void __init ftrace_init(void)
1631{
1632 unsigned long count, addr, flags;
1633 int ret;
1634
1635 /* Keep the ftrace pointer to the stub */
1636 addr = (unsigned long)ftrace_stub;
1637
1638 local_irq_save(flags);
1639 ftrace_dyn_arch_init(&addr);
1640 local_irq_restore(flags);
1641
1642 /* ftrace_dyn_arch_init places the return code in addr */
1643 if (addr)
1644 goto failed;
1645
1646 count = __stop_mcount_loc - __start_mcount_loc;
1647
1648 ret = ftrace_dyn_table_alloc(count);
1649 if (ret)
1650 goto failed;
1651
1652 last_ftrace_enabled = ftrace_enabled = 1;
1653
1654 ret = ftrace_convert_nops(__start_mcount_loc,
1655 __stop_mcount_loc);
1656
1657 return;
1658 failed:
1659 ftrace_disabled = 1;
1660}
1661#else /* CONFIG_FTRACE_MCOUNT_RECORD */
1662static int ftraced(void *ignore)
1663{
1664 unsigned long usecs;
1665
1666 while (!kthread_should_stop()) {
1667
1668 set_current_state(TASK_INTERRUPTIBLE);
1669
1670 /* check once a second */
1671 schedule_timeout(HZ);
1672
1673 if (unlikely(ftrace_disabled))
1674 continue;
1675
1676 mutex_lock(&ftrace_sysctl_lock);
1677 mutex_lock(&ftraced_lock);
1678 if (!ftraced_suspend && !ftraced_stop &&
1679 ftrace_update_code()) {
1680 usecs = nsecs_to_usecs(ftrace_update_time);
1681 if (ftrace_update_tot_cnt > 100000) {
1682 ftrace_update_tot_cnt = 0;
1683 pr_info("hm, dftrace overflow: %lu change%s"
1684 " (%lu total) in %lu usec%s\n",
1685 ftrace_update_cnt,
1686 ftrace_update_cnt != 1 ? "s" : "",
1687 ftrace_update_tot_cnt,
1688 usecs, usecs != 1 ? "s" : "");
1689 ftrace_disabled = 1;
1690 WARN_ON_ONCE(1);
1691 }
1692 }
1693 mutex_unlock(&ftraced_lock);
1694 mutex_unlock(&ftrace_sysctl_lock);
1695
1696 ftrace_shutdown_replenish();
1697 }
1698 __set_current_state(TASK_RUNNING);
1699 return 0;
1700}
1701
1559static int __init ftrace_dynamic_init(void) 1702static int __init ftrace_dynamic_init(void)
1560{ 1703{
1561 struct task_struct *p; 1704 struct task_struct *p;
@@ -1572,7 +1715,7 @@ static int __init ftrace_dynamic_init(void)
1572 goto failed; 1715 goto failed;
1573 } 1716 }
1574 1717
1575 ret = ftrace_dyn_table_alloc(); 1718 ret = ftrace_dyn_table_alloc(NR_TO_INIT);
1576 if (ret) 1719 if (ret)
1577 goto failed; 1720 goto failed;
1578 1721
@@ -1593,6 +1736,8 @@ static int __init ftrace_dynamic_init(void)
1593} 1736}
1594 1737
1595core_initcall(ftrace_dynamic_init); 1738core_initcall(ftrace_dynamic_init);
1739#endif /* CONFIG_FTRACE_MCOUNT_RECORD */
1740
1596#else 1741#else
1597# define ftrace_startup() do { } while (0) 1742# define ftrace_startup() do { } while (0)
1598# define ftrace_shutdown() do { } while (0) 1743# define ftrace_shutdown() do { } while (0)
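When ftrace_code_disable() fails to patch a call site it now dumps the expected, actual and replacement instruction bytes through the new print_ip_ins() helper. The snippet below reproduces that byte-dump formatting in plain userspace C; the 5-byte instruction size and the byte values are x86 assumptions made for the example (the kernel uses the per-architecture MCOUNT_INSN_SIZE), not something defined by this patch.

#include <stdio.h>

#define INSN_SIZE 5		/* assumed; the kernel uses MCOUNT_INSN_SIZE */

/* Same "aa:bb:cc:dd:ee" formatting as print_ip_ins() above. */
static void print_ins(const char *tag, const unsigned char *p)
{
	int i;

	printf("%s", tag);
	for (i = 0; i < INSN_SIZE; i++)
		printf("%s%02x", i ? ":" : "", p[i]);
	printf("\n");
}

int main(void)
{
	unsigned char call[INSN_SIZE] = { 0xe8, 0x12, 0x34, 0x56, 0x78 };	/* call rel32 */
	unsigned char nop[INSN_SIZE]  = { 0x0f, 0x1f, 0x44, 0x00, 0x00 };	/* 5-byte nop */

	print_ins(" expected: ", call);
	print_ins(" replace:  ", nop);
	return 0;
}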
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
new file mode 100644
index 000000000000..94af1fe56bb4
--- /dev/null
+++ b/kernel/trace/ring_buffer.c
@@ -0,0 +1,2014 @@
1/*
2 * Generic ring buffer
3 *
4 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
5 */
6#include <linux/ring_buffer.h>
7#include <linux/spinlock.h>
8#include <linux/debugfs.h>
9#include <linux/uaccess.h>
10#include <linux/module.h>
11#include <linux/percpu.h>
12#include <linux/mutex.h>
13#include <linux/sched.h> /* used for sched_clock() (for now) */
14#include <linux/init.h>
15#include <linux/hash.h>
16#include <linux/list.h>
17#include <linux/fs.h>
18
19/* Up this if you want to test the TIME_EXTENTS and normalization */
20#define DEBUG_SHIFT 0
21
22/* FIXME!!! */
23u64 ring_buffer_time_stamp(int cpu)
24{
25 /* shift to debug/test normalization and TIME_EXTENTS */
26 return sched_clock() << DEBUG_SHIFT;
27}
28
29void ring_buffer_normalize_time_stamp(int cpu, u64 *ts)
30{
31 /* Just stupid testing the normalize function and deltas */
32 *ts >>= DEBUG_SHIFT;
33}
34
35#define RB_EVNT_HDR_SIZE (sizeof(struct ring_buffer_event))
36#define RB_ALIGNMENT_SHIFT 2
37#define RB_ALIGNMENT (1 << RB_ALIGNMENT_SHIFT)
38#define RB_MAX_SMALL_DATA 28
39
40enum {
41 RB_LEN_TIME_EXTEND = 8,
42 RB_LEN_TIME_STAMP = 16,
43};
44
45/* inline for ring buffer fast paths */
46static inline unsigned
47rb_event_length(struct ring_buffer_event *event)
48{
49 unsigned length;
50
51 switch (event->type) {
52 case RINGBUF_TYPE_PADDING:
53 /* undefined */
54 return -1;
55
56 case RINGBUF_TYPE_TIME_EXTEND:
57 return RB_LEN_TIME_EXTEND;
58
59 case RINGBUF_TYPE_TIME_STAMP:
60 return RB_LEN_TIME_STAMP;
61
62 case RINGBUF_TYPE_DATA:
63 if (event->len)
64 length = event->len << RB_ALIGNMENT_SHIFT;
65 else
66 length = event->array[0];
67 return length + RB_EVNT_HDR_SIZE;
68 default:
69 BUG();
70 }
71 /* not hit */
72 return 0;
73}
74
75/**
76 * ring_buffer_event_length - return the length of the event
77 * @event: the event to get the length of
78 */
79unsigned ring_buffer_event_length(struct ring_buffer_event *event)
80{
81 return rb_event_length(event);
82}
83
84/* inline for ring buffer fast paths */
85static inline void *
86rb_event_data(struct ring_buffer_event *event)
87{
88 BUG_ON(event->type != RINGBUF_TYPE_DATA);
89 /* If length is in len field, then array[0] has the data */
90 if (event->len)
91 return (void *)&event->array[0];
92 /* Otherwise length is in array[0] and array[1] has the data */
93 return (void *)&event->array[1];
94}
95
96/**
97 * ring_buffer_event_data - return the data of the event
98 * @event: the event to get the data from
99 */
100void *ring_buffer_event_data(struct ring_buffer_event *event)
101{
102 return rb_event_data(event);
103}
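/*
 * Editor's note -- illustrative sketch, not part of this patch: how a
 * reader might use the two accessors above once it holds an event
 * pointer.  my_show_event() is a hypothetical helper.
 */
static void my_show_event(struct ring_buffer_event *event)
{
	void *body = ring_buffer_event_data(event);
	unsigned len = ring_buffer_event_length(event);

	/* in this version the returned length includes the event header */
	printk(KERN_DEBUG "event: %u bytes (header included) at %p\n",
	       len, body);
}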
104
105#define for_each_buffer_cpu(buffer, cpu) \
106 for_each_cpu_mask(cpu, buffer->cpumask)
107
108#define TS_SHIFT 27
109#define TS_MASK ((1ULL << TS_SHIFT) - 1)
110#define TS_DELTA_TEST (~TS_MASK)
111
112/*
113 * This hack stolen from mm/slob.c.
114 * We can store per page timing information in the page frame of the page.
115 * Thanks to Peter Zijlstra for suggesting this idea.
116 */
117struct buffer_page {
118 u64 time_stamp; /* page time stamp */
119 local_t write; /* index for next write */
120 local_t commit; /* write committed index */
121 unsigned read; /* index for next read */
122 struct list_head list; /* list of free pages */
123 void *page; /* Actual data page */
124};
125
126/*
127 * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing
128 * this issue out.
129 */
130static inline void free_buffer_page(struct buffer_page *bpage)
131{
132 if (bpage->page)
133 __free_page(bpage->page);
134 kfree(bpage);
135}
136
137/*
138 * We need to fit the time_stamp delta into 27 bits.
139 */
140static inline int test_time_stamp(u64 delta)
141{
142 if (delta & TS_DELTA_TEST)
143 return 1;
144 return 0;
145}
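/*
 * Editor's note -- a worked example of the limit above: with
 * TS_SHIFT = 27 the per-event delta holds at most (1 << 27) - 1
 * clock units, about 134ms when the time stamps come from
 * sched_clock() in nanoseconds.  Larger deltas fail
 * test_time_stamp() and force a TIME_EXTEND event instead.
 */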
146
147#define BUF_PAGE_SIZE PAGE_SIZE
148
149/*
150 * head_page == tail_page && head == tail then buffer is empty.
151 */
152struct ring_buffer_per_cpu {
153 int cpu;
154 struct ring_buffer *buffer;
155 spinlock_t lock;
156 struct lock_class_key lock_key;
157 struct list_head pages;
158 struct buffer_page *head_page; /* read from head */
159 struct buffer_page *tail_page; /* write to tail */
160 struct buffer_page *commit_page; /* committed pages */
161 struct buffer_page *reader_page;
162 unsigned long overrun;
163 unsigned long entries;
164 u64 write_stamp;
165 u64 read_stamp;
166 atomic_t record_disabled;
167};
168
169struct ring_buffer {
170 unsigned long size;
171 unsigned pages;
172 unsigned flags;
173 int cpus;
174 cpumask_t cpumask;
175 atomic_t record_disabled;
176
177 struct mutex mutex;
178
179 struct ring_buffer_per_cpu **buffers;
180};
181
182struct ring_buffer_iter {
183 struct ring_buffer_per_cpu *cpu_buffer;
184 unsigned long head;
185 struct buffer_page *head_page;
186 u64 read_stamp;
187};
188
189#define RB_WARN_ON(buffer, cond) \
190 do { \
191 if (unlikely(cond)) { \
192 atomic_inc(&buffer->record_disabled); \
193 WARN_ON(1); \
194 } \
195 } while (0)
196
197#define RB_WARN_ON_RET(buffer, cond) \
198 do { \
199 if (unlikely(cond)) { \
200 atomic_inc(&buffer->record_disabled); \
201 WARN_ON(1); \
202 return -1; \
203 } \
204 } while (0)
205
206#define RB_WARN_ON_ONCE(buffer, cond) \
207 do { \
208 static int once; \
209 if (unlikely(cond) && !once) { \
210 once++; \
211 atomic_inc(&buffer->record_disabled); \
212 WARN_ON(1); \
213 } \
214 } while (0)
215
216/**
217 * rb_check_pages - integrity check of buffer pages
218 * @cpu_buffer: CPU buffer with pages to test
219 *
220 * As a safety measure we check to make sure the data pages have not
221 * been corrupted.
222 */
223static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
224{
225 struct list_head *head = &cpu_buffer->pages;
226 struct buffer_page *page, *tmp;
227
228 RB_WARN_ON_RET(cpu_buffer, head->next->prev != head);
229 RB_WARN_ON_RET(cpu_buffer, head->prev->next != head);
230
231 list_for_each_entry_safe(page, tmp, head, list) {
232 RB_WARN_ON_RET(cpu_buffer,
233 page->list.next->prev != &page->list);
234 RB_WARN_ON_RET(cpu_buffer,
235 page->list.prev->next != &page->list);
236 }
237
238 return 0;
239}
240
241static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
242 unsigned nr_pages)
243{
244 struct list_head *head = &cpu_buffer->pages;
245 struct buffer_page *page, *tmp;
246 unsigned long addr;
247 LIST_HEAD(pages);
248 unsigned i;
249
250 for (i = 0; i < nr_pages; i++) {
251 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
252 GFP_KERNEL, cpu_to_node(cpu_buffer->cpu));
253 if (!page)
254 goto free_pages;
255 list_add(&page->list, &pages);
256
257 addr = __get_free_page(GFP_KERNEL);
258 if (!addr)
259 goto free_pages;
260 page->page = (void *)addr;
261 }
262
263 list_splice(&pages, head);
264
265 rb_check_pages(cpu_buffer);
266
267 return 0;
268
269 free_pages:
270 list_for_each_entry_safe(page, tmp, &pages, list) {
271 list_del_init(&page->list);
272 free_buffer_page(page);
273 }
274 return -ENOMEM;
275}
276
277static struct ring_buffer_per_cpu *
278rb_allocate_cpu_buffer(struct ring_buffer *buffer, int cpu)
279{
280 struct ring_buffer_per_cpu *cpu_buffer;
281 struct buffer_page *page;
282 unsigned long addr;
283 int ret;
284
285 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
286 GFP_KERNEL, cpu_to_node(cpu));
287 if (!cpu_buffer)
288 return NULL;
289
290 cpu_buffer->cpu = cpu;
291 cpu_buffer->buffer = buffer;
292 spin_lock_init(&cpu_buffer->lock);
293 INIT_LIST_HEAD(&cpu_buffer->pages);
294
295 page = kzalloc_node(ALIGN(sizeof(*page), cache_line_size()),
296 GFP_KERNEL, cpu_to_node(cpu));
297 if (!page)
298 goto fail_free_buffer;
299
300 cpu_buffer->reader_page = page;
301 addr = __get_free_page(GFP_KERNEL);
302 if (!addr)
303 goto fail_free_reader;
304 page->page = (void *)addr;
305
306 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
307
308 ret = rb_allocate_pages(cpu_buffer, buffer->pages);
309 if (ret < 0)
310 goto fail_free_reader;
311
312 cpu_buffer->head_page
313 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
314 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;
315
316 return cpu_buffer;
317
318 fail_free_reader:
319 free_buffer_page(cpu_buffer->reader_page);
320
321 fail_free_buffer:
322 kfree(cpu_buffer);
323 return NULL;
324}
325
326static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
327{
328 struct list_head *head = &cpu_buffer->pages;
329 struct buffer_page *page, *tmp;
330
331 list_del_init(&cpu_buffer->reader_page->list);
332 free_buffer_page(cpu_buffer->reader_page);
333
334 list_for_each_entry_safe(page, tmp, head, list) {
335 list_del_init(&page->list);
336 free_buffer_page(page);
337 }
338 kfree(cpu_buffer);
339}
340
341/*
342 * Causes compile errors if the struct buffer_page gets bigger
343 * than the struct page.
344 */
345extern int ring_buffer_page_too_big(void);
346
347/**
348 * ring_buffer_alloc - allocate a new ring_buffer
349 * @size: the size in bytes that is needed.
350 * @flags: attributes to set for the ring buffer.
351 *
352 * Currently the only flag that is available is the RB_FL_OVERWRITE
353 * flag. This flag means that the buffer will overwrite old data
354 * when the buffer wraps. If this flag is not set, the buffer will
355 * drop data when the tail hits the head.
356 */
357struct ring_buffer *ring_buffer_alloc(unsigned long size, unsigned flags)
358{
359 struct ring_buffer *buffer;
360 int bsize;
361 int cpu;
362
363 /* Paranoid! Optimizes out when all is well */
364 if (sizeof(struct buffer_page) > sizeof(struct page))
365 ring_buffer_page_too_big();
366
367
368 /* keep it in its own cache line */
369 buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
370 GFP_KERNEL);
371 if (!buffer)
372 return NULL;
373
374 buffer->pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
375 buffer->flags = flags;
376
377 /* need at least two pages */
378 if (buffer->pages == 1)
379 buffer->pages++;
380
381 buffer->cpumask = cpu_possible_map;
382 buffer->cpus = nr_cpu_ids;
383
384 bsize = sizeof(void *) * nr_cpu_ids;
385 buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
386 GFP_KERNEL);
387 if (!buffer->buffers)
388 goto fail_free_buffer;
389
390 for_each_buffer_cpu(buffer, cpu) {
391 buffer->buffers[cpu] =
392 rb_allocate_cpu_buffer(buffer, cpu);
393 if (!buffer->buffers[cpu])
394 goto fail_free_buffers;
395 }
396
397 mutex_init(&buffer->mutex);
398
399 return buffer;
400
401 fail_free_buffers:
402 for_each_buffer_cpu(buffer, cpu) {
403 if (buffer->buffers[cpu])
404 rb_free_cpu_buffer(buffer->buffers[cpu]);
405 }
406 kfree(buffer->buffers);
407
408 fail_free_buffer:
409 kfree(buffer);
410 return NULL;
411}
412
413/**
414 * ring_buffer_free - free a ring buffer.
415 * @buffer: the buffer to free.
416 */
417void
418ring_buffer_free(struct ring_buffer *buffer)
419{
420 int cpu;
421
422 for_each_buffer_cpu(buffer, cpu)
423 rb_free_cpu_buffer(buffer->buffers[cpu]);
424
425 kfree(buffer);
426}
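/*
 * Editor's note -- illustrative sketch, not part of this patch: a
 * minimal allocate/free round trip, assuming <linux/ring_buffer.h>
 * is included.  The 1MB size and overwrite mode are arbitrary
 * choices for the example.
 */
static struct ring_buffer *my_buffer;

static int __init my_buffer_init(void)
{
	/* size is rounded up to pages; a minimum of two pages is kept */
	my_buffer = ring_buffer_alloc(1024 * 1024, RB_FL_OVERWRITE);
	return my_buffer ? 0 : -ENOMEM;
}

static void my_buffer_exit(void)
{
	ring_buffer_free(my_buffer);
}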
427
428static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
429
430static void
431rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
432{
433 struct buffer_page *page;
434 struct list_head *p;
435 unsigned i;
436
437 atomic_inc(&cpu_buffer->record_disabled);
438 synchronize_sched();
439
440 for (i = 0; i < nr_pages; i++) {
441 BUG_ON(list_empty(&cpu_buffer->pages));
442 p = cpu_buffer->pages.next;
443 page = list_entry(p, struct buffer_page, list);
444 list_del_init(&page->list);
445 free_buffer_page(page);
446 }
447 BUG_ON(list_empty(&cpu_buffer->pages));
448
449 rb_reset_cpu(cpu_buffer);
450
451 rb_check_pages(cpu_buffer);
452
453 atomic_dec(&cpu_buffer->record_disabled);
454
455}
456
457static void
458rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
459 struct list_head *pages, unsigned nr_pages)
460{
461 struct buffer_page *page;
462 struct list_head *p;
463 unsigned i;
464
465 atomic_inc(&cpu_buffer->record_disabled);
466 synchronize_sched();
467
468 for (i = 0; i < nr_pages; i++) {
469 BUG_ON(list_empty(pages));
470 p = pages->next;
471 page = list_entry(p, struct buffer_page, list);
472 list_del_init(&page->list);
473 list_add_tail(&page->list, &cpu_buffer->pages);
474 }
475 rb_reset_cpu(cpu_buffer);
476
477 rb_check_pages(cpu_buffer);
478
479 atomic_dec(&cpu_buffer->record_disabled);
480}
481
482/**
483 * ring_buffer_resize - resize the ring buffer
484 * @buffer: the buffer to resize.
485 * @size: the new size.
486 *
487 * The tracer is responsible for making sure that the buffer is
488 * not being used while changing the size.
489 * Note: We may be able to change the above requirement by using
490 * RCU synchronizations.
491 *
492 * Minimum size is 2 * BUF_PAGE_SIZE.
493 *
494 * Returns -1 on failure.
495 */
496int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size)
497{
498 struct ring_buffer_per_cpu *cpu_buffer;
499 unsigned nr_pages, rm_pages, new_pages;
500 struct buffer_page *page, *tmp;
501 unsigned long buffer_size;
502 unsigned long addr;
503 LIST_HEAD(pages);
504 int i, cpu;
505
506 size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
507 size *= BUF_PAGE_SIZE;
508 buffer_size = buffer->pages * BUF_PAGE_SIZE;
509
510 /* we need a minimum of two pages */
511 if (size < BUF_PAGE_SIZE * 2)
512 size = BUF_PAGE_SIZE * 2;
513
514 if (size == buffer_size)
515 return size;
516
517 mutex_lock(&buffer->mutex);
518
519 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
520
521 if (size < buffer_size) {
522
523 /* easy case, just free pages */
524 BUG_ON(nr_pages >= buffer->pages);
525
526 rm_pages = buffer->pages - nr_pages;
527
528 for_each_buffer_cpu(buffer, cpu) {
529 cpu_buffer = buffer->buffers[cpu];
530 rb_remove_pages(cpu_buffer, rm_pages);
531 }
532 goto out;
533 }
534
535 /*
536 * This is a bit more difficult. We only want to add pages
537 * when we can allocate enough for all CPUs. We do this
538 * by allocating all the pages and storing them on a local
539 * link list. If we succeed in our allocation, then we
540 * add these pages to the cpu_buffers. Otherwise we just free
541 * them all and return -ENOMEM;
542 */
543 BUG_ON(nr_pages <= buffer->pages);
544 new_pages = nr_pages - buffer->pages;
545
546 for_each_buffer_cpu(buffer, cpu) {
547 for (i = 0; i < new_pages; i++) {
548 page = kzalloc_node(ALIGN(sizeof(*page),
549 cache_line_size()),
550 GFP_KERNEL, cpu_to_node(cpu));
551 if (!page)
552 goto free_pages;
553 list_add(&page->list, &pages);
554 addr = __get_free_page(GFP_KERNEL);
555 if (!addr)
556 goto free_pages;
557 page->page = (void *)addr;
558 }
559 }
560
561 for_each_buffer_cpu(buffer, cpu) {
562 cpu_buffer = buffer->buffers[cpu];
563 rb_insert_pages(cpu_buffer, &pages, new_pages);
564 }
565
566 BUG_ON(!list_empty(&pages));
567
568 out:
569 buffer->pages = nr_pages;
570 mutex_unlock(&buffer->mutex);
571
572 return size;
573
574 free_pages:
575 list_for_each_entry_safe(page, tmp, &pages, list) {
576 list_del_init(&page->list);
577 free_buffer_page(page);
578 }
579 return -ENOMEM;
580}
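/*
 * Editor's note -- illustrative only: doubling the buffer with the
 * resize call above.  The caller must guarantee the buffer is idle,
 * as the comment above requires; ring_buffer_size() is defined
 * later in this file.
 */
static int my_double_buffer(struct ring_buffer *buffer)
{
	unsigned long new_size = 2 * ring_buffer_size(buffer);

	return ring_buffer_resize(buffer, new_size) < 0 ? -ENOMEM : 0;
}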
581
582static inline int rb_null_event(struct ring_buffer_event *event)
583{
584 return event->type == RINGBUF_TYPE_PADDING;
585}
586
587static inline void *__rb_page_index(struct buffer_page *page, unsigned index)
588{
589 return page->page + index;
590}
591
592static inline struct ring_buffer_event *
593rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
594{
595 return __rb_page_index(cpu_buffer->reader_page,
596 cpu_buffer->reader_page->read);
597}
598
599static inline struct ring_buffer_event *
600rb_head_event(struct ring_buffer_per_cpu *cpu_buffer)
601{
602 return __rb_page_index(cpu_buffer->head_page,
603 cpu_buffer->head_page->read);
604}
605
606static inline struct ring_buffer_event *
607rb_iter_head_event(struct ring_buffer_iter *iter)
608{
609 return __rb_page_index(iter->head_page, iter->head);
610}
611
612static inline unsigned rb_page_write(struct buffer_page *bpage)
613{
614 return local_read(&bpage->write);
615}
616
617static inline unsigned rb_page_commit(struct buffer_page *bpage)
618{
619 return local_read(&bpage->commit);
620}
621
622/* Size is determined by what has been committed */
623static inline unsigned rb_page_size(struct buffer_page *bpage)
624{
625 return rb_page_commit(bpage);
626}
627
628static inline unsigned
629rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
630{
631 return rb_page_commit(cpu_buffer->commit_page);
632}
633
634static inline unsigned rb_head_size(struct ring_buffer_per_cpu *cpu_buffer)
635{
636 return rb_page_commit(cpu_buffer->head_page);
637}
638
639/*
640 * When the tail hits the head and the buffer is in overwrite mode,
641 * the head jumps to the next page and all content on the previous
642 * page is discarded. But before doing so, we update the overrun
643 * variable of the buffer.
644 */
645static void rb_update_overflow(struct ring_buffer_per_cpu *cpu_buffer)
646{
647 struct ring_buffer_event *event;
648 unsigned long head;
649
650 for (head = 0; head < rb_head_size(cpu_buffer);
651 head += rb_event_length(event)) {
652
653 event = __rb_page_index(cpu_buffer->head_page, head);
654 BUG_ON(rb_null_event(event));
655 /* Only count data entries */
656 if (event->type != RINGBUF_TYPE_DATA)
657 continue;
658 cpu_buffer->overrun++;
659 cpu_buffer->entries--;
660 }
661}
662
663static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer,
664 struct buffer_page **page)
665{
666 struct list_head *p = (*page)->list.next;
667
668 if (p == &cpu_buffer->pages)
669 p = p->next;
670
671 *page = list_entry(p, struct buffer_page, list);
672}
673
674static inline unsigned
675rb_event_index(struct ring_buffer_event *event)
676{
677 unsigned long addr = (unsigned long)event;
678
679 return (addr & ~PAGE_MASK) - (PAGE_SIZE - BUF_PAGE_SIZE);
680}
681
682static inline int
683rb_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
684 struct ring_buffer_event *event)
685{
686 unsigned long addr = (unsigned long)event;
687 unsigned long index;
688
689 index = rb_event_index(event);
690 addr &= PAGE_MASK;
691
692 return cpu_buffer->commit_page->page == (void *)addr &&
693 rb_commit_index(cpu_buffer) == index;
694}
695
696static inline void
697rb_set_commit_event(struct ring_buffer_per_cpu *cpu_buffer,
698 struct ring_buffer_event *event)
699{
700 unsigned long addr = (unsigned long)event;
701 unsigned long index;
702
703 index = rb_event_index(event);
704 addr &= PAGE_MASK;
705
706 while (cpu_buffer->commit_page->page != (void *)addr) {
707 RB_WARN_ON(cpu_buffer,
708 cpu_buffer->commit_page == cpu_buffer->tail_page);
709 cpu_buffer->commit_page->commit =
710 cpu_buffer->commit_page->write;
711 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
712 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
713 }
714
715 /* Now set the commit to the event's index */
716 local_set(&cpu_buffer->commit_page->commit, index);
717}
718
719static inline void
720rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
721{
722 /*
723 * We only race with interrupts and NMIs on this CPU.
724 * If we own the commit event, then we can commit
725 * all others that interrupted us, since the interruptions
726 * are in stack format (they finish before they come
727 * back to us). This allows us to do a simple loop to
728 * assign the commit to the tail.
729 */
730 while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
731 cpu_buffer->commit_page->commit =
732 cpu_buffer->commit_page->write;
733 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
734 cpu_buffer->write_stamp = cpu_buffer->commit_page->time_stamp;
735 /* add barrier to keep gcc from optimizing too much */
736 barrier();
737 }
738 while (rb_commit_index(cpu_buffer) !=
739 rb_page_write(cpu_buffer->commit_page)) {
740 cpu_buffer->commit_page->commit =
741 cpu_buffer->commit_page->write;
742 barrier();
743 }
744}
745
746static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
747{
748 cpu_buffer->read_stamp = cpu_buffer->reader_page->time_stamp;
749 cpu_buffer->reader_page->read = 0;
750}
751
752static inline void rb_inc_iter(struct ring_buffer_iter *iter)
753{
754 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
755
756 /*
757 * The iterator could be on the reader page (it starts there).
758 * But the head could have moved, since the reader was
759 * found. Check for this case and assign the iterator
760 * to the head page instead of next.
761 */
762 if (iter->head_page == cpu_buffer->reader_page)
763 iter->head_page = cpu_buffer->head_page;
764 else
765 rb_inc_page(cpu_buffer, &iter->head_page);
766
767 iter->read_stamp = iter->head_page->time_stamp;
768 iter->head = 0;
769}
770
771/**
772 * rb_update_event - update event type and data
773 * @event: the event to update
774 * @type: the type of event
775 * @length: the size of the event field in the ring buffer
776 *
777 * Update the type and data fields of the event. The length
778 * is the actual size that is written to the ring buffer,
779 * and with this, we can determine what to place into the
780 * data field.
781 */
782static inline void
783rb_update_event(struct ring_buffer_event *event,
784 unsigned type, unsigned length)
785{
786 event->type = type;
787
788 switch (type) {
789
790 case RINGBUF_TYPE_PADDING:
791 break;
792
793 case RINGBUF_TYPE_TIME_EXTEND:
794 event->len =
795 (RB_LEN_TIME_EXTEND + (RB_ALIGNMENT-1))
796 >> RB_ALIGNMENT_SHIFT;
797 break;
798
799 case RINGBUF_TYPE_TIME_STAMP:
800 event->len =
801 (RB_LEN_TIME_STAMP + (RB_ALIGNMENT-1))
802 >> RB_ALIGNMENT_SHIFT;
803 break;
804
805 case RINGBUF_TYPE_DATA:
806 length -= RB_EVNT_HDR_SIZE;
807 if (length > RB_MAX_SMALL_DATA) {
808 event->len = 0;
809 event->array[0] = length;
810 } else
811 event->len =
812 (length + (RB_ALIGNMENT-1))
813 >> RB_ALIGNMENT_SHIFT;
814 break;
815 default:
816 BUG();
817 }
818}
819
820static inline unsigned rb_calculate_event_length(unsigned length)
821{
822 struct ring_buffer_event event; /* Used only for sizeof array */
823
824 /* zero length can cause confusions */
825 if (!length)
826 length = 1;
827
828 if (length > RB_MAX_SMALL_DATA)
829 length += sizeof(event.array[0]);
830
831 length += RB_EVNT_HDR_SIZE;
832 length = ALIGN(length, RB_ALIGNMENT);
833
834 return length;
835}
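/*
 * Editor's note -- a worked example of the length encoding above,
 * assuming the 4-byte event header from <linux/ring_buffer.h>:
 *
 *   12-byte payload  -> 12 + 4 (header) = 16 bytes reserved, already
 *                       aligned; event->len = 12 >> RB_ALIGNMENT_SHIFT = 3
 *   100-byte payload -> larger than RB_MAX_SMALL_DATA (28), so one
 *                       extra array word is added, event->len is set
 *                       to 0, the size goes into array[0] and the
 *                       data starts at array[1]
 */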
836
837static struct ring_buffer_event *
838__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
839 unsigned type, unsigned long length, u64 *ts)
840{
841 struct buffer_page *tail_page, *head_page, *reader_page;
842 unsigned long tail, write;
843 struct ring_buffer *buffer = cpu_buffer->buffer;
844 struct ring_buffer_event *event;
845 unsigned long flags;
846
847 tail_page = cpu_buffer->tail_page;
848 write = local_add_return(length, &tail_page->write);
849 tail = write - length;
850
851 /* See if we shot past the end of this buffer page */
852 if (write > BUF_PAGE_SIZE) {
853 struct buffer_page *next_page = tail_page;
854
855 spin_lock_irqsave(&cpu_buffer->lock, flags);
856
857 rb_inc_page(cpu_buffer, &next_page);
858
859 head_page = cpu_buffer->head_page;
860 reader_page = cpu_buffer->reader_page;
861
862 /* we grabbed the lock before incrementing */
863 RB_WARN_ON(cpu_buffer, next_page == reader_page);
864
865 /*
866 * If for some reason, we had an interrupt storm that made
867 * it all the way around the buffer, bail, and warn
868 * about it.
869 */
870 if (unlikely(next_page == cpu_buffer->commit_page)) {
871 WARN_ON_ONCE(1);
872 goto out_unlock;
873 }
874
875 if (next_page == head_page) {
876 if (!(buffer->flags & RB_FL_OVERWRITE)) {
877 /* reset write */
878 if (tail <= BUF_PAGE_SIZE)
879 local_set(&tail_page->write, tail);
880 goto out_unlock;
881 }
882
883 /* tail_page has not moved yet? */
884 if (tail_page == cpu_buffer->tail_page) {
885 /* count overflows */
886 rb_update_overflow(cpu_buffer);
887
888 rb_inc_page(cpu_buffer, &head_page);
889 cpu_buffer->head_page = head_page;
890 cpu_buffer->head_page->read = 0;
891 }
892 }
893
894 /*
895 * If the tail page is still the same as what we think
896 * it is, then it is up to us to update the tail
897 * pointer.
898 */
899 if (tail_page == cpu_buffer->tail_page) {
900 local_set(&next_page->write, 0);
901 local_set(&next_page->commit, 0);
902 cpu_buffer->tail_page = next_page;
903
904 /* reread the time stamp */
905 *ts = ring_buffer_time_stamp(cpu_buffer->cpu);
906 cpu_buffer->tail_page->time_stamp = *ts;
907 }
908
909 /*
910 * The actual tail page has moved forward.
911 */
912 if (tail < BUF_PAGE_SIZE) {
913 /* Mark the rest of the page with padding */
914 event = __rb_page_index(tail_page, tail);
915 event->type = RINGBUF_TYPE_PADDING;
916 }
917
918 if (tail <= BUF_PAGE_SIZE)
919 /* Set the write back to the previous setting */
920 local_set(&tail_page->write, tail);
921
922 /*
923 * If this was a commit entry that failed,
924 * increment that too
925 */
926 if (tail_page == cpu_buffer->commit_page &&
927 tail == rb_commit_index(cpu_buffer)) {
928 rb_set_commit_to_write(cpu_buffer);
929 }
930
931 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
932
933 /* fail and let the caller try again */
934 return ERR_PTR(-EAGAIN);
935 }
936
937 /* We reserved something on the buffer */
938
939 BUG_ON(write > BUF_PAGE_SIZE);
940
941 event = __rb_page_index(tail_page, tail);
942 rb_update_event(event, type, length);
943
944 /*
945 * If this is a commit and the tail is zero, then update
946 * this page's time stamp.
947 */
948 if (!tail && rb_is_commit(cpu_buffer, event))
949 cpu_buffer->commit_page->time_stamp = *ts;
950
951 return event;
952
953 out_unlock:
954 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
955 return NULL;
956}
957
958static int
959rb_add_time_stamp(struct ring_buffer_per_cpu *cpu_buffer,
960 u64 *ts, u64 *delta)
961{
962 struct ring_buffer_event *event;
963 static int once;
964 int ret;
965
966 if (unlikely(*delta > (1ULL << 59) && !once++)) {
967 printk(KERN_WARNING "Delta way too big! %llu"
968 " ts=%llu write stamp = %llu\n",
969 *delta, *ts, cpu_buffer->write_stamp);
970 WARN_ON(1);
971 }
972
973 /*
974 * The delta is too big, we need to add a
975 * new timestamp.
976 */
977 event = __rb_reserve_next(cpu_buffer,
978 RINGBUF_TYPE_TIME_EXTEND,
979 RB_LEN_TIME_EXTEND,
980 ts);
981 if (!event)
982 return -EBUSY;
983
984 if (PTR_ERR(event) == -EAGAIN)
985 return -EAGAIN;
986
987 /* Only a committed time event can update the write stamp */
988 if (rb_is_commit(cpu_buffer, event)) {
989 /*
990 * If this is the first on the page, then we need to
991 * update the page itself, and just put in a zero.
992 */
993 if (rb_event_index(event)) {
994 event->time_delta = *delta & TS_MASK;
995 event->array[0] = *delta >> TS_SHIFT;
996 } else {
997 cpu_buffer->commit_page->time_stamp = *ts;
998 event->time_delta = 0;
999 event->array[0] = 0;
1000 }
1001 cpu_buffer->write_stamp = *ts;
1002 /* let the caller know this was the commit */
1003 ret = 1;
1004 } else {
1005 /* Darn, this is just wasted space */
1006 event->time_delta = 0;
1007 event->array[0] = 0;
1008 ret = 0;
1009 }
1010
1011 *delta = 0;
1012
1013 return ret;
1014}
1015
1016static struct ring_buffer_event *
1017rb_reserve_next_event(struct ring_buffer_per_cpu *cpu_buffer,
1018 unsigned type, unsigned long length)
1019{
1020 struct ring_buffer_event *event;
1021 u64 ts, delta;
1022 int commit = 0;
1023
1024 again:
1025 ts = ring_buffer_time_stamp(cpu_buffer->cpu);
1026
1027 /*
1028 * Only the first commit can update the timestamp.
1029 * Yes there is a race here. If an interrupt comes in
1030 * just after the conditional and it traces too, then it
1031 * will also check the deltas. More than one timestamp may
1032 * also be made. But only the entry that did the actual
1033 * commit will be something other than zero.
1034 */
1035 if (cpu_buffer->tail_page == cpu_buffer->commit_page &&
1036 rb_page_write(cpu_buffer->tail_page) ==
1037 rb_commit_index(cpu_buffer)) {
1038
1039 delta = ts - cpu_buffer->write_stamp;
1040
1041 /* make sure this delta is calculated here */
1042 barrier();
1043
1044 /* Did the write stamp get updated already? */
1045 if (unlikely(ts < cpu_buffer->write_stamp))
1046 goto again;
1047
1048 if (test_time_stamp(delta)) {
1049
1050 commit = rb_add_time_stamp(cpu_buffer, &ts, &delta);
1051
1052 if (commit == -EBUSY)
1053 return NULL;
1054
1055 if (commit == -EAGAIN)
1056 goto again;
1057
1058 RB_WARN_ON(cpu_buffer, commit < 0);
1059 }
1060 } else
1061 /* Non commits have zero deltas */
1062 delta = 0;
1063
1064 event = __rb_reserve_next(cpu_buffer, type, length, &ts);
1065 if (PTR_ERR(event) == -EAGAIN)
1066 goto again;
1067
1068 if (!event) {
1069 if (unlikely(commit))
1070 /*
1071 * Ouch! We needed a timestamp and it was committed. But
1072 * we didn't get our event reserved.
1073 */
1074 rb_set_commit_to_write(cpu_buffer);
1075 return NULL;
1076 }
1077
1078 /*
1079 * If the timestamp was committed, make the commit our entry
1080 * now so that we will update it when needed.
1081 */
1082 if (commit)
1083 rb_set_commit_event(cpu_buffer, event);
1084 else if (!rb_is_commit(cpu_buffer, event))
1085 delta = 0;
1086
1087 event->time_delta = delta;
1088
1089 return event;
1090}
1091
1092static DEFINE_PER_CPU(int, rb_need_resched);
1093
1094/**
1095 * ring_buffer_lock_reserve - reserve a part of the buffer
1096 * @buffer: the ring buffer to reserve from
1097 * @length: the length of the data to reserve (excluding event header)
1098 * @flags: a pointer to save the interrupt flags
1099 *
1100 * Returns a reserved event on the ring buffer to copy directly to.
1101 * The user of this interface will need to get the body to write into
1102 * and can use the ring_buffer_event_data() interface.
1103 *
1104 * The length is the length of the data needed, not the event length
1105 * which also includes the event header.
1106 *
1107 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
1108 * If NULL is returned, then nothing has been allocated or locked.
1109 */
1110struct ring_buffer_event *
1111ring_buffer_lock_reserve(struct ring_buffer *buffer,
1112 unsigned long length,
1113 unsigned long *flags)
1114{
1115 struct ring_buffer_per_cpu *cpu_buffer;
1116 struct ring_buffer_event *event;
1117 int cpu, resched;
1118
1119 if (atomic_read(&buffer->record_disabled))
1120 return NULL;
1121
1122 /* If we are tracing schedule, we don't want to recurse */
1123 resched = need_resched();
1124 preempt_disable_notrace();
1125
1126 cpu = raw_smp_processor_id();
1127
1128 if (!cpu_isset(cpu, buffer->cpumask))
1129 goto out;
1130
1131 cpu_buffer = buffer->buffers[cpu];
1132
1133 if (atomic_read(&cpu_buffer->record_disabled))
1134 goto out;
1135
1136 length = rb_calculate_event_length(length);
1137 if (length > BUF_PAGE_SIZE)
1138 goto out;
1139
1140 event = rb_reserve_next_event(cpu_buffer, RINGBUF_TYPE_DATA, length);
1141 if (!event)
1142 goto out;
1143
1144 /*
1145 * Need to store resched state on this cpu.
1146 * Only the first needs to.
1147 */
1148
1149 if (preempt_count() == 1)
1150 per_cpu(rb_need_resched, cpu) = resched;
1151
1152 return event;
1153
1154 out:
1155 if (resched)
1156 preempt_enable_no_resched_notrace();
1157 else
1158 preempt_enable_notrace();
1159 return NULL;
1160}
1161
1162static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
1163 struct ring_buffer_event *event)
1164{
1165 cpu_buffer->entries++;
1166
1167 /* Only process further if we own the commit */
1168 if (!rb_is_commit(cpu_buffer, event))
1169 return;
1170
1171 cpu_buffer->write_stamp += event->time_delta;
1172
1173 rb_set_commit_to_write(cpu_buffer);
1174}
1175
1176/**
1177 * ring_buffer_unlock_commit - commit a reserved event
1178 * @buffer: The buffer to commit to
1179 * @event: The event pointer to commit.
1180 * @flags: the interrupt flags received from ring_buffer_lock_reserve.
1181 *
1182 * This commits the data to the ring buffer, and releases any locks held.
1183 *
1184 * Must be paired with ring_buffer_lock_reserve.
1185 */
1186int ring_buffer_unlock_commit(struct ring_buffer *buffer,
1187 struct ring_buffer_event *event,
1188 unsigned long flags)
1189{
1190 struct ring_buffer_per_cpu *cpu_buffer;
1191 int cpu = raw_smp_processor_id();
1192
1193 cpu_buffer = buffer->buffers[cpu];
1194
1195 rb_commit(cpu_buffer, event);
1196
1197 /*
1198 * Only the last preempt count needs to restore preemption.
1199 */
1200 if (preempt_count() == 1) {
1201 if (per_cpu(rb_need_resched, cpu))
1202 preempt_enable_no_resched_notrace();
1203 else
1204 preempt_enable_notrace();
1205 } else
1206 preempt_enable_no_resched_notrace();
1207
1208 return 0;
1209}
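/*
 * Editor's note -- illustrative sketch, not part of this patch: the
 * reserve/commit pairing described above.  "struct my_entry" and
 * my_trace() are made up for the example.
 */
struct my_entry {
	unsigned long	ip;
	unsigned long	val;
};

static int my_trace(struct ring_buffer *buffer,
		    unsigned long ip, unsigned long val)
{
	struct ring_buffer_event *event;
	struct my_entry *entry;
	unsigned long flags;

	/* length is the payload size only; the header is added for us */
	event = ring_buffer_lock_reserve(buffer, sizeof(*entry), &flags);
	if (!event)
		return -EBUSY;

	entry = ring_buffer_event_data(event);
	entry->ip = ip;
	entry->val = val;

	return ring_buffer_unlock_commit(buffer, event, flags);
}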
1210
1211/**
1212 * ring_buffer_write - write data to the buffer without reserving
1213 * @buffer: The ring buffer to write to.
1214 * @length: The length of the data being written (excluding the event header)
1215 * @data: The data to write to the buffer.
1216 *
1217 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
1218 * one function. If you already have the data to write to the buffer, it
1219 * may be easier to simply call this function.
1220 *
1221 * Note, like ring_buffer_lock_reserve, the length is the length of the data
1222 * and not the length of the event which would hold the header.
1223 */
1224int ring_buffer_write(struct ring_buffer *buffer,
1225 unsigned long length,
1226 void *data)
1227{
1228 struct ring_buffer_per_cpu *cpu_buffer;
1229 struct ring_buffer_event *event;
1230 unsigned long event_length;
1231 void *body;
1232 int ret = -EBUSY;
1233 int cpu, resched;
1234
1235 if (atomic_read(&buffer->record_disabled))
1236 return -EBUSY;
1237
1238 resched = need_resched();
1239 preempt_disable_notrace();
1240
1241 cpu = raw_smp_processor_id();
1242
1243 if (!cpu_isset(cpu, buffer->cpumask))
1244 goto out;
1245
1246 cpu_buffer = buffer->buffers[cpu];
1247
1248 if (atomic_read(&cpu_buffer->record_disabled))
1249 goto out;
1250
1251 event_length = rb_calculate_event_length(length);
1252 event = rb_reserve_next_event(cpu_buffer,
1253 RINGBUF_TYPE_DATA, event_length);
1254 if (!event)
1255 goto out;
1256
1257 body = rb_event_data(event);
1258
1259 memcpy(body, data, length);
1260
1261 rb_commit(cpu_buffer, event);
1262
1263 ret = 0;
1264 out:
1265 if (resched)
1266 preempt_enable_no_resched_notrace();
1267 else
1268 preempt_enable_notrace();
1269
1270 return ret;
1271}
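/*
 * Editor's note -- the same record written through the one-shot
 * helper above, for callers that already have the data assembled
 * (illustrative only; "struct my_entry" is from the previous sketch).
 */
static int my_trace_oneshot(struct ring_buffer *buffer,
			    struct my_entry *entry)
{
	return ring_buffer_write(buffer, sizeof(*entry), entry);
}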
1272
1273static inline int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer)
1274{
1275 struct buffer_page *reader = cpu_buffer->reader_page;
1276 struct buffer_page *head = cpu_buffer->head_page;
1277 struct buffer_page *commit = cpu_buffer->commit_page;
1278
1279 return reader->read == rb_page_commit(reader) &&
1280 (commit == reader ||
1281 (commit == head &&
1282 head->read == rb_page_commit(commit)));
1283}
1284
1285/**
1286 * ring_buffer_record_disable - stop all writes into the buffer
1287 * @buffer: The ring buffer to stop writes to.
1288 *
1289 * This prevents all writes to the buffer. Any attempt to write
1290 * to the buffer after this will fail and return NULL.
1291 *
1292 * The caller should call synchronize_sched() after this.
1293 */
1294void ring_buffer_record_disable(struct ring_buffer *buffer)
1295{
1296 atomic_inc(&buffer->record_disabled);
1297}
1298
1299/**
1300 * ring_buffer_record_enable - enable writes to the buffer
1301 * @buffer: The ring buffer to enable writes
1302 *
1303 * Note, multiple disables will need the same number of enables
1303 * to truly enable the writing (much like preempt_disable).
1305 */
1306void ring_buffer_record_enable(struct ring_buffer *buffer)
1307{
1308 atomic_dec(&buffer->record_disabled);
1309}
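/*
 * Editor's note -- illustrative use of the disable/enable pair and
 * the synchronize_sched() rule from the comments above: quiesce the
 * writers, look at the counters (ring_buffer_entries() and
 * ring_buffer_overruns() are defined further down), then let
 * recording resume.
 */
static void my_quiesce_and_count(struct ring_buffer *buffer)
{
	ring_buffer_record_disable(buffer);
	synchronize_sched();	/* wait out writers already in flight */

	pr_debug("%lu entries, %lu overruns\n",
		 ring_buffer_entries(buffer),
		 ring_buffer_overruns(buffer));

	ring_buffer_record_enable(buffer);
}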
1310
1311/**
1312 * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer
1313 * @buffer: The ring buffer to stop writes to.
1314 * @cpu: The CPU buffer to stop
1315 *
1316 * This prevents all writes to the buffer. Any attempt to write
1317 * to the buffer after this will fail and return NULL.
1318 *
1319 * The caller should call synchronize_sched() after this.
1320 */
1321void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu)
1322{
1323 struct ring_buffer_per_cpu *cpu_buffer;
1324
1325 if (!cpu_isset(cpu, buffer->cpumask))
1326 return;
1327
1328 cpu_buffer = buffer->buffers[cpu];
1329 atomic_inc(&cpu_buffer->record_disabled);
1330}
1331
1332/**
1333 * ring_buffer_record_enable_cpu - enable writes to the buffer
1334 * @buffer: The ring buffer to enable writes
1335 * @cpu: The CPU to enable.
1336 *
1337 * Note, multiple disables will need the same number of enables
1338 * to truly enable the writing (much like preempt_disable).
1339 */
1340void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu)
1341{
1342 struct ring_buffer_per_cpu *cpu_buffer;
1343
1344 if (!cpu_isset(cpu, buffer->cpumask))
1345 return;
1346
1347 cpu_buffer = buffer->buffers[cpu];
1348 atomic_dec(&cpu_buffer->record_disabled);
1349}
1350
1351/**
1352 * ring_buffer_entries_cpu - get the number of entries in a cpu buffer
1353 * @buffer: The ring buffer
1354 * @cpu: The per CPU buffer to get the entries from.
1355 */
1356unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu)
1357{
1358 struct ring_buffer_per_cpu *cpu_buffer;
1359
1360 if (!cpu_isset(cpu, buffer->cpumask))
1361 return 0;
1362
1363 cpu_buffer = buffer->buffers[cpu];
1364 return cpu_buffer->entries;
1365}
1366
1367/**
1368 * ring_buffer_overrun_cpu - get the number of overruns in a cpu_buffer
1369 * @buffer: The ring buffer
1370 * @cpu: The per CPU buffer to get the number of overruns from
1371 */
1372unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
1373{
1374 struct ring_buffer_per_cpu *cpu_buffer;
1375
1376 if (!cpu_isset(cpu, buffer->cpumask))
1377 return 0;
1378
1379 cpu_buffer = buffer->buffers[cpu];
1380 return cpu_buffer->overrun;
1381}
1382
1383/**
1384 * ring_buffer_entries - get the number of entries in a buffer
1385 * @buffer: The ring buffer
1386 *
1387 * Returns the total number of entries in the ring buffer
1388 * (all CPU entries)
1389 */
1390unsigned long ring_buffer_entries(struct ring_buffer *buffer)
1391{
1392 struct ring_buffer_per_cpu *cpu_buffer;
1393 unsigned long entries = 0;
1394 int cpu;
1395
1396 /* if you care about this being correct, lock the buffer */
1397 for_each_buffer_cpu(buffer, cpu) {
1398 cpu_buffer = buffer->buffers[cpu];
1399 entries += cpu_buffer->entries;
1400 }
1401
1402 return entries;
1403}
1404
1405/**
1406 * ring_buffer_overruns - get the number of overruns in the buffer
1407 * @buffer: The ring buffer
1408 *
1409 * Returns the total number of overruns in the ring buffer
1410 * (all CPU entries)
1411 */
1412unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
1413{
1414 struct ring_buffer_per_cpu *cpu_buffer;
1415 unsigned long overruns = 0;
1416 int cpu;
1417
1418 /* if you care about this being correct, lock the buffer */
1419 for_each_buffer_cpu(buffer, cpu) {
1420 cpu_buffer = buffer->buffers[cpu];
1421 overruns += cpu_buffer->overrun;
1422 }
1423
1424 return overruns;
1425}
1426
1427/**
1428 * ring_buffer_iter_reset - reset an iterator
1429 * @iter: The iterator to reset
1430 *
1431 * Resets the iterator, so that it will start from the beginning
1432 * again.
1433 */
1434void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
1435{
1436 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1437
1438 /* Iterator usage is expected to have record disabled */
1439 if (list_empty(&cpu_buffer->reader_page->list)) {
1440 iter->head_page = cpu_buffer->head_page;
1441 iter->head = cpu_buffer->head_page->read;
1442 } else {
1443 iter->head_page = cpu_buffer->reader_page;
1444 iter->head = cpu_buffer->reader_page->read;
1445 }
1446 if (iter->head)
1447 iter->read_stamp = cpu_buffer->read_stamp;
1448 else
1449 iter->read_stamp = iter->head_page->time_stamp;
1450}
1451
1452/**
1453 * ring_buffer_iter_empty - check if an iterator has no more to read
1454 * @iter: The iterator to check
1455 */
1456int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
1457{
1458 struct ring_buffer_per_cpu *cpu_buffer;
1459
1460 cpu_buffer = iter->cpu_buffer;
1461
1462 return iter->head_page == cpu_buffer->commit_page &&
1463 iter->head == rb_commit_index(cpu_buffer);
1464}
1465
1466static void
1467rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
1468 struct ring_buffer_event *event)
1469{
1470 u64 delta;
1471
1472 switch (event->type) {
1473 case RINGBUF_TYPE_PADDING:
1474 return;
1475
1476 case RINGBUF_TYPE_TIME_EXTEND:
1477 delta = event->array[0];
1478 delta <<= TS_SHIFT;
1479 delta += event->time_delta;
1480 cpu_buffer->read_stamp += delta;
1481 return;
1482
1483 case RINGBUF_TYPE_TIME_STAMP:
1484 /* FIXME: not implemented */
1485 return;
1486
1487 case RINGBUF_TYPE_DATA:
1488 cpu_buffer->read_stamp += event->time_delta;
1489 return;
1490
1491 default:
1492 BUG();
1493 }
1494 return;
1495}
1496
1497static void
1498rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
1499 struct ring_buffer_event *event)
1500{
1501 u64 delta;
1502
1503 switch (event->type) {
1504 case RINGBUF_TYPE_PADDING:
1505 return;
1506
1507 case RINGBUF_TYPE_TIME_EXTEND:
1508 delta = event->array[0];
1509 delta <<= TS_SHIFT;
1510 delta += event->time_delta;
1511 iter->read_stamp += delta;
1512 return;
1513
1514 case RINGBUF_TYPE_TIME_STAMP:
1515 /* FIXME: not implemented */
1516 return;
1517
1518 case RINGBUF_TYPE_DATA:
1519 iter->read_stamp += event->time_delta;
1520 return;
1521
1522 default:
1523 BUG();
1524 }
1525 return;
1526}
1527
1528static struct buffer_page *
1529rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
1530{
1531 struct buffer_page *reader = NULL;
1532 unsigned long flags;
1533
1534 spin_lock_irqsave(&cpu_buffer->lock, flags);
1535
1536 again:
1537 reader = cpu_buffer->reader_page;
1538
1539 /* If there's more to read, return this page */
1540 if (cpu_buffer->reader_page->read < rb_page_size(reader))
1541 goto out;
1542
1543 /* Never should we have an index greater than the size */
1544 RB_WARN_ON(cpu_buffer,
1545 cpu_buffer->reader_page->read > rb_page_size(reader));
1546
1547 /* check if we caught up to the tail */
1548 reader = NULL;
1549 if (cpu_buffer->commit_page == cpu_buffer->reader_page)
1550 goto out;
1551
1552 /*
1553 * Splice the empty reader page into the list around the head.
1554 * Reset the reader page to size zero.
1555 */
1556
1557 reader = cpu_buffer->head_page;
1558 cpu_buffer->reader_page->list.next = reader->list.next;
1559 cpu_buffer->reader_page->list.prev = reader->list.prev;
1560
1561 local_set(&cpu_buffer->reader_page->write, 0);
1562 local_set(&cpu_buffer->reader_page->commit, 0);
1563
1564 /* Make the reader page now replace the head */
1565 reader->list.prev->next = &cpu_buffer->reader_page->list;
1566 reader->list.next->prev = &cpu_buffer->reader_page->list;
1567
1568 /*
1569 * If the tail is on the reader, then we must set the head
1570 * to the inserted page, otherwise we set it one before.
1571 */
1572 cpu_buffer->head_page = cpu_buffer->reader_page;
1573
1574 if (cpu_buffer->commit_page != reader)
1575 rb_inc_page(cpu_buffer, &cpu_buffer->head_page);
1576
1577 /* Finally update the reader page to the new head */
1578 cpu_buffer->reader_page = reader;
1579 rb_reset_reader_page(cpu_buffer);
1580
1581 goto again;
1582
1583 out:
1584 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1585
1586 return reader;
1587}
1588
1589static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
1590{
1591 struct ring_buffer_event *event;
1592 struct buffer_page *reader;
1593 unsigned length;
1594
1595 reader = rb_get_reader_page(cpu_buffer);
1596
1597 /* This function should not be called when buffer is empty */
1598 BUG_ON(!reader);
1599
1600 event = rb_reader_event(cpu_buffer);
1601
1602 if (event->type == RINGBUF_TYPE_DATA)
1603 cpu_buffer->entries--;
1604
1605 rb_update_read_stamp(cpu_buffer, event);
1606
1607 length = rb_event_length(event);
1608 cpu_buffer->reader_page->read += length;
1609}
1610
1611static void rb_advance_iter(struct ring_buffer_iter *iter)
1612{
1613 struct ring_buffer *buffer;
1614 struct ring_buffer_per_cpu *cpu_buffer;
1615 struct ring_buffer_event *event;
1616 unsigned length;
1617
1618 cpu_buffer = iter->cpu_buffer;
1619 buffer = cpu_buffer->buffer;
1620
1621 /*
1622 * Check if we are at the end of the buffer.
1623 */
1624 if (iter->head >= rb_page_size(iter->head_page)) {
1625 BUG_ON(iter->head_page == cpu_buffer->commit_page);
1626 rb_inc_iter(iter);
1627 return;
1628 }
1629
1630 event = rb_iter_head_event(iter);
1631
1632 length = rb_event_length(event);
1633
1634 /*
1635 * This should not be called to advance the header if we are
1636 * at the tail of the buffer.
1637 */
1638 BUG_ON((iter->head_page == cpu_buffer->commit_page) &&
1639 (iter->head + length > rb_commit_index(cpu_buffer)));
1640
1641 rb_update_iter_read_stamp(iter, event);
1642
1643 iter->head += length;
1644
1645 /* check for end of page padding */
1646 if ((iter->head >= rb_page_size(iter->head_page)) &&
1647 (iter->head_page != cpu_buffer->commit_page))
1648 rb_advance_iter(iter);
1649}
1650
1651/**
1652 * ring_buffer_peek - peek at the next event to be read
1653 * @buffer: The ring buffer to read
1654 * @cpu: The cpu to peek at
1655 * @ts: The timestamp counter of this event.
1656 *
1657 * This will return the event that will be read next, but does
1658 * not consume the data.
1659 */
1660struct ring_buffer_event *
1661ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
1662{
1663 struct ring_buffer_per_cpu *cpu_buffer;
1664 struct ring_buffer_event *event;
1665 struct buffer_page *reader;
1666
1667 if (!cpu_isset(cpu, buffer->cpumask))
1668 return NULL;
1669
1670 cpu_buffer = buffer->buffers[cpu];
1671
1672 again:
1673 reader = rb_get_reader_page(cpu_buffer);
1674 if (!reader)
1675 return NULL;
1676
1677 event = rb_reader_event(cpu_buffer);
1678
1679 switch (event->type) {
1680 case RINGBUF_TYPE_PADDING:
1681 RB_WARN_ON(cpu_buffer, 1);
1682 rb_advance_reader(cpu_buffer);
1683 return NULL;
1684
1685 case RINGBUF_TYPE_TIME_EXTEND:
1686 /* Internal data, OK to advance */
1687 rb_advance_reader(cpu_buffer);
1688 goto again;
1689
1690 case RINGBUF_TYPE_TIME_STAMP:
1691 /* FIXME: not implemented */
1692 rb_advance_reader(cpu_buffer);
1693 goto again;
1694
1695 case RINGBUF_TYPE_DATA:
1696 if (ts) {
1697 *ts = cpu_buffer->read_stamp + event->time_delta;
1698 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1699 }
1700 return event;
1701
1702 default:
1703 BUG();
1704 }
1705
1706 return NULL;
1707}
1708
1709/**
1710 * ring_buffer_iter_peek - peek at the next event to be read
1711 * @iter: The ring buffer iterator
1712 * @ts: The timestamp counter of this event.
1713 *
1714 * This will return the event that will be read next, but does
1715 * not increment the iterator.
1716 */
1717struct ring_buffer_event *
1718ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
1719{
1720 struct ring_buffer *buffer;
1721 struct ring_buffer_per_cpu *cpu_buffer;
1722 struct ring_buffer_event *event;
1723
1724 if (ring_buffer_iter_empty(iter))
1725 return NULL;
1726
1727 cpu_buffer = iter->cpu_buffer;
1728 buffer = cpu_buffer->buffer;
1729
1730 again:
1731 if (rb_per_cpu_empty(cpu_buffer))
1732 return NULL;
1733
1734 event = rb_iter_head_event(iter);
1735
1736 switch (event->type) {
1737 case RINGBUF_TYPE_PADDING:
1738 rb_inc_iter(iter);
1739 goto again;
1740
1741 case RINGBUF_TYPE_TIME_EXTEND:
1742 /* Internal data, OK to advance */
1743 rb_advance_iter(iter);
1744 goto again;
1745
1746 case RINGBUF_TYPE_TIME_STAMP:
1747 /* FIXME: not implemented */
1748 rb_advance_iter(iter);
1749 goto again;
1750
1751 case RINGBUF_TYPE_DATA:
1752 if (ts) {
1753 *ts = iter->read_stamp + event->time_delta;
1754 ring_buffer_normalize_time_stamp(cpu_buffer->cpu, ts);
1755 }
1756 return event;
1757
1758 default:
1759 BUG();
1760 }
1761
1762 return NULL;
1763}
1764
1765/**
1766 * ring_buffer_consume - return an event and consume it
1767 * @buffer: The ring buffer to get the next event from
1768 *
1769 * Returns the next event in the ring buffer, and that event is consumed.
1770 * Meaning that sequential reads will keep returning a different event,
1771 * and eventually empty the ring buffer if the producer is slower.
1772 */
1773struct ring_buffer_event *
1774ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
1775{
1776 struct ring_buffer_per_cpu *cpu_buffer;
1777 struct ring_buffer_event *event;
1778
1779 if (!cpu_isset(cpu, buffer->cpumask))
1780 return NULL;
1781
1782 event = ring_buffer_peek(buffer, cpu, ts);
1783 if (!event)
1784 return NULL;
1785
1786 cpu_buffer = buffer->buffers[cpu];
1787 rb_advance_reader(cpu_buffer);
1788
1789 return event;
1790}
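/*
 * Editor's note -- illustrative sketch of a consuming reader: drain
 * whatever the given CPU has produced so far.
 */
static void my_drain_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_event *event;
	u64 ts;

	while ((event = ring_buffer_consume(buffer, cpu, &ts)) != NULL)
		pr_debug("consumed a %u byte event at %llu\n",
			 ring_buffer_event_length(event),
			 (unsigned long long)ts);
}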
1791
1792/**
1793 * ring_buffer_read_start - start a non consuming read of the buffer
1794 * @buffer: The ring buffer to read from
1795 * @cpu: The cpu buffer to iterate over
1796 *
1797 * This starts up an iteration through the buffer. It also disables
1798 * the recording to the buffer until the reading is finished.
1799 * This prevents the reading from being corrupted. This is not
1800 * a consuming read, so a producer is not expected.
1801 *
1802 * Must be paired with ring_buffer_read_finish.
1803 */
1804struct ring_buffer_iter *
1805ring_buffer_read_start(struct ring_buffer *buffer, int cpu)
1806{
1807 struct ring_buffer_per_cpu *cpu_buffer;
1808 struct ring_buffer_iter *iter;
1809 unsigned long flags;
1810
1811 if (!cpu_isset(cpu, buffer->cpumask))
1812 return NULL;
1813
1814 iter = kmalloc(sizeof(*iter), GFP_KERNEL);
1815 if (!iter)
1816 return NULL;
1817
1818 cpu_buffer = buffer->buffers[cpu];
1819
1820 iter->cpu_buffer = cpu_buffer;
1821
1822 atomic_inc(&cpu_buffer->record_disabled);
1823 synchronize_sched();
1824
1825 spin_lock_irqsave(&cpu_buffer->lock, flags);
1826 ring_buffer_iter_reset(iter);
1827 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1828
1829 return iter;
1830}
1831
1832/**
1833 * ring_buffer_read_finish - finish reading the iterator of the buffer
1834 * @iter: The iterator retrieved by ring_buffer_read_start
1835 *
1836 * This re-enables the recording to the buffer, and frees the
1837 * iterator.
1838 */
1839void
1840ring_buffer_read_finish(struct ring_buffer_iter *iter)
1841{
1842 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
1843
1844 atomic_dec(&cpu_buffer->record_disabled);
1845 kfree(iter);
1846}
1847
1848/**
1849 * ring_buffer_read - read the next item in the ring buffer by the iterator
1850 * @iter: The ring buffer iterator
1851 * @ts: The time stamp of the event read.
1852 *
1853 * This reads the next event in the ring buffer and increments the iterator.
1854 */
1855struct ring_buffer_event *
1856ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts)
1857{
1858 struct ring_buffer_event *event;
1859
1860 event = ring_buffer_iter_peek(iter, ts);
1861 if (!event)
1862 return NULL;
1863
1864 rb_advance_iter(iter);
1865
1866 return event;
1867}
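/*
 * Editor's note -- illustrative sketch of a non-consuming read of one
 * CPU buffer with the iterator API above; recording stays disabled
 * for the duration, as ring_buffer_read_start() arranges.
 */
static void my_dump_cpu(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_iter *iter;
	struct ring_buffer_event *event;
	u64 ts;

	iter = ring_buffer_read_start(buffer, cpu);
	if (!iter)
		return;

	while ((event = ring_buffer_read(iter, &ts)) != NULL)
		pr_debug("event at %llu, %u bytes\n",
			 (unsigned long long)ts,
			 ring_buffer_event_length(event));

	ring_buffer_read_finish(iter);
}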
1868
1869/**
1870 * ring_buffer_size - return the size of the ring buffer (in bytes)
1871 * @buffer: The ring buffer.
1872 */
1873unsigned long ring_buffer_size(struct ring_buffer *buffer)
1874{
1875 return BUF_PAGE_SIZE * buffer->pages;
1876}
1877
1878static void
1879rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
1880{
1881 cpu_buffer->head_page
1882 = list_entry(cpu_buffer->pages.next, struct buffer_page, list);
1883 local_set(&cpu_buffer->head_page->write, 0);
1884 local_set(&cpu_buffer->head_page->commit, 0);
1885
1886 cpu_buffer->head_page->read = 0;
1887
1888 cpu_buffer->tail_page = cpu_buffer->head_page;
1889 cpu_buffer->commit_page = cpu_buffer->head_page;
1890
1891 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
1892 local_set(&cpu_buffer->reader_page->write, 0);
1893 local_set(&cpu_buffer->reader_page->commit, 0);
1894 cpu_buffer->reader_page->read = 0;
1895
1896 cpu_buffer->overrun = 0;
1897 cpu_buffer->entries = 0;
1898}
1899
1900/**
1901 * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer
1902 * @buffer: The ring buffer to reset a per cpu buffer of
1903 * @cpu: The CPU buffer to be reset
1904 */
1905void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu)
1906{
1907 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
1908 unsigned long flags;
1909
1910 if (!cpu_isset(cpu, buffer->cpumask))
1911 return;
1912
1913 spin_lock_irqsave(&cpu_buffer->lock, flags);
1914
1915 rb_reset_cpu(cpu_buffer);
1916
1917 spin_unlock_irqrestore(&cpu_buffer->lock, flags);
1918}
1919
1920/**
1921 * ring_buffer_reset - reset a ring buffer
1922 * @buffer: The ring buffer to reset all cpu buffers
1923 */
1924void ring_buffer_reset(struct ring_buffer *buffer)
1925{
1926 int cpu;
1927
1928 for_each_buffer_cpu(buffer, cpu)
1929 ring_buffer_reset_cpu(buffer, cpu);
1930}
1931
1932/**
1933 * ring_buffer_empty - is the ring buffer empty?
1934 * @buffer: The ring buffer to test
1935 */
1936int ring_buffer_empty(struct ring_buffer *buffer)
1937{
1938 struct ring_buffer_per_cpu *cpu_buffer;
1939 int cpu;
1940
1941 /* yes this is racy, but if you don't like the race, lock the buffer */
1942 for_each_buffer_cpu(buffer, cpu) {
1943 cpu_buffer = buffer->buffers[cpu];
1944 if (!rb_per_cpu_empty(cpu_buffer))
1945 return 0;
1946 }
1947 return 1;
1948}
1949
1950/**
1951 * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty?
1952 * @buffer: The ring buffer
1953 * @cpu: The CPU buffer to test
1954 */
1955int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu)
1956{
1957 struct ring_buffer_per_cpu *cpu_buffer;
1958
1959 if (!cpu_isset(cpu, buffer->cpumask))
1960 return 1;
1961
1962 cpu_buffer = buffer->buffers[cpu];
1963 return rb_per_cpu_empty(cpu_buffer);
1964}
1965
1966/**
1967 * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers
1968 * @buffer_a: One buffer to swap with
1969 * @buffer_b: The other buffer to swap with
1970 *
1971 * This function is useful for tracers that want to take a "snapshot"
1972 * of a CPU buffer and have another backup buffer lying around.
1973 * It is expected that the tracer handles the cpu buffer not being
1974 * used at the moment.
1975 */
1976int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
1977 struct ring_buffer *buffer_b, int cpu)
1978{
1979 struct ring_buffer_per_cpu *cpu_buffer_a;
1980 struct ring_buffer_per_cpu *cpu_buffer_b;
1981
1982 if (!cpu_isset(cpu, buffer_a->cpumask) ||
1983 !cpu_isset(cpu, buffer_b->cpumask))
1984 return -EINVAL;
1985
1986 /* At least make sure the two buffers are somewhat the same */
1987 if (buffer_a->size != buffer_b->size ||
1988 buffer_a->pages != buffer_b->pages)
1989 return -EINVAL;
1990
1991 cpu_buffer_a = buffer_a->buffers[cpu];
1992 cpu_buffer_b = buffer_b->buffers[cpu];
1993
1994 /*
1995 * We can't do a synchronize_sched here because this
1996 * function can be called in atomic context.
1997 * Normally this will be called from the same CPU as cpu.
1998 * If not it's up to the caller to protect this.
1999 */
2000 atomic_inc(&cpu_buffer_a->record_disabled);
2001 atomic_inc(&cpu_buffer_b->record_disabled);
2002
2003 buffer_a->buffers[cpu] = cpu_buffer_b;
2004 buffer_b->buffers[cpu] = cpu_buffer_a;
2005
2006 cpu_buffer_b->buffer = buffer_a;
2007 cpu_buffer_a->buffer = buffer_b;
2008
2009 atomic_dec(&cpu_buffer_a->record_disabled);
2010 atomic_dec(&cpu_buffer_b->record_disabled);
2011
2012 return 0;
2013}
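/*
 * Editor's note -- illustrative sketch of the "snapshot" use case
 * mentioned above: swap the live per-cpu buffer with a spare one so
 * the spare can be read at leisure.  "live" and "spare" are assumed
 * to be two buffers of identical size allocated by the caller.
 */
static struct ring_buffer *my_snapshot_cpu(struct ring_buffer *live,
					   struct ring_buffer *spare,
					   int cpu)
{
	if (ring_buffer_swap_cpu(live, spare, cpu) < 0)
		return NULL;

	/* "spare" now holds what "live" had recorded for this cpu */
	return spare;
}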
2014
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8f3fb3db61c3..d345d649d073 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -14,6 +14,7 @@
14#include <linux/utsrelease.h> 14#include <linux/utsrelease.h>
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/seq_file.h> 16#include <linux/seq_file.h>
17#include <linux/notifier.h>
17#include <linux/debugfs.h> 18#include <linux/debugfs.h>
18#include <linux/pagemap.h> 19#include <linux/pagemap.h>
19#include <linux/hardirq.h> 20#include <linux/hardirq.h>
@@ -22,6 +23,7 @@
22#include <linux/ftrace.h> 23#include <linux/ftrace.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/percpu.h> 25#include <linux/percpu.h>
26#include <linux/kdebug.h>
25#include <linux/ctype.h> 27#include <linux/ctype.h>
26#include <linux/init.h> 28#include <linux/init.h>
27#include <linux/poll.h> 29#include <linux/poll.h>
@@ -31,25 +33,36 @@
31#include <linux/writeback.h> 33#include <linux/writeback.h>
32 34
33#include <linux/stacktrace.h> 35#include <linux/stacktrace.h>
36#include <linux/ring_buffer.h>
34 37
35#include "trace.h" 38#include "trace.h"
36 39
40#define TRACE_BUFFER_FLAGS (RB_FL_OVERWRITE)
41
37unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX; 42unsigned long __read_mostly tracing_max_latency = (cycle_t)ULONG_MAX;
38unsigned long __read_mostly tracing_thresh; 43unsigned long __read_mostly tracing_thresh;
39 44
40static unsigned long __read_mostly tracing_nr_buffers; 45static DEFINE_PER_CPU(local_t, ftrace_cpu_disabled);
46
47static inline void ftrace_disable_cpu(void)
48{
49 preempt_disable();
50 local_inc(&__get_cpu_var(ftrace_cpu_disabled));
51}
52
53static inline void ftrace_enable_cpu(void)
54{
55 local_dec(&__get_cpu_var(ftrace_cpu_disabled));
56 preempt_enable();
57}
58
41static cpumask_t __read_mostly tracing_buffer_mask; 59static cpumask_t __read_mostly tracing_buffer_mask;
42 60
43#define for_each_tracing_cpu(cpu) \ 61#define for_each_tracing_cpu(cpu) \
44 for_each_cpu_mask(cpu, tracing_buffer_mask) 62 for_each_cpu_mask(cpu, tracing_buffer_mask)
45 63
46static int trace_alloc_page(void);
47static int trace_free_page(void);
48
49static int tracing_disabled = 1; 64static int tracing_disabled = 1;
50 65
51static unsigned long tracing_pages_allocated;
52
53long 66long
54ns2usecs(cycle_t nsec) 67ns2usecs(cycle_t nsec)
55{ 68{
@@ -60,7 +73,9 @@ ns2usecs(cycle_t nsec)
60 73
61cycle_t ftrace_now(int cpu) 74cycle_t ftrace_now(int cpu)
62{ 75{
63 return cpu_clock(cpu); 76 u64 ts = ring_buffer_time_stamp(cpu);
77 ring_buffer_normalize_time_stamp(cpu, &ts);
78 return ts;
64} 79}
65 80
66/* 81/*
@@ -100,11 +115,18 @@ static int tracer_enabled = 1;
100int ftrace_function_enabled; 115int ftrace_function_enabled;
101 116
102/* 117/*
103 * trace_nr_entries is the number of entries that is allocated 118 * trace_buf_size is the size in bytes that is allocated
104 * for a buffer. Note, the number of entries is always rounded 119 * for a buffer. Note, the number of bytes is always rounded
105 * to ENTRIES_PER_PAGE. 120 * to page size.
121 *
122 * This number is purposely set to a low 16384 entries so that,
123 * if a dump on oops happens, we do not have to wait for a huge
124 * amount of output. It is configurable at both boot time and
125 * run time anyway.
106 */ 126 */
107static unsigned long trace_nr_entries = 65536UL; 127#define TRACE_BUF_SIZE_DEFAULT 1441792UL /* 16384 * 88 (sizeof(entry)) */
128
129static unsigned long trace_buf_size = TRACE_BUF_SIZE_DEFAULT;
108 130
109/* trace_types holds a link list of available tracers. */ 131/* trace_types holds a link list of available tracers. */
110static struct tracer *trace_types __read_mostly; 132static struct tracer *trace_types __read_mostly;
@@ -133,24 +155,6 @@ static DECLARE_WAIT_QUEUE_HEAD(trace_wait);
133/* trace_flags holds iter_ctrl options */ 155/* trace_flags holds iter_ctrl options */
134unsigned long trace_flags = TRACE_ITER_PRINT_PARENT; 156unsigned long trace_flags = TRACE_ITER_PRINT_PARENT;
135 157
136static notrace void no_trace_init(struct trace_array *tr)
137{
138 int cpu;
139
140 ftrace_function_enabled = 0;
141 if(tr->ctrl)
142 for_each_online_cpu(cpu)
143 tracing_reset(tr->data[cpu]);
144 tracer_enabled = 0;
145}
146
147/* dummy trace to disable tracing */
148static struct tracer no_tracer __read_mostly = {
149 .name = "none",
150 .init = no_trace_init
151};
152
153
154/** 158/**
155 * trace_wake_up - wake up tasks waiting for trace input 159 * trace_wake_up - wake up tasks waiting for trace input
156 * 160 *
@@ -167,23 +171,21 @@ void trace_wake_up(void)
167 wake_up(&trace_wait); 171 wake_up(&trace_wait);
168} 172}
169 173
170#define ENTRIES_PER_PAGE (PAGE_SIZE / sizeof(struct trace_entry)) 174static int __init set_buf_size(char *str)
171
172static int __init set_nr_entries(char *str)
173{ 175{
174 unsigned long nr_entries; 176 unsigned long buf_size;
175 int ret; 177 int ret;
176 178
177 if (!str) 179 if (!str)
178 return 0; 180 return 0;
179 ret = strict_strtoul(str, 0, &nr_entries); 181 ret = strict_strtoul(str, 0, &buf_size);
180 /* nr_entries can not be zero */ 182 /* nr_entries can not be zero */
181 if (ret < 0 || nr_entries == 0) 183 if (ret < 0 || buf_size == 0)
182 return 0; 184 return 0;
183 trace_nr_entries = nr_entries; 185 trace_buf_size = buf_size;
184 return 1; 186 return 1;
185} 187}
186__setup("trace_entries=", set_nr_entries); 188__setup("trace_buf_size=", set_buf_size);
187 189
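Assuming the usual __setup() semantics, the renamed option is given on the kernel command line and is now in bytes rather than entries: booting with, say, trace_buf_size=1048576 asks for a one-megabyte trace buffer (rounded to page size, per the comment above), while a malformed or zero value leaves the 1441792-byte default in place, since the handler returns 0 for bad input.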
188unsigned long nsecs_to_usecs(unsigned long nsecs) 190unsigned long nsecs_to_usecs(unsigned long nsecs)
189{ 191{
@@ -191,21 +193,6 @@ unsigned long nsecs_to_usecs(unsigned long nsecs)
191} 193}
192 194
193/* 195/*
194 * trace_flag_type is an enumeration that holds different
195 * states when a trace occurs. These are:
196 * IRQS_OFF - interrupts were disabled
197 * NEED_RESCED - reschedule is requested
198 * HARDIRQ - inside an interrupt handler
199 * SOFTIRQ - inside a softirq handler
200 */
201enum trace_flag_type {
202 TRACE_FLAG_IRQS_OFF = 0x01,
203 TRACE_FLAG_NEED_RESCHED = 0x02,
204 TRACE_FLAG_HARDIRQ = 0x04,
205 TRACE_FLAG_SOFTIRQ = 0x08,
206};
207
208/*
209 * TRACE_ITER_SYM_MASK masks the options in trace_flags that 196 * TRACE_ITER_SYM_MASK masks the options in trace_flags that
210 * control the output of kernel symbols. 197 * control the output of kernel symbols.
211 */ 198 */
@@ -224,6 +211,7 @@ static const char *trace_options[] = {
224 "block", 211 "block",
225 "stacktrace", 212 "stacktrace",
226 "sched-tree", 213 "sched-tree",
214 "ftrace_printk",
227 NULL 215 NULL
228}; 216};
229 217
@@ -266,54 +254,6 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
266 tracing_record_cmdline(current); 254 tracing_record_cmdline(current);
267} 255}
268 256
269#define CHECK_COND(cond) \
270 if (unlikely(cond)) { \
271 tracing_disabled = 1; \
272 WARN_ON(1); \
273 return -1; \
274 }
275
276/**
277 * check_pages - integrity check of trace buffers
278 *
279 * As a safety measure we check to make sure the data pages have not
280 * been corrupted.
281 */
282int check_pages(struct trace_array_cpu *data)
283{
284 struct page *page, *tmp;
285
286 CHECK_COND(data->trace_pages.next->prev != &data->trace_pages);
287 CHECK_COND(data->trace_pages.prev->next != &data->trace_pages);
288
289 list_for_each_entry_safe(page, tmp, &data->trace_pages, lru) {
290 CHECK_COND(page->lru.next->prev != &page->lru);
291 CHECK_COND(page->lru.prev->next != &page->lru);
292 }
293
294 return 0;
295}
296
297/**
298 * head_page - page address of the first page in per_cpu buffer.
299 *
300 * head_page returns the page address of the first page in
301 * a per_cpu buffer. This also performs various consistency
302 * checks to make sure the buffer has not been corrupted.
303 */
304void *head_page(struct trace_array_cpu *data)
305{
306 struct page *page;
307
308 if (list_empty(&data->trace_pages))
309 return NULL;
310
311 page = list_entry(data->trace_pages.next, struct page, lru);
312 BUG_ON(&page->lru == &data->trace_pages);
313
314 return page_address(page);
315}
316
317/** 257/**
318 * trace_seq_printf - sequence printing of trace information 258 * trace_seq_printf - sequence printing of trace information
319 * @s: trace sequence descriptor 259 * @s: trace sequence descriptor
@@ -395,28 +335,23 @@ trace_seq_putmem(struct trace_seq *s, void *mem, size_t len)
395 return len; 335 return len;
396} 336}
397 337
398#define HEX_CHARS 17 338#define MAX_MEMHEX_BYTES 8
399static const char hex2asc[] = "0123456789abcdef"; 339#define HEX_CHARS (MAX_MEMHEX_BYTES*2 + 1)
400 340
401static int 341static int
402trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len) 342trace_seq_putmem_hex(struct trace_seq *s, void *mem, size_t len)
403{ 343{
404 unsigned char hex[HEX_CHARS]; 344 unsigned char hex[HEX_CHARS];
405 unsigned char *data = mem; 345 unsigned char *data = mem;
406 unsigned char byte;
407 int i, j; 346 int i, j;
408 347
409 BUG_ON(len >= HEX_CHARS);
410
411#ifdef __BIG_ENDIAN 348#ifdef __BIG_ENDIAN
412 for (i = 0, j = 0; i < len; i++) { 349 for (i = 0, j = 0; i < len; i++) {
413#else 350#else
414 for (i = len-1, j = 0; i >= 0; i--) { 351 for (i = len-1, j = 0; i >= 0; i--) {
415#endif 352#endif
416 byte = data[i]; 353 hex[j++] = hex_asc_hi(data[i]);
417 354 hex[j++] = hex_asc_lo(data[i]);
418 hex[j++] = hex2asc[byte & 0x0f];
419 hex[j++] = hex2asc[byte >> 4];
420 } 355 }
421 hex[j++] = ' '; 356 hex[j++] = ' ';
422 357
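hex_asc_hi()/hex_asc_lo() are stock kernel helpers, so the local hex2asc[] table can go; note that the new loop also emits the high nibble before the low one, whereas the removed code wrote the low nibble first. For a single byte the helpers behave as sketched here (byte_to_hex() is a made-up illustration):

static void byte_to_hex(unsigned char byte, unsigned char out[2])
{
	out[0] = hex_asc_hi(byte);	/* 0xab -> 'a' */
	out[1] = hex_asc_lo(byte);	/* 0xab -> 'b' */
}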
@@ -460,34 +395,6 @@ trace_print_seq(struct seq_file *m, struct trace_seq *s)
460 trace_seq_reset(s); 395 trace_seq_reset(s);
461} 396}
462 397
463/*
464 * flip the trace buffers between two trace descriptors.
465 * This usually is the buffers between the global_trace and
466 * the max_tr to record a snapshot of a current trace.
467 *
468 * The ftrace_max_lock must be held.
469 */
470static void
471flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
472{
473 struct list_head flip_pages;
474
475 INIT_LIST_HEAD(&flip_pages);
476
477 memcpy(&tr1->trace_head_idx, &tr2->trace_head_idx,
478 sizeof(struct trace_array_cpu) -
479 offsetof(struct trace_array_cpu, trace_head_idx));
480
481 check_pages(tr1);
482 check_pages(tr2);
483 list_splice_init(&tr1->trace_pages, &flip_pages);
484 list_splice_init(&tr2->trace_pages, &tr1->trace_pages);
485 list_splice_init(&flip_pages, &tr2->trace_pages);
486 BUG_ON(!list_empty(&flip_pages));
487 check_pages(tr1);
488 check_pages(tr2);
489}
490
491/** 398/**
492 * update_max_tr - snapshot all trace buffers from global_trace to max_tr 399 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
493 * @tr: tracer 400 * @tr: tracer
@@ -500,17 +407,17 @@ flip_trace(struct trace_array_cpu *tr1, struct trace_array_cpu *tr2)
500void 407void
501update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) 408update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
502{ 409{
503 struct trace_array_cpu *data; 410 struct ring_buffer *buf = tr->buffer;
504 int i;
505 411
506 WARN_ON_ONCE(!irqs_disabled()); 412 WARN_ON_ONCE(!irqs_disabled());
507 __raw_spin_lock(&ftrace_max_lock); 413 __raw_spin_lock(&ftrace_max_lock);
508 /* clear out all the previous traces */ 414
509 for_each_tracing_cpu(i) { 415 tr->buffer = max_tr.buffer;
510 data = tr->data[i]; 416 max_tr.buffer = buf;
511 flip_trace(max_tr.data[i], data); 417
512 tracing_reset(data); 418 ftrace_disable_cpu();
513 } 419 ring_buffer_reset(tr->buffer);
420 ftrace_enable_cpu();
514 421
515 __update_max_tr(tr, tsk, cpu); 422 __update_max_tr(tr, tsk, cpu);
516 __raw_spin_unlock(&ftrace_max_lock); 423 __raw_spin_unlock(&ftrace_max_lock);
@@ -527,16 +434,19 @@ update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
527void 434void
528update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu) 435update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
529{ 436{
530 struct trace_array_cpu *data = tr->data[cpu]; 437 int ret;
531 int i;
532 438
533 WARN_ON_ONCE(!irqs_disabled()); 439 WARN_ON_ONCE(!irqs_disabled());
534 __raw_spin_lock(&ftrace_max_lock); 440 __raw_spin_lock(&ftrace_max_lock);
535 for_each_tracing_cpu(i)
536 tracing_reset(max_tr.data[i]);
537 441
538 flip_trace(max_tr.data[cpu], data); 442 ftrace_disable_cpu();
539 tracing_reset(data); 443
444 ring_buffer_reset(max_tr.buffer);
445 ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);
446
447 ftrace_enable_cpu();
448
449 WARN_ON_ONCE(ret);
540 450
541 __update_max_tr(tr, tsk, cpu); 451 __update_max_tr(tr, tsk, cpu);
542 __raw_spin_unlock(&ftrace_max_lock); 452 __raw_spin_unlock(&ftrace_max_lock);
@@ -573,7 +483,6 @@ int register_tracer(struct tracer *type)
573#ifdef CONFIG_FTRACE_STARTUP_TEST 483#ifdef CONFIG_FTRACE_STARTUP_TEST
574 if (type->selftest) { 484 if (type->selftest) {
575 struct tracer *saved_tracer = current_trace; 485 struct tracer *saved_tracer = current_trace;
576 struct trace_array_cpu *data;
577 struct trace_array *tr = &global_trace; 486 struct trace_array *tr = &global_trace;
578 int saved_ctrl = tr->ctrl; 487 int saved_ctrl = tr->ctrl;
579 int i; 488 int i;
@@ -585,10 +494,7 @@ int register_tracer(struct tracer *type)
585 * If we fail, we do not register this tracer. 494 * If we fail, we do not register this tracer.
586 */ 495 */
587 for_each_tracing_cpu(i) { 496 for_each_tracing_cpu(i) {
588 data = tr->data[i]; 497 tracing_reset(tr, i);
589 if (!head_page(data))
590 continue;
591 tracing_reset(data);
592 } 498 }
593 current_trace = type; 499 current_trace = type;
594 tr->ctrl = 0; 500 tr->ctrl = 0;
@@ -604,10 +510,7 @@ int register_tracer(struct tracer *type)
604 } 510 }
605 /* Only reset on passing, to avoid touching corrupted buffers */ 511 /* Only reset on passing, to avoid touching corrupted buffers */
606 for_each_tracing_cpu(i) { 512 for_each_tracing_cpu(i) {
607 data = tr->data[i]; 513 tracing_reset(tr, i);
608 if (!head_page(data))
609 continue;
610 tracing_reset(data);
611 } 514 }
612 printk(KERN_CONT "PASSED\n"); 515 printk(KERN_CONT "PASSED\n");
613 } 516 }
@@ -653,13 +556,11 @@ void unregister_tracer(struct tracer *type)
653 mutex_unlock(&trace_types_lock); 556 mutex_unlock(&trace_types_lock);
654} 557}
655 558
656void tracing_reset(struct trace_array_cpu *data) 559void tracing_reset(struct trace_array *tr, int cpu)
657{ 560{
658 data->trace_idx = 0; 561 ftrace_disable_cpu();
659 data->overrun = 0; 562 ring_buffer_reset_cpu(tr->buffer, cpu);
660 data->trace_head = data->trace_tail = head_page(data); 563 ftrace_enable_cpu();
661 data->trace_head_idx = 0;
662 data->trace_tail_idx = 0;
663} 564}
664 565
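tracing_reset() is now strictly per-CPU, so clearing a whole trace array is just a loop, as register_tracer() above does around its self-tests. A trivial sketch (tracing_reset_all() is a made-up wrapper):

static void tracing_reset_all(struct trace_array *tr)
{
	int cpu;

	/* reset every per-CPU ring buffer of this trace array */
	for_each_tracing_cpu(cpu)
		tracing_reset(tr, cpu);
}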
665#define SAVED_CMDLINES 128 566#define SAVED_CMDLINES 128
@@ -745,82 +646,16 @@ void tracing_record_cmdline(struct task_struct *tsk)
745 trace_save_cmdline(tsk); 646 trace_save_cmdline(tsk);
746} 647}
747 648
748static inline struct list_head * 649void
749trace_next_list(struct trace_array_cpu *data, struct list_head *next) 650tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
750{ 651 int pc)
751 /*
752 * Roundrobin - but skip the head (which is not a real page):
753 */
754 next = next->next;
755 if (unlikely(next == &data->trace_pages))
756 next = next->next;
757 BUG_ON(next == &data->trace_pages);
758
759 return next;
760}
761
762static inline void *
763trace_next_page(struct trace_array_cpu *data, void *addr)
764{
765 struct list_head *next;
766 struct page *page;
767
768 page = virt_to_page(addr);
769
770 next = trace_next_list(data, &page->lru);
771 page = list_entry(next, struct page, lru);
772
773 return page_address(page);
774}
775
776static inline struct trace_entry *
777tracing_get_trace_entry(struct trace_array *tr, struct trace_array_cpu *data)
778{
779 unsigned long idx, idx_next;
780 struct trace_entry *entry;
781
782 data->trace_idx++;
783 idx = data->trace_head_idx;
784 idx_next = idx + 1;
785
786 BUG_ON(idx * TRACE_ENTRY_SIZE >= PAGE_SIZE);
787
788 entry = data->trace_head + idx * TRACE_ENTRY_SIZE;
789
790 if (unlikely(idx_next >= ENTRIES_PER_PAGE)) {
791 data->trace_head = trace_next_page(data, data->trace_head);
792 idx_next = 0;
793 }
794
795 if (data->trace_head == data->trace_tail &&
796 idx_next == data->trace_tail_idx) {
797 /* overrun */
798 data->overrun++;
799 data->trace_tail_idx++;
800 if (data->trace_tail_idx >= ENTRIES_PER_PAGE) {
801 data->trace_tail =
802 trace_next_page(data, data->trace_tail);
803 data->trace_tail_idx = 0;
804 }
805 }
806
807 data->trace_head_idx = idx_next;
808
809 return entry;
810}
811
812static inline void
813tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
814{ 652{
815 struct task_struct *tsk = current; 653 struct task_struct *tsk = current;
816 unsigned long pc;
817
818 pc = preempt_count();
819 654
820 entry->preempt_count = pc & 0xff; 655 entry->preempt_count = pc & 0xff;
821 entry->pid = (tsk) ? tsk->pid : 0; 656 entry->pid = (tsk) ? tsk->pid : 0;
822 entry->t = ftrace_now(raw_smp_processor_id()); 657 entry->flags =
823 entry->flags = (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) | 658 (irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
824 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) | 659 ((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
825 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | 660 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
826 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); 661 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
@@ -828,145 +663,139 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags)
828 663
829void 664void
830trace_function(struct trace_array *tr, struct trace_array_cpu *data, 665trace_function(struct trace_array *tr, struct trace_array_cpu *data,
831 unsigned long ip, unsigned long parent_ip, unsigned long flags) 666 unsigned long ip, unsigned long parent_ip, unsigned long flags,
667 int pc)
832{ 668{
833 struct trace_entry *entry; 669 struct ring_buffer_event *event;
670 struct ftrace_entry *entry;
834 unsigned long irq_flags; 671 unsigned long irq_flags;
835 672
836 raw_local_irq_save(irq_flags); 673 /* If we are reading the ring buffer, don't trace */
837 __raw_spin_lock(&data->lock); 674 if (unlikely(local_read(&__get_cpu_var(ftrace_cpu_disabled))))
838 entry = tracing_get_trace_entry(tr, data); 675 return;
839 tracing_generic_entry_update(entry, flags); 676
840 entry->type = TRACE_FN; 677 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
841 entry->fn.ip = ip; 678 &irq_flags);
842 entry->fn.parent_ip = parent_ip; 679 if (!event)
843 __raw_spin_unlock(&data->lock); 680 return;
844 raw_local_irq_restore(irq_flags); 681 entry = ring_buffer_event_data(event);
682 tracing_generic_entry_update(&entry->ent, flags, pc);
683 entry->ent.type = TRACE_FN;
684 entry->ip = ip;
685 entry->parent_ip = parent_ip;
686 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
845} 687}
846 688
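trace_function() above establishes the write path every event producer in this patch follows: reserve an event of the right size, fill in the generic header plus the type-specific fields, then commit. A hedged restatement of that sequence using the TRACE_SPECIAL record type from further down (write_one_special() and its fixed arguments are made up):

static void write_one_special(struct trace_array *tr,
			      unsigned long flags, int pc)
{
	struct ring_buffer_event *event;
	struct special_entry *entry;
	unsigned long irq_flags;

	/* 1. reserve space; NULL means the buffer is full or disabled */
	event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
					 &irq_flags);
	if (!event)
		return;

	/* 2. fill in the common header and the per-type payload */
	entry = ring_buffer_event_data(event);
	tracing_generic_entry_update(&entry->ent, flags, pc);
	entry->ent.type = TRACE_SPECIAL;
	entry->arg1 = 1;
	entry->arg2 = 2;
	entry->arg3 = 3;

	/* 3. publish the event */
	ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
}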
847void 689void
848ftrace(struct trace_array *tr, struct trace_array_cpu *data, 690ftrace(struct trace_array *tr, struct trace_array_cpu *data,
849 unsigned long ip, unsigned long parent_ip, unsigned long flags) 691 unsigned long ip, unsigned long parent_ip, unsigned long flags,
692 int pc)
850{ 693{
851 if (likely(!atomic_read(&data->disabled))) 694 if (likely(!atomic_read(&data->disabled)))
852 trace_function(tr, data, ip, parent_ip, flags); 695 trace_function(tr, data, ip, parent_ip, flags, pc);
853} 696}
854 697
855#ifdef CONFIG_MMIOTRACE 698static void ftrace_trace_stack(struct trace_array *tr,
856void __trace_mmiotrace_rw(struct trace_array *tr, struct trace_array_cpu *data, 699 struct trace_array_cpu *data,
857 struct mmiotrace_rw *rw) 700 unsigned long flags,
701 int skip, int pc)
858{ 702{
859 struct trace_entry *entry; 703 struct ring_buffer_event *event;
704 struct stack_entry *entry;
705 struct stack_trace trace;
860 unsigned long irq_flags; 706 unsigned long irq_flags;
861 707
862 raw_local_irq_save(irq_flags); 708 if (!(trace_flags & TRACE_ITER_STACKTRACE))
863 __raw_spin_lock(&data->lock); 709 return;
864
865 entry = tracing_get_trace_entry(tr, data);
866 tracing_generic_entry_update(entry, 0);
867 entry->type = TRACE_MMIO_RW;
868 entry->mmiorw = *rw;
869
870 __raw_spin_unlock(&data->lock);
871 raw_local_irq_restore(irq_flags);
872
873 trace_wake_up();
874}
875
876void __trace_mmiotrace_map(struct trace_array *tr, struct trace_array_cpu *data,
877 struct mmiotrace_map *map)
878{
879 struct trace_entry *entry;
880 unsigned long irq_flags;
881 710
882 raw_local_irq_save(irq_flags); 711 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
883 __raw_spin_lock(&data->lock); 712 &irq_flags);
713 if (!event)
714 return;
715 entry = ring_buffer_event_data(event);
716 tracing_generic_entry_update(&entry->ent, flags, pc);
717 entry->ent.type = TRACE_STACK;
884 718
885 entry = tracing_get_trace_entry(tr, data); 719 memset(&entry->caller, 0, sizeof(entry->caller));
886 tracing_generic_entry_update(entry, 0);
887 entry->type = TRACE_MMIO_MAP;
888 entry->mmiomap = *map;
889 720
890 __raw_spin_unlock(&data->lock); 721 trace.nr_entries = 0;
891 raw_local_irq_restore(irq_flags); 722 trace.max_entries = FTRACE_STACK_ENTRIES;
723 trace.skip = skip;
724 trace.entries = entry->caller;
892 725
893 trace_wake_up(); 726 save_stack_trace(&trace);
727 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
894} 728}
895#endif
896 729
897void __trace_stack(struct trace_array *tr, 730void __trace_stack(struct trace_array *tr,
898 struct trace_array_cpu *data, 731 struct trace_array_cpu *data,
899 unsigned long flags, 732 unsigned long flags,
900 int skip) 733 int skip)
901{ 734{
902 struct trace_entry *entry; 735 ftrace_trace_stack(tr, data, flags, skip, preempt_count());
903 struct stack_trace trace;
904
905 if (!(trace_flags & TRACE_ITER_STACKTRACE))
906 return;
907
908 entry = tracing_get_trace_entry(tr, data);
909 tracing_generic_entry_update(entry, flags);
910 entry->type = TRACE_STACK;
911
912 memset(&entry->stack, 0, sizeof(entry->stack));
913
914 trace.nr_entries = 0;
915 trace.max_entries = FTRACE_STACK_ENTRIES;
916 trace.skip = skip;
917 trace.entries = entry->stack.caller;
918
919 save_stack_trace(&trace);
920} 736}
921 737
922void 738static void
923__trace_special(void *__tr, void *__data, 739ftrace_trace_special(void *__tr, void *__data,
924 unsigned long arg1, unsigned long arg2, unsigned long arg3) 740 unsigned long arg1, unsigned long arg2, unsigned long arg3,
741 int pc)
925{ 742{
743 struct ring_buffer_event *event;
926 struct trace_array_cpu *data = __data; 744 struct trace_array_cpu *data = __data;
927 struct trace_array *tr = __tr; 745 struct trace_array *tr = __tr;
928 struct trace_entry *entry; 746 struct special_entry *entry;
929 unsigned long irq_flags; 747 unsigned long irq_flags;
930 748
931 raw_local_irq_save(irq_flags); 749 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
932 __raw_spin_lock(&data->lock); 750 &irq_flags);
933 entry = tracing_get_trace_entry(tr, data); 751 if (!event)
934 tracing_generic_entry_update(entry, 0); 752 return;
935 entry->type = TRACE_SPECIAL; 753 entry = ring_buffer_event_data(event);
936 entry->special.arg1 = arg1; 754 tracing_generic_entry_update(&entry->ent, 0, pc);
937 entry->special.arg2 = arg2; 755 entry->ent.type = TRACE_SPECIAL;
938 entry->special.arg3 = arg3; 756 entry->arg1 = arg1;
939 __trace_stack(tr, data, irq_flags, 4); 757 entry->arg2 = arg2;
940 __raw_spin_unlock(&data->lock); 758 entry->arg3 = arg3;
941 raw_local_irq_restore(irq_flags); 759 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
760 ftrace_trace_stack(tr, data, irq_flags, 4, pc);
942 761
943 trace_wake_up(); 762 trace_wake_up();
944} 763}
945 764
946void 765void
766__trace_special(void *__tr, void *__data,
767 unsigned long arg1, unsigned long arg2, unsigned long arg3)
768{
769 ftrace_trace_special(__tr, __data, arg1, arg2, arg3, preempt_count());
770}
771
772void
947tracing_sched_switch_trace(struct trace_array *tr, 773tracing_sched_switch_trace(struct trace_array *tr,
948 struct trace_array_cpu *data, 774 struct trace_array_cpu *data,
949 struct task_struct *prev, 775 struct task_struct *prev,
950 struct task_struct *next, 776 struct task_struct *next,
951 unsigned long flags) 777 unsigned long flags, int pc)
952{ 778{
953 struct trace_entry *entry; 779 struct ring_buffer_event *event;
780 struct ctx_switch_entry *entry;
954 unsigned long irq_flags; 781 unsigned long irq_flags;
955 782
956 raw_local_irq_save(irq_flags); 783 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
957 __raw_spin_lock(&data->lock); 784 &irq_flags);
958 entry = tracing_get_trace_entry(tr, data); 785 if (!event)
959 tracing_generic_entry_update(entry, flags); 786 return;
960 entry->type = TRACE_CTX; 787 entry = ring_buffer_event_data(event);
961 entry->ctx.prev_pid = prev->pid; 788 tracing_generic_entry_update(&entry->ent, flags, pc);
962 entry->ctx.prev_prio = prev->prio; 789 entry->ent.type = TRACE_CTX;
963 entry->ctx.prev_state = prev->state; 790 entry->prev_pid = prev->pid;
964 entry->ctx.next_pid = next->pid; 791 entry->prev_prio = prev->prio;
965 entry->ctx.next_prio = next->prio; 792 entry->prev_state = prev->state;
966 entry->ctx.next_state = next->state; 793 entry->next_pid = next->pid;
967 __trace_stack(tr, data, flags, 5); 794 entry->next_prio = next->prio;
968 __raw_spin_unlock(&data->lock); 795 entry->next_state = next->state;
969 raw_local_irq_restore(irq_flags); 796 entry->next_cpu = task_cpu(next);
797 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
798 ftrace_trace_stack(tr, data, flags, 5, pc);
970} 799}
971 800
972void 801void
@@ -974,25 +803,28 @@ tracing_sched_wakeup_trace(struct trace_array *tr,
974 struct trace_array_cpu *data, 803 struct trace_array_cpu *data,
975 struct task_struct *wakee, 804 struct task_struct *wakee,
976 struct task_struct *curr, 805 struct task_struct *curr,
977 unsigned long flags) 806 unsigned long flags, int pc)
978{ 807{
979 struct trace_entry *entry; 808 struct ring_buffer_event *event;
809 struct ctx_switch_entry *entry;
980 unsigned long irq_flags; 810 unsigned long irq_flags;
981 811
982 raw_local_irq_save(irq_flags); 812 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
983 __raw_spin_lock(&data->lock); 813 &irq_flags);
984 entry = tracing_get_trace_entry(tr, data); 814 if (!event)
985 tracing_generic_entry_update(entry, flags); 815 return;
986 entry->type = TRACE_WAKE; 816 entry = ring_buffer_event_data(event);
987 entry->ctx.prev_pid = curr->pid; 817 tracing_generic_entry_update(&entry->ent, flags, pc);
988 entry->ctx.prev_prio = curr->prio; 818 entry->ent.type = TRACE_WAKE;
989 entry->ctx.prev_state = curr->state; 819 entry->prev_pid = curr->pid;
990 entry->ctx.next_pid = wakee->pid; 820 entry->prev_prio = curr->prio;
991 entry->ctx.next_prio = wakee->prio; 821 entry->prev_state = curr->state;
992 entry->ctx.next_state = wakee->state; 822 entry->next_pid = wakee->pid;
993 __trace_stack(tr, data, flags, 6); 823 entry->next_prio = wakee->prio;
994 __raw_spin_unlock(&data->lock); 824 entry->next_state = wakee->state;
995 raw_local_irq_restore(irq_flags); 825 entry->next_cpu = task_cpu(wakee);
826 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
827 ftrace_trace_stack(tr, data, flags, 6, pc);
996 828
997 trace_wake_up(); 829 trace_wake_up();
998} 830}
@@ -1002,23 +834,21 @@ ftrace_special(unsigned long arg1, unsigned long arg2, unsigned long arg3)
1002{ 834{
1003 struct trace_array *tr = &global_trace; 835 struct trace_array *tr = &global_trace;
1004 struct trace_array_cpu *data; 836 struct trace_array_cpu *data;
1005 unsigned long flags;
1006 long disabled;
1007 int cpu; 837 int cpu;
838 int pc;
1008 839
1009 if (tracing_disabled || current_trace == &no_tracer || !tr->ctrl) 840 if (tracing_disabled || !tr->ctrl)
1010 return; 841 return;
1011 842
1012 local_irq_save(flags); 843 pc = preempt_count();
844 preempt_disable_notrace();
1013 cpu = raw_smp_processor_id(); 845 cpu = raw_smp_processor_id();
1014 data = tr->data[cpu]; 846 data = tr->data[cpu];
1015 disabled = atomic_inc_return(&data->disabled);
1016 847
1017 if (likely(disabled == 1)) 848 if (likely(!atomic_read(&data->disabled)))
1018 __trace_special(tr, data, arg1, arg2, arg3); 849 ftrace_trace_special(tr, data, arg1, arg2, arg3, pc);
1019 850
1020 atomic_dec(&data->disabled); 851 preempt_enable_notrace();
1021 local_irq_restore(flags);
1022} 852}
1023 853
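ftrace_special() is the hook a developer can call to drop a TRACE_SPECIAL marker into the trace from the code under study; with the ring buffer it no longer needs to disable interrupts itself. A hedged usage sketch; my_suspect_path() and its arguments are invented:

static void my_suspect_path(unsigned long req, unsigned long len)
{
	/* shows up in the trace output as "# <req> <len> 0" */
	ftrace_special(req, len, 0);

	/* ... the code being investigated ... */
}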
1024#ifdef CONFIG_FTRACE 854#ifdef CONFIG_FTRACE
@@ -1029,7 +859,8 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
1029 struct trace_array_cpu *data; 859 struct trace_array_cpu *data;
1030 unsigned long flags; 860 unsigned long flags;
1031 long disabled; 861 long disabled;
1032 int cpu; 862 int cpu, resched;
863 int pc;
1033 864
1034 if (unlikely(!ftrace_function_enabled)) 865 if (unlikely(!ftrace_function_enabled))
1035 return; 866 return;
@@ -1037,16 +868,22 @@ function_trace_call(unsigned long ip, unsigned long parent_ip)
1037 if (skip_trace(ip)) 868 if (skip_trace(ip))
1038 return; 869 return;
1039 870
1040 local_irq_save(flags); 871 pc = preempt_count();
872 resched = need_resched();
873 preempt_disable_notrace();
874 local_save_flags(flags);
1041 cpu = raw_smp_processor_id(); 875 cpu = raw_smp_processor_id();
1042 data = tr->data[cpu]; 876 data = tr->data[cpu];
1043 disabled = atomic_inc_return(&data->disabled); 877 disabled = atomic_inc_return(&data->disabled);
1044 878
1045 if (likely(disabled == 1)) 879 if (likely(disabled == 1))
1046 trace_function(tr, data, ip, parent_ip, flags); 880 trace_function(tr, data, ip, parent_ip, flags, pc);
1047 881
1048 atomic_dec(&data->disabled); 882 atomic_dec(&data->disabled);
1049 local_irq_restore(flags); 883 if (resched)
884 preempt_enable_no_resched_notrace();
885 else
886 preempt_enable_notrace();
1050} 887}
1051 888
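function_trace_call() no longer disables interrupts around the record; it only disables preemption, using the notrace variants so the enable/disable paths themselves are not traced. need_resched() appears to be sampled before preemption is disabled so that an already-pending reschedule is left to the interrupted context rather than being triggered from inside the tracer. The bracketing in isolation (traced_hot_path() is a made-up function):

static void notrace traced_hot_path(void)
{
	int resched = need_resched();	/* sampled before disabling preemption */

	preempt_disable_notrace();

	/* ... work that must not recurse into the tracer ... */

	if (resched)
		/* a reschedule was already pending: don't schedule from here */
		preempt_enable_no_resched_notrace();
	else
		preempt_enable_notrace();
}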
1052static struct ftrace_ops trace_ops __read_mostly = 889static struct ftrace_ops trace_ops __read_mostly =
@@ -1073,111 +910,96 @@ enum trace_file_type {
1073 TRACE_FILE_LAT_FMT = 1, 910 TRACE_FILE_LAT_FMT = 1,
1074}; 911};
1075 912
1076static struct trace_entry * 913static void trace_iterator_increment(struct trace_iterator *iter, int cpu)
1077trace_entry_idx(struct trace_array *tr, struct trace_array_cpu *data,
1078 struct trace_iterator *iter, int cpu)
1079{ 914{
1080 struct page *page; 915 /* Don't allow ftrace to trace into the ring buffers */
1081 struct trace_entry *array; 916 ftrace_disable_cpu();
1082 917
1083 if (iter->next_idx[cpu] >= tr->entries || 918 iter->idx++;
1084 iter->next_idx[cpu] >= data->trace_idx || 919 if (iter->buffer_iter[iter->cpu])
1085 (data->trace_head == data->trace_tail && 920 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1086 data->trace_head_idx == data->trace_tail_idx))
1087 return NULL;
1088 921
1089 if (!iter->next_page[cpu]) { 922 ftrace_enable_cpu();
1090 /* Initialize the iterator for this cpu trace buffer */ 923}
1091 WARN_ON(!data->trace_tail); 924
1092 page = virt_to_page(data->trace_tail); 925static struct trace_entry *
1093 iter->next_page[cpu] = &page->lru; 926peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts)
1094 iter->next_page_idx[cpu] = data->trace_tail_idx; 927{
1095 } 928 struct ring_buffer_event *event;
929 struct ring_buffer_iter *buf_iter = iter->buffer_iter[cpu];
1096 930
1097 page = list_entry(iter->next_page[cpu], struct page, lru); 931 /* Don't allow ftrace to trace into the ring buffers */
1098 BUG_ON(&data->trace_pages == &page->lru); 932 ftrace_disable_cpu();
933
934 if (buf_iter)
935 event = ring_buffer_iter_peek(buf_iter, ts);
936 else
937 event = ring_buffer_peek(iter->tr->buffer, cpu, ts);
1099 938
1100 array = page_address(page); 939 ftrace_enable_cpu();
1101 940
1102 WARN_ON(iter->next_page_idx[cpu] >= ENTRIES_PER_PAGE); 941 return event ? ring_buffer_event_data(event) : NULL;
1103 return &array[iter->next_page_idx[cpu]];
1104} 942}
1105 943
1106static struct trace_entry * 944static struct trace_entry *
1107find_next_entry(struct trace_iterator *iter, int *ent_cpu) 945__find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1108{ 946{
1109 struct trace_array *tr = iter->tr; 947 struct ring_buffer *buffer = iter->tr->buffer;
1110 struct trace_entry *ent, *next = NULL; 948 struct trace_entry *ent, *next = NULL;
949 u64 next_ts = 0, ts;
1111 int next_cpu = -1; 950 int next_cpu = -1;
1112 int cpu; 951 int cpu;
1113 952
1114 for_each_tracing_cpu(cpu) { 953 for_each_tracing_cpu(cpu) {
1115 if (!head_page(tr->data[cpu])) 954
955 if (ring_buffer_empty_cpu(buffer, cpu))
1116 continue; 956 continue;
1117 ent = trace_entry_idx(tr, tr->data[cpu], iter, cpu); 957
958 ent = peek_next_entry(iter, cpu, &ts);
959
1118 /* 960 /*
1119 * Pick the entry with the smallest timestamp: 961 * Pick the entry with the smallest timestamp:
1120 */ 962 */
1121 if (ent && (!next || ent->t < next->t)) { 963 if (ent && (!next || ts < next_ts)) {
1122 next = ent; 964 next = ent;
1123 next_cpu = cpu; 965 next_cpu = cpu;
966 next_ts = ts;
1124 } 967 }
1125 } 968 }
1126 969
1127 if (ent_cpu) 970 if (ent_cpu)
1128 *ent_cpu = next_cpu; 971 *ent_cpu = next_cpu;
1129 972
973 if (ent_ts)
974 *ent_ts = next_ts;
975
1130 return next; 976 return next;
1131} 977}
1132 978
1133static void trace_iterator_increment(struct trace_iterator *iter) 979/* Find the next real entry, without updating the iterator itself */
980static struct trace_entry *
981find_next_entry(struct trace_iterator *iter, int *ent_cpu, u64 *ent_ts)
1134{ 982{
1135 iter->idx++; 983 return __find_next_entry(iter, ent_cpu, ent_ts);
1136 iter->next_idx[iter->cpu]++;
1137 iter->next_page_idx[iter->cpu]++;
1138
1139 if (iter->next_page_idx[iter->cpu] >= ENTRIES_PER_PAGE) {
1140 struct trace_array_cpu *data = iter->tr->data[iter->cpu];
1141
1142 iter->next_page_idx[iter->cpu] = 0;
1143 iter->next_page[iter->cpu] =
1144 trace_next_list(data, iter->next_page[iter->cpu]);
1145 }
1146} 984}
1147 985
1148static void trace_consume(struct trace_iterator *iter) 986/* Find the next real entry, and increment the iterator to the next entry */
987static void *find_next_entry_inc(struct trace_iterator *iter)
1149{ 988{
1150 struct trace_array_cpu *data = iter->tr->data[iter->cpu]; 989 iter->ent = __find_next_entry(iter, &iter->cpu, &iter->ts);
1151 990
1152 data->trace_tail_idx++; 991 if (iter->ent)
1153 if (data->trace_tail_idx >= ENTRIES_PER_PAGE) { 992 trace_iterator_increment(iter, iter->cpu);
1154 data->trace_tail = trace_next_page(data, data->trace_tail);
1155 data->trace_tail_idx = 0;
1156 }
1157 993
1158 /* Check if we empty it, then reset the index */ 994 return iter->ent ? iter : NULL;
1159 if (data->trace_head == data->trace_tail &&
1160 data->trace_head_idx == data->trace_tail_idx)
1161 data->trace_idx = 0;
1162} 995}
1163 996
1164static void *find_next_entry_inc(struct trace_iterator *iter) 997static void trace_consume(struct trace_iterator *iter)
1165{ 998{
1166 struct trace_entry *next; 999 /* Don't allow ftrace to trace into the ring buffers */
1167 int next_cpu = -1; 1000 ftrace_disable_cpu();
1168 1001 ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts);
1169 next = find_next_entry(iter, &next_cpu); 1002 ftrace_enable_cpu();
1170
1171 iter->prev_ent = iter->ent;
1172 iter->prev_cpu = iter->cpu;
1173
1174 iter->ent = next;
1175 iter->cpu = next_cpu;
1176
1177 if (next)
1178 trace_iterator_increment(iter);
1179
1180 return next ? iter : NULL;
1181} 1003}
1182 1004
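find_next_entry_inc() picks the oldest entry across all per-CPU buffers (by timestamp) and advances the iterator, while trace_consume() destructively removes the entry just reported. Presumably this is how the trace_pipe reader elsewhere in this file drives the pair; drain_trace() is a made-up wrapper:

static void drain_trace(struct trace_iterator *iter)
{
	while (find_next_entry_inc(iter)) {
		/* iter->ent, iter->cpu and iter->ts describe the entry */
		print_trace_line(iter);
		trace_consume(iter);
	}
}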
1183static void *s_next(struct seq_file *m, void *v, loff_t *pos) 1005static void *s_next(struct seq_file *m, void *v, loff_t *pos)
@@ -1210,7 +1032,7 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1210 struct trace_iterator *iter = m->private; 1032 struct trace_iterator *iter = m->private;
1211 void *p = NULL; 1033 void *p = NULL;
1212 loff_t l = 0; 1034 loff_t l = 0;
1213 int i; 1035 int cpu;
1214 1036
1215 mutex_lock(&trace_types_lock); 1037 mutex_lock(&trace_types_lock);
1216 1038
@@ -1229,14 +1051,15 @@ static void *s_start(struct seq_file *m, loff_t *pos)
1229 iter->ent = NULL; 1051 iter->ent = NULL;
1230 iter->cpu = 0; 1052 iter->cpu = 0;
1231 iter->idx = -1; 1053 iter->idx = -1;
1232 iter->prev_ent = NULL;
1233 iter->prev_cpu = -1;
1234 1054
1235 for_each_tracing_cpu(i) { 1055 ftrace_disable_cpu();
1236 iter->next_idx[i] = 0; 1056
1237 iter->next_page[i] = NULL; 1057 for_each_tracing_cpu(cpu) {
1058 ring_buffer_iter_reset(iter->buffer_iter[cpu]);
1238 } 1059 }
1239 1060
1061 ftrace_enable_cpu();
1062
1240 for (p = iter; p && l < *pos; p = s_next(m, p, &l)) 1063 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1241 ; 1064 ;
1242 1065
@@ -1330,21 +1153,21 @@ seq_print_ip_sym(struct trace_seq *s, unsigned long ip, unsigned long sym_flags)
1330 1153
1331static void print_lat_help_header(struct seq_file *m) 1154static void print_lat_help_header(struct seq_file *m)
1332{ 1155{
1333 seq_puts(m, "# _------=> CPU# \n"); 1156 seq_puts(m, "# _------=> CPU# \n");
1334 seq_puts(m, "# / _-----=> irqs-off \n"); 1157 seq_puts(m, "# / _-----=> irqs-off \n");
1335 seq_puts(m, "# | / _----=> need-resched \n"); 1158 seq_puts(m, "# | / _----=> need-resched \n");
1336 seq_puts(m, "# || / _---=> hardirq/softirq \n"); 1159 seq_puts(m, "# || / _---=> hardirq/softirq \n");
1337 seq_puts(m, "# ||| / _--=> preempt-depth \n"); 1160 seq_puts(m, "# ||| / _--=> preempt-depth \n");
1338 seq_puts(m, "# |||| / \n"); 1161 seq_puts(m, "# |||| / \n");
1339 seq_puts(m, "# ||||| delay \n"); 1162 seq_puts(m, "# ||||| delay \n");
1340 seq_puts(m, "# cmd pid ||||| time | caller \n"); 1163 seq_puts(m, "# cmd pid ||||| time | caller \n");
1341 seq_puts(m, "# \\ / ||||| \\ | / \n"); 1164 seq_puts(m, "# \\ / ||||| \\ | / \n");
1342} 1165}
1343 1166
1344static void print_func_help_header(struct seq_file *m) 1167static void print_func_help_header(struct seq_file *m)
1345{ 1168{
1346 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n"); 1169 seq_puts(m, "# TASK-PID CPU# TIMESTAMP FUNCTION\n");
1347 seq_puts(m, "# | | | | |\n"); 1170 seq_puts(m, "# | | | | |\n");
1348} 1171}
1349 1172
1350 1173
@@ -1355,23 +1178,16 @@ print_trace_header(struct seq_file *m, struct trace_iterator *iter)
1355 struct trace_array *tr = iter->tr; 1178 struct trace_array *tr = iter->tr;
1356 struct trace_array_cpu *data = tr->data[tr->cpu]; 1179 struct trace_array_cpu *data = tr->data[tr->cpu];
1357 struct tracer *type = current_trace; 1180 struct tracer *type = current_trace;
1358 unsigned long total = 0; 1181 unsigned long total;
1359 unsigned long entries = 0; 1182 unsigned long entries;
1360 int cpu;
1361 const char *name = "preemption"; 1183 const char *name = "preemption";
1362 1184
1363 if (type) 1185 if (type)
1364 name = type->name; 1186 name = type->name;
1365 1187
1366 for_each_tracing_cpu(cpu) { 1188 entries = ring_buffer_entries(iter->tr->buffer);
1367 if (head_page(tr->data[cpu])) { 1189 total = entries +
1368 total += tr->data[cpu]->trace_idx; 1190 ring_buffer_overruns(iter->tr->buffer);
1369 if (tr->data[cpu]->trace_idx > tr->entries)
1370 entries += tr->entries;
1371 else
1372 entries += tr->data[cpu]->trace_idx;
1373 }
1374 }
1375 1191
1376 seq_printf(m, "%s latency trace v1.1.5 on %s\n", 1192 seq_printf(m, "%s latency trace v1.1.5 on %s\n",
1377 name, UTS_RELEASE); 1193 name, UTS_RELEASE);
@@ -1428,7 +1244,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1428 comm = trace_find_cmdline(entry->pid); 1244 comm = trace_find_cmdline(entry->pid);
1429 1245
1430 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid); 1246 trace_seq_printf(s, "%8.8s-%-5d ", comm, entry->pid);
1431 trace_seq_printf(s, "%d", cpu); 1247 trace_seq_printf(s, "%3d", cpu);
1432 trace_seq_printf(s, "%c%c", 1248 trace_seq_printf(s, "%c%c",
1433 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.', 1249 (entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' : '.',
1434 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.')); 1250 ((entry->flags & TRACE_FLAG_NEED_RESCHED) ? 'N' : '.'));
@@ -1457,7 +1273,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu)
1457unsigned long preempt_mark_thresh = 100; 1273unsigned long preempt_mark_thresh = 100;
1458 1274
1459static void 1275static void
1460lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs, 1276lat_print_timestamp(struct trace_seq *s, u64 abs_usecs,
1461 unsigned long rel_usecs) 1277 unsigned long rel_usecs)
1462{ 1278{
1463 trace_seq_printf(s, " %4lldus", abs_usecs); 1279 trace_seq_printf(s, " %4lldus", abs_usecs);
@@ -1471,34 +1287,76 @@ lat_print_timestamp(struct trace_seq *s, unsigned long long abs_usecs,
1471 1287
1472static const char state_to_char[] = TASK_STATE_TO_CHAR_STR; 1288static const char state_to_char[] = TASK_STATE_TO_CHAR_STR;
1473 1289
1474static int 1290/*
1291 * The message is supposed to contain an ending newline.
1292 * If the printing stops prematurely, try to add a newline of our own.
1293 */
1294void trace_seq_print_cont(struct trace_seq *s, struct trace_iterator *iter)
1295{
1296 struct trace_entry *ent;
1297 struct trace_field_cont *cont;
1298 bool ok = true;
1299
1300 ent = peek_next_entry(iter, iter->cpu, NULL);
1301 if (!ent || ent->type != TRACE_CONT) {
1302 trace_seq_putc(s, '\n');
1303 return;
1304 }
1305
1306 do {
1307 cont = (struct trace_field_cont *)ent;
1308 if (ok)
1309 ok = (trace_seq_printf(s, "%s", cont->buf) > 0);
1310
1311 ftrace_disable_cpu();
1312
1313 if (iter->buffer_iter[iter->cpu])
1314 ring_buffer_read(iter->buffer_iter[iter->cpu], NULL);
1315 else
1316 ring_buffer_consume(iter->tr->buffer, iter->cpu, NULL);
1317
1318 ftrace_enable_cpu();
1319
1320 ent = peek_next_entry(iter, iter->cpu, NULL);
1321 } while (ent && ent->type == TRACE_CONT);
1322
1323 if (!ok)
1324 trace_seq_putc(s, '\n');
1325}
1326
1327static enum print_line_t
1475print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu) 1328print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1476{ 1329{
1477 struct trace_seq *s = &iter->seq; 1330 struct trace_seq *s = &iter->seq;
1478 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1331 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
1479 struct trace_entry *next_entry = find_next_entry(iter, NULL); 1332 struct trace_entry *next_entry;
1480 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE); 1333 unsigned long verbose = (trace_flags & TRACE_ITER_VERBOSE);
1481 struct trace_entry *entry = iter->ent; 1334 struct trace_entry *entry = iter->ent;
1482 unsigned long abs_usecs; 1335 unsigned long abs_usecs;
1483 unsigned long rel_usecs; 1336 unsigned long rel_usecs;
1337 u64 next_ts;
1484 char *comm; 1338 char *comm;
1485 int S, T; 1339 int S, T;
1486 int i; 1340 int i;
1487 unsigned state; 1341 unsigned state;
1488 1342
1343 if (entry->type == TRACE_CONT)
1344 return TRACE_TYPE_HANDLED;
1345
1346 next_entry = find_next_entry(iter, NULL, &next_ts);
1489 if (!next_entry) 1347 if (!next_entry)
1490 next_entry = entry; 1348 next_ts = iter->ts;
1491 rel_usecs = ns2usecs(next_entry->t - entry->t); 1349 rel_usecs = ns2usecs(next_ts - iter->ts);
1492 abs_usecs = ns2usecs(entry->t - iter->tr->time_start); 1350 abs_usecs = ns2usecs(iter->ts - iter->tr->time_start);
1493 1351
1494 if (verbose) { 1352 if (verbose) {
1495 comm = trace_find_cmdline(entry->pid); 1353 comm = trace_find_cmdline(entry->pid);
1496 trace_seq_printf(s, "%16s %5d %d %d %08x %08x [%08lx]" 1354 trace_seq_printf(s, "%16s %5d %3d %d %08x %08x [%08lx]"
1497 " %ld.%03ldms (+%ld.%03ldms): ", 1355 " %ld.%03ldms (+%ld.%03ldms): ",
1498 comm, 1356 comm,
1499 entry->pid, cpu, entry->flags, 1357 entry->pid, cpu, entry->flags,
1500 entry->preempt_count, trace_idx, 1358 entry->preempt_count, trace_idx,
1501 ns2usecs(entry->t), 1359 ns2usecs(iter->ts),
1502 abs_usecs/1000, 1360 abs_usecs/1000,
1503 abs_usecs % 1000, rel_usecs/1000, 1361 abs_usecs % 1000, rel_usecs/1000,
1504 rel_usecs % 1000); 1362 rel_usecs % 1000);
@@ -1507,52 +1365,85 @@ print_lat_fmt(struct trace_iterator *iter, unsigned int trace_idx, int cpu)
1507 lat_print_timestamp(s, abs_usecs, rel_usecs); 1365 lat_print_timestamp(s, abs_usecs, rel_usecs);
1508 } 1366 }
1509 switch (entry->type) { 1367 switch (entry->type) {
1510 case TRACE_FN: 1368 case TRACE_FN: {
1511 seq_print_ip_sym(s, entry->fn.ip, sym_flags); 1369 struct ftrace_entry *field;
1370
1371 trace_assign_type(field, entry);
1372
1373 seq_print_ip_sym(s, field->ip, sym_flags);
1512 trace_seq_puts(s, " ("); 1374 trace_seq_puts(s, " (");
1513 if (kretprobed(entry->fn.parent_ip)) 1375 if (kretprobed(field->parent_ip))
1514 trace_seq_puts(s, KRETPROBE_MSG); 1376 trace_seq_puts(s, KRETPROBE_MSG);
1515 else 1377 else
1516 seq_print_ip_sym(s, entry->fn.parent_ip, sym_flags); 1378 seq_print_ip_sym(s, field->parent_ip, sym_flags);
1517 trace_seq_puts(s, ")\n"); 1379 trace_seq_puts(s, ")\n");
1518 break; 1380 break;
1381 }
1519 case TRACE_CTX: 1382 case TRACE_CTX:
1520 case TRACE_WAKE: 1383 case TRACE_WAKE: {
1521 T = entry->ctx.next_state < sizeof(state_to_char) ? 1384 struct ctx_switch_entry *field;
1522 state_to_char[entry->ctx.next_state] : 'X'; 1385
1386 trace_assign_type(field, entry);
1387
1388 T = field->next_state < sizeof(state_to_char) ?
1389 state_to_char[field->next_state] : 'X';
1523 1390
1524 state = entry->ctx.prev_state ? __ffs(entry->ctx.prev_state) + 1 : 0; 1391 state = field->prev_state ?
1392 __ffs(field->prev_state) + 1 : 0;
1525 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X'; 1393 S = state < sizeof(state_to_char) - 1 ? state_to_char[state] : 'X';
1526 comm = trace_find_cmdline(entry->ctx.next_pid); 1394 comm = trace_find_cmdline(field->next_pid);
1527 trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c %s\n", 1395 trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n",
1528 entry->ctx.prev_pid, 1396 field->prev_pid,
1529 entry->ctx.prev_prio, 1397 field->prev_prio,
1530 S, entry->type == TRACE_CTX ? "==>" : " +", 1398 S, entry->type == TRACE_CTX ? "==>" : " +",
1531 entry->ctx.next_pid, 1399 field->next_cpu,
1532 entry->ctx.next_prio, 1400 field->next_pid,
1401 field->next_prio,
1533 T, comm); 1402 T, comm);
1534 break; 1403 break;
1535 case TRACE_SPECIAL: 1404 }
1405 case TRACE_SPECIAL: {
1406 struct special_entry *field;
1407
1408 trace_assign_type(field, entry);
1409
1536 trace_seq_printf(s, "# %ld %ld %ld\n", 1410 trace_seq_printf(s, "# %ld %ld %ld\n",
1537 entry->special.arg1, 1411 field->arg1,
1538 entry->special.arg2, 1412 field->arg2,
1539 entry->special.arg3); 1413 field->arg3);
1540 break; 1414 break;
1541 case TRACE_STACK: 1415 }
1416 case TRACE_STACK: {
1417 struct stack_entry *field;
1418
1419 trace_assign_type(field, entry);
1420
1542 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { 1421 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1543 if (i) 1422 if (i)
1544 trace_seq_puts(s, " <= "); 1423 trace_seq_puts(s, " <= ");
1545 seq_print_ip_sym(s, entry->stack.caller[i], sym_flags); 1424 seq_print_ip_sym(s, field->caller[i], sym_flags);
1546 } 1425 }
1547 trace_seq_puts(s, "\n"); 1426 trace_seq_puts(s, "\n");
1548 break; 1427 break;
1428 }
1429 case TRACE_PRINT: {
1430 struct print_entry *field;
1431
1432 trace_assign_type(field, entry);
1433
1434 seq_print_ip_sym(s, field->ip, sym_flags);
1435 trace_seq_printf(s, ": %s", field->buf);
1436 if (entry->flags & TRACE_FLAG_CONT)
1437 trace_seq_print_cont(s, iter);
1438 break;
1439 }
1549 default: 1440 default:
1550 trace_seq_printf(s, "Unknown type %d\n", entry->type); 1441 trace_seq_printf(s, "Unknown type %d\n", entry->type);
1551 } 1442 }
1552 return 1; 1443 return TRACE_TYPE_HANDLED;
1553} 1444}
1554 1445
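The printers now return enum print_line_t rather than a bare 0/1, which makes the "ran out of room in the trace_seq" case explicit. What a caller is presumably expected to do with it (emit_one() is a made-up wrapper):

static void emit_one(struct trace_iterator *iter)
{
	switch (print_trace_line(iter)) {
	case TRACE_TYPE_PARTIAL_LINE:
		/* iter->seq filled up mid-line: flush it and retry later */
		break;
	case TRACE_TYPE_HANDLED:
		/* a complete line was rendered into iter->seq */
		break;
	default:
		break;
	}
}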
1555static int print_trace_fmt(struct trace_iterator *iter) 1446static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
1556{ 1447{
1557 struct trace_seq *s = &iter->seq; 1448 struct trace_seq *s = &iter->seq;
1558 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK); 1449 unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
@@ -1567,90 +1458,126 @@ static int print_trace_fmt(struct trace_iterator *iter)
1567 1458
1568 entry = iter->ent; 1459 entry = iter->ent;
1569 1460
1461 if (entry->type == TRACE_CONT)
1462 return TRACE_TYPE_HANDLED;
1463
1570 comm = trace_find_cmdline(iter->ent->pid); 1464 comm = trace_find_cmdline(iter->ent->pid);
1571 1465
1572 t = ns2usecs(entry->t); 1466 t = ns2usecs(iter->ts);
1573 usec_rem = do_div(t, 1000000ULL); 1467 usec_rem = do_div(t, 1000000ULL);
1574 secs = (unsigned long)t; 1468 secs = (unsigned long)t;
1575 1469
1576 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); 1470 ret = trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid);
1577 if (!ret) 1471 if (!ret)
1578 return 0; 1472 return TRACE_TYPE_PARTIAL_LINE;
1579 ret = trace_seq_printf(s, "[%02d] ", iter->cpu); 1473 ret = trace_seq_printf(s, "[%03d] ", iter->cpu);
1580 if (!ret) 1474 if (!ret)
1581 return 0; 1475 return TRACE_TYPE_PARTIAL_LINE;
1582 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem); 1476 ret = trace_seq_printf(s, "%5lu.%06lu: ", secs, usec_rem);
1583 if (!ret) 1477 if (!ret)
1584 return 0; 1478 return TRACE_TYPE_PARTIAL_LINE;
1585 1479
1586 switch (entry->type) { 1480 switch (entry->type) {
1587 case TRACE_FN: 1481 case TRACE_FN: {
1588 ret = seq_print_ip_sym(s, entry->fn.ip, sym_flags); 1482 struct ftrace_entry *field;
1483
1484 trace_assign_type(field, entry);
1485
1486 ret = seq_print_ip_sym(s, field->ip, sym_flags);
1589 if (!ret) 1487 if (!ret)
1590 return 0; 1488 return TRACE_TYPE_PARTIAL_LINE;
1591 if ((sym_flags & TRACE_ITER_PRINT_PARENT) && 1489 if ((sym_flags & TRACE_ITER_PRINT_PARENT) &&
1592 entry->fn.parent_ip) { 1490 field->parent_ip) {
1593 ret = trace_seq_printf(s, " <-"); 1491 ret = trace_seq_printf(s, " <-");
1594 if (!ret) 1492 if (!ret)
1595 return 0; 1493 return TRACE_TYPE_PARTIAL_LINE;
1596 if (kretprobed(entry->fn.parent_ip)) 1494 if (kretprobed(field->parent_ip))
1597 ret = trace_seq_puts(s, KRETPROBE_MSG); 1495 ret = trace_seq_puts(s, KRETPROBE_MSG);
1598 else 1496 else
1599 ret = seq_print_ip_sym(s, entry->fn.parent_ip, 1497 ret = seq_print_ip_sym(s,
1498 field->parent_ip,
1600 sym_flags); 1499 sym_flags);
1601 if (!ret) 1500 if (!ret)
1602 return 0; 1501 return TRACE_TYPE_PARTIAL_LINE;
1603 } 1502 }
1604 ret = trace_seq_printf(s, "\n"); 1503 ret = trace_seq_printf(s, "\n");
1605 if (!ret) 1504 if (!ret)
1606 return 0; 1505 return TRACE_TYPE_PARTIAL_LINE;
1607 break; 1506 break;
1507 }
1608 case TRACE_CTX: 1508 case TRACE_CTX:
1609 case TRACE_WAKE: 1509 case TRACE_WAKE: {
1610 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1510 struct ctx_switch_entry *field;
1611 state_to_char[entry->ctx.prev_state] : 'X'; 1511
1612 T = entry->ctx.next_state < sizeof(state_to_char) ? 1512 trace_assign_type(field, entry);
1613 state_to_char[entry->ctx.next_state] : 'X'; 1513
1614 ret = trace_seq_printf(s, " %5d:%3d:%c %s %5d:%3d:%c\n", 1514 S = field->prev_state < sizeof(state_to_char) ?
1615 entry->ctx.prev_pid, 1515 state_to_char[field->prev_state] : 'X';
1616 entry->ctx.prev_prio, 1516 T = field->next_state < sizeof(state_to_char) ?
1517 state_to_char[field->next_state] : 'X';
1518 ret = trace_seq_printf(s, " %5d:%3d:%c %s [%03d] %5d:%3d:%c\n",
1519 field->prev_pid,
1520 field->prev_prio,
1617 S, 1521 S,
1618 entry->type == TRACE_CTX ? "==>" : " +", 1522 entry->type == TRACE_CTX ? "==>" : " +",
1619 entry->ctx.next_pid, 1523 field->next_cpu,
1620 entry->ctx.next_prio, 1524 field->next_pid,
1525 field->next_prio,
1621 T); 1526 T);
1622 if (!ret) 1527 if (!ret)
1623 return 0; 1528 return TRACE_TYPE_PARTIAL_LINE;
1624 break; 1529 break;
1625 case TRACE_SPECIAL: 1530 }
1531 case TRACE_SPECIAL: {
1532 struct special_entry *field;
1533
1534 trace_assign_type(field, entry);
1535
1626 ret = trace_seq_printf(s, "# %ld %ld %ld\n", 1536 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1627 entry->special.arg1, 1537 field->arg1,
1628 entry->special.arg2, 1538 field->arg2,
1629 entry->special.arg3); 1539 field->arg3);
1630 if (!ret) 1540 if (!ret)
1631 return 0; 1541 return TRACE_TYPE_PARTIAL_LINE;
1632 break; 1542 break;
1633 case TRACE_STACK: 1543 }
1544 case TRACE_STACK: {
1545 struct stack_entry *field;
1546
1547 trace_assign_type(field, entry);
1548
1634 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) { 1549 for (i = 0; i < FTRACE_STACK_ENTRIES; i++) {
1635 if (i) { 1550 if (i) {
1636 ret = trace_seq_puts(s, " <= "); 1551 ret = trace_seq_puts(s, " <= ");
1637 if (!ret) 1552 if (!ret)
1638 return 0; 1553 return TRACE_TYPE_PARTIAL_LINE;
1639 } 1554 }
1640 ret = seq_print_ip_sym(s, entry->stack.caller[i], 1555 ret = seq_print_ip_sym(s, field->caller[i],
1641 sym_flags); 1556 sym_flags);
1642 if (!ret) 1557 if (!ret)
1643 return 0; 1558 return TRACE_TYPE_PARTIAL_LINE;
1644 } 1559 }
1645 ret = trace_seq_puts(s, "\n"); 1560 ret = trace_seq_puts(s, "\n");
1646 if (!ret) 1561 if (!ret)
1647 return 0; 1562 return TRACE_TYPE_PARTIAL_LINE;
1648 break; 1563 break;
1649 } 1564 }
1650 return 1; 1565 case TRACE_PRINT: {
1566 struct print_entry *field;
1567
1568 trace_assign_type(field, entry);
1569
1570 seq_print_ip_sym(s, field->ip, sym_flags);
1571 trace_seq_printf(s, ": %s", field->buf);
1572 if (entry->flags & TRACE_FLAG_CONT)
1573 trace_seq_print_cont(s, iter);
1574 break;
1575 }
1576 }
1577 return TRACE_TYPE_HANDLED;
1651} 1578}
1652 1579
1653static int print_raw_fmt(struct trace_iterator *iter) 1580static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
1654{ 1581{
1655 struct trace_seq *s = &iter->seq; 1582 struct trace_seq *s = &iter->seq;
1656 struct trace_entry *entry; 1583 struct trace_entry *entry;
@@ -1659,47 +1586,77 @@ static int print_raw_fmt(struct trace_iterator *iter)
1659 1586
1660 entry = iter->ent; 1587 entry = iter->ent;
1661 1588
1589 if (entry->type == TRACE_CONT)
1590 return TRACE_TYPE_HANDLED;
1591
1662 ret = trace_seq_printf(s, "%d %d %llu ", 1592 ret = trace_seq_printf(s, "%d %d %llu ",
1663 entry->pid, iter->cpu, entry->t); 1593 entry->pid, iter->cpu, iter->ts);
1664 if (!ret) 1594 if (!ret)
1665 return 0; 1595 return TRACE_TYPE_PARTIAL_LINE;
1666 1596
1667 switch (entry->type) { 1597 switch (entry->type) {
1668 case TRACE_FN: 1598 case TRACE_FN: {
1599 struct ftrace_entry *field;
1600
1601 trace_assign_type(field, entry);
1602
1669 ret = trace_seq_printf(s, "%x %x\n", 1603 ret = trace_seq_printf(s, "%x %x\n",
1670 entry->fn.ip, entry->fn.parent_ip); 1604 field->ip,
1605 field->parent_ip);
1671 if (!ret) 1606 if (!ret)
1672 return 0; 1607 return TRACE_TYPE_PARTIAL_LINE;
1673 break; 1608 break;
1609 }
1674 case TRACE_CTX: 1610 case TRACE_CTX:
1675 case TRACE_WAKE: 1611 case TRACE_WAKE: {
1676 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1612 struct ctx_switch_entry *field;
1677 state_to_char[entry->ctx.prev_state] : 'X'; 1613
1678 T = entry->ctx.next_state < sizeof(state_to_char) ? 1614 trace_assign_type(field, entry);
1679 state_to_char[entry->ctx.next_state] : 'X'; 1615
1616 S = field->prev_state < sizeof(state_to_char) ?
1617 state_to_char[field->prev_state] : 'X';
1618 T = field->next_state < sizeof(state_to_char) ?
1619 state_to_char[field->next_state] : 'X';
1680 if (entry->type == TRACE_WAKE) 1620 if (entry->type == TRACE_WAKE)
1681 S = '+'; 1621 S = '+';
1682 ret = trace_seq_printf(s, "%d %d %c %d %d %c\n", 1622 ret = trace_seq_printf(s, "%d %d %c %d %d %d %c\n",
1683 entry->ctx.prev_pid, 1623 field->prev_pid,
1684 entry->ctx.prev_prio, 1624 field->prev_prio,
1685 S, 1625 S,
1686 entry->ctx.next_pid, 1626 field->next_cpu,
1687 entry->ctx.next_prio, 1627 field->next_pid,
1628 field->next_prio,
1688 T); 1629 T);
1689 if (!ret) 1630 if (!ret)
1690 return 0; 1631 return TRACE_TYPE_PARTIAL_LINE;
1691 break; 1632 break;
1633 }
1692 case TRACE_SPECIAL: 1634 case TRACE_SPECIAL:
1693 case TRACE_STACK: 1635 case TRACE_STACK: {
1636 struct special_entry *field;
1637
1638 trace_assign_type(field, entry);
1639
1694 ret = trace_seq_printf(s, "# %ld %ld %ld\n", 1640 ret = trace_seq_printf(s, "# %ld %ld %ld\n",
1695 entry->special.arg1, 1641 field->arg1,
1696 entry->special.arg2, 1642 field->arg2,
1697 entry->special.arg3); 1643 field->arg3);
1698 if (!ret) 1644 if (!ret)
1699 return 0; 1645 return TRACE_TYPE_PARTIAL_LINE;
1700 break; 1646 break;
1701 } 1647 }
1702 return 1; 1648 case TRACE_PRINT: {
1649 struct print_entry *field;
1650
1651 trace_assign_type(field, entry);
1652
1653 trace_seq_printf(s, "# %lx %s", field->ip, field->buf);
1654 if (entry->flags & TRACE_FLAG_CONT)
1655 trace_seq_print_cont(s, iter);
1656 break;
1657 }
1658 }
1659 return TRACE_TYPE_HANDLED;
1703} 1660}
1704 1661
1705#define SEQ_PUT_FIELD_RET(s, x) \ 1662#define SEQ_PUT_FIELD_RET(s, x) \
@@ -1710,11 +1667,12 @@ do { \
1710 1667
1711#define SEQ_PUT_HEX_FIELD_RET(s, x) \ 1668#define SEQ_PUT_HEX_FIELD_RET(s, x) \
1712do { \ 1669do { \
1670 BUILD_BUG_ON(sizeof(x) > MAX_MEMHEX_BYTES); \
1713 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \ 1671 if (!trace_seq_putmem_hex(s, &(x), sizeof(x))) \
1714 return 0; \ 1672 return 0; \
1715} while (0) 1673} while (0)
1716 1674
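With trace_seq_putmem_hex() no longer BUG()ing on oversized input, SEQ_PUT_HEX_FIELD_RET() now rejects anything wider than MAX_MEMHEX_BYTES (8 bytes) at compile time via BUILD_BUG_ON. A small sketch of a legitimate use inside a function that follows the macro's return-0-on-failure convention (put_stamp_hex() is made up):

static int put_stamp_hex(struct trace_seq *s, u64 ts)
{
	/* sizeof(ts) == 8 <= MAX_MEMHEX_BYTES, so the BUILD_BUG_ON passes */
	SEQ_PUT_HEX_FIELD_RET(s, ts);
	return 1;
}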
1717static int print_hex_fmt(struct trace_iterator *iter) 1675static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
1718{ 1676{
1719 struct trace_seq *s = &iter->seq; 1677 struct trace_seq *s = &iter->seq;
1720 unsigned char newline = '\n'; 1678 unsigned char newline = '\n';
@@ -1723,97 +1681,139 @@ static int print_hex_fmt(struct trace_iterator *iter)
1723 1681
1724 entry = iter->ent; 1682 entry = iter->ent;
1725 1683
1684 if (entry->type == TRACE_CONT)
1685 return TRACE_TYPE_HANDLED;
1686
1726 SEQ_PUT_HEX_FIELD_RET(s, entry->pid); 1687 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
1727 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu); 1688 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
1728 SEQ_PUT_HEX_FIELD_RET(s, entry->t); 1689 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
1729 1690
1730 switch (entry->type) { 1691 switch (entry->type) {
1731 case TRACE_FN: 1692 case TRACE_FN: {
1732 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.ip); 1693 struct ftrace_entry *field;
1733 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); 1694
1695 trace_assign_type(field, entry);
1696
1697 SEQ_PUT_HEX_FIELD_RET(s, field->ip);
1698 SEQ_PUT_HEX_FIELD_RET(s, field->parent_ip);
1734 break; 1699 break;
1700 }
1735 case TRACE_CTX: 1701 case TRACE_CTX:
1736 case TRACE_WAKE: 1702 case TRACE_WAKE: {
1737 S = entry->ctx.prev_state < sizeof(state_to_char) ? 1703 struct ctx_switch_entry *field;
1738 state_to_char[entry->ctx.prev_state] : 'X'; 1704
1739 T = entry->ctx.next_state < sizeof(state_to_char) ? 1705 trace_assign_type(field, entry);
1740 state_to_char[entry->ctx.next_state] : 'X'; 1706
1707 S = field->prev_state < sizeof(state_to_char) ?
1708 state_to_char[field->prev_state] : 'X';
1709 T = field->next_state < sizeof(state_to_char) ?
1710 state_to_char[field->next_state] : 'X';
1741 if (entry->type == TRACE_WAKE) 1711 if (entry->type == TRACE_WAKE)
1742 S = '+'; 1712 S = '+';
1743 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_pid); 1713 SEQ_PUT_HEX_FIELD_RET(s, field->prev_pid);
1744 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.prev_prio); 1714 SEQ_PUT_HEX_FIELD_RET(s, field->prev_prio);
1745 SEQ_PUT_HEX_FIELD_RET(s, S); 1715 SEQ_PUT_HEX_FIELD_RET(s, S);
1746 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_pid); 1716 SEQ_PUT_HEX_FIELD_RET(s, field->next_cpu);
1747 SEQ_PUT_HEX_FIELD_RET(s, entry->ctx.next_prio); 1717 SEQ_PUT_HEX_FIELD_RET(s, field->next_pid);
1748 SEQ_PUT_HEX_FIELD_RET(s, entry->fn.parent_ip); 1718 SEQ_PUT_HEX_FIELD_RET(s, field->next_prio);
1749 SEQ_PUT_HEX_FIELD_RET(s, T); 1719 SEQ_PUT_HEX_FIELD_RET(s, T);
1750 break; 1720 break;
1721 }
1751 case TRACE_SPECIAL: 1722 case TRACE_SPECIAL:
1752 case TRACE_STACK: 1723 case TRACE_STACK: {
1753 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg1); 1724 struct special_entry *field;
1754 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg2); 1725
1755 SEQ_PUT_HEX_FIELD_RET(s, entry->special.arg3); 1726 trace_assign_type(field, entry);
1727
1728 SEQ_PUT_HEX_FIELD_RET(s, field->arg1);
1729 SEQ_PUT_HEX_FIELD_RET(s, field->arg2);
1730 SEQ_PUT_HEX_FIELD_RET(s, field->arg3);
1756 break; 1731 break;
1757 } 1732 }
1733 }
1758 SEQ_PUT_FIELD_RET(s, newline); 1734 SEQ_PUT_FIELD_RET(s, newline);
1759 1735
1760 return 1; 1736 return TRACE_TYPE_HANDLED;
1761} 1737}
1762 1738
1763static int print_bin_fmt(struct trace_iterator *iter) 1739static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
1764{ 1740{
1765 struct trace_seq *s = &iter->seq; 1741 struct trace_seq *s = &iter->seq;
1766 struct trace_entry *entry; 1742 struct trace_entry *entry;
1767 1743
1768 entry = iter->ent; 1744 entry = iter->ent;
1769 1745
1746 if (entry->type == TRACE_CONT)
1747 return TRACE_TYPE_HANDLED;
1748
1770 SEQ_PUT_FIELD_RET(s, entry->pid); 1749 SEQ_PUT_FIELD_RET(s, entry->pid);
1771 SEQ_PUT_FIELD_RET(s, entry->cpu); 1750 SEQ_PUT_FIELD_RET(s, iter->cpu);
1772 SEQ_PUT_FIELD_RET(s, entry->t); 1751 SEQ_PUT_FIELD_RET(s, iter->ts);
1773 1752
1774 switch (entry->type) { 1753 switch (entry->type) {
1775 case TRACE_FN: 1754 case TRACE_FN: {
1776 SEQ_PUT_FIELD_RET(s, entry->fn.ip); 1755 struct ftrace_entry *field;
1777 SEQ_PUT_FIELD_RET(s, entry->fn.parent_ip); 1756
1757 trace_assign_type(field, entry);
1758
1759 SEQ_PUT_FIELD_RET(s, field->ip);
1760 SEQ_PUT_FIELD_RET(s, field->parent_ip);
1778 break; 1761 break;
1779 case TRACE_CTX: 1762 }
1780 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_pid); 1763 case TRACE_CTX: {
1781 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_prio); 1764 struct ctx_switch_entry *field;
1782 SEQ_PUT_FIELD_RET(s, entry->ctx.prev_state); 1765
1783 SEQ_PUT_FIELD_RET(s, entry->ctx.next_pid); 1766 trace_assign_type(field, entry);
1784 SEQ_PUT_FIELD_RET(s, entry->ctx.next_prio); 1767
1785 SEQ_PUT_FIELD_RET(s, entry->ctx.next_state); 1768 SEQ_PUT_FIELD_RET(s, field->prev_pid);
1769 SEQ_PUT_FIELD_RET(s, field->prev_prio);
1770 SEQ_PUT_FIELD_RET(s, field->prev_state);
1771 SEQ_PUT_FIELD_RET(s, field->next_pid);
1772 SEQ_PUT_FIELD_RET(s, field->next_prio);
1773 SEQ_PUT_FIELD_RET(s, field->next_state);
1786 break; 1774 break;
1775 }
1787 case TRACE_SPECIAL: 1776 case TRACE_SPECIAL:
1788 case TRACE_STACK: 1777 case TRACE_STACK: {
1789 SEQ_PUT_FIELD_RET(s, entry->special.arg1); 1778 struct special_entry *field;
1790 SEQ_PUT_FIELD_RET(s, entry->special.arg2); 1779
1791 SEQ_PUT_FIELD_RET(s, entry->special.arg3); 1780 trace_assign_type(field, entry);
1781
1782 SEQ_PUT_FIELD_RET(s, field->arg1);
1783 SEQ_PUT_FIELD_RET(s, field->arg2);
1784 SEQ_PUT_FIELD_RET(s, field->arg3);
1792 break; 1785 break;
1793 } 1786 }
1787 }
1794 return 1; 1788 return 1;
1795} 1789}
1796 1790
1797static int trace_empty(struct trace_iterator *iter) 1791static int trace_empty(struct trace_iterator *iter)
1798{ 1792{
1799 struct trace_array_cpu *data;
1800 int cpu; 1793 int cpu;
1801 1794
1802 for_each_tracing_cpu(cpu) { 1795 for_each_tracing_cpu(cpu) {
1803 data = iter->tr->data[cpu]; 1796 if (iter->buffer_iter[cpu]) {
1804 1797 if (!ring_buffer_iter_empty(iter->buffer_iter[cpu]))
1805 if (head_page(data) && data->trace_idx && 1798 return 0;
1806 (data->trace_tail != data->trace_head || 1799 } else {
1807 data->trace_tail_idx != data->trace_head_idx)) 1800 if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
1808 return 0; 1801 return 0;
1802 }
1809 } 1803 }
1804
1810 return 1; 1805 return 1;
1811} 1806}
1812 1807
1813static int print_trace_line(struct trace_iterator *iter) 1808static enum print_line_t print_trace_line(struct trace_iterator *iter)
1814{ 1809{
1815 if (iter->trace && iter->trace->print_line) 1810 enum print_line_t ret;
1816 return iter->trace->print_line(iter); 1811
1812 if (iter->trace && iter->trace->print_line) {
1813 ret = iter->trace->print_line(iter);
1814 if (ret != TRACE_TYPE_UNHANDLED)
1815 return ret;
1816 }
1817 1817
1818 if (trace_flags & TRACE_ITER_BIN) 1818 if (trace_flags & TRACE_ITER_BIN)
1819 return print_bin_fmt(iter); 1819 return print_bin_fmt(iter);
@@ -1869,6 +1869,8 @@ static struct trace_iterator *
1869__tracing_open(struct inode *inode, struct file *file, int *ret) 1869__tracing_open(struct inode *inode, struct file *file, int *ret)
1870{ 1870{
1871 struct trace_iterator *iter; 1871 struct trace_iterator *iter;
1872 struct seq_file *m;
1873 int cpu;
1872 1874
1873 if (tracing_disabled) { 1875 if (tracing_disabled) {
1874 *ret = -ENODEV; 1876 *ret = -ENODEV;
@@ -1889,28 +1891,45 @@ __tracing_open(struct inode *inode, struct file *file, int *ret)
1889 iter->trace = current_trace; 1891 iter->trace = current_trace;
1890 iter->pos = -1; 1892 iter->pos = -1;
1891 1893
1894 for_each_tracing_cpu(cpu) {
1895
1896 iter->buffer_iter[cpu] =
1897 ring_buffer_read_start(iter->tr->buffer, cpu);
1898
1899 if (!iter->buffer_iter[cpu])
1900 goto fail_buffer;
1901 }
1902
1892 /* TODO stop tracer */ 1903 /* TODO stop tracer */
1893 *ret = seq_open(file, &tracer_seq_ops); 1904 *ret = seq_open(file, &tracer_seq_ops);
1894 if (!*ret) { 1905 if (*ret)
1895 struct seq_file *m = file->private_data; 1906 goto fail_buffer;
1896 m->private = iter;
1897 1907
1898 /* stop the trace while dumping */ 1908 m = file->private_data;
1899 if (iter->tr->ctrl) { 1909 m->private = iter;
1900 tracer_enabled = 0;
1901 ftrace_function_enabled = 0;
1902 }
1903 1910
1904 if (iter->trace && iter->trace->open) 1911 /* stop the trace while dumping */
1905 iter->trace->open(iter); 1912 if (iter->tr->ctrl) {
1906 } else { 1913 tracer_enabled = 0;
1907 kfree(iter); 1914 ftrace_function_enabled = 0;
1908 iter = NULL;
1909 } 1915 }
1916
1917 if (iter->trace && iter->trace->open)
1918 iter->trace->open(iter);
1919
1910 mutex_unlock(&trace_types_lock); 1920 mutex_unlock(&trace_types_lock);
1911 1921
1912 out: 1922 out:
1913 return iter; 1923 return iter;
1924
1925 fail_buffer:
1926 for_each_tracing_cpu(cpu) {
1927 if (iter->buffer_iter[cpu])
1928 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1929 }
1930 mutex_unlock(&trace_types_lock);
1931
1932 return ERR_PTR(-ENOMEM);
1914} 1933}
1915 1934
1916int tracing_open_generic(struct inode *inode, struct file *filp) 1935int tracing_open_generic(struct inode *inode, struct file *filp)
@@ -1926,8 +1945,14 @@ int tracing_release(struct inode *inode, struct file *file)
1926{ 1945{
1927 struct seq_file *m = (struct seq_file *)file->private_data; 1946 struct seq_file *m = (struct seq_file *)file->private_data;
1928 struct trace_iterator *iter = m->private; 1947 struct trace_iterator *iter = m->private;
1948 int cpu;
1929 1949
1930 mutex_lock(&trace_types_lock); 1950 mutex_lock(&trace_types_lock);
1951 for_each_tracing_cpu(cpu) {
1952 if (iter->buffer_iter[cpu])
1953 ring_buffer_read_finish(iter->buffer_iter[cpu]);
1954 }
1955
1931 if (iter->trace && iter->trace->close) 1956 if (iter->trace && iter->trace->close)
1932 iter->trace->close(iter); 1957 iter->trace->close(iter);
1933 1958
@@ -2352,9 +2377,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2352 struct tracer *t; 2377 struct tracer *t;
2353 char buf[max_tracer_type_len+1]; 2378 char buf[max_tracer_type_len+1];
2354 int i; 2379 int i;
2380 size_t ret;
2355 2381
2356 if (cnt > max_tracer_type_len) 2382 if (cnt > max_tracer_type_len)
2357 cnt = max_tracer_type_len; 2383 cnt = max_tracer_type_len;
2384 ret = cnt;
2358 2385
2359 if (copy_from_user(&buf, ubuf, cnt)) 2386 if (copy_from_user(&buf, ubuf, cnt))
2360 return -EFAULT; 2387 return -EFAULT;
@@ -2370,7 +2397,11 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2370 if (strcmp(t->name, buf) == 0) 2397 if (strcmp(t->name, buf) == 0)
2371 break; 2398 break;
2372 } 2399 }
2373 if (!t || t == current_trace) 2400 if (!t) {
2401 ret = -EINVAL;
2402 goto out;
2403 }
2404 if (t == current_trace)
2374 goto out; 2405 goto out;
2375 2406
2376 if (current_trace && current_trace->reset) 2407 if (current_trace && current_trace->reset)
@@ -2383,9 +2414,10 @@ tracing_set_trace_write(struct file *filp, const char __user *ubuf,
2383 out: 2414 out:
2384 mutex_unlock(&trace_types_lock); 2415 mutex_unlock(&trace_types_lock);
2385 2416
2386 filp->f_pos += cnt; 2417 if (ret == cnt)
2418 filp->f_pos += cnt;
2387 2419
2388 return cnt; 2420 return ret;
2389} 2421}
2390 2422
2391static ssize_t 2423static ssize_t
@@ -2500,20 +2532,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2500 size_t cnt, loff_t *ppos) 2532 size_t cnt, loff_t *ppos)
2501{ 2533{
2502 struct trace_iterator *iter = filp->private_data; 2534 struct trace_iterator *iter = filp->private_data;
2503 struct trace_array_cpu *data;
2504 static cpumask_t mask;
2505 unsigned long flags;
2506#ifdef CONFIG_FTRACE
2507 int ftrace_save;
2508#endif
2509 int cpu;
2510 ssize_t sret; 2535 ssize_t sret;
2511 2536
2512 /* return any leftover data */ 2537 /* return any leftover data */
2513 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 2538 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2514 if (sret != -EBUSY) 2539 if (sret != -EBUSY)
2515 return sret; 2540 return sret;
2516 sret = 0;
2517 2541
2518 trace_seq_reset(&iter->seq); 2542 trace_seq_reset(&iter->seq);
2519 2543
@@ -2524,6 +2548,8 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2524 goto out; 2548 goto out;
2525 } 2549 }
2526 2550
2551waitagain:
2552 sret = 0;
2527 while (trace_empty(iter)) { 2553 while (trace_empty(iter)) {
2528 2554
2529 if ((filp->f_flags & O_NONBLOCK)) { 2555 if ((filp->f_flags & O_NONBLOCK)) {
@@ -2588,46 +2614,12 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2588 offsetof(struct trace_iterator, seq)); 2614 offsetof(struct trace_iterator, seq));
2589 iter->pos = -1; 2615 iter->pos = -1;
2590 2616
2591 /*
2592 * We need to stop all tracing on all CPUS to read the
2593 * the next buffer. This is a bit expensive, but is
2594 * not done often. We fill all what we can read,
2595 * and then release the locks again.
2596 */
2597
2598 cpus_clear(mask);
2599 local_irq_save(flags);
2600#ifdef CONFIG_FTRACE
2601 ftrace_save = ftrace_enabled;
2602 ftrace_enabled = 0;
2603#endif
2604 smp_wmb();
2605 for_each_tracing_cpu(cpu) {
2606 data = iter->tr->data[cpu];
2607
2608 if (!head_page(data) || !data->trace_idx)
2609 continue;
2610
2611 atomic_inc(&data->disabled);
2612 cpu_set(cpu, mask);
2613 }
2614
2615 for_each_cpu_mask(cpu, mask) {
2616 data = iter->tr->data[cpu];
2617 __raw_spin_lock(&data->lock);
2618
2619 if (data->overrun > iter->last_overrun[cpu])
2620 iter->overrun[cpu] +=
2621 data->overrun - iter->last_overrun[cpu];
2622 iter->last_overrun[cpu] = data->overrun;
2623 }
2624
2625 while (find_next_entry_inc(iter) != NULL) { 2617 while (find_next_entry_inc(iter) != NULL) {
2626 int ret; 2618 enum print_line_t ret;
2627 int len = iter->seq.len; 2619 int len = iter->seq.len;
2628 2620
2629 ret = print_trace_line(iter); 2621 ret = print_trace_line(iter);
2630 if (!ret) { 2622 if (ret == TRACE_TYPE_PARTIAL_LINE) {
2631 /* don't print partial lines */ 2623 /* don't print partial lines */
2632 iter->seq.len = len; 2624 iter->seq.len = len;
2633 break; 2625 break;
@@ -2639,26 +2631,17 @@ tracing_read_pipe(struct file *filp, char __user *ubuf,
2639 break; 2631 break;
2640 } 2632 }
2641 2633
2642 for_each_cpu_mask(cpu, mask) {
2643 data = iter->tr->data[cpu];
2644 __raw_spin_unlock(&data->lock);
2645 }
2646
2647 for_each_cpu_mask(cpu, mask) {
2648 data = iter->tr->data[cpu];
2649 atomic_dec(&data->disabled);
2650 }
2651#ifdef CONFIG_FTRACE
2652 ftrace_enabled = ftrace_save;
2653#endif
2654 local_irq_restore(flags);
2655
2656 /* Now copy what we have to the user */ 2634 /* Now copy what we have to the user */
2657 sret = trace_seq_to_user(&iter->seq, ubuf, cnt); 2635 sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
2658 if (iter->seq.readpos >= iter->seq.len) 2636 if (iter->seq.readpos >= iter->seq.len)
2659 trace_seq_reset(&iter->seq); 2637 trace_seq_reset(&iter->seq);
2638
2639 /*
 2640 * If there was nothing to send to user, in spite of consuming trace
2641 * entries, go back to wait for more entries.
2642 */
2660 if (sret == -EBUSY) 2643 if (sret == -EBUSY)
2661 sret = 0; 2644 goto waitagain;
2662 2645
2663out: 2646out:
2664 mutex_unlock(&trace_types_lock); 2647 mutex_unlock(&trace_types_lock);
@@ -2684,7 +2667,8 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2684{ 2667{
2685 unsigned long val; 2668 unsigned long val;
2686 char buf[64]; 2669 char buf[64];
2687 int i, ret; 2670 int ret;
2671 struct trace_array *tr = filp->private_data;
2688 2672
2689 if (cnt >= sizeof(buf)) 2673 if (cnt >= sizeof(buf))
2690 return -EINVAL; 2674 return -EINVAL;
@@ -2704,59 +2688,38 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2704 2688
2705 mutex_lock(&trace_types_lock); 2689 mutex_lock(&trace_types_lock);
2706 2690
2707 if (current_trace != &no_tracer) { 2691 if (tr->ctrl) {
2708 cnt = -EBUSY; 2692 cnt = -EBUSY;
2709 pr_info("ftrace: set current_tracer to none" 2693 pr_info("ftrace: please disable tracing"
2710 " before modifying buffer size\n"); 2694 " before modifying buffer size\n");
2711 goto out; 2695 goto out;
2712 } 2696 }
2713 2697
2714 if (val > global_trace.entries) { 2698 if (val != global_trace.entries) {
2715 long pages_requested; 2699 ret = ring_buffer_resize(global_trace.buffer, val);
2716 unsigned long freeable_pages; 2700 if (ret < 0) {
2717 2701 cnt = ret;
2718 /* make sure we have enough memory before mapping */
2719 pages_requested =
2720 (val + (ENTRIES_PER_PAGE-1)) / ENTRIES_PER_PAGE;
2721
2722 /* account for each buffer (and max_tr) */
2723 pages_requested *= tracing_nr_buffers * 2;
2724
2725 /* Check for overflow */
2726 if (pages_requested < 0) {
2727 cnt = -ENOMEM;
2728 goto out;
2729 }
2730
2731 freeable_pages = determine_dirtyable_memory();
2732
2733 /* we only allow to request 1/4 of useable memory */
2734 if (pages_requested >
2735 ((freeable_pages + tracing_pages_allocated) / 4)) {
2736 cnt = -ENOMEM;
2737 goto out; 2702 goto out;
2738 } 2703 }
2739 2704
2740 while (global_trace.entries < val) { 2705 ret = ring_buffer_resize(max_tr.buffer, val);
2741 if (trace_alloc_page()) { 2706 if (ret < 0) {
2742 cnt = -ENOMEM; 2707 int r;
2743 goto out; 2708 cnt = ret;
2709 r = ring_buffer_resize(global_trace.buffer,
2710 global_trace.entries);
2711 if (r < 0) {
2712 /* AARGH! We are left with different
2713 * size max buffer!!!! */
2714 WARN_ON(1);
2715 tracing_disabled = 1;
2744 } 2716 }
2745 /* double check that we don't go over the known pages */ 2717 goto out;
2746 if (tracing_pages_allocated > pages_requested)
2747 break;
2748 } 2718 }
2749 2719
2750 } else { 2720 global_trace.entries = val;
2751 /* include the number of entries in val (inc of page entries) */
2752 while (global_trace.entries > val + (ENTRIES_PER_PAGE - 1))
2753 trace_free_page();
2754 } 2721 }
2755 2722
2756 /* check integrity */
2757 for_each_tracing_cpu(i)
2758 check_pages(global_trace.data[i]);
2759
2760 filp->f_pos += cnt; 2723 filp->f_pos += cnt;
2761 2724
2762 /* If check pages failed, return ENOMEM */ 2725 /* If check pages failed, return ENOMEM */
@@ -2769,6 +2732,52 @@ tracing_entries_write(struct file *filp, const char __user *ubuf,
2769 return cnt; 2732 return cnt;
2770} 2733}
2771 2734
2735static int mark_printk(const char *fmt, ...)
2736{
2737 int ret;
2738 va_list args;
2739 va_start(args, fmt);
2740 ret = trace_vprintk(0, fmt, args);
2741 va_end(args);
2742 return ret;
2743}
2744
2745static ssize_t
2746tracing_mark_write(struct file *filp, const char __user *ubuf,
2747 size_t cnt, loff_t *fpos)
2748{
2749 char *buf;
2750 char *end;
2751 struct trace_array *tr = &global_trace;
2752
2753 if (!tr->ctrl || tracing_disabled)
2754 return -EINVAL;
2755
2756 if (cnt > TRACE_BUF_SIZE)
2757 cnt = TRACE_BUF_SIZE;
2758
2759 buf = kmalloc(cnt + 1, GFP_KERNEL);
2760 if (buf == NULL)
2761 return -ENOMEM;
2762
2763 if (copy_from_user(buf, ubuf, cnt)) {
2764 kfree(buf);
2765 return -EFAULT;
2766 }
2767
2768 /* Cut from the first nil or newline. */
2769 buf[cnt] = '\0';
2770 end = strchr(buf, '\n');
2771 if (end)
2772 *end = '\0';
2773
2774 cnt = mark_printk("%s\n", buf);
2775 kfree(buf);
2776 *fpos += cnt;
2777
2778 return cnt;
2779}
2780
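The new trace_marker file gives userspace a way to drop annotations into the same ring buffer: tracing_mark_write() above copies the string, cuts it at the first newline and hands it to mark_printk(), which lands as a TRACE_PRINT entry. A minimal, hedged userspace sketch follows; the /sys/kernel/debug mount point is an assumption (adjust if debugfs is mounted elsewhere), and tracing has to be enabled or the write fails with -EINVAL as coded above.

/*
 * Hedged sketch: write an annotation into the ftrace ring buffer from
 * userspace. The debugfs path is an assumption; tracing must be enabled
 * or the write is rejected, matching tracing_mark_write() above.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
        const char *msg = "hit the interesting code path\n";
        int fd = open("/sys/kernel/debug/tracing/trace_marker", O_WRONLY);

        if (fd < 0) {
                perror("open trace_marker");
                return 1;
        }
        /* One write() becomes one TRACE_PRINT entry, cut at the newline. */
        if (write(fd, msg, strlen(msg)) < 0)
                perror("write trace_marker");
        close(fd);
        return 0;
}

Writes longer than TRACE_BUF_SIZE are clamped, matching the check in tracing_mark_write().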
2772static struct file_operations tracing_max_lat_fops = { 2781static struct file_operations tracing_max_lat_fops = {
2773 .open = tracing_open_generic, 2782 .open = tracing_open_generic,
2774 .read = tracing_max_lat_read, 2783 .read = tracing_max_lat_read,
@@ -2800,6 +2809,11 @@ static struct file_operations tracing_entries_fops = {
2800 .write = tracing_entries_write, 2809 .write = tracing_entries_write,
2801}; 2810};
2802 2811
2812static struct file_operations tracing_mark_fops = {
2813 .open = tracing_open_generic,
2814 .write = tracing_mark_write,
2815};
2816
2803#ifdef CONFIG_DYNAMIC_FTRACE 2817#ifdef CONFIG_DYNAMIC_FTRACE
2804 2818
2805static ssize_t 2819static ssize_t
@@ -2846,7 +2860,7 @@ struct dentry *tracing_init_dentry(void)
2846#include "trace_selftest.c" 2860#include "trace_selftest.c"
2847#endif 2861#endif
2848 2862
2849static __init void tracer_init_debugfs(void) 2863static __init int tracer_init_debugfs(void)
2850{ 2864{
2851 struct dentry *d_tracer; 2865 struct dentry *d_tracer;
2852 struct dentry *entry; 2866 struct dentry *entry;
@@ -2881,12 +2895,12 @@ static __init void tracer_init_debugfs(void)
2881 entry = debugfs_create_file("available_tracers", 0444, d_tracer, 2895 entry = debugfs_create_file("available_tracers", 0444, d_tracer,
2882 &global_trace, &show_traces_fops); 2896 &global_trace, &show_traces_fops);
2883 if (!entry) 2897 if (!entry)
2884 pr_warning("Could not create debugfs 'trace' entry\n"); 2898 pr_warning("Could not create debugfs 'available_tracers' entry\n");
2885 2899
2886 entry = debugfs_create_file("current_tracer", 0444, d_tracer, 2900 entry = debugfs_create_file("current_tracer", 0444, d_tracer,
2887 &global_trace, &set_tracer_fops); 2901 &global_trace, &set_tracer_fops);
2888 if (!entry) 2902 if (!entry)
2889 pr_warning("Could not create debugfs 'trace' entry\n"); 2903 pr_warning("Could not create debugfs 'current_tracer' entry\n");
2890 2904
2891 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer, 2905 entry = debugfs_create_file("tracing_max_latency", 0644, d_tracer,
2892 &tracing_max_latency, 2906 &tracing_max_latency,
@@ -2899,7 +2913,7 @@ static __init void tracer_init_debugfs(void)
2899 &tracing_thresh, &tracing_max_lat_fops); 2913 &tracing_thresh, &tracing_max_lat_fops);
2900 if (!entry) 2914 if (!entry)
2901 pr_warning("Could not create debugfs " 2915 pr_warning("Could not create debugfs "
2902 "'tracing_threash' entry\n"); 2916 "'tracing_thresh' entry\n");
2903 entry = debugfs_create_file("README", 0644, d_tracer, 2917 entry = debugfs_create_file("README", 0644, d_tracer,
2904 NULL, &tracing_readme_fops); 2918 NULL, &tracing_readme_fops);
2905 if (!entry) 2919 if (!entry)
@@ -2909,13 +2923,19 @@ static __init void tracer_init_debugfs(void)
2909 NULL, &tracing_pipe_fops); 2923 NULL, &tracing_pipe_fops);
2910 if (!entry) 2924 if (!entry)
2911 pr_warning("Could not create debugfs " 2925 pr_warning("Could not create debugfs "
2912 "'tracing_threash' entry\n"); 2926 "'trace_pipe' entry\n");
2913 2927
2914 entry = debugfs_create_file("trace_entries", 0644, d_tracer, 2928 entry = debugfs_create_file("trace_entries", 0644, d_tracer,
2915 &global_trace, &tracing_entries_fops); 2929 &global_trace, &tracing_entries_fops);
2916 if (!entry) 2930 if (!entry)
2917 pr_warning("Could not create debugfs " 2931 pr_warning("Could not create debugfs "
2918 "'tracing_threash' entry\n"); 2932 "'trace_entries' entry\n");
2933
2934 entry = debugfs_create_file("trace_marker", 0220, d_tracer,
2935 NULL, &tracing_mark_fops);
2936 if (!entry)
2937 pr_warning("Could not create debugfs "
2938 "'trace_marker' entry\n");
2919 2939
2920#ifdef CONFIG_DYNAMIC_FTRACE 2940#ifdef CONFIG_DYNAMIC_FTRACE
2921 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer, 2941 entry = debugfs_create_file("dyn_ftrace_total_info", 0444, d_tracer,
@@ -2928,230 +2948,263 @@ static __init void tracer_init_debugfs(void)
2928#ifdef CONFIG_SYSPROF_TRACER 2948#ifdef CONFIG_SYSPROF_TRACER
2929 init_tracer_sysprof_debugfs(d_tracer); 2949 init_tracer_sysprof_debugfs(d_tracer);
2930#endif 2950#endif
2951 return 0;
2931} 2952}
2932 2953
2933static int trace_alloc_page(void) 2954int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
2934{ 2955{
2956 static DEFINE_SPINLOCK(trace_buf_lock);
2957 static char trace_buf[TRACE_BUF_SIZE];
2958
2959 struct ring_buffer_event *event;
2960 struct trace_array *tr = &global_trace;
2935 struct trace_array_cpu *data; 2961 struct trace_array_cpu *data;
2936 struct page *page, *tmp; 2962 struct print_entry *entry;
2937 LIST_HEAD(pages); 2963 unsigned long flags, irq_flags;
2938 void *array; 2964 int cpu, len = 0, size, pc;
2939 unsigned pages_allocated = 0;
2940 int i;
2941 2965
2942 /* first allocate a page for each CPU */ 2966 if (!tr->ctrl || tracing_disabled)
2943 for_each_tracing_cpu(i) { 2967 return 0;
2944 array = (void *)__get_free_page(GFP_KERNEL);
2945 if (array == NULL) {
2946 printk(KERN_ERR "tracer: failed to allocate page"
2947 "for trace buffer!\n");
2948 goto free_pages;
2949 }
2950 2968
2951 pages_allocated++; 2969 pc = preempt_count();
2952 page = virt_to_page(array); 2970 preempt_disable_notrace();
2953 list_add(&page->lru, &pages); 2971 cpu = raw_smp_processor_id();
2972 data = tr->data[cpu];
2954 2973
2955/* Only allocate if we are actually using the max trace */ 2974 if (unlikely(atomic_read(&data->disabled)))
2956#ifdef CONFIG_TRACER_MAX_TRACE 2975 goto out;
2957 array = (void *)__get_free_page(GFP_KERNEL);
2958 if (array == NULL) {
2959 printk(KERN_ERR "tracer: failed to allocate page"
2960 "for trace buffer!\n");
2961 goto free_pages;
2962 }
2963 pages_allocated++;
2964 page = virt_to_page(array);
2965 list_add(&page->lru, &pages);
2966#endif
2967 }
2968 2976
2969 /* Now that we successfully allocate a page per CPU, add them */ 2977 spin_lock_irqsave(&trace_buf_lock, flags);
2970 for_each_tracing_cpu(i) { 2978 len = vsnprintf(trace_buf, TRACE_BUF_SIZE, fmt, args);
2971 data = global_trace.data[i];
2972 page = list_entry(pages.next, struct page, lru);
2973 list_del_init(&page->lru);
2974 list_add_tail(&page->lru, &data->trace_pages);
2975 ClearPageLRU(page);
2976 2979
2977#ifdef CONFIG_TRACER_MAX_TRACE 2980 len = min(len, TRACE_BUF_SIZE-1);
2978 data = max_tr.data[i]; 2981 trace_buf[len] = 0;
2979 page = list_entry(pages.next, struct page, lru);
2980 list_del_init(&page->lru);
2981 list_add_tail(&page->lru, &data->trace_pages);
2982 SetPageLRU(page);
2983#endif
2984 }
2985 tracing_pages_allocated += pages_allocated;
2986 global_trace.entries += ENTRIES_PER_PAGE;
2987 2982
2988 return 0; 2983 size = sizeof(*entry) + len + 1;
2984 event = ring_buffer_lock_reserve(tr->buffer, size, &irq_flags);
2985 if (!event)
2986 goto out_unlock;
2987 entry = ring_buffer_event_data(event);
2988 tracing_generic_entry_update(&entry->ent, flags, pc);
2989 entry->ent.type = TRACE_PRINT;
2990 entry->ip = ip;
2989 2991
2990 free_pages: 2992 memcpy(&entry->buf, trace_buf, len);
2991 list_for_each_entry_safe(page, tmp, &pages, lru) { 2993 entry->buf[len] = 0;
2992 list_del_init(&page->lru); 2994 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
2993 __free_page(page); 2995
2994 } 2996 out_unlock:
2995 return -ENOMEM; 2997 spin_unlock_irqrestore(&trace_buf_lock, flags);
2998
2999 out:
3000 preempt_enable_notrace();
3001
3002 return len;
2996} 3003}
3004EXPORT_SYMBOL_GPL(trace_vprintk);
2997 3005
2998static int trace_free_page(void) 3006int __ftrace_printk(unsigned long ip, const char *fmt, ...)
2999{ 3007{
3000 struct trace_array_cpu *data; 3008 int ret;
3001 struct page *page; 3009 va_list ap;
3002 struct list_head *p;
3003 int i;
3004 int ret = 0;
3005 3010
3006 /* free one page from each buffer */ 3011 if (!(trace_flags & TRACE_ITER_PRINTK))
3007 for_each_tracing_cpu(i) { 3012 return 0;
3008 data = global_trace.data[i];
3009 p = data->trace_pages.next;
3010 if (p == &data->trace_pages) {
3011 /* should never happen */
3012 WARN_ON(1);
3013 tracing_disabled = 1;
3014 ret = -1;
3015 break;
3016 }
3017 page = list_entry(p, struct page, lru);
3018 ClearPageLRU(page);
3019 list_del(&page->lru);
3020 tracing_pages_allocated--;
3021 tracing_pages_allocated--;
3022 __free_page(page);
3023 3013
3024 tracing_reset(data); 3014 va_start(ap, fmt);
3015 ret = trace_vprintk(ip, fmt, ap);
3016 va_end(ap);
3017 return ret;
3018}
3019EXPORT_SYMBOL_GPL(__ftrace_printk);
3025 3020
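trace_vprintk()/__ftrace_printk() let kernel code inject printf-style records into the ring buffer (TRACE_PRINT entries, gated by the new "printk" trace option). A hedged kernel-side sketch; example_irq_event() is illustrative, and the declaration of __ftrace_printk() is assumed to come from <linux/ftrace.h> in this series.

/*
 * Hedged sketch: emit a printf-style trace record from kernel code.
 * example_irq_event() is illustrative; __ftrace_printk() is assumed to
 * be declared in <linux/ftrace.h>, and _THIS_IP_ comes from
 * <linux/kernel.h>. Output only appears while the "printk" trace
 * option (TRACE_ITER_PRINTK) is set.
 */
#include <linux/ftrace.h>
#include <linux/kernel.h>

static void example_irq_event(int irq, unsigned long latency_ns)
{
        __ftrace_printk(_THIS_IP_, "irq %d handled, latency %lu ns\n",
                        irq, latency_ns);
}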
3026#ifdef CONFIG_TRACER_MAX_TRACE 3021static int trace_panic_handler(struct notifier_block *this,
3027 data = max_tr.data[i]; 3022 unsigned long event, void *unused)
3028 p = data->trace_pages.next; 3023{
3029 if (p == &data->trace_pages) { 3024 ftrace_dump();
3030 /* should never happen */ 3025 return NOTIFY_OK;
3031 WARN_ON(1); 3026}
3032 tracing_disabled = 1;
3033 ret = -1;
3034 break;
3035 }
3036 page = list_entry(p, struct page, lru);
3037 ClearPageLRU(page);
3038 list_del(&page->lru);
3039 __free_page(page);
3040 3027
3041 tracing_reset(data); 3028static struct notifier_block trace_panic_notifier = {
3042#endif 3029 .notifier_call = trace_panic_handler,
3043 } 3030 .next = NULL,
3044 global_trace.entries -= ENTRIES_PER_PAGE; 3031 .priority = 150 /* priority: INT_MAX >= x >= 0 */
3032};
3045 3033
3046 return ret; 3034static int trace_die_handler(struct notifier_block *self,
3035 unsigned long val,
3036 void *data)
3037{
3038 switch (val) {
3039 case DIE_OOPS:
3040 ftrace_dump();
3041 break;
3042 default:
3043 break;
3044 }
3045 return NOTIFY_OK;
3047} 3046}
3048 3047
3049__init static int tracer_alloc_buffers(void) 3048static struct notifier_block trace_die_notifier = {
3049 .notifier_call = trace_die_handler,
3050 .priority = 200
3051};
3052
3053/*
3054 * printk is set to max of 1024, we really don't need it that big.
3055 * Nothing should be printing 1000 characters anyway.
3056 */
3057#define TRACE_MAX_PRINT 1000
3058
3059/*
3060 * Define here KERN_TRACE so that we have one place to modify
3061 * it if we decide to change what log level the ftrace dump
3062 * should be at.
3063 */
3064#define KERN_TRACE KERN_INFO
3065
3066static void
3067trace_printk_seq(struct trace_seq *s)
3050{ 3068{
3051 struct trace_array_cpu *data; 3069 /* Probably should print a warning here. */
3052 void *array; 3070 if (s->len >= 1000)
3053 struct page *page; 3071 s->len = 1000;
3054 int pages = 0;
3055 int ret = -ENOMEM;
3056 int i;
3057 3072
3058 /* TODO: make the number of buffers hot pluggable with CPUS */ 3073 /* should be zero ended, but we are paranoid. */
3059 tracing_nr_buffers = num_possible_cpus(); 3074 s->buffer[s->len] = 0;
3060 tracing_buffer_mask = cpu_possible_map;
3061 3075
3062 /* Allocate the first page for all buffers */ 3076 printk(KERN_TRACE "%s", s->buffer);
3063 for_each_tracing_cpu(i) {
3064 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3065 max_tr.data[i] = &per_cpu(max_data, i);
3066 3077
3067 array = (void *)__get_free_page(GFP_KERNEL); 3078 trace_seq_reset(s);
3068 if (array == NULL) { 3079}
3069 printk(KERN_ERR "tracer: failed to allocate page" 3080
3070 "for trace buffer!\n"); 3081
3071 goto free_buffers; 3082void ftrace_dump(void)
3072 } 3083{
3084 static DEFINE_SPINLOCK(ftrace_dump_lock);
3085 /* use static because iter can be a bit big for the stack */
3086 static struct trace_iterator iter;
3087 static cpumask_t mask;
3088 static int dump_ran;
3089 unsigned long flags;
3090 int cnt = 0, cpu;
3073 3091
3074 /* set the array to the list */ 3092 /* only one dump */
3075 INIT_LIST_HEAD(&data->trace_pages); 3093 spin_lock_irqsave(&ftrace_dump_lock, flags);
3076 page = virt_to_page(array); 3094 if (dump_ran)
3077 list_add(&page->lru, &data->trace_pages); 3095 goto out;
3078 /* use the LRU flag to differentiate the two buffers */
3079 ClearPageLRU(page);
3080 3096
3081 data->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED; 3097 dump_ran = 1;
3082 max_tr.data[i]->lock = (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
3083 3098
3084/* Only allocate if we are actually using the max trace */ 3099 /* No turning back! */
3085#ifdef CONFIG_TRACER_MAX_TRACE 3100 ftrace_kill_atomic();
3086 array = (void *)__get_free_page(GFP_KERNEL);
3087 if (array == NULL) {
3088 printk(KERN_ERR "tracer: failed to allocate page"
3089 "for trace buffer!\n");
3090 goto free_buffers;
3091 }
3092 3101
3093 INIT_LIST_HEAD(&max_tr.data[i]->trace_pages); 3102 for_each_tracing_cpu(cpu) {
3094 page = virt_to_page(array); 3103 atomic_inc(&global_trace.data[cpu]->disabled);
3095 list_add(&page->lru, &max_tr.data[i]->trace_pages);
3096 SetPageLRU(page);
3097#endif
3098 } 3104 }
3099 3105
3106 printk(KERN_TRACE "Dumping ftrace buffer:\n");
3107
3108 iter.tr = &global_trace;
3109 iter.trace = current_trace;
3110
3100 /* 3111 /*
 3101 * Since we allocate by orders of pages, we may be able to 3112 * We need to stop all tracing on all CPUs to read
3102 * round up a bit. 3113 * the next buffer. This is a bit expensive, but is
 3114 * not done often. We fill all that we can read,
3115 * and then release the locks again.
3103 */ 3116 */
3104 global_trace.entries = ENTRIES_PER_PAGE;
3105 pages++;
3106 3117
3107 while (global_trace.entries < trace_nr_entries) { 3118 cpus_clear(mask);
3108 if (trace_alloc_page()) 3119
3109 break; 3120 while (!trace_empty(&iter)) {
3110 pages++; 3121
3122 if (!cnt)
3123 printk(KERN_TRACE "---------------------------------\n");
3124
3125 cnt++;
3126
3127 /* reset all but tr, trace, and overruns */
3128 memset(&iter.seq, 0,
3129 sizeof(struct trace_iterator) -
3130 offsetof(struct trace_iterator, seq));
3131 iter.iter_flags |= TRACE_FILE_LAT_FMT;
3132 iter.pos = -1;
3133
3134 if (find_next_entry_inc(&iter) != NULL) {
3135 print_trace_line(&iter);
3136 trace_consume(&iter);
3137 }
3138
3139 trace_printk_seq(&iter.seq);
3111 } 3140 }
3112 max_tr.entries = global_trace.entries;
3113 3141
3114 pr_info("tracer: %d pages allocated for %ld entries of %ld bytes\n", 3142 if (!cnt)
3115 pages, trace_nr_entries, (long)TRACE_ENTRY_SIZE); 3143 printk(KERN_TRACE " (ftrace buffer empty)\n");
3116 pr_info(" actual entries %ld\n", global_trace.entries); 3144 else
3145 printk(KERN_TRACE "---------------------------------\n");
3146
3147 out:
3148 spin_unlock_irqrestore(&ftrace_dump_lock, flags);
3149}
3150
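ftrace_dump() is what the panic and die notifiers above invoke so the ring buffer reaches the console before the machine goes down; it kills further tracing (ftrace_kill_atomic()) and prints at KERN_TRACE level. Kernel code can trigger the same one-shot dump by hand; a hedged sketch, with example_check() purely illustrative and ftrace_dump() assumed to be visible through <linux/ftrace.h>.

/*
 * Hedged sketch: force the same one-shot dump the panic/die notifiers
 * perform when a driver hits an unrecoverable state. example_check()
 * is illustrative. Note that the dump disables tracing for good.
 */
#include <linux/ftrace.h>
#include <linux/kernel.h>

static void example_check(int status)
{
        if (WARN_ON(status < 0))
                ftrace_dump();  /* spill the ring buffer to the console */
}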
3151__init static int tracer_alloc_buffers(void)
3152{
3153 struct trace_array_cpu *data;
3154 int i;
3155
3156 /* TODO: make the number of buffers hot pluggable with CPUS */
3157 tracing_buffer_mask = cpu_possible_map;
3158
3159 global_trace.buffer = ring_buffer_alloc(trace_buf_size,
3160 TRACE_BUFFER_FLAGS);
3161 if (!global_trace.buffer) {
3162 printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
3163 WARN_ON(1);
3164 return 0;
3165 }
3166 global_trace.entries = ring_buffer_size(global_trace.buffer);
3117 3167
3118 tracer_init_debugfs(); 3168#ifdef CONFIG_TRACER_MAX_TRACE
3169 max_tr.buffer = ring_buffer_alloc(trace_buf_size,
3170 TRACE_BUFFER_FLAGS);
3171 if (!max_tr.buffer) {
3172 printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
3173 WARN_ON(1);
3174 ring_buffer_free(global_trace.buffer);
3175 return 0;
3176 }
3177 max_tr.entries = ring_buffer_size(max_tr.buffer);
3178 WARN_ON(max_tr.entries != global_trace.entries);
3179#endif
3180
3181 /* Allocate the first page for all buffers */
3182 for_each_tracing_cpu(i) {
3183 data = global_trace.data[i] = &per_cpu(global_trace_cpu, i);
3184 max_tr.data[i] = &per_cpu(max_data, i);
3185 }
3119 3186
3120 trace_init_cmdlines(); 3187 trace_init_cmdlines();
3121 3188
3122 register_tracer(&no_tracer); 3189 register_tracer(&nop_trace);
3123 current_trace = &no_tracer; 3190#ifdef CONFIG_BOOT_TRACER
3191 register_tracer(&boot_tracer);
3192 current_trace = &boot_tracer;
3193 current_trace->init(&global_trace);
3194#else
3195 current_trace = &nop_trace;
3196#endif
3124 3197
3125 /* All seems OK, enable tracing */ 3198 /* All seems OK, enable tracing */
3126 global_trace.ctrl = tracer_enabled; 3199 global_trace.ctrl = tracer_enabled;
3127 tracing_disabled = 0; 3200 tracing_disabled = 0;
3128 3201
3129 return 0; 3202 atomic_notifier_chain_register(&panic_notifier_list,
3203 &trace_panic_notifier);
3130 3204
3131 free_buffers: 3205 register_die_notifier(&trace_die_notifier);
3132 for (i-- ; i >= 0; i--) {
3133 struct page *page, *tmp;
3134 struct trace_array_cpu *data = global_trace.data[i];
3135 3206
3136 if (data) { 3207 return 0;
3137 list_for_each_entry_safe(page, tmp,
3138 &data->trace_pages, lru) {
3139 list_del_init(&page->lru);
3140 __free_page(page);
3141 }
3142 }
3143
3144#ifdef CONFIG_TRACER_MAX_TRACE
3145 data = max_tr.data[i];
3146 if (data) {
3147 list_for_each_entry_safe(page, tmp,
3148 &data->trace_pages, lru) {
3149 list_del_init(&page->lru);
3150 __free_page(page);
3151 }
3152 }
3153#endif
3154 }
3155 return ret;
3156} 3208}
3157fs_initcall(tracer_alloc_buffers); 3209early_initcall(tracer_alloc_buffers);
3210fs_initcall(tracer_init_debugfs);
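With the rewritten tracing_read_pipe() above, a blocking read on trace_pipe consumes entries and, if a round produced no printable output, loops back to wait (the new waitagain label) instead of returning 0. A hedged userspace sketch of a consumer; the /sys/kernel/debug mount point is an assumption.

/*
 * Hedged sketch: stream the trace from userspace. Entries handed back
 * by read() are consumed; an empty round blocks again inside the
 * kernel rather than returning 0. The debugfs path is an assumption.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[4096];
        ssize_t n;
        int fd = open("/sys/kernel/debug/tracing/trace_pipe", O_RDONLY);

        if (fd < 0) {
                perror("open trace_pipe");
                return 1;
        }
        while ((n = read(fd, buf, sizeof(buf))) > 0)
                fwrite(buf, 1, n, stdout);      /* already formatted text */
        close(fd);
        return 0;
}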
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index f69f86788c2b..f1f99572cde7 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -5,7 +5,9 @@
5#include <asm/atomic.h> 5#include <asm/atomic.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/clocksource.h> 7#include <linux/clocksource.h>
8#include <linux/ring_buffer.h>
8#include <linux/mmiotrace.h> 9#include <linux/mmiotrace.h>
10#include <linux/ftrace.h>
9 11
10enum trace_type { 12enum trace_type {
11 __TRACE_FIRST_TYPE = 0, 13 __TRACE_FIRST_TYPE = 0,
@@ -13,38 +15,60 @@ enum trace_type {
13 TRACE_FN, 15 TRACE_FN,
14 TRACE_CTX, 16 TRACE_CTX,
15 TRACE_WAKE, 17 TRACE_WAKE,
18 TRACE_CONT,
16 TRACE_STACK, 19 TRACE_STACK,
20 TRACE_PRINT,
17 TRACE_SPECIAL, 21 TRACE_SPECIAL,
18 TRACE_MMIO_RW, 22 TRACE_MMIO_RW,
19 TRACE_MMIO_MAP, 23 TRACE_MMIO_MAP,
24 TRACE_BOOT,
20 25
21 __TRACE_LAST_TYPE 26 __TRACE_LAST_TYPE
22}; 27};
23 28
24/* 29/*
30 * The trace entry - the most basic unit of tracing. This is what
31 * is printed in the end as a single line in the trace output, such as:
32 *
33 * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
34 */
35struct trace_entry {
36 unsigned char type;
37 unsigned char cpu;
38 unsigned char flags;
39 unsigned char preempt_count;
40 int pid;
41};
42
43/*
25 * Function trace entry - function address and parent function addres: 44 * Function trace entry - function address and parent function addres:
26 */ 45 */
27struct ftrace_entry { 46struct ftrace_entry {
47 struct trace_entry ent;
28 unsigned long ip; 48 unsigned long ip;
29 unsigned long parent_ip; 49 unsigned long parent_ip;
30}; 50};
51extern struct tracer boot_tracer;
31 52
32/* 53/*
33 * Context switch trace entry - which task (and prio) we switched from/to: 54 * Context switch trace entry - which task (and prio) we switched from/to:
34 */ 55 */
35struct ctx_switch_entry { 56struct ctx_switch_entry {
57 struct trace_entry ent;
36 unsigned int prev_pid; 58 unsigned int prev_pid;
37 unsigned char prev_prio; 59 unsigned char prev_prio;
38 unsigned char prev_state; 60 unsigned char prev_state;
39 unsigned int next_pid; 61 unsigned int next_pid;
40 unsigned char next_prio; 62 unsigned char next_prio;
41 unsigned char next_state; 63 unsigned char next_state;
64 unsigned int next_cpu;
42}; 65};
43 66
44/* 67/*
45 * Special (free-form) trace entry: 68 * Special (free-form) trace entry:
46 */ 69 */
47struct special_entry { 70struct special_entry {
71 struct trace_entry ent;
48 unsigned long arg1; 72 unsigned long arg1;
49 unsigned long arg2; 73 unsigned long arg2;
50 unsigned long arg3; 74 unsigned long arg3;
@@ -57,33 +81,60 @@ struct special_entry {
57#define FTRACE_STACK_ENTRIES 8 81#define FTRACE_STACK_ENTRIES 8
58 82
59struct stack_entry { 83struct stack_entry {
84 struct trace_entry ent;
60 unsigned long caller[FTRACE_STACK_ENTRIES]; 85 unsigned long caller[FTRACE_STACK_ENTRIES];
61}; 86};
62 87
63/* 88/*
64 * The trace entry - the most basic unit of tracing. This is what 89 * ftrace_printk entry:
65 * is printed in the end as a single line in the trace output, such as:
66 *
67 * bash-15816 [01] 235.197585: idle_cpu <- irq_enter
68 */ 90 */
69struct trace_entry { 91struct print_entry {
70 char type; 92 struct trace_entry ent;
71 char cpu; 93 unsigned long ip;
72 char flags; 94 char buf[];
73 char preempt_count; 95};
74 int pid; 96
75 cycle_t t; 97#define TRACE_OLD_SIZE 88
76 union { 98
77 struct ftrace_entry fn; 99struct trace_field_cont {
78 struct ctx_switch_entry ctx; 100 unsigned char type;
79 struct special_entry special; 101 /* Temporary till we get rid of this completely */
80 struct stack_entry stack; 102 char buf[TRACE_OLD_SIZE - 1];
81 struct mmiotrace_rw mmiorw; 103};
82 struct mmiotrace_map mmiomap; 104
83 }; 105struct trace_mmiotrace_rw {
106 struct trace_entry ent;
107 struct mmiotrace_rw rw;
84}; 108};
85 109
86#define TRACE_ENTRY_SIZE sizeof(struct trace_entry) 110struct trace_mmiotrace_map {
111 struct trace_entry ent;
112 struct mmiotrace_map map;
113};
114
115struct trace_boot {
116 struct trace_entry ent;
117 struct boot_trace initcall;
118};
119
120/*
121 * trace_flag_type is an enumeration that holds different
122 * states when a trace occurs. These are:
123 * IRQS_OFF - interrupts were disabled
 124 * NEED_RESCHED - reschedule is requested
125 * HARDIRQ - inside an interrupt handler
126 * SOFTIRQ - inside a softirq handler
127 * CONT - multiple entries hold the trace item
128 */
129enum trace_flag_type {
130 TRACE_FLAG_IRQS_OFF = 0x01,
131 TRACE_FLAG_NEED_RESCHED = 0x02,
132 TRACE_FLAG_HARDIRQ = 0x04,
133 TRACE_FLAG_SOFTIRQ = 0x08,
134 TRACE_FLAG_CONT = 0x10,
135};
136
137#define TRACE_BUF_SIZE 1024
87 138
88/* 139/*
89 * The CPU trace array - it consists of thousands of trace entries 140 * The CPU trace array - it consists of thousands of trace entries
@@ -91,16 +142,9 @@ struct trace_entry {
91 * the trace, etc.) 142 * the trace, etc.)
92 */ 143 */
93struct trace_array_cpu { 144struct trace_array_cpu {
94 struct list_head trace_pages;
95 atomic_t disabled; 145 atomic_t disabled;
96 raw_spinlock_t lock;
97 struct lock_class_key lock_key;
98 146
99 /* these fields get copied into max-trace: */ 147 /* these fields get copied into max-trace: */
100 unsigned trace_head_idx;
101 unsigned trace_tail_idx;
102 void *trace_head; /* producer */
103 void *trace_tail; /* consumer */
104 unsigned long trace_idx; 148 unsigned long trace_idx;
105 unsigned long overrun; 149 unsigned long overrun;
106 unsigned long saved_latency; 150 unsigned long saved_latency;
@@ -124,6 +168,7 @@ struct trace_iterator;
124 * They have on/off state as well: 168 * They have on/off state as well:
125 */ 169 */
126struct trace_array { 170struct trace_array {
171 struct ring_buffer *buffer;
127 unsigned long entries; 172 unsigned long entries;
128 long ctrl; 173 long ctrl;
129 int cpu; 174 int cpu;
@@ -132,6 +177,56 @@ struct trace_array {
132 struct trace_array_cpu *data[NR_CPUS]; 177 struct trace_array_cpu *data[NR_CPUS];
133}; 178};
134 179
180#define FTRACE_CMP_TYPE(var, type) \
181 __builtin_types_compatible_p(typeof(var), type *)
182
183#undef IF_ASSIGN
184#define IF_ASSIGN(var, entry, etype, id) \
185 if (FTRACE_CMP_TYPE(var, etype)) { \
186 var = (typeof(var))(entry); \
187 WARN_ON(id && (entry)->type != id); \
188 break; \
189 }
190
191/* Will cause compile errors if type is not found. */
192extern void __ftrace_bad_type(void);
193
194/*
195 * The trace_assign_type is a verifier that the entry type is
196 * the same as the type being assigned. To add new types simply
197 * add a line with the following format:
198 *
199 * IF_ASSIGN(var, ent, type, id);
200 *
201 * Where "type" is the trace type that includes the trace_entry
202 * as the "ent" item. And "id" is the trace identifier that is
203 * used in the trace_type enum.
204 *
205 * If the type can have more than one id, then use zero.
206 */
207#define trace_assign_type(var, ent) \
208 do { \
209 IF_ASSIGN(var, ent, struct ftrace_entry, TRACE_FN); \
210 IF_ASSIGN(var, ent, struct ctx_switch_entry, 0); \
211 IF_ASSIGN(var, ent, struct trace_field_cont, TRACE_CONT); \
212 IF_ASSIGN(var, ent, struct stack_entry, TRACE_STACK); \
213 IF_ASSIGN(var, ent, struct print_entry, TRACE_PRINT); \
214 IF_ASSIGN(var, ent, struct special_entry, 0); \
215 IF_ASSIGN(var, ent, struct trace_mmiotrace_rw, \
216 TRACE_MMIO_RW); \
217 IF_ASSIGN(var, ent, struct trace_mmiotrace_map, \
218 TRACE_MMIO_MAP); \
219 IF_ASSIGN(var, ent, struct trace_boot, TRACE_BOOT); \
220 __ftrace_bad_type(); \
221 } while (0)
222
223/* Return values for print_line callback */
224enum print_line_t {
225 TRACE_TYPE_PARTIAL_LINE = 0, /* Retry after flushing the seq */
226 TRACE_TYPE_HANDLED = 1,
227 TRACE_TYPE_UNHANDLED = 2 /* Relay to other output functions */
228};
229
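trace_assign_type() plus the print_line_t return codes define the contract for per-tracer output: cast the generic trace_entry to its concrete record (with a WARN_ON type check), return TRACE_TYPE_UNHANDLED for records you do not own so the default formatters run, and TRACE_TYPE_PARTIAL_LINE when the seq buffer fills so the core retries after flushing it. A hedged sketch of such a handler; example_print_fn() is illustrative and would live in a tracer .c file that includes "trace.h".

/*
 * Hedged sketch of a print_line-style handler using the pieces above.
 * example_print_fn() is illustrative; TRACE_FN, struct ftrace_entry,
 * trace_assign_type() and the print_line_t values are the real ones
 * from this header.
 */
static enum print_line_t example_print_fn(struct trace_iterator *iter)
{
        struct trace_entry *ent = iter->ent;
        struct ftrace_entry *field;

        if (ent->type != TRACE_FN)
                return TRACE_TYPE_UNHANDLED;    /* let the defaults run */

        /* WARN_ONs (via IF_ASSIGN) if ent->type does not match TRACE_FN. */
        trace_assign_type(field, ent);

        if (!trace_seq_printf(&iter->seq, "0x%lx <- 0x%lx\n",
                              field->ip, field->parent_ip))
                return TRACE_TYPE_PARTIAL_LINE; /* seq full, retried later */

        return TRACE_TYPE_HANDLED;
}

A real tracer wires such a handler through the .print_line member of struct tracer, as trace_boot.c does below.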
135/* 230/*
136 * A specific tracer, represented by methods that operate on a trace array: 231 * A specific tracer, represented by methods that operate on a trace array:
137 */ 232 */
@@ -152,7 +247,7 @@ struct tracer {
152 int (*selftest)(struct tracer *trace, 247 int (*selftest)(struct tracer *trace,
153 struct trace_array *tr); 248 struct trace_array *tr);
154#endif 249#endif
155 int (*print_line)(struct trace_iterator *iter); 250 enum print_line_t (*print_line)(struct trace_iterator *iter);
156 struct tracer *next; 251 struct tracer *next;
157 int print_max; 252 int print_max;
158}; 253};
@@ -171,57 +266,58 @@ struct trace_iterator {
171 struct trace_array *tr; 266 struct trace_array *tr;
172 struct tracer *trace; 267 struct tracer *trace;
173 void *private; 268 void *private;
174 long last_overrun[NR_CPUS]; 269 struct ring_buffer_iter *buffer_iter[NR_CPUS];
175 long overrun[NR_CPUS];
176 270
177 /* The below is zeroed out in pipe_read */ 271 /* The below is zeroed out in pipe_read */
178 struct trace_seq seq; 272 struct trace_seq seq;
179 struct trace_entry *ent; 273 struct trace_entry *ent;
180 int cpu; 274 int cpu;
181 275 u64 ts;
182 struct trace_entry *prev_ent;
183 int prev_cpu;
184 276
185 unsigned long iter_flags; 277 unsigned long iter_flags;
186 loff_t pos; 278 loff_t pos;
187 unsigned long next_idx[NR_CPUS];
188 struct list_head *next_page[NR_CPUS];
189 unsigned next_page_idx[NR_CPUS];
190 long idx; 279 long idx;
191}; 280};
192 281
193void tracing_reset(struct trace_array_cpu *data); 282void trace_wake_up(void);
283void tracing_reset(struct trace_array *tr, int cpu);
194int tracing_open_generic(struct inode *inode, struct file *filp); 284int tracing_open_generic(struct inode *inode, struct file *filp);
195struct dentry *tracing_init_dentry(void); 285struct dentry *tracing_init_dentry(void);
196void init_tracer_sysprof_debugfs(struct dentry *d_tracer); 286void init_tracer_sysprof_debugfs(struct dentry *d_tracer);
197 287
288struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
289 struct trace_array_cpu *data);
290void tracing_generic_entry_update(struct trace_entry *entry,
291 unsigned long flags,
292 int pc);
293
198void ftrace(struct trace_array *tr, 294void ftrace(struct trace_array *tr,
199 struct trace_array_cpu *data, 295 struct trace_array_cpu *data,
200 unsigned long ip, 296 unsigned long ip,
201 unsigned long parent_ip, 297 unsigned long parent_ip,
202 unsigned long flags); 298 unsigned long flags, int pc);
203void tracing_sched_switch_trace(struct trace_array *tr, 299void tracing_sched_switch_trace(struct trace_array *tr,
204 struct trace_array_cpu *data, 300 struct trace_array_cpu *data,
205 struct task_struct *prev, 301 struct task_struct *prev,
206 struct task_struct *next, 302 struct task_struct *next,
207 unsigned long flags); 303 unsigned long flags, int pc);
208void tracing_record_cmdline(struct task_struct *tsk); 304void tracing_record_cmdline(struct task_struct *tsk);
209 305
210void tracing_sched_wakeup_trace(struct trace_array *tr, 306void tracing_sched_wakeup_trace(struct trace_array *tr,
211 struct trace_array_cpu *data, 307 struct trace_array_cpu *data,
212 struct task_struct *wakee, 308 struct task_struct *wakee,
213 struct task_struct *cur, 309 struct task_struct *cur,
214 unsigned long flags); 310 unsigned long flags, int pc);
215void trace_special(struct trace_array *tr, 311void trace_special(struct trace_array *tr,
216 struct trace_array_cpu *data, 312 struct trace_array_cpu *data,
217 unsigned long arg1, 313 unsigned long arg1,
218 unsigned long arg2, 314 unsigned long arg2,
219 unsigned long arg3); 315 unsigned long arg3, int pc);
220void trace_function(struct trace_array *tr, 316void trace_function(struct trace_array *tr,
221 struct trace_array_cpu *data, 317 struct trace_array_cpu *data,
222 unsigned long ip, 318 unsigned long ip,
223 unsigned long parent_ip, 319 unsigned long parent_ip,
224 unsigned long flags); 320 unsigned long flags, int pc);
225 321
226void tracing_start_cmdline_record(void); 322void tracing_start_cmdline_record(void);
227void tracing_stop_cmdline_record(void); 323void tracing_stop_cmdline_record(void);
@@ -268,51 +364,33 @@ extern unsigned long ftrace_update_tot_cnt;
268extern int DYN_FTRACE_TEST_NAME(void); 364extern int DYN_FTRACE_TEST_NAME(void);
269#endif 365#endif
270 366
271#ifdef CONFIG_MMIOTRACE
272extern void __trace_mmiotrace_rw(struct trace_array *tr,
273 struct trace_array_cpu *data,
274 struct mmiotrace_rw *rw);
275extern void __trace_mmiotrace_map(struct trace_array *tr,
276 struct trace_array_cpu *data,
277 struct mmiotrace_map *map);
278#endif
279
280#ifdef CONFIG_FTRACE_STARTUP_TEST 367#ifdef CONFIG_FTRACE_STARTUP_TEST
281#ifdef CONFIG_FTRACE
282extern int trace_selftest_startup_function(struct tracer *trace, 368extern int trace_selftest_startup_function(struct tracer *trace,
283 struct trace_array *tr); 369 struct trace_array *tr);
284#endif
285#ifdef CONFIG_IRQSOFF_TRACER
286extern int trace_selftest_startup_irqsoff(struct tracer *trace, 370extern int trace_selftest_startup_irqsoff(struct tracer *trace,
287 struct trace_array *tr); 371 struct trace_array *tr);
288#endif
289#ifdef CONFIG_PREEMPT_TRACER
290extern int trace_selftest_startup_preemptoff(struct tracer *trace, 372extern int trace_selftest_startup_preemptoff(struct tracer *trace,
291 struct trace_array *tr); 373 struct trace_array *tr);
292#endif
293#if defined(CONFIG_IRQSOFF_TRACER) && defined(CONFIG_PREEMPT_TRACER)
294extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace, 374extern int trace_selftest_startup_preemptirqsoff(struct tracer *trace,
295 struct trace_array *tr); 375 struct trace_array *tr);
296#endif
297#ifdef CONFIG_SCHED_TRACER
298extern int trace_selftest_startup_wakeup(struct tracer *trace, 376extern int trace_selftest_startup_wakeup(struct tracer *trace,
299 struct trace_array *tr); 377 struct trace_array *tr);
300#endif 378extern int trace_selftest_startup_nop(struct tracer *trace,
301#ifdef CONFIG_CONTEXT_SWITCH_TRACER 379 struct trace_array *tr);
302extern int trace_selftest_startup_sched_switch(struct tracer *trace, 380extern int trace_selftest_startup_sched_switch(struct tracer *trace,
303 struct trace_array *tr); 381 struct trace_array *tr);
304#endif
305#ifdef CONFIG_SYSPROF_TRACER
306extern int trace_selftest_startup_sysprof(struct tracer *trace, 382extern int trace_selftest_startup_sysprof(struct tracer *trace,
307 struct trace_array *tr); 383 struct trace_array *tr);
308#endif
309#endif /* CONFIG_FTRACE_STARTUP_TEST */ 384#endif /* CONFIG_FTRACE_STARTUP_TEST */
310 385
311extern void *head_page(struct trace_array_cpu *data); 386extern void *head_page(struct trace_array_cpu *data);
312extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...); 387extern int trace_seq_printf(struct trace_seq *s, const char *fmt, ...);
388extern void trace_seq_print_cont(struct trace_seq *s,
389 struct trace_iterator *iter);
313extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, 390extern ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf,
314 size_t cnt); 391 size_t cnt);
315extern long ns2usecs(cycle_t nsec); 392extern long ns2usecs(cycle_t nsec);
393extern int trace_vprintk(unsigned long ip, const char *fmt, va_list args);
316 394
317extern unsigned long trace_flags; 395extern unsigned long trace_flags;
318 396
@@ -334,6 +412,9 @@ enum trace_iterator_flags {
334 TRACE_ITER_BLOCK = 0x80, 412 TRACE_ITER_BLOCK = 0x80,
335 TRACE_ITER_STACKTRACE = 0x100, 413 TRACE_ITER_STACKTRACE = 0x100,
336 TRACE_ITER_SCHED_TREE = 0x200, 414 TRACE_ITER_SCHED_TREE = 0x200,
415 TRACE_ITER_PRINTK = 0x400,
337}; 416};
338 417
418extern struct tracer nop_trace;
419
339#endif /* _LINUX_KERNEL_TRACE_H */ 420#endif /* _LINUX_KERNEL_TRACE_H */
diff --git a/kernel/trace/trace_boot.c b/kernel/trace/trace_boot.c
new file mode 100644
index 000000000000..d0a5e50eeff2
--- /dev/null
+++ b/kernel/trace/trace_boot.c
@@ -0,0 +1,126 @@
1/*
2 * ring buffer based initcalls tracer
3 *
4 * Copyright (C) 2008 Frederic Weisbecker <fweisbec@gmail.com>
5 *
6 */
7
8#include <linux/init.h>
9#include <linux/debugfs.h>
10#include <linux/ftrace.h>
11#include <linux/kallsyms.h>
12
13#include "trace.h"
14
15static struct trace_array *boot_trace;
16static int trace_boot_enabled;
17
18
19/* Should be started after do_pre_smp_initcalls() in init/main.c */
20void start_boot_trace(void)
21{
22 trace_boot_enabled = 1;
23}
24
25void stop_boot_trace(void)
26{
27 trace_boot_enabled = 0;
28}
29
30void reset_boot_trace(struct trace_array *tr)
31{
32 stop_boot_trace();
33}
34
35static void boot_trace_init(struct trace_array *tr)
36{
37 int cpu;
38 boot_trace = tr;
39
40 trace_boot_enabled = 0;
41
42 for_each_cpu_mask(cpu, cpu_possible_map)
43 tracing_reset(tr, cpu);
44}
45
46static void boot_trace_ctrl_update(struct trace_array *tr)
47{
48 if (tr->ctrl)
49 start_boot_trace();
50 else
51 stop_boot_trace();
52}
53
54static enum print_line_t initcall_print_line(struct trace_iterator *iter)
55{
56 int ret;
57 struct trace_entry *entry = iter->ent;
58 struct trace_boot *field = (struct trace_boot *)entry;
59 struct boot_trace *it = &field->initcall;
60 struct trace_seq *s = &iter->seq;
61 struct timespec calltime = ktime_to_timespec(it->calltime);
62 struct timespec rettime = ktime_to_timespec(it->rettime);
63
64 if (entry->type == TRACE_BOOT) {
65 ret = trace_seq_printf(s, "[%5ld.%09ld] calling %s @ %i\n",
66 calltime.tv_sec,
67 calltime.tv_nsec,
68 it->func, it->caller);
69 if (!ret)
70 return TRACE_TYPE_PARTIAL_LINE;
71
72 ret = trace_seq_printf(s, "[%5ld.%09ld] initcall %s "
73 "returned %d after %lld msecs\n",
74 rettime.tv_sec,
75 rettime.tv_nsec,
76 it->func, it->result, it->duration);
77
78 if (!ret)
79 return TRACE_TYPE_PARTIAL_LINE;
80 return TRACE_TYPE_HANDLED;
81 }
82 return TRACE_TYPE_UNHANDLED;
83}
84
85struct tracer boot_tracer __read_mostly =
86{
87 .name = "initcall",
88 .init = boot_trace_init,
89 .reset = reset_boot_trace,
90 .ctrl_update = boot_trace_ctrl_update,
91 .print_line = initcall_print_line,
92};
93
94void trace_boot(struct boot_trace *it, initcall_t fn)
95{
96 struct ring_buffer_event *event;
97 struct trace_boot *entry;
98 struct trace_array_cpu *data;
99 unsigned long irq_flags;
100 struct trace_array *tr = boot_trace;
101
102 if (!trace_boot_enabled)
103 return;
104
105 /* Get its name now since this function could
106 * disappear because it is in the .init section.
107 */
108 sprint_symbol(it->func, (unsigned long)fn);
109 preempt_disable();
110 data = tr->data[smp_processor_id()];
111
112 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
113 &irq_flags);
114 if (!event)
115 goto out;
116 entry = ring_buffer_event_data(event);
117 tracing_generic_entry_update(&entry->ent, 0, 0);
118 entry->ent.type = TRACE_BOOT;
119 entry->initcall = *it;
120 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
121
122 trace_wake_up();
123
124 out:
125 preempt_enable();
126}
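trace_boot() expects an already-filled struct boot_trace and only resolves the symbol name itself. Judging from the fields initcall_print_line() reads (calltime, rettime, caller, result, duration), a caller in the initcall path would look roughly like the sketch below; do_traced_initcall() is illustrative, the field types are inferred from the printer above, and struct boot_trace / trace_boot() are assumed to be declared via <linux/ftrace.h>.

/*
 * Hedged sketch of a trace_boot() caller. do_traced_initcall() is
 * illustrative; the boot_trace fields and their rough types are
 * inferred from initcall_print_line() above.
 */
#include <linux/ftrace.h>
#include <linux/init.h>
#include <linux/ktime.h>
#include <linux/sched.h>

static int __init do_traced_initcall(initcall_t fn)
{
        struct boot_trace it;
        int ret;

        it.caller = task_pid_nr(current);
        it.calltime = ktime_get();
        ret = fn();
        it.rettime = ktime_get();

        it.result = ret;
        /* roughly milliseconds: ns >> 20 */
        it.duration = ktime_to_ns(ktime_sub(it.rettime, it.calltime)) >> 20;

        trace_boot(&it, fn);    /* fills it.func and records the entry */
        return ret;
}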
diff --git a/kernel/trace/trace_functions.c b/kernel/trace/trace_functions.c
index 312144897970..e90eb0c2c56c 100644
--- a/kernel/trace/trace_functions.c
+++ b/kernel/trace/trace_functions.c
@@ -23,7 +23,7 @@ static void function_reset(struct trace_array *tr)
23 tr->time_start = ftrace_now(tr->cpu); 23 tr->time_start = ftrace_now(tr->cpu);
24 24
25 for_each_online_cpu(cpu) 25 for_each_online_cpu(cpu)
26 tracing_reset(tr->data[cpu]); 26 tracing_reset(tr, cpu);
27} 27}
28 28
29static void start_function_trace(struct trace_array *tr) 29static void start_function_trace(struct trace_array *tr)
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index ece6cfb649fa..a7db7f040ae0 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -95,7 +95,7 @@ irqsoff_tracer_call(unsigned long ip, unsigned long parent_ip)
95 disabled = atomic_inc_return(&data->disabled); 95 disabled = atomic_inc_return(&data->disabled);
96 96
97 if (likely(disabled == 1)) 97 if (likely(disabled == 1))
98 trace_function(tr, data, ip, parent_ip, flags); 98 trace_function(tr, data, ip, parent_ip, flags, preempt_count());
99 99
100 atomic_dec(&data->disabled); 100 atomic_dec(&data->disabled);
101} 101}
@@ -130,6 +130,7 @@ check_critical_timing(struct trace_array *tr,
130 unsigned long latency, t0, t1; 130 unsigned long latency, t0, t1;
131 cycle_t T0, T1, delta; 131 cycle_t T0, T1, delta;
132 unsigned long flags; 132 unsigned long flags;
133 int pc;
133 134
134 /* 135 /*
135 * usecs conversion is slow so we try to delay the conversion 136 * usecs conversion is slow so we try to delay the conversion
@@ -141,6 +142,8 @@ check_critical_timing(struct trace_array *tr,
141 142
142 local_save_flags(flags); 143 local_save_flags(flags);
143 144
145 pc = preempt_count();
146
144 if (!report_latency(delta)) 147 if (!report_latency(delta))
145 goto out; 148 goto out;
146 149
@@ -150,7 +153,7 @@ check_critical_timing(struct trace_array *tr,
150 if (!report_latency(delta)) 153 if (!report_latency(delta))
151 goto out_unlock; 154 goto out_unlock;
152 155
153 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); 156 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
154 157
155 latency = nsecs_to_usecs(delta); 158 latency = nsecs_to_usecs(delta);
156 159
@@ -173,8 +176,8 @@ out_unlock:
173out: 176out:
174 data->critical_sequence = max_sequence; 177 data->critical_sequence = max_sequence;
175 data->preempt_timestamp = ftrace_now(cpu); 178 data->preempt_timestamp = ftrace_now(cpu);
176 tracing_reset(data); 179 tracing_reset(tr, cpu);
177 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags); 180 trace_function(tr, data, CALLER_ADDR0, parent_ip, flags, pc);
178} 181}
179 182
180static inline void 183static inline void
@@ -203,11 +206,11 @@ start_critical_timing(unsigned long ip, unsigned long parent_ip)
203 data->critical_sequence = max_sequence; 206 data->critical_sequence = max_sequence;
204 data->preempt_timestamp = ftrace_now(cpu); 207 data->preempt_timestamp = ftrace_now(cpu);
205 data->critical_start = parent_ip ? : ip; 208 data->critical_start = parent_ip ? : ip;
206 tracing_reset(data); 209 tracing_reset(tr, cpu);
207 210
208 local_save_flags(flags); 211 local_save_flags(flags);
209 212
210 trace_function(tr, data, ip, parent_ip, flags); 213 trace_function(tr, data, ip, parent_ip, flags, preempt_count());
211 214
212 per_cpu(tracing_cpu, cpu) = 1; 215 per_cpu(tracing_cpu, cpu) = 1;
213 216
@@ -234,14 +237,14 @@ stop_critical_timing(unsigned long ip, unsigned long parent_ip)
234 237
235 data = tr->data[cpu]; 238 data = tr->data[cpu];
236 239
237 if (unlikely(!data) || unlikely(!head_page(data)) || 240 if (unlikely(!data) ||
238 !data->critical_start || atomic_read(&data->disabled)) 241 !data->critical_start || atomic_read(&data->disabled))
239 return; 242 return;
240 243
241 atomic_inc(&data->disabled); 244 atomic_inc(&data->disabled);
242 245
243 local_save_flags(flags); 246 local_save_flags(flags);
244 trace_function(tr, data, ip, parent_ip, flags); 247 trace_function(tr, data, ip, parent_ip, flags, preempt_count());
245 check_critical_timing(tr, data, parent_ip ? : ip, cpu); 248 check_critical_timing(tr, data, parent_ip ? : ip, cpu);
246 data->critical_start = 0; 249 data->critical_start = 0;
247 atomic_dec(&data->disabled); 250 atomic_dec(&data->disabled);
diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
index b13dc19dcbb4..f28484618ff0 100644
--- a/kernel/trace/trace_mmiotrace.c
+++ b/kernel/trace/trace_mmiotrace.c
@@ -27,7 +27,7 @@ static void mmio_reset_data(struct trace_array *tr)
27 tr->time_start = ftrace_now(tr->cpu); 27 tr->time_start = ftrace_now(tr->cpu);
28 28
29 for_each_online_cpu(cpu) 29 for_each_online_cpu(cpu)
30 tracing_reset(tr->data[cpu]); 30 tracing_reset(tr, cpu);
31} 31}
32 32
33static void mmio_trace_init(struct trace_array *tr) 33static void mmio_trace_init(struct trace_array *tr)
@@ -130,10 +130,14 @@ static unsigned long count_overruns(struct trace_iterator *iter)
130{ 130{
131 int cpu; 131 int cpu;
132 unsigned long cnt = 0; 132 unsigned long cnt = 0;
133/* FIXME: */
134#if 0
133 for_each_online_cpu(cpu) { 135 for_each_online_cpu(cpu) {
134 cnt += iter->overrun[cpu]; 136 cnt += iter->overrun[cpu];
135 iter->overrun[cpu] = 0; 137 iter->overrun[cpu] = 0;
136 } 138 }
139#endif
140 (void)cpu;
137 return cnt; 141 return cnt;
138} 142}
139 143
@@ -171,17 +175,21 @@ print_out:
171 return (ret == -EBUSY) ? 0 : ret; 175 return (ret == -EBUSY) ? 0 : ret;
172} 176}
173 177
174static int mmio_print_rw(struct trace_iterator *iter) 178static enum print_line_t mmio_print_rw(struct trace_iterator *iter)
175{ 179{
176 struct trace_entry *entry = iter->ent; 180 struct trace_entry *entry = iter->ent;
177 struct mmiotrace_rw *rw = &entry->mmiorw; 181 struct trace_mmiotrace_rw *field;
182 struct mmiotrace_rw *rw;
178 struct trace_seq *s = &iter->seq; 183 struct trace_seq *s = &iter->seq;
179 unsigned long long t = ns2usecs(entry->t); 184 unsigned long long t = ns2usecs(iter->ts);
180 unsigned long usec_rem = do_div(t, 1000000ULL); 185 unsigned long usec_rem = do_div(t, 1000000ULL);
181 unsigned secs = (unsigned long)t; 186 unsigned secs = (unsigned long)t;
182 int ret = 1; 187 int ret = 1;
183 188
184 switch (entry->mmiorw.opcode) { 189 trace_assign_type(field, entry);
190 rw = &field->rw;
191
192 switch (rw->opcode) {
185 case MMIO_READ: 193 case MMIO_READ:
186 ret = trace_seq_printf(s, 194 ret = trace_seq_printf(s,
187 "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n", 195 "R %d %lu.%06lu %d 0x%llx 0x%lx 0x%lx %d\n",
@@ -209,21 +217,25 @@ static int mmio_print_rw(struct trace_iterator *iter)
209 break; 217 break;
210 } 218 }
211 if (ret) 219 if (ret)
212 return 1; 220 return TRACE_TYPE_HANDLED;
213 return 0; 221 return TRACE_TYPE_PARTIAL_LINE;
214} 222}
215 223
216static int mmio_print_map(struct trace_iterator *iter) 224static enum print_line_t mmio_print_map(struct trace_iterator *iter)
217{ 225{
218 struct trace_entry *entry = iter->ent; 226 struct trace_entry *entry = iter->ent;
219 struct mmiotrace_map *m = &entry->mmiomap; 227 struct trace_mmiotrace_map *field;
228 struct mmiotrace_map *m;
220 struct trace_seq *s = &iter->seq; 229 struct trace_seq *s = &iter->seq;
221 unsigned long long t = ns2usecs(entry->t); 230 unsigned long long t = ns2usecs(iter->ts);
222 unsigned long usec_rem = do_div(t, 1000000ULL); 231 unsigned long usec_rem = do_div(t, 1000000ULL);
223 unsigned secs = (unsigned long)t; 232 unsigned secs = (unsigned long)t;
224 int ret = 1; 233 int ret;
225 234
226 switch (entry->mmiorw.opcode) { 235 trace_assign_type(field, entry);
236 m = &field->map;
237
238 switch (m->opcode) {
227 case MMIO_PROBE: 239 case MMIO_PROBE:
228 ret = trace_seq_printf(s, 240 ret = trace_seq_printf(s,
229 "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n", 241 "MAP %lu.%06lu %d 0x%llx 0x%lx 0x%lx 0x%lx %d\n",
@@ -241,20 +253,43 @@ static int mmio_print_map(struct trace_iterator *iter)
241 break; 253 break;
242 } 254 }
243 if (ret) 255 if (ret)
244 return 1; 256 return TRACE_TYPE_HANDLED;
245 return 0; 257 return TRACE_TYPE_PARTIAL_LINE;
258}
259
260static enum print_line_t mmio_print_mark(struct trace_iterator *iter)
261{
262 struct trace_entry *entry = iter->ent;
263 struct print_entry *print = (struct print_entry *)entry;
264 const char *msg = print->buf;
265 struct trace_seq *s = &iter->seq;
266 unsigned long long t = ns2usecs(iter->ts);
267 unsigned long usec_rem = do_div(t, 1000000ULL);
268 unsigned secs = (unsigned long)t;
269 int ret;
270
271 /* The trailing newline must be in the message. */
272 ret = trace_seq_printf(s, "MARK %lu.%06lu %s", secs, usec_rem, msg);
273 if (!ret)
274 return TRACE_TYPE_PARTIAL_LINE;
275
276 if (entry->flags & TRACE_FLAG_CONT)
277 trace_seq_print_cont(s, iter);
278
279 return TRACE_TYPE_HANDLED;
246} 280}
247 281
248/* return 0 to abort printing without consuming current entry in pipe mode */ 282static enum print_line_t mmio_print_line(struct trace_iterator *iter)
249static int mmio_print_line(struct trace_iterator *iter)
250{ 283{
251 switch (iter->ent->type) { 284 switch (iter->ent->type) {
252 case TRACE_MMIO_RW: 285 case TRACE_MMIO_RW:
253 return mmio_print_rw(iter); 286 return mmio_print_rw(iter);
254 case TRACE_MMIO_MAP: 287 case TRACE_MMIO_MAP:
255 return mmio_print_map(iter); 288 return mmio_print_map(iter);
289 case TRACE_PRINT:
290 return mmio_print_mark(iter);
256 default: 291 default:
257 return 1; /* ignore unknown entries */ 292 return TRACE_TYPE_HANDLED; /* ignore unknown entries */
258 } 293 }
259} 294}
260 295
@@ -276,6 +311,27 @@ __init static int init_mmio_trace(void)
276} 311}
277device_initcall(init_mmio_trace); 312device_initcall(init_mmio_trace);
278 313
314static void __trace_mmiotrace_rw(struct trace_array *tr,
315 struct trace_array_cpu *data,
316 struct mmiotrace_rw *rw)
317{
318 struct ring_buffer_event *event;
319 struct trace_mmiotrace_rw *entry;
320 unsigned long irq_flags;
321
322 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
323 &irq_flags);
324 if (!event)
325 return;
326 entry = ring_buffer_event_data(event);
327 tracing_generic_entry_update(&entry->ent, 0, preempt_count());
328 entry->ent.type = TRACE_MMIO_RW;
329 entry->rw = *rw;
330 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
331
332 trace_wake_up();
333}
334
279void mmio_trace_rw(struct mmiotrace_rw *rw) 335void mmio_trace_rw(struct mmiotrace_rw *rw)
280{ 336{
281 struct trace_array *tr = mmio_trace_array; 337 struct trace_array *tr = mmio_trace_array;
@@ -283,6 +339,27 @@ void mmio_trace_rw(struct mmiotrace_rw *rw)
283 __trace_mmiotrace_rw(tr, data, rw); 339 __trace_mmiotrace_rw(tr, data, rw);
284} 340}
285 341
342static void __trace_mmiotrace_map(struct trace_array *tr,
343 struct trace_array_cpu *data,
344 struct mmiotrace_map *map)
345{
346 struct ring_buffer_event *event;
347 struct trace_mmiotrace_map *entry;
348 unsigned long irq_flags;
349
350 event = ring_buffer_lock_reserve(tr->buffer, sizeof(*entry),
351 &irq_flags);
352 if (!event)
353 return;
354 entry = ring_buffer_event_data(event);
355 tracing_generic_entry_update(&entry->ent, 0, preempt_count());
356 entry->ent.type = TRACE_MMIO_MAP;
357 entry->map = *map;
358 ring_buffer_unlock_commit(tr->buffer, event, irq_flags);
359
360 trace_wake_up();
361}
362
286void mmio_trace_mapping(struct mmiotrace_map *map) 363void mmio_trace_mapping(struct mmiotrace_map *map)
287{ 364{
288 struct trace_array *tr = mmio_trace_array; 365 struct trace_array *tr = mmio_trace_array;
@@ -293,3 +370,8 @@ void mmio_trace_mapping(struct mmiotrace_map *map)
293 __trace_mmiotrace_map(tr, data, map); 370 __trace_mmiotrace_map(tr, data, map);
294 preempt_enable(); 371 preempt_enable();
295} 372}
373
374int mmio_trace_printk(const char *fmt, va_list args)
375{
376 return trace_vprintk(0, fmt, args);
377}
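
The new mmio_trace_printk() above simply forwards to trace_vprintk(), and the resulting TRACE_PRINT entries are rendered by mmio_print_mark() as "MARK <sec>.<usec> <message>". A caller inside mmiotrace could wrap it roughly as in the sketch below; the wrapper name is hypothetical, and the one hard requirement, per the comment in mmio_print_mark(), is that the message carries its own trailing newline.

	/* hypothetical convenience wrapper; mmio_trace_printk() is assumed to
	 * be declared in the mmiotrace header */
	static void mmio_printk(const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		mmio_trace_printk(fmt, args);	/* fmt must end with '\n' */
		va_end(args);
	}
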
diff --git a/kernel/trace/trace_nop.c b/kernel/trace/trace_nop.c
new file mode 100644
index 000000000000..4592b4862515
--- /dev/null
+++ b/kernel/trace/trace_nop.c
@@ -0,0 +1,64 @@
1/*
2 * nop tracer
3 *
4 * Copyright (C) 2008 Steven Noonan <steven@uplinklabs.net>
5 *
6 */
7
8#include <linux/module.h>
9#include <linux/fs.h>
10#include <linux/debugfs.h>
11#include <linux/ftrace.h>
12
13#include "trace.h"
14
15static struct trace_array *ctx_trace;
16
17static void start_nop_trace(struct trace_array *tr)
18{
19 /* Nothing to do! */
20}
21
22static void stop_nop_trace(struct trace_array *tr)
23{
24 /* Nothing to do! */
25}
26
27static void nop_trace_init(struct trace_array *tr)
28{
29 int cpu;
30 ctx_trace = tr;
31
32 for_each_online_cpu(cpu)
33 tracing_reset(tr, cpu);
34
35 if (tr->ctrl)
36 start_nop_trace(tr);
37}
38
39static void nop_trace_reset(struct trace_array *tr)
40{
41 if (tr->ctrl)
42 stop_nop_trace(tr);
43}
44
45static void nop_trace_ctrl_update(struct trace_array *tr)
46{
47 /* When starting a new trace, reset the buffers */
48 if (tr->ctrl)
49 start_nop_trace(tr);
50 else
51 stop_nop_trace(tr);
52}
53
54struct tracer nop_trace __read_mostly =
55{
56 .name = "nop",
57 .init = nop_trace_init,
58 .reset = nop_trace_reset,
59 .ctrl_update = nop_trace_ctrl_update,
60#ifdef CONFIG_FTRACE_SELFTEST
61 .selftest = trace_selftest_startup_nop,
62#endif
63};
64
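
The nop tracer above defines its struct tracer but does not register it; that happens elsewhere via register_tracer(), the same way the other tracers in this directory are made visible to the core. Assuming a plain initcall did it (the function name below is illustrative), the boilerplate would amount to:

	__init static int init_nop_trace(void)
	{
		return register_tracer(&nop_trace);
	}
	device_initcall(init_nop_trace);

Once registered, the tracer can be selected at run time by writing "nop" to the current_tracer file in the tracing debugfs directory.
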
diff --git a/kernel/trace/trace_sched_switch.c b/kernel/trace/trace_sched_switch.c
index cb817a209aa0..b8f56beb1a62 100644
--- a/kernel/trace/trace_sched_switch.c
+++ b/kernel/trace/trace_sched_switch.c
@@ -9,8 +9,8 @@
9#include <linux/debugfs.h> 9#include <linux/debugfs.h>
10#include <linux/kallsyms.h> 10#include <linux/kallsyms.h>
11#include <linux/uaccess.h> 11#include <linux/uaccess.h>
12#include <linux/marker.h>
13#include <linux/ftrace.h> 12#include <linux/ftrace.h>
13#include <trace/sched.h>
14 14
15#include "trace.h" 15#include "trace.h"
16 16
@@ -19,15 +19,16 @@ static int __read_mostly tracer_enabled;
19static atomic_t sched_ref; 19static atomic_t sched_ref;
20 20
21static void 21static void
22sched_switch_func(void *private, void *__rq, struct task_struct *prev, 22probe_sched_switch(struct rq *__rq, struct task_struct *prev,
23 struct task_struct *next) 23 struct task_struct *next)
24{ 24{
25 struct trace_array **ptr = private;
26 struct trace_array *tr = *ptr;
27 struct trace_array_cpu *data; 25 struct trace_array_cpu *data;
28 unsigned long flags; 26 unsigned long flags;
29 long disabled;
30 int cpu; 27 int cpu;
28 int pc;
29
30 if (!atomic_read(&sched_ref))
31 return;
31 32
32 tracing_record_cmdline(prev); 33 tracing_record_cmdline(prev);
33 tracing_record_cmdline(next); 34 tracing_record_cmdline(next);
@@ -35,97 +36,41 @@ sched_switch_func(void *private, void *__rq, struct task_struct *prev,
35 if (!tracer_enabled) 36 if (!tracer_enabled)
36 return; 37 return;
37 38
39 pc = preempt_count();
38 local_irq_save(flags); 40 local_irq_save(flags);
39 cpu = raw_smp_processor_id(); 41 cpu = raw_smp_processor_id();
40 data = tr->data[cpu]; 42 data = ctx_trace->data[cpu];
41 disabled = atomic_inc_return(&data->disabled);
42 43
43 if (likely(disabled == 1)) 44 if (likely(!atomic_read(&data->disabled)))
44 tracing_sched_switch_trace(tr, data, prev, next, flags); 45 tracing_sched_switch_trace(ctx_trace, data, prev, next, flags, pc);
45 46
46 atomic_dec(&data->disabled);
47 local_irq_restore(flags); 47 local_irq_restore(flags);
48} 48}
49 49
50static notrace void
51sched_switch_callback(void *probe_data, void *call_data,
52 const char *format, va_list *args)
53{
54 struct task_struct *prev;
55 struct task_struct *next;
56 struct rq *__rq;
57
58 if (!atomic_read(&sched_ref))
59 return;
60
61 /* skip prev_pid %d next_pid %d prev_state %ld */
62 (void)va_arg(*args, int);
63 (void)va_arg(*args, int);
64 (void)va_arg(*args, long);
65 __rq = va_arg(*args, typeof(__rq));
66 prev = va_arg(*args, typeof(prev));
67 next = va_arg(*args, typeof(next));
68
69 /*
70 * If tracer_switch_func only points to the local
71 * switch func, it still needs the ptr passed to it.
72 */
73 sched_switch_func(probe_data, __rq, prev, next);
74}
75
76static void 50static void
77wakeup_func(void *private, void *__rq, struct task_struct *wakee, struct 51probe_sched_wakeup(struct rq *__rq, struct task_struct *wakee)
78 task_struct *curr)
79{ 52{
80 struct trace_array **ptr = private;
81 struct trace_array *tr = *ptr;
82 struct trace_array_cpu *data; 53 struct trace_array_cpu *data;
83 unsigned long flags; 54 unsigned long flags;
84 long disabled; 55 int cpu, pc;
85 int cpu;
86 56
87 if (!tracer_enabled) 57 if (!likely(tracer_enabled))
88 return; 58 return;
89 59
90 tracing_record_cmdline(curr); 60 pc = preempt_count();
61 tracing_record_cmdline(current);
91 62
92 local_irq_save(flags); 63 local_irq_save(flags);
93 cpu = raw_smp_processor_id(); 64 cpu = raw_smp_processor_id();
94 data = tr->data[cpu]; 65 data = ctx_trace->data[cpu];
95 disabled = atomic_inc_return(&data->disabled);
96 66
97 if (likely(disabled == 1)) 67 if (likely(!atomic_read(&data->disabled)))
98 tracing_sched_wakeup_trace(tr, data, wakee, curr, flags); 68 tracing_sched_wakeup_trace(ctx_trace, data, wakee, current,
69 flags, pc);
99 70
100 atomic_dec(&data->disabled);
101 local_irq_restore(flags); 71 local_irq_restore(flags);
102} 72}
103 73
104static notrace void
105wake_up_callback(void *probe_data, void *call_data,
106 const char *format, va_list *args)
107{
108 struct task_struct *curr;
109 struct task_struct *task;
110 struct rq *__rq;
111
112 if (likely(!tracer_enabled))
113 return;
114
115 /* Skip pid %d state %ld */
116 (void)va_arg(*args, int);
117 (void)va_arg(*args, long);
118 /* now get the meat: "rq %p task %p rq->curr %p" */
119 __rq = va_arg(*args, typeof(__rq));
120 task = va_arg(*args, typeof(task));
121 curr = va_arg(*args, typeof(curr));
122
123 tracing_record_cmdline(task);
124 tracing_record_cmdline(curr);
125
126 wakeup_func(probe_data, __rq, task, curr);
127}
128
129static void sched_switch_reset(struct trace_array *tr) 74static void sched_switch_reset(struct trace_array *tr)
130{ 75{
131 int cpu; 76 int cpu;
@@ -133,67 +78,47 @@ static void sched_switch_reset(struct trace_array *tr)
133 tr->time_start = ftrace_now(tr->cpu); 78 tr->time_start = ftrace_now(tr->cpu);
134 79
135 for_each_online_cpu(cpu) 80 for_each_online_cpu(cpu)
136 tracing_reset(tr->data[cpu]); 81 tracing_reset(tr, cpu);
137} 82}
138 83
139static int tracing_sched_register(void) 84static int tracing_sched_register(void)
140{ 85{
141 int ret; 86 int ret;
142 87
143 ret = marker_probe_register("kernel_sched_wakeup", 88 ret = register_trace_sched_wakeup(probe_sched_wakeup);
144 "pid %d state %ld ## rq %p task %p rq->curr %p",
145 wake_up_callback,
146 &ctx_trace);
147 if (ret) { 89 if (ret) {
148 pr_info("wakeup trace: Couldn't add marker" 90 pr_info("wakeup trace: Couldn't activate tracepoint"
149 " probe to kernel_sched_wakeup\n"); 91 " probe to kernel_sched_wakeup\n");
150 return ret; 92 return ret;
151 } 93 }
152 94
153 ret = marker_probe_register("kernel_sched_wakeup_new", 95 ret = register_trace_sched_wakeup_new(probe_sched_wakeup);
154 "pid %d state %ld ## rq %p task %p rq->curr %p",
155 wake_up_callback,
156 &ctx_trace);
157 if (ret) { 96 if (ret) {
158 pr_info("wakeup trace: Couldn't add marker" 97 pr_info("wakeup trace: Couldn't activate tracepoint"
159 " probe to kernel_sched_wakeup_new\n"); 98 " probe to kernel_sched_wakeup_new\n");
160 goto fail_deprobe; 99 goto fail_deprobe;
161 } 100 }
162 101
163 ret = marker_probe_register("kernel_sched_schedule", 102 ret = register_trace_sched_switch(probe_sched_switch);
164 "prev_pid %d next_pid %d prev_state %ld "
165 "## rq %p prev %p next %p",
166 sched_switch_callback,
167 &ctx_trace);
168 if (ret) { 103 if (ret) {
169 pr_info("sched trace: Couldn't add marker" 104 pr_info("sched trace: Couldn't activate tracepoint"
170 " probe to kernel_sched_schedule\n"); 105 " probe to kernel_sched_schedule\n");
171 goto fail_deprobe_wake_new; 106 goto fail_deprobe_wake_new;
172 } 107 }
173 108
174 return ret; 109 return ret;
175fail_deprobe_wake_new: 110fail_deprobe_wake_new:
176 marker_probe_unregister("kernel_sched_wakeup_new", 111 unregister_trace_sched_wakeup_new(probe_sched_wakeup);
177 wake_up_callback,
178 &ctx_trace);
179fail_deprobe: 112fail_deprobe:
180 marker_probe_unregister("kernel_sched_wakeup", 113 unregister_trace_sched_wakeup(probe_sched_wakeup);
181 wake_up_callback,
182 &ctx_trace);
183 return ret; 114 return ret;
184} 115}
185 116
186static void tracing_sched_unregister(void) 117static void tracing_sched_unregister(void)
187{ 118{
188 marker_probe_unregister("kernel_sched_schedule", 119 unregister_trace_sched_switch(probe_sched_switch);
189 sched_switch_callback, 120 unregister_trace_sched_wakeup_new(probe_sched_wakeup);
190 &ctx_trace); 121 unregister_trace_sched_wakeup(probe_sched_wakeup);
191 marker_probe_unregister("kernel_sched_wakeup_new",
192 wake_up_callback,
193 &ctx_trace);
194 marker_probe_unregister("kernel_sched_wakeup",
195 wake_up_callback,
196 &ctx_trace);
197} 122}
198 123
199static void tracing_start_sched_switch(void) 124static void tracing_start_sched_switch(void)
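
The conversion above replaces the three marker probes with tracepoint probes: the probe now receives typed arguments directly instead of parsing a va_list against a format string, and register_trace_sched_switch() and friends come from <trace/sched.h>. Under the tracepoint API added later in this patch (kernel/tracepoint.c), such a registration helper is assumed to reduce to little more than the sketch below; the exact generated inline is not shown in this diff.

	/* assumed shape of register_trace_sched_switch(); illustrative only */
	static inline int sched_switch_register(void (*probe)(struct rq *rq,
							      struct task_struct *prev,
							      struct task_struct *next))
	{
		return tracepoint_probe_register("sched_switch", (void *)probe);
	}
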
diff --git a/kernel/trace/trace_sched_wakeup.c b/kernel/trace/trace_sched_wakeup.c
index e303ccb62cdf..fe4a252c2363 100644
--- a/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/trace/trace_sched_wakeup.c
@@ -15,7 +15,7 @@
15#include <linux/kallsyms.h> 15#include <linux/kallsyms.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/ftrace.h> 17#include <linux/ftrace.h>
18#include <linux/marker.h> 18#include <trace/sched.h>
19 19
20#include "trace.h" 20#include "trace.h"
21 21
@@ -44,10 +44,12 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
44 long disabled; 44 long disabled;
45 int resched; 45 int resched;
46 int cpu; 46 int cpu;
47 int pc;
47 48
48 if (likely(!wakeup_task)) 49 if (likely(!wakeup_task))
49 return; 50 return;
50 51
52 pc = preempt_count();
51 resched = need_resched(); 53 resched = need_resched();
52 preempt_disable_notrace(); 54 preempt_disable_notrace();
53 55
@@ -70,7 +72,7 @@ wakeup_tracer_call(unsigned long ip, unsigned long parent_ip)
70 if (task_cpu(wakeup_task) != cpu) 72 if (task_cpu(wakeup_task) != cpu)
71 goto unlock; 73 goto unlock;
72 74
73 trace_function(tr, data, ip, parent_ip, flags); 75 trace_function(tr, data, ip, parent_ip, flags, pc);
74 76
75 unlock: 77 unlock:
76 __raw_spin_unlock(&wakeup_lock); 78 __raw_spin_unlock(&wakeup_lock);
@@ -112,17 +114,18 @@ static int report_latency(cycle_t delta)
112} 114}
113 115
114static void notrace 116static void notrace
115wakeup_sched_switch(void *private, void *rq, struct task_struct *prev, 117probe_wakeup_sched_switch(struct rq *rq, struct task_struct *prev,
116 struct task_struct *next) 118 struct task_struct *next)
117{ 119{
118 unsigned long latency = 0, t0 = 0, t1 = 0; 120 unsigned long latency = 0, t0 = 0, t1 = 0;
119 struct trace_array **ptr = private;
120 struct trace_array *tr = *ptr;
121 struct trace_array_cpu *data; 121 struct trace_array_cpu *data;
122 cycle_t T0, T1, delta; 122 cycle_t T0, T1, delta;
123 unsigned long flags; 123 unsigned long flags;
124 long disabled; 124 long disabled;
125 int cpu; 125 int cpu;
126 int pc;
127
128 tracing_record_cmdline(prev);
126 129
127 if (unlikely(!tracer_enabled)) 130 if (unlikely(!tracer_enabled))
128 return; 131 return;
@@ -139,12 +142,14 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
139 if (next != wakeup_task) 142 if (next != wakeup_task)
140 return; 143 return;
141 144
145 pc = preempt_count();
146
142 /* The task we are waiting for is waking up */ 147 /* The task we are waiting for is waking up */
143 data = tr->data[wakeup_cpu]; 148 data = wakeup_trace->data[wakeup_cpu];
144 149
145 /* disable local data, not wakeup_cpu data */ 150 /* disable local data, not wakeup_cpu data */
146 cpu = raw_smp_processor_id(); 151 cpu = raw_smp_processor_id();
147 disabled = atomic_inc_return(&tr->data[cpu]->disabled); 152 disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
148 if (likely(disabled != 1)) 153 if (likely(disabled != 1))
149 goto out; 154 goto out;
150 155
@@ -155,7 +160,7 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
155 if (unlikely(!tracer_enabled || next != wakeup_task)) 160 if (unlikely(!tracer_enabled || next != wakeup_task))
156 goto out_unlock; 161 goto out_unlock;
157 162
158 trace_function(tr, data, CALLER_ADDR1, CALLER_ADDR2, flags); 163 trace_function(wakeup_trace, data, CALLER_ADDR1, CALLER_ADDR2, flags, pc);
159 164
160 /* 165 /*
161 * usecs conversion is slow so we try to delay the conversion 166 * usecs conversion is slow so we try to delay the conversion
@@ -174,39 +179,14 @@ wakeup_sched_switch(void *private, void *rq, struct task_struct *prev,
174 t0 = nsecs_to_usecs(T0); 179 t0 = nsecs_to_usecs(T0);
175 t1 = nsecs_to_usecs(T1); 180 t1 = nsecs_to_usecs(T1);
176 181
177 update_max_tr(tr, wakeup_task, wakeup_cpu); 182 update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu);
178 183
179out_unlock: 184out_unlock:
180 __wakeup_reset(tr); 185 __wakeup_reset(wakeup_trace);
181 __raw_spin_unlock(&wakeup_lock); 186 __raw_spin_unlock(&wakeup_lock);
182 local_irq_restore(flags); 187 local_irq_restore(flags);
183out: 188out:
184 atomic_dec(&tr->data[cpu]->disabled); 189 atomic_dec(&wakeup_trace->data[cpu]->disabled);
185}
186
187static notrace void
188sched_switch_callback(void *probe_data, void *call_data,
189 const char *format, va_list *args)
190{
191 struct task_struct *prev;
192 struct task_struct *next;
193 struct rq *__rq;
194
195 /* skip prev_pid %d next_pid %d prev_state %ld */
196 (void)va_arg(*args, int);
197 (void)va_arg(*args, int);
198 (void)va_arg(*args, long);
199 __rq = va_arg(*args, typeof(__rq));
200 prev = va_arg(*args, typeof(prev));
201 next = va_arg(*args, typeof(next));
202
203 tracing_record_cmdline(prev);
204
205 /*
206 * If tracer_switch_func only points to the local
207 * switch func, it still needs the ptr passed to it.
208 */
209 wakeup_sched_switch(probe_data, __rq, prev, next);
210} 190}
211 191
212static void __wakeup_reset(struct trace_array *tr) 192static void __wakeup_reset(struct trace_array *tr)
@@ -216,7 +196,7 @@ static void __wakeup_reset(struct trace_array *tr)
216 196
217 for_each_possible_cpu(cpu) { 197 for_each_possible_cpu(cpu) {
218 data = tr->data[cpu]; 198 data = tr->data[cpu];
219 tracing_reset(data); 199 tracing_reset(tr, cpu);
220 } 200 }
221 201
222 wakeup_cpu = -1; 202 wakeup_cpu = -1;
@@ -240,19 +220,26 @@ static void wakeup_reset(struct trace_array *tr)
240} 220}
241 221
242static void 222static void
243wakeup_check_start(struct trace_array *tr, struct task_struct *p, 223probe_wakeup(struct rq *rq, struct task_struct *p)
244 struct task_struct *curr)
245{ 224{
246 int cpu = smp_processor_id(); 225 int cpu = smp_processor_id();
247 unsigned long flags; 226 unsigned long flags;
248 long disabled; 227 long disabled;
228 int pc;
229
230 if (likely(!tracer_enabled))
231 return;
232
233 tracing_record_cmdline(p);
234 tracing_record_cmdline(current);
249 235
250 if (likely(!rt_task(p)) || 236 if (likely(!rt_task(p)) ||
251 p->prio >= wakeup_prio || 237 p->prio >= wakeup_prio ||
252 p->prio >= curr->prio) 238 p->prio >= current->prio)
253 return; 239 return;
254 240
255 disabled = atomic_inc_return(&tr->data[cpu]->disabled); 241 pc = preempt_count();
242 disabled = atomic_inc_return(&wakeup_trace->data[cpu]->disabled);
256 if (unlikely(disabled != 1)) 243 if (unlikely(disabled != 1))
257 goto out; 244 goto out;
258 245
@@ -264,7 +251,7 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
264 goto out_locked; 251 goto out_locked;
265 252
266 /* reset the trace */ 253 /* reset the trace */
267 __wakeup_reset(tr); 254 __wakeup_reset(wakeup_trace);
268 255
269 wakeup_cpu = task_cpu(p); 256 wakeup_cpu = task_cpu(p);
270 wakeup_prio = p->prio; 257 wakeup_prio = p->prio;
@@ -274,74 +261,37 @@ wakeup_check_start(struct trace_array *tr, struct task_struct *p,
274 261
275 local_save_flags(flags); 262 local_save_flags(flags);
276 263
277 tr->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu); 264 wakeup_trace->data[wakeup_cpu]->preempt_timestamp = ftrace_now(cpu);
278 trace_function(tr, tr->data[wakeup_cpu], 265 trace_function(wakeup_trace, wakeup_trace->data[wakeup_cpu],
279 CALLER_ADDR1, CALLER_ADDR2, flags); 266 CALLER_ADDR1, CALLER_ADDR2, flags, pc);
280 267
281out_locked: 268out_locked:
282 __raw_spin_unlock(&wakeup_lock); 269 __raw_spin_unlock(&wakeup_lock);
283out: 270out:
284 atomic_dec(&tr->data[cpu]->disabled); 271 atomic_dec(&wakeup_trace->data[cpu]->disabled);
285}
286
287static notrace void
288wake_up_callback(void *probe_data, void *call_data,
289 const char *format, va_list *args)
290{
291 struct trace_array **ptr = probe_data;
292 struct trace_array *tr = *ptr;
293 struct task_struct *curr;
294 struct task_struct *task;
295 struct rq *__rq;
296
297 if (likely(!tracer_enabled))
298 return;
299
300 /* Skip pid %d state %ld */
301 (void)va_arg(*args, int);
302 (void)va_arg(*args, long);
303 /* now get the meat: "rq %p task %p rq->curr %p" */
304 __rq = va_arg(*args, typeof(__rq));
305 task = va_arg(*args, typeof(task));
306 curr = va_arg(*args, typeof(curr));
307
308 tracing_record_cmdline(task);
309 tracing_record_cmdline(curr);
310
311 wakeup_check_start(tr, task, curr);
312} 272}
313 273
314static void start_wakeup_tracer(struct trace_array *tr) 274static void start_wakeup_tracer(struct trace_array *tr)
315{ 275{
316 int ret; 276 int ret;
317 277
318 ret = marker_probe_register("kernel_sched_wakeup", 278 ret = register_trace_sched_wakeup(probe_wakeup);
319 "pid %d state %ld ## rq %p task %p rq->curr %p",
320 wake_up_callback,
321 &wakeup_trace);
322 if (ret) { 279 if (ret) {
323 pr_info("wakeup trace: Couldn't add marker" 280 pr_info("wakeup trace: Couldn't activate tracepoint"
324 " probe to kernel_sched_wakeup\n"); 281 " probe to kernel_sched_wakeup\n");
325 return; 282 return;
326 } 283 }
327 284
328 ret = marker_probe_register("kernel_sched_wakeup_new", 285 ret = register_trace_sched_wakeup_new(probe_wakeup);
329 "pid %d state %ld ## rq %p task %p rq->curr %p",
330 wake_up_callback,
331 &wakeup_trace);
332 if (ret) { 286 if (ret) {
333 pr_info("wakeup trace: Couldn't add marker" 287 pr_info("wakeup trace: Couldn't activate tracepoint"
334 " probe to kernel_sched_wakeup_new\n"); 288 " probe to kernel_sched_wakeup_new\n");
335 goto fail_deprobe; 289 goto fail_deprobe;
336 } 290 }
337 291
338 ret = marker_probe_register("kernel_sched_schedule", 292 ret = register_trace_sched_switch(probe_wakeup_sched_switch);
339 "prev_pid %d next_pid %d prev_state %ld "
340 "## rq %p prev %p next %p",
341 sched_switch_callback,
342 &wakeup_trace);
343 if (ret) { 293 if (ret) {
344 pr_info("sched trace: Couldn't add marker" 294 pr_info("sched trace: Couldn't activate tracepoint"
345 " probe to kernel_sched_schedule\n"); 295 " probe to kernel_sched_schedule\n");
346 goto fail_deprobe_wake_new; 296 goto fail_deprobe_wake_new;
347 } 297 }
@@ -363,28 +313,18 @@ static void start_wakeup_tracer(struct trace_array *tr)
363 313
364 return; 314 return;
365fail_deprobe_wake_new: 315fail_deprobe_wake_new:
366 marker_probe_unregister("kernel_sched_wakeup_new", 316 unregister_trace_sched_wakeup_new(probe_wakeup);
367 wake_up_callback,
368 &wakeup_trace);
369fail_deprobe: 317fail_deprobe:
370 marker_probe_unregister("kernel_sched_wakeup", 318 unregister_trace_sched_wakeup(probe_wakeup);
371 wake_up_callback,
372 &wakeup_trace);
373} 319}
374 320
375static void stop_wakeup_tracer(struct trace_array *tr) 321static void stop_wakeup_tracer(struct trace_array *tr)
376{ 322{
377 tracer_enabled = 0; 323 tracer_enabled = 0;
378 unregister_ftrace_function(&trace_ops); 324 unregister_ftrace_function(&trace_ops);
379 marker_probe_unregister("kernel_sched_schedule", 325 unregister_trace_sched_switch(probe_wakeup_sched_switch);
380 sched_switch_callback, 326 unregister_trace_sched_wakeup_new(probe_wakeup);
381 &wakeup_trace); 327 unregister_trace_sched_wakeup(probe_wakeup);
382 marker_probe_unregister("kernel_sched_wakeup_new",
383 wake_up_callback,
384 &wakeup_trace);
385 marker_probe_unregister("kernel_sched_wakeup",
386 wake_up_callback,
387 &wakeup_trace);
388} 328}
389 329
390static void wakeup_tracer_init(struct trace_array *tr) 330static void wakeup_tracer_init(struct trace_array *tr)
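
The wakeup tracer measures RT wakeup latency in two halves: probe_wakeup() stamps preempt_timestamp when a real-time task of higher priority than current is woken, and probe_wakeup_sched_switch() takes the second timestamp when that task is finally switched in, recording a new maximum via update_max_tr() when the delta beats the previous one. A condensed view of that arithmetic, with an illustrative helper name:

	static void report_wakeup_latency(cycle_t T0, cycle_t T1)
	{
		cycle_t delta = T1 - T0;	/* ns from wakeup to switch-in */

		if (report_latency(delta))
			printk(KERN_INFO "new max wakeup latency: %lu usecs\n",
			       nsecs_to_usecs(delta));
	}
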
diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c
index 0911b7e073bf..09cf230d7eca 100644
--- a/kernel/trace/trace_selftest.c
+++ b/kernel/trace/trace_selftest.c
@@ -9,65 +9,29 @@ static inline int trace_valid_entry(struct trace_entry *entry)
9 case TRACE_FN: 9 case TRACE_FN:
10 case TRACE_CTX: 10 case TRACE_CTX:
11 case TRACE_WAKE: 11 case TRACE_WAKE:
12 case TRACE_CONT:
12 case TRACE_STACK: 13 case TRACE_STACK:
14 case TRACE_PRINT:
13 case TRACE_SPECIAL: 15 case TRACE_SPECIAL:
14 return 1; 16 return 1;
15 } 17 }
16 return 0; 18 return 0;
17} 19}
18 20
19static int 21static int trace_test_buffer_cpu(struct trace_array *tr, int cpu)
20trace_test_buffer_cpu(struct trace_array *tr, struct trace_array_cpu *data)
21{ 22{
22 struct trace_entry *entries; 23 struct ring_buffer_event *event;
23 struct page *page; 24 struct trace_entry *entry;
24 int idx = 0;
25 int i;
26 25
27 BUG_ON(list_empty(&data->trace_pages)); 26 while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
28 page = list_entry(data->trace_pages.next, struct page, lru); 27 entry = ring_buffer_event_data(event);
29 entries = page_address(page);
30 28
31 check_pages(data); 29 if (!trace_valid_entry(entry)) {
32 if (head_page(data) != entries)
33 goto failed;
34
35 /*
36 * The starting trace buffer always has valid elements,
37 * if any element exists.
38 */
39 entries = head_page(data);
40
41 for (i = 0; i < tr->entries; i++) {
42
43 if (i < data->trace_idx && !trace_valid_entry(&entries[idx])) {
44 printk(KERN_CONT ".. invalid entry %d ", 30 printk(KERN_CONT ".. invalid entry %d ",
45 entries[idx].type); 31 entry->type);
46 goto failed; 32 goto failed;
47 } 33 }
48
49 idx++;
50 if (idx >= ENTRIES_PER_PAGE) {
51 page = virt_to_page(entries);
52 if (page->lru.next == &data->trace_pages) {
53 if (i != tr->entries - 1) {
54 printk(KERN_CONT ".. entries buffer mismatch");
55 goto failed;
56 }
57 } else {
58 page = list_entry(page->lru.next, struct page, lru);
59 entries = page_address(page);
60 }
61 idx = 0;
62 }
63 } 34 }
64
65 page = virt_to_page(entries);
66 if (page->lru.next != &data->trace_pages) {
67 printk(KERN_CONT ".. too many entries");
68 goto failed;
69 }
70
71 return 0; 35 return 0;
72 36
73 failed: 37 failed:
@@ -89,13 +53,11 @@ static int trace_test_buffer(struct trace_array *tr, unsigned long *count)
89 /* Don't allow flipping of max traces now */ 53 /* Don't allow flipping of max traces now */
90 raw_local_irq_save(flags); 54 raw_local_irq_save(flags);
91 __raw_spin_lock(&ftrace_max_lock); 55 __raw_spin_lock(&ftrace_max_lock);
92 for_each_possible_cpu(cpu) {
93 if (!head_page(tr->data[cpu]))
94 continue;
95 56
96 cnt += tr->data[cpu]->trace_idx; 57 cnt = ring_buffer_entries(tr->buffer);
97 58
98 ret = trace_test_buffer_cpu(tr, tr->data[cpu]); 59 for_each_possible_cpu(cpu) {
60 ret = trace_test_buffer_cpu(tr, cpu);
99 if (ret) 61 if (ret)
100 break; 62 break;
101 } 63 }
@@ -120,11 +82,11 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
120 struct trace_array *tr, 82 struct trace_array *tr,
121 int (*func)(void)) 83 int (*func)(void))
122{ 84{
123 unsigned long count;
124 int ret;
125 int save_ftrace_enabled = ftrace_enabled; 85 int save_ftrace_enabled = ftrace_enabled;
126 int save_tracer_enabled = tracer_enabled; 86 int save_tracer_enabled = tracer_enabled;
87 unsigned long count;
127 char *func_name; 88 char *func_name;
89 int ret;
128 90
129 /* The ftrace test PASSED */ 91 /* The ftrace test PASSED */
130 printk(KERN_CONT "PASSED\n"); 92 printk(KERN_CONT "PASSED\n");
@@ -157,6 +119,7 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
157 /* enable tracing */ 119 /* enable tracing */
158 tr->ctrl = 1; 120 tr->ctrl = 1;
159 trace->init(tr); 121 trace->init(tr);
122
160 /* Sleep for a 1/10 of a second */ 123 /* Sleep for a 1/10 of a second */
161 msleep(100); 124 msleep(100);
162 125
@@ -212,10 +175,10 @@ int trace_selftest_startup_dynamic_tracing(struct tracer *trace,
212int 175int
213trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) 176trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr)
214{ 177{
215 unsigned long count;
216 int ret;
217 int save_ftrace_enabled = ftrace_enabled; 178 int save_ftrace_enabled = ftrace_enabled;
218 int save_tracer_enabled = tracer_enabled; 179 int save_tracer_enabled = tracer_enabled;
180 unsigned long count;
181 int ret;
219 182
220 /* make sure msleep has been recorded */ 183 /* make sure msleep has been recorded */
221 msleep(1); 184 msleep(1);
@@ -415,6 +378,15 @@ trace_selftest_startup_preemptirqsoff(struct tracer *trace, struct trace_array *
415} 378}
416#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */ 379#endif /* CONFIG_IRQSOFF_TRACER && CONFIG_PREEMPT_TRACER */
417 380
381#ifdef CONFIG_NOP_TRACER
382int
383trace_selftest_startup_nop(struct tracer *trace, struct trace_array *tr)
384{
385 /* What could possibly go wrong? */
386 return 0;
387}
388#endif
389
418#ifdef CONFIG_SCHED_TRACER 390#ifdef CONFIG_SCHED_TRACER
419static int trace_wakeup_test_thread(void *data) 391static int trace_wakeup_test_thread(void *data)
420{ 392{
@@ -486,6 +458,9 @@ trace_selftest_startup_wakeup(struct tracer *trace, struct trace_array *tr)
486 458
487 wake_up_process(p); 459 wake_up_process(p);
488 460
461 /* give a little time to let the thread wake up */
462 msleep(100);
463
489 /* stop the tracing. */ 464 /* stop the tracing. */
490 tr->ctrl = 0; 465 tr->ctrl = 0;
491 trace->ctrl_update(tr); 466 trace->ctrl_update(tr);
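
trace_test_buffer_cpu() now walks the new ring buffer with a consuming iterator instead of chasing trace pages by hand: each ring_buffer_consume() call returns, and removes, the next event for that CPU, and ring_buffer_event_data() yields the trace_entry to validate. The same loop shape works for any per-CPU scan; the helper below, which counts entries of one type, is illustrative only.

	static unsigned long count_entries_of_type(struct trace_array *tr,
						   int cpu, int type)
	{
		struct ring_buffer_event *event;
		struct trace_entry *entry;
		unsigned long cnt = 0;

		/* consuming read: each event is removed as it is returned */
		while ((event = ring_buffer_consume(tr->buffer, cpu, NULL))) {
			entry = ring_buffer_event_data(event);
			if (entry->type == type)
				cnt++;
		}
		return cnt;
	}
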
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
new file mode 100644
index 000000000000..74c5d9a3afae
--- /dev/null
+++ b/kernel/trace/trace_stack.c
@@ -0,0 +1,310 @@
1/*
2 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
3 *
4 */
5#include <linux/stacktrace.h>
6#include <linux/kallsyms.h>
7#include <linux/seq_file.h>
8#include <linux/spinlock.h>
9#include <linux/uaccess.h>
10#include <linux/debugfs.h>
11#include <linux/ftrace.h>
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/fs.h>
15#include "trace.h"
16
17#define STACK_TRACE_ENTRIES 500
18
19static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] =
20 { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX };
21static unsigned stack_dump_index[STACK_TRACE_ENTRIES];
22
23static struct stack_trace max_stack_trace = {
24 .max_entries = STACK_TRACE_ENTRIES,
25 .entries = stack_dump_trace,
26};
27
28static unsigned long max_stack_size;
29static raw_spinlock_t max_stack_lock =
30 (raw_spinlock_t)__RAW_SPIN_LOCK_UNLOCKED;
31
32static int stack_trace_disabled __read_mostly;
33static DEFINE_PER_CPU(int, trace_active);
34
35static inline void check_stack(void)
36{
37 unsigned long this_size, flags;
38 unsigned long *p, *top, *start;
39 int i;
40
41 this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1);
42 this_size = THREAD_SIZE - this_size;
43
44 if (this_size <= max_stack_size)
45 return;
46
47 raw_local_irq_save(flags);
48 __raw_spin_lock(&max_stack_lock);
49
50 /* a race could have already updated it */
51 if (this_size <= max_stack_size)
52 goto out;
53
54 max_stack_size = this_size;
55
56 max_stack_trace.nr_entries = 0;
57 max_stack_trace.skip = 3;
58
59 save_stack_trace(&max_stack_trace);
60
61 /*
62 * Now find where in the stack these are.
63 */
64 i = 0;
65 start = &this_size;
66 top = (unsigned long *)
67 (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);
68
69 /*
70 * Loop through all the entries. One of the entries may
71 * for some reason be missed on the stack, so we may
72 * have to account for them. If they are all there, this
73 * loop will only happen once. This code only takes place
74 * on a new max, so it is far from a fast path.
75 */
76 while (i < max_stack_trace.nr_entries) {
77
78 stack_dump_index[i] = this_size;
79 p = start;
80
81 for (; p < top && i < max_stack_trace.nr_entries; p++) {
82 if (*p == stack_dump_trace[i]) {
83 this_size = stack_dump_index[i++] =
84 (top - p) * sizeof(unsigned long);
85 /* Start the search from here */
86 start = p + 1;
87 }
88 }
89
90 i++;
91 }
92
93 out:
94 __raw_spin_unlock(&max_stack_lock);
95 raw_local_irq_restore(flags);
96}
97
98static void
99stack_trace_call(unsigned long ip, unsigned long parent_ip)
100{
101 int cpu, resched;
102
103 if (unlikely(!ftrace_enabled || stack_trace_disabled))
104 return;
105
106 resched = need_resched();
107 preempt_disable_notrace();
108
109 cpu = raw_smp_processor_id();
110 /* no atomic needed, we only modify this variable by this cpu */
111 if (per_cpu(trace_active, cpu)++ != 0)
112 goto out;
113
114 check_stack();
115
116 out:
117 per_cpu(trace_active, cpu)--;
118 /* prevent recursion in schedule */
119 if (resched)
120 preempt_enable_no_resched_notrace();
121 else
122 preempt_enable_notrace();
123}
124
125static struct ftrace_ops trace_ops __read_mostly =
126{
127 .func = stack_trace_call,
128};
129
130static ssize_t
131stack_max_size_read(struct file *filp, char __user *ubuf,
132 size_t count, loff_t *ppos)
133{
134 unsigned long *ptr = filp->private_data;
135 char buf[64];
136 int r;
137
138 r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
139 if (r > sizeof(buf))
140 r = sizeof(buf);
141 return simple_read_from_buffer(ubuf, count, ppos, buf, r);
142}
143
144static ssize_t
145stack_max_size_write(struct file *filp, const char __user *ubuf,
146 size_t count, loff_t *ppos)
147{
148 long *ptr = filp->private_data;
149 unsigned long val, flags;
150 char buf[64];
151 int ret;
152
153 if (count >= sizeof(buf))
154 return -EINVAL;
155
156 if (copy_from_user(&buf, ubuf, count))
157 return -EFAULT;
158
159 buf[count] = 0;
160
161 ret = strict_strtoul(buf, 10, &val);
162 if (ret < 0)
163 return ret;
164
165 raw_local_irq_save(flags);
166 __raw_spin_lock(&max_stack_lock);
167 *ptr = val;
168 __raw_spin_unlock(&max_stack_lock);
169 raw_local_irq_restore(flags);
170
171 return count;
172}
173
174static struct file_operations stack_max_size_fops = {
175 .open = tracing_open_generic,
176 .read = stack_max_size_read,
177 .write = stack_max_size_write,
178};
179
180static void *
181t_next(struct seq_file *m, void *v, loff_t *pos)
182{
183 long i = (long)m->private;
184
185 (*pos)++;
186
187 i++;
188
189 if (i >= max_stack_trace.nr_entries ||
190 stack_dump_trace[i] == ULONG_MAX)
191 return NULL;
192
193 m->private = (void *)i;
194
195 return &m->private;
196}
197
198static void *t_start(struct seq_file *m, loff_t *pos)
199{
200 void *t = &m->private;
201 loff_t l = 0;
202
203 local_irq_disable();
204 __raw_spin_lock(&max_stack_lock);
205
206 for (; t && l < *pos; t = t_next(m, t, &l))
207 ;
208
209 return t;
210}
211
212static void t_stop(struct seq_file *m, void *p)
213{
214 __raw_spin_unlock(&max_stack_lock);
215 local_irq_enable();
216}
217
218static int trace_lookup_stack(struct seq_file *m, long i)
219{
220 unsigned long addr = stack_dump_trace[i];
221#ifdef CONFIG_KALLSYMS
222 char str[KSYM_SYMBOL_LEN];
223
224 sprint_symbol(str, addr);
225
226 return seq_printf(m, "%s\n", str);
227#else
228 return seq_printf(m, "%p\n", (void*)addr);
229#endif
230}
231
232static int t_show(struct seq_file *m, void *v)
233{
234 long i = *(long *)v;
235 int size;
236
237 if (i < 0) {
238 seq_printf(m, " Depth Size Location"
239 " (%d entries)\n"
240 " ----- ---- --------\n",
241 max_stack_trace.nr_entries);
242 return 0;
243 }
244
245 if (i >= max_stack_trace.nr_entries ||
246 stack_dump_trace[i] == ULONG_MAX)
247 return 0;
248
249 if (i+1 == max_stack_trace.nr_entries ||
250 stack_dump_trace[i+1] == ULONG_MAX)
251 size = stack_dump_index[i];
252 else
253 size = stack_dump_index[i] - stack_dump_index[i+1];
254
255 seq_printf(m, "%3ld) %8d %5d ", i, stack_dump_index[i], size);
256
257 trace_lookup_stack(m, i);
258
259 return 0;
260}
261
262static struct seq_operations stack_trace_seq_ops = {
263 .start = t_start,
264 .next = t_next,
265 .stop = t_stop,
266 .show = t_show,
267};
268
269static int stack_trace_open(struct inode *inode, struct file *file)
270{
271 int ret;
272
273 ret = seq_open(file, &stack_trace_seq_ops);
274 if (!ret) {
275 struct seq_file *m = file->private_data;
276 m->private = (void *)-1;
277 }
278
279 return ret;
280}
281
282static struct file_operations stack_trace_fops = {
283 .open = stack_trace_open,
284 .read = seq_read,
285 .llseek = seq_lseek,
286};
287
288static __init int stack_trace_init(void)
289{
290 struct dentry *d_tracer;
291 struct dentry *entry;
292
293 d_tracer = tracing_init_dentry();
294
295 entry = debugfs_create_file("stack_max_size", 0644, d_tracer,
296 &max_stack_size, &stack_max_size_fops);
297 if (!entry)
298 pr_warning("Could not create debugfs 'stack_max_size' entry\n");
299
300 entry = debugfs_create_file("stack_trace", 0444, d_tracer,
301 NULL, &stack_trace_fops);
302 if (!entry)
303 pr_warning("Could not create debugfs 'stack_trace' entry\n");
304
305 register_ftrace_function(&trace_ops);
306
307 return 0;
308}
309
310device_initcall(stack_trace_init);
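
In the stack tracer above, stack_dump_index[i] records how far below the top of the stack entry i's return address sits, so t_show() prints it as the Depth column and derives the Size column from the difference between consecutive depths, with the last entry keeping its full depth. For example, with illustrative recorded depths of 480, 120 and 48 bytes, the reported sizes would be 360, 72 and 48. The same derivation as a stand-alone helper (not part of the patch):

	static unsigned frame_size(const unsigned *depth, int i, int nr_entries)
	{
		if (i + 1 == nr_entries)
			return depth[i];		/* last entry: full depth */
		return depth[i] - depth[i + 1];		/* one frame's footprint */
	}
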
diff --git a/kernel/trace/trace_sysprof.c b/kernel/trace/trace_sysprof.c
index db58fb66a135..9587d3bcba55 100644
--- a/kernel/trace/trace_sysprof.c
+++ b/kernel/trace/trace_sysprof.c
@@ -241,7 +241,7 @@ static void stack_reset(struct trace_array *tr)
241 tr->time_start = ftrace_now(tr->cpu); 241 tr->time_start = ftrace_now(tr->cpu);
242 242
243 for_each_online_cpu(cpu) 243 for_each_online_cpu(cpu)
244 tracing_reset(tr->data[cpu]); 244 tracing_reset(tr, cpu);
245} 245}
246 246
247static void start_stack_trace(struct trace_array *tr) 247static void start_stack_trace(struct trace_array *tr)
diff --git a/kernel/tracepoint.c b/kernel/tracepoint.c
new file mode 100644
index 000000000000..f2b7c28a4708
--- /dev/null
+++ b/kernel/tracepoint.c
@@ -0,0 +1,477 @@
1/*
2 * Copyright (C) 2008 Mathieu Desnoyers
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 */
18#include <linux/module.h>
19#include <linux/mutex.h>
20#include <linux/types.h>
21#include <linux/jhash.h>
22#include <linux/list.h>
23#include <linux/rcupdate.h>
24#include <linux/tracepoint.h>
25#include <linux/err.h>
26#include <linux/slab.h>
27
28extern struct tracepoint __start___tracepoints[];
29extern struct tracepoint __stop___tracepoints[];
30
31/* Set to 1 to enable tracepoint debug output */
32static const int tracepoint_debug;
33
34/*
35 * tracepoints_mutex nests inside module_mutex. Tracepoints mutex protects the
36 * builtin and module tracepoints and the hash table.
37 */
38static DEFINE_MUTEX(tracepoints_mutex);
39
40/*
41 * Tracepoint hash table, containing the active tracepoints.
42 * Protected by tracepoints_mutex.
43 */
44#define TRACEPOINT_HASH_BITS 6
45#define TRACEPOINT_TABLE_SIZE (1 << TRACEPOINT_HASH_BITS)
46
47/*
 48 * Note about RCU:
 49 * It is used to delay the freeing of a multiple-probes array until a quiescent
50 * state is reached.
51 * Tracepoint entries modifications are protected by the tracepoints_mutex.
52 */
53struct tracepoint_entry {
54 struct hlist_node hlist;
55 void **funcs;
56 int refcount; /* Number of times armed. 0 if disarmed. */
57 struct rcu_head rcu;
58 void *oldptr;
59 unsigned char rcu_pending:1;
60 char name[0];
61};
62
63static struct hlist_head tracepoint_table[TRACEPOINT_TABLE_SIZE];
64
65static void free_old_closure(struct rcu_head *head)
66{
67 struct tracepoint_entry *entry = container_of(head,
68 struct tracepoint_entry, rcu);
69 kfree(entry->oldptr);
70 /* Make sure we free the data before setting the pending flag to 0 */
71 smp_wmb();
72 entry->rcu_pending = 0;
73}
74
75static void tracepoint_entry_free_old(struct tracepoint_entry *entry, void *old)
76{
77 if (!old)
78 return;
79 entry->oldptr = old;
80 entry->rcu_pending = 1;
81 /* write rcu_pending before calling the RCU callback */
82 smp_wmb();
83 call_rcu_sched(&entry->rcu, free_old_closure);
84}
85
86static void debug_print_probes(struct tracepoint_entry *entry)
87{
88 int i;
89
90 if (!tracepoint_debug)
91 return;
92
93 for (i = 0; entry->funcs[i]; i++)
94 printk(KERN_DEBUG "Probe %d : %p\n", i, entry->funcs[i]);
95}
96
97static void *
98tracepoint_entry_add_probe(struct tracepoint_entry *entry, void *probe)
99{
100 int nr_probes = 0;
101 void **old, **new;
102
103 WARN_ON(!probe);
104
105 debug_print_probes(entry);
106 old = entry->funcs;
107 if (old) {
108 /* (N -> N+1), (N != 0, 1) probes */
109 for (nr_probes = 0; old[nr_probes]; nr_probes++)
110 if (old[nr_probes] == probe)
111 return ERR_PTR(-EEXIST);
112 }
113 /* + 2 : one for new probe, one for NULL func */
114 new = kzalloc((nr_probes + 2) * sizeof(void *), GFP_KERNEL);
115 if (new == NULL)
116 return ERR_PTR(-ENOMEM);
117 if (old)
118 memcpy(new, old, nr_probes * sizeof(void *));
119 new[nr_probes] = probe;
120 entry->refcount = nr_probes + 1;
121 entry->funcs = new;
122 debug_print_probes(entry);
123 return old;
124}
125
126static void *
127tracepoint_entry_remove_probe(struct tracepoint_entry *entry, void *probe)
128{
129 int nr_probes = 0, nr_del = 0, i;
130 void **old, **new;
131
132 old = entry->funcs;
133
134 debug_print_probes(entry);
135 /* (N -> M), (N > 1, M >= 0) probes */
136 for (nr_probes = 0; old[nr_probes]; nr_probes++) {
137 if ((!probe || old[nr_probes] == probe))
138 nr_del++;
139 }
140
141 if (nr_probes - nr_del == 0) {
142 /* N -> 0, (N > 1) */
143 entry->funcs = NULL;
144 entry->refcount = 0;
145 debug_print_probes(entry);
146 return old;
147 } else {
148 int j = 0;
149 /* N -> M, (N > 1, M > 0) */
150 /* + 1 for NULL */
151 new = kzalloc((nr_probes - nr_del + 1)
152 * sizeof(void *), GFP_KERNEL);
153 if (new == NULL)
154 return ERR_PTR(-ENOMEM);
155 for (i = 0; old[i]; i++)
156 if ((probe && old[i] != probe))
157 new[j++] = old[i];
158 entry->refcount = nr_probes - nr_del;
159 entry->funcs = new;
160 }
161 debug_print_probes(entry);
162 return old;
163}
164
165/*
166 * Get tracepoint if the tracepoint is present in the tracepoint hash table.
167 * Must be called with tracepoints_mutex held.
168 * Returns NULL if not present.
169 */
170static struct tracepoint_entry *get_tracepoint(const char *name)
171{
172 struct hlist_head *head;
173 struct hlist_node *node;
174 struct tracepoint_entry *e;
175 u32 hash = jhash(name, strlen(name), 0);
176
177 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
178 hlist_for_each_entry(e, node, head, hlist) {
179 if (!strcmp(name, e->name))
180 return e;
181 }
182 return NULL;
183}
184
185/*
186 * Add the tracepoint to the tracepoint hash table. Must be called with
187 * tracepoints_mutex held.
188 */
189static struct tracepoint_entry *add_tracepoint(const char *name)
190{
191 struct hlist_head *head;
192 struct hlist_node *node;
193 struct tracepoint_entry *e;
194 size_t name_len = strlen(name) + 1;
195 u32 hash = jhash(name, name_len-1, 0);
196
197 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
198 hlist_for_each_entry(e, node, head, hlist) {
199 if (!strcmp(name, e->name)) {
200 printk(KERN_NOTICE
201 "tracepoint %s busy\n", name);
202 return ERR_PTR(-EEXIST); /* Already there */
203 }
204 }
205 /*
206 * Using kmalloc here to allocate a variable length element. Could
207 * cause some memory fragmentation if overused.
208 */
209 e = kmalloc(sizeof(struct tracepoint_entry) + name_len, GFP_KERNEL);
210 if (!e)
211 return ERR_PTR(-ENOMEM);
212 memcpy(&e->name[0], name, name_len);
213 e->funcs = NULL;
214 e->refcount = 0;
215 e->rcu_pending = 0;
216 hlist_add_head(&e->hlist, head);
217 return e;
218}
219
220/*
221 * Remove the tracepoint from the tracepoint hash table. Must be called with
 222 * tracepoints_mutex held.
223 */
224static int remove_tracepoint(const char *name)
225{
226 struct hlist_head *head;
227 struct hlist_node *node;
228 struct tracepoint_entry *e;
229 int found = 0;
230 size_t len = strlen(name) + 1;
231 u32 hash = jhash(name, len-1, 0);
232
233 head = &tracepoint_table[hash & (TRACEPOINT_TABLE_SIZE - 1)];
234 hlist_for_each_entry(e, node, head, hlist) {
235 if (!strcmp(name, e->name)) {
236 found = 1;
237 break;
238 }
239 }
240 if (!found)
241 return -ENOENT;
242 if (e->refcount)
243 return -EBUSY;
244 hlist_del(&e->hlist);
245 /* Make sure the call_rcu_sched has been executed */
246 if (e->rcu_pending)
247 rcu_barrier_sched();
248 kfree(e);
249 return 0;
250}
251
252/*
253 * Sets the probe callback corresponding to one tracepoint.
254 */
255static void set_tracepoint(struct tracepoint_entry **entry,
256 struct tracepoint *elem, int active)
257{
258 WARN_ON(strcmp((*entry)->name, elem->name) != 0);
259
260 /*
261 * rcu_assign_pointer has a smp_wmb() which makes sure that the new
262 * probe callbacks array is consistent before setting a pointer to it.
263 * This array is referenced by __DO_TRACE from
264 * include/linux/tracepoints.h. A matching smp_read_barrier_depends()
265 * is used.
266 */
267 rcu_assign_pointer(elem->funcs, (*entry)->funcs);
268 elem->state = active;
269}
270
271/*
272 * Disable a tracepoint and its probe callback.
273 * Note: only waiting an RCU period after setting elem->call to the empty
 274 * function ensures that the original callback is not used anymore. This is
 275 * ensured by the preempt_disable around the call site.
276 */
277static void disable_tracepoint(struct tracepoint *elem)
278{
279 elem->state = 0;
280}
281
282/**
283 * tracepoint_update_probe_range - Update a probe range
284 * @begin: beginning of the range
285 * @end: end of the range
286 *
287 * Updates the probe callback corresponding to a range of tracepoints.
288 */
289void tracepoint_update_probe_range(struct tracepoint *begin,
290 struct tracepoint *end)
291{
292 struct tracepoint *iter;
293 struct tracepoint_entry *mark_entry;
294
295 mutex_lock(&tracepoints_mutex);
296 for (iter = begin; iter < end; iter++) {
297 mark_entry = get_tracepoint(iter->name);
298 if (mark_entry) {
299 set_tracepoint(&mark_entry, iter,
300 !!mark_entry->refcount);
301 } else {
302 disable_tracepoint(iter);
303 }
304 }
305 mutex_unlock(&tracepoints_mutex);
306}
307
308/*
309 * Update probes, removing the faulty probes.
310 */
311static void tracepoint_update_probes(void)
312{
313 /* Core kernel tracepoints */
314 tracepoint_update_probe_range(__start___tracepoints,
315 __stop___tracepoints);
316 /* tracepoints in modules. */
317 module_update_tracepoints();
318}
319
320/**
321 * tracepoint_probe_register - Connect a probe to a tracepoint
322 * @name: tracepoint name
323 * @probe: probe handler
324 *
325 * Returns 0 if ok, error value on error.
326 * The probe address must at least be aligned on the architecture pointer size.
327 */
328int tracepoint_probe_register(const char *name, void *probe)
329{
330 struct tracepoint_entry *entry;
331 int ret = 0;
332 void *old;
333
334 mutex_lock(&tracepoints_mutex);
335 entry = get_tracepoint(name);
336 if (!entry) {
337 entry = add_tracepoint(name);
338 if (IS_ERR(entry)) {
339 ret = PTR_ERR(entry);
340 goto end;
341 }
342 }
343 /*
344 * If we detect that a call_rcu_sched is pending for this tracepoint,
345 * make sure it's executed now.
346 */
347 if (entry->rcu_pending)
348 rcu_barrier_sched();
349 old = tracepoint_entry_add_probe(entry, probe);
350 if (IS_ERR(old)) {
351 ret = PTR_ERR(old);
352 goto end;
353 }
354 mutex_unlock(&tracepoints_mutex);
355 tracepoint_update_probes(); /* may update entry */
356 mutex_lock(&tracepoints_mutex);
357 entry = get_tracepoint(name);
358 WARN_ON(!entry);
359 if (entry->rcu_pending)
360 rcu_barrier_sched();
361 tracepoint_entry_free_old(entry, old);
362end:
363 mutex_unlock(&tracepoints_mutex);
364 return ret;
365}
366EXPORT_SYMBOL_GPL(tracepoint_probe_register);
367
368/**
369 * tracepoint_probe_unregister - Disconnect a probe from a tracepoint
370 * @name: tracepoint name
371 * @probe: probe function pointer
372 *
373 * We do not need to call a synchronize_sched to make sure the probes have
374 * finished running before doing a module unload, because the module unload
 375 * itself uses stop_machine(), which ensures that every preempt-disabled section
 376 * has finished.
377 */
378int tracepoint_probe_unregister(const char *name, void *probe)
379{
380 struct tracepoint_entry *entry;
381 void *old;
382 int ret = -ENOENT;
383
384 mutex_lock(&tracepoints_mutex);
385 entry = get_tracepoint(name);
386 if (!entry)
387 goto end;
388 if (entry->rcu_pending)
389 rcu_barrier_sched();
390 old = tracepoint_entry_remove_probe(entry, probe);
391 mutex_unlock(&tracepoints_mutex);
392 tracepoint_update_probes(); /* may update entry */
393 mutex_lock(&tracepoints_mutex);
394 entry = get_tracepoint(name);
395 if (!entry)
396 goto end;
397 if (entry->rcu_pending)
398 rcu_barrier_sched();
399 tracepoint_entry_free_old(entry, old);
400 remove_tracepoint(name); /* Ignore busy error message */
401 ret = 0;
402end:
403 mutex_unlock(&tracepoints_mutex);
404 return ret;
405}
406EXPORT_SYMBOL_GPL(tracepoint_probe_unregister);
407
408/**
 409 * tracepoint_get_iter_range - Get the next tracepoint in a range.
 410 * @tracepoint: current tracepoint (in), next tracepoint (out)
411 * @begin: beginning of the range
412 * @end: end of the range
413 *
414 * Returns whether a next tracepoint has been found (1) or not (0).
415 * Will return the first tracepoint in the range if the input tracepoint is
416 * NULL.
417 */
418int tracepoint_get_iter_range(struct tracepoint **tracepoint,
419 struct tracepoint *begin, struct tracepoint *end)
420{
421 if (!*tracepoint && begin != end) {
422 *tracepoint = begin;
423 return 1;
424 }
425 if (*tracepoint >= begin && *tracepoint < end)
426 return 1;
427 return 0;
428}
429EXPORT_SYMBOL_GPL(tracepoint_get_iter_range);
430
431static void tracepoint_get_iter(struct tracepoint_iter *iter)
432{
433 int found = 0;
434
435 /* Core kernel tracepoints */
436 if (!iter->module) {
437 found = tracepoint_get_iter_range(&iter->tracepoint,
438 __start___tracepoints, __stop___tracepoints);
439 if (found)
440 goto end;
441 }
442 /* tracepoints in modules. */
443 found = module_get_iter_tracepoints(iter);
444end:
445 if (!found)
446 tracepoint_iter_reset(iter);
447}
448
449void tracepoint_iter_start(struct tracepoint_iter *iter)
450{
451 tracepoint_get_iter(iter);
452}
453EXPORT_SYMBOL_GPL(tracepoint_iter_start);
454
455void tracepoint_iter_next(struct tracepoint_iter *iter)
456{
457 iter->tracepoint++;
458 /*
459 * iter->tracepoint may be invalid because we blindly incremented it.
460 * Make sure it is valid by marshalling on the tracepoints, getting the
461 * tracepoints from following modules if necessary.
462 */
463 tracepoint_get_iter(iter);
464}
465EXPORT_SYMBOL_GPL(tracepoint_iter_next);
466
467void tracepoint_iter_stop(struct tracepoint_iter *iter)
468{
469}
470EXPORT_SYMBOL_GPL(tracepoint_iter_stop);
471
472void tracepoint_iter_reset(struct tracepoint_iter *iter)
473{
474 iter->module = NULL;
475 iter->tracepoint = NULL;
476}
477EXPORT_SYMBOL_GPL(tracepoint_iter_reset);
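
kernel/tracepoint.c above provides the generic probe management used by the sched tracers earlier in this patch. A probe is a function whose prototype matches the tracepoint's declaration and which is attached and detached by name; the sketch below shows the pattern against a hypothetical tracepoint "subsys_event" assumed to take a single int (neither the name nor the prototype comes from this patch).

	/* hypothetical probe for a tracepoint declared as subsys_event(int arg) */
	static void subsys_event_probe(int arg)
	{
		/* runs from the instrumented site with preemption disabled */
	}

	static int __init subsys_probe_init(void)
	{
		return tracepoint_probe_register("subsys_event",
						 (void *)subsys_event_probe);
	}

	static void __exit subsys_probe_exit(void)
	{
		tracepoint_probe_unregister("subsys_event",
					    (void *)subsys_event_probe);
	}

	module_init(subsys_probe_init);
	module_exit(subsys_probe_exit);

Note that tracepoint_probe_unregister() takes tracepoints_mutex and may block in rcu_barrier_sched() while old probe arrays are reclaimed, so it must not be called from atomic context.
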
diff --git a/kernel/user.c b/kernel/user.c
index 865ecf57a096..39d6159fae43 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -169,7 +169,7 @@ static ssize_t cpu_rt_runtime_show(struct kobject *kobj,
169{ 169{
170 struct user_struct *up = container_of(kobj, struct user_struct, kobj); 170 struct user_struct *up = container_of(kobj, struct user_struct, kobj);
171 171
172 return sprintf(buf, "%lu\n", sched_group_rt_runtime(up->tg)); 172 return sprintf(buf, "%ld\n", sched_group_rt_runtime(up->tg));
173} 173}
174 174
175static ssize_t cpu_rt_runtime_store(struct kobject *kobj, 175static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
@@ -180,7 +180,7 @@ static ssize_t cpu_rt_runtime_store(struct kobject *kobj,
180 unsigned long rt_runtime; 180 unsigned long rt_runtime;
181 int rc; 181 int rc;
182 182
183 sscanf(buf, "%lu", &rt_runtime); 183 sscanf(buf, "%ld", &rt_runtime);
184 184
185 rc = sched_group_set_rt_runtime(up->tg, rt_runtime); 185 rc = sched_group_set_rt_runtime(up->tg, rt_runtime);
186 186
diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
index 4ab9659d269e..3b34b3545936 100644
--- a/kernel/utsname_sysctl.c
+++ b/kernel/utsname_sysctl.c
@@ -60,7 +60,7 @@ static int proc_do_uts_string(ctl_table *table, int write, struct file *filp,
60 60
61#ifdef CONFIG_SYSCTL_SYSCALL 61#ifdef CONFIG_SYSCTL_SYSCALL
62/* The generic string strategy routine: */ 62/* The generic string strategy routine: */
63static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen, 63static int sysctl_uts_string(ctl_table *table,
64 void __user *oldval, size_t __user *oldlenp, 64 void __user *oldval, size_t __user *oldlenp,
65 void __user *newval, size_t newlen) 65 void __user *newval, size_t newlen)
66{ 66{
@@ -69,8 +69,7 @@ static int sysctl_uts_string(ctl_table *table, int __user *name, int nlen,
69 write = newval && newlen; 69 write = newval && newlen;
70 memcpy(&uts_table, table, sizeof(uts_table)); 70 memcpy(&uts_table, table, sizeof(uts_table));
71 uts_table.data = get_uts(table, write); 71 uts_table.data = get_uts(table, write);
72 r = sysctl_string(&uts_table, name, nlen, 72 r = sysctl_string(&uts_table, oldval, oldlenp, newval, newlen);
73 oldval, oldlenp, newval, newlen);
74 put_uts(table, write, uts_table.data); 73 put_uts(table, write, uts_table.data);
75 return r; 74 return r;
76} 75}
diff --git a/kernel/wait.c b/kernel/wait.c
index c275c56cf2d3..cd87131f2fc2 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -72,12 +72,7 @@ prepare_to_wait(wait_queue_head_t *q, wait_queue_t *wait, int state)
72 spin_lock_irqsave(&q->lock, flags); 72 spin_lock_irqsave(&q->lock, flags);
73 if (list_empty(&wait->task_list)) 73 if (list_empty(&wait->task_list))
74 __add_wait_queue(q, wait); 74 __add_wait_queue(q, wait);
75 /* 75 set_current_state(state);
76 * don't alter the task state if this is just going to
77 * queue an async wait queue callback
78 */
79 if (is_sync_wait(wait))
80 set_current_state(state);
81 spin_unlock_irqrestore(&q->lock, flags); 76 spin_unlock_irqrestore(&q->lock, flags);
82} 77}
83EXPORT_SYMBOL(prepare_to_wait); 78EXPORT_SYMBOL(prepare_to_wait);
@@ -91,12 +86,7 @@ prepare_to_wait_exclusive(wait_queue_head_t *q, wait_queue_t *wait, int state)
91 spin_lock_irqsave(&q->lock, flags); 86 spin_lock_irqsave(&q->lock, flags);
92 if (list_empty(&wait->task_list)) 87 if (list_empty(&wait->task_list))
93 __add_wait_queue_tail(q, wait); 88 __add_wait_queue_tail(q, wait);
94 /* 89 set_current_state(state);
95 * don't alter the task state if this is just going to
96 * queue an async wait queue callback
97 */
98 if (is_sync_wait(wait))
99 set_current_state(state);
100 spin_unlock_irqrestore(&q->lock, flags); 90 spin_unlock_irqrestore(&q->lock, flags);
101} 91}
102EXPORT_SYMBOL(prepare_to_wait_exclusive); 92EXPORT_SYMBOL(prepare_to_wait_exclusive);
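
With the is_sync_wait() special case removed above, prepare_to_wait() and prepare_to_wait_exclusive() now set the task state unconditionally, so the usual caller loop is unchanged. A sketch of that pattern with illustrative names:

	static void wait_for_flag(wait_queue_head_t *wq, int *flag)
	{
		DEFINE_WAIT(wait);

		for (;;) {
			prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
			if (*flag)	/* condition re-checked after setting state */
				break;
			schedule();
		}
		finish_wait(wq, &wait);
	}
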
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 4048e92aa04f..714afad46539 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -9,7 +9,7 @@
9 * Derived from the taskqueue/keventd code by: 9 * Derived from the taskqueue/keventd code by:
10 * 10 *
11 * David Woodhouse <dwmw2@infradead.org> 11 * David Woodhouse <dwmw2@infradead.org>
12 * Andrew Morton <andrewm@uow.edu.au> 12 * Andrew Morton
13 * Kai Petzke <wpp@marie.physik.tu-berlin.de> 13 * Kai Petzke <wpp@marie.physik.tu-berlin.de>
14 * Theodore Ts'o <tytso@mit.edu> 14 * Theodore Ts'o <tytso@mit.edu>
15 * 15 *