Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c                          26
-rw-r--r--  kernel/futex.c                         28
-rw-r--r--  kernel/futex_compat.c                   6
-rw-r--r--  kernel/irq/manage.c                    17
-rw-r--r--  kernel/irq/numa_migrate.c               4
-rw-r--r--  kernel/kprobes.c                        2
-rw-r--r--  kernel/lockdep_proc.c                   3
-rw-r--r--  kernel/panic.c                          1
-rw-r--r--  kernel/perf_counter.c                 662
-rw-r--r--  kernel/posix-cpu-timers.c               7
-rw-r--r--  kernel/posix-timers.c                   7
-rw-r--r--  kernel/rtmutex.c                        4
-rw-r--r--  kernel/sched_cpupri.c                  15
-rw-r--r--  kernel/sched_fair.c                    32
-rw-r--r--  kernel/signal.c                        25
-rw-r--r--  kernel/smp.c                            2
-rw-r--r--  kernel/trace/blktrace.c                12
-rw-r--r--  kernel/trace/ftrace.c                  19
-rw-r--r--  kernel/trace/ring_buffer.c             15
-rw-r--r--  kernel/trace/trace.c                   13
-rw-r--r--  kernel/trace/trace.h                    4
-rw-r--r--  kernel/trace/trace_event_profile.c      2
-rw-r--r--  kernel/trace/trace_events.c             4
-rw-r--r--  kernel/trace/trace_events_filter.c     20
-rw-r--r--  kernel/trace/trace_functions_graph.c   11
-rw-r--r--  kernel/trace/trace_printk.c             2
-rw-r--r--  kernel/trace/trace_stack.c              7
-rw-r--r--  kernel/trace/trace_stat.c              34
-rw-r--r--  kernel/wait.c                           5
29 files changed, 649 insertions, 340 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 29b532e718f7..021e1138556e 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -568,18 +568,18 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
 	 * the value intact in a core dump, and to save the unnecessary
 	 * trouble otherwise.  Userland only wants this done for a sys_exit.
 	 */
-	if (tsk->clear_child_tid
-	    && !(tsk->flags & PF_SIGNALED)
-	    && atomic_read(&mm->mm_users) > 1) {
-		u32 __user * tidptr = tsk->clear_child_tid;
+	if (tsk->clear_child_tid) {
+		if (!(tsk->flags & PF_SIGNALED) &&
+		    atomic_read(&mm->mm_users) > 1) {
+			/*
+			 * We don't check the error code - if userspace has
+			 * not set up a proper pointer then tough luck.
+			 */
+			put_user(0, tsk->clear_child_tid);
+			sys_futex(tsk->clear_child_tid, FUTEX_WAKE,
+					1, NULL, NULL, 0);
+		}
 		tsk->clear_child_tid = NULL;
-
-		/*
-		 * We don't check the error code - if userspace has
-		 * not set up a proper pointer then tough luck.
-		 */
-		put_user(0, tidptr);
-		sys_futex(tidptr, FUTEX_WAKE, 1, NULL, NULL, 0);
 	}
 }
 
@@ -1269,6 +1269,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	write_unlock_irq(&tasklist_lock);
 	proc_fork_connector(p);
 	cgroup_post_fork(p);
+	perf_counter_fork(p);
 	return p;
 
 bad_fork_free_pid:
@@ -1410,9 +1411,6 @@ long do_fork(unsigned long clone_flags,
 		init_completion(&vfork);
 	}
 
-	if (!(clone_flags & CLONE_THREAD))
-		perf_counter_fork(p);
-
 	audit_finish_fork(p);
 	tracehook_report_clone(regs, clone_flags, nr, p);
 
diff --git a/kernel/futex.c b/kernel/futex.c
index 0672ff88f159..e18cfbdc7190 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1010,15 +1010,19 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
  * q:	the futex_q
  * key:	the key of the requeue target futex
+ * hb:	the hash_bucket of the requeue target futex
  *
  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
  * target futex if it is uncontended or via a lock steal.  Set the futex_q key
  * to the requeue target futex so the waiter can detect the wakeup on the right
  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
- * atomic lock acquisition.  Must be called with the q->lock_ptr held.
+ * atomic lock acquisition.  Set the q->lock_ptr to the requeue target hb->lock
+ * to protect access to the pi_state to fixup the owner later.  Must be called
+ * with both q->lock_ptr and hb->lock held.
  */
 static inline
-void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
+void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+			   struct futex_hash_bucket *hb)
 {
 	drop_futex_key_refs(&q->key);
 	get_futex_key_refs(key);
@@ -1030,6 +1034,11 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
 	WARN_ON(!q->rt_waiter);
 	q->rt_waiter = NULL;
 
+	q->lock_ptr = &hb->lock;
+#ifdef CONFIG_DEBUG_PI_LIST
+	q->list.plist.lock = &hb->lock;
+#endif
+
 	wake_up_state(q->task, TASK_NORMAL);
 }
 
@@ -1088,7 +1097,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
 				   set_waiters);
 	if (ret == 1)
-		requeue_pi_wake_futex(top_waiter, key2);
+		requeue_pi_wake_futex(top_waiter, key2, hb2);
 
 	return ret;
 }
@@ -1247,8 +1256,15 @@ retry_private:
 		if (!match_futex(&this->key, &key1))
 			continue;
 
-		WARN_ON(!requeue_pi && this->rt_waiter);
-		WARN_ON(requeue_pi && !this->rt_waiter);
+		/*
+		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
+		 * be paired with each other and no other futex ops.
+		 */
+		if ((requeue_pi && !this->rt_waiter) ||
+		    (!requeue_pi && this->rt_waiter)) {
+			ret = -EINVAL;
+			break;
+		}
 
 		/*
 		 * Wake nr_wake waiters.  For requeue_pi, if we acquired the
@@ -1273,7 +1289,7 @@ retry_private:
 					    this->task, 1);
 			if (ret == 1) {
 				/* We got the lock. */
-				requeue_pi_wake_futex(this, &key2);
+				requeue_pi_wake_futex(this, &key2, hb2);
 				continue;
 			} else if (ret) {
 				/* -EDEADLK */
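
The futex.c hunks above replace two WARN_ONs with a hard -EINVAL: a FUTEX_WAIT_REQUEUE_PI waiter may only be moved by FUTEX_CMP_REQUEUE_PI, and vice versa. For context, a minimal userspace sketch of the intended pairing (a condvar-style wait over a PI lock). Illustrative only: the op constants come from <linux/futex.h>; the helper names and the omitted error handling are assumptions, not part of this commit:

	#include <linux/futex.h>
	#include <limits.h>
	#include <stdint.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static long futex(uint32_t *uaddr, int op, uint32_t val,
			  void *timeout_or_val2, uint32_t *uaddr2, uint32_t val3)
	{
		return syscall(SYS_futex, uaddr, op, val, timeout_or_val2,
			       uaddr2, val3);
	}

	/* Waiter: sleep on @cond, wake up owning (or blocked on) @pi_lock. */
	static void cond_wait_pi(uint32_t *cond, uint32_t *pi_lock, uint32_t seen)
	{
		futex(cond, FUTEX_WAIT_REQUEUE_PI, seen, NULL, pi_lock, 0);
	}

	/* Waker: wake one waiter, requeue the rest from @cond onto @pi_lock. */
	static void cond_broadcast_pi(uint32_t *cond, uint32_t *pi_lock)
	{
		futex(cond, FUTEX_CMP_REQUEUE_PI, 1 /* nr_wake must be 1 */,
		      (void *)(long)INT_MAX /* val2: nr_requeue */,
		      pi_lock, *cond /* val3: expected *cond */);
	}

Mixing the two sides with a mismatched requeue op now fails with -EINVAL instead of just warning.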
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d607a5b9ee29..235716556bf1 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -180,7 +180,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
 	int cmd = op & FUTEX_CMD_MASK;
 
 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
-		      cmd == FUTEX_WAIT_BITSET)) {
+		      cmd == FUTEX_WAIT_BITSET ||
+		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
 		if (get_compat_timespec(&ts, utime))
 			return -EFAULT;
 		if (!timespec_valid(&ts))
@@ -191,7 +192,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
 		t = ktime_add_safe(ktime_get(), t);
 		tp = &t;
 	}
-	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE)
+	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
+	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
 		val2 = (int) (unsigned long) utime;
 
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 61c679db4687..d222515a5a06 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -761,7 +761,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action, **action_ptr;
-	struct task_struct *irqthread;
 	unsigned long flags;
 
 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -809,9 +808,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		desc->chip->disable(irq);
 	}
 
-	irqthread = action->thread;
-	action->thread = NULL;
-
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
@@ -819,12 +815,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	/* Make sure it's not being used on another CPU: */
 	synchronize_irq(irq);
 
-	if (irqthread) {
-		if (!test_bit(IRQTF_DIED, &action->thread_flags))
-			kthread_stop(irqthread);
-		put_task_struct(irqthread);
-	}
-
 #ifdef CONFIG_DEBUG_SHIRQ
 	/*
 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -840,6 +830,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		local_irq_restore(flags);
 	}
 #endif
+
+	if (action->thread) {
+		if (!test_bit(IRQTF_DIED, &action->thread_flags))
+			kthread_stop(action->thread);
+		put_task_struct(action->thread);
+	}
+
 	return action;
 }
 
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 2f69bee57bf2..3fd30197da2e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -107,8 +107,8 @@ out_unlock:
 
 struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 {
-	/* those all static, do move them */
-	if (desc->irq < NR_IRQS_LEGACY)
+	/* those static or target node is -1, do not move them */
+	if (desc->irq < NR_IRQS_LEGACY || node == -1)
 		return desc;
 
 	if (desc->node != node)
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 16b5739c516a..0540948e29ab 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -694,7 +694,7 @@ int __kprobes register_kprobe(struct kprobe *p)
 	p->addr = addr;
 
 	preempt_disable();
-	if (!__kernel_text_address((unsigned long) p->addr) ||
+	if (!kernel_text_address((unsigned long) p->addr) ||
 	    in_kprobes_functions((unsigned long) p->addr)) {
 		preempt_enable();
 		return -EINVAL;
diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
index d7135aa2d2c4..e94caa666dba 100644
--- a/kernel/lockdep_proc.c
+++ b/kernel/lockdep_proc.c
@@ -758,7 +758,8 @@ static int __init lockdep_proc_init(void)
 			&proc_lockdep_stats_operations);
 
 #ifdef CONFIG_LOCK_STAT
-	proc_create("lock_stat", S_IRUSR, NULL, &proc_lock_stat_operations);
+	proc_create("lock_stat", S_IRUSR | S_IWUSR, NULL,
+		    &proc_lock_stat_operations);
 #endif
 
 	return 0;
diff --git a/kernel/panic.c b/kernel/panic.c
index 984b3ecbd72c..512ab73b0ca3 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -301,6 +301,7 @@ int oops_may_print(void)
  */
 void oops_enter(void)
 {
+	tracing_off();
 	/* can't trust the integrity of the kernel anymore: */
 	debug_locks_off();
 	do_oops_enter_exit();
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 950931041954..534e20d14d63 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -42,6 +42,7 @@ static int perf_overcommit __read_mostly = 1;
 static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_counters __read_mostly;
 static atomic_t nr_comm_counters __read_mostly;
+static atomic_t nr_task_counters __read_mostly;
 
 /*
  * perf counter paranoia level:
@@ -87,6 +88,7 @@ void __weak hw_perf_disable(void)		{ barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
+void __weak hw_perf_counter_setup_online(int cpu)	{ barrier(); }
 
 int __weak
 hw_perf_group_sched_in(struct perf_counter *group_leader,
@@ -305,6 +307,10 @@ counter_sched_out(struct perf_counter *counter,
 		return;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		counter->state = PERF_COUNTER_STATE_OFF;
+	}
 	counter->tstamp_stopped = ctx->time;
 	counter->pmu->disable(counter);
 	counter->oncpu = -1;
@@ -1103,7 +1109,7 @@ static void perf_counter_sync_stat(struct perf_counter_context *ctx,
 		__perf_counter_sync_stat(counter, next_counter);
 
 		counter = list_next_entry(counter, event_entry);
-		next_counter = list_next_entry(counter, event_entry);
+		next_counter = list_next_entry(next_counter, event_entry);
 	}
 }
 
@@ -1654,6 +1660,8 @@ static void free_counter(struct perf_counter *counter)
 		atomic_dec(&nr_mmap_counters);
 	if (counter->attr.comm)
 		atomic_dec(&nr_comm_counters);
+	if (counter->attr.task)
+		atomic_dec(&nr_task_counters);
 	}
 
 	if (counter->destroy)
@@ -1688,14 +1696,133 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
+static int perf_counter_read_size(struct perf_counter *counter)
+{
+	int entry = sizeof(u64); /* value */
+	int size = 0;
+	int nr = 1;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_ID)
+		entry += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_GROUP) {
+		nr += counter->group_leader->nr_siblings;
+		size += sizeof(u64);
+	}
+
+	size += entry * nr;
+
+	return size;
+}
+
+static u64 perf_counter_read_value(struct perf_counter *counter)
+{
+	struct perf_counter *child;
+	u64 total = 0;
+
+	total += perf_counter_read(counter);
+	list_for_each_entry(child, &counter->child_list, child_list)
+		total += perf_counter_read(child);
+
+	return total;
+}
+
+static int perf_counter_read_entry(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	int n = 0, count = 0;
+	u64 values[2];
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	count = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, count))
+		return -EFAULT;
+
+	return count;
+}
+
+static int perf_counter_read_group(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	int n = 0, size = 0, err = -EFAULT;
+	u64 values[3];
+
+	values[n++] = 1 + leader->nr_siblings;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = leader->total_time_enabled +
+			atomic64_read(&leader->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = leader->total_time_running +
+			atomic64_read(&leader->child_total_time_running);
+	}
+
+	size = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, size))
+		return -EFAULT;
+
+	err = perf_counter_read_entry(leader, read_format, buf + size);
+	if (err < 0)
+		return err;
+
+	size += err;
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		err = perf_counter_read_entry(counter, read_format,
+				buf + size);
+		if (err < 0)
+			return err;
+
+		size += err;
+	}
+
+	return size;
+}
+
+static int perf_counter_read_one(struct perf_counter *counter,
+				 u64 read_format, char __user *buf)
+{
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	if (copy_to_user(buf, values, n * sizeof(u64)))
+		return -EFAULT;
+
+	return n * sizeof(u64);
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
 static ssize_t
 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 {
-	u64 values[4];
-	int n;
+	u64 read_format = counter->attr.read_format;
+	int ret;
 
 	/*
 	 * Return end-of-file for a read on a counter that is in
@@ -1705,28 +1832,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
+	if (count < perf_counter_read_size(counter))
+		return -ENOSPC;
+
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read(counter);
-	n = 1;
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = counter->total_time_enabled +
-			atomic64_read(&counter->child_total_time_enabled);
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = counter->total_time_running +
-			atomic64_read(&counter->child_total_time_running);
-	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = primary_counter_id(counter);
+	if (read_format & PERF_FORMAT_GROUP)
+		ret = perf_counter_read_group(counter, read_format, buf);
+	else
+		ret = perf_counter_read_one(counter, read_format, buf);
 	mutex_unlock(&counter->child_mutex);
 
-	if (count < n * sizeof(u64))
-		return -EINVAL;
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
+	return ret;
 }
 
 static ssize_t
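
With the hunks above, read() on a counter fd is laid out by perf_counter_read_one()/perf_counter_read_group() and sized up front by perf_counter_read_size(); a short buffer now gets -ENOSPC instead of -EINVAL. A sketch of how userspace might map the group layout (assuming read_format = PERF_FORMAT_GROUP | PERF_FORMAT_ID plus both TOTAL_TIME bits; the struct and helper are illustrative, not a kernel ABI header):

	#include <stdint.h>
	#include <stdio.h>

	struct group_read {
		uint64_t nr;		/* 1 + leader->nr_siblings */
		uint64_t time_enabled;	/* PERF_FORMAT_TOTAL_TIME_ENABLED */
		uint64_t time_running;	/* PERF_FORMAT_TOTAL_TIME_RUNNING */
		struct {
			uint64_t value;	/* leader first, then each sibling */
			uint64_t id;	/* PERF_FORMAT_ID */
		} cnt[];
	};

	static void dump_group(const struct group_read *g)
	{
		for (uint64_t i = 0; i < g->nr; i++)
			printf("id=%llu value=%llu (enabled=%llu running=%llu)\n",
			       (unsigned long long)g->cnt[i].id,
			       (unsigned long long)g->cnt[i].value,
			       (unsigned long long)g->time_enabled,
			       (unsigned long long)g->time_running);
	}

Note the time fields appear once per group (read from the leader) while value/id repeat per counter, which is exactly the order perf_counter_read_group() writes them.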
@@ -2230,7 +2347,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry)
 
 	if (counter->pending_disable) {
 		counter->pending_disable = 0;
-		perf_counter_disable(counter);
+		__perf_counter_disable(counter);
 	}
 
 	if (counter->pending_wakeup) {
@@ -2615,7 +2732,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter, int nmi,
+static void perf_output_read_one(struct perf_output_handle *handle,
+				 struct perf_counter *counter)
+{
+	u64 read_format = counter->attr.read_format;
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = atomic64_read(&counter->count);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+}
+
+/*
+ * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
+ */
+static void perf_output_read_group(struct perf_output_handle *handle,
+			    struct perf_counter *counter)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	u64 read_format = counter->attr.read_format;
+	u64 values[5];
+	int n = 0;
+
+	values[n++] = 1 + leader->nr_siblings;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = leader->total_time_enabled;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = leader->total_time_running;
+
+	if (leader != counter)
+		leader->pmu->read(leader);
+
+	values[n++] = atomic64_read(&leader->count);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(leader);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		n = 0;
+
+		if (sub != counter)
+			sub->pmu->read(sub);
+
+		values[n++] = atomic64_read(&sub->count);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_counter_id(sub);
+
+		perf_output_copy(handle, values, n * sizeof(u64));
+	}
+}
+
+static void perf_output_read(struct perf_output_handle *handle,
+			     struct perf_counter *counter)
+{
+	if (counter->attr.read_format & PERF_FORMAT_GROUP)
+		perf_output_read_group(handle, counter);
+	else
+		perf_output_read_one(handle, counter);
+}
+
+void perf_counter_output(struct perf_counter *counter, int nmi,
 			    struct perf_sample_data *data)
 {
 	int ret;
@@ -2626,10 +2816,6 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	struct {
 		u32 pid, tid;
 	} tid_entry;
-	struct {
-		u64 id;
-		u64 counter;
-	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
 	int callchain_size = 0;
 	u64 time;
@@ -2684,10 +2870,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		header.size += sizeof(u64);
 
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		header.size += sizeof(u64) +
-			counter->nr_siblings * sizeof(group_entry);
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		header.size += perf_counter_read_size(counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(data->regs);
@@ -2699,6 +2883,18 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		header.size += sizeof(u64);
 	}
 
+	if (sample_type & PERF_SAMPLE_RAW) {
+		int size = sizeof(u32);
+
+		if (data->raw)
+			size += data->raw->size;
+		else
+			size += sizeof(u32);
+
+		WARN_ON_ONCE(size & (sizeof(u64)-1));
+		header.size += size;
+	}
+
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
 	if (ret)
 		return;
@@ -2732,26 +2928,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		perf_output_put(&handle, data->period);
 
-	/*
-	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
-	 */
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		struct perf_counter *leader, *sub;
-		u64 nr = counter->nr_siblings;
-
-		perf_output_put(&handle, nr);
-
-		leader = counter->group_leader;
-		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-			if (sub != counter)
-				sub->pmu->read(sub);
-
-			group_entry.id = primary_counter_id(sub);
-			group_entry.counter = atomic64_read(&sub->count);
-
-			perf_output_put(&handle, group_entry);
-		}
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		perf_output_read(&handle, counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		if (callchain)
@@ -2762,6 +2940,22 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
+	if (sample_type & PERF_SAMPLE_RAW) {
+		if (data->raw) {
+			perf_output_put(&handle, data->raw->size);
+			perf_output_copy(&handle, data->raw->data, data->raw->size);
+		} else {
+			struct {
+				u32	size;
+				u32	data;
+			} raw = {
+				.size = sizeof(u32),
+				.data = 0,
+			};
+			perf_output_put(&handle, raw);
+		}
+	}
+
 	perf_output_end(&handle);
 }
 
@@ -2774,8 +2968,6 @@ struct perf_read_event {
 
 	u32				pid;
 	u32				tid;
-	u64				value;
-	u64				format[3];
 };
 
 static void
@@ -2787,80 +2979,74 @@ perf_counter_read_event(struct perf_counter *counter,
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) - sizeof(event.format),
+			.size = sizeof(event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
-		.value = atomic64_read(&counter->count),
 	};
-	int ret, i = 0;
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_enabled;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_running;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = primary_counter_id(counter);
-	}
+	int ret;
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_put(&handle, event);
+	perf_output_read(&handle, counter);
+
 	perf_output_end(&handle);
 }
 
 /*
- * fork tracking
+ * task tracking -- fork/exit
+ *
+ * enabled by: attr.comm | attr.mmap | attr.task
  */
 
-struct perf_fork_event {
+struct perf_task_event {
 	struct task_struct		*task;
+	struct perf_counter_context	*task_ctx;
 
 	struct {
 		struct perf_event_header	header;
 
 		u32				pid;
 		u32				ppid;
+		u32				tid;
+		u32				ptid;
 	} event;
 };
 
-static void perf_counter_fork_output(struct perf_counter *counter,
-				     struct perf_fork_event *fork_event)
+static void perf_counter_task_output(struct perf_counter *counter,
+				     struct perf_task_event *task_event)
 {
 	struct perf_output_handle handle;
-	int size = fork_event->event.header.size;
-	struct task_struct *task = fork_event->task;
+	int size = task_event->event.header.size;
+	struct task_struct *task = task_event->task;
 	int ret = perf_output_begin(&handle, counter, size, 0, 0);
 
 	if (ret)
 		return;
 
-	fork_event->event.pid = perf_counter_pid(counter, task);
-	fork_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.pid = perf_counter_pid(counter, task);
+	task_event->event.ppid = perf_counter_pid(counter, current);
+
+	task_event->event.tid = perf_counter_tid(counter, task);
+	task_event->event.ptid = perf_counter_tid(counter, current);
 
-	perf_output_put(&handle, fork_event->event);
+	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
 }
 
-static int perf_counter_fork_match(struct perf_counter *counter)
+static int perf_counter_task_match(struct perf_counter *counter)
 {
-	if (counter->attr.comm || counter->attr.mmap)
+	if (counter->attr.comm || counter->attr.mmap || counter->attr.task)
 		return 1;
 
 	return 0;
 }
 
-static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
-				  struct perf_fork_event *fork_event)
+static void perf_counter_task_ctx(struct perf_counter_context *ctx,
+				  struct perf_task_event *task_event)
 {
 	struct perf_counter *counter;
 
@@ -2869,54 +3055,62 @@ static void perf_counter_fork_ctx(struct perf_counter_context *ctx,
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(counter, &ctx->event_list, event_entry) {
-		if (perf_counter_fork_match(counter))
-			perf_counter_fork_output(counter, fork_event);
+		if (perf_counter_task_match(counter))
+			perf_counter_task_output(counter, task_event);
 	}
 	rcu_read_unlock();
 }
 
-static void perf_counter_fork_event(struct perf_fork_event *fork_event)
+static void perf_counter_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
-	struct perf_counter_context *ctx;
+	struct perf_counter_context *ctx = task_event->task_ctx;
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
-	perf_counter_fork_ctx(&cpuctx->ctx, fork_event);
+	perf_counter_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
-	ctx = rcu_dereference(current->perf_counter_ctxp);
+	if (!ctx)
+		ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
 	if (ctx)
-		perf_counter_fork_ctx(ctx, fork_event);
+		perf_counter_task_ctx(ctx, task_event);
 	rcu_read_unlock();
 }
 
-void perf_counter_fork(struct task_struct *task)
+static void perf_counter_task(struct task_struct *task,
+			      struct perf_counter_context *task_ctx,
+			      int new)
 {
-	struct perf_fork_event fork_event;
+	struct perf_task_event task_event;
 
 	if (!atomic_read(&nr_comm_counters) &&
-	    !atomic_read(&nr_mmap_counters))
+	    !atomic_read(&nr_mmap_counters) &&
+	    !atomic_read(&nr_task_counters))
 		return;
 
-	fork_event = (struct perf_fork_event){
+	task_event = (struct perf_task_event){
 		.task  = task,
-		.event = {
+		.task_ctx = task_ctx,
+		.event = {
 			.header = {
-				.type = PERF_EVENT_FORK,
+				.type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
 				.misc = 0,
-				.size = sizeof(fork_event.event),
+				.size = sizeof(task_event.event),
 			},
 			/* .pid  */
 			/* .ppid */
+			/* .tid  */
+			/* .ptid */
 		},
 	};
 
-	perf_counter_fork_event(&fork_event);
+	perf_counter_task_event(&task_event);
+}
+
+void perf_counter_fork(struct task_struct *task)
+{
+	perf_counter_task(task, NULL, 1);
 }
 
 /*
@@ -3305,125 +3499,111 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
  * Generic software counter infrastructure
  */
 
-static void perf_swcounter_update(struct perf_counter *counter)
+/*
+ * We directly increment counter->count and keep a second value in
+ * counter->hw.period_left to count intervals. This period counter
+ * is kept in the range [-sample_period, 0] so that we can use the
+ * sign as trigger.
+ */
+
+static u64 perf_swcounter_set_period(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
-	u64 prev, now;
-	s64 delta;
+	u64 period = hwc->last_period;
+	u64 nr, offset;
+	s64 old, val;
+
+	hwc->last_period = hwc->sample_period;
 
 again:
-	prev = atomic64_read(&hwc->prev_count);
-	now = atomic64_read(&hwc->count);
-	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
-		goto again;
+	old = val = atomic64_read(&hwc->period_left);
+	if (val < 0)
+		return 0;
 
-	delta = now - prev;
+	nr = div64_u64(period + val, period);
+	offset = nr * period;
+	val -= offset;
+	if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
+		goto again;
 
-	atomic64_add(delta, &counter->count);
-	atomic64_sub(delta, &hwc->period_left);
+	return nr;
 }
 
-static void perf_swcounter_set_period(struct perf_counter *counter)
+static void perf_swcounter_overflow(struct perf_counter *counter,
+				    int nmi, struct perf_sample_data *data)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
-	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = hwc->sample_period;
+	u64 overflow;
 
-	if (unlikely(left <= -period)) {
-		left = period;
-		atomic64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-	}
+	data->period = counter->hw.last_period;
+	overflow = perf_swcounter_set_period(counter);
 
-	if (unlikely(left <= 0)) {
-		left += period;
-		atomic64_add(period, &hwc->period_left);
-		hwc->last_period = period;
-	}
+	if (hwc->interrupts == MAX_INTERRUPTS)
+		return;
 
-	atomic64_set(&hwc->prev_count, -left);
-	atomic64_set(&hwc->count, -left);
+	for (; overflow; overflow--) {
+		if (perf_counter_overflow(counter, nmi, data)) {
+			/*
+			 * We inhibit the overflow from happening when
+			 * hwc->interrupts == MAX_INTERRUPTS.
+			 */
+			break;
+		}
+	}
 }
 
-static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+static void perf_swcounter_unthrottle(struct perf_counter *counter)
 {
-	enum hrtimer_restart ret = HRTIMER_RESTART;
-	struct perf_sample_data data;
-	struct perf_counter *counter;
-	u64 period;
-
-	counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer);
-	counter->pmu->read(counter);
-
-	data.addr = 0;
-	data.regs = get_irq_regs();
 	/*
-	 * In case we exclude kernel IPs or are somehow not in interrupt
-	 * context, provide the next best thing, the user IP.
+	 * Nothing to do, we already reset hwc->interrupts.
 	 */
-	if ((counter->attr.exclude_kernel || !data.regs) &&
-			!counter->attr.exclude_user)
-		data.regs = task_pt_regs(current);
+}
 
-	if (data.regs) {
-		if (perf_counter_overflow(counter, 0, &data))
-			ret = HRTIMER_NORESTART;
-	}
+static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
+			       int nmi, struct perf_sample_data *data)
+{
+	struct hw_perf_counter *hwc = &counter->hw;
 
-	period = max_t(u64, 10000, counter->hw.sample_period);
-	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+	atomic64_add(nr, &counter->count);
 
-	return ret;
-}
+	if (!hwc->sample_period)
+		return;
 
-static void perf_swcounter_overflow(struct perf_counter *counter,
-				    int nmi, struct perf_sample_data *data)
-{
-	data->period = counter->hw.last_period;
+	if (!data->regs)
+		return;
 
-	perf_swcounter_update(counter);
-	perf_swcounter_set_period(counter);
-	if (perf_counter_overflow(counter, nmi, data))
-		/* soft-disable the counter */
-		;
+	if (!atomic64_add_negative(nr, &hwc->period_left))
+		perf_swcounter_overflow(counter, nmi, data);
 }
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-	struct perf_counter_context *ctx;
-	unsigned long flags;
-	int count;
-
+	/*
+	 * The counter is active, we're good!
+	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		return 1;
 
+	/*
+	 * The counter is off/error, not counting.
+	 */
 	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
 		return 0;
 
 	/*
-	 * If the counter is inactive, it could be just because
-	 * its task is scheduled out, or because it's in a group
-	 * which could not go on the PMU. We want to count in
-	 * the first case but not the second. If the context is
-	 * currently active then an inactive software counter must
-	 * be the second case. If it's not currently active then
-	 * we need to know whether the counter was active when the
-	 * context was last active, which we can determine by
-	 * comparing counter->tstamp_stopped with ctx->time.
-	 *
-	 * We are within an RCU read-side critical section,
-	 * which protects the existence of *ctx.
+	 * The counter is inactive, if the context is active
+	 * we're part of a group that didn't make it on the 'pmu',
+	 * not counting.
 	 */
-	ctx = counter->ctx;
-	spin_lock_irqsave(&ctx->lock, flags);
-	count = 1;
-	/* Re-check state now we have the lock */
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-	    counter->ctx->is_active ||
-	    counter->tstamp_stopped < ctx->time)
-		count = 0;
-	spin_unlock_irqrestore(&ctx->lock, flags);
-	return count;
+	if (counter->ctx->is_active)
+		return 0;
+
+	/*
+	 * We're inactive and the context is too, this means the
+	 * task is scheduled out, we're counting events that happen
+	 * to us, like migration events.
+	 */
+	return 1;
 }
 
 static int perf_swcounter_match(struct perf_counter *counter,
@@ -3449,15 +3629,6 @@ static int perf_swcounter_match(struct perf_counter *counter,
 	return 1;
 }
 
-static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-			       int nmi, struct perf_sample_data *data)
-{
-	int neg = atomic64_add_negative(nr, &counter->hw.count);
-
-	if (counter->hw.sample_period && !neg && data->regs)
-		perf_swcounter_overflow(counter, nmi, data);
-}
-
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 				     enum perf_type_id type,
 				     u32 event, u64 nr, int nmi,
@@ -3536,27 +3707,66 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,
 
 static void perf_swcounter_read(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }
 
 static int perf_swcounter_enable(struct perf_counter *counter)
 {
-	perf_swcounter_set_period(counter);
+	struct hw_perf_counter *hwc = &counter->hw;
+
+	if (hwc->sample_period) {
+		hwc->last_period = hwc->sample_period;
+		perf_swcounter_set_period(counter);
+	}
 	return 0;
 }
 
 static void perf_swcounter_disable(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }
 
 static const struct pmu perf_ops_generic = {
 	.enable		= perf_swcounter_enable,
 	.disable	= perf_swcounter_disable,
 	.read		= perf_swcounter_read,
+	.unthrottle	= perf_swcounter_unthrottle,
 };
 
 /*
+ * hrtimer based swcounter callback
+ */
+
+static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+{
+	enum hrtimer_restart ret = HRTIMER_RESTART;
+	struct perf_sample_data data;
+	struct perf_counter *counter;
+	u64 period;
+
+	counter	= container_of(hrtimer, struct perf_counter, hw.hrtimer);
+	counter->pmu->read(counter);
+
+	data.addr = 0;
+	data.regs = get_irq_regs();
+	/*
+	 * In case we exclude kernel IPs or are somehow not in interrupt
+	 * context, provide the next best thing, the user IP.
+	 */
+	if ((counter->attr.exclude_kernel || !data.regs) &&
+			!counter->attr.exclude_user)
+		data.regs = task_pt_regs(current);
+
+	if (data.regs) {
+		if (perf_counter_overflow(counter, 0, &data))
+			ret = HRTIMER_NORESTART;
+	}
+
+	period = max_t(u64, 10000, counter->hw.sample_period);
+	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+	return ret;
+}
+
+/*
  * Software counter: cpu wall time clock
  */
 
@@ -3673,17 +3883,24 @@ static const struct pmu perf_ops_task_clock = {
 };
 
 #ifdef CONFIG_EVENT_PROFILE
-void perf_tpcounter_event(int event_id)
+void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
+			  int entry_size)
 {
+	struct perf_raw_record raw = {
+		.size = entry_size,
+		.data = record,
+	};
+
 	struct perf_sample_data data = {
 		.regs = get_irq_regs(),
-		.addr = 0,
+		.addr = addr,
+		.raw = &raw,
 	};
 
 	if (!data.regs)
 		data.regs = task_pt_regs(current);
 
-	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, 1, 1, &data);
+	do_perf_swcounter_event(PERF_TYPE_TRACEPOINT, event_id, count, 1, &data);
 }
 EXPORT_SYMBOL_GPL(perf_tpcounter_event);
 
@@ -3697,6 +3914,14 @@ static void tp_perf_counter_destroy(struct perf_counter *counter)
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
+	/*
+	 * Raw tracepoint data is a severe data leak, only allow root to
+	 * have these.
+	 */
+	if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
+			!capable(CAP_SYS_ADMIN))
+		return ERR_PTR(-EPERM);
+
 	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
@@ -3830,9 +4055,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
+	 * we currently do not support PERF_FORMAT_GROUP on inherited counters
 	 */
-	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
 		goto done;
 
 	switch (attr->type) {
@@ -3875,6 +4100,8 @@ done:
 		atomic_inc(&nr_mmap_counters);
 	if (counter->attr.comm)
 		atomic_inc(&nr_comm_counters);
+	if (counter->attr.task)
+		atomic_inc(&nr_task_counters);
 	}
 
 	return counter;
@@ -4236,8 +4463,10 @@ void perf_counter_exit_task(struct task_struct *child)
 	struct perf_counter_context *child_ctx;
 	unsigned long flags;
 
-	if (likely(!child->perf_counter_ctxp))
+	if (likely(!child->perf_counter_ctxp)) {
+		perf_counter_task(child, NULL, 0);
 		return;
+	}
 
 	local_irq_save(flags);
 	/*
@@ -4262,8 +4491,14 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * the counters from it.
 	 */
 	unclone_ctx(child_ctx);
-	spin_unlock(&child_ctx->lock);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&child_ctx->lock, flags);
+
+	/*
+	 * Report the task dead after unscheduling the counters so that we
+	 * won't get any samples after PERF_EVENT_EXIT. We can however still
+	 * get a few PERF_EVENT_READ events.
+	 */
+	perf_counter_task(child, child_ctx, 0);
 
 	/*
 	 * We can recurse on the same lock type through:
@@ -4484,6 +4719,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_counter_init_cpu(cpu);
 		break;
 
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		hw_perf_counter_setup_online(cpu);
+		break;
+
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_counter_exit_cpu(cpu);
@@ -4508,6 +4748,8 @@ void __init perf_counter_init(void)
 {
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
+	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
+			(void *)(long)smp_processor_id());
 	register_cpu_notifier(&perf_cpu_nb);
 }
 
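
The most subtle part of the perf_counter.c changes is the software-counter rewrite: counter->count is now incremented directly and hwc->period_left is kept in [-sample_period, 0], so a sign flip signals overflow, and perf_swcounter_set_period() computes how many whole periods elapsed in one step via div64_u64(period + val, period). A standalone model of that arithmetic, runnable in plain C (no atomics; names are illustrative, not kernel code):

	#include <stdint.h>
	#include <stdio.h>

	struct hw { int64_t period_left; uint64_t period; };

	/* mirrors perf_swcounter_set_period(): count elapsed periods, re-arm */
	static uint64_t set_period(struct hw *hwc)
	{
		int64_t val = hwc->period_left;
		uint64_t nr;

		if (val < 0)		/* still below zero: nothing pending */
			return 0;
		nr = (hwc->period + val) / hwc->period;
		hwc->period_left = val - nr * hwc->period;
		return nr;		/* number of overflows to report */
	}

	/* mirrors perf_swcounter_add(): only overflow when the sign flips */
	static void add(struct hw *hwc, uint64_t nr)
	{
		hwc->period_left += nr;
		if (hwc->period_left >= 0)
			printf("overflows: %llu\n",
			       (unsigned long long)set_period(hwc));
	}

	int main(void)
	{
		struct hw hwc = { .period_left = -10, .period = 10 };
		add(&hwc, 3);	/* -7, no overflow */
		add(&hwc, 25);	/* crosses 0: (10+18)/10 = 2, left = -2 */
		return 0;
	}

The cmpxchg loop in the real perf_swcounter_set_period() exists only because concurrent (including NMI-context) events can race with the update; the arithmetic is the same.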
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index bece7c0b67b2..e33a21cb9407 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -521,11 +521,12 @@ void posix_cpu_timers_exit(struct task_struct *tsk)
 }
 void posix_cpu_timers_exit_group(struct task_struct *tsk)
 {
-	struct task_cputime cputime;
+	struct signal_struct *const sig = tsk->signal;
 
-	thread_group_cputimer(tsk, &cputime);
 	cleanup_timers(tsk->signal->cpu_timers,
-		       cputime.utime, cputime.stime, cputime.sum_exec_runtime);
+		       cputime_add(tsk->utime, sig->utime),
+		       cputime_add(tsk->stime, sig->stime),
+		       tsk->se.sum_exec_runtime + sig->sum_sched_runtime);
 }
 
 static void clear_dead_task(struct k_itimer *timer, union cpu_time_count now)
diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
index 052ec4d195c7..d089d052c4a9 100644
--- a/kernel/posix-timers.c
+++ b/kernel/posix-timers.c
@@ -202,6 +202,12 @@ static int no_timer_create(struct k_itimer *new_timer)
 	return -EOPNOTSUPP;
 }
 
+static int no_nsleep(const clockid_t which_clock, int flags,
+		     struct timespec *tsave, struct timespec __user *rmtp)
+{
+	return -EOPNOTSUPP;
+}
+
 /*
  * Return nonzero if we know a priori this clockid_t value is bogus.
  */
@@ -254,6 +260,7 @@ static __init int init_posix_timers(void)
 		.clock_get = posix_get_monotonic_raw,
 		.clock_set = do_posix_clock_nosettime,
 		.timer_create = no_timer_create,
+		.nsleep = no_nsleep,
 	};
 
 	register_posix_clock(CLOCK_REALTIME, &clock_realtime);
diff --git a/kernel/rtmutex.c b/kernel/rtmutex.c
index fcd107a78c5a..29bd4baf9e75 100644
--- a/kernel/rtmutex.c
+++ b/kernel/rtmutex.c
@@ -1039,16 +1039,14 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
 	if (!rt_mutex_owner(lock) || try_to_steal_lock(lock, task)) {
 		/* We got the lock for task. */
 		debug_rt_mutex_lock(lock);
-
 		rt_mutex_set_owner(lock, task, 0);
-
+		spin_unlock(&lock->wait_lock);
 		rt_mutex_deadlock_account_lock(lock, task);
 		return 1;
 	}
 
 	ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
 
-
 	if (ret && !waiter->task) {
 		/*
 		 * Reset the return value. We might have
diff --git a/kernel/sched_cpupri.c b/kernel/sched_cpupri.c
index e6c251790dde..d014efbf947a 100644
--- a/kernel/sched_cpupri.c
+++ b/kernel/sched_cpupri.c
@@ -81,8 +81,21 @@ int cpupri_find(struct cpupri *cp, struct task_struct *p,
 		if (cpumask_any_and(&p->cpus_allowed, vec->mask) >= nr_cpu_ids)
 			continue;
 
-		if (lowest_mask)
+		if (lowest_mask) {
 			cpumask_and(lowest_mask, &p->cpus_allowed, vec->mask);
+
+			/*
+			 * We have to ensure that we have at least one bit
+			 * still set in the array, since the map could have
+			 * been concurrently emptied between the first and
+			 * second reads of vec->mask.  If we hit this
+			 * condition, simply act as though we never hit this
+			 * priority level and continue on.
+			 */
+			if (cpumask_any(lowest_mask) >= nr_cpu_ids)
+				continue;
+		}
+
 		return 1;
 	}
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 9ffb2b2ceba4..652e8bdef9aa 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -611,9 +611,13 @@ account_entity_dequeue(struct cfs_rq *cfs_rq, struct sched_entity *se)
 static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
 #ifdef CONFIG_SCHEDSTATS
+	struct task_struct *tsk = NULL;
+
+	if (entity_is_task(se))
+		tsk = task_of(se);
+
 	if (se->sleep_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->sleep_start;
-		struct task_struct *tsk = task_of(se);
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -624,11 +628,11 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->sleep_start = 0;
 		se->sum_sleep_runtime += delta;
 
-		account_scheduler_latency(tsk, delta >> 10, 1);
+		if (tsk)
+			account_scheduler_latency(tsk, delta >> 10, 1);
 	}
 	if (se->block_start) {
 		u64 delta = rq_of(cfs_rq)->clock - se->block_start;
-		struct task_struct *tsk = task_of(se);
 
 		if ((s64)delta < 0)
 			delta = 0;
@@ -639,17 +643,19 @@ static void enqueue_sleeper(struct cfs_rq *cfs_rq, struct sched_entity *se)
 		se->block_start = 0;
 		se->sum_sleep_runtime += delta;
 
-		/*
-		 * Blocking time is in units of nanosecs, so shift by 20 to
-		 * get a milliseconds-range estimation of the amount of
-		 * time that the task spent sleeping:
-		 */
-		if (unlikely(prof_on == SLEEP_PROFILING)) {
-
-			profile_hits(SLEEP_PROFILING, (void *)get_wchan(tsk),
-				     delta >> 20);
+		if (tsk) {
+			/*
+			 * Blocking time is in units of nanosecs, so shift by
+			 * 20 to get a milliseconds-range estimation of the
+			 * amount of time that the task spent sleeping:
+			 */
+			if (unlikely(prof_on == SLEEP_PROFILING)) {
+				profile_hits(SLEEP_PROFILING,
+						(void *)get_wchan(tsk),
+						delta >> 20);
+			}
+			account_scheduler_latency(tsk, delta >> 10, 0);
 		}
-		account_scheduler_latency(tsk, delta >> 10, 0);
 	}
 #endif
 }
diff --git a/kernel/signal.c b/kernel/signal.c
index ccf1ceedaebe..64c5deeaca5d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2454,11 +2454,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
2454 stack_t oss; 2454 stack_t oss;
2455 int error; 2455 int error;
2456 2456
2457 if (uoss) { 2457 oss.ss_sp = (void __user *) current->sas_ss_sp;
2458 oss.ss_sp = (void __user *) current->sas_ss_sp; 2458 oss.ss_size = current->sas_ss_size;
2459 oss.ss_size = current->sas_ss_size; 2459 oss.ss_flags = sas_ss_flags(sp);
2460 oss.ss_flags = sas_ss_flags(sp);
2461 }
2462 2460
2463 if (uss) { 2461 if (uss) {
2464 void __user *ss_sp; 2462 void __user *ss_sp;
@@ -2466,10 +2464,12 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
2466 int ss_flags; 2464 int ss_flags;
2467 2465
2468 error = -EFAULT; 2466 error = -EFAULT;
2469 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)) 2467 if (!access_ok(VERIFY_READ, uss, sizeof(*uss)))
2470 || __get_user(ss_sp, &uss->ss_sp) 2468 goto out;
2471 || __get_user(ss_flags, &uss->ss_flags) 2469 error = __get_user(ss_sp, &uss->ss_sp) |
2472 || __get_user(ss_size, &uss->ss_size)) 2470 __get_user(ss_flags, &uss->ss_flags) |
2471 __get_user(ss_size, &uss->ss_size);
2472 if (error)
2473 goto out; 2473 goto out;
2474 2474
2475 error = -EPERM; 2475 error = -EPERM;
@@ -2501,13 +2501,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
2501 current->sas_ss_size = ss_size; 2501 current->sas_ss_size = ss_size;
2502 } 2502 }
2503 2503
2504 error = 0;
2504 if (uoss) { 2505 if (uoss) {
2505 error = -EFAULT; 2506 error = -EFAULT;
2506 if (copy_to_user(uoss, &oss, sizeof(oss))) 2507 if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
2507 goto out; 2508 goto out;
2509 error = __put_user(oss.ss_sp, &uoss->ss_sp) |
2510 __put_user(oss.ss_size, &uoss->ss_size) |
2511 __put_user(oss.ss_flags, &uoss->ss_flags);
2508 } 2512 }
2509 2513
2510 error = 0;
2511out: 2514out:
2512 return error; 2515 return error;
2513} 2516}
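
The rewritten do_sigaltstack() replaces the three-way || chain behind one access_ok() with per-field __get_user()/__put_user() calls whose results are OR-ed together; any failure leaves a nonzero value to test once. A userspace analog of that aggregation, where copy_field() is a made-up stand-in for __get_user():

    #include <errno.h>
    #include <string.h>

    struct stack_desc { void *sp; size_t size; int flags; };

    /* Made-up stand-in for __get_user(): 0 on success, -EFAULT on failure. */
    static int copy_field(void *dst, const void *src, size_t n)
    {
            if (!src)
                    return -EFAULT;
            memcpy(dst, src, n);
            return 0;
    }

    static int read_desc(struct stack_desc *out, const struct stack_desc *usr)
    {
            /*
             * OR the per-field results: one test at the end replaces a
             * separate goto path per field, and any failure is preserved.
             */
            int error = copy_field(&out->sp, &usr->sp, sizeof(out->sp)) |
                        copy_field(&out->size, &usr->size, sizeof(out->size)) |
                        copy_field(&out->flags, &usr->flags, sizeof(out->flags));

            return error ? -EFAULT : 0;
    }
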
diff --git a/kernel/smp.c b/kernel/smp.c
index ad63d8501207..94188b8ecc33 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -57,7 +57,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
57 return NOTIFY_BAD; 57 return NOTIFY_BAD;
58 break; 58 break;
59 59
60#ifdef CONFIG_CPU_HOTPLUG 60#ifdef CONFIG_HOTPLUG_CPU
61 case CPU_UP_CANCELED: 61 case CPU_UP_CANCELED:
62 case CPU_UP_CANCELED_FROZEN: 62 case CPU_UP_CANCELED_FROZEN:
63 63
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 1090b0aed9ba..7a34cb563fec 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -267,8 +267,8 @@ static void blk_trace_free(struct blk_trace *bt)
267{ 267{
268 debugfs_remove(bt->msg_file); 268 debugfs_remove(bt->msg_file);
269 debugfs_remove(bt->dropped_file); 269 debugfs_remove(bt->dropped_file);
270 debugfs_remove(bt->dir);
271 relay_close(bt->rchan); 270 relay_close(bt->rchan);
271 debugfs_remove(bt->dir);
272 free_percpu(bt->sequence); 272 free_percpu(bt->sequence);
273 free_percpu(bt->msg_data); 273 free_percpu(bt->msg_data);
274 kfree(bt); 274 kfree(bt);
@@ -378,18 +378,8 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
378 378
379static int blk_remove_buf_file_callback(struct dentry *dentry) 379static int blk_remove_buf_file_callback(struct dentry *dentry)
380{ 380{
381 struct dentry *parent = dentry->d_parent;
382 debugfs_remove(dentry); 381 debugfs_remove(dentry);
383 382
384 /*
385 * this will fail for all but the last file, but that is ok. what we
386 * care about is the top level buts->name directory going away, when
387 * the last trace file is gone. Then we don't have to rmdir() that
388 * manually on trace stop, so it nicely solves the issue with
389 * force killing of running traces.
390 */
391
392 debugfs_remove(parent);
393 return 0; 383 return 0;
394} 384}
395 385
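
The blk_trace_free() reorder makes relay_close() run before debugfs_remove(bt->dir): the relay channel's per-CPU files live inside that directory, so teardown has to release children before the parent. A runnable userspace illustration of the same rule with an ordinary directory (file names invented):

    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            mkdir("tracedir", 0755);
            FILE *buf = fopen("tracedir/buf0", "w");

            /* Wrong order: removing the parent while a child still
             * exists fails, much like removing the debugfs dir before
             * the relay channel has released its files. */
            if (rmdir("tracedir") != 0)
                    perror("rmdir before closing children");

            /* Right order: children first, then the (now empty) parent. */
            if (buf)
                    fclose(buf);
            unlink("tracedir/buf0");
            if (rmdir("tracedir") == 0)
                    puts("teardown in reverse setup order succeeded");
            return 0;
    }
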
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 4521c77d1a1a..1e1d23c26308 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1662,7 +1662,7 @@ ftrace_regex_open(struct inode *inode, struct file *file, int enable)
1662 1662
1663 mutex_lock(&ftrace_regex_lock); 1663 mutex_lock(&ftrace_regex_lock);
1664 if ((file->f_mode & FMODE_WRITE) && 1664 if ((file->f_mode & FMODE_WRITE) &&
1665 !(file->f_flags & O_APPEND)) 1665 (file->f_flags & O_TRUNC))
1666 ftrace_filter_reset(enable); 1666 ftrace_filter_reset(enable);
1667 1667
1668 if (file->f_mode & FMODE_READ) { 1668 if (file->f_mode & FMODE_READ) {
@@ -2577,7 +2577,7 @@ ftrace_graph_open(struct inode *inode, struct file *file)
2577 2577
2578 mutex_lock(&graph_lock); 2578 mutex_lock(&graph_lock);
2579 if ((file->f_mode & FMODE_WRITE) && 2579 if ((file->f_mode & FMODE_WRITE) &&
2580 !(file->f_flags & O_APPEND)) { 2580 (file->f_flags & O_TRUNC)) {
2581 ftrace_graph_count = 0; 2581 ftrace_graph_count = 0;
2582 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs)); 2582 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2583 } 2583 }
@@ -2596,6 +2596,14 @@ ftrace_graph_open(struct inode *inode, struct file *file)
2596} 2596}
2597 2597
2598static int 2598static int
2599ftrace_graph_release(struct inode *inode, struct file *file)
2600{
2601 if (file->f_mode & FMODE_READ)
2602 seq_release(inode, file);
2603 return 0;
2604}
2605
2606static int
2599ftrace_set_func(unsigned long *array, int *idx, char *buffer) 2607ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2600{ 2608{
2601 struct dyn_ftrace *rec; 2609 struct dyn_ftrace *rec;
@@ -2724,9 +2732,10 @@ ftrace_graph_write(struct file *file, const char __user *ubuf,
2724} 2732}
2725 2733
2726static const struct file_operations ftrace_graph_fops = { 2734static const struct file_operations ftrace_graph_fops = {
2727 .open = ftrace_graph_open, 2735 .open = ftrace_graph_open,
2728 .read = seq_read, 2736 .read = seq_read,
2729 .write = ftrace_graph_write, 2737 .write = ftrace_graph_write,
2738 .release = ftrace_graph_release,
2730}; 2739};
2731#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 2740#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2732 2741
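
This and the matching hunks in trace.c and trace_events.c change the reset-on-open test from !(f_flags & O_APPEND) to (f_flags & O_TRUNC): a read-only open (a plain cat) carries neither flag, so the old test clobbered state on every read. A small runnable check of which shell redirections imply which flag:

    #include <fcntl.h>
    #include <stdio.h>

    static const char *on_open(int f_flags)
    {
            /* Old test, !(f_flags & O_APPEND), also fired for O_RDONLY.
             * Keying on O_TRUNC limits the reset to opens that asked to
             * truncate. */
            return (f_flags & O_TRUNC) ? "reset state" : "keep state";
    }

    int main(void)
    {
            printf("echo > file  : %s\n", on_open(O_WRONLY | O_CREAT | O_TRUNC));
            printf("echo >> file : %s\n", on_open(O_WRONLY | O_CREAT | O_APPEND));
            printf("cat file     : %s\n", on_open(O_RDONLY));
            return 0;
    }

The added ftrace_graph_release() in the same diff addresses a separate bug: a seq_open() with no matching seq_release() leaks the seq_file on every close (the trace_stack.c hunk further down fixes the same pattern).
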
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index bf27bb7a63e2..a330513d96ce 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -735,6 +735,7 @@ ring_buffer_free(struct ring_buffer *buffer)
735 735
736 put_online_cpus(); 736 put_online_cpus();
737 737
738 kfree(buffer->buffers);
738 free_cpumask_var(buffer->cpumask); 739 free_cpumask_var(buffer->cpumask);
739 740
740 kfree(buffer); 741 kfree(buffer);
@@ -1785,7 +1786,7 @@ void ring_buffer_discard_commit(struct ring_buffer *buffer,
1785 */ 1786 */
1786 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); 1787 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));
1787 1788
1788 if (!rb_try_to_discard(cpu_buffer, event)) 1789 if (rb_try_to_discard(cpu_buffer, event))
1789 goto out; 1790 goto out;
1790 1791
1791 /* 1792 /*
@@ -2383,7 +2384,6 @@ rb_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2383 * the box. Return the padding, and we will release 2384 * the box. Return the padding, and we will release
2384 * the current locks, and try again. 2385 * the current locks, and try again.
2385 */ 2386 */
2386 rb_advance_reader(cpu_buffer);
2387 return event; 2387 return event;
2388 2388
2389 case RINGBUF_TYPE_TIME_EXTEND: 2389 case RINGBUF_TYPE_TIME_EXTEND:
@@ -2486,7 +2486,7 @@ static inline int rb_ok_to_lock(void)
2486 * buffer too. A one time deal is all you get from reading 2486 * buffer too. A one time deal is all you get from reading
2487 * the ring buffer from an NMI. 2487 * the ring buffer from an NMI.
2488 */ 2488 */
2489 if (likely(!in_nmi() && !oops_in_progress)) 2489 if (likely(!in_nmi()))
2490 return 1; 2490 return 1;
2491 2491
2492 tracing_off_permanent(); 2492 tracing_off_permanent();
@@ -2519,6 +2519,8 @@ ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts)
2519 if (dolock) 2519 if (dolock)
2520 spin_lock(&cpu_buffer->reader_lock); 2520 spin_lock(&cpu_buffer->reader_lock);
2521 event = rb_buffer_peek(buffer, cpu, ts); 2521 event = rb_buffer_peek(buffer, cpu, ts);
2522 if (event && event->type_len == RINGBUF_TYPE_PADDING)
2523 rb_advance_reader(cpu_buffer);
2522 if (dolock) 2524 if (dolock)
2523 spin_unlock(&cpu_buffer->reader_lock); 2525 spin_unlock(&cpu_buffer->reader_lock);
2524 local_irq_restore(flags); 2526 local_irq_restore(flags);
@@ -2590,12 +2592,9 @@ ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts)
2590 spin_lock(&cpu_buffer->reader_lock); 2592 spin_lock(&cpu_buffer->reader_lock);
2591 2593
2592 event = rb_buffer_peek(buffer, cpu, ts); 2594 event = rb_buffer_peek(buffer, cpu, ts);
2593 if (!event) 2595 if (event)
2594 goto out_unlock; 2596 rb_advance_reader(cpu_buffer);
2595
2596 rb_advance_reader(cpu_buffer);
2597 2597
2598 out_unlock:
2599 if (dolock) 2598 if (dolock)
2600 spin_unlock(&cpu_buffer->reader_lock); 2599 spin_unlock(&cpu_buffer->reader_lock);
2601 local_irq_restore(flags); 2600 local_irq_restore(flags);
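
Two behavioural fixes sit in the ring-buffer hunks: rb_try_to_discard() returns nonzero on success, so the branch was inverted, and rb_advance_reader() moves out of the shared peek path into the consumers, since a pure peek must leave the event to be seen again. A toy model of the peek/consume split (structure invented):

    #include <stdio.h>

    struct toybuf { int ev[8]; int head, len; };

    /* peek: report the next event, never move the cursor */
    static int *toy_peek(struct toybuf *b)
    {
            return b->head < b->len ? &b->ev[b->head] : NULL;
    }

    /* consume = peek + advance; only here does the cursor move */
    static int *toy_consume(struct toybuf *b)
    {
            int *e = toy_peek(b);

            if (e)
                    b->head++;
            return e;
    }

    int main(void)
    {
            struct toybuf b = { { 1, 2, 3 }, 0, 3 };

            printf("%d %d\n", *toy_peek(&b), *toy_peek(&b));    /* 1 1 */
            printf("%d %d\n", *toy_consume(&b), *toy_peek(&b)); /* 1 2 */
            return 0;
    }
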
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8bc8d8afea6a..c22b40f8f576 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -848,6 +848,7 @@ tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
848 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) | 848 ((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
849 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0); 849 (need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
850} 850}
851EXPORT_SYMBOL_GPL(tracing_generic_entry_update);
851 852
852struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr, 853struct ring_buffer_event *trace_buffer_lock_reserve(struct trace_array *tr,
853 int type, 854 int type,
@@ -2031,7 +2032,7 @@ static int tracing_open(struct inode *inode, struct file *file)
2031 2032
2032 /* If this file was open for write, then erase contents */ 2033 /* If this file was open for write, then erase contents */
2033 if ((file->f_mode & FMODE_WRITE) && 2034 if ((file->f_mode & FMODE_WRITE) &&
2034 !(file->f_flags & O_APPEND)) { 2035 (file->f_flags & O_TRUNC)) {
2035 long cpu = (long) inode->i_private; 2036 long cpu = (long) inode->i_private;
2036 2037
2037 if (cpu == TRACE_PIPE_ALL_CPU) 2038 if (cpu == TRACE_PIPE_ALL_CPU)
@@ -3085,7 +3086,8 @@ tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3085 break; 3086 break;
3086 } 3087 }
3087 3088
3088 trace_consume(iter); 3089 if (ret != TRACE_TYPE_NO_CONSUME)
3090 trace_consume(iter);
3089 rem -= count; 3091 rem -= count;
3090 if (!find_next_entry_inc(iter)) { 3092 if (!find_next_entry_inc(iter)) {
3091 rem = 0; 3093 rem = 0;
@@ -4233,8 +4235,11 @@ static void __ftrace_dump(bool disable_tracing)
4233 iter.pos = -1; 4235 iter.pos = -1;
4234 4236
4235 if (find_next_entry_inc(&iter) != NULL) { 4237 if (find_next_entry_inc(&iter) != NULL) {
4236 print_trace_line(&iter); 4238 int ret;
4237 trace_consume(&iter); 4239
4240 ret = print_trace_line(&iter);
4241 if (ret != TRACE_TYPE_NO_CONSUME)
4242 trace_consume(&iter);
4238 } 4243 }
4239 4244
4240 trace_printk_seq(&iter.seq); 4245 trace_printk_seq(&iter.seq);
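
Both trace.c hunks teach the consumers to respect a TRACE_TYPE_NO_CONSUME result from print_trace_line() instead of unconditionally discarding the entry that was just declined. A compact sketch of the convention, with invented names and a simplified return type:

    enum print_ret { PRINT_OK, PRINT_NO_CONSUME };

    /* The printer may decline, e.g. when the entry does not fit this pass. */
    static enum print_ret print_line(int entry, int room)
    {
            return entry <= room ? PRINT_OK : PRINT_NO_CONSUME;
    }

    static int drain(const int *entries, int n, int room)
    {
            int i = 0;

            /* Consume only what was actually emitted; a declined entry
             * stays in place for a later retry instead of being dropped. */
            while (i < n && print_line(entries[i], room) != PRINT_NO_CONSUME)
                    i++;
            return i;
    }
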
diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
index 3548ae5cc780..8b9f4f6e9559 100644
--- a/kernel/trace/trace.h
+++ b/kernel/trace/trace.h
@@ -438,10 +438,6 @@ struct trace_entry *tracing_get_trace_entry(struct trace_array *tr,
438struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, 438struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
439 int *ent_cpu, u64 *ent_ts); 439 int *ent_cpu, u64 *ent_ts);
440 440
441void tracing_generic_entry_update(struct trace_entry *entry,
442 unsigned long flags,
443 int pc);
444
445void default_wait_pipe(struct trace_iterator *iter); 441void default_wait_pipe(struct trace_iterator *iter);
446void poll_wait_pipe(struct trace_iterator *iter); 442void poll_wait_pipe(struct trace_iterator *iter);
447 443
diff --git a/kernel/trace/trace_event_profile.c b/kernel/trace/trace_event_profile.c
index 5b5895afecfe..11ba5bb4ed0a 100644
--- a/kernel/trace/trace_event_profile.c
+++ b/kernel/trace/trace_event_profile.c
@@ -14,7 +14,7 @@ int ftrace_profile_enable(int event_id)
14 14
15 mutex_lock(&event_mutex); 15 mutex_lock(&event_mutex);
16 list_for_each_entry(event, &ftrace_events, list) { 16 list_for_each_entry(event, &ftrace_events, list) {
17 if (event->id == event_id) { 17 if (event->id == event_id && event->profile_enable) {
18 ret = event->profile_enable(event); 18 ret = event->profile_enable(event);
19 break; 19 break;
20 } 20 }
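
Not every ftrace event supplies a profile_enable() callback, so matching on the id alone could call through a NULL function pointer; the same guard appears below in trace_events.c, which only creates the per-event id file when the callback exists. A minimal sketch of guarding an optional callback:

    struct event {
            int id;
            int (*profile_enable)(struct event *);  /* optional, may be NULL */
    };

    static int enable_profile(struct event *ev, int want_id)
    {
            /* Matching on the id alone would call through NULL here. */
            if (ev->id == want_id && ev->profile_enable)
                    return ev->profile_enable(ev);
            return -1;      /* illustrative "not supported" */
    }
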
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index 53c8fd376a88..e75276a49cf5 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -376,7 +376,7 @@ ftrace_event_seq_open(struct inode *inode, struct file *file)
376 const struct seq_operations *seq_ops; 376 const struct seq_operations *seq_ops;
377 377
378 if ((file->f_mode & FMODE_WRITE) && 378 if ((file->f_mode & FMODE_WRITE) &&
379 !(file->f_flags & O_APPEND)) 379 (file->f_flags & O_TRUNC))
380 ftrace_clear_events(); 380 ftrace_clear_events();
381 381
382 seq_ops = inode->i_private; 382 seq_ops = inode->i_private;
@@ -940,7 +940,7 @@ event_create_dir(struct ftrace_event_call *call, struct dentry *d_events,
940 entry = trace_create_file("enable", 0644, call->dir, call, 940 entry = trace_create_file("enable", 0644, call->dir, call,
941 enable); 941 enable);
942 942
943 if (call->id) 943 if (call->id && call->profile_enable)
944 entry = trace_create_file("id", 0444, call->dir, call, 944 entry = trace_create_file("id", 0444, call->dir, call,
945 id); 945 id);
946 946
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 936c621bbf46..f32dc9d1ea7b 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -624,9 +624,6 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
624 return -ENOSPC; 624 return -ENOSPC;
625 } 625 }
626 626
627 filter->preds[filter->n_preds] = pred;
628 filter->n_preds++;
629
630 list_for_each_entry(call, &ftrace_events, list) { 627 list_for_each_entry(call, &ftrace_events, list) {
631 628
632 if (!call->define_fields) 629 if (!call->define_fields)
@@ -643,6 +640,9 @@ static int filter_add_subsystem_pred(struct filter_parse_state *ps,
643 } 640 }
644 replace_filter_string(call->filter, filter_string); 641 replace_filter_string(call->filter, filter_string);
645 } 642 }
643
644 filter->preds[filter->n_preds] = pred;
645 filter->n_preds++;
646out: 646out:
647 return err; 647 return err;
648} 648}
@@ -1029,12 +1029,17 @@ static int replace_preds(struct event_subsystem *system,
1029 1029
1030 if (elt->op == OP_AND || elt->op == OP_OR) { 1030 if (elt->op == OP_AND || elt->op == OP_OR) {
1031 pred = create_logical_pred(elt->op); 1031 pred = create_logical_pred(elt->op);
1032 if (!pred)
1033 return -ENOMEM;
1032 if (call) { 1034 if (call) {
1033 err = filter_add_pred(ps, call, pred); 1035 err = filter_add_pred(ps, call, pred);
1034 filter_free_pred(pred); 1036 filter_free_pred(pred);
1035 } else 1037 } else {
1036 err = filter_add_subsystem_pred(ps, system, 1038 err = filter_add_subsystem_pred(ps, system,
1037 pred, filter_string); 1039 pred, filter_string);
1040 if (err)
1041 filter_free_pred(pred);
1042 }
1038 if (err) 1043 if (err)
1039 return err; 1044 return err;
1040 1045
@@ -1048,12 +1053,17 @@ static int replace_preds(struct event_subsystem *system,
1048 } 1053 }
1049 1054
1050 pred = create_pred(elt->op, operand1, operand2); 1055 pred = create_pred(elt->op, operand1, operand2);
1056 if (!pred)
1057 return -ENOMEM;
1051 if (call) { 1058 if (call) {
1052 err = filter_add_pred(ps, call, pred); 1059 err = filter_add_pred(ps, call, pred);
1053 filter_free_pred(pred); 1060 filter_free_pred(pred);
1054 } else 1061 } else {
1055 err = filter_add_subsystem_pred(ps, system, pred, 1062 err = filter_add_subsystem_pred(ps, system, pred,
1056 filter_string); 1063 filter_string);
1064 if (err)
1065 filter_free_pred(pred);
1066 }
1057 if (err) 1067 if (err)
1058 return err; 1068 return err;
1059 1069
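
The filter hunks fix two things: create_pred()/create_logical_pred() can return NULL and was never checked, and on the subsystem path the predicate leaked when filter_add_subsystem_pred() failed, because that callee, unlike filter_add_pred(), does not dispose of it. A hedged ownership sketch with invented stand-ins:

    #include <errno.h>
    #include <stdlib.h>

    struct pred { int op; };

    static struct pred *create_pred(int op)
    {
            struct pred *p = malloc(sizeof(*p));

            if (p)
                    p->op = op;
            return p;               /* may be NULL: check before use */
    }

    /* Stand-in for filter_add_subsystem_pred(): takes ownership only on
     * success, so the caller must free the predicate when it fails. */
    static int add_subsystem_pred(struct pred *p)
    {
            return p->op ? 0 : -EINVAL;
    }

    static int build(int op)
    {
            struct pred *p = create_pred(op);
            int err;

            if (!p)
                    return -ENOMEM; /* the missing NULL check */

            err = add_subsystem_pred(p);
            if (err)
                    free(p);        /* the leak plugged by the hunk */
            return err;
    }
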
diff --git a/kernel/trace/trace_functions_graph.c b/kernel/trace/trace_functions_graph.c
index d2249abafb53..420ec3487579 100644
--- a/kernel/trace/trace_functions_graph.c
+++ b/kernel/trace/trace_functions_graph.c
@@ -843,9 +843,16 @@ print_graph_function(struct trace_iterator *iter)
843 843
844 switch (entry->type) { 844 switch (entry->type) {
845 case TRACE_GRAPH_ENT: { 845 case TRACE_GRAPH_ENT: {
846 struct ftrace_graph_ent_entry *field; 846 /*
847 * print_graph_entry() may consume the current event,
848 * thus @field may become invalid, so we need to save it.
 849 * sizeof(struct ftrace_graph_ent_entry) is very small,
 850 * so it can be safely saved on the stack.
851 */
852 struct ftrace_graph_ent_entry *field, saved;
847 trace_assign_type(field, entry); 853 trace_assign_type(field, entry);
848 return print_graph_entry(field, s, iter); 854 saved = *field;
855 return print_graph_entry(&saved, s, iter);
849 } 856 }
850 case TRACE_GRAPH_RET: { 857 case TRACE_GRAPH_RET: {
851 struct ftrace_graph_ret_entry *field; 858 struct ftrace_graph_ret_entry *field;
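
The in-diff comment states the invariant: print_graph_entry() may consume the current event, after which the ring storage behind @field can be recycled, so a by-value copy is taken first. A runnable miniature of copy-before-consume (the "ring slot" is simulated):

    #include <stdio.h>

    struct ent { long func; };

    static struct ent slot = { 0xabc }; /* storage the ring may recycle */

    static void consume(void)
    {
            slot.func = 0xdead;         /* slot reused for the next event */
    }

    int main(void)
    {
            struct ent *field = &slot;
            struct ent saved = *field;  /* small struct: cheap stack copy */

            consume();                  /* *field is now stale... */
            printf("field=%lx saved=%lx\n", field->func, saved.func);
            /* prints field=dead saved=abc: only the copy survived */
            return 0;
    }
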
diff --git a/kernel/trace/trace_printk.c b/kernel/trace/trace_printk.c
index 7b6278110827..687699d365ae 100644
--- a/kernel/trace/trace_printk.c
+++ b/kernel/trace/trace_printk.c
@@ -176,7 +176,7 @@ static int t_show(struct seq_file *m, void *v)
176 const char *str = *fmt; 176 const char *str = *fmt;
177 int i; 177 int i;
178 178
179 seq_printf(m, "0x%lx : \"", (unsigned long)fmt); 179 seq_printf(m, "0x%lx : \"", *(unsigned long *)fmt);
180 180
181 /* 181 /*
182 * Tabs and new lines need to be converted. 182 * Tabs and new lines need to be converted.
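
In t_show() the seq iterator hands over a pointer into the table of format pointers, so (unsigned long)fmt printed the address of the table slot rather than of the format string; the extra dereference reads the pointer stored in the slot. A tiny demonstration (assumes sizeof(char *) == sizeof(unsigned long), as on Linux):

    #include <stdio.h>

    int main(void)
    {
            const char *table[] = { "fmt one", "fmt two" };
            const char **fmt = &table[0];   /* what the iterator provides */

            /* buggy: address of the slot inside table[] */
            printf("0x%lx\n", (unsigned long)fmt);
            /* fixed: the pointer value stored in the slot, i.e. the
             * address of the string itself */
            printf("0x%lx : \"%s\"\n", *(unsigned long *)fmt, *fmt);
            return 0;
    }
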
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index e644af910124..6a2a9d484cd6 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -301,17 +301,14 @@ static const struct seq_operations stack_trace_seq_ops = {
301 301
302static int stack_trace_open(struct inode *inode, struct file *file) 302static int stack_trace_open(struct inode *inode, struct file *file)
303{ 303{
304 int ret; 304 return seq_open(file, &stack_trace_seq_ops);
305
306 ret = seq_open(file, &stack_trace_seq_ops);
307
308 return ret;
309} 305}
310 306
311static const struct file_operations stack_trace_fops = { 307static const struct file_operations stack_trace_fops = {
312 .open = stack_trace_open, 308 .open = stack_trace_open,
313 .read = seq_read, 309 .read = seq_read,
314 .llseek = seq_lseek, 310 .llseek = seq_lseek,
311 .release = seq_release,
315}; 312};
316 313
317int 314int
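
The simplification of stack_trace_open() is cosmetic; the added .release = seq_release is the real fix, since every seq_open() allocates iterator state that only seq_release() frees. A userspace model of why the pairing matters (names invented):

    #include <stdlib.h>

    struct seq_cursor { long pos; };

    /* seq_open-alike: allocates per-open iterator state */
    static struct seq_cursor *my_open(void)
    {
            return calloc(1, sizeof(struct seq_cursor));
    }

    /* seq_release-alike: the file_operations hook that was missing;
     * without it, every open/close cycle leaks one cursor. */
    static void my_release(struct seq_cursor *c)
    {
            free(c);
    }

Each open must be matched by exactly one release, which is what wiring seq_release into the fops table guarantees.
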
diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c
index e66f5e493342..aea321c82fa0 100644
--- a/kernel/trace/trace_stat.c
+++ b/kernel/trace/trace_stat.c
@@ -73,7 +73,7 @@ static struct rb_node *release_next(struct rb_node *node)
73 } 73 }
74} 74}
75 75
76static void reset_stat_session(struct stat_session *session) 76static void __reset_stat_session(struct stat_session *session)
77{ 77{
78 struct rb_node *node = session->stat_root.rb_node; 78 struct rb_node *node = session->stat_root.rb_node;
79 79
@@ -83,10 +83,17 @@ static void reset_stat_session(struct stat_session *session)
83 session->stat_root = RB_ROOT; 83 session->stat_root = RB_ROOT;
84} 84}
85 85
86static void reset_stat_session(struct stat_session *session)
87{
88 mutex_lock(&session->stat_mutex);
89 __reset_stat_session(session);
90 mutex_unlock(&session->stat_mutex);
91}
92
86static void destroy_session(struct stat_session *session) 93static void destroy_session(struct stat_session *session)
87{ 94{
88 debugfs_remove(session->file); 95 debugfs_remove(session->file);
89 reset_stat_session(session); 96 __reset_stat_session(session);
90 mutex_destroy(&session->stat_mutex); 97 mutex_destroy(&session->stat_mutex);
91 kfree(session); 98 kfree(session);
92} 99}
@@ -150,7 +157,7 @@ static int stat_seq_init(struct stat_session *session)
150 int i; 157 int i;
151 158
152 mutex_lock(&session->stat_mutex); 159 mutex_lock(&session->stat_mutex);
153 reset_stat_session(session); 160 __reset_stat_session(session);
154 161
155 if (!ts->stat_cmp) 162 if (!ts->stat_cmp)
156 ts->stat_cmp = dummy_cmp; 163 ts->stat_cmp = dummy_cmp;
@@ -183,7 +190,7 @@ exit:
183 return ret; 190 return ret;
184 191
185exit_free_rbtree: 192exit_free_rbtree:
186 reset_stat_session(session); 193 __reset_stat_session(session);
187 mutex_unlock(&session->stat_mutex); 194 mutex_unlock(&session->stat_mutex);
188 return ret; 195 return ret;
189} 196}
@@ -250,16 +257,21 @@ static const struct seq_operations trace_stat_seq_ops = {
250static int tracing_stat_open(struct inode *inode, struct file *file) 257static int tracing_stat_open(struct inode *inode, struct file *file)
251{ 258{
252 int ret; 259 int ret;
253 260 struct seq_file *m;
254 struct stat_session *session = inode->i_private; 261 struct stat_session *session = inode->i_private;
255 262
263 ret = stat_seq_init(session);
264 if (ret)
265 return ret;
266
256 ret = seq_open(file, &trace_stat_seq_ops); 267 ret = seq_open(file, &trace_stat_seq_ops);
257 if (!ret) { 268 if (ret) {
258 struct seq_file *m = file->private_data; 269 reset_stat_session(session);
259 m->private = session; 270 return ret;
260 ret = stat_seq_init(session);
261 } 271 }
262 272
273 m = file->private_data;
274 m->private = session;
263 return ret; 275 return ret;
264} 276}
265 277
@@ -270,11 +282,9 @@ static int tracing_stat_release(struct inode *i, struct file *f)
270{ 282{
271 struct stat_session *session = i->i_private; 283 struct stat_session *session = i->i_private;
272 284
273 mutex_lock(&session->stat_mutex);
274 reset_stat_session(session); 285 reset_stat_session(session);
275 mutex_unlock(&session->stat_mutex);
276 286
277 return 0; 287 return seq_release(i, f);
278} 288}
279 289
280static const struct file_operations tracing_stat_fops = { 290static const struct file_operations tracing_stat_fops = {
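
The trace_stat rework is the kernel's usual double-underscore split: __reset_stat_session() assumes stat_mutex is already held, while reset_stat_session() takes it, so callers such as stat_seq_init() that already hold the lock cannot deadlock against themselves. A pthread model of the pattern (non-recursive mutex, as in the kernel):

    #include <pthread.h>

    static pthread_mutex_t stat_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int tree_size;

    /* __variant: caller must already hold stat_mutex */
    static void __reset_session(void)
    {
            tree_size = 0;          /* frees the rbtree in the real code */
    }

    /* locking variant for callers arriving without the lock */
    static void reset_session(void)
    {
            pthread_mutex_lock(&stat_mutex);
            __reset_session();
            pthread_mutex_unlock(&stat_mutex);
    }

    static void seq_init(void)
    {
            pthread_mutex_lock(&stat_mutex);
            __reset_session();      /* reset_session() here would deadlock */
            pthread_mutex_unlock(&stat_mutex);
    }
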
diff --git a/kernel/wait.c b/kernel/wait.c
index ea7c3b4275cf..c4bd3d825f35 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,13 +10,14 @@
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/hash.h> 11#include <linux/hash.h>
12 12
13void init_waitqueue_head(wait_queue_head_t *q) 13void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
14{ 14{
15 spin_lock_init(&q->lock); 15 spin_lock_init(&q->lock);
16 lockdep_set_class(&q->lock, key);
16 INIT_LIST_HEAD(&q->task_list); 17 INIT_LIST_HEAD(&q->task_list);
17} 18}
18 19
19EXPORT_SYMBOL(init_waitqueue_head); 20EXPORT_SYMBOL(__init_waitqueue_head);
20 21
21void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait) 22void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
22{ 23{