Diffstat (limited to 'kernel')
-rw-r--r--  kernel/fork.c                 |  21
-rw-r--r--  kernel/futex.c                |  28
-rw-r--r--  kernel/futex_compat.c         |   6
-rw-r--r--  kernel/irq/manage.c           |  27
-rw-r--r--  kernel/irq/numa_migrate.c     |   4
-rw-r--r--  kernel/perf_counter.c         | 592
-rw-r--r--  kernel/sysctl.c               |   7
-rw-r--r--  kernel/time/clockevents.c     |  16
-rw-r--r--  kernel/time/tick-broadcast.c  |   7
-rw-r--r--  kernel/time/timer_list.c      |   2
-rw-r--r--  kernel/trace/blktrace.c       |  12
-rw-r--r--  kernel/trace/ftrace.c         |  17
-rw-r--r--  kernel/trace/trace.c          |  12
-rw-r--r--  kernel/wait.c                 |   5
14 files changed, 479 insertions, 277 deletions
diff --git a/kernel/fork.c b/kernel/fork.c
index 021e1138556e..e6c04d462ab2 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -426,7 +426,6 @@ static struct mm_struct * mm_init(struct mm_struct * mm, struct task_struct *p)
 	init_rwsem(&mm->mmap_sem);
 	INIT_LIST_HEAD(&mm->mmlist);
 	mm->flags = (current->mm) ? current->mm->flags : default_dump_filter;
-	mm->oom_adj = (current->mm) ? current->mm->oom_adj : 0;
 	mm->core_state = NULL;
 	mm->nr_ptes = 0;
 	set_mm_counter(mm, file_rss, 0);
@@ -816,11 +815,8 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 {
 	struct signal_struct *sig;
 
-	if (clone_flags & CLONE_THREAD) {
-		atomic_inc(&current->signal->count);
-		atomic_inc(&current->signal->live);
+	if (clone_flags & CLONE_THREAD)
 		return 0;
-	}
 
 	sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 	tsk->signal = sig;
@@ -878,16 +874,6 @@ void __cleanup_signal(struct signal_struct *sig)
 	kmem_cache_free(signal_cachep, sig);
 }
 
-static void cleanup_signal(struct task_struct *tsk)
-{
-	struct signal_struct *sig = tsk->signal;
-
-	atomic_dec(&sig->live);
-
-	if (atomic_dec_and_test(&sig->count))
-		__cleanup_signal(sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 	unsigned long new_flags = p->flags;
@@ -1240,6 +1226,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	}
 
 	if (clone_flags & CLONE_THREAD) {
+		atomic_inc(&current->signal->count);
+		atomic_inc(&current->signal->live);
 		p->group_leader = current->group_leader;
 		list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
 	}
@@ -1283,7 +1271,8 @@ bad_fork_cleanup_mm:
 	if (p->mm)
 		mmput(p->mm);
 bad_fork_cleanup_signal:
-	cleanup_signal(p);
+	if (!(clone_flags & CLONE_THREAD))
+		__cleanup_signal(p->signal);
 bad_fork_cleanup_sighand:
 	__cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
diff --git a/kernel/futex.c b/kernel/futex.c
index 0672ff88f159..e18cfbdc7190 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -1010,15 +1010,19 @@ void requeue_futex(struct futex_q *q, struct futex_hash_bucket *hb1,
  * requeue_pi_wake_futex() - Wake a task that acquired the lock during requeue
  * q:	the futex_q
  * key:	the key of the requeue target futex
+ * hb:	the hash_bucket of the requeue target futex
  *
  * During futex_requeue, with requeue_pi=1, it is possible to acquire the
  * target futex if it is uncontended or via a lock steal. Set the futex_q key
  * to the requeue target futex so the waiter can detect the wakeup on the right
  * futex, but remove it from the hb and NULL the rt_waiter so it can detect
- * atomic lock acquisition. Must be called with the q->lock_ptr held.
+ * atomic lock acquisition. Set the q->lock_ptr to the requeue target hb->lock
+ * to protect access to the pi_state to fixup the owner later. Must be called
+ * with both q->lock_ptr and hb->lock held.
  */
 static inline
-void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
+void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key,
+			   struct futex_hash_bucket *hb)
 {
 	drop_futex_key_refs(&q->key);
 	get_futex_key_refs(key);
@@ -1030,6 +1034,11 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key)
 	WARN_ON(!q->rt_waiter);
 	q->rt_waiter = NULL;
 
+	q->lock_ptr = &hb->lock;
+#ifdef CONFIG_DEBUG_PI_LIST
+	q->list.plist.lock = &hb->lock;
+#endif
+
 	wake_up_state(q->task, TASK_NORMAL);
 }
 
@@ -1088,7 +1097,7 @@ static int futex_proxy_trylock_atomic(u32 __user *pifutex,
 	ret = futex_lock_pi_atomic(pifutex, hb2, key2, ps, top_waiter->task,
 				   set_waiters);
 	if (ret == 1)
-		requeue_pi_wake_futex(top_waiter, key2);
+		requeue_pi_wake_futex(top_waiter, key2, hb2);
 
 	return ret;
 }
@@ -1247,8 +1256,15 @@ retry_private:
 		if (!match_futex(&this->key, &key1))
 			continue;
 
-		WARN_ON(!requeue_pi && this->rt_waiter);
-		WARN_ON(requeue_pi && !this->rt_waiter);
+		/*
+		 * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always
+		 * be paired with each other and no other futex ops.
+		 */
+		if ((requeue_pi && !this->rt_waiter) ||
+		    (!requeue_pi && this->rt_waiter)) {
+			ret = -EINVAL;
+			break;
+		}
 
 		/*
 		 * Wake nr_wake waiters. For requeue_pi, if we acquired the
@@ -1273,7 +1289,7 @@ retry_private:
 							this->task, 1);
 			if (ret == 1) {
 				/* We got the lock. */
-				requeue_pi_wake_futex(this, &key2);
+				requeue_pi_wake_futex(this, &key2, hb2);
 				continue;
 			} else if (ret) {
 				/* -EDEADLK */
diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
index d607a5b9ee29..235716556bf1 100644
--- a/kernel/futex_compat.c
+++ b/kernel/futex_compat.c
@@ -180,7 +180,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
 	int cmd = op & FUTEX_CMD_MASK;
 
 	if (utime && (cmd == FUTEX_WAIT || cmd == FUTEX_LOCK_PI ||
-		      cmd == FUTEX_WAIT_BITSET)) {
+		      cmd == FUTEX_WAIT_BITSET ||
+		      cmd == FUTEX_WAIT_REQUEUE_PI)) {
 		if (get_compat_timespec(&ts, utime))
 			return -EFAULT;
 		if (!timespec_valid(&ts))
@@ -191,7 +192,8 @@ asmlinkage long compat_sys_futex(u32 __user *uaddr, int op, u32 val,
 			t = ktime_add_safe(ktime_get(), t);
 		tp = &t;
 	}
-	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE)
+	if (cmd == FUTEX_REQUEUE || cmd == FUTEX_CMP_REQUEUE ||
+	    cmd == FUTEX_CMP_REQUEUE_PI || cmd == FUTEX_WAKE_OP)
 		val2 = (int) (unsigned long) utime;
 
 	return do_futex(uaddr, op, val, tp, uaddr2, val2, val3);
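
For reference, the reason FUTEX_WAIT_REQUEUE_PI joins the timeout-parsing branch above while FUTEX_CMP_REQUEUE_PI joins the val2 branch is that the fourth futex() argument is overloaded: wait-style operations read it as a timespec, while requeue/wake-op operations reinterpret the pointer value as an integer. A minimal user-space sketch of that calling convention (illustrative only, not part of the patch; the sys_futex() wrapper is an assumption, since glibc exports no futex() entry point):

	#include <linux/futex.h>
	#include <sys/syscall.h>
	#include <unistd.h>
	#include <stdint.h>
	#include <limits.h>
	#include <time.h>

	/* Thin raw-syscall wrapper (assumed helper, not a libc function). */
	static long sys_futex(uint32_t *uaddr, int op, uint32_t val,
			      void *timeout_or_val2, uint32_t *uaddr2, uint32_t val3)
	{
		return syscall(SYS_futex, uaddr, op, val, timeout_or_val2, uaddr2, val3);
	}

	int main(void)
	{
		uint32_t futex_word = 0, target = 0;
		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

		/* Wait-style op: the fourth argument really is a timespec. */
		sys_futex(&futex_word, FUTEX_WAIT_BITSET, 0, &ts, NULL,
			  FUTEX_BITSET_MATCH_ANY);

		/* Requeue-style op: the same slot carries an integer (val2 = max
		 * number of waiters to requeue), smuggled through the pointer. */
		sys_futex(&futex_word, FUTEX_CMP_REQUEUE, 1,
			  (void *)(unsigned long)INT_MAX, &target, 0);
		return 0;
	}
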
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 61c679db4687..0ec9ed831737 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -607,7 +607,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 		 */
 		get_task_struct(t);
 		new->thread = t;
-		wake_up_process(t);
 	}
 
 	/*
@@ -690,6 +689,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				(int)(new->flags & IRQF_TRIGGER_MASK));
 	}
 
+	new->irq = irq;
 	*old_ptr = new;
 
 	/* Reset broken irq detection when installing new handler */
@@ -707,7 +707,13 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 
 	spin_unlock_irqrestore(&desc->lock, flags);
 
-	new->irq = irq;
+	/*
+	 * Strictly no need to wake it up, but hung_task complains
+	 * when no hard interrupt wakes the thread up.
+	 */
+	if (new->thread)
+		wake_up_process(new->thread);
+
 	register_irq_proc(irq, desc);
 	new->dir = NULL;
 	register_handler_proc(irq, new);
@@ -761,7 +767,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 	struct irqaction *action, **action_ptr;
-	struct task_struct *irqthread;
 	unsigned long flags;
 
 	WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
@@ -809,9 +814,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 			desc->chip->disable(irq);
 	}
 
-	irqthread = action->thread;
-	action->thread = NULL;
-
 	spin_unlock_irqrestore(&desc->lock, flags);
 
 	unregister_handler_proc(irq, action);
@@ -819,12 +821,6 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 	/* Make sure it's not being used on another CPU: */
 	synchronize_irq(irq);
 
-	if (irqthread) {
-		if (!test_bit(IRQTF_DIED, &action->thread_flags))
-			kthread_stop(irqthread);
-		put_task_struct(irqthread);
-	}
-
 #ifdef CONFIG_DEBUG_SHIRQ
 	/*
 	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
@@ -840,6 +836,13 @@ static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
 		local_irq_restore(flags);
 	}
 #endif
+
+	if (action->thread) {
+		if (!test_bit(IRQTF_DIED, &action->thread_flags))
+			kthread_stop(action->thread);
+		put_task_struct(action->thread);
+	}
+
 	return action;
 }
 
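
The __setup_irq()/__free_irq() changes above reorder when the per-action interrupt thread is woken and torn down. For orientation, this is roughly how a driver ends up with such a thread in the first place; a hedged sketch using a made-up "demo" device (request_threaded_irq() is the real API, everything else here is hypothetical):

	#include <linux/interrupt.h>

	/* Hard-IRQ half: runs in interrupt context, just acknowledges the
	 * hardware and asks for the handler thread to be woken. */
	static irqreturn_t demo_hardirq(int irq, void *dev_id)
	{
		return IRQ_WAKE_THREAD;
	}

	/* Threaded half: runs in the kernel thread that __setup_irq() creates
	 * and that __free_irq() now stops and puts after the DEBUG_SHIRQ probe. */
	static irqreturn_t demo_thread_fn(int irq, void *dev_id)
	{
		/* may sleep here, e.g. talk to the device over a slow bus */
		return IRQ_HANDLED;
	}

	static int demo_setup(unsigned int irq, void *dev)
	{
		return request_threaded_irq(irq, demo_hardirq, demo_thread_fn,
					    IRQF_SHARED, "demo", dev);
	}
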
diff --git a/kernel/irq/numa_migrate.c b/kernel/irq/numa_migrate.c
index 2f69bee57bf2..3fd30197da2e 100644
--- a/kernel/irq/numa_migrate.c
+++ b/kernel/irq/numa_migrate.c
@@ -107,8 +107,8 @@ out_unlock:
 
 struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 {
-	/* those all static, do move them */
-	if (desc->irq < NR_IRQS_LEGACY)
+	/* those static or target node is -1, do not move them */
+	if (desc->irq < NR_IRQS_LEGACY || node == -1)
 		return desc;
 
 	if (desc->node != node)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 868102172aa4..f274e1959885 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -88,6 +88,7 @@ void __weak hw_perf_disable(void) { barrier(); }
 void __weak hw_perf_enable(void) { barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu) { barrier(); }
+void __weak hw_perf_counter_setup_online(int cpu) { barrier(); }
 
 int __weak
 hw_perf_group_sched_in(struct perf_counter *group_leader,
@@ -306,6 +307,10 @@ counter_sched_out(struct perf_counter *counter,
 		return;
 
 	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	if (counter->pending_disable) {
+		counter->pending_disable = 0;
+		counter->state = PERF_COUNTER_STATE_OFF;
+	}
 	counter->tstamp_stopped = ctx->time;
 	counter->pmu->disable(counter);
 	counter->oncpu = -1;
@@ -1498,10 +1503,21 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
  */
 static void __perf_counter_read(void *info)
 {
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter *counter = info;
 	struct perf_counter_context *ctx = counter->ctx;
 	unsigned long flags;
 
+	/*
+	 * If this is a task context, we need to check whether it is
+	 * the current task context of this cpu. If not it has been
+	 * scheduled out before the smp call arrived. In that case
+	 * counter->count would have been updated to a recent sample
+	 * when the counter was scheduled out.
+	 */
+	if (ctx->task && cpuctx->task_ctx != ctx)
+		return;
+
 	local_irq_save(flags);
 	if (ctx->is_active)
 		update_context_time(ctx);
@@ -1691,7 +1707,32 @@ static int perf_release(struct inode *inode, struct file *file)
 	return 0;
 }
 
-static u64 perf_counter_read_tree(struct perf_counter *counter)
+static int perf_counter_read_size(struct perf_counter *counter)
+{
+	int entry = sizeof(u64); /* value */
+	int size = 0;
+	int nr = 1;
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		size += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_ID)
+		entry += sizeof(u64);
+
+	if (counter->attr.read_format & PERF_FORMAT_GROUP) {
+		nr += counter->group_leader->nr_siblings;
+		size += sizeof(u64);
+	}
+
+	size += entry * nr;
+
+	return size;
+}
+
+static u64 perf_counter_read_value(struct perf_counter *counter)
 {
 	struct perf_counter *child;
 	u64 total = 0;
@@ -1703,14 +1744,96 @@ static u64 perf_counter_read_tree(struct perf_counter *counter)
 	return total;
 }
 
+static int perf_counter_read_entry(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	int n = 0, count = 0;
+	u64 values[2];
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	count = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, count))
+		return -EFAULT;
+
+	return count;
+}
+
+static int perf_counter_read_group(struct perf_counter *counter,
+				   u64 read_format, char __user *buf)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	int n = 0, size = 0, err = -EFAULT;
+	u64 values[3];
+
+	values[n++] = 1 + leader->nr_siblings;
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = leader->total_time_enabled +
+			atomic64_read(&leader->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = leader->total_time_running +
+			atomic64_read(&leader->child_total_time_running);
+	}
+
+	size = n * sizeof(u64);
+
+	if (copy_to_user(buf, values, size))
+		return -EFAULT;
+
+	err = perf_counter_read_entry(leader, read_format, buf + size);
+	if (err < 0)
+		return err;
+
+	size += err;
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		err = perf_counter_read_entry(sub, read_format,
+					      buf + size);
+		if (err < 0)
+			return err;
+
+		size += err;
+	}
+
+	return size;
+}
+
+static int perf_counter_read_one(struct perf_counter *counter,
+				 u64 read_format, char __user *buf)
+{
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = perf_counter_read_value(counter);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	if (copy_to_user(buf, values, n * sizeof(u64)))
+		return -EFAULT;
+
+	return n * sizeof(u64);
+}
+
 /*
  * Read the performance counter - simple non blocking version for now
  */
 static ssize_t
 perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 {
-	u64 values[4];
-	int n;
+	u64 read_format = counter->attr.read_format;
+	int ret;
 
 	/*
 	 * Return end-of-file for a read on a counter that is in
@@ -1720,28 +1843,18 @@ perf_read_hw(struct perf_counter *counter, char __user *buf, size_t count)
 	if (counter->state == PERF_COUNTER_STATE_ERROR)
 		return 0;
 
+	if (count < perf_counter_read_size(counter))
+		return -ENOSPC;
+
 	WARN_ON_ONCE(counter->ctx->parent_ctx);
 	mutex_lock(&counter->child_mutex);
-	values[0] = perf_counter_read_tree(counter);
-	n = 1;
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-		values[n++] = counter->total_time_enabled +
-			atomic64_read(&counter->child_total_time_enabled);
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-		values[n++] = counter->total_time_running +
-			atomic64_read(&counter->child_total_time_running);
-	if (counter->attr.read_format & PERF_FORMAT_ID)
-		values[n++] = primary_counter_id(counter);
+	if (read_format & PERF_FORMAT_GROUP)
+		ret = perf_counter_read_group(counter, read_format, buf);
+	else
+		ret = perf_counter_read_one(counter, read_format, buf);
 	mutex_unlock(&counter->child_mutex);
 
-	if (count < n * sizeof(u64))
-		return -EINVAL;
-	count = n * sizeof(u64);
-
-	if (copy_to_user(buf, values, count))
-		return -EFAULT;
-
-	return count;
+	return ret;
 }
 
 static ssize_t
@@ -1906,6 +2019,10 @@ int perf_counter_task_disable(void)
 	return 0;
 }
 
+#ifndef PERF_COUNTER_INDEX_OFFSET
+# define PERF_COUNTER_INDEX_OFFSET 0
+#endif
+
 static int perf_counter_index(struct perf_counter *counter)
 {
 	if (counter->state != PERF_COUNTER_STATE_ACTIVE)
@@ -2245,7 +2362,7 @@ static void perf_pending_counter(struct perf_pending_entry *entry)
 
 	if (counter->pending_disable) {
 		counter->pending_disable = 0;
-		perf_counter_disable(counter);
+		__perf_counter_disable(counter);
 	}
 
 	if (counter->pending_wakeup) {
@@ -2630,7 +2747,80 @@ static u32 perf_counter_tid(struct perf_counter *counter, struct task_struct *p)
 	return task_pid_nr_ns(p, counter->ns);
 }
 
-static void perf_counter_output(struct perf_counter *counter, int nmi,
+static void perf_output_read_one(struct perf_output_handle *handle,
+				 struct perf_counter *counter)
+{
+	u64 read_format = counter->attr.read_format;
+	u64 values[4];
+	int n = 0;
+
+	values[n++] = atomic64_read(&counter->count);
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
+		values[n++] = counter->total_time_enabled +
+			atomic64_read(&counter->child_total_time_enabled);
+	}
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
+		values[n++] = counter->total_time_running +
+			atomic64_read(&counter->child_total_time_running);
+	}
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(counter);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+}
+
+/*
+ * XXX PERF_FORMAT_GROUP vs inherited counters seems difficult.
+ */
+static void perf_output_read_group(struct perf_output_handle *handle,
+				   struct perf_counter *counter)
+{
+	struct perf_counter *leader = counter->group_leader, *sub;
+	u64 read_format = counter->attr.read_format;
+	u64 values[5];
+	int n = 0;
+
+	values[n++] = 1 + leader->nr_siblings;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+		values[n++] = leader->total_time_enabled;
+
+	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+		values[n++] = leader->total_time_running;
+
+	if (leader != counter)
+		leader->pmu->read(leader);
+
+	values[n++] = atomic64_read(&leader->count);
+	if (read_format & PERF_FORMAT_ID)
+		values[n++] = primary_counter_id(leader);
+
+	perf_output_copy(handle, values, n * sizeof(u64));
+
+	list_for_each_entry(sub, &leader->sibling_list, list_entry) {
+		n = 0;
+
+		if (sub != counter)
+			sub->pmu->read(sub);
+
+		values[n++] = atomic64_read(&sub->count);
+		if (read_format & PERF_FORMAT_ID)
+			values[n++] = primary_counter_id(sub);
+
+		perf_output_copy(handle, values, n * sizeof(u64));
+	}
+}
+
+static void perf_output_read(struct perf_output_handle *handle,
+			     struct perf_counter *counter)
+{
+	if (counter->attr.read_format & PERF_FORMAT_GROUP)
+		perf_output_read_group(handle, counter);
+	else
+		perf_output_read_one(handle, counter);
+}
+
+void perf_counter_output(struct perf_counter *counter, int nmi,
 				struct perf_sample_data *data)
 {
 	int ret;
@@ -2641,12 +2831,7 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	struct {
 		u32 pid, tid;
 	} tid_entry;
-	struct {
-		u64 id;
-		u64 counter;
-	} group_entry;
 	struct perf_callchain_entry *callchain = NULL;
-	struct perf_tracepoint_record *tp;
 	int callchain_size = 0;
 	u64 time;
 	struct {
@@ -2700,10 +2885,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		header.size += sizeof(u64);
 
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		header.size += sizeof(u64) +
-			counter->nr_siblings * sizeof(group_entry);
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		header.size += perf_counter_read_size(counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		callchain = perf_callchain(data->regs);
@@ -2715,9 +2898,16 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		header.size += sizeof(u64);
 	}
 
-	if (sample_type & PERF_SAMPLE_TP_RECORD) {
-		tp = data->private;
-		header.size += tp->size;
+	if (sample_type & PERF_SAMPLE_RAW) {
+		int size = sizeof(u32);
+
+		if (data->raw)
+			size += data->raw->size;
+		else
+			size += sizeof(u32);
+
+		WARN_ON_ONCE(size & (sizeof(u64)-1));
+		header.size += size;
 	}
 
 	ret = perf_output_begin(&handle, counter, header.size, nmi, 1);
@@ -2753,26 +2943,8 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 	if (sample_type & PERF_SAMPLE_PERIOD)
 		perf_output_put(&handle, data->period);
 
-	/*
-	 * XXX PERF_SAMPLE_GROUP vs inherited counters seems difficult.
-	 */
-	if (sample_type & PERF_SAMPLE_GROUP) {
-		struct perf_counter *leader, *sub;
-		u64 nr = counter->nr_siblings;
-
-		perf_output_put(&handle, nr);
-
-		leader = counter->group_leader;
-		list_for_each_entry(sub, &leader->sibling_list, list_entry) {
-			if (sub != counter)
-				sub->pmu->read(sub);
-
-			group_entry.id = primary_counter_id(sub);
-			group_entry.counter = atomic64_read(&sub->count);
-
-			perf_output_put(&handle, group_entry);
-		}
-	}
+	if (sample_type & PERF_SAMPLE_READ)
+		perf_output_read(&handle, counter);
 
 	if (sample_type & PERF_SAMPLE_CALLCHAIN) {
 		if (callchain)
@@ -2783,8 +2955,21 @@ static void perf_counter_output(struct perf_counter *counter, int nmi,
 		}
 	}
 
-	if (sample_type & PERF_SAMPLE_TP_RECORD)
-		perf_output_copy(&handle, tp->record, tp->size);
+	if (sample_type & PERF_SAMPLE_RAW) {
+		if (data->raw) {
+			perf_output_put(&handle, data->raw->size);
+			perf_output_copy(&handle, data->raw->data, data->raw->size);
+		} else {
+			struct {
+				u32 size;
+				u32 data;
+			} raw = {
+				.size = sizeof(u32),
+				.data = 0,
+			};
+			perf_output_put(&handle, raw);
+		}
+	}
 
 	perf_output_end(&handle);
 }
@@ -2798,8 +2983,6 @@ struct perf_read_event {
 
 	u32 pid;
 	u32 tid;
-	u64 value;
-	u64 format[3];
 };
 
 static void
@@ -2811,34 +2994,20 @@ perf_counter_read_event(struct perf_counter *counter,
 		.header = {
 			.type = PERF_EVENT_READ,
 			.misc = 0,
-			.size = sizeof(event) - sizeof(event.format),
+			.size = sizeof(event) + perf_counter_read_size(counter),
 		},
 		.pid = perf_counter_pid(counter, task),
 		.tid = perf_counter_tid(counter, task),
-		.value = atomic64_read(&counter->count),
 	};
-	int ret, i = 0;
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_enabled;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = counter->total_time_running;
-	}
-
-	if (counter->attr.read_format & PERF_FORMAT_ID) {
-		event.header.size += sizeof(u64);
-		event.format[i++] = primary_counter_id(counter);
-	}
+	int ret;
 
 	ret = perf_output_begin(&handle, counter, event.header.size, 0, 0);
 	if (ret)
 		return;
 
-	perf_output_copy(&handle, &event, event.header.size);
+	perf_output_put(&handle, event);
+	perf_output_read(&handle, counter);
+
 	perf_output_end(&handle);
 }
 
@@ -2849,7 +3018,8 @@ perf_counter_read_event(struct perf_counter *counter,
  */
 
 struct perf_task_event {
 	struct task_struct *task;
+	struct perf_counter_context *task_ctx;
 
 	struct {
 		struct perf_event_header header;
@@ -2873,10 +3043,10 @@ static void perf_counter_task_output(struct perf_counter *counter,
 		return;
 
 	task_event->event.pid = perf_counter_pid(counter, task);
-	task_event->event.ppid = perf_counter_pid(counter, task->real_parent);
+	task_event->event.ppid = perf_counter_pid(counter, current);
 
 	task_event->event.tid = perf_counter_tid(counter, task);
-	task_event->event.ptid = perf_counter_tid(counter, task->real_parent);
+	task_event->event.ptid = perf_counter_tid(counter, current);
 
 	perf_output_put(&handle, task_event->event);
 	perf_output_end(&handle);
@@ -2909,24 +3079,23 @@ static void perf_counter_task_ctx(struct perf_counter_context *ctx,
 static void perf_counter_task_event(struct perf_task_event *task_event)
 {
 	struct perf_cpu_context *cpuctx;
-	struct perf_counter_context *ctx;
+	struct perf_counter_context *ctx = task_event->task_ctx;
 
 	cpuctx = &get_cpu_var(perf_cpu_context);
 	perf_counter_task_ctx(&cpuctx->ctx, task_event);
 	put_cpu_var(perf_cpu_context);
 
 	rcu_read_lock();
-	/*
-	 * doesn't really matter which of the child contexts the
-	 * events ends up in.
-	 */
-	ctx = rcu_dereference(current->perf_counter_ctxp);
+	if (!ctx)
+		ctx = rcu_dereference(task_event->task->perf_counter_ctxp);
 	if (ctx)
 		perf_counter_task_ctx(ctx, task_event);
 	rcu_read_unlock();
 }
 
-static void perf_counter_task(struct task_struct *task, int new)
+static void perf_counter_task(struct task_struct *task,
+			      struct perf_counter_context *task_ctx,
+			      int new)
 {
 	struct perf_task_event task_event;
 
@@ -2936,8 +3105,9 @@ static void perf_counter_task(struct task_struct *task, int new)
 		return;
 
 	task_event = (struct perf_task_event){
 		.task = task,
-		.event = {
+		.task_ctx = task_ctx,
+		.event = {
 			.header = {
 				.type = new ? PERF_EVENT_FORK : PERF_EVENT_EXIT,
 				.misc = 0,
@@ -2955,7 +3125,7 @@ static void perf_counter_task(struct task_struct *task, int new)
 
 void perf_counter_fork(struct task_struct *task)
 {
-	perf_counter_task(task, 1);
+	perf_counter_task(task, NULL, 1);
 }
 
 /*
@@ -3344,125 +3514,111 @@ int perf_counter_overflow(struct perf_counter *counter, int nmi,
  * Generic software counter infrastructure
  */
 
-static void perf_swcounter_update(struct perf_counter *counter)
+/*
+ * We directly increment counter->count and keep a second value in
+ * counter->hw.period_left to count intervals. This period counter
+ * is kept in the range [-sample_period, 0] so that we can use the
+ * sign as trigger.
+ */
+
+static u64 perf_swcounter_set_period(struct perf_counter *counter)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
-	u64 prev, now;
-	s64 delta;
+	u64 period = hwc->last_period;
+	u64 nr, offset;
+	s64 old, val;
+
+	hwc->last_period = hwc->sample_period;
 
 again:
-	prev = atomic64_read(&hwc->prev_count);
-	now = atomic64_read(&hwc->count);
-	if (atomic64_cmpxchg(&hwc->prev_count, prev, now) != prev)
-		goto again;
+	old = val = atomic64_read(&hwc->period_left);
+	if (val < 0)
+		return 0;
 
-	delta = now - prev;
+	nr = div64_u64(period + val, period);
+	offset = nr * period;
+	val -= offset;
+	if (atomic64_cmpxchg(&hwc->period_left, old, val) != old)
+		goto again;
 
-	atomic64_add(delta, &counter->count);
-	atomic64_sub(delta, &hwc->period_left);
+	return nr;
 }
 
-static void perf_swcounter_set_period(struct perf_counter *counter)
+static void perf_swcounter_overflow(struct perf_counter *counter,
+				    int nmi, struct perf_sample_data *data)
 {
 	struct hw_perf_counter *hwc = &counter->hw;
-	s64 left = atomic64_read(&hwc->period_left);
-	s64 period = hwc->sample_period;
+	u64 overflow;
 
-	if (unlikely(left <= -period)) {
-		left = period;
-		atomic64_set(&hwc->period_left, left);
-		hwc->last_period = period;
-	}
+	data->period = counter->hw.last_period;
+	overflow = perf_swcounter_set_period(counter);
 
-	if (unlikely(left <= 0)) {
-		left += period;
-		atomic64_add(period, &hwc->period_left);
-		hwc->last_period = period;
-	}
+	if (hwc->interrupts == MAX_INTERRUPTS)
+		return;
 
-	atomic64_set(&hwc->prev_count, -left);
-	atomic64_set(&hwc->count, -left);
+	for (; overflow; overflow--) {
+		if (perf_counter_overflow(counter, nmi, data)) {
+			/*
+			 * We inhibit the overflow from happening when
+			 * hwc->interrupts == MAX_INTERRUPTS.
+			 */
+			break;
+		}
+	}
 }
 
-static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+static void perf_swcounter_unthrottle(struct perf_counter *counter)
 {
-	enum hrtimer_restart ret = HRTIMER_RESTART;
-	struct perf_sample_data data;
-	struct perf_counter *counter;
-	u64 period;
-
-	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
-	counter->pmu->read(counter);
-
-	data.addr = 0;
-	data.regs = get_irq_regs();
 	/*
-	 * In case we exclude kernel IPs or are somehow not in interrupt
-	 * context, provide the next best thing, the user IP.
+	 * Nothing to do, we already reset hwc->interrupts.
 	 */
-	if ((counter->attr.exclude_kernel || !data.regs) &&
-			!counter->attr.exclude_user)
-		data.regs = task_pt_regs(current);
+}
 
-	if (data.regs) {
-		if (perf_counter_overflow(counter, 0, &data))
-			ret = HRTIMER_NORESTART;
-	}
+static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
+			       int nmi, struct perf_sample_data *data)
+{
+	struct hw_perf_counter *hwc = &counter->hw;
 
-	period = max_t(u64, 10000, counter->hw.sample_period);
-	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+	atomic64_add(nr, &counter->count);
 
-	return ret;
-}
+	if (!hwc->sample_period)
+		return;
 
-static void perf_swcounter_overflow(struct perf_counter *counter,
-				    int nmi, struct perf_sample_data *data)
-{
-	data->period = counter->hw.last_period;
+	if (!data->regs)
+		return;
 
-	perf_swcounter_update(counter);
-	perf_swcounter_set_period(counter);
-	if (perf_counter_overflow(counter, nmi, data))
-		/* soft-disable the counter */
-		;
+	if (!atomic64_add_negative(nr, &hwc->period_left))
+		perf_swcounter_overflow(counter, nmi, data);
 }
 
 static int perf_swcounter_is_counting(struct perf_counter *counter)
 {
-	struct perf_counter_context *ctx;
-	unsigned long flags;
-	int count;
-
+	/*
+	 * The counter is active, we're good!
+	 */
 	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
 		return 1;
 
+	/*
+	 * The counter is off/error, not counting.
+	 */
 	if (counter->state != PERF_COUNTER_STATE_INACTIVE)
 		return 0;
 
 	/*
-	 * If the counter is inactive, it could be just because
-	 * its task is scheduled out, or because it's in a group
-	 * which could not go on the PMU. We want to count in
-	 * the first case but not the second. If the context is
-	 * currently active then an inactive software counter must
-	 * be the second case. If it's not currently active then
-	 * we need to know whether the counter was active when the
-	 * context was last active, which we can determine by
-	 * comparing counter->tstamp_stopped with ctx->time.
-	 *
-	 * We are within an RCU read-side critical section,
-	 * which protects the existence of *ctx.
+	 * The counter is inactive, if the context is active
+	 * we're part of a group that didn't make it on the 'pmu',
+	 * not counting.
 	 */
-	ctx = counter->ctx;
-	spin_lock_irqsave(&ctx->lock, flags);
-	count = 1;
-	/* Re-check state now we have the lock */
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
-	    counter->ctx->is_active ||
-	    counter->tstamp_stopped < ctx->time)
-		count = 0;
-	spin_unlock_irqrestore(&ctx->lock, flags);
-	return count;
+	if (counter->ctx->is_active)
+		return 0;
+
+	/*
+	 * We're inactive and the context is too, this means the
+	 * task is scheduled out, we're counting events that happen
+	 * to us, like migration events.
+	 */
+	return 1;
 }
 
@@ -3488,15 +3644,6 @@ static int perf_swcounter_match(struct perf_counter *counter,
 	return 1;
 }
 
-static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
-			       int nmi, struct perf_sample_data *data)
-{
-	int neg = atomic64_add_negative(nr, &counter->hw.count);
-
-	if (counter->hw.sample_period && !neg && data->regs)
-		perf_swcounter_overflow(counter, nmi, data);
-}
-
 static void perf_swcounter_ctx_event(struct perf_counter_context *ctx,
 				     enum perf_type_id type,
 				     u32 event, u64 nr, int nmi,
@@ -3575,27 +3722,66 @@ void __perf_swcounter_event(u32 event, u64 nr, int nmi,
 
 static void perf_swcounter_read(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }
 
 static int perf_swcounter_enable(struct perf_counter *counter)
 {
-	perf_swcounter_set_period(counter);
+	struct hw_perf_counter *hwc = &counter->hw;
+
+	if (hwc->sample_period) {
+		hwc->last_period = hwc->sample_period;
+		perf_swcounter_set_period(counter);
+	}
 	return 0;
 }
 
 static void perf_swcounter_disable(struct perf_counter *counter)
 {
-	perf_swcounter_update(counter);
 }
 
 static const struct pmu perf_ops_generic = {
 	.enable = perf_swcounter_enable,
 	.disable = perf_swcounter_disable,
 	.read = perf_swcounter_read,
+	.unthrottle = perf_swcounter_unthrottle,
 };
 
 /*
+ * hrtimer based swcounter callback
+ */
+
+static enum hrtimer_restart perf_swcounter_hrtimer(struct hrtimer *hrtimer)
+{
+	enum hrtimer_restart ret = HRTIMER_RESTART;
+	struct perf_sample_data data;
+	struct perf_counter *counter;
+	u64 period;
+
+	counter = container_of(hrtimer, struct perf_counter, hw.hrtimer);
+	counter->pmu->read(counter);
+
+	data.addr = 0;
+	data.regs = get_irq_regs();
+	/*
+	 * In case we exclude kernel IPs or are somehow not in interrupt
+	 * context, provide the next best thing, the user IP.
+	 */
+	if ((counter->attr.exclude_kernel || !data.regs) &&
+			!counter->attr.exclude_user)
+		data.regs = task_pt_regs(current);
+
+	if (data.regs) {
+		if (perf_counter_overflow(counter, 0, &data))
+			ret = HRTIMER_NORESTART;
+	}
+
+	period = max_t(u64, 10000, counter->hw.sample_period);
+	hrtimer_forward_now(hrtimer, ns_to_ktime(period));
+
+	return ret;
+}
+
+/*
  * Software counter: cpu wall time clock
  */
 
@@ -3715,15 +3901,15 @@ static const struct pmu perf_ops_task_clock = {
 void perf_tpcounter_event(int event_id, u64 addr, u64 count, void *record,
 			  int entry_size)
 {
-	struct perf_tracepoint_record tp = {
+	struct perf_raw_record raw = {
 		.size = entry_size,
-		.record = record,
+		.data = record,
 	};
 
 	struct perf_sample_data data = {
 		.regs = get_irq_regs(),
 		.addr = addr,
-		.private = &tp,
+		.raw = &raw,
 	};
 
 	if (!data.regs)
@@ -3743,6 +3929,14 @@ static void tp_perf_counter_destroy(struct perf_counter *counter)
 
 static const struct pmu *tp_perf_counter_init(struct perf_counter *counter)
 {
+	/*
+	 * Raw tracepoint data is a severe data leak, only allow root to
+	 * have these.
+	 */
+	if ((counter->attr.sample_type & PERF_SAMPLE_RAW) &&
+			!capable(CAP_SYS_ADMIN))
+		return ERR_PTR(-EPERM);
+
 	if (ftrace_profile_enable(counter->attr.config))
 		return NULL;
 
@@ -3876,9 +4070,9 @@ perf_counter_alloc(struct perf_counter_attr *attr,
 	atomic64_set(&hwc->period_left, hwc->sample_period);
 
 	/*
-	 * we currently do not support PERF_SAMPLE_GROUP on inherited counters
+	 * we currently do not support PERF_FORMAT_GROUP on inherited counters
 	 */
-	if (attr->inherit && (attr->sample_type & PERF_SAMPLE_GROUP))
+	if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
 		goto done;
 
 	switch (attr->type) {
@@ -4285,7 +4479,7 @@ void perf_counter_exit_task(struct task_struct *child)
 	unsigned long flags;
 
 	if (likely(!child->perf_counter_ctxp)) {
-		perf_counter_task(child, 0);
+		perf_counter_task(child, NULL, 0);
 		return;
 	}
 
@@ -4305,6 +4499,7 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	spin_lock(&child_ctx->lock);
+	child->perf_counter_ctxp = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get
 	 * swapped to another process while we're removing all
@@ -4318,9 +4513,7 @@ void perf_counter_exit_task(struct task_struct *child)
 	 * won't get any samples after PERF_EVENT_EXIT. We can however still
 	 * get a few PERF_EVENT_READ events.
	 */
-	perf_counter_task(child, 0);
-
-	child->perf_counter_ctxp = NULL;
+	perf_counter_task(child, child_ctx, 0);
 
 	/*
 	 * We can recurse on the same lock type through:
@@ -4541,6 +4734,11 @@ perf_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu)
 		perf_counter_init_cpu(cpu);
 		break;
 
+	case CPU_ONLINE:
+	case CPU_ONLINE_FROZEN:
+		hw_perf_counter_setup_online(cpu);
+		break;
+
 	case CPU_DOWN_PREPARE:
 	case CPU_DOWN_PREPARE_FROZEN:
 		perf_counter_exit_cpu(cpu);
@@ -4565,6 +4763,8 @@ void __init perf_counter_init(void)
 {
 	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_UP_PREPARE,
 			(void *)(long)smp_processor_id());
+	perf_cpu_notify(&perf_cpu_nb, (unsigned long)CPU_ONLINE,
+			(void *)(long)smp_processor_id());
 	register_cpu_notifier(&perf_cpu_nb);
 }
 
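
The read-side rework above (perf_counter_read_size(), perf_counter_read_one()/_group() and perf_output_read()) fixes the byte layout that a read() on a counter fd returns, depending on attr.read_format. A rough sketch of that layout as the new helpers emit it (illustrative structs, not copied from the patch; optional fields are only present when the corresponding PERF_FORMAT_* bit is set):

	#include <linux/types.h>

	/* read_format without PERF_FORMAT_GROUP: one record for this counter. */
	struct read_format_one {
		__u64 value;		/* perf_counter_read_value() */
		__u64 time_enabled;	/* if PERF_FORMAT_TOTAL_TIME_ENABLED */
		__u64 time_running;	/* if PERF_FORMAT_TOTAL_TIME_RUNNING */
		__u64 id;		/* if PERF_FORMAT_ID */
	};

	/* read_format with PERF_FORMAT_GROUP: the whole group in one read.
	 * The time fields appear once, then one { value, id } entry per
	 * member, leader first. */
	struct read_format_group {
		__u64 nr;		/* 1 + group_leader->nr_siblings */
		__u64 time_enabled;	/* if PERF_FORMAT_TOTAL_TIME_ENABLED */
		__u64 time_running;	/* if PERF_FORMAT_TOTAL_TIME_RUNNING */
		struct {
			__u64 value;
			__u64 id;	/* if PERF_FORMAT_ID */
		} cntr[];		/* nr entries */
	};
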
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 98e02328c67d..58be76017fd0 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -49,6 +49,7 @@
 #include <linux/acpi.h>
 #include <linux/reboot.h>
 #include <linux/ftrace.h>
+#include <linux/security.h>
 #include <linux/slow-work.h>
 #include <linux/perf_counter.h>
 
@@ -1306,10 +1307,10 @@ static struct ctl_table vm_table[] = {
 	{
 		.ctl_name = CTL_UNNUMBERED,
 		.procname = "mmap_min_addr",
-		.data = &mmap_min_addr,
+		.data = &dac_mmap_min_addr,
 		.maxlen = sizeof(unsigned long),
 		.mode = 0644,
-		.proc_handler = &proc_doulongvec_minmax,
+		.proc_handler = &mmap_min_addr_handler,
 	},
 #ifdef CONFIG_NUMA
 	{
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index a6dcd67b041d..620b58abdc32 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -137,11 +137,12 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
  */
 int clockevents_register_notifier(struct notifier_block *nb)
 {
+	unsigned long flags;
 	int ret;
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 	ret = raw_notifier_chain_register(&clockevents_chain, nb);
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 
 	return ret;
 }
@@ -178,16 +179,18 @@ static void clockevents_notify_released(void)
  */
 void clockevents_register_device(struct clock_event_device *dev)
 {
+	unsigned long flags;
+
 	BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
 	BUG_ON(!dev->cpumask);
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 
 	list_add(&dev->list, &clockevent_devices);
 	clockevents_do_notify(CLOCK_EVT_NOTIFY_ADD, dev);
 	clockevents_notify_released();
 
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_register_device);
 
@@ -235,8 +238,9 @@ void clockevents_exchange_device(struct clock_event_device *old,
 void clockevents_notify(unsigned long reason, void *arg)
 {
 	struct list_head *node, *tmp;
+	unsigned long flags;
 
-	spin_lock(&clockevents_lock);
+	spin_lock_irqsave(&clockevents_lock, flags);
 	clockevents_do_notify(reason, arg);
 
 	switch (reason) {
@@ -251,7 +255,7 @@ void clockevents_notify(unsigned long reason, void *arg)
 	default:
 		break;
 	}
-	spin_unlock(&clockevents_lock);
+	spin_unlock_irqrestore(&clockevents_lock, flags);
 }
 EXPORT_SYMBOL_GPL(clockevents_notify);
 #endif
diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
index 877dbedc3118..c2ec25087a35 100644
--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -205,11 +205,11 @@ static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
  * Powerstate information: The system enters/leaves a state, where
  * affected devices might stop
  */
-static void tick_do_broadcast_on_off(void *why)
+static void tick_do_broadcast_on_off(unsigned long *reason)
 {
 	struct clock_event_device *bc, *dev;
 	struct tick_device *td;
-	unsigned long flags, *reason = why;
+	unsigned long flags;
 	int cpu, bc_stopped;
 
 	spin_lock_irqsave(&tick_broadcast_lock, flags);
@@ -276,8 +276,7 @@ void tick_broadcast_on_off(unsigned long reason, int *oncpu)
 		printk(KERN_ERR "tick-broadcast: ignoring broadcast for "
 		       "offline CPU #%d\n", *oncpu);
 	else
-		smp_call_function_single(*oncpu, tick_do_broadcast_on_off,
-					 &reason, 1);
+		tick_do_broadcast_on_off(&reason);
 }
 
 /*
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index a999b92a1277..fddd69d16e03 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -286,7 +286,7 @@ static int __init init_timer_list_procfs(void)
 {
 	struct proc_dir_entry *pe;
 
-	pe = proc_create("timer_list", 0644, NULL, &timer_list_fops);
+	pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
 	if (!pe)
 		return -ENOMEM;
 	return 0;
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 1090b0aed9ba..7a34cb563fec 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -267,8 +267,8 @@ static void blk_trace_free(struct blk_trace *bt)
 {
 	debugfs_remove(bt->msg_file);
 	debugfs_remove(bt->dropped_file);
-	debugfs_remove(bt->dir);
 	relay_close(bt->rchan);
+	debugfs_remove(bt->dir);
 	free_percpu(bt->sequence);
 	free_percpu(bt->msg_data);
 	kfree(bt);
@@ -378,18 +378,8 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
 
 static int blk_remove_buf_file_callback(struct dentry *dentry)
 {
-	struct dentry *parent = dentry->d_parent;
 	debugfs_remove(dentry);
 
-	/*
-	 * this will fail for all but the last file, but that is ok. what we
-	 * care about is the top level buts->name directory going away, when
-	 * the last trace file is gone. Then we don't have to rmdir() that
-	 * manually on trace stop, so it nicely solves the issue with
-	 * force killing of running traces.
-	 */
-
-	debugfs_remove(parent);
 	return 0;
 }
 
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 1e1d23c26308..25edd5cc5935 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -2278,7 +2278,11 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 	read++;
 	cnt--;
 
-	if (!(iter->flags & ~FTRACE_ITER_CONT)) {
+	/*
+	 * If the parser haven't finished with the last write,
+	 * continue reading the user input without skipping spaces.
+	 */
+	if (!(iter->flags & FTRACE_ITER_CONT)) {
 		/* skip white space */
 		while (cnt && isspace(ch)) {
 			ret = get_user(ch, ubuf++);
@@ -2288,8 +2292,9 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 			cnt--;
 		}
 
+		/* only spaces were written */
 		if (isspace(ch)) {
-			file->f_pos += read;
+			*ppos += read;
 			ret = read;
 			goto out;
 		}
@@ -2319,12 +2324,12 @@ ftrace_regex_write(struct file *file, const char __user *ubuf,
 		if (ret)
 			goto out;
 		iter->buffer_idx = 0;
-	} else
+	} else {
 		iter->flags |= FTRACE_ITER_CONT;
+		iter->buffer[iter->buffer_idx++] = ch;
+	}
 
-
-	file->f_pos += read;
-
+	*ppos += read;
 	ret = read;
  out:
 	mutex_unlock(&ftrace_regex_lock);
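
The switch from file->f_pos to *ppos in ftrace_regex_write() matters because a ->write() handler is given the position the VFS is actually using for this call; updating file->f_pos behind the VFS's back can corrupt the position seen by pwrite()-style callers. A minimal sketch of the corrected pattern (hypothetical handler, not from the patch):

	static ssize_t demo_write(struct file *file, const char __user *ubuf,
				  size_t cnt, loff_t *ppos)
	{
		/* ... consume cnt bytes from ubuf ... */

		*ppos += cnt;	/* advance the position handed in by the VFS */
		return cnt;	/* report how much was consumed */
	}
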
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c22b40f8f576..8c358395d338 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -3896,17 +3896,9 @@ trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
 	if (ret < 0)
 		return ret;
 
-	switch (val) {
-	case 0:
-		trace_flags &= ~(1 << index);
-		break;
-	case 1:
-		trace_flags |= 1 << index;
-		break;
-
-	default:
+	if (val != 0 && val != 1)
 		return -EINVAL;
-	}
+	set_tracer_flags(1 << index, val);
 
 	*ppos += cnt;
 
diff --git a/kernel/wait.c b/kernel/wait.c
index ea7c3b4275cf..c4bd3d825f35 100644
--- a/kernel/wait.c
+++ b/kernel/wait.c
@@ -10,13 +10,14 @@
 #include <linux/wait.h>
 #include <linux/hash.h>
 
-void init_waitqueue_head(wait_queue_head_t *q)
+void __init_waitqueue_head(wait_queue_head_t *q, struct lock_class_key *key)
 {
 	spin_lock_init(&q->lock);
+	lockdep_set_class(&q->lock, key);
 	INIT_LIST_HEAD(&q->task_list);
 }
 
-EXPORT_SYMBOL(init_waitqueue_head);
+EXPORT_SYMBOL(__init_waitqueue_head);
 
 void add_wait_queue(wait_queue_head_t *q, wait_queue_t *wait)
 {