Diffstat (limited to 'kernel/perf_counter.c')

 kernel/perf_counter.c | 39 +++++++++++++++++++++------------------
 1 file changed, 21 insertions(+), 18 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index ff8b4636f845..df319c48c52b 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -16,8 +16,9 @@
 #include <linux/file.h>
 #include <linux/poll.h>
 #include <linux/sysfs.h>
-#include <linux/ptrace.h>
+#include <linux/dcache.h>
 #include <linux/percpu.h>
+#include <linux/ptrace.h>
 #include <linux/vmstat.h>
 #include <linux/hardirq.h>
 #include <linux/rculist.h>
@@ -26,7 +27,6 @@
 #include <linux/anon_inodes.h>
 #include <linux/kernel_stat.h>
 #include <linux/perf_counter.h>
-#include <linux/dcache.h>
 
 #include <asm/irq_regs.h>
 
@@ -65,7 +65,9 @@ void __weak hw_perf_disable(void)		{ barrier(); }
 void __weak hw_perf_enable(void)		{ barrier(); }
 
 void __weak hw_perf_counter_setup(int cpu)	{ barrier(); }
-int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
+
+int __weak
+hw_perf_group_sched_in(struct perf_counter *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_counter_context *ctx, int cpu)
 {
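
The stubs touched here use the kernel's __weak annotation: the perf core provides a no-op fallback, and an architecture that actually implements group scheduling links in a strong definition that overrides it. A minimal userspace sketch of that link-time override (hw_setup is a hypothetical name, not from this patch):

    #include <stdio.h>

    /* Weak fallback: chosen only if no strong hw_setup() is linked in,
     * just like the __weak stubs in perf_counter.c. */
    int __attribute__((weak)) hw_setup(void)
    {
            return 0;
    }

    int main(void)
    {
            /* Prints 0 here; linking another object file that defines
             * a strong hw_setup() would silently replace the weak one. */
            printf("hw_setup() = %d\n", hw_setup());
            return 0;
    }
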
@@ -127,8 +129,8 @@ static void put_ctx(struct perf_counter_context *ctx)
  * This has to cope with with the fact that until it is locked,
  * the context could get moved to another task.
  */
-static struct perf_counter_context *perf_lock_task_context(
-		struct task_struct *task, unsigned long *flags)
+static struct perf_counter_context *
+perf_lock_task_context(struct task_struct *task, unsigned long *flags)
 {
	struct perf_counter_context *ctx;
 
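
perf_lock_task_context(), re-wrapped above, is a lock-and-revalidate loop: the context pointer is read under rcu_read_lock(), the context's lock is taken, and the pointer is re-checked in case the context moved to another task before the lock was held. A simplified userspace sketch of that shape (hypothetical names; a pthread mutex stands in for the irq-safe spinlock, an atomic slot for task->perf_counter_ctxp, and RCU, which keeps the object alive across the race in the kernel, is assumed away):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stddef.h>

    struct ctx {
            pthread_mutex_t lock;
    };

    struct ctx *lock_ctx(_Atomic(struct ctx *) *slot)
    {
            struct ctx *c;

    retry:
            c = atomic_load(slot);
            if (c == NULL)
                    return NULL;
            pthread_mutex_lock(&c->lock);
            if (atomic_load(slot) != c) {
                    /* The slot was retargeted before we got the lock:
                     * drop it and try again. */
                    pthread_mutex_unlock(&c->lock);
                    goto retry;
            }
            return c;       /* locked, and still the current context */
    }

    int main(void)
    {
            struct ctx c0 = { .lock = PTHREAD_MUTEX_INITIALIZER };
            _Atomic(struct ctx *) slot = &c0;
            struct ctx *c = lock_ctx(&slot);

            if (c)
                    pthread_mutex_unlock(&c->lock);
            return 0;
    }
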
@@ -1330,9 +1332,9 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 
 static struct perf_counter_context *find_get_context(pid_t pid, int cpu)
 {
-	struct perf_cpu_context *cpuctx;
-	struct perf_counter_context *ctx;
	struct perf_counter_context *parent_ctx;
+	struct perf_counter_context *ctx;
+	struct perf_cpu_context *cpuctx;
	struct task_struct *task;
	unsigned long flags;
	int err;
@@ -1664,8 +1666,8 @@ int perf_counter_task_disable(void)
  */
 void perf_counter_update_userpage(struct perf_counter *counter)
 {
-	struct perf_mmap_data *data;
	struct perf_counter_mmap_page *userpg;
+	struct perf_mmap_data *data;
 
	rcu_read_lock();
	data = rcu_dereference(counter->data);
@@ -1769,10 +1771,11 @@ fail:
 
 static void __perf_mmap_data_free(struct rcu_head *rcu_head)
 {
-	struct perf_mmap_data *data = container_of(rcu_head,
-			struct perf_mmap_data, rcu_head);
+	struct perf_mmap_data *data;
	int i;
 
+	data = container_of(rcu_head, struct perf_mmap_data, rcu_head);
+
	free_page((unsigned long)data->user_page);
	for (i = 0; i < data->nr_pages; i++)
		free_page((unsigned long)data->data_pages[i]);
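
The rewrite above moves the long container_of() initializer out of the declaration. container_of() is how an RCU callback, which only receives the embedded rcu_head, gets back to the structure containing it: it subtracts the member's offset from the member's address. A self-contained sketch of the idiom (userspace stand-in types; in the kernel, call_rcu() would invoke the callback after a grace period):

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct rcu_head { void (*func)(struct rcu_head *); };

    struct mmap_data {
            int             nr_pages;
            struct rcu_head rcu_head;       /* embedded callback handle */
    };

    static void mmap_data_free(struct rcu_head *head)
    {
            /* Recover the enclosing structure from the member pointer. */
            struct mmap_data *data =
                    container_of(head, struct mmap_data, rcu_head);

            printf("freeing %d pages\n", data->nr_pages);
    }

    int main(void)
    {
            struct mmap_data d = { .nr_pages = 4 };

            mmap_data_free(&d.rcu_head);
            return 0;
    }
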
@@ -1801,8 +1804,7 @@ static void perf_mmap_close(struct vm_area_struct *vma)
	struct perf_counter *counter = vma->vm_file->private_data;
 
	WARN_ON_ONCE(counter->ctx->parent_ctx);
-	if (atomic_dec_and_mutex_lock(&counter->mmap_count,
-				      &counter->mmap_mutex)) {
+	if (atomic_dec_and_mutex_lock(&counter->mmap_count, &counter->mmap_mutex)) {
		struct user_struct *user = current_user();
 
		atomic_long_sub(counter->data->nr_pages + 1, &user->locked_vm);
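
atomic_dec_and_mutex_lock() in the hunk above drops a reference and returns with the mutex held only when this was the last reference, so ordinary unmaps stay lock-free and only the final closer pays for the mutex and the buffer teardown. Roughly, its semantics (a userspace approximation with hypothetical names; the kernel's implementation is the authoritative one):

    #include <pthread.h>
    #include <stdatomic.h>

    /* Returns 1 with *lock held iff this call dropped *cnt to zero. */
    static int dec_and_mutex_lock(atomic_int *cnt, pthread_mutex_t *lock)
    {
            int old = atomic_load(cnt);

            /* Fast path: not the last reference, decrement and leave. */
            while (old > 1) {
                    if (atomic_compare_exchange_weak(cnt, &old, old - 1))
                            return 0;
            }
            /* Possibly the last reference: take the lock first, then do
             * the final decrement under it. */
            pthread_mutex_lock(lock);
            if (atomic_fetch_sub(cnt, 1) != 1) {
                    pthread_mutex_unlock(lock);
                    return 0;
            }
            return 1;       /* caller tears down, then unlocks */
    }

    int main(void)
    {
            pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
            atomic_int refs = 2;

            dec_and_mutex_lock(&refs, &lock);       /* 2 -> 1: fast path */
            if (dec_and_mutex_lock(&refs, &lock))   /* 1 -> 0: lock held */
                    pthread_mutex_unlock(&lock);    /* teardown goes here */
            return 0;
    }
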
@@ -1821,11 +1823,11 @@ static struct vm_operations_struct perf_mmap_vmops = {
 static int perf_mmap(struct file *file, struct vm_area_struct *vma)
 {
	struct perf_counter *counter = file->private_data;
+	unsigned long user_locked, user_lock_limit;
	struct user_struct *user = current_user();
+	unsigned long locked, lock_limit;
	unsigned long vma_size;
	unsigned long nr_pages;
-	unsigned long user_locked, user_lock_limit;
-	unsigned long locked, lock_limit;
	long user_extra, extra;
	int ret = 0;
 
@@ -1900,8 +1902,8 @@ unlock:
 
 static int perf_fasync(int fd, struct file *filp, int on)
 {
-	struct perf_counter *counter = filp->private_data;
	struct inode *inode = filp->f_path.dentry->d_inode;
+	struct perf_counter *counter = filp->private_data;
	int retval;
 
	mutex_lock(&inode->i_mutex);
@@ -2412,8 +2414,8 @@ static void perf_counter_output(struct perf_counter *counter,
  */
 
 struct perf_comm_event {
-	struct task_struct 	*task;
-	char 			*comm;
+	struct task_struct	*task;
+	char			*comm;
	int			comm_size;
 
	struct {
@@ -2932,6 +2934,7 @@ static void perf_swcounter_add(struct perf_counter *counter, u64 nr,
			       int nmi, struct pt_regs *regs, u64 addr)
 {
	int neg = atomic64_add_negative(nr, &counter->hw.count);
+
	if (counter->hw.irq_period && !neg)
		perf_swcounter_overflow(counter, nmi, regs, addr);
 }
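
The line touched above relies on atomic64_add_negative(), which adds nr to the counter and reports whether the result is still negative; in this era of the code the software counters arm hw.count at minus the sampling period, so an addition that brings the value up to zero or beyond means a period has elapsed and the overflow path runs. The arithmetic in miniature (a userspace model with hypothetical names, not the kernel code):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Like atomic64_add_negative(): add n, report "still negative?". */
    static int add_negative(int64_t n, _Atomic int64_t *v)
    {
            return atomic_fetch_add(v, n) + n < 0;
    }

    int main(void)
    {
            const int64_t period = 4;
            _Atomic int64_t count = -period;        /* armed at -period */

            for (int i = 1; i <= 10; i++) {
                    if (!add_negative(1, &count)) { /* crossed zero */
                            printf("overflow at event %d\n", i);
                            count = -period;        /* re-arm */
                    }
            }
            return 0;
    }
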
@@ -3526,7 +3529,7 @@ inherit_counter(struct perf_counter *parent_counter,
	/*
	 * Make the child state follow the state of the parent counter,
	 * not its hw_event.disabled bit.  We hold the parent's mutex,
-	 * so we won't race with perf_counter_{en,dis}able_family.
+	 * so we won't race with perf_counter_{en, dis}able_family.
	 */
	if (parent_counter->state >= PERF_COUNTER_STATE_INACTIVE)
		child_counter->state = PERF_COUNTER_STATE_INACTIVE;