Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--	arch/sparc/kernel/perf_event.c	23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b5c38faa4ead..8efd33753ad3 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -110,7 +110,7 @@ struct cpu_hw_events {
 
 	unsigned int group_flag;
 };
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
 /* An event map describes the characteristics of a performance
  * counter event.  In particular it gives the encoding as well as
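
Note on the hunk above: making the per-CPU variable static only narrows linkage; every access in this file still goes through the regular per-CPU accessors. A minimal sketch of the same pattern, assuming a kernel build context (the struct fields and the demo_count_event() helper are illustrative, not from this patch):

#include <linux/percpu.h>

struct demo_events {
	int enabled;
	int n_events;
};

/* static => internal linkage: the symbol is private to this file,
 * but this_cpu_ptr()/per_cpu() access works exactly as before. */
static DEFINE_PER_CPU(struct demo_events, demo_events) = { .enabled = 1, };

static void demo_count_event(void)
{
	/* this_cpu_ptr() expects the caller to have preemption disabled */
	struct demo_events *cpuc = this_cpu_ptr(&demo_events);

	if (cpuc->enabled)
		cpuc->n_events++;
}
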
@@ -1153,7 +1153,7 @@ static void perf_stop_nmi_watchdog(void *unused)
 		cpuc->pcr[i] = pcr_ops->read_pcr(i);
 }
 
-void perf_event_grab_pmc(void)
+static void perf_event_grab_pmc(void)
 {
 	if (atomic_inc_not_zero(&active_events))
 		return;
@@ -1169,7 +1169,7 @@ void perf_event_grab_pmc(void)
 	mutex_unlock(&pmc_grab_mutex);
 }
 
-void perf_event_release_pmc(void)
+static void perf_event_release_pmc(void)
 {
 	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
 		if (atomic_read(&nmi_active) == 0)
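
perf_event_grab_pmc() and perf_event_release_pmc() have no callers outside this file, so internal linkage is safe here as well. The idiom around them is worth spelling out: the first user pays for setup under a mutex, later users take a lock-free fast path, and only the last user tears down. A sketch of that refcount pattern built on the same primitives, assuming a kernel build context (the demo_* names are illustrative; the real functions additionally stop and restart the NMI watchdog):

#include <linux/atomic.h>
#include <linux/mutex.h>

static atomic_t demo_users = ATOMIC_INIT(0);
static DEFINE_MUTEX(demo_mutex);

static void demo_grab(void)
{
	/* Fast path: already active, just take another reference. */
	if (atomic_inc_not_zero(&demo_users))
		return;

	mutex_lock(&demo_mutex);
	if (atomic_read(&demo_users) == 0) {
		/* First user: one-time setup goes here. */
	}
	atomic_inc(&demo_users);
	mutex_unlock(&demo_mutex);
}

static void demo_release(void)
{
	/* atomic_dec_and_mutex_lock() returns true, with the mutex held,
	 * only when the count reaches zero, so teardown cannot race with
	 * a concurrent demo_grab(). */
	if (atomic_dec_and_mutex_lock(&demo_users, &demo_mutex)) {
		/* Last user gone: teardown goes here. */
		mutex_unlock(&demo_mutex);
	}
}
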
@@ -1669,7 +1669,7 @@ static bool __init supported_pmu(void)
 	return false;
 }
 
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
 	pr_info("Performance events: ");
 
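
init_hw_perf_events() can lose external linkage because an __init function is normally reached only through an initcall registered in the same translation unit, not by name from other files. A sketch of that registration style, assuming this file follows it (the initcall level and demo names are illustrative):

#include <linux/init.h>
#include <linux/printk.h>

/* __init places the body in .init.text, which the kernel frees after
 * boot; the initcall macro records a function pointer in a dedicated
 * section, so no external declaration is needed. */
static int __init demo_init(void)
{
	pr_info("demo: ready\n");
	return 0;
}
early_initcall(demo_init);
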
@@ -1742,10 +1742,11 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 
 	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
 	do {
-		struct sparc_stackf *usf, sf;
+		struct sparc_stackf __user *usf;
+		struct sparc_stackf sf;
 		unsigned long pc;
 
-		usf = (struct sparc_stackf *) ufp;
+		usf = (struct sparc_stackf __user *)ufp;
 		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
 			break;
 
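
The split declaration above is forced by the annotation: __user qualifies the pointed-to type, so the combined form "struct sparc_stackf __user *usf, sf;" would also place the on-stack sf in the user address space. Under sparse (__CHECKER__), __user expands to an attribute along the lines of __attribute__((noderef, address_space(1))), which makes any direct dereference a warning. A simplified sketch of the resulting rule, assuming a kernel build context (demo_frame and demo_read_frame are illustrative stand-ins):

#include <linux/errno.h>
#include <linux/uaccess.h>

struct demo_frame {		/* stand-in for sparc_stackf */
	unsigned long fp;
	unsigned long callers_pc;
};

static int demo_read_frame(unsigned long ufp, struct demo_frame *out)
{
	struct demo_frame __user *usf = (struct demo_frame __user *)ufp;

	/* *out = *usf; would draw a sparse "dereference of noderef"
	 * warning; crossing address spaces must use a copy helper. */
	if (__copy_from_user_inatomic(out, usf, sizeof(*out)))
		return -EFAULT;
	return 0;
}
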
@@ -1765,17 +1766,19 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 		unsigned long pc;
 
 		if (thread32_stack_is_64bit(ufp)) {
-			struct sparc_stackf *usf, sf;
+			struct sparc_stackf __user *usf;
+			struct sparc_stackf sf;
 
 			ufp += STACK_BIAS;
-			usf = (struct sparc_stackf *) ufp;
+			usf = (struct sparc_stackf __user *)ufp;
 			if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
 				break;
 			pc = sf.callers_pc & 0xffffffff;
 			ufp = ((unsigned long) sf.fp) & 0xffffffff;
 		} else {
-			struct sparc_stackf32 *usf, sf;
-			usf = (struct sparc_stackf32 *) ufp;
+			struct sparc_stackf32 __user *usf;
+			struct sparc_stackf32 sf;
+			usf = (struct sparc_stackf32 __user *)ufp;
 			if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
 				break;
 			pc = sf.callers_pc;
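
Both branches read the user frame with __copy_from_user_inatomic(), which returns the number of bytes left uncopied and never sleeps; that matters because callchain sampling can fire in NMI context, where a faulting user access must fail fast rather than block in the fault handler. A standalone sketch of that access pattern, assuming a kernel build context (demo_peek_user_word is an illustrative name):

#include <linux/errno.h>
#include <linux/uaccess.h>

static int demo_peek_user_word(unsigned long uaddr, unsigned long *val)
{
	const unsigned long __user *p = (const unsigned long __user *)uaddr;
	unsigned long left;

	/* With pagefaults disabled, a missing mapping makes the copy
	 * return nonzero immediately instead of sleeping. */
	pagefault_disable();
	left = __copy_from_user_inatomic(val, p, sizeof(*val));
	pagefault_enable();

	return left ? -EFAULT : 0;
}
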