path: root/arch/mips/kernel
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile            |    2
-rw-r--r--  arch/mips/kernel/cpu-probe.c         |   82
-rw-r--r--  arch/mips/kernel/irq.c               |   24
-rw-r--r--  arch/mips/kernel/perf_event.c        |  601
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c | 1052
-rw-r--r--  arch/mips/kernel/setup.c             |    1
-rw-r--r--  arch/mips/kernel/traps.c             |   31
-rw-r--r--  arch/mips/kernel/unaligned.c         |    7
8 files changed, 1762 insertions(+), 38 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 80884983270d..22b2e0e38617 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -104,4 +104,6 @@ obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT) += 8250-platform.o
 
 obj-$(CONFIG_MIPS_CPUFREQ)	+= cpufreq/
 
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
+
 CPPFLAGS_vmlinux.lds	:= $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b1b304ea2128..71620e19827a 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -25,6 +25,8 @@
 #include <asm/system.h>
 #include <asm/watch.h>
 #include <asm/spram.h>
+#include <asm/uaccess.h>
+
 /*
  * Not all of the MIPS CPUs have the "wait" instruction available. Moreover,
  * the implementation of the "wait" feature differs between CPU families. This
@@ -181,12 +183,13 @@ void __init check_wait(void)
 	case CPU_5KC:
 	case CPU_25KF:
 	case CPU_PR4450:
-	case CPU_BCM3302:
-	case CPU_BCM6338:
-	case CPU_BCM6348:
-	case CPU_BCM6358:
+	case CPU_BMIPS3300:
+	case CPU_BMIPS4350:
+	case CPU_BMIPS4380:
+	case CPU_BMIPS5000:
 	case CPU_CAVIUM_OCTEON:
 	case CPU_CAVIUM_OCTEON_PLUS:
+	case CPU_CAVIUM_OCTEON2:
 	case CPU_JZRISC:
 		cpu_wait = r4k_wait;
 		break;
@@ -902,33 +905,37 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
 {
 	decode_configs(c);
 	switch (c->processor_id & 0xff00) {
-	case PRID_IMP_BCM3302:
-	 /* same as PRID_IMP_BCM6338 */
-		c->cputype = CPU_BCM3302;
-		__cpu_name[cpu] = "Broadcom BCM3302";
-		break;
-	case PRID_IMP_BCM4710:
-		c->cputype = CPU_BCM4710;
-		__cpu_name[cpu] = "Broadcom BCM4710";
-		break;
-	case PRID_IMP_BCM6345:
-		c->cputype = CPU_BCM6345;
-		__cpu_name[cpu] = "Broadcom BCM6345";
-		break;
-	case PRID_IMP_BCM6348:
-		c->cputype = CPU_BCM6348;
-		__cpu_name[cpu] = "Broadcom BCM6348";
-		break;
-	case PRID_IMP_BCM4350:
-		switch (c->processor_id & 0xf0) {
-		case PRID_REV_BCM6358:
-			c->cputype = CPU_BCM6358;
-			__cpu_name[cpu] = "Broadcom BCM6358";
-			break;
-		default:
-			c->cputype = CPU_UNKNOWN;
-			break;
-		}
+	case PRID_IMP_BMIPS32:
+		c->cputype = CPU_BMIPS32;
+		__cpu_name[cpu] = "Broadcom BMIPS32";
+		break;
+	case PRID_IMP_BMIPS3300:
+	case PRID_IMP_BMIPS3300_ALT:
+	case PRID_IMP_BMIPS3300_BUG:
+		c->cputype = CPU_BMIPS3300;
+		__cpu_name[cpu] = "Broadcom BMIPS3300";
+		break;
+	case PRID_IMP_BMIPS43XX: {
+		int rev = c->processor_id & 0xff;
+
+		if (rev >= PRID_REV_BMIPS4380_LO &&
+		    rev <= PRID_REV_BMIPS4380_HI) {
+			c->cputype = CPU_BMIPS4380;
+			__cpu_name[cpu] = "Broadcom BMIPS4380";
+		} else {
+			c->cputype = CPU_BMIPS4350;
+			__cpu_name[cpu] = "Broadcom BMIPS4350";
+		}
+		break;
+	}
+	case PRID_IMP_BMIPS5000:
+		c->cputype = CPU_BMIPS5000;
+		__cpu_name[cpu] = "Broadcom BMIPS5000";
+		c->options |= MIPS_CPU_ULRI;
+		break;
+	case PRID_IMP_BMIPS4KC:
+		c->cputype = CPU_4KC;
+		__cpu_name[cpu] = "MIPS 4Kc";
 		break;
 	}
 }
@@ -953,6 +960,12 @@ platform:
 		if (cpu == 0)
 			__elf_platform = "octeon";
 		break;
+	case PRID_IMP_CAVIUM_CN63XX:
+		c->cputype = CPU_CAVIUM_OCTEON2;
+		__cpu_name[cpu] = "Cavium Octeon II";
+		if (cpu == 0)
+			__elf_platform = "octeon2";
+		break;
 	default:
 		printk(KERN_INFO "Unknown Octeon chip!\n");
 		c->cputype = CPU_UNKNOWN;
@@ -976,6 +989,12 @@ static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
 	}
 }
 
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
 const char *__cpu_name[NR_CPUS];
 const char *__elf_platform;
 
@@ -1053,6 +1072,11 @@ __cpuinit void cpu_probe(void)
 	c->srsets = 1;
 
 	cpu_probe_vmbits(c);
+
+#ifdef CONFIG_64BIT
+	if (cpu == 0)
+		__ua_limit = ~((1ull << cpu_vmbits) - 1);
+#endif
 }
 
 __cpuinit void cpu_report(void)
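For a concrete sense of the new __ua_limit value, here is a minimal user-space sketch of the same computation (the cpu_vmbits value is an assumption for illustration; the kernel probes the real value in cpu_probe_vmbits()):

#include <stdio.h>

int main(void)
{
	unsigned int cpu_vmbits = 40;	/* assumed; really probed per CPU */

	/* Mirrors: __ua_limit = ~((1ull << cpu_vmbits) - 1); */
	unsigned long long ua_limit = ~((1ull << cpu_vmbits) - 1);

	/* Prints 0xffffff0000000000: any user address with one of these
	 * high bits set lies outside the usable virtual range. */
	printf("__ua_limit = 0x%016llx\n", ua_limit);
	return 0;
}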
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index c6345f579a8a..4f93db58a79e 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -151,6 +151,29 @@ void __init init_IRQ(void)
 #endif
 }
 
+#ifdef DEBUG_STACKOVERFLOW
+static inline void check_stack_overflow(void)
+{
+	unsigned long sp;
+
+	__asm__ __volatile__("move %0, $sp" : "=r" (sp));
+	sp &= THREAD_MASK;
+
+	/*
+	 * Check for stack overflow: is there less than STACK_WARN free?
+	 * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
+	 */
+	if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+		printk("do_IRQ: stack overflow: %ld\n",
+			sp - sizeof(struct thread_info));
+		dump_stack();
+	}
+}
+#else
+static inline void check_stack_overflow(void) {}
+#endif
+
+
 /*
  * do_IRQ handles all normal device IRQ's (the special
  * SMP cross-CPU interrupts have their own specific
@@ -159,6 +182,7 @@ void __init init_IRQ(void)
 void __irq_entry do_IRQ(unsigned int irq)
 {
 	irq_enter();
+	check_stack_overflow();
 	__DO_IRQ_SMTC_HOOK(irq);
 	generic_handle_irq(irq);
 	irq_exit();
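A minimal sketch of the masking trick check_stack_overflow() relies on (THREAD_SIZE, STACK_WARN, and the thread_info size are assumed values here, not the real kernel configuration):

#include <stdio.h>

#define THREAD_SIZE	0x2000UL		/* assumed 8 KiB kernel stack */
#define THREAD_MASK	(THREAD_SIZE - 1UL)
#define STACK_WARN	(THREAD_SIZE / 8)	/* 1/8 of THREAD_SIZE */
#define TI_SIZE		64UL	/* stand-in for sizeof(struct thread_info) */

int main(void)
{
	/* Pretend $sp sits 200 bytes above the base of its stack page. */
	unsigned long sp = 0x81234000UL + 200;

	sp &= THREAD_MASK;	/* offset of sp within the stack page */

	/* thread_info lives at the bottom of the page, so less than
	 * TI_SIZE + STACK_WARN of headroom means trouble. */
	if (sp < TI_SIZE + STACK_WARN)
		printf("stack overflow: %ld bytes left\n", (long)(sp - TI_SIZE));
	return 0;
}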
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 000000000000..2b7f3f703b83
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,601 @@
1/*
2 * Linux performance counter support for MIPS.
3 *
4 * Copyright (C) 2010 MIPS Technologies, Inc.
5 * Author: Deng-Cheng Zhu
6 *
7 * This code is based on the implementation for ARM, which is in turn
8 * based on the sparc64 perf event code and the x86 code. Performance
9 * counter access is based on the MIPS Oprofile code. And the callchain
10 * support references the code of MIPS stacktrace.c.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/cpumask.h>
18#include <linux/interrupt.h>
19#include <linux/smp.h>
20#include <linux/kernel.h>
21#include <linux/perf_event.h>
22#include <linux/uaccess.h>
23
24#include <asm/irq.h>
25#include <asm/irq_regs.h>
26#include <asm/stacktrace.h>
27#include <asm/time.h> /* For perf_irq */
28
29/* These are for 32bit counters. For 64bit ones, define them accordingly. */
30#define MAX_PERIOD ((1ULL << 32) - 1)
31#define VALID_COUNT 0x7fffffff
32#define TOTAL_BITS 32
33#define HIGHEST_BIT 31
34
35#define MIPS_MAX_HWEVENTS 4
36
37struct cpu_hw_events {
38 /* Array of events on this cpu. */
39 struct perf_event *events[MIPS_MAX_HWEVENTS];
40
41 /*
42 * Set the bit (indexed by the counter number) when the counter
43 * is used for an event.
44 */
45 unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
46
47 /*
48 * The borrowed MSB for the performance counter. A MIPS performance
49 * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
50 * counters) to determine whether a counter overflow
51 * should be signaled. So here we use a separate MSB for each
52 * counter to make things easy.
53 */
54 unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
55
56 /*
57 * Software copy of the control register for each performance counter.
58 * MIPS CPUs vary in their performance counters; implementations may
59 * use this field differently, or may not use it at all.
60 */
61 unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
62};
63DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
64 .saved_ctrl = {0},
65};
66
67/* The description of MIPS performance events. */
68struct mips_perf_event {
69 unsigned int event_id;
70 /*
71 * MIPS performance counters are indexed starting from 0.
72 * CNTR_EVEN indicates the indexes of the counters to be used are
73 * even numbers.
74 */
75 unsigned int cntr_mask;
76 #define CNTR_EVEN 0x55555555
77 #define CNTR_ODD 0xaaaaaaaa
78#ifdef CONFIG_MIPS_MT_SMP
79 enum {
80 T = 0,
81 V = 1,
82 P = 2,
83 } range;
84#else
85 #define T
86 #define V
87 #define P
88#endif
89};
90
91static struct mips_perf_event raw_event;
92static DEFINE_MUTEX(raw_event_mutex);
93
94#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
95#define C(x) PERF_COUNT_HW_CACHE_##x
96
97struct mips_pmu {
98 const char *name;
99 int irq;
100 irqreturn_t (*handle_irq)(int irq, void *dev);
101 int (*handle_shared_irq)(void);
102 void (*start)(void);
103 void (*stop)(void);
104 int (*alloc_counter)(struct cpu_hw_events *cpuc,
105 struct hw_perf_event *hwc);
106 u64 (*read_counter)(unsigned int idx);
107 void (*write_counter)(unsigned int idx, u64 val);
108 void (*enable_event)(struct hw_perf_event *evt, int idx);
109 void (*disable_event)(int idx);
110 const struct mips_perf_event *(*map_raw_event)(u64 config);
111 const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
112 const struct mips_perf_event (*cache_event_map)
113 [PERF_COUNT_HW_CACHE_MAX]
114 [PERF_COUNT_HW_CACHE_OP_MAX]
115 [PERF_COUNT_HW_CACHE_RESULT_MAX];
116 unsigned int num_counters;
117};
118
119static const struct mips_pmu *mipspmu;
120
121static int
122mipspmu_event_set_period(struct perf_event *event,
123 struct hw_perf_event *hwc,
124 int idx)
125{
126 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
127 s64 left = local64_read(&hwc->period_left);
128 s64 period = hwc->sample_period;
129 int ret = 0;
130 u64 uleft;
131 unsigned long flags;
132
133 if (unlikely(left <= -period)) {
134 left = period;
135 local64_set(&hwc->period_left, left);
136 hwc->last_period = period;
137 ret = 1;
138 }
139
140 if (unlikely(left <= 0)) {
141 left += period;
142 local64_set(&hwc->period_left, left);
143 hwc->last_period = period;
144 ret = 1;
145 }
146
147 if (left > (s64)MAX_PERIOD)
148 left = MAX_PERIOD;
149
150 local64_set(&hwc->prev_count, (u64)-left);
151
152 local_irq_save(flags);
153 uleft = (u64)(-left) & MAX_PERIOD;
154 uleft > VALID_COUNT ?
155 set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
156 mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
157 local_irq_restore(flags);
158
159 perf_event_update_userpage(event);
160
161 return ret;
162}
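/*
 * A worked example of the borrowed-MSB setup above (illustrative values,
 * not part of the original patch): for left == 0x90000000, uleft ==
 * (u64)(-left) & MAX_PERIOD == 0x70000000, so the msbs bit stays clear
 * and the hardware counter starts at 0x70000000 & VALID_COUNT ==
 * 0x70000000.  Bit 31 is reached after 0x10000000 ticks; the overflow
 * handler then merely toggles the borrowed MSB to 1.  The second bit-31
 * overflow, 0x80000000 ticks later, toggles it back and signals the real
 * overflow: 0x10000000 + 0x80000000 == 0x90000000 ticks in total,
 * exactly the requested period.
 */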
163
164static int mipspmu_enable(struct perf_event *event)
165{
166 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
167 struct hw_perf_event *hwc = &event->hw;
168 int idx;
169 int err = 0;
170
171	/* Look for a free counter for this event. */
172 idx = mipspmu->alloc_counter(cpuc, hwc);
173 if (idx < 0) {
174 err = idx;
175 goto out;
176 }
177
178 /*
179 * If there is an event in the counter we are going to use then
180 * make sure it is disabled.
181 */
182 event->hw.idx = idx;
183 mipspmu->disable_event(idx);
184 cpuc->events[idx] = event;
185
186 /* Set the period for the event. */
187 mipspmu_event_set_period(event, hwc, idx);
188
189 /* Enable the event. */
190 mipspmu->enable_event(hwc, idx);
191
192 /* Propagate our changes to the userspace mapping. */
193 perf_event_update_userpage(event);
194
195out:
196 return err;
197}
198
199static void mipspmu_event_update(struct perf_event *event,
200 struct hw_perf_event *hwc,
201 int idx)
202{
203 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
204 unsigned long flags;
205 int shift = 64 - TOTAL_BITS;
206 s64 prev_raw_count, new_raw_count;
207 s64 delta;
208
209again:
210 prev_raw_count = local64_read(&hwc->prev_count);
211 local_irq_save(flags);
212	/* Turn the raw counter value into the "real" one. */
213 new_raw_count = mipspmu->read_counter(idx);
214 if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
215 new_raw_count &= VALID_COUNT;
216 clear_bit(idx, cpuc->msbs);
217 } else
218 new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
219 local_irq_restore(flags);
220
221 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
222 new_raw_count) != prev_raw_count)
223 goto again;
224
225 delta = (new_raw_count << shift) - (prev_raw_count << shift);
226 delta >>= shift;
227
228 local64_add(delta, &event->count);
229 local64_sub(delta, &hwc->period_left);
230
231 return;
232}
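/*
 * A quick sanity check of the shift trick above (illustrative values):
 * with TOTAL_BITS == 32, shift == 32, so the subtraction is effectively
 * done modulo 2^32.  If prev_raw_count == 0xfffffff0 and the counter
 * wrapped around to new_raw_count == 0x00000010, then
 * ((0x10 << 32) - (0xfffffff0 << 32)) >> 32 == 0x20, i.e. the correct
 * delta of 32 ticks despite the wraparound.
 */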
233
234static void mipspmu_disable(struct perf_event *event)
235{
236 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
237 struct hw_perf_event *hwc = &event->hw;
238 int idx = hwc->idx;
239
240
241 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
242
243 /* We are working on a local event. */
244 mipspmu->disable_event(idx);
245
246 barrier();
247
248 mipspmu_event_update(event, hwc, idx);
249 cpuc->events[idx] = NULL;
250 clear_bit(idx, cpuc->used_mask);
251
252 perf_event_update_userpage(event);
253}
254
255static void mipspmu_unthrottle(struct perf_event *event)
256{
257 struct hw_perf_event *hwc = &event->hw;
258
259 mipspmu->enable_event(hwc, hwc->idx);
260}
261
262static void mipspmu_read(struct perf_event *event)
263{
264 struct hw_perf_event *hwc = &event->hw;
265
266 /* Don't read disabled counters! */
267 if (hwc->idx < 0)
268 return;
269
270 mipspmu_event_update(event, hwc, hwc->idx);
271}
272
273static struct pmu pmu = {
274 .enable = mipspmu_enable,
275 .disable = mipspmu_disable,
276 .unthrottle = mipspmu_unthrottle,
277 .read = mipspmu_read,
278};
279
280static atomic_t active_events = ATOMIC_INIT(0);
281static DEFINE_MUTEX(pmu_reserve_mutex);
282static int (*save_perf_irq)(void);
283
284static int mipspmu_get_irq(void)
285{
286 int err;
287
288 if (mipspmu->irq >= 0) {
289 /* Request my own irq handler. */
290 err = request_irq(mipspmu->irq, mipspmu->handle_irq,
291 IRQF_DISABLED | IRQF_NOBALANCING,
292 "mips_perf_pmu", NULL);
293 if (err) {
294 pr_warning("Unable to request IRQ%d for MIPS "
295 "performance counters!\n", mipspmu->irq);
296 }
297 } else if (cp0_perfcount_irq < 0) {
298 /*
299 * We are sharing the irq number with the timer interrupt.
300 */
301 save_perf_irq = perf_irq;
302 perf_irq = mipspmu->handle_shared_irq;
303 err = 0;
304 } else {
305 pr_warning("The platform hasn't properly defined its "
306 "interrupt controller.\n");
307 err = -ENOENT;
308 }
309
310 return err;
311}
312
313static void mipspmu_free_irq(void)
314{
315 if (mipspmu->irq >= 0)
316 free_irq(mipspmu->irq, NULL);
317 else if (cp0_perfcount_irq < 0)
318 perf_irq = save_perf_irq;
319}
320
321static inline unsigned int
322mipspmu_perf_event_encode(const struct mips_perf_event *pev)
323{
324/*
325 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
326 * event_id.
327 */
328#ifdef CONFIG_MIPS_MT_SMP
329 return ((unsigned int)pev->range << 24) |
330 (pev->cntr_mask & 0xffff00) |
331 (pev->event_id & 0xff);
332#else
333 return (pev->cntr_mask & 0xffff00) |
334 (pev->event_id & 0xff);
335#endif
336}
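/*
 * Encoding example (illustrative): an event with event_id 0x0f,
 * cntr_mask CNTR_ODD (0xaaaaaaaa) and range P (2) encodes to
 * (2 << 24) | (0xaaaaaaaa & 0xffff00) | 0x0f == 0x02aaaa0f.
 * mipsxx_pmu_alloc_counter() later recovers the counter mask with
 * (event_base >> 8) & 0xffff == 0xaaaa, i.e. all odd counters.
 */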
337
338static const struct mips_perf_event *
339mipspmu_map_general_event(int idx)
340{
341 const struct mips_perf_event *pev;
342
343 pev = ((*mipspmu->general_event_map)[idx].event_id ==
344 UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
345 &(*mipspmu->general_event_map)[idx]);
346
347 return pev;
348}
349
350static const struct mips_perf_event *
351mipspmu_map_cache_event(u64 config)
352{
353 unsigned int cache_type, cache_op, cache_result;
354 const struct mips_perf_event *pev;
355
356 cache_type = (config >> 0) & 0xff;
357 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
358 return ERR_PTR(-EINVAL);
359
360 cache_op = (config >> 8) & 0xff;
361 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
362 return ERR_PTR(-EINVAL);
363
364 cache_result = (config >> 16) & 0xff;
365 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
366 return ERR_PTR(-EINVAL);
367
368 pev = &((*mipspmu->cache_event_map)
369 [cache_type]
370 [cache_op]
371 [cache_result]);
372
373 if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
374 return ERR_PTR(-EOPNOTSUPP);
375
376 return pev;
377
378}
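/*
 * Config packing example (illustrative): perf encodes a cache event as
 * type | (op << 8) | (result << 16), so an L1D read miss is
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16)
 * == 0x00 | 0x0000 | 0x10000 == 0x10000.
 */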
379
380static int validate_event(struct cpu_hw_events *cpuc,
381 struct perf_event *event)
382{
383 struct hw_perf_event fake_hwc = event->hw;
384
385 if (event->pmu && event->pmu != &pmu)
386 return 0;
387
388 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
389}
390
391static int validate_group(struct perf_event *event)
392{
393 struct perf_event *sibling, *leader = event->group_leader;
394 struct cpu_hw_events fake_cpuc;
395
396 memset(&fake_cpuc, 0, sizeof(fake_cpuc));
397
398 if (!validate_event(&fake_cpuc, leader))
399 return -ENOSPC;
400
401 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
402 if (!validate_event(&fake_cpuc, sibling))
403 return -ENOSPC;
404 }
405
406 if (!validate_event(&fake_cpuc, event))
407 return -ENOSPC;
408
409 return 0;
410}
411
412/*
413 * mipsxx/rm9000/loongson2 have different performance counters, they have
414 * specific low-level init routines.
415 */
416static void reset_counters(void *arg);
417static int __hw_perf_event_init(struct perf_event *event);
418
419static void hw_perf_event_destroy(struct perf_event *event)
420{
421 if (atomic_dec_and_mutex_lock(&active_events,
422 &pmu_reserve_mutex)) {
423 /*
424 * We must not call the destroy function with interrupts
425 * disabled.
426 */
427 on_each_cpu(reset_counters,
428 (void *)(long)mipspmu->num_counters, 1);
429 mipspmu_free_irq();
430 mutex_unlock(&pmu_reserve_mutex);
431 }
432}
433
434const struct pmu *hw_perf_event_init(struct perf_event *event)
435{
436 int err = 0;
437
438 if (!mipspmu || event->cpu >= nr_cpumask_bits ||
439 (event->cpu >= 0 && !cpu_online(event->cpu)))
440 return ERR_PTR(-ENODEV);
441
442 if (!atomic_inc_not_zero(&active_events)) {
443 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
444 atomic_dec(&active_events);
445 return ERR_PTR(-ENOSPC);
446 }
447
448 mutex_lock(&pmu_reserve_mutex);
449 if (atomic_read(&active_events) == 0)
450 err = mipspmu_get_irq();
451
452 if (!err)
453 atomic_inc(&active_events);
454 mutex_unlock(&pmu_reserve_mutex);
455 }
456
457 if (err)
458 return ERR_PTR(err);
459
460 err = __hw_perf_event_init(event);
461 if (err)
462 hw_perf_event_destroy(event);
463
464 return err ? ERR_PTR(err) : &pmu;
465}
466
467void hw_perf_enable(void)
468{
469 if (mipspmu)
470 mipspmu->start();
471}
472
473void hw_perf_disable(void)
474{
475 if (mipspmu)
476 mipspmu->stop();
477}
478
479/* This is needed by specific irq handlers in perf_event_*.c */
480static void
481handle_associated_event(struct cpu_hw_events *cpuc,
482 int idx, struct perf_sample_data *data, struct pt_regs *regs)
483{
484 struct perf_event *event = cpuc->events[idx];
485 struct hw_perf_event *hwc = &event->hw;
486
487 mipspmu_event_update(event, hwc, idx);
488 data->period = event->hw.last_period;
489 if (!mipspmu_event_set_period(event, hwc, idx))
490 return;
491
492 if (perf_event_overflow(event, 0, data, regs))
493 mipspmu->disable_event(idx);
494}
495
496#include "perf_event_mipsxx.c"
497
498/* Callchain handling code. */
499static inline void
500callchain_store(struct perf_callchain_entry *entry,
501 u64 ip)
502{
503 if (entry->nr < PERF_MAX_STACK_DEPTH)
504 entry->ip[entry->nr++] = ip;
505}
506
507/*
508 * Leave userspace callchain empty for now. When we find a way to trace
509 * the user stack callchains, we will add it here.
510 */
511static void
512perf_callchain_user(struct pt_regs *regs,
513 struct perf_callchain_entry *entry)
514{
515}
516
517static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
518 unsigned long reg29)
519{
520 unsigned long *sp = (unsigned long *)reg29;
521 unsigned long addr;
522
523 while (!kstack_end(sp)) {
524 addr = *sp++;
525 if (__kernel_text_address(addr)) {
526 callchain_store(entry, addr);
527 if (entry->nr >= PERF_MAX_STACK_DEPTH)
528 break;
529 }
530 }
531}
532
533static void
534perf_callchain_kernel(struct pt_regs *regs,
535 struct perf_callchain_entry *entry)
536{
537 unsigned long sp = regs->regs[29];
538#ifdef CONFIG_KALLSYMS
539 unsigned long ra = regs->regs[31];
540 unsigned long pc = regs->cp0_epc;
541
542 callchain_store(entry, PERF_CONTEXT_KERNEL);
543 if (raw_show_trace || !__kernel_text_address(pc)) {
544 unsigned long stack_page =
545 (unsigned long)task_stack_page(current);
546 if (stack_page && sp >= stack_page &&
547 sp <= stack_page + THREAD_SIZE - 32)
548 save_raw_perf_callchain(entry, sp);
549 return;
550 }
551 do {
552 callchain_store(entry, pc);
553 if (entry->nr >= PERF_MAX_STACK_DEPTH)
554 break;
555 pc = unwind_stack(current, &sp, pc, &ra);
556 } while (pc);
557#else
558 callchain_store(entry, PERF_CONTEXT_KERNEL);
559 save_raw_perf_callchain(entry, sp);
560#endif
561}
562
563static void
564perf_do_callchain(struct pt_regs *regs,
565 struct perf_callchain_entry *entry)
566{
567 int is_user;
568
569 if (!regs)
570 return;
571
572 is_user = user_mode(regs);
573
574 if (!current || !current->pid)
575 return;
576
577 if (is_user && current->state != TASK_RUNNING)
578 return;
579
580 if (!is_user) {
581 perf_callchain_kernel(regs, entry);
582 if (current->mm)
583 regs = task_pt_regs(current);
584 else
585 regs = NULL;
586 }
587 if (regs)
588 perf_callchain_user(regs, entry);
589}
590
591static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
592
593struct perf_callchain_entry *
594perf_callchain(struct pt_regs *regs)
595{
596 struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);
597
598 entry->nr = 0;
599 perf_do_callchain(regs, entry);
600 return entry;
601}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
new file mode 100644
index 000000000000..5c7c6fc07565
--- /dev/null
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -0,0 +1,1052 @@
1#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) || \
2 defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_SB1)
3
4#define M_CONFIG1_PC (1 << 4)
5
6#define M_PERFCTL_EXL (1UL << 0)
7#define M_PERFCTL_KERNEL (1UL << 1)
8#define M_PERFCTL_SUPERVISOR (1UL << 2)
9#define M_PERFCTL_USER (1UL << 3)
10#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
11#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
12#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
13#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
14#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
15#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
16#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
17#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
18#define M_PERFCTL_WIDE (1UL << 30)
19#define M_PERFCTL_MORE (1UL << 31)
20
21#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
22 M_PERFCTL_KERNEL | \
23 M_PERFCTL_USER | \
24 M_PERFCTL_SUPERVISOR | \
25 M_PERFCTL_INTERRUPT_ENABLE)
26
27#ifdef CONFIG_MIPS_MT_SMP
28#define M_PERFCTL_CONFIG_MASK 0x3fff801f
29#else
30#define M_PERFCTL_CONFIG_MASK 0x1f
31#endif
32#define M_PERFCTL_EVENT_MASK 0xfe0
33
34#define M_COUNTER_OVERFLOW (1UL << 31)
35
36#ifdef CONFIG_MIPS_MT_SMP
37static int cpu_has_mipsmt_pertccounters;
38
39/*
40 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
41 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
42 */
43#if defined(CONFIG_HW_PERF_EVENTS)
44#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
45 0 : smp_processor_id())
46#else
47#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
48 0 : cpu_data[smp_processor_id()].vpe_id)
49#endif
50
51/* Copied from op_model_mipsxx.c */
52static inline unsigned int vpe_shift(void)
53{
54 if (num_possible_cpus() > 1)
55 return 1;
56
57 return 0;
58}
59#else /* !CONFIG_MIPS_MT_SMP */
60#define vpe_id() 0
61
62static inline unsigned int vpe_shift(void)
63{
64 return 0;
65}
66#endif /* CONFIG_MIPS_MT_SMP */
67
68static inline unsigned int
69counters_total_to_per_cpu(unsigned int counters)
70{
71 return counters >> vpe_shift();
72}
73
74static inline unsigned int
75counters_per_cpu_to_total(unsigned int counters)
76{
77 return counters << vpe_shift();
78}
79
80#define __define_perf_accessors(r, n, np) \
81 \
82static inline unsigned int r_c0_ ## r ## n(void) \
83{ \
84 unsigned int cpu = vpe_id(); \
85 \
86 switch (cpu) { \
87 case 0: \
88 return read_c0_ ## r ## n(); \
89 case 1: \
90 return read_c0_ ## r ## np(); \
91 default: \
92 BUG(); \
93 } \
94 return 0; \
95} \
96 \
97static inline void w_c0_ ## r ## n(unsigned int value) \
98{ \
99 unsigned int cpu = vpe_id(); \
100 \
101 switch (cpu) { \
102 case 0: \
103 write_c0_ ## r ## n(value); \
104 return; \
105 case 1: \
106 write_c0_ ## r ## np(value); \
107 return; \
108 default: \
109 BUG(); \
110 } \
111 return; \
112} \
113
114__define_perf_accessors(perfcntr, 0, 2)
115__define_perf_accessors(perfcntr, 1, 3)
116__define_perf_accessors(perfcntr, 2, 0)
117__define_perf_accessors(perfcntr, 3, 1)
118
119__define_perf_accessors(perfctrl, 0, 2)
120__define_perf_accessors(perfctrl, 1, 3)
121__define_perf_accessors(perfctrl, 2, 0)
122__define_perf_accessors(perfctrl, 3, 1)
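/*
 * Expansion sketch (illustrative): on a dual-VPE core without per-TC
 * counters, __define_perf_accessors(perfcntr, 0, 2) produces
 * r_c0_perfcntr0()/w_c0_perfcntr0() that touch hardware counter 0 when
 * vpe_id() == 0 but hardware counter 2 when vpe_id() == 1, so each VPE
 * gets a private set of "its own" counters.
 */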
123
124static inline int __n_counters(void)
125{
126 if (!(read_c0_config1() & M_CONFIG1_PC))
127 return 0;
128 if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
129 return 1;
130 if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
131 return 2;
132 if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
133 return 3;
134
135 return 4;
136}
137
138static inline int n_counters(void)
139{
140 int counters;
141
142 switch (current_cpu_type()) {
143 case CPU_R10000:
144 counters = 2;
145 break;
146
147 case CPU_R12000:
148 case CPU_R14000:
149 counters = 4;
150 break;
151
152 default:
153 counters = __n_counters();
154 }
155
156 return counters;
157}
158
159static void reset_counters(void *arg)
160{
161 int counters = (int)(long)arg;
162 switch (counters) {
163 case 4:
164 w_c0_perfctrl3(0);
165 w_c0_perfcntr3(0);
166 case 3:
167 w_c0_perfctrl2(0);
168 w_c0_perfcntr2(0);
169 case 2:
170 w_c0_perfctrl1(0);
171 w_c0_perfcntr1(0);
172 case 1:
173 w_c0_perfctrl0(0);
174 w_c0_perfcntr0(0);
175 }
176}
177
178static inline u64
179mipsxx_pmu_read_counter(unsigned int idx)
180{
181 switch (idx) {
182 case 0:
183 return r_c0_perfcntr0();
184 case 1:
185 return r_c0_perfcntr1();
186 case 2:
187 return r_c0_perfcntr2();
188 case 3:
189 return r_c0_perfcntr3();
190 default:
191 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
192 return 0;
193 }
194}
195
196static inline void
197mipsxx_pmu_write_counter(unsigned int idx, u64 val)
198{
199 switch (idx) {
200 case 0:
201 w_c0_perfcntr0(val);
202 return;
203 case 1:
204 w_c0_perfcntr1(val);
205 return;
206 case 2:
207 w_c0_perfcntr2(val);
208 return;
209 case 3:
210 w_c0_perfcntr3(val);
211 return;
212 }
213}
214
215static inline unsigned int
216mipsxx_pmu_read_control(unsigned int idx)
217{
218 switch (idx) {
219 case 0:
220 return r_c0_perfctrl0();
221 case 1:
222 return r_c0_perfctrl1();
223 case 2:
224 return r_c0_perfctrl2();
225 case 3:
226 return r_c0_perfctrl3();
227 default:
228 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
229 return 0;
230 }
231}
232
233static inline void
234mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
235{
236 switch (idx) {
237 case 0:
238 w_c0_perfctrl0(val);
239 return;
240 case 1:
241 w_c0_perfctrl1(val);
242 return;
243 case 2:
244 w_c0_perfctrl2(val);
245 return;
246 case 3:
247 w_c0_perfctrl3(val);
248 return;
249 }
250}
251
252#ifdef CONFIG_MIPS_MT_SMP
253static DEFINE_RWLOCK(pmuint_rwlock);
254#endif
255
256/* 24K/34K/1004K cores can share the same event map. */
257static const struct mips_perf_event mipsxxcore_event_map
258 [PERF_COUNT_HW_MAX] = {
259 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
260 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
261 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
262 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
263 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
264 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
265 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
266};
267
268/* 74K core has different branch event code. */
269static const struct mips_perf_event mipsxx74Kcore_event_map
270 [PERF_COUNT_HW_MAX] = {
271 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
272 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
273 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
274 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
275 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
276 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
277 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
278};
279
280/* 24K/34K/1004K cores can share the same cache event map. */
281static const struct mips_perf_event mipsxxcore_cache_map
282 [PERF_COUNT_HW_CACHE_MAX]
283 [PERF_COUNT_HW_CACHE_OP_MAX]
284 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
285[C(L1D)] = {
286 /*
287 * Like some other architectures (e.g. ARM), the performance
288 * counters don't differentiate between read and write
289 * accesses/misses, so this isn't strictly correct, but it's the
290 * best we can do. Writes and reads get combined.
291 */
292 [C(OP_READ)] = {
293 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
294 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
295 },
296 [C(OP_WRITE)] = {
297 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
298 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
299 },
300 [C(OP_PREFETCH)] = {
301 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
302 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
303 },
304},
305[C(L1I)] = {
306 [C(OP_READ)] = {
307 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
308 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
309 },
310 [C(OP_WRITE)] = {
311 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
312 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
313 },
314 [C(OP_PREFETCH)] = {
315 [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
316 /*
317 * Note that MIPS has only "hit" events countable for
318 * the prefetch operation.
319 */
320 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
321 },
322},
323[C(LL)] = {
324 [C(OP_READ)] = {
325 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
326 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
327 },
328 [C(OP_WRITE)] = {
329 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
330 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
331 },
332 [C(OP_PREFETCH)] = {
333 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
334 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
335 },
336},
337[C(DTLB)] = {
338 [C(OP_READ)] = {
339 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
340 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
341 },
342 [C(OP_WRITE)] = {
343 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
344 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
345 },
346 [C(OP_PREFETCH)] = {
347 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
348 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
349 },
350},
351[C(ITLB)] = {
352 [C(OP_READ)] = {
353 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
354 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
355 },
356 [C(OP_WRITE)] = {
357 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
358 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
359 },
360 [C(OP_PREFETCH)] = {
361 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
362 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
363 },
364},
365[C(BPU)] = {
366 /* Using the same code for *HW_BRANCH* */
367 [C(OP_READ)] = {
368 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
369 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
370 },
371 [C(OP_WRITE)] = {
372 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
373 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
374 },
375 [C(OP_PREFETCH)] = {
376 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
377 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
378 },
379},
380};
381
382/* 74K core has completely different cache event map. */
383static const struct mips_perf_event mipsxx74Kcore_cache_map
384 [PERF_COUNT_HW_CACHE_MAX]
385 [PERF_COUNT_HW_CACHE_OP_MAX]
386 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
387[C(L1D)] = {
388 /*
389 * Like some other architectures (e.g. ARM), the performance
390 * counters don't differentiate between read and write
391 * accesses/misses, so this isn't strictly correct, but it's the
392 * best we can do. Writes and reads get combined.
393 */
394 [C(OP_READ)] = {
395 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
396 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
397 },
398 [C(OP_WRITE)] = {
399 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
400 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
401 },
402 [C(OP_PREFETCH)] = {
403 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
404 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
405 },
406},
407[C(L1I)] = {
408 [C(OP_READ)] = {
409 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
410 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
411 },
412 [C(OP_WRITE)] = {
413 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
414 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
415 },
416 [C(OP_PREFETCH)] = {
417 [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
418 /*
419 * Note that MIPS has only "hit" events countable for
420 * the prefetch operation.
421 */
422 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
423 },
424},
425[C(LL)] = {
426 [C(OP_READ)] = {
427 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
428 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
429 },
430 [C(OP_WRITE)] = {
431 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
432 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
433 },
434 [C(OP_PREFETCH)] = {
435 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
436 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
437 },
438},
439[C(DTLB)] = {
440 /* 74K core does not have specific DTLB events. */
441 [C(OP_READ)] = {
442 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
443 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
444 },
445 [C(OP_WRITE)] = {
446 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
447 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
448 },
449 [C(OP_PREFETCH)] = {
450 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
451 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
452 },
453},
454[C(ITLB)] = {
455 [C(OP_READ)] = {
456 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
457 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
458 },
459 [C(OP_WRITE)] = {
460 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
461 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
462 },
463 [C(OP_PREFETCH)] = {
464 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
465 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
466 },
467},
468[C(BPU)] = {
469 /* Using the same code for *HW_BRANCH* */
470 [C(OP_READ)] = {
471 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
472 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
473 },
474 [C(OP_WRITE)] = {
475 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
476 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
477 },
478 [C(OP_PREFETCH)] = {
479 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
480 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
481 },
482},
483};
484
485#ifdef CONFIG_MIPS_MT_SMP
486static void
487check_and_calc_range(struct perf_event *event,
488 const struct mips_perf_event *pev)
489{
490 struct hw_perf_event *hwc = &event->hw;
491
492 if (event->cpu >= 0) {
493 if (pev->range > V) {
494 /*
495 * The user selected an event that is processor
496 * wide, while expecting it to be VPE wide.
497 */
498 hwc->config_base |= M_TC_EN_ALL;
499 } else {
500 /*
501 * FIXME: cpu_data[event->cpu].vpe_id reports 0
502 * for both CPUs.
503 */
504 hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
505 hwc->config_base |= M_TC_EN_VPE;
506 }
507 } else
508 hwc->config_base |= M_TC_EN_ALL;
509}
510#else
511static void
512check_and_calc_range(struct perf_event *event,
513 const struct mips_perf_event *pev)
514{
515}
516#endif
517
518static int __hw_perf_event_init(struct perf_event *event)
519{
520 struct perf_event_attr *attr = &event->attr;
521 struct hw_perf_event *hwc = &event->hw;
522 const struct mips_perf_event *pev;
523 int err;
524
525	/* Return the MIPS event descriptor for this generic perf event. */
526 if (PERF_TYPE_HARDWARE == event->attr.type) {
527 if (event->attr.config >= PERF_COUNT_HW_MAX)
528 return -EINVAL;
529 pev = mipspmu_map_general_event(event->attr.config);
530 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
531 pev = mipspmu_map_cache_event(event->attr.config);
532 } else if (PERF_TYPE_RAW == event->attr.type) {
533 /* We are working on the global raw event. */
534 mutex_lock(&raw_event_mutex);
535 pev = mipspmu->map_raw_event(event->attr.config);
536 } else {
537 /* The event type is not (yet) supported. */
538 return -EOPNOTSUPP;
539 }
540
541 if (IS_ERR(pev)) {
542 if (PERF_TYPE_RAW == event->attr.type)
543 mutex_unlock(&raw_event_mutex);
544 return PTR_ERR(pev);
545 }
546
547 /*
548	 * We allow maximum flexibility in how each individual counter shared
549	 * by a single CPU operates (mode exclusion and range).
550 */
551 hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
552
553 /* Calculate range bits and validate it. */
554 if (num_possible_cpus() > 1)
555 check_and_calc_range(event, pev);
556
557 hwc->event_base = mipspmu_perf_event_encode(pev);
558 if (PERF_TYPE_RAW == event->attr.type)
559 mutex_unlock(&raw_event_mutex);
560
561 if (!attr->exclude_user)
562 hwc->config_base |= M_PERFCTL_USER;
563 if (!attr->exclude_kernel) {
564 hwc->config_base |= M_PERFCTL_KERNEL;
565 /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
566 hwc->config_base |= M_PERFCTL_EXL;
567 }
568 if (!attr->exclude_hv)
569 hwc->config_base |= M_PERFCTL_SUPERVISOR;
570
571 hwc->config_base &= M_PERFCTL_CONFIG_MASK;
572 /*
573 * The event can belong to another cpu. We do not assign a local
574 * counter for it for now.
575 */
576 hwc->idx = -1;
577 hwc->config = 0;
578
579 if (!hwc->sample_period) {
580 hwc->sample_period = MAX_PERIOD;
581 hwc->last_period = hwc->sample_period;
582 local64_set(&hwc->period_left, hwc->sample_period);
583 }
584
585 err = 0;
586 if (event->group_leader != event) {
587 err = validate_group(event);
588 if (err)
589 return -EINVAL;
590 }
591
592 event->destroy = hw_perf_event_destroy;
593
594 return err;
595}
596
597static void pause_local_counters(void)
598{
599 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
600 int counters = mipspmu->num_counters;
601 unsigned long flags;
602
603 local_irq_save(flags);
604 switch (counters) {
605 case 4:
606 cpuc->saved_ctrl[3] = r_c0_perfctrl3();
607 w_c0_perfctrl3(cpuc->saved_ctrl[3] &
608 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
609 case 3:
610 cpuc->saved_ctrl[2] = r_c0_perfctrl2();
611 w_c0_perfctrl2(cpuc->saved_ctrl[2] &
612 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
613 case 2:
614 cpuc->saved_ctrl[1] = r_c0_perfctrl1();
615 w_c0_perfctrl1(cpuc->saved_ctrl[1] &
616 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
617 case 1:
618 cpuc->saved_ctrl[0] = r_c0_perfctrl0();
619 w_c0_perfctrl0(cpuc->saved_ctrl[0] &
620 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
621 }
622 local_irq_restore(flags);
623}
624
625static void resume_local_counters(void)
626{
627 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
628 int counters = mipspmu->num_counters;
629 unsigned long flags;
630
631 local_irq_save(flags);
632 switch (counters) {
633 case 4:
634 w_c0_perfctrl3(cpuc->saved_ctrl[3]);
635 case 3:
636 w_c0_perfctrl2(cpuc->saved_ctrl[2]);
637 case 2:
638 w_c0_perfctrl1(cpuc->saved_ctrl[1]);
639 case 1:
640 w_c0_perfctrl0(cpuc->saved_ctrl[0]);
641 }
642 local_irq_restore(flags);
643}
644
645static int mipsxx_pmu_handle_shared_irq(void)
646{
647 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
648 struct perf_sample_data data;
649 unsigned int counters = mipspmu->num_counters;
650 unsigned int counter;
651 int handled = IRQ_NONE;
652 struct pt_regs *regs;
653
654 if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
655 return handled;
656
657 /*
658 * First we pause the local counters, so that when we are locked
659 * here, the counters are all paused. When it gets locked due to
660 * perf_disable(), the timer interrupt handler will be delayed.
661 *
662 * See also mipsxx_pmu_start().
663 */
664 pause_local_counters();
665#ifdef CONFIG_MIPS_MT_SMP
666 read_lock(&pmuint_rwlock);
667#endif
668
669 regs = get_irq_regs();
670
671 perf_sample_data_init(&data, 0);
672
673 switch (counters) {
674#define HANDLE_COUNTER(n) \
675 case n + 1: \
676 if (test_bit(n, cpuc->used_mask)) { \
677 counter = r_c0_perfcntr ## n(); \
678 if (counter & M_COUNTER_OVERFLOW) { \
679 w_c0_perfcntr ## n(counter & \
680 VALID_COUNT); \
681 if (test_and_change_bit(n, cpuc->msbs)) \
682 handle_associated_event(cpuc, \
683 n, &data, regs); \
684 handled = IRQ_HANDLED; \
685 } \
686 }
687 HANDLE_COUNTER(3)
688 HANDLE_COUNTER(2)
689 HANDLE_COUNTER(1)
690 HANDLE_COUNTER(0)
691 }
692
693 /*
694 * Do all the work for the pending perf events. We can do this
695 * in here because the performance counter interrupt is a regular
696	 * interrupt, not an NMI.
697 */
698 if (handled == IRQ_HANDLED)
699 perf_event_do_pending();
700
701#ifdef CONFIG_MIPS_MT_SMP
702 read_unlock(&pmuint_rwlock);
703#endif
704 resume_local_counters();
705 return handled;
706}
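/*
 * Design note: the switch above relies on C fall-through.  Since
 * HANDLE_COUNTER(n) expands to "case n + 1:", counters == 4 enters at
 * case 4 and falls through HANDLE_COUNTER(3)..HANDLE_COUNTER(0), so
 * every implemented counter is checked for the overflow bit.
 */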
707
708static irqreturn_t
709mipsxx_pmu_handle_irq(int irq, void *dev)
710{
711 return mipsxx_pmu_handle_shared_irq();
712}
713
714static void mipsxx_pmu_start(void)
715{
716#ifdef CONFIG_MIPS_MT_SMP
717 write_unlock(&pmuint_rwlock);
718#endif
719 resume_local_counters();
720}
721
722/*
723 * MIPS performance counters can be per-TC. The control registers can
724 * not be directly accessed across CPUs. Hence if we want to do global
725 * control, we need cross-CPU calls. on_each_cpu() can help us, but we
726 * cannot make sure this function is called with interrupts enabled. So
727 * here we pause local counters and then grab a rwlock and leave the
728 * counters on other CPUs alone. If a counter interrupt is raised while
729 * we own the write lock, simply pause local counters on that CPU and
730 * spin in the handler. Also we know we won't be switched to another
731 * CPU after pausing local counters and before grabbing the lock.
732 */
733static void mipsxx_pmu_stop(void)
734{
735 pause_local_counters();
736#ifdef CONFIG_MIPS_MT_SMP
737 write_lock(&pmuint_rwlock);
738#endif
739}
740
741static int
742mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
743 struct hw_perf_event *hwc)
744{
745 int i;
746
747 /*
748 * We only need to care the counter mask. The range has been
749 * checked definitely.
750 */
751 unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
752
753 for (i = mipspmu->num_counters - 1; i >= 0; i--) {
754 /*
755 * Note that some MIPS perf events can be counted by both
756		 * even and odd counters, whereas many others can be counted
757		 * only by even _or_ odd counters. This introduces an issue:
758		 * when an event of the former kind takes the counter that an
759		 * event of the latter kind wants to use, the "counter
760		 * allocation" for the latter event will fail. If the two
761		 * could be dynamically swapped, both would be satisfied.
762		 * But here we leave this issue alone for now.
763 */
764 if (test_bit(i, &cntr_mask) &&
765 !test_and_set_bit(i, cpuc->used_mask))
766 return i;
767 }
768
769 return -EAGAIN;
770}
771
772static void
773mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
774{
775 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
776 unsigned long flags;
777
778 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
779
780 local_irq_save(flags);
781 cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
782 (evt->config_base & M_PERFCTL_CONFIG_MASK) |
783		/* Make sure the interrupt is enabled. */
784 M_PERFCTL_INTERRUPT_ENABLE;
785 /*
786 * We do not actually let the counter run. Leave it until start().
787 */
788 local_irq_restore(flags);
789}
790
791static void
792mipsxx_pmu_disable_event(int idx)
793{
794 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
795 unsigned long flags;
796
797 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
798
799 local_irq_save(flags);
800 cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
801 ~M_PERFCTL_COUNT_EVENT_WHENEVER;
802 mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
803 local_irq_restore(flags);
804}
805
806/* 24K */
807#define IS_UNSUPPORTED_24K_EVENT(r, b) \
808 ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
809 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
810 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
811 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
812 ((b) >= 68 && (b) <= 127))
813#define IS_BOTH_COUNTERS_24K_EVENT(b) \
814 ((b) == 0 || (b) == 1 || (b) == 11)
815
816/* 34K */
817#define IS_UNSUPPORTED_34K_EVENT(r, b) \
818 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
819 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
820 ((b) >= 68 && (b) <= 127))
821#define IS_BOTH_COUNTERS_34K_EVENT(b) \
822 ((b) == 0 || (b) == 1 || (b) == 11)
823#ifdef CONFIG_MIPS_MT_SMP
824#define IS_RANGE_P_34K_EVENT(r, b) \
825 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
826 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
827 (r) == 176 || ((b) >= 50 && (b) <= 55) || \
828 ((b) >= 64 && (b) <= 67))
829#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
830#endif
831
832/* 74K */
833#define IS_UNSUPPORTED_74K_EVENT(r, b) \
834 ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
835 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
836 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
837 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
838 (b) == 61 || (r) == 62 || (r) == 191 || \
839 ((b) >= 64 && (b) <= 127))
840#define IS_BOTH_COUNTERS_74K_EVENT(b) \
841 ((b) == 0 || (b) == 1)
842
843/* 1004K */
844#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
845 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
846 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
847#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
848 ((b) == 0 || (b) == 1 || (b) == 11)
849#ifdef CONFIG_MIPS_MT_SMP
850#define IS_RANGE_P_1004K_EVENT(r, b) \
851 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
852 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
853 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
854 (r) == 188 || (b) == 61 || (b) == 62 || \
855 ((b) >= 64 && (b) <= 67))
856#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
857#endif
858
859/*
860 * User can use 0-255 raw events, where 0-127 for the events of even
861 * counters, and 128-255 for odd counters. Note that bit 7 is used to
862 * indicate the parity. So, for example, when user wants to take the
863 * Event Num of 15 for odd counters (by referring to the user manual),
864 * then 128 needs to be added to 15 as the input for the event config,
865 * i.e., 143 (0x8F) to be used.
866 */
867static const struct mips_perf_event *
868mipsxx_pmu_map_raw_event(u64 config)
869{
870 unsigned int raw_id = config & 0xff;
871 unsigned int base_id = raw_id & 0x7f;
872
873 switch (current_cpu_type()) {
874 case CPU_24K:
875 if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
876 return ERR_PTR(-EOPNOTSUPP);
877 raw_event.event_id = base_id;
878 if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
879 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
880 else
881 raw_event.cntr_mask =
882 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
883#ifdef CONFIG_MIPS_MT_SMP
884 /*
885 * This is actually doing nothing. Non-multithreading
886 * CPUs will not check and calculate the range.
887 */
888 raw_event.range = P;
889#endif
890 break;
891 case CPU_34K:
892 if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
893 return ERR_PTR(-EOPNOTSUPP);
894 raw_event.event_id = base_id;
895 if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
896 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
897 else
898 raw_event.cntr_mask =
899 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
900#ifdef CONFIG_MIPS_MT_SMP
901 if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
902 raw_event.range = P;
903 else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
904 raw_event.range = V;
905 else
906 raw_event.range = T;
907#endif
908 break;
909 case CPU_74K:
910 if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
911 return ERR_PTR(-EOPNOTSUPP);
912 raw_event.event_id = base_id;
913 if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
914 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
915 else
916 raw_event.cntr_mask =
917 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
918#ifdef CONFIG_MIPS_MT_SMP
919 raw_event.range = P;
920#endif
921 break;
922 case CPU_1004K:
923 if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
924 return ERR_PTR(-EOPNOTSUPP);
925 raw_event.event_id = base_id;
926 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
927 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
928 else
929 raw_event.cntr_mask =
930 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
931#ifdef CONFIG_MIPS_MT_SMP
932 if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
933 raw_event.range = P;
934 else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
935 raw_event.range = V;
936 else
937 raw_event.range = T;
938#endif
939 break;
940 }
941
942 return &raw_event;
943}
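/*
 * Usage sketch (assuming the standard perf raw-event syntax): to count
 * event 15 on the odd counters of a 24K core, pass raw config 0x8f,
 * e.g. "perf stat -e r8f ...".  The code above splits that into
 * base_id 0x0f plus the parity bit selecting CNTR_ODD.
 */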
944
945static struct mips_pmu mipsxxcore_pmu = {
946 .handle_irq = mipsxx_pmu_handle_irq,
947 .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
948 .start = mipsxx_pmu_start,
949 .stop = mipsxx_pmu_stop,
950 .alloc_counter = mipsxx_pmu_alloc_counter,
951 .read_counter = mipsxx_pmu_read_counter,
952 .write_counter = mipsxx_pmu_write_counter,
953 .enable_event = mipsxx_pmu_enable_event,
954 .disable_event = mipsxx_pmu_disable_event,
955 .map_raw_event = mipsxx_pmu_map_raw_event,
956 .general_event_map = &mipsxxcore_event_map,
957 .cache_event_map = &mipsxxcore_cache_map,
958};
959
960static struct mips_pmu mipsxx74Kcore_pmu = {
961 .handle_irq = mipsxx_pmu_handle_irq,
962 .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
963 .start = mipsxx_pmu_start,
964 .stop = mipsxx_pmu_stop,
965 .alloc_counter = mipsxx_pmu_alloc_counter,
966 .read_counter = mipsxx_pmu_read_counter,
967 .write_counter = mipsxx_pmu_write_counter,
968 .enable_event = mipsxx_pmu_enable_event,
969 .disable_event = mipsxx_pmu_disable_event,
970 .map_raw_event = mipsxx_pmu_map_raw_event,
971 .general_event_map = &mipsxx74Kcore_event_map,
972 .cache_event_map = &mipsxx74Kcore_cache_map,
973};
974
975static int __init
976init_hw_perf_events(void)
977{
978 int counters, irq;
979
980 pr_info("Performance counters: ");
981
982 counters = n_counters();
983 if (counters == 0) {
984 pr_cont("No available PMU.\n");
985 return -ENODEV;
986 }
987
988#ifdef CONFIG_MIPS_MT_SMP
989 cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
990 if (!cpu_has_mipsmt_pertccounters)
991 counters = counters_total_to_per_cpu(counters);
992#endif
993
994#ifdef MSC01E_INT_BASE
995 if (cpu_has_veic) {
996 /*
997 * Using platform specific interrupt controller defines.
998 */
999 irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
1000 } else {
1001#endif
1002 if (cp0_perfcount_irq >= 0)
1003 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1004 else
1005 irq = -1;
1006#ifdef MSC01E_INT_BASE
1007 }
1008#endif
1009
1010 on_each_cpu(reset_counters, (void *)(long)counters, 1);
1011
1012 switch (current_cpu_type()) {
1013 case CPU_24K:
1014 mipsxxcore_pmu.name = "mips/24K";
1015 mipsxxcore_pmu.num_counters = counters;
1016 mipsxxcore_pmu.irq = irq;
1017 mipspmu = &mipsxxcore_pmu;
1018 break;
1019 case CPU_34K:
1020 mipsxxcore_pmu.name = "mips/34K";
1021 mipsxxcore_pmu.num_counters = counters;
1022 mipsxxcore_pmu.irq = irq;
1023 mipspmu = &mipsxxcore_pmu;
1024 break;
1025 case CPU_74K:
1026 mipsxx74Kcore_pmu.name = "mips/74K";
1027 mipsxx74Kcore_pmu.num_counters = counters;
1028 mipsxx74Kcore_pmu.irq = irq;
1029 mipspmu = &mipsxx74Kcore_pmu;
1030 break;
1031 case CPU_1004K:
1032 mipsxxcore_pmu.name = "mips/1004K";
1033 mipsxxcore_pmu.num_counters = counters;
1034 mipsxxcore_pmu.irq = irq;
1035 mipspmu = &mipsxxcore_pmu;
1036 break;
1037 default:
1038 pr_cont("Either hardware does not support performance "
1039 "counters, or not yet implemented.\n");
1040 return -ENODEV;
1041 }
1042
1043 if (mipspmu)
1044 pr_cont("%s PMU enabled, %d counters available to each "
1045 "CPU, irq %d%s\n", mipspmu->name, counters, irq,
1046 irq < 0 ? " (share with timer interrupt)" : "");
1047
1048 return 0;
1049}
1050arch_initcall(init_hw_perf_events);
1051
1052#endif /* defined(CONFIG_CPU_MIPS32)... */
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index a6b900f2962b..acd3f2c49c06 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -490,6 +490,7 @@ static void __init arch_mem_init(char **cmdline_p)
 	bootmem_init();
 	device_tree_init();
 	sparse_init();
+	plat_swiotlb_setup();
 	paging_init();
 }
 
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index d053bf4759e4..8e9fbe75894e 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -29,6 +29,7 @@
 #include <linux/notifier.h>
 #include <linux/kdb.h>
 #include <linux/irq.h>
+#include <linux/perf_event.h>
 
 #include <asm/bootinfo.h>
 #include <asm/branch.h>
@@ -576,10 +577,16 @@ static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
  */
 static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
 {
-	if ((opcode & OPCODE) == LL)
+	if ((opcode & OPCODE) == LL) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		return simulate_ll(regs, opcode);
-	if ((opcode & OPCODE) == SC)
+	}
+	if ((opcode & OPCODE) == SC) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		return simulate_sc(regs, opcode);
+	}
 
 	return -1;			/* Must be something else ... */
 }
@@ -595,6 +602,8 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
 	if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
 		int rd = (opcode & RD) >> 11;
 		int rt = (opcode & RT) >> 16;
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		switch (rd) {
 		case 0:		/* CPU number */
 			regs->regs[rt] = smp_processor_id();
@@ -630,8 +639,11 @@ static int simulate_rdhwr(struct pt_regs *regs, unsigned int opcode)
 
 static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
 {
-	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC)
+	if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+		perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+				1, 0, regs, 0);
 		return 0;
+	}
 
 	return -1;			/* Must be something else ... */
 }
@@ -1469,6 +1481,7 @@ void __cpuinit per_cpu_trap_init(void)
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
+	unsigned int hwrena = cpu_hwrena_impl_bits;
 #ifdef CONFIG_MIPS_MT_SMTC
 	int secondaryTC = 0;
 	int bootTC = (cpu == 0);
@@ -1501,14 +1514,14 @@ void __cpuinit per_cpu_trap_init(void)
 	change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
 			 status_set);
 
-	if (cpu_has_mips_r2) {
-		unsigned int enable = 0x0000000f | cpu_hwrena_impl_bits;
+	if (cpu_has_mips_r2)
+		hwrena |= 0x0000000f;
 
-		if (!noulri && cpu_has_userlocal)
-			enable |= (1 << 29);
+	if (!noulri && cpu_has_userlocal)
+		hwrena |= (1 << 29);
 
-		write_c0_hwrena(enable);
-	}
+	if (hwrena)
+		write_c0_hwrena(hwrena);
 
 #ifdef CONFIG_MIPS_MT_SMTC
 	if (!secondaryTC) {
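The net effect of the HWREna rework above: the architectural enable bits and the platform's cpu_hwrena_impl_bits are merged into one value that is written only when non-zero. A sketch with assumed capability flags:

/* Illustrative: an R2 core with UserLocal and no implementation bits. */
unsigned int hwrena = 0;	/* cpu_hwrena_impl_bits == 0 assumed */
hwrena |= 0x0000000f;		/* cpu_has_mips_r2: CPUNum/SYNCI_Step/CC/CCRes */
hwrena |= (1 << 29);		/* !noulri && cpu_has_userlocal: UserLocal */
/* hwrena == 0x2000000f, written once via write_c0_hwrena(hwrena) */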
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 33d5a5ce4a29..cfea1adfa153 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -78,6 +78,8 @@
 #include <linux/smp.h>
 #include <linux/sched.h>
 #include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
 #include <asm/asm.h>
 #include <asm/branch.h>
 #include <asm/byteorder.h>
@@ -109,6 +111,9 @@ static void emulate_load_store_insn(struct pt_regs *regs,
 	unsigned long value;
 	unsigned int res;
 
+	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+			1, 0, regs, 0);
+
 	/*
 	 * This load never faults.
 	 */
@@ -511,6 +516,8 @@ asmlinkage void do_ade(struct pt_regs *regs)
 	unsigned int __user *pc;
 	mm_segment_t seg;
 
+	perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
+			1, 0, regs, regs->cp0_badvaddr);
 	/*
 	 * Did we catch a fault trying to load an instruction?
 	 * Or are we running in MIPS16 mode?