author    Linus Torvalds <torvalds@linux-foundation.org>  2012-10-14 17:39:05 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-10-14 17:39:05 -0400
commit    a5ef3f7dcba17e79c21afec38188c4c6a0baf995 (patch)
tree      1961e2e8c2d30dd7a21b93fa7e70dc62f892651d /arch/mips/kernel
parent    d25282d1c9b9bc4cda7f9d3c0205108e99aa7a9d (diff)
parent    35bafbee4b4732a2820bbd0ef141c8192ff29731 (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus
Pull MIPS update from Ralf Baechle:
 "Cleanups and fixes for breakage that occurred earlier during this
  merge phase.  Also a few patches that didn't make the first pull
  request.  Of those is the Alchemy work that merges code for many of
  the SOCs and evaluation boards and thus, among other code shrinkage,
  reduces the number of MIPS defconfigs by 5."

* 'upstream' of git://git.linux-mips.org/pub/scm/ralf/upstream-linus: (22 commits)
  MIPS: SNI: Switch RM400 serial to SCCNXP driver
  MIPS: Remove unused empty_bad_pmd_table[] declaration.
  MIPS: MT: Remove kspd.
  MIPS: Malta: Fix section mismatch.
  MIPS: asm-offset.c: Delete unused irq_cpustat_t struct offsets.
  MIPS: Alchemy: Merge PB1100/1500 support into DB1000 code.
  MIPS: Alchemy: merge PB1550 support into DB1550 code
  MIPS: Alchemy: Single kernel for DB1200/1300/1550
  MIPS: Optimize TLB refill for RI/XI configurations.
  MIPS: proc: Cleanup printing of ASEs.
  MIPS: Hardwire detection of DSP ASE Rev 2 for systems, as required.
  MIPS: Add detection of DSP ASE Revision 2.
  MIPS: Optimize pgd_init and pmd_init
  MIPS: perf: Add perf functionality for BMIPS5000
  MIPS: perf: Split the Kconfig option CONFIG_MIPS_MT_SMP
  MIPS: perf: Remove unnecessary #ifdef
  MIPS: perf: Add cpu feature bit for PCI (performance counter interrupt)
  MIPS: perf: Change the "mips_perf_event" table unsupported indicator.
  MIPS: Align swapper_pg_dir to 64K for better TLB Refill code.
  vmlinux.lds.h: Allow architectures to add sections to the front of .bss
  ...
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile             |    1
-rw-r--r--  arch/mips/kernel/asm-offsets.c        |   10
-rw-r--r--  arch/mips/kernel/cpu-probe.c          |   11
-rw-r--r--  arch/mips/kernel/kspd.c               |  423
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  |  262
-rw-r--r--  arch/mips/kernel/proc.c               |   19
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S        |   21
-rw-r--r--  arch/mips/kernel/vpe.c                |   24
8 files changed, 144 insertions, 627 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 414c26920df8..8b28bc4e14ea 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -54,7 +54,6 @@ obj-$(CONFIG_CPU_MIPSR2) += spram.o
 
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
 obj-$(CONFIG_MIPS_VPE_APSP_API)	+= rtlx.o
-obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
 
 obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 6b30fb2caa67..0c4bce4882a6 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -12,7 +12,6 @@
 #include <linux/types.h>
 #include <linux/sched.h>
 #include <linux/mm.h>
-#include <linux/interrupt.h>
 #include <linux/kbuild.h>
 #include <linux/suspend.h>
 #include <asm/ptrace.h>
@@ -292,15 +291,6 @@ void output_signal_defined(void)
 	BLANK();
 }
 
-void output_irq_cpustat_t_defines(void)
-{
-	COMMENT("Linux irq_cpustat_t offsets.");
-	DEFINE(IC_SOFTIRQ_PENDING,
-	       offsetof(irq_cpustat_t, __softirq_pending));
-	DEFINE(IC_IRQ_CPUSTAT_T, sizeof(irq_cpustat_t));
-	BLANK();
-}
-
 #ifdef CONFIG_CPU_CAVIUM_OCTEON
 void output_octeon_cop2_state_defines(void)
 {
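The asm-offsets machinery exists to expose C struct layouts to assembly code; the hunk above drops the irq_cpustat_t offsets because no MIPS assembly uses them anymore. Below is a minimal stand-alone sketch of the underlying offsetof technique; the struct and symbol names are illustrative, and the kernel's real DEFINE()/COMMENT() macros come from <linux/kbuild.h> and emit asm markers rather than this printf form.

```c
/*
 * Sketch of the asm-offsets idea: emit assembler-visible constants
 * derived from C struct layouts.  Names and the printf output format
 * are illustrative only.
 */
#include <stddef.h>
#include <stdio.h>

struct example_state {		/* stand-in for a real kernel struct */
	unsigned long flags;
	unsigned long pending;
};

#define DEFINE(sym, val) printf("#define %s %zu\n", #sym, (size_t)(val))

int main(void)
{
	DEFINE(EX_PENDING, offsetof(struct example_state, pending));
	DEFINE(EX_SIZEOF, sizeof(struct example_state));
	return 0;
}
```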
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index bc58bd10a607..b1fb7af3c350 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -142,7 +142,7 @@ int __cpuinitdata mips_dsp_disabled;
 
 static int __init dsp_disable(char *s)
 {
-	cpu_data[0].ases &= ~MIPS_ASE_DSP;
+	cpu_data[0].ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
 	mips_dsp_disabled = 1;
 
 	return 1;
@@ -429,6 +429,8 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
 		c->options |= MIPS_CPU_RIXI;
 	if (config3 & MIPS_CONF3_DSP)
 		c->ases |= MIPS_ASE_DSP;
+	if (config3 & MIPS_CONF3_DSP2P)
+		c->ases |= MIPS_ASE_DSP2P;
 	if (config3 & MIPS_CONF3_VINT)
 		c->options |= MIPS_CPU_VINT;
 	if (config3 & MIPS_CONF3_VEIC)
@@ -1180,7 +1182,7 @@ __cpuinit void cpu_probe(void)
 		c->options &= ~MIPS_CPU_FPU;
 
 	if (mips_dsp_disabled)
-		c->ases &= ~MIPS_ASE_DSP;
+		c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
 
 	if (c->options & MIPS_CPU_FPU) {
 		c->fpu_id = cpu_get_fpu_id();
@@ -1194,8 +1196,11 @@ __cpuinit void cpu_probe(void)
 		}
 	}
 
-	if (cpu_has_mips_r2)
+	if (cpu_has_mips_r2) {
 		c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
+		/* R2 has Performance Counter Interrupt indicator */
+		c->options |= MIPS_CPU_PCI;
+	}
 	else
 		c->srsets = 1;
 
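The cpu-probe changes above read the DSP ASE Rev 2 presence bit from Config3 and make the "nodsp" override clear both ASE flags. A hedged sketch of that decode/mask logic follows; the bit positions follow the MIPS32 PRA layout of Config3 and the mask values are illustrative, not the kernel's definitions.

```c
/*
 * Sketch of the Config3 DSP probing logic.  DSPP (bit 10) indicates
 * the DSP ASE, DSP2P (bit 11) indicates DSP ASE Rev 2; the ASE_* mask
 * values are illustrative.
 */
#include <stdio.h>

#define CONF3_DSP	(1u << 10)	/* DSP ASE present */
#define CONF3_DSP2P	(1u << 11)	/* DSP ASE Rev 2 present */
#define ASE_DSP		0x1u
#define ASE_DSP2P	0x2u

static unsigned int decode_dsp_ases(unsigned int config3)
{
	unsigned int ases = 0;

	if (config3 & CONF3_DSP)
		ases |= ASE_DSP;
	if (config3 & CONF3_DSP2P)
		ases |= ASE_DSP2P;
	return ases;
}

int main(void)
{
	unsigned int ases = decode_dsp_ases(CONF3_DSP | CONF3_DSP2P);

	/* a "nodsp" override must clear both flags, as in dsp_disable() */
	ases &= ~(ASE_DSP | ASE_DSP2P);
	printf("ases after nodsp: %#x\n", ases);
	return 0;
}
```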
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
deleted file mode 100644
index b77f56bbb477..000000000000
--- a/arch/mips/kernel/kspd.c
+++ /dev/null
@@ -1,423 +0,0 @@
1/*
2 * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
3 *
4 * This program is free software; you can distribute it and/or modify it
5 * under the terms of the GNU General Public License (Version 2) as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
11 * for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 *
17 */
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/sched.h>
21#include <linux/unistd.h>
22#include <linux/file.h>
23#include <linux/fdtable.h>
24#include <linux/fs.h>
25#include <linux/syscalls.h>
26#include <linux/workqueue.h>
27#include <linux/errno.h>
28#include <linux/list.h>
29
30#include <asm/vpe.h>
31#include <asm/rtlx.h>
32#include <asm/kspd.h>
33
34static struct workqueue_struct *workqueue;
35static struct work_struct work;
36
37extern unsigned long cpu_khz;
38
39struct mtsp_syscall {
40 int cmd;
41 unsigned char abi;
42 unsigned char size;
43};
44
45struct mtsp_syscall_ret {
46 int retval;
47 int errno;
48};
49
50struct mtsp_syscall_generic {
51 int arg0;
52 int arg1;
53 int arg2;
54 int arg3;
55 int arg4;
56 int arg5;
57 int arg6;
58};
59
60static struct list_head kspd_notifylist;
61static int sp_stopping;
62
63/* these should match with those in the SDE kit */
64#define MTSP_SYSCALL_BASE 0
65#define MTSP_SYSCALL_EXIT (MTSP_SYSCALL_BASE + 0)
66#define MTSP_SYSCALL_OPEN (MTSP_SYSCALL_BASE + 1)
67#define MTSP_SYSCALL_READ (MTSP_SYSCALL_BASE + 2)
68#define MTSP_SYSCALL_WRITE (MTSP_SYSCALL_BASE + 3)
69#define MTSP_SYSCALL_CLOSE (MTSP_SYSCALL_BASE + 4)
70#define MTSP_SYSCALL_LSEEK32 (MTSP_SYSCALL_BASE + 5)
71#define MTSP_SYSCALL_ISATTY (MTSP_SYSCALL_BASE + 6)
72#define MTSP_SYSCALL_GETTIME (MTSP_SYSCALL_BASE + 7)
73#define MTSP_SYSCALL_PIPEFREQ (MTSP_SYSCALL_BASE + 8)
74#define MTSP_SYSCALL_GETTOD (MTSP_SYSCALL_BASE + 9)
75#define MTSP_SYSCALL_IOCTL (MTSP_SYSCALL_BASE + 10)
76
77#define MTSP_O_RDONLY 0x0000
78#define MTSP_O_WRONLY 0x0001
79#define MTSP_O_RDWR 0x0002
80#define MTSP_O_NONBLOCK 0x0004
81#define MTSP_O_APPEND 0x0008
82#define MTSP_O_SHLOCK 0x0010
83#define MTSP_O_EXLOCK 0x0020
84#define MTSP_O_ASYNC 0x0040
85/* XXX: check which of these is actually O_SYNC vs O_DSYNC */
86#define MTSP_O_FSYNC O_SYNC
87#define MTSP_O_NOFOLLOW 0x0100
88#define MTSP_O_SYNC 0x0080
89#define MTSP_O_CREAT 0x0200
90#define MTSP_O_TRUNC 0x0400
91#define MTSP_O_EXCL 0x0800
92#define MTSP_O_BINARY 0x8000
93
94extern int tclimit;
95
96struct apsp_table {
97 int sp;
98 int ap;
99};
100
101/* we might want to do the mode flags too */
102struct apsp_table open_flags_table[] = {
103 { MTSP_O_RDWR, O_RDWR },
104 { MTSP_O_WRONLY, O_WRONLY },
105 { MTSP_O_CREAT, O_CREAT },
106 { MTSP_O_TRUNC, O_TRUNC },
107 { MTSP_O_NONBLOCK, O_NONBLOCK },
108 { MTSP_O_APPEND, O_APPEND },
109 { MTSP_O_NOFOLLOW, O_NOFOLLOW }
110};
111
112struct apsp_table syscall_command_table[] = {
113 { MTSP_SYSCALL_OPEN, __NR_open },
114 { MTSP_SYSCALL_CLOSE, __NR_close },
115 { MTSP_SYSCALL_READ, __NR_read },
116 { MTSP_SYSCALL_WRITE, __NR_write },
117 { MTSP_SYSCALL_LSEEK32, __NR_lseek },
118 { MTSP_SYSCALL_IOCTL, __NR_ioctl }
119};
120
121static int sp_syscall(int num, int arg0, int arg1, int arg2, int arg3)
122{
123 register long int _num __asm__("$2") = num;
124 register long int _arg0 __asm__("$4") = arg0;
125 register long int _arg1 __asm__("$5") = arg1;
126 register long int _arg2 __asm__("$6") = arg2;
127 register long int _arg3 __asm__("$7") = arg3;
128
129 mm_segment_t old_fs;
130
131 old_fs = get_fs();
132 set_fs(KERNEL_DS);
133
134 __asm__ __volatile__ (
135 " syscall \n"
136 : "=r" (_num), "=r" (_arg3)
137 : "r" (_num), "r" (_arg0), "r" (_arg1), "r" (_arg2), "r" (_arg3));
138
139 set_fs(old_fs);
140
141 /* $a3 is error flag */
142 if (_arg3)
143 return -_num;
144
145 return _num;
146}
147
148static int translate_syscall_command(int cmd)
149{
150 int i;
151 int ret = -1;
152
153 for (i = 0; i < ARRAY_SIZE(syscall_command_table); i++) {
154 if ((cmd == syscall_command_table[i].sp))
155 return syscall_command_table[i].ap;
156 }
157
158 return ret;
159}
160
161static unsigned int translate_open_flags(int flags)
162{
163 int i;
164 unsigned int ret = 0;
165
166 for (i = 0; i < ARRAY_SIZE(open_flags_table); i++) {
167 if( (flags & open_flags_table[i].sp) ) {
168 ret |= open_flags_table[i].ap;
169 }
170 }
171
172 return ret;
173}
174
175
176static int sp_setfsuidgid(uid_t uid, gid_t gid)
177{
178 struct cred *new;
179
180 new = prepare_creds();
181 if (!new)
182 return -ENOMEM;
183
184 new->fsuid = uid;
185 new->fsgid = gid;
186
187 commit_creds(new);
188
189 return 0;
190}
191
192/*
193 * Expects a request to be on the sysio channel. Reads it. Decides whether
194 * its a linux syscall and runs it, or whatever. Puts the return code back
195 * into the request and sends the whole thing back.
196 */
197void sp_work_handle_request(void)
198{
199 struct mtsp_syscall sc;
200 struct mtsp_syscall_generic generic;
201 struct mtsp_syscall_ret ret;
202 struct kspd_notifications *n;
203 unsigned long written;
204 mm_segment_t old_fs;
205 struct timeval tv;
206 struct timezone tz;
207 int err, cmd;
208
209 char *vcwd;
210 int size;
211
212 ret.retval = -1;
213
214 old_fs = get_fs();
215 set_fs(KERNEL_DS);
216
217 if (!rtlx_read(RTLX_CHANNEL_SYSIO, &sc, sizeof(struct mtsp_syscall))) {
218 set_fs(old_fs);
219 printk(KERN_ERR "Expected request but nothing to read\n");
220 return;
221 }
222
223 size = sc.size;
224
225 if (size) {
226 if (!rtlx_read(RTLX_CHANNEL_SYSIO, &generic, size)) {
227 set_fs(old_fs);
228 printk(KERN_ERR "Expected request but nothing to read\n");
229 return;
230 }
231 }
232
233 /* Run the syscall at the privilege of the user who loaded the
234 SP program */
235
236 if (vpe_getuid(tclimit)) {
237 err = sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
238 if (!err)
239 pr_err("Change of creds failed\n");
240 }
241
242 switch (sc.cmd) {
243 /* needs the flags argument translating from SDE kit to
244 linux */
245 case MTSP_SYSCALL_PIPEFREQ:
246 ret.retval = cpu_khz * 1000;
247 ret.errno = 0;
248 break;
249
250 case MTSP_SYSCALL_GETTOD:
251 memset(&tz, 0, sizeof(tz));
252 if ((ret.retval = sp_syscall(__NR_gettimeofday, (int)&tv,
253 (int)&tz, 0, 0)) == 0)
254 ret.retval = tv.tv_sec;
255 break;
256
257 case MTSP_SYSCALL_EXIT:
258 list_for_each_entry(n, &kspd_notifylist, list)
259 n->kspd_sp_exit(tclimit);
260 sp_stopping = 1;
261
262 printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
263 generic.arg0);
264 break;
265
266 case MTSP_SYSCALL_OPEN:
267 generic.arg1 = translate_open_flags(generic.arg1);
268
269 vcwd = vpe_getcwd(tclimit);
270
271 /* change to cwd of the process that loaded the SP program */
272 old_fs = get_fs();
273 set_fs(KERNEL_DS);
274 sys_chdir(vcwd);
275 set_fs(old_fs);
276
277 sc.cmd = __NR_open;
278
279 /* fall through */
280
281 default:
282 if ((sc.cmd >= __NR_Linux) &&
283 (sc.cmd <= (__NR_Linux + __NR_Linux_syscalls)) )
284 cmd = sc.cmd;
285 else
286 cmd = translate_syscall_command(sc.cmd);
287
288 if (cmd >= 0) {
289 ret.retval = sp_syscall(cmd, generic.arg0, generic.arg1,
290 generic.arg2, generic.arg3);
291 } else
292 printk(KERN_WARNING
293 "KSPD: Unknown SP syscall number %d\n", sc.cmd);
294 break;
295 } /* switch */
296
297 if (vpe_getuid(tclimit)) {
298 err = sp_setfsuidgid(0, 0);
299 if (!err)
300 pr_err("restoring old creds failed\n");
301 }
302
303 old_fs = get_fs();
304 set_fs(KERNEL_DS);
305 written = rtlx_write(RTLX_CHANNEL_SYSIO, &ret, sizeof(ret));
306 set_fs(old_fs);
307 if (written < sizeof(ret))
308 printk("KSPD: sp_work_handle_request failed to send to SP\n");
309}
310
311static void sp_cleanup(void)
312{
313 struct files_struct *files = current->files;
314 int i, j;
315 struct fdtable *fdt;
316
317 j = 0;
318
319 /*
320 * It is safe to dereference the fd table without RCU or
321 * ->file_lock
322 */
323 fdt = files_fdtable(files);
324 for (;;) {
325 unsigned long set;
326 i = j * BITS_PER_LONG;
327 if (i >= fdt->max_fds)
328 break;
329 set = fdt->open_fds[j++];
330 while (set) {
331 if (set & 1) {
332 struct file * file = xchg(&fdt->fd[i], NULL);
333 if (file)
334 filp_close(file, files);
335 }
336 i++;
337 set >>= 1;
338 }
339 }
340
341 /* Put daemon cwd back to root to avoid umount problems */
342 sys_chdir("/");
343}
344
345static int channel_open;
346
347/* the work handler */
348static void sp_work(struct work_struct *unused)
349{
350 if (!channel_open) {
351 if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
352 printk("KSPD: unable to open sp channel\n");
353 sp_stopping = 1;
354 } else {
355 channel_open++;
356 printk(KERN_DEBUG "KSPD: SP channel opened\n");
357 }
358 } else {
359 /* wait for some data, allow it to sleep */
360 rtlx_read_poll(RTLX_CHANNEL_SYSIO, 1);
361
362 /* Check we haven't been woken because we are stopping */
363 if (!sp_stopping)
364 sp_work_handle_request();
365 }
366
367 if (!sp_stopping)
368 queue_work(workqueue, &work);
369 else
370 sp_cleanup();
371}
372
373static void startwork(int vpe)
374{
375 sp_stopping = channel_open = 0;
376
377 if (workqueue == NULL) {
378 if ((workqueue = create_singlethread_workqueue("kspd")) == NULL) {
379 printk(KERN_ERR "unable to start kspd\n");
380 return;
381 }
382
383 INIT_WORK(&work, sp_work);
384 }
385
386 queue_work(workqueue, &work);
387}
388
389static void stopwork(int vpe)
390{
391 sp_stopping = 1;
392
393 printk(KERN_DEBUG "KSPD: SP stopping\n");
394}
395
396void kspd_notify(struct kspd_notifications *notify)
397{
398 list_add(&notify->list, &kspd_notifylist);
399}
400
401static struct vpe_notifications notify;
402static int kspd_module_init(void)
403{
404 INIT_LIST_HEAD(&kspd_notifylist);
405
406 notify.start = startwork;
407 notify.stop = stopwork;
408 vpe_notify(tclimit, &notify);
409
410 return 0;
411}
412
413static void kspd_module_exit(void)
414{
415
416}
417
418module_init(kspd_module_init);
419module_exit(kspd_module_exit);
420
421MODULE_DESCRIPTION("MIPS KSPD");
422MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
423MODULE_LICENSE("GPL");
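The deleted KSPD daemon proxied SDE-kit "SP" requests onto Linux syscalls through small SP-to-AP translation tables, as in translate_open_flags() above. Below is a stand-alone sketch of that table-lookup idiom, using the SDE-kit flag values quoted in the removed file and the host's O_* flags; it is an illustration of the removed mechanism, not kernel code.

```c
/* Sketch of the SP-to-AP open-flag translation used by the removed KSPD. */
#include <fcntl.h>
#include <stdio.h>

#define MTSP_O_WRONLY	0x0001
#define MTSP_O_RDWR	0x0002
#define MTSP_O_APPEND	0x0008
#define MTSP_O_CREAT	0x0200

struct apsp_table {
	int sp;		/* SDE-kit (SP-side) value */
	int ap;		/* Linux (AP-side) value */
};

static const struct apsp_table open_flags_table[] = {
	{ MTSP_O_RDWR,   O_RDWR },
	{ MTSP_O_WRONLY, O_WRONLY },
	{ MTSP_O_CREAT,  O_CREAT },
	{ MTSP_O_APPEND, O_APPEND },
};

static unsigned int translate_open_flags(int flags)
{
	unsigned int ret = 0;
	size_t i;

	for (i = 0; i < sizeof(open_flags_table) / sizeof(open_flags_table[0]); i++)
		if (flags & open_flags_table[i].sp)
			ret |= open_flags_table[i].ap;
	return ret;
}

int main(void)
{
	printf("%#x\n", translate_open_flags(MTSP_O_RDWR | MTSP_O_CREAT));
	return 0;
}
```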
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index 2f28d3b55687..a9b995dcf691 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -28,6 +28,8 @@
28#include <asm/time.h> /* For perf_irq */ 28#include <asm/time.h> /* For perf_irq */
29 29
30#define MIPS_MAX_HWEVENTS 4 30#define MIPS_MAX_HWEVENTS 4
31#define MIPS_TCS_PER_COUNTER 2
32#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
31 33
32struct cpu_hw_events { 34struct cpu_hw_events {
33 /* Array of events on this cpu. */ 35 /* Array of events on this cpu. */
@@ -78,7 +80,6 @@ struct mips_perf_event {
78static struct mips_perf_event raw_event; 80static struct mips_perf_event raw_event;
79static DEFINE_MUTEX(raw_event_mutex); 81static DEFINE_MUTEX(raw_event_mutex);
80 82
81#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
82#define C(x) PERF_COUNT_HW_CACHE_##x 83#define C(x) PERF_COUNT_HW_CACHE_##x
83 84
84struct mips_pmu { 85struct mips_pmu {
@@ -109,13 +110,20 @@ static struct mips_pmu mipspmu;
109#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4) 110#define M_PERFCTL_INTERRUPT_ENABLE (1 << 4)
110#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5) 111#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
111#define M_PERFCTL_VPEID(vpe) ((vpe) << 16) 112#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
113
114#ifdef CONFIG_CPU_BMIPS5000
115#define M_PERFCTL_MT_EN(filter) 0
116#else /* !CONFIG_CPU_BMIPS5000 */
112#define M_PERFCTL_MT_EN(filter) ((filter) << 20) 117#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
118#endif /* CONFIG_CPU_BMIPS5000 */
119
113#define M_TC_EN_ALL M_PERFCTL_MT_EN(0) 120#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
114#define M_TC_EN_VPE M_PERFCTL_MT_EN(1) 121#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
115#define M_TC_EN_TC M_PERFCTL_MT_EN(2) 122#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
116#define M_PERFCTL_TCID(tcid) ((tcid) << 22) 123#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
117#define M_PERFCTL_WIDE (1 << 30) 124#define M_PERFCTL_WIDE (1 << 30)
118#define M_PERFCTL_MORE (1 << 31) 125#define M_PERFCTL_MORE (1 << 31)
126#define M_PERFCTL_TC (1 << 30)
119 127
120#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \ 128#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
121 M_PERFCTL_KERNEL | \ 129 M_PERFCTL_KERNEL | \
@@ -131,21 +139,21 @@ static struct mips_pmu mipspmu;
131#define M_PERFCTL_EVENT_MASK 0xfe0 139#define M_PERFCTL_EVENT_MASK 0xfe0
132 140
133 141
134#ifdef CONFIG_MIPS_MT_SMP 142#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
135static int cpu_has_mipsmt_pertccounters; 143static int cpu_has_mipsmt_pertccounters;
136 144
137static DEFINE_RWLOCK(pmuint_rwlock); 145static DEFINE_RWLOCK(pmuint_rwlock);
138 146
147#if defined(CONFIG_CPU_BMIPS5000)
148#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
149 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
150#else
139/* 151/*
140 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because 152 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
141 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs. 153 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
142 */ 154 */
143#if defined(CONFIG_HW_PERF_EVENTS)
144#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
145 0 : smp_processor_id())
146#else
147#define vpe_id() (cpu_has_mipsmt_pertccounters ? \ 155#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
148 0 : cpu_data[smp_processor_id()].vpe_id) 156 0 : smp_processor_id())
149#endif 157#endif
150 158
151/* Copied from op_model_mipsxx.c */ 159/* Copied from op_model_mipsxx.c */
@@ -162,10 +170,10 @@ static unsigned int counters_total_to_per_cpu(unsigned int counters)
162 return counters >> vpe_shift(); 170 return counters >> vpe_shift();
163} 171}
164 172
165#else /* !CONFIG_MIPS_MT_SMP */ 173#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
166#define vpe_id() 0 174#define vpe_id() 0
167 175
168#endif /* CONFIG_MIPS_MT_SMP */ 176#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
169 177
170static void resume_local_counters(void); 178static void resume_local_counters(void);
171static void pause_local_counters(void); 179static void pause_local_counters(void);
@@ -340,6 +348,11 @@ static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
340 (evt->config_base & M_PERFCTL_CONFIG_MASK) | 348 (evt->config_base & M_PERFCTL_CONFIG_MASK) |
341 /* Make sure interrupt enabled. */ 349 /* Make sure interrupt enabled. */
342 M_PERFCTL_INTERRUPT_ENABLE; 350 M_PERFCTL_INTERRUPT_ENABLE;
351 if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
352 /* enable the counter for the calling thread */
353 cpuc->saved_ctrl[idx] |=
354 (1 << (12 + vpe_id())) | M_PERFCTL_TC;
355
343 /* 356 /*
344 * We do not actually let the counter run. Leave it until start(). 357 * We do not actually let the counter run. Leave it until start().
345 */ 358 */
@@ -509,7 +522,7 @@ static void mipspmu_read(struct perf_event *event)
509 522
510static void mipspmu_enable(struct pmu *pmu) 523static void mipspmu_enable(struct pmu *pmu)
511{ 524{
512#ifdef CONFIG_MIPS_MT_SMP 525#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
513 write_unlock(&pmuint_rwlock); 526 write_unlock(&pmuint_rwlock);
514#endif 527#endif
515 resume_local_counters(); 528 resume_local_counters();
@@ -529,7 +542,7 @@ static void mipspmu_enable(struct pmu *pmu)
529static void mipspmu_disable(struct pmu *pmu) 542static void mipspmu_disable(struct pmu *pmu)
530{ 543{
531 pause_local_counters(); 544 pause_local_counters();
532#ifdef CONFIG_MIPS_MT_SMP 545#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
533 write_lock(&pmuint_rwlock); 546 write_lock(&pmuint_rwlock);
534#endif 547#endif
535} 548}
@@ -664,13 +677,10 @@ static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
664 677
665static const struct mips_perf_event *mipspmu_map_general_event(int idx) 678static const struct mips_perf_event *mipspmu_map_general_event(int idx)
666{ 679{
667 const struct mips_perf_event *pev;
668
669 pev = ((*mipspmu.general_event_map)[idx].event_id ==
670 UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
671 &(*mipspmu.general_event_map)[idx]);
672 680
673 return pev; 681 if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
682 return ERR_PTR(-EOPNOTSUPP);
683 return &(*mipspmu.general_event_map)[idx];
674} 684}
675 685
676static const struct mips_perf_event *mipspmu_map_cache_event(u64 config) 686static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
@@ -695,7 +705,7 @@ static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
695 [cache_op] 705 [cache_op]
696 [cache_result]); 706 [cache_result]);
697 707
698 if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID) 708 if (pev->cntr_mask == 0)
699 return ERR_PTR(-EOPNOTSUPP); 709 return ERR_PTR(-EOPNOTSUPP);
700 710
701 return pev; 711 return pev;
@@ -800,11 +810,8 @@ static const struct mips_perf_event mipsxxcore_event_map
800 [PERF_COUNT_HW_MAX] = { 810 [PERF_COUNT_HW_MAX] = {
801 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, 811 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
802 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, 812 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
803 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
804 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
805 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T }, 813 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
806 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, 814 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
807 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
808}; 815};
809 816
810/* 74K core has different branch event code. */ 817/* 74K core has different branch event code. */
@@ -812,11 +819,8 @@ static const struct mips_perf_event mipsxx74Kcore_event_map
812 [PERF_COUNT_HW_MAX] = { 819 [PERF_COUNT_HW_MAX] = {
813 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, 820 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
814 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, 821 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
815 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
816 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
817 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T }, 822 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
818 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, 823 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
819 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
820}; 824};
821 825
822static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = { 826static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
@@ -829,6 +833,13 @@ static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
829 [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL }, 833 [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
830}; 834};
831 835
836static const struct mips_perf_event bmips5000_event_map
837 [PERF_COUNT_HW_MAX] = {
838 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
839 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
840 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
841};
842
832/* 24K/34K/1004K cores can share the same cache event map. */ 843/* 24K/34K/1004K cores can share the same cache event map. */
833static const struct mips_perf_event mipsxxcore_cache_map 844static const struct mips_perf_event mipsxxcore_cache_map
834 [PERF_COUNT_HW_CACHE_MAX] 845 [PERF_COUNT_HW_CACHE_MAX]
@@ -849,10 +860,6 @@ static const struct mips_perf_event mipsxxcore_cache_map
849 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T }, 860 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
850 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T }, 861 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
851 }, 862 },
852 [C(OP_PREFETCH)] = {
853 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
854 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
855 },
856}, 863},
857[C(L1I)] = { 864[C(L1I)] = {
858 [C(OP_READ)] = { 865 [C(OP_READ)] = {
@@ -869,7 +876,6 @@ static const struct mips_perf_event mipsxxcore_cache_map
869 * Note that MIPS has only "hit" events countable for 876 * Note that MIPS has only "hit" events countable for
870 * the prefetch operation. 877 * the prefetch operation.
871 */ 878 */
872 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
873 }, 879 },
874}, 880},
875[C(LL)] = { 881[C(LL)] = {
@@ -881,10 +887,6 @@ static const struct mips_perf_event mipsxxcore_cache_map
881 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P }, 887 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
882 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P }, 888 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
883 }, 889 },
884 [C(OP_PREFETCH)] = {
885 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
886 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
887 },
888}, 890},
889[C(DTLB)] = { 891[C(DTLB)] = {
890 [C(OP_READ)] = { 892 [C(OP_READ)] = {
@@ -895,10 +897,6 @@ static const struct mips_perf_event mipsxxcore_cache_map
895 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, 897 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
896 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, 898 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
897 }, 899 },
898 [C(OP_PREFETCH)] = {
899 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
900 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
901 },
902}, 900},
903[C(ITLB)] = { 901[C(ITLB)] = {
904 [C(OP_READ)] = { 902 [C(OP_READ)] = {
@@ -909,10 +907,6 @@ static const struct mips_perf_event mipsxxcore_cache_map
909 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T }, 907 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
910 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T }, 908 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
911 }, 909 },
912 [C(OP_PREFETCH)] = {
913 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
914 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
915 },
916}, 910},
917[C(BPU)] = { 911[C(BPU)] = {
918 /* Using the same code for *HW_BRANCH* */ 912 /* Using the same code for *HW_BRANCH* */
@@ -924,24 +918,6 @@ static const struct mips_perf_event mipsxxcore_cache_map
924 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T }, 918 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
925 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, 919 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
926 }, 920 },
927 [C(OP_PREFETCH)] = {
928 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
929 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
930 },
931},
932[C(NODE)] = {
933 [C(OP_READ)] = {
934 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
935 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
936 },
937 [C(OP_WRITE)] = {
938 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
939 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
940 },
941 [C(OP_PREFETCH)] = {
942 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
943 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
944 },
945}, 921},
946}; 922};
947 923
@@ -965,10 +941,6 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
965 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T }, 941 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
966 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T }, 942 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
967 }, 943 },
968 [C(OP_PREFETCH)] = {
969 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
970 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
971 },
972}, 944},
973[C(L1I)] = { 945[C(L1I)] = {
974 [C(OP_READ)] = { 946 [C(OP_READ)] = {
@@ -985,7 +957,6 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
985 * Note that MIPS has only "hit" events countable for 957 * Note that MIPS has only "hit" events countable for
986 * the prefetch operation. 958 * the prefetch operation.
987 */ 959 */
988 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
989 }, 960 },
990}, 961},
991[C(LL)] = { 962[C(LL)] = {
@@ -997,25 +968,6 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
997 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, 968 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
998 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P }, 969 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
999 }, 970 },
1000 [C(OP_PREFETCH)] = {
1001 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1002 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1003 },
1004},
1005[C(DTLB)] = {
1006 /* 74K core does not have specific DTLB events. */
1007 [C(OP_READ)] = {
1008 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1009 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1010 },
1011 [C(OP_WRITE)] = {
1012 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1013 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1014 },
1015 [C(OP_PREFETCH)] = {
1016 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1017 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1018 },
1019}, 971},
1020[C(ITLB)] = { 972[C(ITLB)] = {
1021 [C(OP_READ)] = { 973 [C(OP_READ)] = {
@@ -1026,10 +978,6 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
1026 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, 978 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
1027 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T }, 979 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
1028 }, 980 },
1029 [C(OP_PREFETCH)] = {
1030 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1031 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1032 },
1033}, 981},
1034[C(BPU)] = { 982[C(BPU)] = {
1035 /* Using the same code for *HW_BRANCH* */ 983 /* Using the same code for *HW_BRANCH* */
@@ -1041,23 +989,64 @@ static const struct mips_perf_event mipsxx74Kcore_cache_map
1041 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T }, 989 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
1042 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T }, 990 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
1043 }, 991 },
1044 [C(OP_PREFETCH)] = { 992},
1045 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 993};
1046 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 994
995/* BMIPS5000 */
996static const struct mips_perf_event bmips5000_cache_map
997 [PERF_COUNT_HW_CACHE_MAX]
998 [PERF_COUNT_HW_CACHE_OP_MAX]
999 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
1000[C(L1D)] = {
1001 /*
1002 * Like some other architectures (e.g. ARM), the performance
1003 * counters don't differentiate between read and write
1004 * accesses/misses, so this isn't strictly correct, but it's the
1005 * best we can do. Writes and reads get combined.
1006 */
1007 [C(OP_READ)] = {
1008 [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
1009 [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
1010 },
1011 [C(OP_WRITE)] = {
1012 [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
1013 [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
1047 }, 1014 },
1048}, 1015},
1049[C(NODE)] = { 1016[C(L1I)] = {
1050 [C(OP_READ)] = { 1017 [C(OP_READ)] = {
1051 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1018 [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
1052 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1019 [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
1053 }, 1020 },
1054 [C(OP_WRITE)] = { 1021 [C(OP_WRITE)] = {
1055 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1022 [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
1056 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1023 [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
1057 }, 1024 },
1058 [C(OP_PREFETCH)] = { 1025 [C(OP_PREFETCH)] = {
1059 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1026 [C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T },
1060 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID }, 1027 /*
1028 * Note that MIPS has only "hit" events countable for
1029 * the prefetch operation.
1030 */
1031 },
1032},
1033[C(LL)] = {
1034 [C(OP_READ)] = {
1035 [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
1036 [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
1037 },
1038 [C(OP_WRITE)] = {
1039 [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
1040 [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
1041 },
1042},
1043[C(BPU)] = {
1044 /* Using the same code for *HW_BRANCH* */
1045 [C(OP_READ)] = {
1046 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
1047 },
1048 [C(OP_WRITE)] = {
1049 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
1061 }, 1050 },
1062}, 1051},
1063}; 1052};
@@ -1074,39 +1063,14 @@ static const struct mips_perf_event octeon_cache_map
1074 }, 1063 },
1075 [C(OP_WRITE)] = { 1064 [C(OP_WRITE)] = {
1076 [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL }, 1065 [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
1077 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1078 },
1079 [C(OP_PREFETCH)] = {
1080 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1081 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1082 }, 1066 },
1083}, 1067},
1084[C(L1I)] = { 1068[C(L1I)] = {
1085 [C(OP_READ)] = { 1069 [C(OP_READ)] = {
1086 [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL }, 1070 [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
1087 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1088 },
1089 [C(OP_WRITE)] = {
1090 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1091 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1092 }, 1071 },
1093 [C(OP_PREFETCH)] = { 1072 [C(OP_PREFETCH)] = {
1094 [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL }, 1073 [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
1095 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1096 },
1097},
1098[C(LL)] = {
1099 [C(OP_READ)] = {
1100 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1101 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1102 },
1103 [C(OP_WRITE)] = {
1104 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1105 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1106 },
1107 [C(OP_PREFETCH)] = {
1108 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1109 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1110 }, 1074 },
1111}, 1075},
1112[C(DTLB)] = { 1076[C(DTLB)] = {
@@ -1115,46 +1079,16 @@ static const struct mips_perf_event octeon_cache_map
1115 * read and write. 1079 * read and write.
1116 */ 1080 */
1117 [C(OP_READ)] = { 1081 [C(OP_READ)] = {
1118 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1119 [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, 1082 [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
1120 }, 1083 },
1121 [C(OP_WRITE)] = { 1084 [C(OP_WRITE)] = {
1122 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1123 [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, 1085 [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
1124 }, 1086 },
1125 [C(OP_PREFETCH)] = {
1126 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1127 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1128 },
1129}, 1087},
1130[C(ITLB)] = { 1088[C(ITLB)] = {
1131 [C(OP_READ)] = { 1089 [C(OP_READ)] = {
1132 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1133 [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, 1090 [C(RESULT_MISS)] = { 0x37, CNTR_ALL },
1134 }, 1091 },
1135 [C(OP_WRITE)] = {
1136 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1137 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1138 },
1139 [C(OP_PREFETCH)] = {
1140 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1141 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1142 },
1143},
1144[C(BPU)] = {
1145 /* Using the same code for *HW_BRANCH* */
1146 [C(OP_READ)] = {
1147 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1148 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1149 },
1150 [C(OP_WRITE)] = {
1151 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1152 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1153 },
1154 [C(OP_PREFETCH)] = {
1155 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1156 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1157 },
1158}, 1092},
1159}; 1093};
1160 1094
@@ -1304,7 +1238,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
1304 int handled = IRQ_NONE; 1238 int handled = IRQ_NONE;
1305 struct pt_regs *regs; 1239 struct pt_regs *regs;
1306 1240
1307 if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26))) 1241 if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
1308 return handled; 1242 return handled;
1309 /* 1243 /*
1310 * First we pause the local counters, so that when we are locked 1244 * First we pause the local counters, so that when we are locked
@@ -1314,7 +1248,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
1314 * See also mipsxx_pmu_start(). 1248 * See also mipsxx_pmu_start().
1315 */ 1249 */
1316 pause_local_counters(); 1250 pause_local_counters();
1317#ifdef CONFIG_MIPS_MT_SMP 1251#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1318 read_lock(&pmuint_rwlock); 1252 read_lock(&pmuint_rwlock);
1319#endif 1253#endif
1320 1254
@@ -1346,7 +1280,7 @@ static int mipsxx_pmu_handle_shared_irq(void)
1346 if (handled == IRQ_HANDLED) 1280 if (handled == IRQ_HANDLED)
1347 irq_work_run(); 1281 irq_work_run();
1348 1282
1349#ifdef CONFIG_MIPS_MT_SMP 1283#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1350 read_unlock(&pmuint_rwlock); 1284 read_unlock(&pmuint_rwlock);
1351#endif 1285#endif
1352 resume_local_counters(); 1286 resume_local_counters();
@@ -1391,6 +1325,11 @@ static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1391#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47) 1325#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
1392#endif 1326#endif
1393 1327
1328/* BMIPS5000 */
1329#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
1330 ((b) == 0 || (b) == 1)
1331
1332
1394/* 1333/*
1395 * User can use 0-255 raw events, where 0-127 for the events of even 1334 * User can use 0-255 raw events, where 0-127 for the events of even
1396 * counters, and 128-255 for odd counters. Note that bit 7 is used to 1335 * counters, and 128-255 for odd counters. Note that bit 7 is used to
@@ -1461,6 +1400,12 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1461 raw_event.range = T; 1400 raw_event.range = T;
1462#endif 1401#endif
1463 break; 1402 break;
1403 case CPU_BMIPS5000:
1404 if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
1405 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1406 else
1407 raw_event.cntr_mask =
1408 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1464 } 1409 }
1465 1410
1466 return &raw_event; 1411 return &raw_event;
@@ -1513,7 +1458,7 @@ init_hw_perf_events(void)
1513 return -ENODEV; 1458 return -ENODEV;
1514 } 1459 }
1515 1460
1516#ifdef CONFIG_MIPS_MT_SMP 1461#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
1517 cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19); 1462 cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
1518 if (!cpu_has_mipsmt_pertccounters) 1463 if (!cpu_has_mipsmt_pertccounters)
1519 counters = counters_total_to_per_cpu(counters); 1464 counters = counters_total_to_per_cpu(counters);
@@ -1572,6 +1517,11 @@ init_hw_perf_events(void)
1572 mipspmu.cache_event_map = &octeon_cache_map; 1517 mipspmu.cache_event_map = &octeon_cache_map;
1573 mipspmu.map_raw_event = octeon_pmu_map_raw_event; 1518 mipspmu.map_raw_event = octeon_pmu_map_raw_event;
1574 break; 1519 break;
1520 case CPU_BMIPS5000:
1521 mipspmu.name = "BMIPS5000";
1522 mipspmu.general_event_map = &bmips5000_event_map;
1523 mipspmu.cache_event_map = &bmips5000_cache_map;
1524 break;
1575 default: 1525 default:
1576 pr_cont("Either hardware does not support performance " 1526 pr_cont("Either hardware does not support performance "
1577 "counters, or not yet implemented.\n"); 1527 "counters, or not yet implemented.\n");
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 5542817c1b49..07dff54f2ce8 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -64,14 +64,17 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 				cpu_data[n].watch_reg_masks[i]);
 		seq_printf(m, "]\n");
 	}
-	seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
-		cpu_has_mips16 ? " mips16" : "",
-		cpu_has_mdmx ? " mdmx" : "",
-		cpu_has_mips3d ? " mips3d" : "",
-		cpu_has_smartmips ? " smartmips" : "",
-		cpu_has_dsp ? " dsp" : "",
-		cpu_has_mipsmt ? " mt" : ""
-	);
+
+	seq_printf(m, "ASEs implemented\t:");
+	if (cpu_has_mips16)	seq_printf(m, "%s", " mips16");
+	if (cpu_has_mdmx)	seq_printf(m, "%s", " mdmx");
+	if (cpu_has_mips3d)	seq_printf(m, "%s", " mips3d");
+	if (cpu_has_smartmips)	seq_printf(m, "%s", " smartmips");
+	if (cpu_has_dsp)	seq_printf(m, "%s", " dsp");
+	if (cpu_has_dsp2)	seq_printf(m, "%s", " dsp2");
+	if (cpu_has_mipsmt)	seq_printf(m, "%s", " mt");
+	seq_printf(m, "\n");
+
 	seq_printf(m, "shadow register sets\t: %d\n",
 		      cpu_data[n].srsets);
 	seq_printf(m, "kscratch registers\t: %d\n",
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index df243a64f430..007ccbe1e264 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -1,6 +1,13 @@
 #include <asm/asm-offsets.h>
 #include <asm/page.h>
 #include <asm/thread_info.h>
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss.  This will
+ * ensure that it has .bss alignment (64K).
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
 #include <asm-generic/vmlinux.lds.h>
 
 #undef mips
@@ -119,11 +126,21 @@ SECTIONS
 	}
 
 	PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
-	. = ALIGN(PAGE_SIZE);
+	/*
+	 * Align to 64K in attempt to eliminate holes before the
+	 * .bss..swapper_pg_dir section at the start of .bss.  This
+	 * also satisfies PAGE_SIZE alignment as the largest page size
+	 * allowed is 64K.
+	 */
+	. = ALIGN(0x10000);
 	__init_end = .;
 	/* freed after init ends here */
 
-	BSS_SECTION(0, 0, 0)
+	/*
+	 * Force .bss to 64K alignment so that .bss..swapper_pg_dir
+	 * gets that alignment.  .sbss should be empty, so there will be
+	 * no holes after __init_end. */
+	BSS_SECTION(0, 0x10000, 0)
 
 	_end = . ;
 
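The linker-script change assumes swapper_pg_dir is emitted into its own .bss..swapper_pg_dir input section so BSS_FIRST_SECTIONS can place it at the start of .bss and the 64K alignment is not wasted on padding. A hedged C-level sketch of such a placement using plain GCC attributes; the kernel uses its own section and alignment helper macros rather than this exact spelling, and the array size here is illustrative.

```c
/*
 * Sketch: place a zero-initialized page directory into a dedicated
 * BSS subsection with 64K alignment so a linker script can order it
 * first.  Type, size, and the attribute spelling are illustrative.
 */
typedef struct { unsigned long pgd; } pgd_t;

pgd_t swapper_pg_dir[1024]
	__attribute__((section(".bss..swapper_pg_dir"),
		       aligned(0x10000)));
```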
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index f6f91523cb1c..eec690af6581 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -50,7 +50,6 @@
 #include <asm/mips_mt.h>
 #include <asm/processor.h>
 #include <asm/vpe.h>
-#include <asm/kspd.h>
 
 typedef void *vpe_handle;
 
@@ -69,11 +68,6 @@ static char module_name[] = "vpe";
 static int major;
 static const int minor = 1;	/* fixed for now */
 
-#ifdef CONFIG_MIPS_APSP_KSPD
-static struct kspd_notifications kspd_events;
-static int kspd_events_reqd;
-#endif
-
 /* grab the likely amount of memory we will need. */
 #ifdef CONFIG_MIPS_VPE_LOADER_TOM
 #define P_SIZE (2 * 1024 * 1024)
@@ -1101,14 +1095,6 @@ static int vpe_open(struct inode *inode, struct file *filp)
 	v->uid = filp->f_cred->fsuid;
 	v->gid = filp->f_cred->fsgid;
 
-#ifdef CONFIG_MIPS_APSP_KSPD
-	/* get kspd to tell us when a syscall_exit happens */
-	if (!kspd_events_reqd) {
-		kspd_notify(&kspd_events);
-		kspd_events_reqd++;
-	}
-#endif
-
 	v->cwd[0] = 0;
 	ret = getcwd(v->cwd, VPE_PATH_MAX);
 	if (ret < 0)
@@ -1341,13 +1327,6 @@ char *vpe_getcwd(int index)
 
 EXPORT_SYMBOL(vpe_getcwd);
 
-#ifdef CONFIG_MIPS_APSP_KSPD
-static void kspd_sp_exit( int sp_id)
-{
-	cleanup_tc(get_tc(sp_id));
-}
-#endif
-
 static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
 			  const char *buf, size_t len)
 {
@@ -1585,9 +1564,6 @@ out_reenable:
 	emt(mtflags);
 	local_irq_restore(flags);
 
-#ifdef CONFIG_MIPS_APSP_KSPD
-	kspd_events.kspd_sp_exit = kspd_sp_exit;
-#endif
 	return 0;
 
 out_class: