diff options
Diffstat (limited to 'arch/mips/kernel/vpe-mt.c')
-rw-r--r-- | arch/mips/kernel/vpe-mt.c | 523 |
1 files changed, 523 insertions, 0 deletions
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c new file mode 100644 index 000000000000..949ae0e17018 --- /dev/null +++ b/arch/mips/kernel/vpe-mt.c | |||
@@ -0,0 +1,523 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
8 | */ | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/export.h> | ||
14 | |||
15 | #include <asm/mipsregs.h> | ||
16 | #include <asm/mipsmtregs.h> | ||
17 | #include <asm/mips_mt.h> | ||
18 | #include <asm/vpe.h> | ||
19 | |||
/* Dynamically-allocated major number of the VPE character device,
 * assigned by register_chrdev() in vpe_module_init(). */
static int major;

/* The number of TCs and VPEs physically available on the core */
static int hw_tcs, hw_vpes;
24 | |||
/*
 * We are prepared so configure and start the VPE...
 *
 * Binds the first TC on v's TC list to VPE 1, points it at v->__start,
 * activates it, and notifies registered listeners.  Must be called on the
 * Master VPE.  Runs with interrupts, multithreading (dmt) and other VPEs
 * (dvpe) disabled while the core is in MVPE configuration state.
 *
 * Returns 0 on success, -1 if not running on the Master VPE, or -ENOEXEC
 * if v has no TCs or its first TC is already active.
 */
int vpe_run(struct vpe *v)
{
	unsigned long flags, val, dmt_flag;
	struct vpe_notifications *notifier;
	unsigned int vpeflags;
	struct tc *t;

	/* check we are the Master VPE */
	local_irq_save(flags);
	val = read_c0_vpeconf0();
	if (!(val & VPECONF0_MVP)) {
		pr_warn("VPE loader: only Master VPE's are able to config MT\n");
		local_irq_restore(flags);

		return -1;
	}

	/* Quiesce the core: stop other threads and VPEs, remembering the
	 * previous states so they can be restored on every exit path. */
	dmt_flag = dmt();
	vpeflags = dvpe();

	if (list_empty(&v->tc)) {
		evpe(vpeflags);
		emt(dmt_flag);
		local_irq_restore(flags);

		pr_warn("VPE loader: No TC's associated with VPE %d\n",
			v->minor);

		return -ENOEXEC;
	}

	t = list_first_entry(&v->tc, struct tc, tc);

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(t->index);

	/* should check it is halted, and not activated */
	if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
	    !(read_tc_c0_tchalt() & TCHALT_H)) {
		evpe(vpeflags);
		emt(dmt_flag);
		local_irq_restore(flags);

		/* NOTE(review): this path returns with MVPCONTROL_VPC still
		 * set (no clear_c0_mvpcontrol before bailing out) — confirm
		 * whether leaving the core in configuration state here is
		 * intentional. */
		pr_warn("VPE loader: TC %d is already active!\n",
			t->index);

		return -ENOEXEC;
	}

	/*
	 * Write the address we want it to start running from in the TCPC
	 * register.
	 */
	write_tc_c0_tcrestart((unsigned long)v->__start);
	write_tc_c0_tccontext((unsigned long)0);

	/*
	 * Mark the TC as activated, not interrupt exempt and not dynamically
	 * allocatable
	 */
	val = read_tc_c0_tcstatus();
	val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
	write_tc_c0_tcstatus(val);

	/* Release the TC from the halted state so it can run. */
	write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);

	/*
	 * The sde-kit passes 'memsize' to __start in $a3, so set something
	 * here...  Or set $a3 to zero and define DFLT_STACK_SIZE and
	 * DFLT_HEAP_SIZE when you compile your program
	 */
	mttgpr(6, v->ntcs);
	mttgpr(7, physical_memsize);

	/* set up VPE1 */
	/*
	 * bind the TC to VPE 1 as late as possible so we only have the final
	 * VPE registers to set up, and so an EJTAG probe can trigger on it
	 */
	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);

	/* Temporarily de-activate the target VPE while reconfiguring it. */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));

	back_to_back_c0_hazard();

	/* Set up the XTC bit in vpeconf0 to point at our tc */
	write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
			      | (t->index << VPECONF0_XTC_SHIFT));

	back_to_back_c0_hazard();

	/* enable this VPE */
	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);

	/* clear out any left overs from a previous program */
	write_vpe_c0_status(0);
	write_vpe_c0_cause(0);

	/* take system out of configuration state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	/*
	 * SMTC/SMVP kernels manage VPE enable independently,
	 * but uniprocessor kernels need to turn it on, even
	 * if that wasn't the pre-dvpe() state.
	 */
#ifdef CONFIG_SMP
	evpe(vpeflags);
#else
	evpe(EVPE_ENABLE);
#endif
	emt(dmt_flag);
	local_irq_restore(flags);

	/* Tell registered listeners the AP program has started. */
	list_for_each_entry(notifier, &v->notify, list)
		notifier->start(VPE_MODULE_MINOR);

	return 0;
}
147 | |||
/*
 * Halt a TC and mark it unallocated, interrupt-exempt and not dynamically
 * allocatable.  The whole sequence runs with interrupts off, threads and
 * VPEs disabled, and the core in MVPE configuration state, all of which
 * are restored before returning.
 */
void cleanup_tc(struct tc *tc)
{
	unsigned long flags;
	unsigned int mtflags, vpflags;
	int tmp;

	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();
	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	settc(tc->index);
	tmp = read_tc_c0_tcstatus();

	/* mark not allocated and not dynamically allocatable */
	tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
	tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
	write_tc_c0_tcstatus(tmp);

	/* Halt the TC and wait for the write to take effect. */
	write_tc_c0_tchalt(TCHALT_H);
	mips_ihb();

	/* Leave configuration state and restore the previous MT/VPE state. */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);
	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);
}
176 | |||
177 | /* module wrapper entry points */ | ||
178 | /* give me a vpe */ | ||
179 | void *vpe_alloc(void) | ||
180 | { | ||
181 | int i; | ||
182 | struct vpe *v; | ||
183 | |||
184 | /* find a vpe */ | ||
185 | for (i = 1; i < MAX_VPES; i++) { | ||
186 | v = get_vpe(i); | ||
187 | if (v != NULL) { | ||
188 | v->state = VPE_STATE_INUSE; | ||
189 | return v; | ||
190 | } | ||
191 | } | ||
192 | return NULL; | ||
193 | } | ||
194 | EXPORT_SYMBOL(vpe_alloc); | ||
195 | |||
196 | /* start running from here */ | ||
197 | int vpe_start(void *vpe, unsigned long start) | ||
198 | { | ||
199 | struct vpe *v = vpe; | ||
200 | |||
201 | v->__start = start; | ||
202 | return vpe_run(v); | ||
203 | } | ||
204 | EXPORT_SYMBOL(vpe_start); | ||
205 | |||
206 | /* halt it for now */ | ||
207 | int vpe_stop(void *vpe) | ||
208 | { | ||
209 | struct vpe *v = vpe; | ||
210 | struct tc *t; | ||
211 | unsigned int evpe_flags; | ||
212 | |||
213 | evpe_flags = dvpe(); | ||
214 | |||
215 | t = list_entry(v->tc.next, struct tc, tc); | ||
216 | if (t != NULL) { | ||
217 | settc(t->index); | ||
218 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
219 | } | ||
220 | |||
221 | evpe(evpe_flags); | ||
222 | |||
223 | return 0; | ||
224 | } | ||
225 | EXPORT_SYMBOL(vpe_stop); | ||
226 | |||
227 | /* I've done with it thank you */ | ||
228 | int vpe_free(void *vpe) | ||
229 | { | ||
230 | struct vpe *v = vpe; | ||
231 | struct tc *t; | ||
232 | unsigned int evpe_flags; | ||
233 | |||
234 | t = list_entry(v->tc.next, struct tc, tc); | ||
235 | if (t == NULL) | ||
236 | return -ENOEXEC; | ||
237 | |||
238 | evpe_flags = dvpe(); | ||
239 | |||
240 | /* Put MVPE's into 'configuration state' */ | ||
241 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
242 | |||
243 | settc(t->index); | ||
244 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
245 | |||
246 | /* halt the TC */ | ||
247 | write_tc_c0_tchalt(TCHALT_H); | ||
248 | mips_ihb(); | ||
249 | |||
250 | /* mark the TC unallocated */ | ||
251 | write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A); | ||
252 | |||
253 | v->state = VPE_STATE_UNUSED; | ||
254 | |||
255 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
256 | evpe(evpe_flags); | ||
257 | |||
258 | return 0; | ||
259 | } | ||
260 | EXPORT_SYMBOL(vpe_free); | ||
261 | |||
262 | static ssize_t store_kill(struct device *dev, struct device_attribute *attr, | ||
263 | const char *buf, size_t len) | ||
264 | { | ||
265 | struct vpe *vpe = get_vpe(aprp_cpu_index()); | ||
266 | struct vpe_notifications *notifier; | ||
267 | |||
268 | list_for_each_entry(notifier, &vpe->notify, list) | ||
269 | notifier->stop(aprp_cpu_index()); | ||
270 | |||
271 | release_progmem(vpe->load_addr); | ||
272 | cleanup_tc(get_tc(aprp_cpu_index())); | ||
273 | vpe_stop(vpe); | ||
274 | vpe_free(vpe); | ||
275 | |||
276 | return len; | ||
277 | } | ||
278 | static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); | ||
279 | |||
280 | static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, | ||
281 | char *buf) | ||
282 | { | ||
283 | struct vpe *vpe = get_vpe(aprp_cpu_index()); | ||
284 | |||
285 | return sprintf(buf, "%d\n", vpe->ntcs); | ||
286 | } | ||
287 | |||
288 | static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, | ||
289 | const char *buf, size_t len) | ||
290 | { | ||
291 | struct vpe *vpe = get_vpe(aprp_cpu_index()); | ||
292 | unsigned long new; | ||
293 | int ret; | ||
294 | |||
295 | ret = kstrtoul(buf, 0, &new); | ||
296 | if (ret < 0) | ||
297 | return ret; | ||
298 | |||
299 | if (new == 0 || new > (hw_tcs - aprp_cpu_index())) | ||
300 | return -EINVAL; | ||
301 | |||
302 | vpe->ntcs = new; | ||
303 | |||
304 | return len; | ||
305 | } | ||
306 | static DEVICE_ATTR_RW(ntcs); | ||
307 | |||
/* sysfs attributes exposed on the "vpe1" device: "kill" and "ntcs". */
static struct attribute *vpe_attrs[] = {
	&dev_attr_kill.attr,
	&dev_attr_ntcs.attr,
	NULL,
};
ATTRIBUTE_GROUPS(vpe);
314 | |||
/*
 * Release callback for devices of vpe_class.
 *
 * NOTE(review): the only device registered with this class is the static
 * vpe_device below, which was never kmalloc'd — kfree() on it would be
 * invalid if this callback ever runs.  Verify the device refcounting
 * actually keeps this from firing, or allocate the device dynamically.
 */
static void vpe_device_release(struct device *cd)
{
	kfree(cd);
}
319 | |||
/* Device class backing /sys/class/vpe; supplies the sysfs attribute
 * groups (kill, ntcs) for each registered device. */
static struct class vpe_class = {
	.name = "vpe",
	.owner = THIS_MODULE,
	.dev_release = vpe_device_release,
	.dev_groups = vpe_groups,
};

/* The single "vpe1" device node, set up in vpe_module_init(). */
static struct device vpe_device;
328 | |||
/*
 * Module/boot-time initialisation of the VPE loader.
 *
 * Verifies the CPU supports MT and that TCs/VPEs have been reserved for
 * AP/SP (via the maxvpes=/maxtcs= kernel arguments), registers the "vpe"
 * character device, class and "vpe1" device, then walks every TC reserved
 * for AP/SP, allocating loader bookkeeping (struct tc / struct vpe),
 * de-activating the VPEs and halting the TCs so they are ready for a
 * program to be loaded.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int __init vpe_module_init(void)
{
	unsigned int mtflags, vpflags;
	unsigned long flags, val;
	struct vpe *v = NULL;
	struct tc *t;
	int tc, err;

	if (!cpu_has_mipsmt) {
		pr_warn("VPE loader: not a MIPS MT capable processor\n");
		return -ENODEV;
	}

	if (vpelimit == 0) {
		pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
			"Pass maxvpes=<n> argument as kernel argument\n");

		return -ENODEV;
	}

	if (aprp_cpu_index() == 0) {
		pr_warn("No TCs reserved for AP/SP, not initialize VPE loader\n"
			"Pass maxtcs=<n> argument as kernel argument\n");

		return -ENODEV;
	}

	major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
	if (major < 0) {
		pr_warn("VPE loader: unable to register character device\n");
		return major;
	}

	err = class_register(&vpe_class);
	if (err) {
		pr_err("vpe_class registration failed\n");
		goto out_chrdev;
	}

	/* The trailing commas below are comma operators chaining the field
	 * assignments into one statement; behaviour is as if they were
	 * separate statements. */
	device_initialize(&vpe_device);
	vpe_device.class	= &vpe_class,
	vpe_device.parent	= NULL,
	dev_set_name(&vpe_device, "vpe1");
	vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
	err = device_add(&vpe_device);
	if (err) {
		pr_err("Adding vpe_device failed\n");
		goto out_class;
	}

	local_irq_save(flags);
	mtflags = dmt();
	vpflags = dvpe();

	/* Put MVPE's into 'configuration state' */
	set_c0_mvpcontrol(MVPCONTROL_VPC);

	/* Discover how many TCs and VPEs the core actually has. */
	val = read_c0_mvpconf0();
	hw_tcs = (val & MVPCONF0_PTC) + 1;
	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;

	for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
		/*
		 * Must re-enable multithreading temporarily or in case we
		 * reschedule send IPIs or similar we might hang.
		 */
		clear_c0_mvpcontrol(MVPCONTROL_VPC);
		evpe(vpflags);
		emt(mtflags);
		local_irq_restore(flags);
		t = alloc_tc(tc);
		if (!t) {
			err = -ENOMEM;
			goto out_dev;
		}

		/* Re-quiesce the core before touching TC/VPE registers. */
		local_irq_save(flags);
		mtflags = dmt();
		vpflags = dvpe();
		set_c0_mvpcontrol(MVPCONTROL_VPC);

		/* VPE's */
		/* NOTE(review): this condition is always true given the loop
		 * bound (tc < hw_tcs) — possibly a leftover from when the
		 * loop started at 0; confirm intent. */
		if (tc < hw_tcs) {
			settc(tc);

			v = alloc_vpe(tc);
			if (v == NULL) {
				/* NOTE(review): this jump leaves err
				 * untouched and the function returns 0, so
				 * allocation failure here succeeds silently
				 * apart from the warning — verify that is
				 * intended. */
				pr_warn("VPE: unable to allocate VPE\n");
				goto out_reenable;
			}

			v->ntcs = hw_tcs - aprp_cpu_index();

			/* add the tc to the list of this vpe's tc's. */
			list_add(&t->tc, &v->tc);

			/* deactivate all but vpe0 */
			if (tc >= aprp_cpu_index()) {
				unsigned long tmp = read_vpe_c0_vpeconf0();

				tmp &= ~VPECONF0_VPA;

				/* master VPE */
				tmp |= VPECONF0_MVP;
				write_vpe_c0_vpeconf0(tmp);
			}

			/* disable multi-threading with TC's */
			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
						~VPECONTROL_TE);

			if (tc >= vpelimit) {
				/*
				 * Set config to be the same as vpe0,
				 * particularly kseg0 coherency alg
				 */
				write_vpe_c0_config(read_c0_config());
			}
		}

		/* TC's */
		t->pvpe = v;	/* set the parent vpe */

		if (tc >= aprp_cpu_index()) {
			unsigned long tmp;

			settc(tc);

			/* Any TC that is bound to VPE0 gets left as is - in
			 * case we are running SMTC on VPE0. A TC that is bound
			 * to any other VPE gets bound to VPE0, ideally I'd like
			 * to make it homeless but it doesn't appear to let me
			 * bind a TC to a non-existent VPE. Which is perfectly
			 * reasonable.
			 *
			 * The (un)bound state is visible to an EJTAG probe so
			 * may notify GDB...
			 */
			tmp = read_tc_c0_tcbind();
			if (tmp & TCBIND_CURVPE) {
				/* tc is bound >vpe0 */
				write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);

				t->pvpe = get_vpe(0);	/* set the parent vpe */
			}

			/* halt the TC */
			write_tc_c0_tchalt(TCHALT_H);
			mips_ihb();

			tmp = read_tc_c0_tcstatus();

			/* mark not activated and not dynamically allocatable */
			tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
			tmp |= TCSTATUS_IXMT;	/* interrupt exempt */
			write_tc_c0_tcstatus(tmp);
		}
	}

out_reenable:
	/* release config state */
	clear_c0_mvpcontrol(MVPCONTROL_VPC);

	evpe(vpflags);
	emt(mtflags);
	local_irq_restore(flags);

	return 0;

out_dev:
	device_del(&vpe_device);

out_class:
	class_unregister(&vpe_class);

out_chrdev:
	unregister_chrdev(major, VPE_MODULE_NAME);

	return err;
}
509 | |||
/*
 * Module teardown: unregister the device, class and character device,
 * then release every VPE the loader still considers in use.
 */
void __exit vpe_module_exit(void)
{
	struct vpe *v, *n;

	device_del(&vpe_device);
	class_unregister(&vpe_class);
	unregister_chrdev(major, VPE_MODULE_NAME);

	/* No locking needed here */
	list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
		if (v->state != VPE_STATE_UNUSED)
			release_vpe(v);
	}
}