author		Ralf Baechle <ralf@linux-mips.org>	2007-07-27 14:31:10 -0400
committer	Ralf Baechle <ralf@linux-mips.org>	2007-07-31 16:35:24 -0400
commit		07cc0c9e65d3e262f871ea357dd77b41950b1ca5 (patch)
tree		fa797fa236da6e03c7b778cdc364ac1c3e03b52f
parent		c3a005f4b6a7752608e75d016ef8d07c55285e48 (diff)
[MIPS] MT: Enable coexistence of AP/SP with VSMP and SMTC.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
-rw-r--r--	arch/mips/Kconfig		 24
-rw-r--r--	arch/mips/kernel/kspd.c		 19
-rw-r--r--	arch/mips/kernel/mips-mt.c	 22
-rw-r--r--	arch/mips/kernel/rtlx.c		 22
-rw-r--r--	arch/mips/kernel/smtc.c		 16
-rw-r--r--	arch/mips/kernel/vpe.c		263
-rw-r--r--	include/asm-mips/mips_mt.h	  6
7 files changed, 203 insertions, 169 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 0893e084150e..3513e226837b 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1377,17 +1377,6 @@ config MIPS_MT_SMTC
 	  This is a kernel model which is known a SMTC or lately has been
 	  marketesed into SMVP.
 
-config MIPS_VPE_LOADER
-	bool "VPE loader support."
-	depends on SYS_SUPPORTS_MULTITHREADING
-	select CPU_MIPSR2_IRQ_VI
-	select CPU_MIPSR2_IRQ_EI
-	select CPU_MIPSR2_SRS
-	select MIPS_MT
-	help
-	  Includes a loader for loading an elf relocatable object
-	  onto another VPE and running it.
-
 endchoice
 
 config MIPS_MT
@@ -1398,8 +1387,19 @@ config SYS_SUPPORTS_MULTITHREADING
 
 config MIPS_MT_FPAFF
 	bool "Dynamic FPU affinity for FP-intensive threads"
-	depends on MIPS_MT
 	default y
+	depends on MIPS_MT_SMP || MIPS_MT_SMTC
+
+config MIPS_VPE_LOADER
+	bool "VPE loader support."
+	depends on SYS_SUPPORTS_MULTITHREADING
+	select CPU_MIPSR2_IRQ_VI
+	select CPU_MIPSR2_IRQ_EI
+	select CPU_MIPSR2_SRS
+	select MIPS_MT
+	help
+	  Includes a loader for loading an elf relocatable object
+	  onto another VPE and running it.
 
 config MIPS_MT_SMTC_INSTANT_REPLAY
 	bool "Low-latency Dispatch of Deferred SMTC IPIs"
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index c6580018c94b..cb9a14a1ca5b 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -89,7 +89,7 @@ static int sp_stopping = 0;
 #define MTSP_O_EXCL		0x0800
 #define MTSP_O_BINARY		0x8000
 
-#define SP_VPE 1
+extern int tclimit;
 
 struct apsp_table {
 	int sp;
@@ -225,8 +225,8 @@ void sp_work_handle_request(void)
 	/* Run the syscall at the priviledge of the user who loaded the
 	   SP program */
 
-	if (vpe_getuid(SP_VPE))
-		sp_setfsuidgid( vpe_getuid(SP_VPE), vpe_getgid(SP_VPE));
+	if (vpe_getuid(tclimit))
+		sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
 
 	switch (sc.cmd) {
 	/* needs the flags argument translating from SDE kit to
@@ -245,7 +245,7 @@ void sp_work_handle_request(void)
 
 	case MTSP_SYSCALL_EXIT:
 		list_for_each_entry(n, &kspd_notifylist, list)
-			n->kspd_sp_exit(SP_VPE);
+			n->kspd_sp_exit(tclimit);
 		sp_stopping = 1;
 
 		printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
@@ -255,7 +255,7 @@ void sp_work_handle_request(void)
 	case MTSP_SYSCALL_OPEN:
 		generic.arg1 = translate_open_flags(generic.arg1);
 
-		vcwd = vpe_getcwd(SP_VPE);
+		vcwd = vpe_getcwd(tclimit);
 
 		/* change to the cwd of the process that loaded the SP program */
 		old_fs = get_fs();
@@ -283,7 +283,7 @@ void sp_work_handle_request(void)
 		break;
 	} /* switch */
 
-	if (vpe_getuid(SP_VPE))
+	if (vpe_getuid(tclimit))
 		sp_setfsuidgid( 0, 0);
 
 	old_fs = get_fs();
@@ -364,10 +364,9 @@ static void startwork(int vpe)
 		}
 
 		INIT_WORK(&work, sp_work);
-		queue_work(workqueue, &work);
-	} else
-		queue_work(workqueue, &work);
+	}
 
+	queue_work(workqueue, &work);
 }
 
 static void stopwork(int vpe)
@@ -389,7 +388,7 @@ static int kspd_module_init(void)
 
 	notify.start = startwork;
 	notify.stop = stopwork;
-	vpe_notify(SP_VPE, &notify);
+	vpe_notify(tclimit, &notify);
 
 	return 0;
 }
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 1a7d89231299..7169a4db37b8 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -21,6 +21,28 @@
 #include <asm/r4kcache.h>
 #include <asm/cacheflush.h>
 
+int vpelimit;
+
+static int __init maxvpes(char *str)
+{
+	get_option(&str, &vpelimit);
+
+	return 1;
+}
+
+__setup("maxvpes=", maxvpes);
+
+int tclimit;
+
+static int __init maxtcs(char *str)
+{
+	get_option(&str, &tclimit);
+
+	return 1;
+}
+
+__setup("maxtcs=", maxtcs);
+
 /*
  * Dump new MIPS MT state for the core. Does not leave TCs halted.
  * Takes an argument which taken to be a pre-call MVPControl value.
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 8cf24d716d41..5c040060560e 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -40,12 +40,11 @@
 #include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/mips_mt.h>
 #include <asm/system.h>
 #include <asm/vpe.h>
 #include <asm/rtlx.h>
 
-#define RTLX_TARG_VPE 1
-
 static struct rtlx_info *rtlx;
 static int major;
 static char module_name[] = "rtlx";
@@ -165,10 +164,10 @@ int rtlx_open(int index, int can_sleep)
 	}
 
 	if (rtlx == NULL) {
-		if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
+		if( (p = vpe_get_shared(tclimit)) == NULL) {
 			if (can_sleep) {
 				__wait_event_interruptible(channel_wqs[index].lx_queue,
-					(p = vpe_get_shared(RTLX_TARG_VPE)),
+					(p = vpe_get_shared(tclimit)),
 					ret);
 				if (ret)
 					goto out_fail;
@@ -477,6 +476,19 @@ static int rtlx_module_init(void)
 	struct device *dev;
 	int i, err;
 
+	if (!cpu_has_mipsmt) {
+		printk("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (tclimit == 0) {
+		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
+		       "initializing RTLX.\nPass maxtcs=<n> argument as kernel "
+		       "argument\n");
+
+		return -ENODEV;
+	}
+
 	major = register_chrdev(0, module_name, &rtlx_fops);
 	if (major < 0) {
 		printk(register_chrdev_failed);
@@ -501,7 +513,7 @@ static int rtlx_module_init(void)
 	/* set up notifiers */
 	notify.start = starting;
 	notify.stop = stopping;
-	vpe_notify(RTLX_TARG_VPE, &notify);
+	vpe_notify(tclimit, &notify);
 
 	if (cpu_has_vint)
 		set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index f2c7aed663e7..6b3c3ea8bc61 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -86,25 +86,11 @@ unsigned int smtc_status = 0;
 
 /* Boot command line configuration overrides */
 
-static int vpelimit = 0;
-static int tclimit = 0;
 static int ipibuffers = 0;
 static int nostlb = 0;
 static int asidmask = 0;
 unsigned long smtc_asid_mask = 0xff;
 
-static int __init maxvpes(char *str)
-{
-	get_option(&str, &vpelimit);
-	return 1;
-}
-
-static int __init maxtcs(char *str)
-{
-	get_option(&str, &tclimit);
-	return 1;
-}
-
 static int __init ipibufs(char *str)
 {
 	get_option(&str, &ipibuffers);
@@ -137,8 +123,6 @@ static int __init asidmask_set(char *str)
 	return 1;
 }
 
-__setup("maxvpes=", maxvpes);
-__setup("maxtcs=", maxtcs);
 __setup("ipibufs=", ipibufs);
 __setup("nostlb", stlb_disable);
 __setup("asidmask=", asidmask_set);
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index a2bee10f04cf..c726c47cd2c3 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -27,7 +27,6 @@
  * To load and run, simply cat a SP 'program file' to /dev/vpe1.
  * i.e cat spapp >/dev/vpe1.
  */
-
 #include <linux/kernel.h>
 #include <linux/device.h>
 #include <linux/module.h>
@@ -54,6 +53,7 @@
 #include <asm/system.h>
 #include <asm/vpe.h>
 #include <asm/kspd.h>
+#include <asm/mips_mt.h>
 
 typedef void *vpe_handle;
 
@@ -132,14 +132,9 @@ struct tc {
 	enum tc_state state;
 	int index;
 
-	/* parent VPE */
-	struct vpe *pvpe;
-
-	/* The list of TC's with this VPE */
-	struct list_head tc;
-
-	/* The global list of tc's */
-	struct list_head list;
+	struct vpe *pvpe;	/* parent VPE */
+	struct list_head tc;	/* The list of TC's with this VPE */
+	struct list_head list;	/* The global list of tc's */
 };
 
 struct {
@@ -217,18 +212,17 @@ struct vpe *alloc_vpe(int minor)
 /* allocate a tc. At startup only tc0 is running, all other can be halted. */
 struct tc *alloc_tc(int index)
 {
-	struct tc *t;
-
-	if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
-		return NULL;
-	}
+	struct tc *tc;
 
-	INIT_LIST_HEAD(&t->tc);
-	list_add_tail(&t->list, &vpecontrol.tc_list);
+	if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL)
+		goto out;
 
-	t->index = index;
+	INIT_LIST_HEAD(&tc->tc);
+	tc->index = index;
+	list_add_tail(&tc->list, &vpecontrol.tc_list);
 
-	return t;
+out:
+	return tc;
 }
 
 /* clean up and free everything */
@@ -663,66 +657,48 @@ static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
 }
 #endif
 
-static void dump_tc(struct tc *t)
-{
-	unsigned long val;
-
-	settc(t->index);
-	printk(KERN_DEBUG "VPE loader: TC index %d targtc %ld "
-	       "TCStatus 0x%lx halt 0x%lx\n",
-	       t->index, read_c0_vpecontrol() & VPECONTROL_TARGTC,
-	       read_tc_c0_tcstatus(), read_tc_c0_tchalt());
-
-	printk(KERN_DEBUG " tcrestart 0x%lx\n", read_tc_c0_tcrestart());
-	printk(KERN_DEBUG " tcbind 0x%lx\n", read_tc_c0_tcbind());
-
-	val = read_c0_vpeconf0();
-	printk(KERN_DEBUG " VPEConf0 0x%lx MVP %ld\n", val,
-	       (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT);
-
-	printk(KERN_DEBUG " c0 status 0x%lx\n", read_vpe_c0_status());
-	printk(KERN_DEBUG " c0 cause 0x%lx\n", read_vpe_c0_cause());
-
-	printk(KERN_DEBUG " c0 badvaddr 0x%lx\n", read_vpe_c0_badvaddr());
-	printk(KERN_DEBUG " c0 epc 0x%lx\n", read_vpe_c0_epc());
-}
-
-static void dump_tclist(void)
-{
-	struct tc *t;
-
-	list_for_each_entry(t, &vpecontrol.tc_list, list) {
-		dump_tc(t);
-	}
-}
-
 /* We are prepared so configure and start the VPE... */
 static int vpe_run(struct vpe * v)
 {
+	unsigned long flags, val, dmt_flag;
 	struct vpe_notifications *n;
-	unsigned long val, dmt_flag;
+	unsigned int vpeflags;
 	struct tc *t;
 
 	/* check we are the Master VPE */
+	local_irq_save(flags);
 	val = read_c0_vpeconf0();
 	if (!(val & VPECONF0_MVP)) {
 		printk(KERN_WARNING
 			"VPE loader: only Master VPE's are allowed to configure MT\n");
+		local_irq_restore(flags);
+
 		return -1;
 	}
 
-	/* disable MT (using dvpe) */
-	dvpe();
+	dmt_flag = dmt();
+	vpeflags = dvpe();
 
 	if (!list_empty(&v->tc)) {
 		if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
-			printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
-				t->index);
+			evpe(vpeflags);
+			emt(dmt_flag);
+			local_irq_restore(flags);
+
+			printk(KERN_WARNING
+			       "VPE loader: TC %d is already in use.\n",
+			       t->index);
 			return -ENOEXEC;
 		}
 	} else {
-		printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		printk(KERN_WARNING
+		       "VPE loader: No TC's associated with VPE %d\n",
 		       v->minor);
+
 		return -ENOEXEC;
 	}
 
@@ -733,21 +709,20 @@ static int vpe_run(struct vpe * v)
 
 	/* should check it is halted, and not activated */
 	if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
-		printk(KERN_WARNING "VPE loader: TC %d is already doing something!\n",
+		evpe(vpeflags);
+		emt(dmt_flag);
+		local_irq_restore(flags);
+
+		printk(KERN_WARNING "VPE loader: TC %d is already active!\n",
 		       t->index);
-		dump_tclist();
+
 		return -ENOEXEC;
 	}
 
-	/*
-	 * Disable multi-threaded execution whilst we activate, clear the
-	 * halt bit and bound the tc to the other VPE...
-	 */
-	dmt_flag = dmt();
-
 	/* Write the address we want it to start running from in the TCPC register. */
 	write_tc_c0_tcrestart((unsigned long)v->__start);
 	write_tc_c0_tccontext((unsigned long)0);
+
 	/*
 	 * Mark the TC as activated, not interrupt exempt and not dynamically
 	 * allocatable
@@ -763,15 +738,14 @@ static int vpe_run(struct vpe * v)
 	 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
 	 * DFLT_HEAP_SIZE when you compile your program
 	 */
 	mttgpr(7, physical_memsize);
-
 
 	/* set up VPE1 */
 	/*
 	 * bind the TC to VPE 1 as late as possible so we only have the final
 	 * VPE registers to set up, and so an EJTAG probe can trigger on it
 	 */
-	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor);
+	write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
 
 	write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
 
@@ -793,15 +767,16 @@ static int vpe_run(struct vpe * v)
 	/* take system out of configuration state */
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
 
-	/* now safe to re-enable multi-threading */
-	emt(dmt_flag);
-
-	/* set it running */
+#ifdef CONFIG_SMP
 	evpe(EVPE_ENABLE);
+#else
+	evpe(vpeflags);
+#endif
+	emt(dmt_flag);
+	local_irq_restore(flags);
 
-	list_for_each_entry(n, &v->notify, list) {
-		n->start(v->minor);
-	}
+	list_for_each_entry(n, &v->notify, list)
+		n->start(minor);
 
 	return 0;
 }
@@ -1023,23 +998,15 @@ static int vpe_elfload(struct vpe * v)
 	return 0;
 }
 
-void __used dump_vpe(struct vpe * v)
-{
-	struct tc *t;
-
-	settc(v->minor);
-
-	printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol());
-	printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0());
-
-	list_for_each_entry(t, &vpecontrol.tc_list, list)
-		dump_tc(t);
-}
-
 static void cleanup_tc(struct tc *tc)
 {
+	unsigned long flags;
+	unsigned int mtflags, vpflags;
 	int tmp;
 
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
@@ -1054,9 +1021,12 @@ static void cleanup_tc(struct tc *tc)
 	write_tc_c0_tchalt(TCHALT_H);
 
 	/* bind it to anything other than VPE1 */
-	write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
+//	write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
 
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
 }
 
 static int getcwd(char *buff, int size)
@@ -1077,36 +1047,32 @@ static int getcwd(char *buff, int size)
 /* checks VPE is unused and gets ready to load program */
 static int vpe_open(struct inode *inode, struct file *filp)
 {
-	int minor, ret;
 	enum vpe_state state;
-	struct vpe *v;
 	struct vpe_notifications *not;
+	struct vpe *v;
+	int ret;
 
-	/* assume only 1 device at the mo. */
-	if ((minor = iminor(inode)) != 1) {
+	if (minor != iminor(inode)) {
+		/* assume only 1 device at the moment. */
 		printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
 		return -ENODEV;
 	}
 
-	if ((v = get_vpe(minor)) == NULL) {
+	if ((v = get_vpe(tclimit)) == NULL) {
 		printk(KERN_WARNING "VPE loader: unable to get vpe\n");
 		return -ENODEV;
 	}
 
 	state = xchg(&v->state, VPE_STATE_INUSE);
 	if (state != VPE_STATE_UNUSED) {
-		dvpe();
-
 		printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
 
-		dump_tc(get_tc(minor));
-
 		list_for_each_entry(not, &v->notify, list) {
-			not->stop(minor);
+			not->stop(tclimit);
 		}
 
 		release_progmem(v->load_addr);
-		cleanup_tc(get_tc(minor));
+		cleanup_tc(get_tc(tclimit));
 	}
 
 	/* this of-course trashes what was there before... */
@@ -1133,26 +1099,25 @@ static int vpe_open(struct inode *inode, struct file *filp)
 
 	v->shared_ptr = NULL;
 	v->__start = 0;
+
 	return 0;
 }
 
 static int vpe_release(struct inode *inode, struct file *filp)
 {
-	int minor, ret = 0;
 	struct vpe *v;
 	Elf_Ehdr *hdr;
+	int ret = 0;
 
-	minor = iminor(inode);
-	if ((v = get_vpe(minor)) == NULL)
+	v = get_vpe(tclimit);
+	if (v == NULL)
 		return -ENODEV;
 
-	// simple case of fire and forget, so tell the VPE to run...
-
 	hdr = (Elf_Ehdr *) v->pbuffer;
 	if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) {
-		if (vpe_elfload(v) >= 0)
+		if (vpe_elfload(v) >= 0) {
 			vpe_run(v);
-		else {
+		} else {
 			printk(KERN_WARNING "VPE loader: ELF load failed.\n");
 			ret = -ENOEXEC;
 		}
@@ -1179,12 +1144,14 @@ static int vpe_release(struct inode *inode, struct file *filp)
 static ssize_t vpe_write(struct file *file, const char __user * buffer,
 			 size_t count, loff_t * ppos)
 {
-	int minor;
 	size_t ret = count;
 	struct vpe *v;
 
-	minor = iminor(file->f_path.dentry->d_inode);
-	if ((v = get_vpe(minor)) == NULL)
+	if (iminor(file->f_path.dentry->d_inode) != minor)
+		return -ENODEV;
+
+	v = get_vpe(tclimit);
+	if (v == NULL)
 		return -ENODEV;
 
 	if (v->pbuffer == NULL) {
@@ -1370,17 +1337,34 @@ static struct device *vpe_dev;
 
 static int __init vpe_module_init(void)
 {
+	unsigned int mtflags, vpflags;
+	int hw_tcs, hw_vpes, tc, err = 0;
+	unsigned long flags, val;
 	struct vpe *v = NULL;
 	struct device *dev;
 	struct tc *t;
-	unsigned long val;
-	int i, err;
 
 	if (!cpu_has_mipsmt) {
 		printk("VPE loader: not a MIPS MT capable processor\n");
 		return -ENODEV;
 	}
 
+	if (vpelimit == 0) {
+		printk(KERN_WARNING "No VPEs reserved for AP/SP, not "
+		       "initializing VPE loader.\nPass maxvpes=<n> argument as "
+		       "kernel argument\n");
+
+		return -ENODEV;
+	}
+
+	if (tclimit == 0) {
+		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
+		       "initializing VPE loader.\nPass maxtcs=<n> argument as "
+		       "kernel argument\n");
+
+		return -ENODEV;
+	}
+
 	major = register_chrdev(0, module_name, &vpe_fops);
 	if (major < 0) {
 		printk("VPE loader: unable to register character device\n");
@@ -1388,40 +1372,61 @@ static int __init vpe_module_init(void)
 	}
 
 	dev = device_create(mt_class, NULL, MKDEV(major, minor),
-			    "tc%d", minor);
+			    "vpe%d", minor);
 	if (IS_ERR(dev)) {
 		err = PTR_ERR(dev);
 		goto out_chrdev;
 	}
 	vpe_dev = dev;
 
-	dmt();
-	dvpe();
+	local_irq_save(flags);
+	mtflags = dmt();
+	vpflags = dvpe();
 
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
 	/* dump_mtregs(); */
 
-
 	val = read_c0_mvpconf0();
-	for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) {
-		t = alloc_tc(i);
+	hw_tcs = (val & MVPCONF0_PTC) + 1;
+	hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+	for (tc = tclimit; tc < hw_tcs; tc++) {
+		/*
+		 * Must re-enable multithreading temporarily or in case we
+		 * reschedule send IPIs or similar we might hang.
+		 */
+		clear_c0_mvpcontrol(MVPCONTROL_VPC);
+		evpe(vpflags);
+		emt(mtflags);
+		local_irq_restore(flags);
+		t = alloc_tc(tc);
+		if (!t) {
+			err = -ENOMEM;
+			goto out;
+		}
+
+		local_irq_save(flags);
+		mtflags = dmt();
+		vpflags = dvpe();
+		set_c0_mvpcontrol(MVPCONTROL_VPC);
 
 		/* VPE's */
-		if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) {
-			settc(i);
+		if (tc < hw_tcs) {
+			settc(tc);
 
-			if ((v = alloc_vpe(i)) == NULL) {
+			if ((v = alloc_vpe(tc)) == NULL) {
 				printk(KERN_WARNING "VPE: unable to allocate VPE\n");
-				return -ENODEV;
+
+				goto out_reenable;
 			}
 
 			/* add the tc to the list of this vpe's tc's. */
 			list_add(&t->tc, &v->tc);
 
 			/* deactivate all but vpe0 */
-			if (i != 0) {
+			if (tc >= tclimit) {
 				unsigned long tmp = read_vpe_c0_vpeconf0();
 
 				tmp &= ~VPECONF0_VPA;
@@ -1434,7 +1439,7 @@ static int __init vpe_module_init(void)
 			/* disable multi-threading with TC's */
 			write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
 
-			if (i != 0) {
+			if (tc >= vpelimit) {
 				/*
 				 * Set config to be the same as vpe0,
 				 * particularly kseg0 coherency alg
@@ -1446,10 +1451,10 @@ static int __init vpe_module_init(void)
 		/* TC's */
 		t->pvpe = v;	/* set the parent vpe */
 
-		if (i != 0) {
+		if (tc >= tclimit) {
 			unsigned long tmp;
 
-			settc(i);
+			settc(tc);
 
 			/* Any TC that is bound to VPE0 gets left as is - in case
 			   we are running SMTC on VPE0. A TC that is bound to any
@@ -1479,9 +1484,14 @@ static int __init vpe_module_init(void)
 		}
 	}
 
+out_reenable:
 	/* release config state */
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
 
+	evpe(vpflags);
+	emt(mtflags);
+	local_irq_restore(flags);
+
 #ifdef CONFIG_MIPS_APSP_KSPD
 	kspd_events.kspd_sp_exit = kspd_sp_exit;
 #endif
@@ -1490,6 +1500,7 @@ static int __init vpe_module_init(void)
 out_chrdev:
 	unregister_chrdev(major, module_name);
 
+out:
 	return err;
 }
 
diff --git a/include/asm-mips/mips_mt.h b/include/asm-mips/mips_mt.h
index 8045abc78d0f..ac7935203f89 100644
--- a/include/asm-mips/mips_mt.h
+++ b/include/asm-mips/mips_mt.h
@@ -8,6 +8,12 @@
 
 #include <linux/cpumask.h>
 
+/*
+ * How many VPEs and TCs is Linux allowed to use? 0 means no limit.
+ */
+extern int tclimit;
+extern int vpelimit;
+
 extern cpumask_t mt_fpu_cpumask;
 extern unsigned long mt_fpemul_threshold;
 