author     Ralf Baechle <ralf@linux-mips.org>  2006-04-05 04:45:45 -0400
committer  Ralf Baechle <ralf@linux-mips.org>  2006-04-18 22:14:27 -0400
commit     2600990e640e3bef29ed89d565864cf16ee83833 (patch)
tree       7c7a43916d509d56ad89b951c485c7d75f40468e  /arch/mips/kernel/vpe.c
parent     bce1a28686ed6527977a198f698278b67c6bf9ec (diff)
[MIPS] kspd and other AP/SP improvements.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
Diffstat (limited to 'arch/mips/kernel/vpe.c')
-rw-r--r--  arch/mips/kernel/vpe.c  659
1 file changed, 434 insertions, 225 deletions
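
This patch turns the VPE loader into a small AP/SP service point: it exports vpe_get_shared(), vpe_getuid(), vpe_getgid(), vpe_getcwd() and vpe_notify(), and hooks the loader up to KSPD under CONFIG_MIPS_APSP_KSPD. As an illustration only (not part of the commit), a kernel-side client such as a KSPD- or RTLX-style driver could register for start/stop events roughly as sketched below. The start/stop callbacks and the list member of struct vpe_notifications are inferred from how the loader invokes them in the diff; the real definition lives in <asm/vpe.h>, and all my_* names here are made up for the example.

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/vpe.h>

static void my_sp_start(int vpe)
{
        /* Called from vpe_run() once the SP program has been started. */
        printk(KERN_INFO "SP program started on VPE %d (uid %d, gid %d)\n",
               vpe, vpe_getuid(vpe), vpe_getgid(vpe));
}

static void my_sp_stop(int vpe)
{
        /* Called from vpe_open() when the VPE is being re-initialised. */
        printk(KERN_INFO "SP program on VPE %d stopped\n", vpe);
}

static struct vpe_notifications my_notify = {
        .start = my_sp_start,
        .stop  = my_sp_stop,
};

static int __init my_client_init(void)
{
        /* vpe1 (minor 1) is the only VPE this loader drives. */
        return vpe_notify(1, &my_notify);
}
module_init(my_client_init);
MODULE_LICENSE("GPL");
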
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index ae83b755cf4a..80ffaa6d50ad 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -13,7 +13,6 @@
13 * You should have received a copy of the GNU General Public License along 13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc., 14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 15 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
16 *
17 */ 16 */
18 17
19/* 18/*
@@ -27,11 +26,8 @@
27 * 26 *
28 * To load and run, simply cat a SP 'program file' to /dev/vpe1. 27 * To load and run, simply cat a SP 'program file' to /dev/vpe1.
29 * i.e cat spapp >/dev/vpe1. 28 * i.e cat spapp >/dev/vpe1.
30 *
31 * You'll need to have the following device files.
32 * mknod /dev/vpe0 c 63 0
33 * mknod /dev/vpe1 c 63 1
34 */ 29 */
30
35#include <linux/config.h> 31#include <linux/config.h>
36#include <linux/kernel.h> 32#include <linux/kernel.h>
37#include <linux/module.h> 33#include <linux/module.h>
@@ -55,6 +51,8 @@
55#include <asm/cpu.h> 51#include <asm/cpu.h>
56#include <asm/processor.h> 52#include <asm/processor.h>
57#include <asm/system.h> 53#include <asm/system.h>
54#include <asm/vpe.h>
55#include <asm/kspd.h>
58 56
59typedef void *vpe_handle; 57typedef void *vpe_handle;
60 58
@@ -68,6 +66,11 @@ typedef void *vpe_handle;
68static char module_name[] = "vpe"; 66static char module_name[] = "vpe";
69static int major; 67static int major;
70 68
69#ifdef CONFIG_MIPS_APSP_KSPD
70 static struct kspd_notifications kspd_events;
71static int kspd_events_reqd = 0;
72#endif
73
71/* grab the likely amount of memory we will need. */ 74/* grab the likely amount of memory we will need. */
72#ifdef CONFIG_MIPS_VPE_LOADER_TOM 75#ifdef CONFIG_MIPS_VPE_LOADER_TOM
73#define P_SIZE (2 * 1024 * 1024) 76#define P_SIZE (2 * 1024 * 1024)
@@ -76,7 +79,10 @@ static int major;
76#define P_SIZE (256 * 1024) 79#define P_SIZE (256 * 1024)
77#endif 80#endif
78 81
82extern unsigned long physical_memsize;
83
79#define MAX_VPES 16 84#define MAX_VPES 16
85#define VPE_PATH_MAX 256
80 86
81enum vpe_state { 87enum vpe_state {
82 VPE_STATE_UNUSED = 0, 88 VPE_STATE_UNUSED = 0,
@@ -102,6 +108,8 @@ struct vpe {
102 unsigned long len; 108 unsigned long len;
103 char *pbuffer; 109 char *pbuffer;
104 unsigned long plen; 110 unsigned long plen;
111 unsigned int uid, gid;
112 char cwd[VPE_PATH_MAX];
105 113
106 unsigned long __start; 114 unsigned long __start;
107 115
@@ -113,6 +121,9 @@ struct vpe {
113 121
114 /* shared symbol address */ 122 /* shared symbol address */
115 void *shared_ptr; 123 void *shared_ptr;
124
125 /* the list of who wants to know when something major happens */
126 struct list_head notify;
116}; 127};
117 128
118struct tc { 129struct tc {
@@ -138,7 +149,7 @@ struct vpecontrol_ {
138} vpecontrol; 149} vpecontrol;
139 150
140static void release_progmem(void *ptr); 151static void release_progmem(void *ptr);
141static void dump_vpe(struct vpe * v); 152/* static __attribute_used__ void dump_vpe(struct vpe * v); */
142extern void save_gp_address(unsigned int secbase, unsigned int rel); 153extern void save_gp_address(unsigned int secbase, unsigned int rel);
143 154
144/* get the vpe associated with this minor */ 155/* get the vpe associated with this minor */
@@ -146,12 +157,14 @@ struct vpe *get_vpe(int minor)
146{ 157{
147 struct vpe *v; 158 struct vpe *v;
148 159
160 if (!cpu_has_mipsmt)
161 return NULL;
162
149 list_for_each_entry(v, &vpecontrol.vpe_list, list) { 163 list_for_each_entry(v, &vpecontrol.vpe_list, list) {
150 if (v->minor == minor) 164 if (v->minor == minor)
151 return v; 165 return v;
152 } 166 }
153 167
154 printk(KERN_DEBUG "VPE: get_vpe minor %d not found\n", minor);
155 return NULL; 168 return NULL;
156} 169}
157 170
@@ -165,8 +178,6 @@ struct tc *get_tc(int index)
165 return t; 178 return t;
166 } 179 }
167 180
168 printk(KERN_DEBUG "VPE: get_tc index %d not found\n", index);
169
170 return NULL; 181 return NULL;
171} 182}
172 183
@@ -179,8 +190,6 @@ struct tc *get_tc_unused(void)
179 return t; 190 return t;
180 } 191 }
181 192
182 printk(KERN_DEBUG "VPE: All TC's are in use\n");
183
184 return NULL; 193 return NULL;
185} 194}
186 195
@@ -190,13 +199,13 @@ struct vpe *alloc_vpe(int minor)
190 struct vpe *v; 199 struct vpe *v;
191 200
192 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) { 201 if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) {
193 printk(KERN_WARNING "VPE: alloc_vpe no mem\n");
194 return NULL; 202 return NULL;
195 } 203 }
196 204
197 INIT_LIST_HEAD(&v->tc); 205 INIT_LIST_HEAD(&v->tc);
198 list_add_tail(&v->list, &vpecontrol.vpe_list); 206 list_add_tail(&v->list, &vpecontrol.vpe_list);
199 207
208 INIT_LIST_HEAD(&v->notify);
200 v->minor = minor; 209 v->minor = minor;
201 return v; 210 return v;
202} 211}
@@ -207,7 +216,6 @@ struct tc *alloc_tc(int index)
207 struct tc *t; 216 struct tc *t;
208 217
209 if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) { 218 if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) {
210 printk(KERN_WARNING "VPE: alloc_tc no mem\n");
211 return NULL; 219 return NULL;
212 } 220 }
213 221
@@ -236,20 +244,16 @@ void dump_mtregs(void)
236 printk("config3 0x%lx MT %ld\n", val, 244 printk("config3 0x%lx MT %ld\n", val,
237 (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT); 245 (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT);
238 246
239 val = read_c0_mvpconf0();
240 printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
241 (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
242 val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
243
244 val = read_c0_mvpcontrol(); 247 val = read_c0_mvpcontrol();
245 printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val, 248 printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val,
246 (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT, 249 (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT,
247 (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT, 250 (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT,
248 (val & MVPCONTROL_EVP)); 251 (val & MVPCONTROL_EVP));
249 252
250 val = read_c0_vpeconf0(); 253 val = read_c0_mvpconf0();
251 printk("VPEConf0 0x%lx MVP %ld\n", val, 254 printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val,
252 (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT); 255 (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT,
256 val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT);
253} 257}
254 258
255/* Find some VPE program space */ 259/* Find some VPE program space */
@@ -354,9 +358,9 @@ static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
354 } 358 }
355 359
356 if( (rel > 32768) || (rel < -32768) ) { 360 if( (rel > 32768) || (rel < -32768) ) {
357 printk(KERN_ERR 361 printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: "
358 "apply_r_mips_gprel16: relative address out of range 0x%x %d\n", 362 "relative address 0x%x out of range of gp register\n",
359 rel, rel); 363 rel);
360 return -ENOEXEC; 364 return -ENOEXEC;
361 } 365 }
362 366
@@ -374,8 +378,8 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location,
374 rel -= 1; // and one instruction less due to the branch delay slot. 378 rel -= 1; // and one instruction less due to the branch delay slot.
375 379
376 if( (rel > 32768) || (rel < -32768) ) { 380 if( (rel > 32768) || (rel < -32768) ) {
377 printk(KERN_ERR 381 printk(KERN_DEBUG "VPE loader: "
378 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); 382 "apply_r_mips_pc16: relative address out of range 0x%x\n", rel);
379 return -ENOEXEC; 383 return -ENOEXEC;
380 } 384 }
381 385
@@ -396,7 +400,8 @@ static int apply_r_mips_26(struct module *me, uint32_t *location,
396 Elf32_Addr v) 400 Elf32_Addr v)
397{ 401{
398 if (v % 4) { 402 if (v % 4) {
399 printk(KERN_ERR "module %s: dangerous relocation mod4\n", me->name); 403 printk(KERN_DEBUG "VPE loader: apply_r_mips_26 "
404 " unaligned relocation\n");
400 return -ENOEXEC; 405 return -ENOEXEC;
401 } 406 }
402 407
@@ -459,12 +464,13 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
459 /* 464 /*
460 * The value for the HI16 had best be the same. 465 * The value for the HI16 had best be the same.
461 */ 466 */
462 if (v != l->value) { 467 if (v != l->value) {
463 printk("%d != %d\n", v, l->value); 468 printk(KERN_DEBUG "VPE loader: "
464 goto out_danger; 469 "apply_r_mips_lo16/hi16: "
470 "inconsistent value information\n");
471 return -ENOEXEC;
465 } 472 }
466 473
467
468 /* 474 /*
469 * Do the HI16 relocation. Note that we actually don't 475 * Do the HI16 relocation. Note that we actually don't
470 * need to know anything about the LO16 itself, except 476 * need to know anything about the LO16 itself, except
@@ -500,11 +506,6 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location,
500 *location = insnlo; 506 *location = insnlo;
501 507
502 return 0; 508 return 0;
503
504out_danger:
505 printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name);
506
507 return -ENOEXEC;
508} 509}
509 510
510static int (*reloc_handlers[]) (struct module *me, uint32_t *location, 511static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
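
As an aside to the HI16/LO16 handling in the hunks above: the loader relies on the usual MIPS convention that a 32-bit address is rebuilt from a lui/addiu pair, with the high half pre-biased so that the sign-extended low half lands on the right value. A standalone illustration with a hypothetical address (not part of the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t addr = 0x12349876;     /* hypothetical symbol address */
        uint16_t lo = addr & 0xffff;    /* low half, patched by the LO16 reloc */
        /* +0x8000 pre-biases the high half for the sign-extension of lo */
        uint16_t hi = (addr + 0x8000) >> 16;

        /* lui reg, hi ; addiu reg, reg, lo  puts addr back together */
        printf("hi=0x%04x lo=0x%04x -> 0x%08x\n",
               hi, lo, (uint32_t)((hi << 16) + (int16_t)lo));
        return 0;
}
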
@@ -518,6 +519,15 @@ static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
518 [R_MIPS_PC16] = apply_r_mips_pc16 519 [R_MIPS_PC16] = apply_r_mips_pc16
519}; 520};
520 521
522static char *rstrs[] = {
523 [R_MIPS_NONE] = "MIPS_NONE",
524 [R_MIPS_32] = "MIPS_32",
525 [R_MIPS_26] = "MIPS_26",
526 [R_MIPS_HI16] = "MIPS_HI16",
527 [R_MIPS_LO16] = "MIPS_LO16",
528 [R_MIPS_GPREL16] = "MIPS_GPREL16",
529 [R_MIPS_PC16] = "MIPS_PC16"
530};
521 531
522int apply_relocations(Elf32_Shdr *sechdrs, 532int apply_relocations(Elf32_Shdr *sechdrs,
523 const char *strtab, 533 const char *strtab,
@@ -552,15 +562,13 @@ int apply_relocations(Elf32_Shdr *sechdrs,
552 562
553 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); 563 res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
554 if( res ) { 564 if( res ) {
555 printk(KERN_DEBUG 565 char *r = rstrs[ELF32_R_TYPE(r_info)];
556 "relocation error 0x%x sym refer <%s> value 0x%x " 566 printk(KERN_WARNING "VPE loader: .text+0x%x "
557 "type 0x%x r_info 0x%x\n", 567 "relocation type %s for symbol \"%s\" failed\n",
558 (unsigned int)location, strtab + sym->st_name, v, 568 rel[i].r_offset, r ? r : "UNKNOWN",
559 r_info, ELF32_R_TYPE(r_info)); 569 strtab + sym->st_name);
560 }
561
562 if (res)
563 return res; 570 return res;
571 }
564 } 572 }
565 573
566 return 0; 574 return 0;
@@ -576,7 +584,7 @@ void save_gp_address(unsigned int secbase, unsigned int rel)
576 584
577 585
578/* Change all symbols so that sh_value encodes the pointer directly. */ 586/* Change all symbols so that sh_value encodes the pointer directly. */
579static int simplify_symbols(Elf_Shdr * sechdrs, 587static void simplify_symbols(Elf_Shdr * sechdrs,
580 unsigned int symindex, 588 unsigned int symindex,
581 const char *strtab, 589 const char *strtab,
582 const char *secstrings, 590 const char *secstrings,
@@ -585,18 +593,21 @@ static int simplify_symbols(Elf_Shdr * sechdrs,
585 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; 593 Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
586 unsigned long secbase, bssbase = 0; 594 unsigned long secbase, bssbase = 0;
587 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); 595 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
588 int ret = 0, size; 596 int size;
589 597
590 /* find the .bss section for COMMON symbols */ 598 /* find the .bss section for COMMON symbols */
591 for (i = 0; i < nsecs; i++) { 599 for (i = 0; i < nsecs; i++) {
592 if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) 600 if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
593 bssbase = sechdrs[i].sh_addr; 601 bssbase = sechdrs[i].sh_addr;
602 break;
603 }
594 } 604 }
595 605
596 for (i = 1; i < n; i++) { 606 for (i = 1; i < n; i++) {
597 switch (sym[i].st_shndx) { 607 switch (sym[i].st_shndx) {
598 case SHN_COMMON: 608 case SHN_COMMON:
599 /* Allocate space for the symbol in the .bss section. st_value is currently size. 609 /* Allocate space for the symbol in the .bss section.
610 st_value is currently size.
600 We want it to have the address of the symbol. */ 611 We want it to have the address of the symbol. */
601 612
602 size = sym[i].st_value; 613 size = sym[i].st_value;
@@ -614,11 +625,9 @@ static int simplify_symbols(Elf_Shdr * sechdrs,
614 break; 625 break;
615 626
616 case SHN_MIPS_SCOMMON: 627 case SHN_MIPS_SCOMMON:
617 628 printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON"
618 printk(KERN_DEBUG 629 "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name,
619 "simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n", 630 sym[i].st_shndx);
620 strtab + sym[i].st_name, sym[i].st_shndx);
621
622 // .sbss section 631 // .sbss section
623 break; 632 break;
624 633
@@ -632,10 +641,7 @@ static int simplify_symbols(Elf_Shdr * sechdrs,
632 sym[i].st_value += secbase; 641 sym[i].st_value += secbase;
633 break; 642 break;
634 } 643 }
635
636 } 644 }
637
638 return ret;
639} 645}
640 646
641#ifdef DEBUG_ELFLOADER 647#ifdef DEBUG_ELFLOADER
@@ -655,9 +661,26 @@ static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex,
655 661
656static void dump_tc(struct tc *t) 662static void dump_tc(struct tc *t)
657{ 663{
658 printk(KERN_WARNING "VPE: TC index %d TCStatus 0x%lx halt 0x%lx\n", 664 unsigned long val;
659 t->index, read_tc_c0_tcstatus(), read_tc_c0_tchalt()); 665
660 printk(KERN_WARNING "VPE: tcrestart 0x%lx\n", read_tc_c0_tcrestart()); 666 settc(t->index);
667 printk(KERN_DEBUG "VPE loader: TC index %d targtc %ld "
668 "TCStatus 0x%lx halt 0x%lx\n",
669 t->index, read_c0_vpecontrol() & VPECONTROL_TARGTC,
670 read_tc_c0_tcstatus(), read_tc_c0_tchalt());
671
672 printk(KERN_DEBUG " tcrestart 0x%lx\n", read_tc_c0_tcrestart());
673 printk(KERN_DEBUG " tcbind 0x%lx\n", read_tc_c0_tcbind());
674
675 val = read_c0_vpeconf0();
676 printk(KERN_DEBUG " VPEConf0 0x%lx MVP %ld\n", val,
677 (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT);
678
679 printk(KERN_DEBUG " c0 status 0x%lx\n", read_vpe_c0_status());
680 printk(KERN_DEBUG " c0 cause 0x%lx\n", read_vpe_c0_cause());
681
682 printk(KERN_DEBUG " c0 badvaddr 0x%lx\n", read_vpe_c0_badvaddr());
683 printk(KERN_DEBUG " c0 epc 0x%lx\n", read_vpe_c0_epc());
661} 684}
662 685
663static void dump_tclist(void) 686static void dump_tclist(void)
@@ -672,96 +695,108 @@ static void dump_tclist(void)
672/* We are prepared so configure and start the VPE... */ 695/* We are prepared so configure and start the VPE... */
673int vpe_run(struct vpe * v) 696int vpe_run(struct vpe * v)
674{ 697{
675 unsigned long val; 698 struct vpe_notifications *n;
699 unsigned long val, dmt_flag;
676 struct tc *t; 700 struct tc *t;
677 701
678 /* check we are the Master VPE */ 702 /* check we are the Master VPE */
679 val = read_c0_vpeconf0(); 703 val = read_c0_vpeconf0();
680 if (!(val & VPECONF0_MVP)) { 704 if (!(val & VPECONF0_MVP)) {
681 printk(KERN_WARNING 705 printk(KERN_WARNING
682 "VPE: only Master VPE's are allowed to configure MT\n"); 706 "VPE loader: only Master VPE's are allowed to configure MT\n");
683 return -1; 707 return -1;
684 } 708 }
685 709
686 /* disable MT (using dvpe) */ 710 /* disable MT (using dvpe) */
687 dvpe(); 711 dvpe();
688 712
713 if (!list_empty(&v->tc)) {
714 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
715 printk(KERN_WARNING "VPE loader: TC %d is already in use.\n",
716 t->index);
717 return -ENOEXEC;
718 }
719 } else {
720 printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n",
721 v->minor);
722 return -ENOEXEC;
723 }
724
689 /* Put MVPE's into 'configuration state' */ 725 /* Put MVPE's into 'configuration state' */
690 set_c0_mvpcontrol(MVPCONTROL_VPC); 726 set_c0_mvpcontrol(MVPCONTROL_VPC);
691 727
692 if (!list_empty(&v->tc)) {
693 if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) {
694 printk(KERN_WARNING "VPE: TC %d is already in use.\n",
695 t->index);
696 return -ENOEXEC;
697 }
698 } else {
699 printk(KERN_WARNING "VPE: No TC's associated with VPE %d\n",
700 v->minor);
701 return -ENOEXEC;
702 }
703
704 settc(t->index); 728 settc(t->index);
705 729
706 val = read_vpe_c0_vpeconf0();
707
708 /* should check it is halted, and not activated */ 730 /* should check it is halted, and not activated */
709 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { 731 if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) {
710 printk(KERN_WARNING "VPE: TC %d is already doing something!\n", 732 printk(KERN_WARNING "VPE loader: TC %d is already doing something!\n",
711 t->index); 733 t->index);
712
713 dump_tclist(); 734 dump_tclist();
714 return -ENOEXEC; 735 return -ENOEXEC;
715 } 736 }
716 737
738 /*
739 * Disable multi-threaded execution whilst we activate, clear the
740 * halt bit and bound the tc to the other VPE...
741 */
742 dmt_flag = dmt();
743
717 /* Write the address we want it to start running from in the TCPC register. */ 744 /* Write the address we want it to start running from in the TCPC register. */
718 write_tc_c0_tcrestart((unsigned long)v->__start); 745 write_tc_c0_tcrestart((unsigned long)v->__start);
719
720 /* write the sivc_info address to tccontext */
721 write_tc_c0_tccontext((unsigned long)0); 746 write_tc_c0_tccontext((unsigned long)0);
722 747 /*
723 /* Set up the XTC bit in vpeconf0 to point at our tc */ 748 * Mark the TC as activated, not interrupt exempt and not dynamically
724 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (t->index << VPECONF0_XTC_SHIFT)); 749 * allocatable
725 750 */
726 /* mark the TC as activated, not interrupt exempt and not dynamically allocatable */
727 val = read_tc_c0_tcstatus(); 751 val = read_tc_c0_tcstatus();
728 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; 752 val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
729 write_tc_c0_tcstatus(val); 753 write_tc_c0_tcstatus(val);
730 754
731 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); 755 write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
732 756
733 /* set up VPE1 */
734 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); // no multiple TC's
735 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); // enable this VPE
736
737 /* 757 /*
738 * The sde-kit passes 'memsize' to __start in $a3, so set something 758 * The sde-kit passes 'memsize' to __start in $a3, so set something
739 * here... 759 * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
740 * Or set $a3 (register 7) to zero and define DFLT_STACK_SIZE and
741 * DFLT_HEAP_SIZE when you compile your program 760 * DFLT_HEAP_SIZE when you compile your program
742 */ 761 */
762 mttgpr(7, physical_memsize);
763
764
765 /* set up VPE1 */
766 /*
767 * bind the TC to VPE 1 as late as possible so we only have the final
768 * VPE registers to set up, and so an EJTAG probe can trigger on it
769 */
770 write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor);
743 771
744 mttgpr(7, 0); 772 /* Set up the XTC bit in vpeconf0 to point at our tc */
773 write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
774 | (t->index << VPECONF0_XTC_SHIFT));
745 775
746 /* set config to be the same as vpe0, particularly kseg0 coherency alg */ 776 /* enable this VPE */
747 write_vpe_c0_config(read_c0_config()); 777 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
748 778
749 /* clear out any left overs from a previous program */ 779 /* clear out any left overs from a previous program */
780 write_vpe_c0_status(0);
750 write_vpe_c0_cause(0); 781 write_vpe_c0_cause(0);
751 782
752 /* take system out of configuration state */ 783 /* take system out of configuration state */
753 clear_c0_mvpcontrol(MVPCONTROL_VPC); 784 clear_c0_mvpcontrol(MVPCONTROL_VPC);
754 785
755 /* clear interrupts enabled IE, ERL, EXL, and KSU from c0 status */ 786 /* now safe to re-enable multi-threading */
756 write_vpe_c0_status(read_vpe_c0_status() & ~(ST0_ERL | ST0_KSU | ST0_IE | ST0_EXL)); 787 emt(dmt_flag);
757 788
758 /* set it running */ 789 /* set it running */
759 evpe(EVPE_ENABLE); 790 evpe(EVPE_ENABLE);
760 791
792 list_for_each_entry(n, &v->notify, list) {
793 n->start(v->minor);
794 }
795
761 return 0; 796 return 0;
762} 797}
763 798
764static unsigned long find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, 799static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
765 unsigned int symindex, const char *strtab, 800 unsigned int symindex, const char *strtab,
766 struct module *mod) 801 struct module *mod)
767{ 802{
@@ -778,26 +813,28 @@ static unsigned long find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs,
778 } 813 }
779 } 814 }
780 815
816 if ( (v->__start == 0) || (v->shared_ptr == NULL))
817 return -1;
818
781 return 0; 819 return 0;
782} 820}
783 821
784/* 822/*
785 * Allocates a VPE with some program code space(the load address), copies 823 * Allocates a VPE with some program code space(the load address), copies the
786 * the contents of the program (p)buffer performing relocatations/etc, 824 * contents of the program (p)buffer performing relocatations/etc, free's it
787 * free's it when finished. 825 * when finished.
788*/ 826 */
789int vpe_elfload(struct vpe * v) 827int vpe_elfload(struct vpe * v)
790{ 828{
791 Elf_Ehdr *hdr; 829 Elf_Ehdr *hdr;
792 Elf_Shdr *sechdrs; 830 Elf_Shdr *sechdrs;
793 long err = 0; 831 long err = 0;
794 char *secstrings, *strtab = NULL; 832 char *secstrings, *strtab = NULL;
795 unsigned int len, i, symindex = 0, strindex = 0; 833 unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
796
797 struct module mod; // so we can re-use the relocations code 834 struct module mod; // so we can re-use the relocations code
798 835
799 memset(&mod, 0, sizeof(struct module)); 836 memset(&mod, 0, sizeof(struct module));
800 strcpy(mod.name, "VPE dummy prog module"); 837 strcpy(mod.name, "VPE loader");
801 838
802 hdr = (Elf_Ehdr *) v->pbuffer; 839 hdr = (Elf_Ehdr *) v->pbuffer;
803 len = v->plen; 840 len = v->plen;
@@ -805,16 +842,22 @@ int vpe_elfload(struct vpe * v)
805 /* Sanity checks against insmoding binaries or wrong arch, 842 /* Sanity checks against insmoding binaries or wrong arch,
806 weird elf version */ 843 weird elf version */
807 if (memcmp(hdr->e_ident, ELFMAG, 4) != 0 844 if (memcmp(hdr->e_ident, ELFMAG, 4) != 0
808 || hdr->e_type != ET_REL || !elf_check_arch(hdr) 845 || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
846 || !elf_check_arch(hdr)
809 || hdr->e_shentsize != sizeof(*sechdrs)) { 847 || hdr->e_shentsize != sizeof(*sechdrs)) {
810 printk(KERN_WARNING 848 printk(KERN_WARNING
811 "VPE program, wrong arch or weird elf version\n"); 849 "VPE loader: program wrong arch or weird elf version\n");
812 850
813 return -ENOEXEC; 851 return -ENOEXEC;
814 } 852 }
815 853
854 if (hdr->e_type == ET_REL)
855 relocate = 1;
856
816 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { 857 if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
817 printk(KERN_ERR "VPE program length %u truncated\n", len); 858 printk(KERN_ERR "VPE loader: program length %u truncated\n",
859 len);
860
818 return -ENOEXEC; 861 return -ENOEXEC;
819 } 862 }
820 863
@@ -826,82 +869,126 @@ int vpe_elfload(struct vpe * v)
826 /* And these should exist, but gcc whinges if we don't init them */ 869 /* And these should exist, but gcc whinges if we don't init them */
827 symindex = strindex = 0; 870 symindex = strindex = 0;
828 871
829 for (i = 1; i < hdr->e_shnum; i++) { 872 if (relocate) {
830 873 for (i = 1; i < hdr->e_shnum; i++) {
831 if (sechdrs[i].sh_type != SHT_NOBITS 874 if (sechdrs[i].sh_type != SHT_NOBITS
832 && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) { 875 && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) {
833 printk(KERN_ERR "VPE program length %u truncated\n", 876 printk(KERN_ERR "VPE program length %u truncated\n",
834 len); 877 len);
835 return -ENOEXEC; 878 return -ENOEXEC;
836 } 879 }
837 880
838 /* Mark all sections sh_addr with their address in the 881 /* Mark all sections sh_addr with their address in the
839 temporary image. */ 882 temporary image. */
840 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; 883 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
841 884
842 /* Internal symbols and strings. */ 885 /* Internal symbols and strings. */
843 if (sechdrs[i].sh_type == SHT_SYMTAB) { 886 if (sechdrs[i].sh_type == SHT_SYMTAB) {
844 symindex = i; 887 symindex = i;
845 strindex = sechdrs[i].sh_link; 888 strindex = sechdrs[i].sh_link;
846 strtab = (char *)hdr + sechdrs[strindex].sh_offset; 889 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
890 }
847 } 891 }
892 layout_sections(&mod, hdr, sechdrs, secstrings);
848 } 893 }
849 894
850 layout_sections(&mod, hdr, sechdrs, secstrings);
851
852 v->load_addr = alloc_progmem(mod.core_size); 895 v->load_addr = alloc_progmem(mod.core_size);
853 memset(v->load_addr, 0, mod.core_size); 896 memset(v->load_addr, 0, mod.core_size);
854 897
855 printk("VPE elf_loader: loading to %p\n", v->load_addr); 898 printk("VPE loader: loading to %p\n", v->load_addr);
856 899
857 for (i = 0; i < hdr->e_shnum; i++) { 900 if (relocate) {
858 void *dest; 901 for (i = 0; i < hdr->e_shnum; i++) {
902 void *dest;
859 903
860 if (!(sechdrs[i].sh_flags & SHF_ALLOC)) 904 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
861 continue; 905 continue;
862 906
863 dest = v->load_addr + sechdrs[i].sh_entsize; 907 dest = v->load_addr + sechdrs[i].sh_entsize;
864 908
865 if (sechdrs[i].sh_type != SHT_NOBITS) 909 if (sechdrs[i].sh_type != SHT_NOBITS)
866 memcpy(dest, (void *)sechdrs[i].sh_addr, 910 memcpy(dest, (void *)sechdrs[i].sh_addr,
867 sechdrs[i].sh_size); 911 sechdrs[i].sh_size);
868 /* Update sh_addr to point to copy in image. */ 912 /* Update sh_addr to point to copy in image. */
869 sechdrs[i].sh_addr = (unsigned long)dest; 913 sechdrs[i].sh_addr = (unsigned long)dest;
870 }
871 914
872 /* Fix up syms, so that st_value is a pointer to location. */ 915 printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n",
873 err = 916 secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr);
874 simplify_symbols(sechdrs, symindex, strtab, secstrings, 917 }
875 hdr->e_shnum, &mod);
876 if (err < 0) {
877 printk(KERN_WARNING "VPE: unable to simplify symbols\n");
878 goto cleanup;
879 }
880 918
881 /* Now do relocations. */ 919 /* Fix up syms, so that st_value is a pointer to location. */
882 for (i = 1; i < hdr->e_shnum; i++) { 920 simplify_symbols(sechdrs, symindex, strtab, secstrings,
883 const char *strtab = (char *)sechdrs[strindex].sh_addr; 921 hdr->e_shnum, &mod);
884 unsigned int info = sechdrs[i].sh_info; 922
885 923 /* Now do relocations. */
886 /* Not a valid relocation section? */ 924 for (i = 1; i < hdr->e_shnum; i++) {
887 if (info >= hdr->e_shnum) 925 const char *strtab = (char *)sechdrs[strindex].sh_addr;
888 continue; 926 unsigned int info = sechdrs[i].sh_info;
889 927
890 /* Don't bother with non-allocated sections */ 928 /* Not a valid relocation section? */
891 if (!(sechdrs[info].sh_flags & SHF_ALLOC)) 929 if (info >= hdr->e_shnum)
892 continue; 930 continue;
893 931
894 if (sechdrs[i].sh_type == SHT_REL) 932 /* Don't bother with non-allocated sections */
895 err = 933 if (!(sechdrs[info].sh_flags & SHF_ALLOC))
896 apply_relocations(sechdrs, strtab, symindex, i, &mod); 934 continue;
897 else if (sechdrs[i].sh_type == SHT_RELA) 935
898 err = apply_relocate_add(sechdrs, strtab, symindex, i, 936 if (sechdrs[i].sh_type == SHT_REL)
899 &mod); 937 err = apply_relocations(sechdrs, strtab, symindex, i,
900 if (err < 0) { 938 &mod);
901 printk(KERN_WARNING 939 else if (sechdrs[i].sh_type == SHT_RELA)
902 "vpe_elfload: error in relocations err %ld\n", 940 err = apply_relocate_add(sechdrs, strtab, symindex, i,
903 err); 941 &mod);
904 goto cleanup; 942 if (err < 0)
943 return err;
944
945 }
946 } else {
947 for (i = 0; i < hdr->e_shnum; i++) {
948
949 /* Internal symbols and strings. */
950 if (sechdrs[i].sh_type == SHT_SYMTAB) {
951 symindex = i;
952 strindex = sechdrs[i].sh_link;
953 strtab = (char *)hdr + sechdrs[strindex].sh_offset;
954
955 /* mark the symtab's address for when we try to find the
956 magic symbols */
957 sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset;
958 }
959
960 /* filter sections we dont want in the final image */
961 if (!(sechdrs[i].sh_flags & SHF_ALLOC) ||
962 (sechdrs[i].sh_type == SHT_MIPS_REGINFO)) {
963 printk( KERN_DEBUG " ignoring section, "
964 "name %s type %x address 0x%x \n",
965 secstrings + sechdrs[i].sh_name,
966 sechdrs[i].sh_type, sechdrs[i].sh_addr);
967 continue;
968 }
969
970 if (sechdrs[i].sh_addr < (unsigned int)v->load_addr) {
971 printk( KERN_WARNING "VPE loader: "
972 "fully linked image has invalid section, "
973 "name %s type %x address 0x%x, before load "
974 "address of 0x%x\n",
975 secstrings + sechdrs[i].sh_name,
976 sechdrs[i].sh_type, sechdrs[i].sh_addr,
977 (unsigned int)v->load_addr);
978 return -ENOEXEC;
979 }
980
981 printk(KERN_DEBUG " copying section sh_name %s, sh_addr 0x%x "
982 "size 0x%x0 from x%p\n",
983 secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr,
984 sechdrs[i].sh_size, hdr + sechdrs[i].sh_offset);
985
986 if (sechdrs[i].sh_type != SHT_NOBITS)
987 memcpy((void *)sechdrs[i].sh_addr,
988 (char *)hdr + sechdrs[i].sh_offset,
989 sechdrs[i].sh_size);
990 else
991 memset((void *)sechdrs[i].sh_addr, 0, sechdrs[i].sh_size);
905 } 992 }
906 } 993 }
907 994
@@ -910,71 +997,104 @@ int vpe_elfload(struct vpe * v)
910 (unsigned long)v->load_addr + v->len); 997 (unsigned long)v->load_addr + v->len);
911 998
912 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { 999 if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
1000 if (v->__start == 0) {
1001 printk(KERN_WARNING "VPE loader: program does not contain "
1002 "a __start symbol\n");
1003 return -ENOEXEC;
1004 }
913 1005
914 printk(KERN_WARNING 1006 if (v->shared_ptr == NULL)
915 "VPE: program doesn't contain __start or vpe_shared symbols\n"); 1007 printk(KERN_WARNING "VPE loader: "
916 err = -ENOEXEC; 1008 "program does not contain vpe_shared symbol.\n"
1009 " Unable to use AMVP (AP/SP) facilities.\n");
917 } 1010 }
918 1011
919 printk(" elf loaded\n"); 1012 printk(" elf loaded\n");
920 1013 return 0;
921cleanup:
922 return err;
923} 1014}
924 1015
925static void dump_vpe(struct vpe * v) 1016__attribute_used__ void dump_vpe(struct vpe * v)
926{ 1017{
927 struct tc *t; 1018 struct tc *t;
928 1019
1020 settc(v->minor);
1021
929 printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol()); 1022 printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol());
930 printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0()); 1023 printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0());
931 1024
932 list_for_each_entry(t, &vpecontrol.tc_list, list) { 1025 list_for_each_entry(t, &vpecontrol.tc_list, list)
933 dump_tc(t); 1026 dump_tc(t);
934 }
935} 1027}
936 1028
937/* checks for VPE is unused and gets ready to load program */ 1029static void cleanup_tc(struct tc *tc)
1030{
1031 int tmp;
1032
1033 /* Put MVPE's into 'configuration state' */
1034 set_c0_mvpcontrol(MVPCONTROL_VPC);
1035
1036 settc(tc->index);
1037 tmp = read_tc_c0_tcstatus();
1038
1039 /* mark not allocated and not dynamically allocatable */
1040 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1041 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1042 write_tc_c0_tcstatus(tmp);
1043
1044 write_tc_c0_tchalt(TCHALT_H);
1045
1046 /* bind it to anything other than VPE1 */
1047 write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE
1048
1049 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1050}
1051
1052static int getcwd(char *buff, int size)
1053{
1054 mm_segment_t old_fs;
1055 int ret;
1056
1057 old_fs = get_fs();
1058 set_fs(KERNEL_DS);
1059
1060 ret = sys_getcwd(buff,size);
1061
1062 set_fs(old_fs);
1063
1064 return ret;
1065}
1066
1067/* checks VPE is unused and gets ready to load program */
938static int vpe_open(struct inode *inode, struct file *filp) 1068static int vpe_open(struct inode *inode, struct file *filp)
939{ 1069{
940 int minor; 1070 int minor, ret;
941 struct vpe *v; 1071 struct vpe *v;
1072 struct vpe_notifications *not;
942 1073
943 /* assume only 1 device at the mo. */ 1074 /* assume only 1 device at the mo. */
944 if ((minor = MINOR(inode->i_rdev)) != 1) { 1075 if ((minor = MINOR(inode->i_rdev)) != 1) {
945 printk(KERN_WARNING "VPE: only vpe1 is supported\n"); 1076 printk(KERN_WARNING "VPE loader: only vpe1 is supported\n");
946 return -ENODEV; 1077 return -ENODEV;
947 } 1078 }
948 1079
949 if ((v = get_vpe(minor)) == NULL) { 1080 if ((v = get_vpe(minor)) == NULL) {
950 printk(KERN_WARNING "VPE: unable to get vpe\n"); 1081 printk(KERN_WARNING "VPE loader: unable to get vpe\n");
951 return -ENODEV; 1082 return -ENODEV;
952 } 1083 }
953 1084
954 if (v->state != VPE_STATE_UNUSED) { 1085 if (v->state != VPE_STATE_UNUSED) {
955 unsigned long tmp;
956 struct tc *t;
957
958 printk(KERN_WARNING "VPE: device %d already in use\n", minor);
959
960 dvpe(); 1086 dvpe();
961 dump_vpe(v);
962
963 printk(KERN_WARNING "VPE: re-initialising %d\n", minor);
964
965 release_progmem(v->load_addr);
966 1087
967 t = get_tc(minor); 1088 printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n");
968 settc(minor);
969 tmp = read_tc_c0_tcstatus();
970 1089
971 /* mark not allocated and not dynamically allocatable */ 1090 dump_tc(get_tc(minor));
972 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
973 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
974 write_tc_c0_tcstatus(tmp);
975 1091
976 write_tc_c0_tchalt(TCHALT_H); 1092 list_for_each_entry(not, &v->notify, list) {
1093 not->stop(minor);
1094 }
977 1095
1096 release_progmem(v->load_addr);
1097 cleanup_tc(get_tc(minor));
978 } 1098 }
979 1099
980 // allocate it so when we get write ops we know it's expected. 1100 // allocate it so when we get write ops we know it's expected.
@@ -986,6 +1106,24 @@ static int vpe_open(struct inode *inode, struct file *filp)
986 v->load_addr = NULL; 1106 v->load_addr = NULL;
987 v->len = 0; 1107 v->len = 0;
988 1108
1109 v->uid = filp->f_uid;
1110 v->gid = filp->f_gid;
1111
1112#ifdef CONFIG_MIPS_APSP_KSPD
1113 /* get kspd to tell us when a syscall_exit happens */
1114 if (!kspd_events_reqd) {
1115 kspd_notify(&kspd_events);
1116 kspd_events_reqd++;
1117 }
1118#endif
1119
1120 v->cwd[0] = 0;
1121 ret = getcwd(v->cwd, VPE_PATH_MAX);
1122 if (ret < 0)
1123 printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret);
1124
1125 v->shared_ptr = NULL;
1126 v->__start = 0;
989 return 0; 1127 return 0;
990} 1128}
991 1129
@@ -1006,14 +1144,22 @@ static int vpe_release(struct inode *inode, struct file *filp)
1006 if (vpe_elfload(v) >= 0) 1144 if (vpe_elfload(v) >= 0)
1007 vpe_run(v); 1145 vpe_run(v);
1008 else { 1146 else {
1009 printk(KERN_WARNING "VPE: ELF load failed.\n"); 1147 printk(KERN_WARNING "VPE loader: ELF load failed.\n");
1010 ret = -ENOEXEC; 1148 ret = -ENOEXEC;
1011 } 1149 }
1012 } else { 1150 } else {
1013 printk(KERN_WARNING "VPE: only elf files are supported\n"); 1151 printk(KERN_WARNING "VPE loader: only elf files are supported\n");
1014 ret = -ENOEXEC; 1152 ret = -ENOEXEC;
1015 } 1153 }
1016 1154
1155 /* It's good to be able to run the SP and if it chokes have a look at
1156 the /dev/rt?. But if we reset the pointer to the shared struct we
1157 loose what has happened. So perhaps if garbage is sent to the vpe
1158 device, use it as a trigger for the reset. Hopefully a nice
1159 executable will be along shortly. */
1160 if (ret < 0)
1161 v->shared_ptr = NULL;
1162
1017 // cleanup any temp buffers 1163 // cleanup any temp buffers
1018 if (v->pbuffer) 1164 if (v->pbuffer)
1019 vfree(v->pbuffer); 1165 vfree(v->pbuffer);
@@ -1033,21 +1179,19 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
1033 return -ENODEV; 1179 return -ENODEV;
1034 1180
1035 if (v->pbuffer == NULL) { 1181 if (v->pbuffer == NULL) {
1036 printk(KERN_ERR "vpe_write: no pbuffer\n"); 1182 printk(KERN_ERR "VPE loader: no buffer for program\n");
1037 return -ENOMEM; 1183 return -ENOMEM;
1038 } 1184 }
1039 1185
1040 if ((count + v->len) > v->plen) { 1186 if ((count + v->len) > v->plen) {
1041 printk(KERN_WARNING 1187 printk(KERN_WARNING
1042 "VPE Loader: elf size too big. Perhaps strip uneeded symbols\n"); 1188 "VPE loader: elf size too big. Perhaps strip uneeded symbols\n");
1043 return -ENOMEM; 1189 return -ENOMEM;
1044 } 1190 }
1045 1191
1046 count -= copy_from_user(v->pbuffer + v->len, buffer, count); 1192 count -= copy_from_user(v->pbuffer + v->len, buffer, count);
1047 if (!count) { 1193 if (!count)
1048 printk("vpe_write: copy_to_user failed\n");
1049 return -EFAULT; 1194 return -EFAULT;
1050 }
1051 1195
1052 v->len += count; 1196 v->len += count;
1053 return ret; 1197 return ret;
@@ -1149,16 +1293,70 @@ void *vpe_get_shared(int index)
1149{ 1293{
1150 struct vpe *v; 1294 struct vpe *v;
1151 1295
1152 if ((v = get_vpe(index)) == NULL) { 1296 if ((v = get_vpe(index)) == NULL)
1153 printk(KERN_WARNING "vpe: invalid vpe index %d\n", index);
1154 return NULL; 1297 return NULL;
1155 }
1156 1298
1157 return v->shared_ptr; 1299 return v->shared_ptr;
1158} 1300}
1159 1301
1160EXPORT_SYMBOL(vpe_get_shared); 1302EXPORT_SYMBOL(vpe_get_shared);
1161 1303
1304int vpe_getuid(int index)
1305{
1306 struct vpe *v;
1307
1308 if ((v = get_vpe(index)) == NULL)
1309 return -1;
1310
1311 return v->uid;
1312}
1313
1314EXPORT_SYMBOL(vpe_getuid);
1315
1316int vpe_getgid(int index)
1317{
1318 struct vpe *v;
1319
1320 if ((v = get_vpe(index)) == NULL)
1321 return -1;
1322
1323 return v->gid;
1324}
1325
1326EXPORT_SYMBOL(vpe_getgid);
1327
1328int vpe_notify(int index, struct vpe_notifications *notify)
1329{
1330 struct vpe *v;
1331
1332 if ((v = get_vpe(index)) == NULL)
1333 return -1;
1334
1335 list_add(&notify->list, &v->notify);
1336 return 0;
1337}
1338
1339EXPORT_SYMBOL(vpe_notify);
1340
1341char *vpe_getcwd(int index)
1342{
1343 struct vpe *v;
1344
1345 if ((v = get_vpe(index)) == NULL)
1346 return NULL;
1347
1348 return v->cwd;
1349}
1350
1351EXPORT_SYMBOL(vpe_getcwd);
1352
1353#ifdef CONFIG_MIPS_APSP_KSPD
1354static void kspd_sp_exit( int sp_id)
1355{
1356 cleanup_tc(get_tc(sp_id));
1357}
1358#endif
1359
1162static int __init vpe_module_init(void) 1360static int __init vpe_module_init(void)
1163{ 1361{
1164 struct vpe *v = NULL; 1362 struct vpe *v = NULL;
@@ -1201,7 +1399,8 @@ static int __init vpe_module_init(void)
1201 return -ENODEV; 1399 return -ENODEV;
1202 } 1400 }
1203 1401
1204 list_add(&t->tc, &v->tc); /* add the tc to the list of this vpe's tc's. */ 1402 /* add the tc to the list of this vpe's tc's. */
1403 list_add(&t->tc, &v->tc);
1205 1404
1206 /* deactivate all but vpe0 */ 1405 /* deactivate all but vpe0 */
1207 if (i != 0) { 1406 if (i != 0) {
@@ -1222,10 +1421,12 @@ static int __init vpe_module_init(void)
1222 ~(ST0_IM | ST0_IE | ST0_KSU)) 1421 ~(ST0_IM | ST0_IE | ST0_KSU))
1223 | ST0_CU0); 1422 | ST0_CU0);
1224 1423
1225 /* set config to be the same as vpe0, particularly kseg0 coherency alg */ 1424 /*
1425 * Set config to be the same as vpe0,
1426 * particularly kseg0 coherency alg
1427 */
1226 write_vpe_c0_config(read_c0_config()); 1428 write_vpe_c0_config(read_c0_config());
1227 } 1429 }
1228
1229 } 1430 }
1230 1431
1231 /* TC's */ 1432 /* TC's */
@@ -1234,23 +1435,28 @@ static int __init vpe_module_init(void)
1234 if (i != 0) { 1435 if (i != 0) {
1235 unsigned long tmp; 1436 unsigned long tmp;
1236 1437
1237 /* tc 0 will of course be running.... */
1238 if (i == 0)
1239 t->state = TC_STATE_RUNNING;
1240
1241 settc(i); 1438 settc(i);
1242 1439
1243 /* bind a TC to each VPE, May as well put all excess TC's 1440 /* Any TC that is bound to VPE0 gets left as is - in case
1244 on the last VPE */ 1441 we are running SMTC on VPE0. A TC that is bound to any
1245 if (i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1)) 1442 other VPE gets bound to VPE0, ideally I'd like to make
1246 write_tc_c0_tcbind(read_tc_c0_tcbind() | 1443 it homeless but it doesn't appear to let me bind a TC
1247 ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)); 1444 to a non-existent VPE. Which is perfectly reasonable.
1248 else 1445
1249 write_tc_c0_tcbind(read_tc_c0_tcbind() | i); 1446 The (un)bound state is visible to an EJTAG probe so may
1447 notify GDB...
1448 */
1449
1450 if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) {
1451 /* tc is bound >vpe0 */
1452 write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
1453
1454 t->pvpe = get_vpe(0); /* set the parent vpe */
1455 }
1250 1456
1251 tmp = read_tc_c0_tcstatus(); 1457 tmp = read_tc_c0_tcstatus();
1252 1458
1253 /* mark not allocated and not dynamically allocatable */ 1459 /* mark not activated and not dynamically allocatable */
1254 tmp &= ~(TCSTATUS_A | TCSTATUS_DA); 1460 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
1255 tmp |= TCSTATUS_IXMT; /* interrupt exempt */ 1461 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
1256 write_tc_c0_tcstatus(tmp); 1462 write_tc_c0_tcstatus(tmp);
@@ -1262,6 +1468,9 @@ static int __init vpe_module_init(void)
1262 /* release config state */ 1468 /* release config state */
1263 clear_c0_mvpcontrol(MVPCONTROL_VPC); 1469 clear_c0_mvpcontrol(MVPCONTROL_VPC);
1264 1470
1471#ifdef CONFIG_MIPS_APSP_KSPD
1472 kspd_events.kspd_sp_exit = kspd_sp_exit;
1473#endif
1265 return 0; 1474 return 0;
1266} 1475}
1267 1476
@@ -1281,5 +1490,5 @@ static void __exit vpe_module_exit(void)
1281module_init(vpe_module_init); 1490module_init(vpe_module_init);
1282module_exit(vpe_module_exit); 1491module_exit(vpe_module_exit);
1283MODULE_DESCRIPTION("MIPS VPE Loader"); 1492MODULE_DESCRIPTION("MIPS VPE Loader");
1284MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc"); 1493MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
1285MODULE_LICENSE("GPL"); 1494MODULE_LICENSE("GPL");
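
For reference, the user-space side of the load path is unchanged in spirit by this patch: write an SP ELF image (now either ET_REL or a fully linked ET_EXEC) to /dev/vpe1, and the final close() triggers vpe_elfload()/vpe_run(). The sketch below is the programmatic equivalent of the "cat spapp >/dev/vpe1" mentioned in the file's header comment; it is not part of the commit and assumes the /dev/vpe1 node already exists.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        char buf[4096];
        ssize_t n;
        int in, out;

        if (argc < 2) {
                fprintf(stderr, "usage: %s <sp-elf-image>\n", argv[0]);
                return 1;
        }

        in = open(argv[1], O_RDONLY);
        out = open("/dev/vpe1", O_WRONLY);      /* only vpe1 is supported */
        if (in < 0 || out < 0) {
                perror("open");
                return 1;
        }

        while ((n = read(in, buf, sizeof(buf))) > 0)
                if (write(out, buf, n) != n) {
                        perror("write");
                        return 1;
                }

        close(in);
        close(out);     /* release() runs vpe_elfload() and vpe_run() */
        return 0;
}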