diff options
Diffstat (limited to 'arch/mips/kernel/vpe.c')
-rw-r--r-- | arch/mips/kernel/vpe.c | 359 |
1 files changed, 226 insertions, 133 deletions
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index a2bee10f04cf..3c09b9785f4c 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -27,7 +27,6 @@ | |||
27 | * To load and run, simply cat a SP 'program file' to /dev/vpe1. | 27 | * To load and run, simply cat a SP 'program file' to /dev/vpe1. |
28 | * i.e cat spapp >/dev/vpe1. | 28 | * i.e cat spapp >/dev/vpe1. |
29 | */ | 29 | */ |
30 | |||
31 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
32 | #include <linux/device.h> | 31 | #include <linux/device.h> |
33 | #include <linux/module.h> | 32 | #include <linux/module.h> |
@@ -54,6 +53,7 @@ | |||
54 | #include <asm/system.h> | 53 | #include <asm/system.h> |
55 | #include <asm/vpe.h> | 54 | #include <asm/vpe.h> |
56 | #include <asm/kspd.h> | 55 | #include <asm/kspd.h> |
56 | #include <asm/mips_mt.h> | ||
57 | 57 | ||
58 | typedef void *vpe_handle; | 58 | typedef void *vpe_handle; |
59 | 59 | ||
@@ -64,6 +64,10 @@ typedef void *vpe_handle; | |||
64 | /* If this is set, the section belongs in the init part of the module */ | 64 | /* If this is set, the section belongs in the init part of the module */ |
65 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) | 65 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) |
66 | 66 | ||
67 | /* | ||
68 | * The number of TCs and VPEs physically available on the core | ||
69 | */ | ||
70 | static int hw_tcs, hw_vpes; | ||
67 | static char module_name[] = "vpe"; | 71 | static char module_name[] = "vpe"; |
68 | static int major; | 72 | static int major; |
69 | static const int minor = 1; /* fixed for now */ | 73 | static const int minor = 1; /* fixed for now */ |
@@ -126,20 +130,17 @@ struct vpe { | |||
126 | 130 | ||
127 | /* the list of who wants to know when something major happens */ | 131 | /* the list of who wants to know when something major happens */ |
128 | struct list_head notify; | 132 | struct list_head notify; |
133 | |||
134 | unsigned int ntcs; | ||
129 | }; | 135 | }; |
130 | 136 | ||
131 | struct tc { | 137 | struct tc { |
132 | enum tc_state state; | 138 | enum tc_state state; |
133 | int index; | 139 | int index; |
134 | 140 | ||
135 | /* parent VPE */ | 141 | struct vpe *pvpe; /* parent VPE */ |
136 | struct vpe *pvpe; | 142 | struct list_head tc; /* The list of TC's with this VPE */ |
137 | 143 | struct list_head list; /* The global list of tc's */ | |
138 | /* The list of TC's with this VPE */ | ||
139 | struct list_head tc; | ||
140 | |||
141 | /* The global list of tc's */ | ||
142 | struct list_head list; | ||
143 | }; | 144 | }; |
144 | 145 | ||
145 | struct { | 146 | struct { |
@@ -217,18 +218,17 @@ struct vpe *alloc_vpe(int minor) | |||
217 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ | 218 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ |
218 | struct tc *alloc_tc(int index) | 219 | struct tc *alloc_tc(int index) |
219 | { | 220 | { |
220 | struct tc *t; | 221 | struct tc *tc; |
221 | 222 | ||
222 | if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) { | 223 | if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) |
223 | return NULL; | 224 | goto out; |
224 | } | ||
225 | |||
226 | INIT_LIST_HEAD(&t->tc); | ||
227 | list_add_tail(&t->list, &vpecontrol.tc_list); | ||
228 | 225 | ||
229 | t->index = index; | 226 | INIT_LIST_HEAD(&tc->tc); |
227 | tc->index = index; | ||
228 | list_add_tail(&tc->list, &vpecontrol.tc_list); | ||
230 | 229 | ||
231 | return t; | 230 | out: |
231 | return tc; | ||
232 | } | 232 | } |
233 | 233 | ||
234 | /* clean up and free everything */ | 234 | /* clean up and free everything */ |
@@ -663,66 +663,48 @@ static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex, | |||
663 | } | 663 | } |
664 | #endif | 664 | #endif |
665 | 665 | ||
666 | static void dump_tc(struct tc *t) | ||
667 | { | ||
668 | unsigned long val; | ||
669 | |||
670 | settc(t->index); | ||
671 | printk(KERN_DEBUG "VPE loader: TC index %d targtc %ld " | ||
672 | "TCStatus 0x%lx halt 0x%lx\n", | ||
673 | t->index, read_c0_vpecontrol() & VPECONTROL_TARGTC, | ||
674 | read_tc_c0_tcstatus(), read_tc_c0_tchalt()); | ||
675 | |||
676 | printk(KERN_DEBUG " tcrestart 0x%lx\n", read_tc_c0_tcrestart()); | ||
677 | printk(KERN_DEBUG " tcbind 0x%lx\n", read_tc_c0_tcbind()); | ||
678 | |||
679 | val = read_c0_vpeconf0(); | ||
680 | printk(KERN_DEBUG " VPEConf0 0x%lx MVP %ld\n", val, | ||
681 | (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT); | ||
682 | |||
683 | printk(KERN_DEBUG " c0 status 0x%lx\n", read_vpe_c0_status()); | ||
684 | printk(KERN_DEBUG " c0 cause 0x%lx\n", read_vpe_c0_cause()); | ||
685 | |||
686 | printk(KERN_DEBUG " c0 badvaddr 0x%lx\n", read_vpe_c0_badvaddr()); | ||
687 | printk(KERN_DEBUG " c0 epc 0x%lx\n", read_vpe_c0_epc()); | ||
688 | } | ||
689 | |||
690 | static void dump_tclist(void) | ||
691 | { | ||
692 | struct tc *t; | ||
693 | |||
694 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
695 | dump_tc(t); | ||
696 | } | ||
697 | } | ||
698 | |||
699 | /* We are prepared so configure and start the VPE... */ | 666 | /* We are prepared so configure and start the VPE... */ |
700 | static int vpe_run(struct vpe * v) | 667 | static int vpe_run(struct vpe * v) |
701 | { | 668 | { |
669 | unsigned long flags, val, dmt_flag; | ||
702 | struct vpe_notifications *n; | 670 | struct vpe_notifications *n; |
703 | unsigned long val, dmt_flag; | 671 | unsigned int vpeflags; |
704 | struct tc *t; | 672 | struct tc *t; |
705 | 673 | ||
706 | /* check we are the Master VPE */ | 674 | /* check we are the Master VPE */ |
675 | local_irq_save(flags); | ||
707 | val = read_c0_vpeconf0(); | 676 | val = read_c0_vpeconf0(); |
708 | if (!(val & VPECONF0_MVP)) { | 677 | if (!(val & VPECONF0_MVP)) { |
709 | printk(KERN_WARNING | 678 | printk(KERN_WARNING |
710 | "VPE loader: only Master VPE's are allowed to configure MT\n"); | 679 | "VPE loader: only Master VPE's are allowed to configure MT\n"); |
680 | local_irq_restore(flags); | ||
681 | |||
711 | return -1; | 682 | return -1; |
712 | } | 683 | } |
713 | 684 | ||
714 | /* disable MT (using dvpe) */ | 685 | dmt_flag = dmt(); |
715 | dvpe(); | 686 | vpeflags = dvpe(); |
716 | 687 | ||
717 | if (!list_empty(&v->tc)) { | 688 | if (!list_empty(&v->tc)) { |
718 | if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { | 689 | if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { |
719 | printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", | 690 | evpe(vpeflags); |
720 | t->index); | 691 | emt(dmt_flag); |
692 | local_irq_restore(flags); | ||
693 | |||
694 | printk(KERN_WARNING | ||
695 | "VPE loader: TC %d is already in use.\n", | ||
696 | t->index); | ||
721 | return -ENOEXEC; | 697 | return -ENOEXEC; |
722 | } | 698 | } |
723 | } else { | 699 | } else { |
724 | printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n", | 700 | evpe(vpeflags); |
701 | emt(dmt_flag); | ||
702 | local_irq_restore(flags); | ||
703 | |||
704 | printk(KERN_WARNING | ||
705 | "VPE loader: No TC's associated with VPE %d\n", | ||
725 | v->minor); | 706 | v->minor); |
707 | |||
726 | return -ENOEXEC; | 708 | return -ENOEXEC; |
727 | } | 709 | } |
728 | 710 | ||
@@ -733,21 +715,20 @@ static int vpe_run(struct vpe * v) | |||
733 | 715 | ||
734 | /* should check it is halted, and not activated */ | 716 | /* should check it is halted, and not activated */ |
735 | if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { | 717 | if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { |
736 | printk(KERN_WARNING "VPE loader: TC %d is already doing something!\n", | 718 | evpe(vpeflags); |
719 | emt(dmt_flag); | ||
720 | local_irq_restore(flags); | ||
721 | |||
722 | printk(KERN_WARNING "VPE loader: TC %d is already active!\n", | ||
737 | t->index); | 723 | t->index); |
738 | dump_tclist(); | 724 | |
739 | return -ENOEXEC; | 725 | return -ENOEXEC; |
740 | } | 726 | } |
741 | 727 | ||
742 | /* | ||
743 | * Disable multi-threaded execution whilst we activate, clear the | ||
744 | * halt bit and bound the tc to the other VPE... | ||
745 | */ | ||
746 | dmt_flag = dmt(); | ||
747 | |||
748 | /* Write the address we want it to start running from in the TCPC register. */ | 728 | /* Write the address we want it to start running from in the TCPC register. */ |
749 | write_tc_c0_tcrestart((unsigned long)v->__start); | 729 | write_tc_c0_tcrestart((unsigned long)v->__start); |
750 | write_tc_c0_tccontext((unsigned long)0); | 730 | write_tc_c0_tccontext((unsigned long)0); |
731 | |||
751 | /* | 732 | /* |
752 | * Mark the TC as activated, not interrupt exempt and not dynamically | 733 | * Mark the TC as activated, not interrupt exempt and not dynamically |
753 | * allocatable | 734 | * allocatable |
@@ -763,15 +744,15 @@ static int vpe_run(struct vpe * v) | |||
763 | * here... Or set $a3 to zero and define DFLT_STACK_SIZE and | 744 | * here... Or set $a3 to zero and define DFLT_STACK_SIZE and |
764 | * DFLT_HEAP_SIZE when you compile your program | 745 | * DFLT_HEAP_SIZE when you compile your program |
765 | */ | 746 | */ |
766 | mttgpr(7, physical_memsize); | 747 | mttgpr(6, v->ntcs); |
767 | 748 | mttgpr(7, physical_memsize); | |
768 | 749 | ||
769 | /* set up VPE1 */ | 750 | /* set up VPE1 */ |
770 | /* | 751 | /* |
771 | * bind the TC to VPE 1 as late as possible so we only have the final | 752 | * bind the TC to VPE 1 as late as possible so we only have the final |
772 | * VPE registers to set up, and so an EJTAG probe can trigger on it | 753 | * VPE registers to set up, and so an EJTAG probe can trigger on it |
773 | */ | 754 | */ |
774 | write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor); | 755 | write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); |
775 | 756 | ||
776 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); | 757 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); |
777 | 758 | ||
@@ -793,15 +774,16 @@ static int vpe_run(struct vpe * v) | |||
793 | /* take system out of configuration state */ | 774 | /* take system out of configuration state */ |
794 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 775 | clear_c0_mvpcontrol(MVPCONTROL_VPC); |
795 | 776 | ||
796 | /* now safe to re-enable multi-threading */ | 777 | #ifdef CONFIG_SMP |
797 | emt(dmt_flag); | ||
798 | |||
799 | /* set it running */ | ||
800 | evpe(EVPE_ENABLE); | 778 | evpe(EVPE_ENABLE); |
779 | #else | ||
780 | evpe(vpeflags); | ||
781 | #endif | ||
782 | emt(dmt_flag); | ||
783 | local_irq_restore(flags); | ||
801 | 784 | ||
802 | list_for_each_entry(n, &v->notify, list) { | 785 | list_for_each_entry(n, &v->notify, list) |
803 | n->start(v->minor); | 786 | n->start(minor); |
804 | } | ||
805 | 787 | ||
806 | return 0; | 788 | return 0; |
807 | } | 789 | } |
@@ -1023,23 +1005,15 @@ static int vpe_elfload(struct vpe * v) | |||
1023 | return 0; | 1005 | return 0; |
1024 | } | 1006 | } |
1025 | 1007 | ||
1026 | void __used dump_vpe(struct vpe * v) | ||
1027 | { | ||
1028 | struct tc *t; | ||
1029 | |||
1030 | settc(v->minor); | ||
1031 | |||
1032 | printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol()); | ||
1033 | printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0()); | ||
1034 | |||
1035 | list_for_each_entry(t, &vpecontrol.tc_list, list) | ||
1036 | dump_tc(t); | ||
1037 | } | ||
1038 | |||
1039 | static void cleanup_tc(struct tc *tc) | 1008 | static void cleanup_tc(struct tc *tc) |
1040 | { | 1009 | { |
1010 | unsigned long flags; | ||
1011 | unsigned int mtflags, vpflags; | ||
1041 | int tmp; | 1012 | int tmp; |
1042 | 1013 | ||
1014 | local_irq_save(flags); | ||
1015 | mtflags = dmt(); | ||
1016 | vpflags = dvpe(); | ||
1043 | /* Put MVPE's into 'configuration state' */ | 1017 | /* Put MVPE's into 'configuration state' */ |
1044 | set_c0_mvpcontrol(MVPCONTROL_VPC); | 1018 | set_c0_mvpcontrol(MVPCONTROL_VPC); |
1045 | 1019 | ||
@@ -1054,9 +1028,12 @@ static void cleanup_tc(struct tc *tc) | |||
1054 | write_tc_c0_tchalt(TCHALT_H); | 1028 | write_tc_c0_tchalt(TCHALT_H); |
1055 | 1029 | ||
1056 | /* bind it to anything other than VPE1 */ | 1030 | /* bind it to anything other than VPE1 */ |
1057 | write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE | 1031 | // write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE |
1058 | 1032 | ||
1059 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 1033 | clear_c0_mvpcontrol(MVPCONTROL_VPC); |
1034 | evpe(vpflags); | ||
1035 | emt(mtflags); | ||
1036 | local_irq_restore(flags); | ||
1060 | } | 1037 | } |
1061 | 1038 | ||
1062 | static int getcwd(char *buff, int size) | 1039 | static int getcwd(char *buff, int size) |
@@ -1077,36 +1054,32 @@ static int getcwd(char *buff, int size) | |||
1077 | /* checks VPE is unused and gets ready to load program */ | 1054 | /* checks VPE is unused and gets ready to load program */ |
1078 | static int vpe_open(struct inode *inode, struct file *filp) | 1055 | static int vpe_open(struct inode *inode, struct file *filp) |
1079 | { | 1056 | { |
1080 | int minor, ret; | ||
1081 | enum vpe_state state; | 1057 | enum vpe_state state; |
1082 | struct vpe *v; | ||
1083 | struct vpe_notifications *not; | 1058 | struct vpe_notifications *not; |
1059 | struct vpe *v; | ||
1060 | int ret; | ||
1084 | 1061 | ||
1085 | /* assume only 1 device at the mo. */ | 1062 | if (minor != iminor(inode)) { |
1086 | if ((minor = iminor(inode)) != 1) { | 1063 | /* assume only 1 device at the moment. */ |
1087 | printk(KERN_WARNING "VPE loader: only vpe1 is supported\n"); | 1064 | printk(KERN_WARNING "VPE loader: only vpe1 is supported\n"); |
1088 | return -ENODEV; | 1065 | return -ENODEV; |
1089 | } | 1066 | } |
1090 | 1067 | ||
1091 | if ((v = get_vpe(minor)) == NULL) { | 1068 | if ((v = get_vpe(tclimit)) == NULL) { |
1092 | printk(KERN_WARNING "VPE loader: unable to get vpe\n"); | 1069 | printk(KERN_WARNING "VPE loader: unable to get vpe\n"); |
1093 | return -ENODEV; | 1070 | return -ENODEV; |
1094 | } | 1071 | } |
1095 | 1072 | ||
1096 | state = xchg(&v->state, VPE_STATE_INUSE); | 1073 | state = xchg(&v->state, VPE_STATE_INUSE); |
1097 | if (state != VPE_STATE_UNUSED) { | 1074 | if (state != VPE_STATE_UNUSED) { |
1098 | dvpe(); | ||
1099 | |||
1100 | printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n"); | 1075 | printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n"); |
1101 | 1076 | ||
1102 | dump_tc(get_tc(minor)); | ||
1103 | |||
1104 | list_for_each_entry(not, &v->notify, list) { | 1077 | list_for_each_entry(not, &v->notify, list) { |
1105 | not->stop(minor); | 1078 | not->stop(tclimit); |
1106 | } | 1079 | } |
1107 | 1080 | ||
1108 | release_progmem(v->load_addr); | 1081 | release_progmem(v->load_addr); |
1109 | cleanup_tc(get_tc(minor)); | 1082 | cleanup_tc(get_tc(tclimit)); |
1110 | } | 1083 | } |
1111 | 1084 | ||
1112 | /* this of-course trashes what was there before... */ | 1085 | /* this of-course trashes what was there before... */ |
@@ -1133,26 +1106,25 @@ static int vpe_open(struct inode *inode, struct file *filp) | |||
1133 | 1106 | ||
1134 | v->shared_ptr = NULL; | 1107 | v->shared_ptr = NULL; |
1135 | v->__start = 0; | 1108 | v->__start = 0; |
1109 | |||
1136 | return 0; | 1110 | return 0; |
1137 | } | 1111 | } |
1138 | 1112 | ||
1139 | static int vpe_release(struct inode *inode, struct file *filp) | 1113 | static int vpe_release(struct inode *inode, struct file *filp) |
1140 | { | 1114 | { |
1141 | int minor, ret = 0; | ||
1142 | struct vpe *v; | 1115 | struct vpe *v; |
1143 | Elf_Ehdr *hdr; | 1116 | Elf_Ehdr *hdr; |
1117 | int ret = 0; | ||
1144 | 1118 | ||
1145 | minor = iminor(inode); | 1119 | v = get_vpe(tclimit); |
1146 | if ((v = get_vpe(minor)) == NULL) | 1120 | if (v == NULL) |
1147 | return -ENODEV; | 1121 | return -ENODEV; |
1148 | 1122 | ||
1149 | // simple case of fire and forget, so tell the VPE to run... | ||
1150 | |||
1151 | hdr = (Elf_Ehdr *) v->pbuffer; | 1123 | hdr = (Elf_Ehdr *) v->pbuffer; |
1152 | if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) { | 1124 | if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) { |
1153 | if (vpe_elfload(v) >= 0) | 1125 | if (vpe_elfload(v) >= 0) { |
1154 | vpe_run(v); | 1126 | vpe_run(v); |
1155 | else { | 1127 | } else { |
1156 | printk(KERN_WARNING "VPE loader: ELF load failed.\n"); | 1128 | printk(KERN_WARNING "VPE loader: ELF load failed.\n"); |
1157 | ret = -ENOEXEC; | 1129 | ret = -ENOEXEC; |
1158 | } | 1130 | } |
@@ -1179,12 +1151,14 @@ static int vpe_release(struct inode *inode, struct file *filp) | |||
1179 | static ssize_t vpe_write(struct file *file, const char __user * buffer, | 1151 | static ssize_t vpe_write(struct file *file, const char __user * buffer, |
1180 | size_t count, loff_t * ppos) | 1152 | size_t count, loff_t * ppos) |
1181 | { | 1153 | { |
1182 | int minor; | ||
1183 | size_t ret = count; | 1154 | size_t ret = count; |
1184 | struct vpe *v; | 1155 | struct vpe *v; |
1185 | 1156 | ||
1186 | minor = iminor(file->f_path.dentry->d_inode); | 1157 | if (iminor(file->f_path.dentry->d_inode) != minor) |
1187 | if ((v = get_vpe(minor)) == NULL) | 1158 | return -ENODEV; |
1159 | |||
1160 | v = get_vpe(tclimit); | ||
1161 | if (v == NULL) | ||
1188 | return -ENODEV; | 1162 | return -ENODEV; |
1189 | 1163 | ||
1190 | if (v->pbuffer == NULL) { | 1164 | if (v->pbuffer == NULL) { |
@@ -1366,62 +1340,173 @@ static void kspd_sp_exit( int sp_id) | |||
1366 | } | 1340 | } |
1367 | #endif | 1341 | #endif |
1368 | 1342 | ||
1369 | static struct device *vpe_dev; | 1343 | static ssize_t store_kill(struct class_device *dev, const char *buf, size_t len) |
1344 | { | ||
1345 | struct vpe *vpe = get_vpe(tclimit); | ||
1346 | struct vpe_notifications *not; | ||
1347 | |||
1348 | list_for_each_entry(not, &vpe->notify, list) { | ||
1349 | not->stop(tclimit); | ||
1350 | } | ||
1351 | |||
1352 | release_progmem(vpe->load_addr); | ||
1353 | cleanup_tc(get_tc(tclimit)); | ||
1354 | vpe_stop(vpe); | ||
1355 | vpe_free(vpe); | ||
1356 | |||
1357 | return len; | ||
1358 | } | ||
1359 | |||
1360 | static ssize_t show_ntcs(struct class_device *cd, char *buf) | ||
1361 | { | ||
1362 | struct vpe *vpe = get_vpe(tclimit); | ||
1363 | |||
1364 | return sprintf(buf, "%d\n", vpe->ntcs); | ||
1365 | } | ||
1366 | |||
1367 | static ssize_t store_ntcs(struct class_device *dev, const char *buf, size_t len) | ||
1368 | { | ||
1369 | struct vpe *vpe = get_vpe(tclimit); | ||
1370 | unsigned long new; | ||
1371 | char *endp; | ||
1372 | |||
1373 | new = simple_strtoul(buf, &endp, 0); | ||
1374 | if (endp == buf) | ||
1375 | goto out_einval; | ||
1376 | |||
1377 | if (new == 0 || new > (hw_tcs - tclimit)) | ||
1378 | goto out_einval; | ||
1379 | |||
1380 | vpe->ntcs = new; | ||
1381 | |||
1382 | return len; | ||
1383 | |||
1384 | out_einval: | ||
1385 | return -EINVAL; | ||
1386 | } | ||
1387 | |||
1388 | static struct class_device_attribute vpe_class_attributes[] = { | ||
1389 | __ATTR(kill, S_IWUSR, NULL, store_kill), | ||
1390 | __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs), | ||
1391 | {} | ||
1392 | }; | ||
1393 | |||
1394 | static void vpe_class_device_release(struct class_device *cd) | ||
1395 | { | ||
1396 | kfree(cd); | ||
1397 | } | ||
1398 | |||
1399 | struct class vpe_class = { | ||
1400 | .name = "vpe", | ||
1401 | .owner = THIS_MODULE, | ||
1402 | .release = vpe_class_device_release, | ||
1403 | .class_dev_attrs = vpe_class_attributes, | ||
1404 | }; | ||
1405 | |||
1406 | struct class_device vpe_device; | ||
1370 | 1407 | ||
1371 | static int __init vpe_module_init(void) | 1408 | static int __init vpe_module_init(void) |
1372 | { | 1409 | { |
1410 | unsigned int mtflags, vpflags; | ||
1411 | unsigned long flags, val; | ||
1373 | struct vpe *v = NULL; | 1412 | struct vpe *v = NULL; |
1374 | struct device *dev; | ||
1375 | struct tc *t; | 1413 | struct tc *t; |
1376 | unsigned long val; | 1414 | int tc, err; |
1377 | int i, err; | ||
1378 | 1415 | ||
1379 | if (!cpu_has_mipsmt) { | 1416 | if (!cpu_has_mipsmt) { |
1380 | printk("VPE loader: not a MIPS MT capable processor\n"); | 1417 | printk("VPE loader: not a MIPS MT capable processor\n"); |
1381 | return -ENODEV; | 1418 | return -ENODEV; |
1382 | } | 1419 | } |
1383 | 1420 | ||
1421 | if (vpelimit == 0) { | ||
1422 | printk(KERN_WARNING "No VPEs reserved for AP/SP, not " | ||
1423 | "initializing VPE loader.\nPass maxvpes=<n> argument as " | ||
1424 | "kernel argument\n"); | ||
1425 | |||
1426 | return -ENODEV; | ||
1427 | } | ||
1428 | |||
1429 | if (tclimit == 0) { | ||
1430 | printk(KERN_WARNING "No TCs reserved for AP/SP, not " | ||
1431 | "initializing VPE loader.\nPass maxtcs=<n> argument as " | ||
1432 | "kernel argument\n"); | ||
1433 | |||
1434 | return -ENODEV; | ||
1435 | } | ||
1436 | |||
1384 | major = register_chrdev(0, module_name, &vpe_fops); | 1437 | major = register_chrdev(0, module_name, &vpe_fops); |
1385 | if (major < 0) { | 1438 | if (major < 0) { |
1386 | printk("VPE loader: unable to register character device\n"); | 1439 | printk("VPE loader: unable to register character device\n"); |
1387 | return major; | 1440 | return major; |
1388 | } | 1441 | } |
1389 | 1442 | ||
1390 | dev = device_create(mt_class, NULL, MKDEV(major, minor), | 1443 | err = class_register(&vpe_class); |
1391 | "tc%d", minor); | 1444 | if (err) { |
1392 | if (IS_ERR(dev)) { | 1445 | printk(KERN_ERR "vpe_class registration failed\n"); |
1393 | err = PTR_ERR(dev); | ||
1394 | goto out_chrdev; | 1446 | goto out_chrdev; |
1395 | } | 1447 | } |
1396 | vpe_dev = dev; | ||
1397 | 1448 | ||
1398 | dmt(); | 1449 | class_device_initialize(&vpe_device); |
1399 | dvpe(); | 1450 | vpe_device.class = &vpe_class, |
1451 | vpe_device.parent = NULL, | ||
1452 | strlcpy(vpe_device.class_id, "vpe1", BUS_ID_SIZE); | ||
1453 | vpe_device.devt = MKDEV(major, minor); | ||
1454 | err = class_device_add(&vpe_device); | ||
1455 | if (err) { | ||
1456 | printk(KERN_ERR "Adding vpe_device failed\n"); | ||
1457 | goto out_class; | ||
1458 | } | ||
1459 | |||
1460 | local_irq_save(flags); | ||
1461 | mtflags = dmt(); | ||
1462 | vpflags = dvpe(); | ||
1400 | 1463 | ||
1401 | /* Put MVPE's into 'configuration state' */ | 1464 | /* Put MVPE's into 'configuration state' */ |
1402 | set_c0_mvpcontrol(MVPCONTROL_VPC); | 1465 | set_c0_mvpcontrol(MVPCONTROL_VPC); |
1403 | 1466 | ||
1404 | /* dump_mtregs(); */ | 1467 | /* dump_mtregs(); */ |
1405 | 1468 | ||
1406 | |||
1407 | val = read_c0_mvpconf0(); | 1469 | val = read_c0_mvpconf0(); |
1408 | for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) { | 1470 | hw_tcs = (val & MVPCONF0_PTC) + 1; |
1409 | t = alloc_tc(i); | 1471 | hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; |
1472 | |||
1473 | for (tc = tclimit; tc < hw_tcs; tc++) { | ||
1474 | /* | ||
1475 | * Must re-enable multithreading temporarily or in case we | ||
1476 | * reschedule send IPIs or similar we might hang. | ||
1477 | */ | ||
1478 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1479 | evpe(vpflags); | ||
1480 | emt(mtflags); | ||
1481 | local_irq_restore(flags); | ||
1482 | t = alloc_tc(tc); | ||
1483 | if (!t) { | ||
1484 | err = -ENOMEM; | ||
1485 | goto out; | ||
1486 | } | ||
1487 | |||
1488 | local_irq_save(flags); | ||
1489 | mtflags = dmt(); | ||
1490 | vpflags = dvpe(); | ||
1491 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1410 | 1492 | ||
1411 | /* VPE's */ | 1493 | /* VPE's */ |
1412 | if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) { | 1494 | if (tc < hw_tcs) { |
1413 | settc(i); | 1495 | settc(tc); |
1414 | 1496 | ||
1415 | if ((v = alloc_vpe(i)) == NULL) { | 1497 | if ((v = alloc_vpe(tc)) == NULL) { |
1416 | printk(KERN_WARNING "VPE: unable to allocate VPE\n"); | 1498 | printk(KERN_WARNING "VPE: unable to allocate VPE\n"); |
1417 | return -ENODEV; | 1499 | |
1500 | goto out_reenable; | ||
1418 | } | 1501 | } |
1419 | 1502 | ||
1503 | v->ntcs = hw_tcs - tclimit; | ||
1504 | |||
1420 | /* add the tc to the list of this vpe's tc's. */ | 1505 | /* add the tc to the list of this vpe's tc's. */ |
1421 | list_add(&t->tc, &v->tc); | 1506 | list_add(&t->tc, &v->tc); |
1422 | 1507 | ||
1423 | /* deactivate all but vpe0 */ | 1508 | /* deactivate all but vpe0 */ |
1424 | if (i != 0) { | 1509 | if (tc >= tclimit) { |
1425 | unsigned long tmp = read_vpe_c0_vpeconf0(); | 1510 | unsigned long tmp = read_vpe_c0_vpeconf0(); |
1426 | 1511 | ||
1427 | tmp &= ~VPECONF0_VPA; | 1512 | tmp &= ~VPECONF0_VPA; |
@@ -1434,7 +1519,7 @@ static int __init vpe_module_init(void) | |||
1434 | /* disable multi-threading with TC's */ | 1519 | /* disable multi-threading with TC's */ |
1435 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); | 1520 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); |
1436 | 1521 | ||
1437 | if (i != 0) { | 1522 | if (tc >= vpelimit) { |
1438 | /* | 1523 | /* |
1439 | * Set config to be the same as vpe0, | 1524 | * Set config to be the same as vpe0, |
1440 | * particularly kseg0 coherency alg | 1525 | * particularly kseg0 coherency alg |
@@ -1446,10 +1531,10 @@ static int __init vpe_module_init(void) | |||
1446 | /* TC's */ | 1531 | /* TC's */ |
1447 | t->pvpe = v; /* set the parent vpe */ | 1532 | t->pvpe = v; /* set the parent vpe */ |
1448 | 1533 | ||
1449 | if (i != 0) { | 1534 | if (tc >= tclimit) { |
1450 | unsigned long tmp; | 1535 | unsigned long tmp; |
1451 | 1536 | ||
1452 | settc(i); | 1537 | settc(tc); |
1453 | 1538 | ||
1454 | /* Any TC that is bound to VPE0 gets left as is - in case | 1539 | /* Any TC that is bound to VPE0 gets left as is - in case |
1455 | we are running SMTC on VPE0. A TC that is bound to any | 1540 | we are running SMTC on VPE0. A TC that is bound to any |
@@ -1479,17 +1564,25 @@ static int __init vpe_module_init(void) | |||
1479 | } | 1564 | } |
1480 | } | 1565 | } |
1481 | 1566 | ||
1567 | out_reenable: | ||
1482 | /* release config state */ | 1568 | /* release config state */ |
1483 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 1569 | clear_c0_mvpcontrol(MVPCONTROL_VPC); |
1484 | 1570 | ||
1571 | evpe(vpflags); | ||
1572 | emt(mtflags); | ||
1573 | local_irq_restore(flags); | ||
1574 | |||
1485 | #ifdef CONFIG_MIPS_APSP_KSPD | 1575 | #ifdef CONFIG_MIPS_APSP_KSPD |
1486 | kspd_events.kspd_sp_exit = kspd_sp_exit; | 1576 | kspd_events.kspd_sp_exit = kspd_sp_exit; |
1487 | #endif | 1577 | #endif |
1488 | return 0; | 1578 | return 0; |
1489 | 1579 | ||
1580 | out_class: | ||
1581 | class_unregister(&vpe_class); | ||
1490 | out_chrdev: | 1582 | out_chrdev: |
1491 | unregister_chrdev(major, module_name); | 1583 | unregister_chrdev(major, module_name); |
1492 | 1584 | ||
1585 | out: | ||
1493 | return err; | 1586 | return err; |
1494 | } | 1587 | } |
1495 | 1588 | ||
@@ -1503,7 +1596,7 @@ static void __exit vpe_module_exit(void) | |||
1503 | } | 1596 | } |
1504 | } | 1597 | } |
1505 | 1598 | ||
1506 | device_destroy(mt_class, MKDEV(major, minor)); | 1599 | class_device_del(&vpe_device); |
1507 | unregister_chrdev(major, module_name); | 1600 | unregister_chrdev(major, module_name); |
1508 | } | 1601 | } |
1509 | 1602 | ||