Diffstat (limited to 'arch/mips/kernel/vpe.c')
-rw-r--r-- | arch/mips/kernel/vpe.c | 910 |
1 files changed, 124 insertions, 786 deletions
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 59b2b3cd7885..11da314565cc 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1,37 +1,22 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. | 2 | * This file is subject to the terms and conditions of the GNU General Public |
3 | * | 3 | * License. See the file "COPYING" in the main directory of this archive |
4 | * This program is free software; you can distribute it and/or modify it | 4 | * for more details. |
5 | * under the terms of the GNU General Public License (Version 2) as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | 5 | * |
13 | * You should have received a copy of the GNU General Public License along | 6 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. |
14 | * with this program; if not, write to the Free Software Foundation, Inc., | 7 | * Copyright (C) 2013 Imagination Technologies Ltd. |
15 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
16 | */ | ||
17 | |||
18 | /* | ||
19 | * VPE support module | ||
20 | * | ||
21 | * Provides support for loading a MIPS SP program on VPE1. | ||
22 | * The SP environment is rather simple, no tlb's. It needs to be relocatable | ||
23 | * (or partially linked). You should initialise your stack in the startup | ||
24 | * code. This loader looks for the symbol __start and sets up | ||
25 | * execution to resume from there. The MIPS SDE kit contains suitable examples. | ||
26 | * | 8 | * |
27 | * To load and run, simply cat a SP 'program file' to /dev/vpe1. | 9 | * VPE support module for loading a MIPS SP program into VPE1. The SP |
28 | * i.e cat spapp >/dev/vpe1. | 10 | * environment is rather simple since there are no TLBs. It needs |
11 | * to be relocatable (or partially linked). Initialize your stack in | ||
12 | * the startup code. The loader looks for the symbol __start and sets | ||
13 | * up execution to resume from there. To load and run, simply | ||
14 | * cat an SP binary to the /dev/vpe1 device. | ||
29 | */ | 15 | */ |
30 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
31 | #include <linux/device.h> | 17 | #include <linux/device.h> |
32 | #include <linux/fs.h> | 18 | #include <linux/fs.h> |
33 | #include <linux/init.h> | 19 | #include <linux/init.h> |
34 | #include <asm/uaccess.h> | ||
35 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
36 | #include <linux/list.h> | 21 | #include <linux/list.h> |
37 | #include <linux/vmalloc.h> | 22 | #include <linux/vmalloc.h> |
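The rewritten header comment above spells out the user-space contract: stream a relocatable SP ELF image into /dev/vpe1 and, when the file is closed, the loader resolves __start and launches the program on VPE1. Below is a minimal sketch of that user-space side, equivalent to "cat spapp > /dev/vpe1"; the input file name spapp.elf is invented for illustration and error handling is kept to the bare minimum.

/*
 * Minimal user-space sketch of the usage model described in the header
 * comment: copy an SP ELF image into /dev/vpe1.  The file name
 * spapp.elf is hypothetical.
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int in = open("spapp.elf", O_RDONLY);	/* hypothetical SP program */
	int out = open("/dev/vpe1", O_WRONLY);	/* device served by vpe_fops */

	if (in < 0 || out < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	while ((n = read(in, buf, sizeof(buf))) > 0) {
		if (write(out, buf, n) != n) {	/* buffered by vpe_write() */
			perror("write");
			return EXIT_FAILURE;
		}
	}

	close(in);
	close(out);	/* vpe_release() ELF-loads the buffer and starts VPE1 */
	return 0;
}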
@@ -46,13 +31,10 @@ | |||
46 | #include <asm/mipsmtregs.h> | 31 | #include <asm/mipsmtregs.h> |
47 | #include <asm/cacheflush.h> | 32 | #include <asm/cacheflush.h> |
48 | #include <linux/atomic.h> | 33 | #include <linux/atomic.h> |
49 | #include <asm/cpu.h> | ||
50 | #include <asm/mips_mt.h> | 34 | #include <asm/mips_mt.h> |
51 | #include <asm/processor.h> | 35 | #include <asm/processor.h> |
52 | #include <asm/vpe.h> | 36 | #include <asm/vpe.h> |
53 | 37 | ||
54 | typedef void *vpe_handle; | ||
55 | |||
56 | #ifndef ARCH_SHF_SMALL | 38 | #ifndef ARCH_SHF_SMALL |
57 | #define ARCH_SHF_SMALL 0 | 39 | #define ARCH_SHF_SMALL 0 |
58 | #endif | 40 | #endif |
@@ -60,96 +42,15 @@ typedef void *vpe_handle; | |||
60 | /* If this is set, the section belongs in the init part of the module */ | 42 | /* If this is set, the section belongs in the init part of the module */ |
61 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) | 43 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) |
62 | 44 | ||
63 | /* | 45 | struct vpe_control vpecontrol = { |
64 | * The number of TCs and VPEs physically available on the core | ||
65 | */ | ||
66 | static int hw_tcs, hw_vpes; | ||
67 | static char module_name[] = "vpe"; | ||
68 | static int major; | ||
69 | static const int minor = 1; /* fixed for now */ | ||
70 | |||
71 | /* grab the likely amount of memory we will need. */ | ||
72 | #ifdef CONFIG_MIPS_VPE_LOADER_TOM | ||
73 | #define P_SIZE (2 * 1024 * 1024) | ||
74 | #else | ||
75 | /* add an overhead to the max kmalloc size for non-striped symbols/etc */ | ||
76 | #define P_SIZE (256 * 1024) | ||
77 | #endif | ||
78 | |||
79 | extern unsigned long physical_memsize; | ||
80 | |||
81 | #define MAX_VPES 16 | ||
82 | #define VPE_PATH_MAX 256 | ||
83 | |||
84 | enum vpe_state { | ||
85 | VPE_STATE_UNUSED = 0, | ||
86 | VPE_STATE_INUSE, | ||
87 | VPE_STATE_RUNNING | ||
88 | }; | ||
89 | |||
90 | enum tc_state { | ||
91 | TC_STATE_UNUSED = 0, | ||
92 | TC_STATE_INUSE, | ||
93 | TC_STATE_RUNNING, | ||
94 | TC_STATE_DYNAMIC | ||
95 | }; | ||
96 | |||
97 | struct vpe { | ||
98 | enum vpe_state state; | ||
99 | |||
100 | /* (device) minor associated with this vpe */ | ||
101 | int minor; | ||
102 | |||
103 | /* elfloader stuff */ | ||
104 | void *load_addr; | ||
105 | unsigned long len; | ||
106 | char *pbuffer; | ||
107 | unsigned long plen; | ||
108 | unsigned int uid, gid; | ||
109 | char cwd[VPE_PATH_MAX]; | ||
110 | |||
111 | unsigned long __start; | ||
112 | |||
113 | /* tc's associated with this vpe */ | ||
114 | struct list_head tc; | ||
115 | |||
116 | /* The list of vpe's */ | ||
117 | struct list_head list; | ||
118 | |||
119 | /* shared symbol address */ | ||
120 | void *shared_ptr; | ||
121 | |||
122 | /* the list of who wants to know when something major happens */ | ||
123 | struct list_head notify; | ||
124 | |||
125 | unsigned int ntcs; | ||
126 | }; | ||
127 | |||
128 | struct tc { | ||
129 | enum tc_state state; | ||
130 | int index; | ||
131 | |||
132 | struct vpe *pvpe; /* parent VPE */ | ||
133 | struct list_head tc; /* The list of TC's with this VPE */ | ||
134 | struct list_head list; /* The global list of tc's */ | ||
135 | }; | ||
136 | |||
137 | struct { | ||
138 | spinlock_t vpe_list_lock; | ||
139 | struct list_head vpe_list; /* Virtual processing elements */ | ||
140 | spinlock_t tc_list_lock; | ||
141 | struct list_head tc_list; /* Thread contexts */ | ||
142 | } vpecontrol = { | ||
143 | .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), | 46 | .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), |
144 | .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), | 47 | .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), |
145 | .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), | 48 | .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), |
146 | .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) | 49 | .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) |
147 | }; | 50 | }; |
148 | 51 | ||
149 | static void release_progmem(void *ptr); | ||
150 | |||
151 | /* get the vpe associated with this minor */ | 52 | /* get the vpe associated with this minor */ |
152 | static struct vpe *get_vpe(int minor) | 53 | struct vpe *get_vpe(int minor) |
153 | { | 54 | { |
154 | struct vpe *res, *v; | 55 | struct vpe *res, *v; |
155 | 56 | ||
@@ -159,7 +60,7 @@ static struct vpe *get_vpe(int minor) | |||
159 | res = NULL; | 60 | res = NULL; |
160 | spin_lock(&vpecontrol.vpe_list_lock); | 61 | spin_lock(&vpecontrol.vpe_list_lock); |
161 | list_for_each_entry(v, &vpecontrol.vpe_list, list) { | 62 | list_for_each_entry(v, &vpecontrol.vpe_list, list) { |
162 | if (v->minor == minor) { | 63 | if (v->minor == VPE_MODULE_MINOR) { |
163 | res = v; | 64 | res = v; |
164 | break; | 65 | break; |
165 | } | 66 | } |
@@ -170,7 +71,7 @@ static struct vpe *get_vpe(int minor) | |||
170 | } | 71 | } |
171 | 72 | ||
172 | /* get the vpe associated with this minor */ | 73 | /* get the vpe associated with this minor */ |
173 | static struct tc *get_tc(int index) | 74 | struct tc *get_tc(int index) |
174 | { | 75 | { |
175 | struct tc *res, *t; | 76 | struct tc *res, *t; |
176 | 77 | ||
@@ -188,12 +89,13 @@ static struct tc *get_tc(int index) | |||
188 | } | 89 | } |
189 | 90 | ||
190 | /* allocate a vpe and associate it with this minor (or index) */ | 91 | /* allocate a vpe and associate it with this minor (or index) */ |
191 | static struct vpe *alloc_vpe(int minor) | 92 | struct vpe *alloc_vpe(int minor) |
192 | { | 93 | { |
193 | struct vpe *v; | 94 | struct vpe *v; |
194 | 95 | ||
195 | if ((v = kzalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) | 96 | v = kzalloc(sizeof(struct vpe), GFP_KERNEL); |
196 | return NULL; | 97 | if (v == NULL) |
98 | goto out; | ||
197 | 99 | ||
198 | INIT_LIST_HEAD(&v->tc); | 100 | INIT_LIST_HEAD(&v->tc); |
199 | spin_lock(&vpecontrol.vpe_list_lock); | 101 | spin_lock(&vpecontrol.vpe_list_lock); |
@@ -201,17 +103,19 @@ static struct vpe *alloc_vpe(int minor) | |||
201 | spin_unlock(&vpecontrol.vpe_list_lock); | 103 | spin_unlock(&vpecontrol.vpe_list_lock); |
202 | 104 | ||
203 | INIT_LIST_HEAD(&v->notify); | 105 | INIT_LIST_HEAD(&v->notify); |
204 | v->minor = minor; | 106 | v->minor = VPE_MODULE_MINOR; |
205 | 107 | ||
108 | out: | ||
206 | return v; | 109 | return v; |
207 | } | 110 | } |
208 | 111 | ||
209 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ | 112 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ |
210 | static struct tc *alloc_tc(int index) | 113 | struct tc *alloc_tc(int index) |
211 | { | 114 | { |
212 | struct tc *tc; | 115 | struct tc *tc; |
213 | 116 | ||
214 | if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) | 117 | tc = kzalloc(sizeof(struct tc), GFP_KERNEL); |
118 | if (tc == NULL) | ||
215 | goto out; | 119 | goto out; |
216 | 120 | ||
217 | INIT_LIST_HEAD(&tc->tc); | 121 | INIT_LIST_HEAD(&tc->tc); |
@@ -226,7 +130,7 @@ out: | |||
226 | } | 130 | } |
227 | 131 | ||
228 | /* clean up and free everything */ | 132 | /* clean up and free everything */ |
229 | static void release_vpe(struct vpe *v) | 133 | void release_vpe(struct vpe *v) |
230 | { | 134 | { |
231 | list_del(&v->list); | 135 | list_del(&v->list); |
232 | if (v->load_addr) | 136 | if (v->load_addr) |
@@ -234,28 +138,8 @@ static void release_vpe(struct vpe *v) | |||
234 | kfree(v); | 138 | kfree(v); |
235 | } | 139 | } |
236 | 140 | ||
237 | static void __maybe_unused dump_mtregs(void) | 141 | /* Find some VPE program space */ |
238 | { | 142 | void *alloc_progmem(unsigned long len) |
239 | unsigned long val; | ||
240 | |||
241 | val = read_c0_config3(); | ||
242 | printk("config3 0x%lx MT %ld\n", val, | ||
243 | (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT); | ||
244 | |||
245 | val = read_c0_mvpcontrol(); | ||
246 | printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val, | ||
247 | (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT, | ||
248 | (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT, | ||
249 | (val & MVPCONTROL_EVP)); | ||
250 | |||
251 | val = read_c0_mvpconf0(); | ||
252 | printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val, | ||
253 | (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT, | ||
254 | val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT); | ||
255 | } | ||
256 | |||
257 | /* Find some VPE program space */ | ||
258 | static void *alloc_progmem(unsigned long len) | ||
259 | { | 143 | { |
260 | void *addr; | 144 | void *addr; |
261 | 145 | ||
@@ -274,7 +158,7 @@ static void *alloc_progmem(unsigned long len) | |||
274 | return addr; | 158 | return addr; |
275 | } | 159 | } |
276 | 160 | ||
277 | static void release_progmem(void *ptr) | 161 | void release_progmem(void *ptr) |
278 | { | 162 | { |
279 | #ifndef CONFIG_MIPS_VPE_LOADER_TOM | 163 | #ifndef CONFIG_MIPS_VPE_LOADER_TOM |
280 | kfree(ptr); | 164 | kfree(ptr); |
@@ -282,7 +166,7 @@ static void release_progmem(void *ptr) | |||
282 | } | 166 | } |
283 | 167 | ||
284 | /* Update size with this section: return offset. */ | 168 | /* Update size with this section: return offset. */ |
285 | static long get_offset(unsigned long *size, Elf_Shdr * sechdr) | 169 | static long get_offset(unsigned long *size, Elf_Shdr *sechdr) |
286 | { | 170 | { |
287 | long ret; | 171 | long ret; |
288 | 172 | ||
@@ -295,8 +179,8 @@ static long get_offset(unsigned long *size, Elf_Shdr * sechdr) | |||
295 | might -- code, read-only data, read-write data, small data. Tally | 179 | might -- code, read-only data, read-write data, small data. Tally |
296 | sizes, and place the offsets into sh_entsize fields: high bit means it | 180 | sizes, and place the offsets into sh_entsize fields: high bit means it |
297 | belongs in init. */ | 181 | belongs in init. */ |
298 | static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, | 182 | static void layout_sections(struct module *mod, const Elf_Ehdr *hdr, |
299 | Elf_Shdr * sechdrs, const char *secstrings) | 183 | Elf_Shdr *sechdrs, const char *secstrings) |
300 | { | 184 | { |
301 | static unsigned long const masks[][2] = { | 185 | static unsigned long const masks[][2] = { |
302 | /* NOTE: all executable code must be the first section | 186 | /* NOTE: all executable code must be the first section |
@@ -316,7 +200,6 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, | |||
316 | for (i = 0; i < hdr->e_shnum; ++i) { | 200 | for (i = 0; i < hdr->e_shnum; ++i) { |
317 | Elf_Shdr *s = &sechdrs[i]; | 201 | Elf_Shdr *s = &sechdrs[i]; |
318 | 202 | ||
319 | // || strncmp(secstrings + s->sh_name, ".init", 5) == 0) | ||
320 | if ((s->sh_flags & masks[m][0]) != masks[m][0] | 203 | if ((s->sh_flags & masks[m][0]) != masks[m][0] |
321 | || (s->sh_flags & masks[m][1]) | 204 | || (s->sh_flags & masks[m][1]) |
322 | || s->sh_entsize != ~0UL) | 205 | || s->sh_entsize != ~0UL) |
@@ -331,7 +214,6 @@ static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, | |||
331 | } | 214 | } |
332 | } | 215 | } |
333 | 216 | ||
334 | |||
335 | /* from module-elf32.c, but subverted a little */ | 217 | /* from module-elf32.c, but subverted a little */ |
336 | 218 | ||
337 | struct mips_hi16 { | 219 | struct mips_hi16 { |
@@ -354,20 +236,18 @@ static int apply_r_mips_gprel16(struct module *me, uint32_t *location, | |||
354 | { | 236 | { |
355 | int rel; | 237 | int rel; |
356 | 238 | ||
357 | if( !(*location & 0xffff) ) { | 239 | if (!(*location & 0xffff)) { |
358 | rel = (int)v - gp_addr; | 240 | rel = (int)v - gp_addr; |
359 | } | 241 | } else { |
360 | else { | ||
361 | /* .sbss + gp(relative) + offset */ | 242 | /* .sbss + gp(relative) + offset */ |
362 | /* kludge! */ | 243 | /* kludge! */ |
363 | rel = (int)(short)((int)v + gp_offs + | 244 | rel = (int)(short)((int)v + gp_offs + |
364 | (int)(short)(*location & 0xffff) - gp_addr); | 245 | (int)(short)(*location & 0xffff) - gp_addr); |
365 | } | 246 | } |
366 | 247 | ||
367 | if( (rel > 32768) || (rel < -32768) ) { | 248 | if ((rel > 32768) || (rel < -32768)) { |
368 | printk(KERN_DEBUG "VPE loader: apply_r_mips_gprel16: " | 249 | pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n", |
369 | "relative address 0x%x out of range of gp register\n", | 250 | rel); |
370 | rel); | ||
371 | return -ENOEXEC; | 251 | return -ENOEXEC; |
372 | } | 252 | } |
373 | 253 | ||
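The gp-relative handler above works because every small-data object must sit within a signed 16-bit displacement of the _gp register, which is why an out-of-range value aborts the load. A standalone sketch of that displacement check, with invented addresses:

/* Sketch of the GPREL16 range check: the 16-bit field must reach the
 * symbol from _gp, so displacements outside +/-32 KiB are rejected.
 * Both addresses below are made up for illustration. */
#include <stdio.h>

int main(void)
{
	unsigned int gp_addr = 0x80418000;	/* made-up _gp value */
	unsigned int v       = 0x8041c000;	/* made-up .sdata symbol address */

	int rel = (int)(v - gp_addr);		/* signed displacement from _gp */

	if (rel > 32767 || rel < -32768) {
		printf("relative address %d out of range of gp register\n", rel);
		return 1;
	}
	printf("GPREL16 displacement: %d\n", rel);	/* 16384 here, fits */
	return 0;
}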
@@ -381,12 +261,12 @@ static int apply_r_mips_pc16(struct module *me, uint32_t *location, | |||
381 | { | 261 | { |
382 | int rel; | 262 | int rel; |
383 | rel = (((unsigned int)v - (unsigned int)location)); | 263 | rel = (((unsigned int)v - (unsigned int)location)); |
384 | rel >>= 2; // because the offset is in _instructions_ not bytes. | 264 | rel >>= 2; /* because the offset is in _instructions_ not bytes. */ |
385 | rel -= 1; // and one instruction less due to the branch delay slot. | 265 | rel -= 1; /* and one instruction less due to the branch delay slot. */ |
386 | 266 | ||
387 | if( (rel > 32768) || (rel < -32768) ) { | 267 | if ((rel > 32768) || (rel < -32768)) { |
388 | printk(KERN_DEBUG "VPE loader: " | 268 | pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n", |
389 | "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); | 269 | rel); |
390 | return -ENOEXEC; | 270 | return -ENOEXEC; |
391 | } | 271 | } |
392 | 272 | ||
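The two comments retained above are the whole story for R_MIPS_PC16: the 16-bit field counts instructions rather than bytes, measured from the instruction after the branch (the delay slot). A small standalone sketch of that arithmetic, with invented addresses:

/* Sketch of the R_MIPS_PC16 offset computation, using made-up addresses. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t location = 0x80010000;	/* address of the branch instruction */
	uint32_t target   = 0x80010020;	/* symbol the branch must reach */

	int rel = (int)(target - location);
	rel >>= 2;	/* field counts 4-byte instructions, not bytes */
	rel -= 1;	/* PC has already advanced past the delay slot */

	/* rel is 7 here: the CPU adds (rel << 2) to the delay-slot PC. */
	printf("encoded offset: %d\n", rel);
	return 0;
}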
@@ -407,8 +287,7 @@ static int apply_r_mips_26(struct module *me, uint32_t *location, | |||
407 | Elf32_Addr v) | 287 | Elf32_Addr v) |
408 | { | 288 | { |
409 | if (v % 4) { | 289 | if (v % 4) { |
410 | printk(KERN_DEBUG "VPE loader: apply_r_mips_26 " | 290 | pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n"); |
411 | " unaligned relocation\n"); | ||
412 | return -ENOEXEC; | 291 | return -ENOEXEC; |
413 | } | 292 | } |
414 | 293 | ||
@@ -439,7 +318,7 @@ static int apply_r_mips_hi16(struct module *me, uint32_t *location, | |||
439 | * the carry we need to add. Save the information, and let LO16 do the | 318 | * the carry we need to add. Save the information, and let LO16 do the |
440 | * actual relocation. | 319 | * actual relocation. |
441 | */ | 320 | */ |
442 | n = kmalloc(sizeof *n, GFP_KERNEL); | 321 | n = kmalloc(sizeof(*n), GFP_KERNEL); |
443 | if (!n) | 322 | if (!n) |
444 | return -ENOMEM; | 323 | return -ENOMEM; |
445 | 324 | ||
@@ -471,9 +350,7 @@ static int apply_r_mips_lo16(struct module *me, uint32_t *location, | |||
471 | * The value for the HI16 had best be the same. | 350 | * The value for the HI16 had best be the same. |
472 | */ | 351 | */ |
473 | if (v != l->value) { | 352 | if (v != l->value) { |
474 | printk(KERN_DEBUG "VPE loader: " | 353 | pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n"); |
475 | "apply_r_mips_lo16/hi16: \t" | ||
476 | "inconsistent value information\n"); | ||
477 | goto out_free; | 354 | goto out_free; |
478 | } | 355 | } |
479 | 356 | ||
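The HI16 handler earlier defers its work because the paired LO16 half is sign-extended by the CPU, so the upper half sometimes needs a carry that is only known once both halves are seen — which is also why the consistency check above insists the two relocations agree on the value. A simplified standalone sketch of that carry rule (it leaves out the driver's full addend bookkeeping); the address is invented:

/* Sketch of the HI16/LO16 carry handling: because the LO16 half is
 * sign-extended, the HI16 half must absorb a carry whenever bit 15 of
 * the low half is set.  The address below is made up. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v = 0x80409abc;		/* made-up relocated address */

	uint16_t lo = v & 0xffff;		/* goes into the LO16 field */
	uint16_t hi = (v + 0x8000) >> 16;	/* HI16 field, carry folded in */

	/* lui reg, hi ; addiu reg, reg, (short)lo reconstructs v exactly. */
	uint32_t rebuilt = ((uint32_t)hi << 16) + (int16_t)lo;
	printf("v=0x%08x rebuilt=0x%08x\n", (unsigned)v, (unsigned)rebuilt);
	return 0;
}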
@@ -569,20 +446,19 @@ static int apply_relocations(Elf32_Shdr *sechdrs, | |||
569 | + ELF32_R_SYM(r_info); | 446 | + ELF32_R_SYM(r_info); |
570 | 447 | ||
571 | if (!sym->st_value) { | 448 | if (!sym->st_value) { |
572 | printk(KERN_DEBUG "%s: undefined weak symbol %s\n", | 449 | pr_debug("%s: undefined weak symbol %s\n", |
573 | me->name, strtab + sym->st_name); | 450 | me->name, strtab + sym->st_name); |
574 | /* just print the warning, dont barf */ | 451 | /* just print the warning, dont barf */ |
575 | } | 452 | } |
576 | 453 | ||
577 | v = sym->st_value; | 454 | v = sym->st_value; |
578 | 455 | ||
579 | res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); | 456 | res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); |
580 | if( res ) { | 457 | if (res) { |
581 | char *r = rstrs[ELF32_R_TYPE(r_info)]; | 458 | char *r = rstrs[ELF32_R_TYPE(r_info)]; |
582 | printk(KERN_WARNING "VPE loader: .text+0x%x " | 459 | pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n", |
583 | "relocation type %s for symbol \"%s\" failed\n", | 460 | rel[i].r_offset, r ? r : "UNKNOWN", |
584 | rel[i].r_offset, r ? r : "UNKNOWN", | 461 | strtab + sym->st_name); |
585 | strtab + sym->st_name); | ||
586 | return res; | 462 | return res; |
587 | } | 463 | } |
588 | } | 464 | } |
@@ -597,10 +473,8 @@ static inline void save_gp_address(unsigned int secbase, unsigned int rel) | |||
597 | } | 473 | } |
598 | /* end module-elf32.c */ | 474 | /* end module-elf32.c */ |
599 | 475 | ||
600 | |||
601 | |||
602 | /* Change all symbols so that sh_value encodes the pointer directly. */ | 476 | /* Change all symbols so that sh_value encodes the pointer directly. */ |
603 | static void simplify_symbols(Elf_Shdr * sechdrs, | 477 | static void simplify_symbols(Elf_Shdr *sechdrs, |
604 | unsigned int symindex, | 478 | unsigned int symindex, |
605 | const char *strtab, | 479 | const char *strtab, |
606 | const char *secstrings, | 480 | const char *secstrings, |
@@ -641,18 +515,16 @@ static void simplify_symbols(Elf_Shdr * sechdrs, | |||
641 | break; | 515 | break; |
642 | 516 | ||
643 | case SHN_MIPS_SCOMMON: | 517 | case SHN_MIPS_SCOMMON: |
644 | printk(KERN_DEBUG "simplify_symbols: ignoring SHN_MIPS_SCOMMON " | 518 | pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n", |
645 | "symbol <%s> st_shndx %d\n", strtab + sym[i].st_name, | 519 | strtab + sym[i].st_name, sym[i].st_shndx); |
646 | sym[i].st_shndx); | 520 | /* .sbss section */ |
647 | // .sbss section | ||
648 | break; | 521 | break; |
649 | 522 | ||
650 | default: | 523 | default: |
651 | secbase = sechdrs[sym[i].st_shndx].sh_addr; | 524 | secbase = sechdrs[sym[i].st_shndx].sh_addr; |
652 | 525 | ||
653 | if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) { | 526 | if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) |
654 | save_gp_address(secbase, sym[i].st_value); | 527 | save_gp_address(secbase, sym[i].st_value); |
655 | } | ||
656 | 528 | ||
657 | sym[i].st_value += secbase; | 529 | sym[i].st_value += secbase; |
658 | break; | 530 | break; |
@@ -661,142 +533,21 @@ static void simplify_symbols(Elf_Shdr * sechdrs, | |||
661 | } | 533 | } |
662 | 534 | ||
663 | #ifdef DEBUG_ELFLOADER | 535 | #ifdef DEBUG_ELFLOADER |
664 | static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex, | 536 | static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex, |
665 | const char *strtab, struct module *mod) | 537 | const char *strtab, struct module *mod) |
666 | { | 538 | { |
667 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | 539 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; |
668 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | 540 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); |
669 | 541 | ||
670 | printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n); | 542 | pr_debug("dump_elfsymbols: n %d\n", n); |
671 | for (i = 1; i < n; i++) { | 543 | for (i = 1; i < n; i++) { |
672 | printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i, | 544 | pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name, |
673 | strtab + sym[i].st_name, sym[i].st_value); | 545 | sym[i].st_value); |
674 | } | 546 | } |
675 | } | 547 | } |
676 | #endif | 548 | #endif |
677 | 549 | ||
678 | /* We are prepared so configure and start the VPE... */ | 550 | static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs, |
679 | static int vpe_run(struct vpe * v) | ||
680 | { | ||
681 | unsigned long flags, val, dmt_flag; | ||
682 | struct vpe_notifications *n; | ||
683 | unsigned int vpeflags; | ||
684 | struct tc *t; | ||
685 | |||
686 | /* check we are the Master VPE */ | ||
687 | local_irq_save(flags); | ||
688 | val = read_c0_vpeconf0(); | ||
689 | if (!(val & VPECONF0_MVP)) { | ||
690 | printk(KERN_WARNING | ||
691 | "VPE loader: only Master VPE's are allowed to configure MT\n"); | ||
692 | local_irq_restore(flags); | ||
693 | |||
694 | return -1; | ||
695 | } | ||
696 | |||
697 | dmt_flag = dmt(); | ||
698 | vpeflags = dvpe(); | ||
699 | |||
700 | if (list_empty(&v->tc)) { | ||
701 | evpe(vpeflags); | ||
702 | emt(dmt_flag); | ||
703 | local_irq_restore(flags); | ||
704 | |||
705 | printk(KERN_WARNING | ||
706 | "VPE loader: No TC's associated with VPE %d\n", | ||
707 | v->minor); | ||
708 | |||
709 | return -ENOEXEC; | ||
710 | } | ||
711 | |||
712 | t = list_first_entry(&v->tc, struct tc, tc); | ||
713 | |||
714 | /* Put MVPE's into 'configuration state' */ | ||
715 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
716 | |||
717 | settc(t->index); | ||
718 | |||
719 | /* should check it is halted, and not activated */ | ||
720 | if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { | ||
721 | evpe(vpeflags); | ||
722 | emt(dmt_flag); | ||
723 | local_irq_restore(flags); | ||
724 | |||
725 | printk(KERN_WARNING "VPE loader: TC %d is already active!\n", | ||
726 | t->index); | ||
727 | |||
728 | return -ENOEXEC; | ||
729 | } | ||
730 | |||
731 | /* Write the address we want it to start running from in the TCPC register. */ | ||
732 | write_tc_c0_tcrestart((unsigned long)v->__start); | ||
733 | write_tc_c0_tccontext((unsigned long)0); | ||
734 | |||
735 | /* | ||
736 | * Mark the TC as activated, not interrupt exempt and not dynamically | ||
737 | * allocatable | ||
738 | */ | ||
739 | val = read_tc_c0_tcstatus(); | ||
740 | val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; | ||
741 | write_tc_c0_tcstatus(val); | ||
742 | |||
743 | write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); | ||
744 | |||
745 | /* | ||
746 | * The sde-kit passes 'memsize' to __start in $a3, so set something | ||
747 | * here... Or set $a3 to zero and define DFLT_STACK_SIZE and | ||
748 | * DFLT_HEAP_SIZE when you compile your program | ||
749 | */ | ||
750 | mttgpr(6, v->ntcs); | ||
751 | mttgpr(7, physical_memsize); | ||
752 | |||
753 | /* set up VPE1 */ | ||
754 | /* | ||
755 | * bind the TC to VPE 1 as late as possible so we only have the final | ||
756 | * VPE registers to set up, and so an EJTAG probe can trigger on it | ||
757 | */ | ||
758 | write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); | ||
759 | |||
760 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); | ||
761 | |||
762 | back_to_back_c0_hazard(); | ||
763 | |||
764 | /* Set up the XTC bit in vpeconf0 to point at our tc */ | ||
765 | write_vpe_c0_vpeconf0( (read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) | ||
766 | | (t->index << VPECONF0_XTC_SHIFT)); | ||
767 | |||
768 | back_to_back_c0_hazard(); | ||
769 | |||
770 | /* enable this VPE */ | ||
771 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); | ||
772 | |||
773 | /* clear out any left overs from a previous program */ | ||
774 | write_vpe_c0_status(0); | ||
775 | write_vpe_c0_cause(0); | ||
776 | |||
777 | /* take system out of configuration state */ | ||
778 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
779 | |||
780 | /* | ||
781 | * SMTC/SMVP kernels manage VPE enable independently, | ||
782 | * but uniprocessor kernels need to turn it on, even | ||
783 | * if that wasn't the pre-dvpe() state. | ||
784 | */ | ||
785 | #ifdef CONFIG_SMP | ||
786 | evpe(vpeflags); | ||
787 | #else | ||
788 | evpe(EVPE_ENABLE); | ||
789 | #endif | ||
790 | emt(dmt_flag); | ||
791 | local_irq_restore(flags); | ||
792 | |||
793 | list_for_each_entry(n, &v->notify, list) | ||
794 | n->start(minor); | ||
795 | |||
796 | return 0; | ||
797 | } | ||
798 | |||
799 | static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, | ||
800 | unsigned int symindex, const char *strtab, | 551 | unsigned int symindex, const char *strtab, |
801 | struct module *mod) | 552 | struct module *mod) |
802 | { | 553 | { |
@@ -804,16 +555,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, | |||
804 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | 555 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); |
805 | 556 | ||
806 | for (i = 1; i < n; i++) { | 557 | for (i = 1; i < n; i++) { |
807 | if (strcmp(strtab + sym[i].st_name, "__start") == 0) { | 558 | if (strcmp(strtab + sym[i].st_name, "__start") == 0) |
808 | v->__start = sym[i].st_value; | 559 | v->__start = sym[i].st_value; |
809 | } | ||
810 | 560 | ||
811 | if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) { | 561 | if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) |
812 | v->shared_ptr = (void *)sym[i].st_value; | 562 | v->shared_ptr = (void *)sym[i].st_value; |
813 | } | ||
814 | } | 563 | } |
815 | 564 | ||
816 | if ( (v->__start == 0) || (v->shared_ptr == NULL)) | 565 | if ((v->__start == 0) || (v->shared_ptr == NULL)) |
817 | return -1; | 566 | return -1; |
818 | 567 | ||
819 | return 0; | 568 | return 0; |
@@ -824,14 +573,14 @@ static int find_vpe_symbols(struct vpe * v, Elf_Shdr * sechdrs, | |||
824 | * contents of the program (p)buffer performing relocatations/etc, free's it | 573 | * contents of the program (p)buffer performing relocatations/etc, free's it |
825 | * when finished. | 574 | * when finished. |
826 | */ | 575 | */ |
827 | static int vpe_elfload(struct vpe * v) | 576 | static int vpe_elfload(struct vpe *v) |
828 | { | 577 | { |
829 | Elf_Ehdr *hdr; | 578 | Elf_Ehdr *hdr; |
830 | Elf_Shdr *sechdrs; | 579 | Elf_Shdr *sechdrs; |
831 | long err = 0; | 580 | long err = 0; |
832 | char *secstrings, *strtab = NULL; | 581 | char *secstrings, *strtab = NULL; |
833 | unsigned int len, i, symindex = 0, strindex = 0, relocate = 0; | 582 | unsigned int len, i, symindex = 0, strindex = 0, relocate = 0; |
834 | struct module mod; // so we can re-use the relocations code | 583 | struct module mod; /* so we can re-use the relocations code */ |
835 | 584 | ||
836 | memset(&mod, 0, sizeof(struct module)); | 585 | memset(&mod, 0, sizeof(struct module)); |
837 | strcpy(mod.name, "VPE loader"); | 586 | strcpy(mod.name, "VPE loader"); |
@@ -845,8 +594,7 @@ static int vpe_elfload(struct vpe * v) | |||
845 | || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC) | 594 | || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC) |
846 | || !elf_check_arch(hdr) | 595 | || !elf_check_arch(hdr) |
847 | || hdr->e_shentsize != sizeof(*sechdrs)) { | 596 | || hdr->e_shentsize != sizeof(*sechdrs)) { |
848 | printk(KERN_WARNING | 597 | pr_warn("VPE loader: program wrong arch or weird elf version\n"); |
849 | "VPE loader: program wrong arch or weird elf version\n"); | ||
850 | 598 | ||
851 | return -ENOEXEC; | 599 | return -ENOEXEC; |
852 | } | 600 | } |
@@ -855,8 +603,7 @@ static int vpe_elfload(struct vpe * v) | |||
855 | relocate = 1; | 603 | relocate = 1; |
856 | 604 | ||
857 | if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { | 605 | if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { |
858 | printk(KERN_ERR "VPE loader: program length %u truncated\n", | 606 | pr_err("VPE loader: program length %u truncated\n", len); |
859 | len); | ||
860 | 607 | ||
861 | return -ENOEXEC; | 608 | return -ENOEXEC; |
862 | } | 609 | } |
@@ -871,22 +618,24 @@ static int vpe_elfload(struct vpe * v) | |||
871 | 618 | ||
872 | if (relocate) { | 619 | if (relocate) { |
873 | for (i = 1; i < hdr->e_shnum; i++) { | 620 | for (i = 1; i < hdr->e_shnum; i++) { |
874 | if (sechdrs[i].sh_type != SHT_NOBITS | 621 | if ((sechdrs[i].sh_type != SHT_NOBITS) && |
875 | && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) { | 622 | (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) { |
876 | printk(KERN_ERR "VPE program length %u truncated\n", | 623 | pr_err("VPE program length %u truncated\n", |
877 | len); | 624 | len); |
878 | return -ENOEXEC; | 625 | return -ENOEXEC; |
879 | } | 626 | } |
880 | 627 | ||
881 | /* Mark all sections sh_addr with their address in the | 628 | /* Mark all sections sh_addr with their address in the |
882 | temporary image. */ | 629 | temporary image. */ |
883 | sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; | 630 | sechdrs[i].sh_addr = (size_t) hdr + |
631 | sechdrs[i].sh_offset; | ||
884 | 632 | ||
885 | /* Internal symbols and strings. */ | 633 | /* Internal symbols and strings. */ |
886 | if (sechdrs[i].sh_type == SHT_SYMTAB) { | 634 | if (sechdrs[i].sh_type == SHT_SYMTAB) { |
887 | symindex = i; | 635 | symindex = i; |
888 | strindex = sechdrs[i].sh_link; | 636 | strindex = sechdrs[i].sh_link; |
889 | strtab = (char *)hdr + sechdrs[strindex].sh_offset; | 637 | strtab = (char *)hdr + |
638 | sechdrs[strindex].sh_offset; | ||
890 | } | 639 | } |
891 | } | 640 | } |
892 | layout_sections(&mod, hdr, sechdrs, secstrings); | 641 | layout_sections(&mod, hdr, sechdrs, secstrings); |
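The loop above first points every section's sh_addr at its copy inside the temporary image, then remembers where the symbol table and its string table live (the sh_link field of an SHT_SYMTAB section names the matching string table). A user-space sketch of that walk over an in-memory ELF image; the helper name find_tables and its parameter names are invented, not the driver's:

/*
 * Sketch of locating the symbol table and its string table in an
 * in-memory ELF image, mirroring the SHT_SYMTAB/sh_link walk above.
 */
#include <elf.h>

void find_tables(const Elf32_Ehdr *hdr, const Elf32_Shdr *sechdrs,
		 const Elf32_Sym **sym, const char **strtab)
{
	unsigned int i;

	for (i = 1; i < hdr->e_shnum; i++) {
		if (sechdrs[i].sh_type != SHT_SYMTAB)
			continue;

		/* section contents sit at sh_offset inside the raw image */
		*sym = (const Elf32_Sym *)((const char *)hdr +
					   sechdrs[i].sh_offset);

		/* sh_link of a symtab names the section holding its strings */
		*strtab = (const char *)hdr +
			  sechdrs[sechdrs[i].sh_link].sh_offset;
	}
}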
@@ -913,8 +662,9 @@ static int vpe_elfload(struct vpe * v) | |||
913 | /* Update sh_addr to point to copy in image. */ | 662 | /* Update sh_addr to point to copy in image. */ |
914 | sechdrs[i].sh_addr = (unsigned long)dest; | 663 | sechdrs[i].sh_addr = (unsigned long)dest; |
915 | 664 | ||
916 | printk(KERN_DEBUG " section sh_name %s sh_addr 0x%x\n", | 665 | pr_debug(" section sh_name %s sh_addr 0x%x\n", |
917 | secstrings + sechdrs[i].sh_name, sechdrs[i].sh_addr); | 666 | secstrings + sechdrs[i].sh_name, |
667 | sechdrs[i].sh_addr); | ||
918 | } | 668 | } |
919 | 669 | ||
920 | /* Fix up syms, so that st_value is a pointer to location. */ | 670 | /* Fix up syms, so that st_value is a pointer to location. */ |
@@ -935,17 +685,18 @@ static int vpe_elfload(struct vpe * v) | |||
935 | continue; | 685 | continue; |
936 | 686 | ||
937 | if (sechdrs[i].sh_type == SHT_REL) | 687 | if (sechdrs[i].sh_type == SHT_REL) |
938 | err = apply_relocations(sechdrs, strtab, symindex, i, | 688 | err = apply_relocations(sechdrs, strtab, |
939 | &mod); | 689 | symindex, i, &mod); |
940 | else if (sechdrs[i].sh_type == SHT_RELA) | 690 | else if (sechdrs[i].sh_type == SHT_RELA) |
941 | err = apply_relocate_add(sechdrs, strtab, symindex, i, | 691 | err = apply_relocate_add(sechdrs, strtab, |
942 | &mod); | 692 | symindex, i, &mod); |
943 | if (err < 0) | 693 | if (err < 0) |
944 | return err; | 694 | return err; |
945 | 695 | ||
946 | } | 696 | } |
947 | } else { | 697 | } else { |
948 | struct elf_phdr *phdr = (struct elf_phdr *) ((char *)hdr + hdr->e_phoff); | 698 | struct elf_phdr *phdr = (struct elf_phdr *) |
699 | ((char *)hdr + hdr->e_phoff); | ||
949 | 700 | ||
950 | for (i = 0; i < hdr->e_phnum; i++) { | 701 | for (i = 0; i < hdr->e_phnum; i++) { |
951 | if (phdr->p_type == PT_LOAD) { | 702 | if (phdr->p_type == PT_LOAD) { |
@@ -963,11 +714,15 @@ static int vpe_elfload(struct vpe * v) | |||
963 | if (sechdrs[i].sh_type == SHT_SYMTAB) { | 714 | if (sechdrs[i].sh_type == SHT_SYMTAB) { |
964 | symindex = i; | 715 | symindex = i; |
965 | strindex = sechdrs[i].sh_link; | 716 | strindex = sechdrs[i].sh_link; |
966 | strtab = (char *)hdr + sechdrs[strindex].sh_offset; | 717 | strtab = (char *)hdr + |
718 | sechdrs[strindex].sh_offset; | ||
967 | 719 | ||
968 | /* mark the symtab's address for when we try to find the | 720 | /* |
969 | magic symbols */ | 721 | * mark symtab's address for when we try |
970 | sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; | 722 | * to find the magic symbols |
723 | */ | ||
724 | sechdrs[i].sh_addr = (size_t) hdr + | ||
725 | sechdrs[i].sh_offset; | ||
971 | } | 726 | } |
972 | } | 727 | } |
973 | } | 728 | } |
@@ -978,53 +733,19 @@ static int vpe_elfload(struct vpe * v) | |||
978 | 733 | ||
979 | if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { | 734 | if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { |
980 | if (v->__start == 0) { | 735 | if (v->__start == 0) { |
981 | printk(KERN_WARNING "VPE loader: program does not contain " | 736 | pr_warn("VPE loader: program does not contain a __start symbol\n"); |
982 | "a __start symbol\n"); | ||
983 | return -ENOEXEC; | 737 | return -ENOEXEC; |
984 | } | 738 | } |
985 | 739 | ||
986 | if (v->shared_ptr == NULL) | 740 | if (v->shared_ptr == NULL) |
987 | printk(KERN_WARNING "VPE loader: " | 741 | pr_warn("VPE loader: program does not contain vpe_shared symbol.\n" |
988 | "program does not contain vpe_shared symbol.\n" | 742 | " Unable to use AMVP (AP/SP) facilities.\n"); |
989 | " Unable to use AMVP (AP/SP) facilities.\n"); | ||
990 | } | 743 | } |
991 | 744 | ||
992 | printk(" elf loaded\n"); | 745 | pr_info(" elf loaded\n"); |
993 | return 0; | 746 | return 0; |
994 | } | 747 | } |
995 | 748 | ||
996 | static void cleanup_tc(struct tc *tc) | ||
997 | { | ||
998 | unsigned long flags; | ||
999 | unsigned int mtflags, vpflags; | ||
1000 | int tmp; | ||
1001 | |||
1002 | local_irq_save(flags); | ||
1003 | mtflags = dmt(); | ||
1004 | vpflags = dvpe(); | ||
1005 | /* Put MVPE's into 'configuration state' */ | ||
1006 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1007 | |||
1008 | settc(tc->index); | ||
1009 | tmp = read_tc_c0_tcstatus(); | ||
1010 | |||
1011 | /* mark not allocated and not dynamically allocatable */ | ||
1012 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
1013 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
1014 | write_tc_c0_tcstatus(tmp); | ||
1015 | |||
1016 | write_tc_c0_tchalt(TCHALT_H); | ||
1017 | mips_ihb(); | ||
1018 | |||
1019 | /* bind it to anything other than VPE1 */ | ||
1020 | // write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE | ||
1021 | |||
1022 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1023 | evpe(vpflags); | ||
1024 | emt(mtflags); | ||
1025 | local_irq_restore(flags); | ||
1026 | } | ||
1027 | |||
1028 | static int getcwd(char *buff, int size) | 749 | static int getcwd(char *buff, int size) |
1029 | { | 750 | { |
1030 | mm_segment_t old_fs; | 751 | mm_segment_t old_fs; |
@@ -1044,52 +765,49 @@ static int getcwd(char *buff, int size) | |||
1044 | static int vpe_open(struct inode *inode, struct file *filp) | 765 | static int vpe_open(struct inode *inode, struct file *filp) |
1045 | { | 766 | { |
1046 | enum vpe_state state; | 767 | enum vpe_state state; |
1047 | struct vpe_notifications *not; | 768 | struct vpe_notifications *notifier; |
1048 | struct vpe *v; | 769 | struct vpe *v; |
1049 | int ret; | 770 | int ret; |
1050 | 771 | ||
1051 | if (minor != iminor(inode)) { | 772 | if (VPE_MODULE_MINOR != iminor(inode)) { |
1052 | /* assume only 1 device at the moment. */ | 773 | /* assume only 1 device at the moment. */ |
1053 | pr_warning("VPE loader: only vpe1 is supported\n"); | 774 | pr_warn("VPE loader: only vpe1 is supported\n"); |
1054 | 775 | ||
1055 | return -ENODEV; | 776 | return -ENODEV; |
1056 | } | 777 | } |
1057 | 778 | ||
1058 | if ((v = get_vpe(tclimit)) == NULL) { | 779 | v = get_vpe(aprp_cpu_index()); |
1059 | pr_warning("VPE loader: unable to get vpe\n"); | 780 | if (v == NULL) { |
781 | pr_warn("VPE loader: unable to get vpe\n"); | ||
1060 | 782 | ||
1061 | return -ENODEV; | 783 | return -ENODEV; |
1062 | } | 784 | } |
1063 | 785 | ||
1064 | state = xchg(&v->state, VPE_STATE_INUSE); | 786 | state = xchg(&v->state, VPE_STATE_INUSE); |
1065 | if (state != VPE_STATE_UNUSED) { | 787 | if (state != VPE_STATE_UNUSED) { |
1066 | printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n"); | 788 | pr_debug("VPE loader: tc in use dumping regs\n"); |
1067 | 789 | ||
1068 | list_for_each_entry(not, &v->notify, list) { | 790 | list_for_each_entry(notifier, &v->notify, list) |
1069 | not->stop(tclimit); | 791 | notifier->stop(aprp_cpu_index()); |
1070 | } | ||
1071 | 792 | ||
1072 | release_progmem(v->load_addr); | 793 | release_progmem(v->load_addr); |
1073 | cleanup_tc(get_tc(tclimit)); | 794 | cleanup_tc(get_tc(aprp_cpu_index())); |
1074 | } | 795 | } |
1075 | 796 | ||
1076 | /* this of-course trashes what was there before... */ | 797 | /* this of-course trashes what was there before... */ |
1077 | v->pbuffer = vmalloc(P_SIZE); | 798 | v->pbuffer = vmalloc(P_SIZE); |
1078 | if (!v->pbuffer) { | 799 | if (!v->pbuffer) { |
1079 | pr_warning("VPE loader: unable to allocate memory\n"); | 800 | pr_warn("VPE loader: unable to allocate memory\n"); |
1080 | return -ENOMEM; | 801 | return -ENOMEM; |
1081 | } | 802 | } |
1082 | v->plen = P_SIZE; | 803 | v->plen = P_SIZE; |
1083 | v->load_addr = NULL; | 804 | v->load_addr = NULL; |
1084 | v->len = 0; | 805 | v->len = 0; |
1085 | 806 | ||
1086 | v->uid = filp->f_cred->fsuid; | ||
1087 | v->gid = filp->f_cred->fsgid; | ||
1088 | |||
1089 | v->cwd[0] = 0; | 807 | v->cwd[0] = 0; |
1090 | ret = getcwd(v->cwd, VPE_PATH_MAX); | 808 | ret = getcwd(v->cwd, VPE_PATH_MAX); |
1091 | if (ret < 0) | 809 | if (ret < 0) |
1092 | printk(KERN_WARNING "VPE loader: open, getcwd returned %d\n", ret); | 810 | pr_warn("VPE loader: open, getcwd returned %d\n", ret); |
1093 | 811 | ||
1094 | v->shared_ptr = NULL; | 812 | v->shared_ptr = NULL; |
1095 | v->__start = 0; | 813 | v->__start = 0; |
@@ -1103,20 +821,20 @@ static int vpe_release(struct inode *inode, struct file *filp) | |||
1103 | Elf_Ehdr *hdr; | 821 | Elf_Ehdr *hdr; |
1104 | int ret = 0; | 822 | int ret = 0; |
1105 | 823 | ||
1106 | v = get_vpe(tclimit); | 824 | v = get_vpe(aprp_cpu_index()); |
1107 | if (v == NULL) | 825 | if (v == NULL) |
1108 | return -ENODEV; | 826 | return -ENODEV; |
1109 | 827 | ||
1110 | hdr = (Elf_Ehdr *) v->pbuffer; | 828 | hdr = (Elf_Ehdr *) v->pbuffer; |
1111 | if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) { | 829 | if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) { |
1112 | if (vpe_elfload(v) >= 0) { | 830 | if ((vpe_elfload(v) >= 0) && vpe_run) { |
1113 | vpe_run(v); | 831 | vpe_run(v); |
1114 | } else { | 832 | } else { |
1115 | printk(KERN_WARNING "VPE loader: ELF load failed.\n"); | 833 | pr_warn("VPE loader: ELF load failed.\n"); |
1116 | ret = -ENOEXEC; | 834 | ret = -ENOEXEC; |
1117 | } | 835 | } |
1118 | } else { | 836 | } else { |
1119 | printk(KERN_WARNING "VPE loader: only elf files are supported\n"); | 837 | pr_warn("VPE loader: only elf files are supported\n"); |
1120 | ret = -ENOEXEC; | 838 | ret = -ENOEXEC; |
1121 | } | 839 | } |
1122 | 840 | ||
@@ -1134,22 +852,22 @@ static int vpe_release(struct inode *inode, struct file *filp) | |||
1134 | return ret; | 852 | return ret; |
1135 | } | 853 | } |
1136 | 854 | ||
1137 | static ssize_t vpe_write(struct file *file, const char __user * buffer, | 855 | static ssize_t vpe_write(struct file *file, const char __user *buffer, |
1138 | size_t count, loff_t * ppos) | 856 | size_t count, loff_t *ppos) |
1139 | { | 857 | { |
1140 | size_t ret = count; | 858 | size_t ret = count; |
1141 | struct vpe *v; | 859 | struct vpe *v; |
1142 | 860 | ||
1143 | if (iminor(file_inode(file)) != minor) | 861 | if (iminor(file_inode(file)) != VPE_MODULE_MINOR) |
1144 | return -ENODEV; | 862 | return -ENODEV; |
1145 | 863 | ||
1146 | v = get_vpe(tclimit); | 864 | v = get_vpe(aprp_cpu_index()); |
865 | |||
1147 | if (v == NULL) | 866 | if (v == NULL) |
1148 | return -ENODEV; | 867 | return -ENODEV; |
1149 | 868 | ||
1150 | if ((count + v->len) > v->plen) { | 869 | if ((count + v->len) > v->plen) { |
1151 | printk(KERN_WARNING | 870 | pr_warn("VPE loader: elf size too big. Perhaps strip uneeded symbols\n"); |
1152 | "VPE loader: elf size too big. Perhaps strip uneeded symbols\n"); | ||
1153 | return -ENOMEM; | 871 | return -ENOMEM; |
1154 | } | 872 | } |
1155 | 873 | ||
@@ -1161,7 +879,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer, | |||
1161 | return ret; | 879 | return ret; |
1162 | } | 880 | } |
1163 | 881 | ||
1164 | static const struct file_operations vpe_fops = { | 882 | const struct file_operations vpe_fops = { |
1165 | .owner = THIS_MODULE, | 883 | .owner = THIS_MODULE, |
1166 | .open = vpe_open, | 884 | .open = vpe_open, |
1167 | .release = vpe_release, | 885 | .release = vpe_release, |
@@ -1169,420 +887,40 @@ static const struct file_operations vpe_fops = { | |||
1169 | .llseek = noop_llseek, | 887 | .llseek = noop_llseek, |
1170 | }; | 888 | }; |
1171 | 889 | ||
1172 | /* module wrapper entry points */ | ||
1173 | /* give me a vpe */ | ||
1174 | vpe_handle vpe_alloc(void) | ||
1175 | { | ||
1176 | int i; | ||
1177 | struct vpe *v; | ||
1178 | |||
1179 | /* find a vpe */ | ||
1180 | for (i = 1; i < MAX_VPES; i++) { | ||
1181 | if ((v = get_vpe(i)) != NULL) { | ||
1182 | v->state = VPE_STATE_INUSE; | ||
1183 | return v; | ||
1184 | } | ||
1185 | } | ||
1186 | return NULL; | ||
1187 | } | ||
1188 | |||
1189 | EXPORT_SYMBOL(vpe_alloc); | ||
1190 | |||
1191 | /* start running from here */ | ||
1192 | int vpe_start(vpe_handle vpe, unsigned long start) | ||
1193 | { | ||
1194 | struct vpe *v = vpe; | ||
1195 | |||
1196 | v->__start = start; | ||
1197 | return vpe_run(v); | ||
1198 | } | ||
1199 | |||
1200 | EXPORT_SYMBOL(vpe_start); | ||
1201 | |||
1202 | /* halt it for now */ | ||
1203 | int vpe_stop(vpe_handle vpe) | ||
1204 | { | ||
1205 | struct vpe *v = vpe; | ||
1206 | struct tc *t; | ||
1207 | unsigned int evpe_flags; | ||
1208 | |||
1209 | evpe_flags = dvpe(); | ||
1210 | |||
1211 | if ((t = list_entry(v->tc.next, struct tc, tc)) != NULL) { | ||
1212 | |||
1213 | settc(t->index); | ||
1214 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
1215 | } | ||
1216 | |||
1217 | evpe(evpe_flags); | ||
1218 | |||
1219 | return 0; | ||
1220 | } | ||
1221 | |||
1222 | EXPORT_SYMBOL(vpe_stop); | ||
1223 | |||
1224 | /* I've done with it thank you */ | ||
1225 | int vpe_free(vpe_handle vpe) | ||
1226 | { | ||
1227 | struct vpe *v = vpe; | ||
1228 | struct tc *t; | ||
1229 | unsigned int evpe_flags; | ||
1230 | |||
1231 | if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { | ||
1232 | return -ENOEXEC; | ||
1233 | } | ||
1234 | |||
1235 | evpe_flags = dvpe(); | ||
1236 | |||
1237 | /* Put MVPE's into 'configuration state' */ | ||
1238 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1239 | |||
1240 | settc(t->index); | ||
1241 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
1242 | |||
1243 | /* halt the TC */ | ||
1244 | write_tc_c0_tchalt(TCHALT_H); | ||
1245 | mips_ihb(); | ||
1246 | |||
1247 | /* mark the TC unallocated */ | ||
1248 | write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A); | ||
1249 | |||
1250 | v->state = VPE_STATE_UNUSED; | ||
1251 | |||
1252 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1253 | evpe(evpe_flags); | ||
1254 | |||
1255 | return 0; | ||
1256 | } | ||
1257 | |||
1258 | EXPORT_SYMBOL(vpe_free); | ||
1259 | |||
1260 | void *vpe_get_shared(int index) | 890 | void *vpe_get_shared(int index) |
1261 | { | 891 | { |
1262 | struct vpe *v; | 892 | struct vpe *v = get_vpe(index); |
1263 | 893 | ||
1264 | if ((v = get_vpe(index)) == NULL) | 894 | if (v == NULL) |
1265 | return NULL; | 895 | return NULL; |
1266 | 896 | ||
1267 | return v->shared_ptr; | 897 | return v->shared_ptr; |
1268 | } | 898 | } |
1269 | |||
1270 | EXPORT_SYMBOL(vpe_get_shared); | 899 | EXPORT_SYMBOL(vpe_get_shared); |
1271 | 900 | ||
1272 | int vpe_getuid(int index) | ||
1273 | { | ||
1274 | struct vpe *v; | ||
1275 | |||
1276 | if ((v = get_vpe(index)) == NULL) | ||
1277 | return -1; | ||
1278 | |||
1279 | return v->uid; | ||
1280 | } | ||
1281 | |||
1282 | EXPORT_SYMBOL(vpe_getuid); | ||
1283 | |||
1284 | int vpe_getgid(int index) | ||
1285 | { | ||
1286 | struct vpe *v; | ||
1287 | |||
1288 | if ((v = get_vpe(index)) == NULL) | ||
1289 | return -1; | ||
1290 | |||
1291 | return v->gid; | ||
1292 | } | ||
1293 | |||
1294 | EXPORT_SYMBOL(vpe_getgid); | ||
1295 | |||
1296 | int vpe_notify(int index, struct vpe_notifications *notify) | 901 | int vpe_notify(int index, struct vpe_notifications *notify) |
1297 | { | 902 | { |
1298 | struct vpe *v; | 903 | struct vpe *v = get_vpe(index); |
1299 | 904 | ||
1300 | if ((v = get_vpe(index)) == NULL) | 905 | if (v == NULL) |
1301 | return -1; | 906 | return -1; |
1302 | 907 | ||
1303 | list_add(¬ify->list, &v->notify); | 908 | list_add(¬ify->list, &v->notify); |
1304 | return 0; | 909 | return 0; |
1305 | } | 910 | } |
1306 | |||
1307 | EXPORT_SYMBOL(vpe_notify); | 911 | EXPORT_SYMBOL(vpe_notify); |
1308 | 912 | ||
1309 | char *vpe_getcwd(int index) | 913 | char *vpe_getcwd(int index) |
1310 | { | 914 | { |
1311 | struct vpe *v; | 915 | struct vpe *v = get_vpe(index); |
1312 | 916 | ||
1313 | if ((v = get_vpe(index)) == NULL) | 917 | if (v == NULL) |
1314 | return NULL; | 918 | return NULL; |
1315 | 919 | ||
1316 | return v->cwd; | 920 | return v->cwd; |
1317 | } | 921 | } |
1318 | |||
1319 | EXPORT_SYMBOL(vpe_getcwd); | 922 | EXPORT_SYMBOL(vpe_getcwd); |
1320 | 923 | ||
1321 | static ssize_t store_kill(struct device *dev, struct device_attribute *attr, | ||
1322 | const char *buf, size_t len) | ||
1323 | { | ||
1324 | struct vpe *vpe = get_vpe(tclimit); | ||
1325 | struct vpe_notifications *not; | ||
1326 | |||
1327 | list_for_each_entry(not, &vpe->notify, list) { | ||
1328 | not->stop(tclimit); | ||
1329 | } | ||
1330 | |||
1331 | release_progmem(vpe->load_addr); | ||
1332 | cleanup_tc(get_tc(tclimit)); | ||
1333 | vpe_stop(vpe); | ||
1334 | vpe_free(vpe); | ||
1335 | |||
1336 | return len; | ||
1337 | } | ||
1338 | static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); | ||
1339 | |||
1340 | static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, | ||
1341 | char *buf) | ||
1342 | { | ||
1343 | struct vpe *vpe = get_vpe(tclimit); | ||
1344 | |||
1345 | return sprintf(buf, "%d\n", vpe->ntcs); | ||
1346 | } | ||
1347 | |||
1348 | static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, | ||
1349 | const char *buf, size_t len) | ||
1350 | { | ||
1351 | struct vpe *vpe = get_vpe(tclimit); | ||
1352 | unsigned long new; | ||
1353 | char *endp; | ||
1354 | |||
1355 | new = simple_strtoul(buf, &endp, 0); | ||
1356 | if (endp == buf) | ||
1357 | goto out_einval; | ||
1358 | |||
1359 | if (new == 0 || new > (hw_tcs - tclimit)) | ||
1360 | goto out_einval; | ||
1361 | |||
1362 | vpe->ntcs = new; | ||
1363 | |||
1364 | return len; | ||
1365 | |||
1366 | out_einval: | ||
1367 | return -EINVAL; | ||
1368 | } | ||
1369 | static DEVICE_ATTR_RW(ntcs); | ||
1370 | |||
1371 | static struct attribute *vpe_attrs[] = { | ||
1372 | &dev_attr_kill.attr, | ||
1373 | &dev_attr_ntcs.attr, | ||
1374 | NULL, | ||
1375 | }; | ||
1376 | ATTRIBUTE_GROUPS(vpe); | ||
1377 | |||
1378 | static void vpe_device_release(struct device *cd) | ||
1379 | { | ||
1380 | kfree(cd); | ||
1381 | } | ||
1382 | |||
1383 | struct class vpe_class = { | ||
1384 | .name = "vpe", | ||
1385 | .owner = THIS_MODULE, | ||
1386 | .dev_release = vpe_device_release, | ||
1387 | .dev_groups = vpe_groups, | ||
1388 | }; | ||
1389 | |||
1390 | struct device vpe_device; | ||
1391 | |||
1392 | static int __init vpe_module_init(void) | ||
1393 | { | ||
1394 | unsigned int mtflags, vpflags; | ||
1395 | unsigned long flags, val; | ||
1396 | struct vpe *v = NULL; | ||
1397 | struct tc *t; | ||
1398 | int tc, err; | ||
1399 | |||
1400 | if (!cpu_has_mipsmt) { | ||
1401 | printk("VPE loader: not a MIPS MT capable processor\n"); | ||
1402 | return -ENODEV; | ||
1403 | } | ||
1404 | |||
1405 | if (vpelimit == 0) { | ||
1406 | printk(KERN_WARNING "No VPEs reserved for AP/SP, not " | ||
1407 | "initializing VPE loader.\nPass maxvpes=<n> argument as " | ||
1408 | "kernel argument\n"); | ||
1409 | |||
1410 | return -ENODEV; | ||
1411 | } | ||
1412 | |||
1413 | if (tclimit == 0) { | ||
1414 | printk(KERN_WARNING "No TCs reserved for AP/SP, not " | ||
1415 | "initializing VPE loader.\nPass maxtcs=<n> argument as " | ||
1416 | "kernel argument\n"); | ||
1417 | |||
1418 | return -ENODEV; | ||
1419 | } | ||
1420 | |||
1421 | major = register_chrdev(0, module_name, &vpe_fops); | ||
1422 | if (major < 0) { | ||
1423 | printk("VPE loader: unable to register character device\n"); | ||
1424 | return major; | ||
1425 | } | ||
1426 | |||
1427 | err = class_register(&vpe_class); | ||
1428 | if (err) { | ||
1429 | printk(KERN_ERR "vpe_class registration failed\n"); | ||
1430 | goto out_chrdev; | ||
1431 | } | ||
1432 | |||
1433 | device_initialize(&vpe_device); | ||
1434 | vpe_device.class = &vpe_class, | ||
1435 | vpe_device.parent = NULL, | ||
1436 | dev_set_name(&vpe_device, "vpe1"); | ||
1437 | vpe_device.devt = MKDEV(major, minor); | ||
1438 | err = device_add(&vpe_device); | ||
1439 | if (err) { | ||
1440 | printk(KERN_ERR "Adding vpe_device failed\n"); | ||
1441 | goto out_class; | ||
1442 | } | ||
1443 | |||
1444 | local_irq_save(flags); | ||
1445 | mtflags = dmt(); | ||
1446 | vpflags = dvpe(); | ||
1447 | |||
1448 | /* Put MVPE's into 'configuration state' */ | ||
1449 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1450 | |||
1451 | /* dump_mtregs(); */ | ||
1452 | |||
1453 | val = read_c0_mvpconf0(); | ||
1454 | hw_tcs = (val & MVPCONF0_PTC) + 1; | ||
1455 | hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
1456 | |||
1457 | for (tc = tclimit; tc < hw_tcs; tc++) { | ||
1458 | /* | ||
1459 | * Must re-enable multithreading temporarily or in case we | ||
1460 | * reschedule send IPIs or similar we might hang. | ||
1461 | */ | ||
1462 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1463 | evpe(vpflags); | ||
1464 | emt(mtflags); | ||
1465 | local_irq_restore(flags); | ||
1466 | t = alloc_tc(tc); | ||
1467 | if (!t) { | ||
1468 | err = -ENOMEM; | ||
1469 | goto out; | ||
1470 | } | ||
1471 | |||
1472 | local_irq_save(flags); | ||
1473 | mtflags = dmt(); | ||
1474 | vpflags = dvpe(); | ||
1475 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1476 | |||
1477 | /* VPE's */ | ||
1478 | if (tc < hw_tcs) { | ||
1479 | settc(tc); | ||
1480 | |||
1481 | if ((v = alloc_vpe(tc)) == NULL) { | ||
1482 | printk(KERN_WARNING "VPE: unable to allocate VPE\n"); | ||
1483 | |||
1484 | goto out_reenable; | ||
1485 | } | ||
1486 | |||
1487 | v->ntcs = hw_tcs - tclimit; | ||
1488 | |||
1489 | /* add the tc to the list of this vpe's tc's. */ | ||
1490 | list_add(&t->tc, &v->tc); | ||
1491 | |||
1492 | /* deactivate all but vpe0 */ | ||
1493 | if (tc >= tclimit) { | ||
1494 | unsigned long tmp = read_vpe_c0_vpeconf0(); | ||
1495 | |||
1496 | tmp &= ~VPECONF0_VPA; | ||
1497 | |||
1498 | /* master VPE */ | ||
1499 | tmp |= VPECONF0_MVP; | ||
1500 | write_vpe_c0_vpeconf0(tmp); | ||
1501 | } | ||
1502 | |||
1503 | /* disable multi-threading with TC's */ | ||
1504 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); | ||
1505 | |||
1506 | if (tc >= vpelimit) { | ||
1507 | /* | ||
1508 | * Set config to be the same as vpe0, | ||
1509 | * particularly kseg0 coherency alg | ||
1510 | */ | ||
1511 | write_vpe_c0_config(read_c0_config()); | ||
1512 | } | ||
1513 | } | ||
1514 | |||
1515 | /* TC's */ | ||
1516 | t->pvpe = v; /* set the parent vpe */ | ||
1517 | |||
1518 | if (tc >= tclimit) { | ||
1519 | unsigned long tmp; | ||
1520 | |||
1521 | settc(tc); | ||
1522 | |||
1523 | /* Any TC that is bound to VPE0 gets left as is - in case | ||
1524 | we are running SMTC on VPE0. A TC that is bound to any | ||
1525 | other VPE gets bound to VPE0, ideally I'd like to make | ||
1526 | it homeless but it doesn't appear to let me bind a TC | ||
1527 | to a non-existent VPE. Which is perfectly reasonable. | ||
1528 | |||
1529 | The (un)bound state is visible to an EJTAG probe so may | ||
1530 | notify GDB... | ||
1531 | */ | ||
1532 | |||
1533 | if (((tmp = read_tc_c0_tcbind()) & TCBIND_CURVPE)) { | ||
1534 | /* tc is bound >vpe0 */ | ||
1535 | write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE); | ||
1536 | |||
1537 | t->pvpe = get_vpe(0); /* set the parent vpe */ | ||
1538 | } | ||
1539 | |||
1540 | /* halt the TC */ | ||
1541 | write_tc_c0_tchalt(TCHALT_H); | ||
1542 | mips_ihb(); | ||
1543 | |||
1544 | tmp = read_tc_c0_tcstatus(); | ||
1545 | |||
1546 | /* mark not activated and not dynamically allocatable */ | ||
1547 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
1548 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
1549 | write_tc_c0_tcstatus(tmp); | ||
1550 | } | ||
1551 | } | ||
1552 | |||
1553 | out_reenable: | ||
1554 | /* release config state */ | ||
1555 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1556 | |||
1557 | evpe(vpflags); | ||
1558 | emt(mtflags); | ||
1559 | local_irq_restore(flags); | ||
1560 | |||
1561 | return 0; | ||
1562 | |||
1563 | out_class: | ||
1564 | class_unregister(&vpe_class); | ||
1565 | out_chrdev: | ||
1566 | unregister_chrdev(major, module_name); | ||
1567 | |||
1568 | out: | ||
1569 | return err; | ||
1570 | } | ||
1571 | |||
1572 | static void __exit vpe_module_exit(void) | ||
1573 | { | ||
1574 | struct vpe *v, *n; | ||
1575 | |||
1576 | device_del(&vpe_device); | ||
1577 | unregister_chrdev(major, module_name); | ||
1578 | |||
1579 | /* No locking needed here */ | ||
1580 | list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { | ||
1581 | if (v->state != VPE_STATE_UNUSED) | ||
1582 | release_vpe(v); | ||
1583 | } | ||
1584 | } | ||
1585 | |||
1586 | module_init(vpe_module_init); | 924 | module_init(vpe_module_init); |
1587 | module_exit(vpe_module_exit); | 925 | module_exit(vpe_module_exit); |
1588 | MODULE_DESCRIPTION("MIPS VPE Loader"); | 926 | MODULE_DESCRIPTION("MIPS VPE Loader"); |