author     Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-31 23:41:53 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-07-31 23:41:53 -0400
commit     d6dd9e93c7531fa31370e27d053a3940d8d662fb (patch)
tree       afab573031b3f0b9bbe5e417a890f7cae09a7224 /arch/mips/kernel
parent     dd9cd6d4351076c78bb8c0f9146d1904b481fdbb (diff)
parent     b4b2917cc8babe8eaf4bc133bca31b11ed7dac13 (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (50 commits)
[MIPS] Add smp_call_function_single()
[MIPS] thread_info.h: kmalloc + memset conversion to kzalloc
[MIPS] Kexec: Fix several 64-bit bugs.
[MIPS] Kexec: Fix several warnings.
[MIPS] DDB5477: Remove support
[MIPS] Fulong: Remove unneeded header file
[MIPS] Cobalt: Enable UART on RaQ1
[MIPS] Remove unused GROUP_TOSHIBA_NAMES
[MIPS] remove some duplicate includes
[MIPS] Oprofile: Fix rm9000 performance counter handler
[MIPS] Use -Werror on subdirectories which build cleanly.
[MIPS] Yosemite: Fix warning.
[MIPS] PMON: Fix cpustart declaration.
[MIPS] Yosemite: Only build ll_ht_smp_irq_handler() if HYPERTRANSPORT.
[MIPS] Yosemite: Fix build error due to undeclared titan_mailbox_irq().
[MIPS] Yosemite: Don't declare titan_mailbox_irq() as asmlinkage.
[MIPS] Yosemite: Fix warnings in i2c-yoesmite by deleting the unused code.
[MIPS] Delete unused arch/mips/gt64120/common/
[MIPS] Fix build warning in unaligned load/store emulator.
[MIPS] IP32: Don't ignore request_irq's return value.
...
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--   arch/mips/kernel/Makefile            |   2
-rw-r--r--   arch/mips/kernel/asm-offsets.c       |   1
-rw-r--r--   arch/mips/kernel/head.S              |   1
-rw-r--r--   arch/mips/kernel/kspd.c              |  19
-rw-r--r--   arch/mips/kernel/linux32.c           |   7
-rw-r--r--   arch/mips/kernel/machine_kexec.c     |  13
-rw-r--r--   arch/mips/kernel/mips-mt-fpaff.c     |   9
-rw-r--r--   arch/mips/kernel/mips-mt.c           |  22
-rw-r--r--   arch/mips/kernel/process.c           |   2
-rw-r--r--   arch/mips/kernel/ptrace.c            |  14
-rw-r--r--   arch/mips/kernel/relocate_kernel.S   |  78
-rw-r--r--   arch/mips/kernel/rtlx.c              |  24
-rw-r--r--   arch/mips/kernel/scall32-o32.S       |   1
-rw-r--r--   arch/mips/kernel/scall64-64.S        |   1
-rw-r--r--   arch/mips/kernel/scall64-n32.S       |   1
-rw-r--r--   arch/mips/kernel/scall64-o32.S       |   1
-rw-r--r--   arch/mips/kernel/signal32.c          |   1
-rw-r--r--   arch/mips/kernel/smp.c               |  55
-rw-r--r--   arch/mips/kernel/smtc.c              |  36
-rw-r--r--   arch/mips/kernel/syscall.c           |  18
-rw-r--r--   arch/mips/kernel/traps.c             |   2
-rw-r--r--   arch/mips/kernel/unaligned.c         |  53
-rw-r--r--   arch/mips/kernel/vpe.c               | 359
23 files changed, 452 insertions(+), 268 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 5c8085b6d7ab..07344cb37596 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -71,3 +71,5 @@ obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
 CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 obj-$(CONFIG_HAVE_STD_PC_SERIAL_PORT)	+= 8250-platform.o
+
+EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 3b27309d54b1..013327286c26 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -132,7 +132,6 @@ void output_thread_defines(void)
 	offset("#define THREAD_ECODE ", struct task_struct, \
 	       thread.error_code);
 	offset("#define THREAD_TRAPNO ", struct task_struct, thread.trap_no);
-	offset("#define THREAD_MFLAGS ", struct task_struct, thread.mflags);
 	offset("#define THREAD_TRAMP ", struct task_struct, \
 	       thread.irix_trampoline);
 	offset("#define THREAD_OLDCTX ", struct task_struct, \
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index c15bbc436bbd..e46782b0ebc8 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -138,7 +138,6 @@
 	.fill	0x400
 #endif
 
-EXPORT(stext)					# used for profiling
 EXPORT(_stext)
 
 #ifndef CONFIG_BOOT_RAW
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index c6580018c94b..cb9a14a1ca5b 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -89,7 +89,7 @@ static int sp_stopping = 0;
 #define MTSP_O_EXCL		0x0800
 #define MTSP_O_BINARY		0x8000
 
-#define SP_VPE 1
+extern int tclimit;
 
 struct apsp_table {
 	int sp;
@@ -225,8 +225,8 @@ void sp_work_handle_request(void)
 	/* Run the syscall at the priviledge of the user who loaded the
 	   SP program */
 
-	if (vpe_getuid(SP_VPE))
-		sp_setfsuidgid( vpe_getuid(SP_VPE), vpe_getgid(SP_VPE));
+	if (vpe_getuid(tclimit))
+		sp_setfsuidgid(vpe_getuid(tclimit), vpe_getgid(tclimit));
 
 	switch (sc.cmd) {
 	/* needs the flags argument translating from SDE kit to
@@ -245,7 +245,7 @@ void sp_work_handle_request(void)
 
 	case MTSP_SYSCALL_EXIT:
 		list_for_each_entry(n, &kspd_notifylist, list)
-			n->kspd_sp_exit(SP_VPE);
+			n->kspd_sp_exit(tclimit);
 		sp_stopping = 1;
 
 		printk(KERN_DEBUG "KSPD got exit syscall from SP exitcode %d\n",
@@ -255,7 +255,7 @@ void sp_work_handle_request(void)
 	case MTSP_SYSCALL_OPEN:
 		generic.arg1 = translate_open_flags(generic.arg1);
 
-		vcwd = vpe_getcwd(SP_VPE);
+		vcwd = vpe_getcwd(tclimit);
 
 		/* change to the cwd of the process that loaded the SP program */
 		old_fs = get_fs();
@@ -283,7 +283,7 @@ void sp_work_handle_request(void)
 		break;
 	} /* switch */
 
-	if (vpe_getuid(SP_VPE))
+	if (vpe_getuid(tclimit))
 		sp_setfsuidgid( 0, 0);
 
 	old_fs = get_fs();
@@ -364,10 +364,9 @@ static void startwork(int vpe)
 		}
 
 		INIT_WORK(&work, sp_work);
-		queue_work(workqueue, &work);
-	} else
-		queue_work(workqueue, &work);
+	}
 
+	queue_work(workqueue, &work);
 }
 
 static void stopwork(int vpe)
@@ -389,7 +388,7 @@ static int kspd_module_init(void)
 
 	notify.start = startwork;
 	notify.stop = stopwork;
-	vpe_notify(SP_VPE, &notify);
+	vpe_notify(tclimit, &notify);
 
 	return 0;
 }
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index c37568d6fb55..135d9a5fe337 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -566,6 +566,13 @@ asmlinkage long sys32_fadvise64_64(int fd, int __pad,
 		flags);
 }
 
+asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
+	unsigned offset_a3, unsigned len_a4, unsigned len_a5)
+{
+	return sys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
+			     merge_64(len_a4, len_a5));
+}
+
 save_static_function(sys32_clone);
 static int noinline __used
 _sys32_clone(nabi_no_regargs struct pt_regs regs)
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
index 8f42fa85ac9e..22960d67cf07 100644
--- a/arch/mips/kernel/machine_kexec.c
+++ b/arch/mips/kernel/machine_kexec.c
@@ -14,7 +14,7 @@
 #include <asm/page.h>
 
 extern const unsigned char relocate_new_kernel[];
-extern const unsigned int relocate_new_kernel_size;
+extern const size_t relocate_new_kernel_size;
 
 extern unsigned long kexec_start_address;
 extern unsigned long kexec_indirection_page;
@@ -40,6 +40,8 @@ machine_crash_shutdown(struct pt_regs *regs)
 {
 }
 
+typedef void (*noretfun_t)(void) __attribute__((noreturn));
+
 void
 machine_kexec(struct kimage *image)
 {
@@ -51,7 +53,8 @@ machine_kexec(struct kimage *image)
 		(unsigned long)page_address(image->control_code_page);
 
 	kexec_start_address = image->start;
-	kexec_indirection_page = phys_to_virt(image->head & PAGE_MASK);
+	kexec_indirection_page =
+		(unsigned long) phys_to_virt(image->head & PAGE_MASK);
 
 	memcpy((void*)reboot_code_buffer, relocate_new_kernel,
 	       relocate_new_kernel_size);
@@ -67,7 +70,7 @@ machine_kexec(struct kimage *image)
 	     phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
 		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
 		    *ptr & IND_DESTINATION)
-			*ptr = phys_to_virt(*ptr);
+			*ptr = (unsigned long) phys_to_virt(*ptr);
 	}
 
 	/*
@@ -78,8 +81,8 @@ machine_kexec(struct kimage *image)
 	flush_icache_range(reboot_code_buffer,
 			   reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
 
-	printk("Will call new kernel at %08x\n", image->start);
+	printk("Will call new kernel at %08lx\n", image->start);
 	printk("Bye ...\n");
 	flush_cache_all();
-	((void (*)(void))reboot_code_buffer)();
+	((noretfun_t) reboot_code_buffer)();
 }
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index ede5d73d652e..892665bb12b1 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -50,6 +50,7 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	cpumask_t effective_mask;
 	int retval;
 	struct task_struct *p;
+	struct thread_info *ti;
 
 	if (len < sizeof(new_mask))
 		return -EINVAL;
@@ -93,16 +94,16 @@ asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
 	read_unlock(&tasklist_lock);
 
 	/* Compute new global allowed CPU set if necessary */
-	if ((p->thread.mflags & MF_FPUBOUND)
-	    && cpus_intersects(new_mask, mt_fpu_cpumask)) {
+	ti = task_thread_info(p);
+	if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
+	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
 		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
 		retval = set_cpus_allowed(p, effective_mask);
 	} else {
-		p->thread.mflags &= ~MF_FPUBOUND;
+		clear_ti_thread_flag(ti, TIF_FPUBOUND);
 		retval = set_cpus_allowed(p, new_mask);
 	}
 
-
 out_unlock:
 	put_task_struct(p);
 	unlock_cpu_hotplug();
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
index 1a7d89231299..7169a4db37b8 100644
--- a/arch/mips/kernel/mips-mt.c
+++ b/arch/mips/kernel/mips-mt.c
@@ -21,6 +21,28 @@
 #include <asm/r4kcache.h>
 #include <asm/cacheflush.h>
 
+int vpelimit;
+
+static int __init maxvpes(char *str)
+{
+	get_option(&str, &vpelimit);
+
+	return 1;
+}
+
+__setup("maxvpes=", maxvpes);
+
+int tclimit;
+
+static int __init maxtcs(char *str)
+{
+	get_option(&str, &tclimit);
+
+	return 1;
+}
+
+__setup("maxtcs=", maxtcs);
+
 /*
  * Dump new MIPS MT state for the core. Does not leave TCs halted.
  * Takes an argument which taken to be a pre-call MVPControl value.
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index bd05f5a927ea..e6ce943099a0 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -77,7 +77,7 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
 	status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|KU_MASK);
 #ifdef CONFIG_64BIT
 	status &= ~ST0_FR;
-	status |= (current->thread.mflags & MF_32BIT_REGS) ? 0 : ST0_FR;
+	status |= test_thread_flag(TIF_32BIT_REGS) ? 0 : ST0_FR;
 #endif
 	status |= KU_USER;
 	regs->cp0_status = status;
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 893e7bccf226..bbd57b20b43e 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -20,11 +20,11 @@
 #include <linux/mm.h>
 #include <linux/errno.h>
 #include <linux/ptrace.h>
-#include <linux/audit.h>
 #include <linux/smp.h>
 #include <linux/user.h>
 #include <linux/security.h>
-#include <linux/signal.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
 
 #include <asm/byteorder.h>
 #include <asm/cpu.h>
@@ -470,12 +470,17 @@ static inline int audit_arch(void)
  */
 asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 {
+	/* do the secure computing check first */
+	if (!entryexit)
+		secure_computing(regs->regs[0]);
+
 	if (unlikely(current->audit_context) && entryexit)
 		audit_syscall_exit(AUDITSC_RESULT(regs->regs[2]),
 				   regs->regs[2]);
 
 	if (!(current->ptrace & PT_PTRACED))
 		goto out;
+
 	if (!test_thread_flag(TIF_SYSCALL_TRACE))
 		goto out;
 
@@ -493,9 +498,10 @@ asmlinkage void do_syscall_trace(struct pt_regs *regs, int entryexit)
 		send_sig(current->exit_code, current, 1);
 		current->exit_code = 0;
 	}
-out:
+
+out:
 	if (unlikely(current->audit_context) && !entryexit)
-		audit_syscall_entry(audit_arch(), regs->regs[2],
+		audit_syscall_entry(audit_arch(), regs->regs[0],
 				    regs->regs[4], regs->regs[5],
 				    regs->regs[6], regs->regs[7]);
 }
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index a3f0d00c1334..87481f916a61 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -14,67 +14,69 @@
 #include <asm/stackframe.h>
 #include <asm/addrspace.h>
 
-	.globl relocate_new_kernel
-relocate_new_kernel:
-
-	PTR_L s0, kexec_indirection_page
-	PTR_L s1, kexec_start_address
+LEAF(relocate_new_kernel)
+	PTR_L s0, kexec_indirection_page
+	PTR_L s1, kexec_start_address
 
 process_entry:
 	PTR_L s2, (s0)
 	PTR_ADD s0, s0, SZREG
 
 	/* destination page */
 	and s3, s2, 0x1
 	beq s3, zero, 1f
 	and s4, s2, ~0x1	/* store destination addr in s4 */
 	move a0, s4
 	b process_entry
 
1:
 	/* indirection page, update s0 */
 	and s3, s2, 0x2
 	beq s3, zero, 1f
 	and s0, s2, ~0x2
 	b process_entry
 
1:
 	/* done page */
 	and s3, s2, 0x4
 	beq s3, zero, 1f
 	b done
1:
 	/* source page */
 	and s3, s2, 0x8
 	beq s3, zero, process_entry
 	and s2, s2, ~0x8
 	li s6, (1 << PAGE_SHIFT) / SZREG
 
copy_word:
 	/* copy page word by word */
 	REG_L s5, (s2)
 	REG_S s5, (s4)
-	INT_ADD s4, s4, SZREG
-	INT_ADD s2, s2, SZREG
-	INT_SUB s6, s6, 1
+	PTR_ADD s4, s4, SZREG
+	PTR_ADD s2, s2, SZREG
+	LONG_SUB s6, s6, 1
 	beq s6, zero, process_entry
 	b copy_word
 	b process_entry
 
done:
 	/* jump to kexec_start_address */
 	j s1
+	END(relocate_new_kernel)
 
-	.globl kexec_start_address
kexec_start_address:
-	.long 0x0
+	EXPORT(kexec_start_address)
+	PTR 0x0
+	.size kexec_start_address, PTRSIZE
 
-	.globl kexec_indirection_page
kexec_indirection_page:
-	.long 0x0
+	EXPORT(kexec_indirection_page)
+	PTR 0
+	.size kexec_indirection_page, PTRSIZE
 
relocate_new_kernel_end:
 
-	.globl relocate_new_kernel_size
relocate_new_kernel_size:
-	.long relocate_new_kernel_end - relocate_new_kernel
+	EXPORT(relocate_new_kernel_size)
+	PTR relocate_new_kernel_end - relocate_new_kernel
+	.size relocate_new_kernel_size, PTRSIZE
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 8cf24d716d41..aab89e97abb5 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -40,12 +40,11 @@
 #include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/mips_mt.h>
 #include <asm/system.h>
 #include <asm/vpe.h>
 #include <asm/rtlx.h>
 
-#define RTLX_TARG_VPE 1
-
 static struct rtlx_info *rtlx;
 static int major;
 static char module_name[] = "rtlx";
@@ -165,10 +164,10 @@ int rtlx_open(int index, int can_sleep)
 	}
 
 	if (rtlx == NULL) {
-		if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) {
+		if( (p = vpe_get_shared(tclimit)) == NULL) {
 			if (can_sleep) {
 				__wait_event_interruptible(channel_wqs[index].lx_queue,
-						(p = vpe_get_shared(RTLX_TARG_VPE)),
+						(p = vpe_get_shared(tclimit)),
 						ret);
 				if (ret)
 					goto out_fail;
@@ -472,11 +471,24 @@ static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
 static char register_chrdev_failed[] __initdata =
 	KERN_ERR "rtlx_module_init: unable to register device\n";
 
-static int rtlx_module_init(void)
+static int __init rtlx_module_init(void)
 {
 	struct device *dev;
 	int i, err;
 
+	if (!cpu_has_mipsmt) {
+		printk("VPE loader: not a MIPS MT capable processor\n");
+		return -ENODEV;
+	}
+
+	if (tclimit == 0) {
+		printk(KERN_WARNING "No TCs reserved for AP/SP, not "
+		       "initializing RTLX.\nPass maxtcs=<n> argument as kernel "
+		       "argument\n");
+
+		return -ENODEV;
+	}
+
 	major = register_chrdev(0, module_name, &rtlx_fops);
 	if (major < 0) {
 		printk(register_chrdev_failed);
@@ -501,7 +513,7 @@ static int rtlx_module_init(void)
 	/* set up notifiers */
 	notify.start = starting;
 	notify.stop = stopping;
-	vpe_notify(RTLX_TARG_VPE, &notify);
+	vpe_notify(tclimit, &notify);
 
 	if (cpu_has_vint)
 		set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch);
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index ae985d1fcca1..82480a1717d8 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -662,6 +662,7 @@ einval:	li	v0, -EINVAL
 	sys	sys_signalfd		3
 	sys	sys_timerfd		4
 	sys	sys_eventfd		1
+	sys	sys_fallocate		6	/* 4320 */
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 7bcd5a1a85f5..c2c10876da2e 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -477,4 +477,5 @@ sys_call_table:
 	PTR	sys_signalfd
 	PTR	sys_timerfd
 	PTR	sys_eventfd
+	PTR	sys_fallocate
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 532a2f3b42fc..53d7a977193c 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -403,4 +403,5 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_signalfd		/* 5280 */
 	PTR	compat_sys_timerfd
 	PTR	sys_eventfd
+	PTR	sys_fallocate
 	.size	sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 6bbe0f4ed8ba..b3ed731a24c6 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -525,4 +525,5 @@ sys_call_table:
 	PTR	compat_sys_signalfd
 	PTR	compat_sys_timerfd
 	PTR	sys_eventfd
+	PTR	sys_fallocate			/* 4320 */
 	.size	sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 486b8e5f52d0..64b612a0a622 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -18,7 +18,6 @@
 #include <linux/errno.h>
 #include <linux/wait.h>
 #include <linux/ptrace.h>
-#include <linux/compat.h>
 #include <linux/suspend.h>
 #include <linux/compiler.h>
 #include <linux/uaccess.h>
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 04bbbd8d91ab..73b0dab02668 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -194,6 +194,61 @@ void smp_call_function_interrupt(void)
 	}
 }
 
+int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
+			     int retry, int wait)
+{
+	struct call_data_struct data;
+	int me;
+
+	/*
+	 * Can die spectacularly if this CPU isn't yet marked online
+	 */
+	if (!cpu_online(cpu))
+		return 0;
+
+	me = get_cpu();
+	BUG_ON(!cpu_online(me));
+
+	if (cpu == me) {
+		local_irq_disable();
+		func(info);
+		local_irq_enable();
+		put_cpu();
+		return 0;
+	}
+
+	/* Can deadlock when called with interrupts disabled */
+	WARN_ON(irqs_disabled());
+
+	data.func = func;
+	data.info = info;
+	atomic_set(&data.started, 0);
+	data.wait = wait;
+	if (wait)
+		atomic_set(&data.finished, 0);
+
+	spin_lock(&smp_call_lock);
+	call_data = &data;
+	smp_mb();
+
+	/* Send a message to the other CPU */
+	core_send_ipi(cpu, SMP_CALL_FUNCTION);
+
+	/* Wait for response */
+	/* FIXME: lock-up detection, backtrace on lock-up */
+	while (atomic_read(&data.started) != 1)
+		barrier();
+
+	if (wait)
+		while (atomic_read(&data.finished) != 1)
+			barrier();
+	call_data = NULL;
+	spin_unlock(&smp_call_lock);
+
+	put_cpu();
+	return 0;
+}
+
 static void stop_this_cpu(void *dummy)
 {
 	/*
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 342d873b2ecc..16aa5d37117c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -86,25 +86,11 @@ unsigned int smtc_status = 0;
 
 /* Boot command line configuration overrides */
 
-static int vpelimit = 0;
-static int tclimit = 0;
 static int ipibuffers = 0;
 static int nostlb = 0;
 static int asidmask = 0;
 unsigned long smtc_asid_mask = 0xff;
 
-static int __init maxvpes(char *str)
-{
-	get_option(&str, &vpelimit);
-	return 1;
-}
-
-static int __init maxtcs(char *str)
-{
-	get_option(&str, &tclimit);
-	return 1;
-}
-
 static int __init ipibufs(char *str)
 {
 	get_option(&str, &ipibuffers);
@@ -137,8 +123,6 @@ static int __init asidmask_set(char *str)
 	return 1;
 }
 
-__setup("maxvpes=", maxvpes);
-__setup("maxtcs=", maxtcs);
 __setup("ipibufs=", ipibufs);
 __setup("nostlb", stlb_disable);
 __setup("asidmask=", asidmask_set);
@@ -168,9 +152,9 @@ static int __init tintq(char *str)
 
 __setup("tintq=", tintq);
 
-int imstuckcount[2][8];
+static int imstuckcount[2][8];
 /* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
-int vpemask[2][8] = {
+static int vpemask[2][8] = {
 	{0, 0, 1, 0, 0, 0, 0, 1},
 	{0, 0, 0, 0, 0, 0, 0, 1}
 };
@@ -540,7 +524,7 @@ void mipsmt_prepare_cpus(void)
 * (unsigned long)idle->thread_info the gp
 *
 */
-void smtc_boot_secondary(int cpu, struct task_struct *idle)
+void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 {
 	extern u32 kernelsp[NR_CPUS];
 	long flags;
@@ -876,7 +860,7 @@ void deferred_smtc_ipi(void)
 * Send clock tick to all TCs except the one executing the funtion
 */
 
-void smtc_timer_broadcast(int vpe)
+void smtc_timer_broadcast(void)
 {
 	int cpu;
 	int myTC = cpu_data[smp_processor_id()].tc_id;
@@ -975,7 +959,12 @@ static void ipi_irq_dispatch(void)
 	do_IRQ(cpu_ipi_irq);
 }
 
-static struct irqaction irq_ipi;
+static struct irqaction irq_ipi = {
+	.handler = ipi_interrupt,
+	.flags = IRQF_DISABLED,
+	.name = "SMTC_IPI",
+	.flags = IRQF_PERCPU
+};
 
 static void setup_cross_vpe_interrupts(unsigned int nvpe)
 {
@@ -987,13 +976,8 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
 
 	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
 
-	irq_ipi.handler = ipi_interrupt;
-	irq_ipi.flags = IRQF_DISABLED;
-	irq_ipi.name = "SMTC_IPI";
-
 	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
 
-	irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
 	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
 }
 
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 541b5005957e..7c800ec3ff55 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -281,16 +281,24 @@ asmlinkage int sys_set_thread_area(unsigned long addr)
 
 asmlinkage int _sys_sysmips(int cmd, long arg1, int arg2, int arg3)
 {
-	int tmp;
-
-	switch(cmd) {
+	switch (cmd) {
 	case MIPS_ATOMIC_SET:
 		printk(KERN_CRIT "How did I get here?\n");
 		return -EINVAL;
 
 	case MIPS_FIXADE:
-		tmp = current->thread.mflags & ~3;
-		current->thread.mflags = tmp | (arg1 & 3);
+		if (arg1 & ~3)
+			return -EINVAL;
+
+		if (arg1 & 1)
+			set_thread_flag(TIF_FIXADE);
+		else
+			clear_thread_flag(TIF_FIXADE);
+		if (arg1 & 2)
+			set_thread_flag(TIF_LOGADE);
+		else
+			clear_thread_flag(TIF_FIXADE);
+
 		return 0;
 
 	case FLUSH_CACHE:
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index ce277cb34dd0..c8e291c83057 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -775,7 +775,7 @@ static void mt_ase_fp_affinity(void)
 			cpus_and(tmask, current->thread.user_cpus_allowed,
 				 mt_fpu_cpumask);
 			set_cpus_allowed(current, tmask);
-			current->thread.mflags |= MF_FPUBOUND;
+			set_thread_flag(TIF_FPUBOUND);
 		}
 	}
 #endif /* CONFIG_MIPS_MT_FPAFF */
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 8b9c34ffae18..d34b1fb3665d 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -101,16 +101,14 @@ static u32 unaligned_action;
 #endif
 extern void show_registers(struct pt_regs *regs);
 
-static inline int emulate_load_store_insn(struct pt_regs *regs,
-	void __user *addr, unsigned int __user *pc,
-	unsigned long **regptr, unsigned long *newvalue)
+static void emulate_load_store_insn(struct pt_regs *regs,
+	void __user *addr, unsigned int __user *pc)
 {
 	union mips_instruction insn;
 	unsigned long value;
 	unsigned int res;
 
 	regs->regs[0] = 0;
-	*regptr=NULL;
 
 	/*
	 * This load never faults.
@@ -179,8 +177,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 			: "r" (addr), "i" (-EFAULT));
 		if (res)
 			goto fault;
-		*newvalue = value;
-		*regptr = &regs->regs[insn.i_format.rt];
+		compute_return_epc(regs);
+		regs->regs[insn.i_format.rt] = value;
 		break;
 
 	case lw_op:
@@ -209,8 +207,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 			: "r" (addr), "i" (-EFAULT));
 		if (res)
 			goto fault;
-		*newvalue = value;
-		*regptr = &regs->regs[insn.i_format.rt];
+		compute_return_epc(regs);
+		regs->regs[insn.i_format.rt] = value;
 		break;
 
 	case lhu_op:
@@ -243,8 +241,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 			: "r" (addr), "i" (-EFAULT));
 		if (res)
 			goto fault;
-		*newvalue = value;
-		*regptr = &regs->regs[insn.i_format.rt];
+		compute_return_epc(regs);
+		regs->regs[insn.i_format.rt] = value;
 		break;
 
 	case lwu_op:
@@ -283,8 +281,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 			: "r" (addr), "i" (-EFAULT));
 		if (res)
 			goto fault;
-		*newvalue = value;
-		*regptr = &regs->regs[insn.i_format.rt];
+		compute_return_epc(regs);
+		regs->regs[insn.i_format.rt] = value;
 		break;
 #endif /* CONFIG_64BIT */
 
@@ -325,8 +323,8 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 			: "r" (addr), "i" (-EFAULT));
 		if (res)
 			goto fault;
-		*newvalue = value;
-		*regptr = &regs->regs[insn.i_format.rt];
+		compute_return_epc(regs);
+		regs->regs[insn.i_format.rt] = value;
 		break;
 #endif /* CONFIG_64BIT */
 
@@ -367,6 +365,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 			: "r" (value), "r" (addr), "i" (-EFAULT));
 		if (res)
 			goto fault;
+		compute_return_epc(regs);
 		break;
 
 	case sw_op:
@@ -397,6 +396,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 			: "r" (value), "r" (addr), "i" (-EFAULT));
 		if (res)
 			goto fault;
+		compute_return_epc(regs);
 		break;
 
 	case sd_op:
@@ -435,6 +435,7 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 			: "r" (value), "r" (addr), "i" (-EFAULT));
 		if (res)
 			goto fault;
+		compute_return_epc(regs);
 		break;
 #endif /* CONFIG_64BIT */
 
@@ -473,34 +474,31 @@ static inline int emulate_load_store_insn(struct pt_regs *regs,
 	unaligned_instructions++;
 #endif
 
-	return 0;
+	return;
 
 fault:
 	/* Did we have an exception handler installed? */
 	if (fixup_exception(regs))
-		return 1;
+		return;
 
 	die_if_kernel ("Unhandled kernel unaligned access", regs);
 	send_sig(SIGSEGV, current, 1);
 
-	return 0;
+	return;
 
 sigbus:
 	die_if_kernel("Unhandled kernel unaligned access", regs);
 	send_sig(SIGBUS, current, 1);
 
-	return 0;
+	return;
 
 sigill:
 	die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
 	send_sig(SIGILL, current, 1);
-
-	return 0;
 }
 
 asmlinkage void do_ade(struct pt_regs *regs)
 {
-	unsigned long *regptr, newval;
 	extern int do_dsemulret(struct pt_regs *);
 	unsigned int __user *pc;
 	mm_segment_t seg;
@@ -524,7 +522,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
 		goto sigbus;
 
 	pc = (unsigned int __user *) exception_epc(regs);
-	if (user_mode(regs) && (current->thread.mflags & MF_FIXADE) == 0)
+	if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
 		goto sigbus;
 	if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
 		goto sigbus;
@@ -538,16 +536,7 @@ asmlinkage void do_ade(struct pt_regs *regs)
 	seg = get_fs();
 	if (!user_mode(regs))
 		set_fs(KERNEL_DS);
-	if (!emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc,
-				     &regptr, &newval)) {
-		compute_return_epc(regs);
-		/*
-		 * Now that branch is evaluated, update the dest
-		 * register if necessary
-		 */
-		if (regptr)
-			*regptr = newval;
-	}
+	emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
 	set_fs(seg);
 
 	return;
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c index a2bee10f04cf..3c09b9785f4c 100644 --- a/arch/mips/kernel/vpe.c +++ b/arch/mips/kernel/vpe.c | |||
@@ -27,7 +27,6 @@ | |||
27 | * To load and run, simply cat a SP 'program file' to /dev/vpe1. | 27 | * To load and run, simply cat a SP 'program file' to /dev/vpe1. |
28 | * i.e cat spapp >/dev/vpe1. | 28 | * i.e cat spapp >/dev/vpe1. |
29 | */ | 29 | */ |
30 | |||
31 | #include <linux/kernel.h> | 30 | #include <linux/kernel.h> |
32 | #include <linux/device.h> | 31 | #include <linux/device.h> |
33 | #include <linux/module.h> | 32 | #include <linux/module.h> |
@@ -54,6 +53,7 @@ | |||
54 | #include <asm/system.h> | 53 | #include <asm/system.h> |
55 | #include <asm/vpe.h> | 54 | #include <asm/vpe.h> |
56 | #include <asm/kspd.h> | 55 | #include <asm/kspd.h> |
56 | #include <asm/mips_mt.h> | ||
57 | 57 | ||
58 | typedef void *vpe_handle; | 58 | typedef void *vpe_handle; |
59 | 59 | ||
@@ -64,6 +64,10 @@ typedef void *vpe_handle; | |||
64 | /* If this is set, the section belongs in the init part of the module */ | 64 | /* If this is set, the section belongs in the init part of the module */ |
65 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) | 65 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) |
66 | 66 | ||
67 | /* | ||
68 | * The number of TCs and VPEs physically available on the core | ||
69 | */ | ||
70 | static int hw_tcs, hw_vpes; | ||
67 | static char module_name[] = "vpe"; | 71 | static char module_name[] = "vpe"; |
68 | static int major; | 72 | static int major; |
69 | static const int minor = 1; /* fixed for now */ | 73 | static const int minor = 1; /* fixed for now */ |
@@ -126,20 +130,17 @@ struct vpe { | |||
126 | 130 | ||
127 | /* the list of who wants to know when something major happens */ | 131 | /* the list of who wants to know when something major happens */ |
128 | struct list_head notify; | 132 | struct list_head notify; |
133 | |||
134 | unsigned int ntcs; | ||
129 | }; | 135 | }; |
130 | 136 | ||
131 | struct tc { | 137 | struct tc { |
132 | enum tc_state state; | 138 | enum tc_state state; |
133 | int index; | 139 | int index; |
134 | 140 | ||
135 | /* parent VPE */ | 141 | struct vpe *pvpe; /* parent VPE */ |
136 | struct vpe *pvpe; | 142 | struct list_head tc; /* The list of TC's with this VPE */ |
137 | 143 | struct list_head list; /* The global list of tc's */ | |
138 | /* The list of TC's with this VPE */ | ||
139 | struct list_head tc; | ||
140 | |||
141 | /* The global list of tc's */ | ||
142 | struct list_head list; | ||
143 | }; | 144 | }; |
144 | 145 | ||
145 | struct { | 146 | struct { |
@@ -217,18 +218,17 @@ struct vpe *alloc_vpe(int minor) | |||
217 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ | 218 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ |
218 | struct tc *alloc_tc(int index) | 219 | struct tc *alloc_tc(int index) |
219 | { | 220 | { |
220 | struct tc *t; | 221 | struct tc *tc; |
221 | 222 | ||
222 | if ((t = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) { | 223 | if ((tc = kzalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) |
223 | return NULL; | 224 | goto out; |
224 | } | ||
225 | |||
226 | INIT_LIST_HEAD(&t->tc); | ||
227 | list_add_tail(&t->list, &vpecontrol.tc_list); | ||
228 | 225 | ||
229 | t->index = index; | 226 | INIT_LIST_HEAD(&tc->tc); |
227 | tc->index = index; | ||
228 | list_add_tail(&tc->list, &vpecontrol.tc_list); | ||
230 | 229 | ||
231 | return t; | 230 | out: |
231 | return tc; | ||
232 | } | 232 | } |
233 | 233 | ||
234 | /* clean up and free everything */ | 234 | /* clean up and free everything */ |
@@ -663,66 +663,48 @@ static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex, | |||
663 | } | 663 | } |
664 | #endif | 664 | #endif |
665 | 665 | ||
666 | static void dump_tc(struct tc *t) | ||
667 | { | ||
668 | unsigned long val; | ||
669 | |||
670 | settc(t->index); | ||
671 | printk(KERN_DEBUG "VPE loader: TC index %d targtc %ld " | ||
672 | "TCStatus 0x%lx halt 0x%lx\n", | ||
673 | t->index, read_c0_vpecontrol() & VPECONTROL_TARGTC, | ||
674 | read_tc_c0_tcstatus(), read_tc_c0_tchalt()); | ||
675 | |||
676 | printk(KERN_DEBUG " tcrestart 0x%lx\n", read_tc_c0_tcrestart()); | ||
677 | printk(KERN_DEBUG " tcbind 0x%lx\n", read_tc_c0_tcbind()); | ||
678 | |||
679 | val = read_c0_vpeconf0(); | ||
680 | printk(KERN_DEBUG " VPEConf0 0x%lx MVP %ld\n", val, | ||
681 | (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT); | ||
682 | |||
683 | printk(KERN_DEBUG " c0 status 0x%lx\n", read_vpe_c0_status()); | ||
684 | printk(KERN_DEBUG " c0 cause 0x%lx\n", read_vpe_c0_cause()); | ||
685 | |||
686 | printk(KERN_DEBUG " c0 badvaddr 0x%lx\n", read_vpe_c0_badvaddr()); | ||
687 | printk(KERN_DEBUG " c0 epc 0x%lx\n", read_vpe_c0_epc()); | ||
688 | } | ||
689 | |||
690 | static void dump_tclist(void) | ||
691 | { | ||
692 | struct tc *t; | ||
693 | |||
694 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
695 | dump_tc(t); | ||
696 | } | ||
697 | } | ||
698 | |||
699 | /* We are prepared so configure and start the VPE... */ | 666 | /* We are prepared so configure and start the VPE... */ |
700 | static int vpe_run(struct vpe * v) | 667 | static int vpe_run(struct vpe * v) |
701 | { | 668 | { |
669 | unsigned long flags, val, dmt_flag; | ||
702 | struct vpe_notifications *n; | 670 | struct vpe_notifications *n; |
703 | unsigned long val, dmt_flag; | 671 | unsigned int vpeflags; |
704 | struct tc *t; | 672 | struct tc *t; |
705 | 673 | ||
706 | /* check we are the Master VPE */ | 674 | /* check we are the Master VPE */ |
675 | local_irq_save(flags); | ||
707 | val = read_c0_vpeconf0(); | 676 | val = read_c0_vpeconf0(); |
708 | if (!(val & VPECONF0_MVP)) { | 677 | if (!(val & VPECONF0_MVP)) { |
709 | printk(KERN_WARNING | 678 | printk(KERN_WARNING |
710 | "VPE loader: only Master VPE's are allowed to configure MT\n"); | 679 | "VPE loader: only Master VPE's are allowed to configure MT\n"); |
680 | local_irq_restore(flags); | ||
681 | |||
711 | return -1; | 682 | return -1; |
712 | } | 683 | } |
713 | 684 | ||
714 | /* disable MT (using dvpe) */ | 685 | dmt_flag = dmt(); |
715 | dvpe(); | 686 | vpeflags = dvpe(); |
716 | 687 | ||
717 | if (!list_empty(&v->tc)) { | 688 | if (!list_empty(&v->tc)) { |
718 | if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { | 689 | if ((t = list_entry(v->tc.next, struct tc, tc)) == NULL) { |
719 | printk(KERN_WARNING "VPE loader: TC %d is already in use.\n", | 690 | evpe(vpeflags); |
720 | t->index); | 691 | emt(dmt_flag); |
692 | local_irq_restore(flags); | ||
693 | |||
694 | printk(KERN_WARNING | ||
695 | "VPE loader: TC %d is already in use.\n", | ||
696 | t->index); | ||
721 | return -ENOEXEC; | 697 | return -ENOEXEC; |
722 | } | 698 | } |
723 | } else { | 699 | } else { |
724 | printk(KERN_WARNING "VPE loader: No TC's associated with VPE %d\n", | 700 | evpe(vpeflags); |
701 | emt(dmt_flag); | ||
702 | local_irq_restore(flags); | ||
703 | |||
704 | printk(KERN_WARNING | ||
705 | "VPE loader: No TC's associated with VPE %d\n", | ||
725 | v->minor); | 706 | v->minor); |
707 | |||
726 | return -ENOEXEC; | 708 | return -ENOEXEC; |
727 | } | 709 | } |
728 | 710 | ||
@@ -733,21 +715,20 @@ static int vpe_run(struct vpe * v) | |||
733 | 715 | ||
734 | /* should check it is halted, and not activated */ | 716 | /* should check it is halted, and not activated */ |
735 | if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { | 717 | if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { |
736 | printk(KERN_WARNING "VPE loader: TC %d is already doing something!\n", | 718 | evpe(vpeflags); |
719 | emt(dmt_flag); | ||
720 | local_irq_restore(flags); | ||
721 | |||
722 | printk(KERN_WARNING "VPE loader: TC %d is already active!\n", | ||
737 | t->index); | 723 | t->index); |
738 | dump_tclist(); | 724 | |
739 | return -ENOEXEC; | 725 | return -ENOEXEC; |
740 | } | 726 | } |
741 | 727 | ||
742 | /* | ||
743 | * Disable multi-threaded execution whilst we activate, clear the | ||
744 | * halt bit and bound the tc to the other VPE... | ||
745 | */ | ||
746 | dmt_flag = dmt(); | ||
747 | |||
748 | /* Write the address we want it to start running from in the TCPC register. */ | 728 | /* Write the address we want it to start running from in the TCPC register. */ |
749 | write_tc_c0_tcrestart((unsigned long)v->__start); | 729 | write_tc_c0_tcrestart((unsigned long)v->__start); |
750 | write_tc_c0_tccontext((unsigned long)0); | 730 | write_tc_c0_tccontext((unsigned long)0); |
731 | |||
751 | /* | 732 | /* |
752 | * Mark the TC as activated, not interrupt exempt and not dynamically | 733 | * Mark the TC as activated, not interrupt exempt and not dynamically |
753 | * allocatable | 734 | * allocatable |
@@ -763,15 +744,15 @@ static int vpe_run(struct vpe * v) | |||
763 | * here... Or set $a3 to zero and define DFLT_STACK_SIZE and | 744 | * here... Or set $a3 to zero and define DFLT_STACK_SIZE and |
764 | * DFLT_HEAP_SIZE when you compile your program | 745 | * DFLT_HEAP_SIZE when you compile your program |
765 | */ | 746 | */ |
766 | mttgpr(7, physical_memsize); | 747 | mttgpr(6, v->ntcs); |
767 | 748 | mttgpr(7, physical_memsize); | |
768 | 749 | ||
769 | /* set up VPE1 */ | 750 | /* set up VPE1 */ |
770 | /* | 751 | /* |
771 | * bind the TC to VPE 1 as late as possible so we only have the final | 752 | * bind the TC to VPE 1 as late as possible so we only have the final |
772 | * VPE registers to set up, and so an EJTAG probe can trigger on it | 753 | * VPE registers to set up, and so an EJTAG probe can trigger on it |
773 | */ | 754 | */ |
774 | write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | v->minor); | 755 | write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); |
775 | 756 | ||
776 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); | 757 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); |
777 | 758 | ||
@@ -793,15 +774,16 @@ static int vpe_run(struct vpe * v) | |||
793 | /* take system out of configuration state */ | 774 | /* take system out of configuration state */ |
794 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 775 | clear_c0_mvpcontrol(MVPCONTROL_VPC); |
795 | 776 | ||
796 | /* now safe to re-enable multi-threading */ | 777 | #ifdef CONFIG_SMP |
797 | emt(dmt_flag); | ||
798 | |||
799 | /* set it running */ | ||
800 | evpe(EVPE_ENABLE); | 778 | evpe(EVPE_ENABLE); |
779 | #else | ||
780 | evpe(vpeflags); | ||
781 | #endif | ||
782 | emt(dmt_flag); | ||
783 | local_irq_restore(flags); | ||
801 | 784 | ||
802 | list_for_each_entry(n, &v->notify, list) { | 785 | list_for_each_entry(n, &v->notify, list) |
803 | n->start(v->minor); | 786 | n->start(minor); |
804 | } | ||
805 | 787 | ||
806 | return 0; | 788 | return 0; |
807 | } | 789 | } |
@@ -1023,23 +1005,15 @@ static int vpe_elfload(struct vpe * v) | |||
1023 | return 0; | 1005 | return 0; |
1024 | } | 1006 | } |
1025 | 1007 | ||
1026 | void __used dump_vpe(struct vpe * v) | ||
1027 | { | ||
1028 | struct tc *t; | ||
1029 | |||
1030 | settc(v->minor); | ||
1031 | |||
1032 | printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol()); | ||
1033 | printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0()); | ||
1034 | |||
1035 | list_for_each_entry(t, &vpecontrol.tc_list, list) | ||
1036 | dump_tc(t); | ||
1037 | } | ||
1038 | |||
1039 | static void cleanup_tc(struct tc *tc) | 1008 | static void cleanup_tc(struct tc *tc) |
1040 | { | 1009 | { |
1010 | unsigned long flags; | ||
1011 | unsigned int mtflags, vpflags; | ||
1041 | int tmp; | 1012 | int tmp; |
1042 | 1013 | ||
1014 | local_irq_save(flags); | ||
1015 | mtflags = dmt(); | ||
1016 | vpflags = dvpe(); | ||
1043 | /* Put MVPE's into 'configuration state' */ | 1017 | /* Put MVPE's into 'configuration state' */ |
1044 | set_c0_mvpcontrol(MVPCONTROL_VPC); | 1018 | set_c0_mvpcontrol(MVPCONTROL_VPC); |
1045 | 1019 | ||
@@ -1054,9 +1028,12 @@ static void cleanup_tc(struct tc *tc) | |||
1054 | write_tc_c0_tchalt(TCHALT_H); | 1028 | write_tc_c0_tchalt(TCHALT_H); |
1055 | 1029 | ||
1056 | /* bind it to anything other than VPE1 */ | 1030 | /* bind it to anything other than VPE1 */ |
1057 | write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE | 1031 | // write_tc_c0_tcbind(read_tc_c0_tcbind() & ~TCBIND_CURVPE); // | TCBIND_CURVPE |
1058 | 1032 | ||
1059 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 1033 | clear_c0_mvpcontrol(MVPCONTROL_VPC); |
1034 | evpe(vpflags); | ||
1035 | emt(mtflags); | ||
1036 | local_irq_restore(flags); | ||
1060 | } | 1037 | } |
1061 | 1038 | ||
1062 | static int getcwd(char *buff, int size) | 1039 | static int getcwd(char *buff, int size) |
@@ -1077,36 +1054,32 @@ static int getcwd(char *buff, int size) | |||
1077 | /* checks VPE is unused and gets ready to load program */ | 1054 | /* checks VPE is unused and gets ready to load program */ |
1078 | static int vpe_open(struct inode *inode, struct file *filp) | 1055 | static int vpe_open(struct inode *inode, struct file *filp) |
1079 | { | 1056 | { |
1080 | int minor, ret; | ||
1081 | enum vpe_state state; | 1057 | enum vpe_state state; |
1082 | struct vpe *v; | ||
1083 | struct vpe_notifications *not; | 1058 | struct vpe_notifications *not; |
1059 | struct vpe *v; | ||
1060 | int ret; | ||
1084 | 1061 | ||
1085 | /* assume only 1 device at the mo. */ | 1062 | if (minor != iminor(inode)) { |
1086 | if ((minor = iminor(inode)) != 1) { | 1063 | /* assume only 1 device at the moment. */ |
1087 | printk(KERN_WARNING "VPE loader: only vpe1 is supported\n"); | 1064 | printk(KERN_WARNING "VPE loader: only vpe1 is supported\n"); |
1088 | return -ENODEV; | 1065 | return -ENODEV; |
1089 | } | 1066 | } |
1090 | 1067 | ||
1091 | if ((v = get_vpe(minor)) == NULL) { | 1068 | if ((v = get_vpe(tclimit)) == NULL) { |
1092 | printk(KERN_WARNING "VPE loader: unable to get vpe\n"); | 1069 | printk(KERN_WARNING "VPE loader: unable to get vpe\n"); |
1093 | return -ENODEV; | 1070 | return -ENODEV; |
1094 | } | 1071 | } |
1095 | 1072 | ||
1096 | state = xchg(&v->state, VPE_STATE_INUSE); | 1073 | state = xchg(&v->state, VPE_STATE_INUSE); |
1097 | if (state != VPE_STATE_UNUSED) { | 1074 | if (state != VPE_STATE_UNUSED) { |
1098 | dvpe(); | ||
1099 | |||
1100 | printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n"); | 1075 | printk(KERN_DEBUG "VPE loader: tc in use dumping regs\n"); |
1101 | 1076 | ||
1102 | dump_tc(get_tc(minor)); | ||
1103 | |||
1104 | list_for_each_entry(not, &v->notify, list) { | 1077 | list_for_each_entry(not, &v->notify, list) { |
1105 | not->stop(minor); | 1078 | not->stop(tclimit); |
1106 | } | 1079 | } |
1107 | 1080 | ||
1108 | release_progmem(v->load_addr); | 1081 | release_progmem(v->load_addr); |
1109 | cleanup_tc(get_tc(minor)); | 1082 | cleanup_tc(get_tc(tclimit)); |
1110 | } | 1083 | } |
1111 | 1084 | ||
1112 | /* this of-course trashes what was there before... */ | 1085 | /* this of-course trashes what was there before... */ |
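vpe_open() keeps its claim-then-recover pattern: xchg() atomically marks the VPE in use and returns the previous state, so a stale user that loaded a program but never released the device is torn down (notifiers stopped, program memory released, TC cleaned up) before the new open proceeds; only the index used for lookup changes from minor to tclimit. A small sketch of that pattern with illustrative names (my_vpe_state and recover_stale_user are not from the patch), assuming xchg() from the 2.6.23-era <asm/system.h>:

    #include <asm/system.h>

    enum vpe_state { VPE_STATE_UNUSED, VPE_STATE_INUSE };

    static enum vpe_state my_vpe_state = VPE_STATE_UNUSED;

    static void claim_vpe(void (*recover_stale_user)(void))
    {
            enum vpe_state old = xchg(&my_vpe_state, VPE_STATE_INUSE);

            /* If a previous user never released the VPE, clean it up
             * before reuse -- exactly what vpe_open() does above. */
            if (old != VPE_STATE_UNUSED)
                    recover_stale_user();
    }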
@@ -1133,26 +1106,25 @@ static int vpe_open(struct inode *inode, struct file *filp) | |||
1133 | 1106 | ||
1134 | v->shared_ptr = NULL; | 1107 | v->shared_ptr = NULL; |
1135 | v->__start = 0; | 1108 | v->__start = 0; |
1109 | |||
1136 | return 0; | 1110 | return 0; |
1137 | } | 1111 | } |
1138 | 1112 | ||
1139 | static int vpe_release(struct inode *inode, struct file *filp) | 1113 | static int vpe_release(struct inode *inode, struct file *filp) |
1140 | { | 1114 | { |
1141 | int minor, ret = 0; | ||
1142 | struct vpe *v; | 1115 | struct vpe *v; |
1143 | Elf_Ehdr *hdr; | 1116 | Elf_Ehdr *hdr; |
1117 | int ret = 0; | ||
1144 | 1118 | ||
1145 | minor = iminor(inode); | 1119 | v = get_vpe(tclimit); |
1146 | if ((v = get_vpe(minor)) == NULL) | 1120 | if (v == NULL) |
1147 | return -ENODEV; | 1121 | return -ENODEV; |
1148 | 1122 | ||
1149 | // simple case of fire and forget, so tell the VPE to run... | ||
1150 | |||
1151 | hdr = (Elf_Ehdr *) v->pbuffer; | 1123 | hdr = (Elf_Ehdr *) v->pbuffer; |
1152 | if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) { | 1124 | if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) { |
1153 | if (vpe_elfload(v) >= 0) | 1125 | if (vpe_elfload(v) >= 0) { |
1154 | vpe_run(v); | 1126 | vpe_run(v); |
1155 | else { | 1127 | } else { |
1156 | printk(KERN_WARNING "VPE loader: ELF load failed.\n"); | 1128 | printk(KERN_WARNING "VPE loader: ELF load failed.\n"); |
1157 | ret = -ENOEXEC; | 1129 | ret = -ENOEXEC; |
1158 | } | 1130 | } |
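vpe_release() still decides between loading and rejecting the buffer purely on the ELF identification bytes: the first four bytes of e_ident must match ELFMAG ("\177ELF") before vpe_elfload()/vpe_run() are attempted. A self-contained userspace illustration of the same check (not kernel code; looks_like_elf is an invented name):

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    /* Same test as the memcmp() above: accept the image only if it
     * starts with the ELF magic "\177ELF". */
    static int looks_like_elf(const void *image)
    {
            const Elf32_Ehdr *hdr = image;

            return memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0;
    }

    int main(void)
    {
            static const unsigned char bogus[sizeof(Elf32_Ehdr)] = { 0 };

            printf("%d\n", looks_like_elf(bogus));  /* prints 0 */
            return 0;
    }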
@@ -1179,12 +1151,14 @@ static int vpe_release(struct inode *inode, struct file *filp) | |||
1179 | static ssize_t vpe_write(struct file *file, const char __user * buffer, | 1151 | static ssize_t vpe_write(struct file *file, const char __user * buffer, |
1180 | size_t count, loff_t * ppos) | 1152 | size_t count, loff_t * ppos) |
1181 | { | 1153 | { |
1182 | int minor; | ||
1183 | size_t ret = count; | 1154 | size_t ret = count; |
1184 | struct vpe *v; | 1155 | struct vpe *v; |
1185 | 1156 | ||
1186 | minor = iminor(file->f_path.dentry->d_inode); | 1157 | if (iminor(file->f_path.dentry->d_inode) != minor) |
1187 | if ((v = get_vpe(minor)) == NULL) | 1158 | return -ENODEV; |
1159 | |||
1160 | v = get_vpe(tclimit); | ||
1161 | if (v == NULL) | ||
1188 | return -ENODEV; | 1162 | return -ENODEV; |
1189 | 1163 | ||
1190 | if (v->pbuffer == NULL) { | 1164 | if (v->pbuffer == NULL) { |
@@ -1366,62 +1340,173 @@ static void kspd_sp_exit( int sp_id) | |||
1366 | } | 1340 | } |
1367 | #endif | 1341 | #endif |
1368 | 1342 | ||
1369 | static struct device *vpe_dev; | 1343 | static ssize_t store_kill(struct class_device *dev, const char *buf, size_t len) |
1344 | { | ||
1345 | struct vpe *vpe = get_vpe(tclimit); | ||
1346 | struct vpe_notifications *not; | ||
1347 | |||
1348 | list_for_each_entry(not, &vpe->notify, list) { | ||
1349 | not->stop(tclimit); | ||
1350 | } | ||
1351 | |||
1352 | release_progmem(vpe->load_addr); | ||
1353 | cleanup_tc(get_tc(tclimit)); | ||
1354 | vpe_stop(vpe); | ||
1355 | vpe_free(vpe); | ||
1356 | |||
1357 | return len; | ||
1358 | } | ||
1359 | |||
1360 | static ssize_t show_ntcs(struct class_device *cd, char *buf) | ||
1361 | { | ||
1362 | struct vpe *vpe = get_vpe(tclimit); | ||
1363 | |||
1364 | return sprintf(buf, "%d\n", vpe->ntcs); | ||
1365 | } | ||
1366 | |||
1367 | static ssize_t store_ntcs(struct class_device *dev, const char *buf, size_t len) | ||
1368 | { | ||
1369 | struct vpe *vpe = get_vpe(tclimit); | ||
1370 | unsigned long new; | ||
1371 | char *endp; | ||
1372 | |||
1373 | new = simple_strtoul(buf, &endp, 0); | ||
1374 | if (endp == buf) | ||
1375 | goto out_einval; | ||
1376 | |||
1377 | if (new == 0 || new > (hw_tcs - tclimit)) | ||
1378 | goto out_einval; | ||
1379 | |||
1380 | vpe->ntcs = new; | ||
1381 | |||
1382 | return len; | ||
1383 | |||
1384 | out_einval: | ||
1385 | return -EINVAL; | ||

1386 | } | ||
1387 | |||
1388 | static struct class_device_attribute vpe_class_attributes[] = { | ||
1389 | __ATTR(kill, S_IWUSR, NULL, store_kill), | ||
1390 | __ATTR(ntcs, S_IRUGO | S_IWUSR, show_ntcs, store_ntcs), | ||
1391 | {} | ||
1392 | }; | ||
1393 | |||
1394 | static void vpe_class_device_release(struct class_device *cd) | ||
1395 | { | ||
1396 | kfree(cd); | ||
1397 | } | ||
1398 | |||
1399 | struct class vpe_class = { | ||
1400 | .name = "vpe", | ||
1401 | .owner = THIS_MODULE, | ||
1402 | .release = vpe_class_device_release, | ||
1403 | .class_dev_attrs = vpe_class_attributes, | ||
1404 | }; | ||
1405 | |||
1406 | struct class_device vpe_device; | ||
1370 | 1407 | ||
1371 | static int __init vpe_module_init(void) | 1408 | static int __init vpe_module_init(void) |
1372 | { | 1409 | { |
1410 | unsigned int mtflags, vpflags; | ||
1411 | unsigned long flags, val; | ||
1373 | struct vpe *v = NULL; | 1412 | struct vpe *v = NULL; |
1374 | struct device *dev; | ||
1375 | struct tc *t; | 1413 | struct tc *t; |
1376 | unsigned long val; | 1414 | int tc, err; |
1377 | int i, err; | ||
1378 | 1415 | ||
1379 | if (!cpu_has_mipsmt) { | 1416 | if (!cpu_has_mipsmt) { |
1380 | printk("VPE loader: not a MIPS MT capable processor\n"); | 1417 | printk("VPE loader: not a MIPS MT capable processor\n"); |
1381 | return -ENODEV; | 1418 | return -ENODEV; |
1382 | } | 1419 | } |
1383 | 1420 | ||
1421 | if (vpelimit == 0) { | ||
1422 | printk(KERN_WARNING "No VPEs reserved for AP/SP, not " | ||
1423 | "initializing VPE loader.\nPass maxvpes=<n> argument as " | ||
1424 | "kernel argument\n"); | ||
1425 | |||
1426 | return -ENODEV; | ||
1427 | } | ||
1428 | |||
1429 | if (tclimit == 0) { | ||
1430 | printk(KERN_WARNING "No TCs reserved for AP/SP, not " | ||
1431 | "initializing VPE loader.\nPass maxtcs=<n> argument as " | ||
1432 | "kernel argument\n"); | ||
1433 | |||
1434 | return -ENODEV; | ||
1435 | } | ||
1436 | |||
1384 | major = register_chrdev(0, module_name, &vpe_fops); | 1437 | major = register_chrdev(0, module_name, &vpe_fops); |
1385 | if (major < 0) { | 1438 | if (major < 0) { |
1386 | printk("VPE loader: unable to register character device\n"); | 1439 | printk("VPE loader: unable to register character device\n"); |
1387 | return major; | 1440 | return major; |
1388 | } | 1441 | } |
1389 | 1442 | ||
1390 | dev = device_create(mt_class, NULL, MKDEV(major, minor), | 1443 | err = class_register(&vpe_class); |
1391 | "tc%d", minor); | 1444 | if (err) { |
1392 | if (IS_ERR(dev)) { | 1445 | printk(KERN_ERR "vpe_class registration failed\n"); |
1393 | err = PTR_ERR(dev); | ||
1394 | goto out_chrdev; | 1446 | goto out_chrdev; |
1395 | } | 1447 | } |
1396 | vpe_dev = dev; | ||
1397 | 1448 | ||
1398 | dmt(); | 1449 | class_device_initialize(&vpe_device); |
1399 | dvpe(); | 1450 | vpe_device.class = &vpe_class; |
1451 | vpe_device.parent = NULL; | ||
1452 | strlcpy(vpe_device.class_id, "vpe1", BUS_ID_SIZE); | ||
1453 | vpe_device.devt = MKDEV(major, minor); | ||
1454 | err = class_device_add(&vpe_device); | ||
1455 | if (err) { | ||
1456 | printk(KERN_ERR "Adding vpe_device failed\n"); | ||
1457 | goto out_class; | ||
1458 | } | ||
1459 | |||
1460 | local_irq_save(flags); | ||
1461 | mtflags = dmt(); | ||
1462 | vpflags = dvpe(); | ||
1400 | 1463 | ||
1401 | /* Put MVPE's into 'configuration state' */ | 1464 | /* Put MVPE's into 'configuration state' */ |
1402 | set_c0_mvpcontrol(MVPCONTROL_VPC); | 1465 | set_c0_mvpcontrol(MVPCONTROL_VPC); |
1403 | 1466 | ||
1404 | /* dump_mtregs(); */ | 1467 | /* dump_mtregs(); */ |
1405 | 1468 | ||
1406 | |||
1407 | val = read_c0_mvpconf0(); | 1469 | val = read_c0_mvpconf0(); |
1408 | for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) { | 1470 | hw_tcs = (val & MVPCONF0_PTC) + 1; |
1409 | t = alloc_tc(i); | 1471 | hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; |
1472 | |||
1473 | for (tc = tclimit; tc < hw_tcs; tc++) { | ||
1474 | /* | ||
1475 | * Must re-enable multithreading temporarily, or we might hang if | ||
1476 | * we reschedule, send IPIs or similar while it is disabled. | ||
1477 | */ | ||
1478 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1479 | evpe(vpflags); | ||
1480 | emt(mtflags); | ||
1481 | local_irq_restore(flags); | ||
1482 | t = alloc_tc(tc); | ||
1483 | if (!t) { | ||
1484 | err = -ENOMEM; | ||
1485 | goto out; | ||
1486 | } | ||
1487 | |||
1488 | local_irq_save(flags); | ||
1489 | mtflags = dmt(); | ||
1490 | vpflags = dvpe(); | ||
1491 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
1410 | 1492 | ||
1411 | /* VPE's */ | 1493 | /* VPE's */ |
1412 | if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) { | 1494 | if (tc < hw_tcs) { |
1413 | settc(i); | 1495 | settc(tc); |
1414 | 1496 | ||
1415 | if ((v = alloc_vpe(i)) == NULL) { | 1497 | if ((v = alloc_vpe(tc)) == NULL) { |
1416 | printk(KERN_WARNING "VPE: unable to allocate VPE\n"); | 1498 | printk(KERN_WARNING "VPE: unable to allocate VPE\n"); |
1417 | return -ENODEV; | 1499 | |
1500 | goto out_reenable; | ||
1418 | } | 1501 | } |
1419 | 1502 | ||
1503 | v->ntcs = hw_tcs - tclimit; | ||
1504 | |||
1420 | /* add the tc to the list of this vpe's tc's. */ | 1505 | /* add the tc to the list of this vpe's tc's. */ |
1421 | list_add(&t->tc, &v->tc); | 1506 | list_add(&t->tc, &v->tc); |
1422 | 1507 | ||
1423 | /* deactivate all but vpe0 */ | 1508 | /* deactivate all but vpe0 */ |
1424 | if (i != 0) { | 1509 | if (tc >= tclimit) { |
1425 | unsigned long tmp = read_vpe_c0_vpeconf0(); | 1510 | unsigned long tmp = read_vpe_c0_vpeconf0(); |
1426 | 1511 | ||
1427 | tmp &= ~VPECONF0_VPA; | 1512 | tmp &= ~VPECONF0_VPA; |
@@ -1434,7 +1519,7 @@ static int __init vpe_module_init(void) | |||
1434 | /* disable multi-threading with TC's */ | 1519 | /* disable multi-threading with TC's */ |
1435 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); | 1520 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); |
1436 | 1521 | ||
1437 | if (i != 0) { | 1522 | if (tc >= vpelimit) { |
1438 | /* | 1523 | /* |
1439 | * Set config to be the same as vpe0, | 1524 | * Set config to be the same as vpe0, |
1440 | * particularly kseg0 coherency alg | 1525 | * particularly kseg0 coherency alg |
@@ -1446,10 +1531,10 @@ static int __init vpe_module_init(void) | |||
1446 | /* TC's */ | 1531 | /* TC's */ |
1447 | t->pvpe = v; /* set the parent vpe */ | 1532 | t->pvpe = v; /* set the parent vpe */ |
1448 | 1533 | ||
1449 | if (i != 0) { | 1534 | if (tc >= tclimit) { |
1450 | unsigned long tmp; | 1535 | unsigned long tmp; |
1451 | 1536 | ||
1452 | settc(i); | 1537 | settc(tc); |
1453 | 1538 | ||
1454 | /* Any TC that is bound to VPE0 gets left as is - in case | 1539 | /* Any TC that is bound to VPE0 gets left as is - in case |
1455 | we are running SMTC on VPE0. A TC that is bound to any | 1540 | we are running SMTC on VPE0. A TC that is bound to any |
@@ -1479,17 +1564,25 @@ static int __init vpe_module_init(void) | |||
1479 | } | 1564 | } |
1480 | } | 1565 | } |
1481 | 1566 | ||
1567 | out_reenable: | ||
1482 | /* release config state */ | 1568 | /* release config state */ |
1483 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | 1569 | clear_c0_mvpcontrol(MVPCONTROL_VPC); |
1484 | 1570 | ||
1571 | evpe(vpflags); | ||
1572 | emt(mtflags); | ||
1573 | local_irq_restore(flags); | ||
1574 | |||
1485 | #ifdef CONFIG_MIPS_APSP_KSPD | 1575 | #ifdef CONFIG_MIPS_APSP_KSPD |
1486 | kspd_events.kspd_sp_exit = kspd_sp_exit; | 1576 | kspd_events.kspd_sp_exit = kspd_sp_exit; |
1487 | #endif | 1577 | #endif |
1488 | return 0; | 1578 | return 0; |
1489 | 1579 | ||
1580 | out_class: | ||
1581 | class_unregister(&vpe_class); | ||
1490 | out_chrdev: | 1582 | out_chrdev: |
1491 | unregister_chrdev(major, module_name); | 1583 | unregister_chrdev(major, module_name); |
1492 | 1584 | ||
1585 | out: | ||
1493 | return err; | 1586 | return err; |
1494 | } | 1587 | } |
1495 | 1588 | ||
@@ -1503,7 +1596,7 @@ static void __exit vpe_module_exit(void) | |||
1503 | } | 1596 | } |
1504 | } | 1597 | } |
1505 | 1598 | ||
1506 | device_destroy(mt_class, MKDEV(major, minor)); | 1599 | class_device_del(&vpe_device); |
1507 | unregister_chrdev(major, module_name); | 1600 | unregister_chrdev(major, module_name); |
1508 | } | 1601 | } |
1509 | 1602 | ||