Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                        |   72
-rw-r--r--  arch/mips/kernel/Makefile                |    4
-rw-r--r--  arch/mips/kernel/genex.S                 |   32
-rw-r--r--  arch/mips/kernel/irq-msc01.c             |    8
-rw-r--r--  arch/mips/kernel/rtlx.c                  |  341
-rw-r--r--  arch/mips/kernel/traps.c                 |  227
-rw-r--r--  arch/mips/kernel/vpe.c                   | 1295
-rw-r--r--  arch/mips/mips-boards/generic/init.c     |   29
-rw-r--r--  arch/mips/mips-boards/generic/memory.c   |   29
-rw-r--r--  arch/mips/mips-boards/generic/mipsIRQ.S  |  110
-rw-r--r--  arch/mips/mips-boards/generic/time.c     |   49
-rw-r--r--  arch/mips/mips-boards/malta/malta_int.c  |  136
-rw-r--r--  arch/mips/mm/c-r4k.c                     |    3
-rw-r--r--  arch/mips/mm/c-sb1.c                     |    5
14 files changed, 2208 insertions, 132 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 749fa871e8e7..4ef015f580f9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -13,6 +13,22 @@ mainmenu "Linux/MIPS Kernel Configuration" | |||
13 | 13 | ||
14 | source "init/Kconfig" | 14 | source "init/Kconfig" |
15 | 15 | ||
16 | config CPU_MIPS32 | ||
17 | bool | ||
18 | default y if CPU_MIPS32_R1 || CPU_MIPS32_R2 | ||
19 | |||
20 | config CPU_MIPS64 | ||
21 | bool | ||
22 | default y if CPU_MIPS64_R1 || CPU_MIPS64_R2 | ||
23 | |||
24 | config CPU_MIPSR1 | ||
25 | bool | ||
26 | default y if CPU_MIPS32_R1 || CPU_MIPS64_R1 | ||
27 | |||
28 | config CPU_MIPSR2 | ||
29 | bool | ||
30 | default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 | ||
31 | |||
16 | config SYS_SUPPORTS_32BIT_KERNEL | 32 | config SYS_SUPPORTS_32BIT_KERNEL |
17 | bool | 33 | bool |
18 | config SYS_SUPPORTS_64BIT_KERNEL | 34 | config SYS_SUPPORTS_64BIT_KERNEL |
@@ -233,6 +249,7 @@ config MIPS_EV64120 | |||
233 | bool "Support for Galileo EV64120 Evaluation board (EXPERIMENTAL)" | 249 | bool "Support for Galileo EV64120 Evaluation board (EXPERIMENTAL)" |
234 | depends on EXPERIMENTAL | 250 | depends on EXPERIMENTAL |
235 | select DMA_NONCOHERENT | 251 | select DMA_NONCOHERENT |
252 | select IRQ_CPU | ||
236 | select HW_HAS_PCI | 253 | select HW_HAS_PCI |
237 | select MIPS_GT64120 | 254 | select MIPS_GT64120 |
238 | select SYS_SUPPORTS_32BIT_KERNEL | 255 | select SYS_SUPPORTS_32BIT_KERNEL |
@@ -344,6 +361,7 @@ config MIPS_MALTA | |||
344 | select BOOT_ELF32 | 361 | select BOOT_ELF32 |
345 | select HAVE_STD_PC_SERIAL_PORT | 362 | select HAVE_STD_PC_SERIAL_PORT |
346 | select DMA_NONCOHERENT | 363 | select DMA_NONCOHERENT |
364 | select IRQ_CPU | ||
347 | select GENERIC_ISA_DMA | 365 | select GENERIC_ISA_DMA |
348 | select HW_HAS_PCI | 366 | select HW_HAS_PCI |
349 | select I8259 | 367 | select I8259 |
@@ -1277,6 +1295,31 @@ config CPU_HAS_PREFETCH | |||
1277 | bool "Enable prefetches" if CPU_SB1 && !CPU_SB1_PASS_2 | 1295 | bool "Enable prefetches" if CPU_SB1 && !CPU_SB1_PASS_2 |
1278 | default y if CPU_MIPS32 || CPU_MIPS64 || CPU_RM7000 || CPU_RM9000 || CPU_R10000 | 1296 | default y if CPU_MIPS32 || CPU_MIPS64 || CPU_RM7000 || CPU_RM9000 || CPU_R10000 |
1279 | 1297 | ||
1298 | config MIPS_MT | ||
1299 | bool "Enable MIPS MT" | ||
1300 | |||
1301 | config MIPS_VPE_LOADER | ||
1302 | bool "VPE loader support." | ||
1303 | depends on MIPS_MT | ||
1304 | help | ||
1305 | Includes a loader for loading an elf relocatable object | ||
1306 | onto another VPE and running it. | ||
1307 | |||
1308 | config MIPS_VPE_LOADER_TOM | ||
1309 | bool "Load VPE program into memory hidden from linux" | ||
1310 | depends on MIPS_VPE_LOADER | ||
1311 | default y | ||
1312 | help | ||
1313 | The loader can use memory that is present but has been hidden from | ||
1314 | Linux using the kernel command line option "mem=xxMB". It's up to | ||
1315 | you to ensure that the amount you put in the option plus the space your | ||
1316 | program requires is less than or equal to the amount physically present. | ||
1317 | |||
1318 | # this should possibly be in drivers/char, but it is rather cpu related. Hmmm | ||
1319 | config MIPS_VPE_APSP_API | ||
1320 | bool "Enable support for AP/SP API (RTLX)" | ||
1321 | depends on MIPS_VPE_LOADER | ||
1322 | |||
1280 | config VTAG_ICACHE | 1323 | config VTAG_ICACHE |
1281 | bool "Support for Virtual Tagged I-cache" if CPU_MIPS64 || CPU_MIPS32 | 1324 | bool "Support for Virtual Tagged I-cache" if CPU_MIPS64 || CPU_MIPS32 |
1282 | default y if CPU_SB1 | 1325 | default y if CPU_SB1 |
@@ -1335,6 +1378,35 @@ config CPU_HAS_WB | |||
1335 | machines which require flushing of write buffers in software. Saying | 1378 | machines which require flushing of write buffers in software. Saying |
1336 | Y is the safe option; N may result in kernel malfunction and crashes. | 1379 | Y is the safe option; N may result in kernel malfunction and crashes. |
1337 | 1380 | ||
1381 | menu "MIPSR2 Interrupt handling" | ||
1382 | depends on CPU_MIPSR2 && CPU_ADVANCED | ||
1383 | |||
1384 | config CPU_MIPSR2_IRQ_VI | ||
1385 | bool "Vectored interrupt mode" | ||
1386 | help | ||
1387 | Vectored interrupt mode allowing faster dispatching of interrupts. | ||
1388 | The board support code needs to be written to take advantage of this | ||
1389 | mode. Compatibility code is included to allow the kernel to run on | ||
1390 | a CPU that does not support vectored interrupts. It's safe to | ||
1391 | say Y here. | ||
1392 | |||
1393 | config CPU_MIPSR2_IRQ_EI | ||
1394 | bool "External interrupt controller mode" | ||
1395 | help | ||
1396 | Extended interrupt mode takes advantage of an external interrupt | ||
1397 | controller to allow fast dispatching from many possible interrupt | ||
1398 | sources. Say N unless you know that external interrupt support is | ||
1399 | required. | ||
1400 | |||
1401 | config CPU_MIPSR2_SRS | ||
1402 | bool "Make shadow set registers available for interrupt handlers" | ||
1403 | depends on CPU_MIPSR2_IRQ_VI || CPU_MIPSR2_IRQ_EI | ||
1404 | help | ||
1405 | Allow the kernel to use shadow register sets for fast interrupts. | ||
1406 | Interrupt handlers must be specially written to use shadow sets. | ||
1407 | Say N unless you know that shadow register set support is needed. | ||
1408 | endmenu | ||
1409 | |||
1338 | config CPU_HAS_SYNC | 1410 | config CPU_HAS_SYNC |
1339 | bool | 1411 | bool |
1340 | depends on !CPU_R3000 | 1412 | depends on !CPU_R3000 |
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 0867417032f2..0f527063a8a8 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -34,12 +34,16 @@ obj-$(CONFIG_CPU_R6000) += r6000_fpu.o r4k_switch.o | |||
34 | 34 | ||
35 | obj-$(CONFIG_SMP) += smp.o | 35 | obj-$(CONFIG_SMP) += smp.o |
36 | 36 | ||
37 | obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o | ||
38 | obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o | ||
39 | |||
37 | obj-$(CONFIG_NO_ISA) += dma-no-isa.o | 40 | obj-$(CONFIG_NO_ISA) += dma-no-isa.o |
38 | obj-$(CONFIG_I8259) += i8259.o | 41 | obj-$(CONFIG_I8259) += i8259.o |
39 | obj-$(CONFIG_IRQ_CPU) += irq_cpu.o | 42 | obj-$(CONFIG_IRQ_CPU) += irq_cpu.o |
40 | obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o | 43 | obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o |
41 | obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o | 44 | obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o |
42 | obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o | 45 | obj-$(CONFIG_IRQ_MV64340) += irq-mv6434x.o |
46 | obj-$(CONFIG_MIPS_BOARDS_GEN) += irq-msc01.o | ||
43 | 47 | ||
44 | obj-$(CONFIG_32BIT) += scall32-o32.o | 48 | obj-$(CONFIG_32BIT) += scall32-o32.o |
45 | obj-$(CONFIG_64BIT) += scall64-64.o | 49 | obj-$(CONFIG_64BIT) += scall64-64.o |
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 349ec301168f..fd904d1e4190 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -148,6 +148,38 @@ NESTED(except_vec_ejtag_debug, 0, sp) | |||
148 | __FINIT | 148 | __FINIT |
149 | 149 | ||
150 | /* | 150 | /* |
151 | * Vectored interrupt handler. | ||
152 | * This prototype is copied to ebase + n*IntCtl.VS and patched | ||
153 | * to invoke the handler | ||
154 | */ | ||
155 | NESTED(except_vec_vi, 0, sp) | ||
156 | SAVE_SOME | ||
157 | SAVE_AT | ||
158 | .set push | ||
159 | .set noreorder | ||
160 | EXPORT(except_vec_vi_lui) | ||
161 | lui v0, 0 /* Patched */ | ||
162 | j except_vec_vi_handler | ||
163 | EXPORT(except_vec_vi_ori) | ||
164 | ori v0, 0 /* Patched */ | ||
165 | .set pop | ||
166 | END(except_vec_vi) | ||
167 | EXPORT(except_vec_vi_end) | ||
168 | |||
169 | /* | ||
170 | * Common Vectored Interrupt code | ||
171 | * Complete the register saves and invoke the handler which is passed in $v0 | ||
172 | */ | ||
173 | NESTED(except_vec_vi_handler, 0, sp) | ||
174 | SAVE_TEMP | ||
175 | SAVE_STATIC | ||
176 | CLI | ||
177 | move a0, sp | ||
178 | jalr v0 | ||
179 | j ret_from_irq | ||
180 | END(except_vec_vi_handler) | ||
181 | |||
182 | /* | ||
151 | * EJTAG debug exception handler. | 183 | * EJTAG debug exception handler. |
152 | */ | 184 | */ |
153 | NESTED(ejtag_debug_handler, PT_SIZE, sp) | 185 | NESTED(ejtag_debug_handler, PT_SIZE, sp) |
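The except_vec_vi stub above is copied to ebase + n*IntCtl.VS, and its lui/ori placeholders are patched with the address of the real dispatcher by set_vi_srs_handler() in the traps.c hunk further down. A minimal userspace C sketch of that immediate patching (the handler address below is made up for illustration and is not part of the patch):

/* Sketch: patching the two placeholder immediates so that v0 ends up
 * holding a full 32-bit handler address at exception time. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t lui_insn = 0x3c020000;   /* lui  v0, 0     (placeholder) */
	uint32_t ori_insn = 0x34420000;   /* ori  v0, v0, 0 (placeholder) */
	uint32_t handler  = 0x80401234;   /* hypothetical handler address */

	lui_insn = (lui_insn & 0xffff0000) | ((handler >> 16) & 0xffff);
	ori_insn = (ori_insn & 0xffff0000) | (handler & 0xffff);

	/* No carry fixup is needed because ori, unlike addiu, does not
	 * sign-extend its immediate. */
	assert((((lui_insn & 0xffff) << 16) | (ori_insn & 0xffff)) == handler);
	return 0;
}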
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index bf759e33c5ef..3f653c7cfbf3 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -74,7 +74,7 @@ static void disable_msc_irq(unsigned int irq) | |||
74 | static void level_mask_and_ack_msc_irq(unsigned int irq) | 74 | static void level_mask_and_ack_msc_irq(unsigned int irq) |
75 | { | 75 | { |
76 | mask_msc_irq(irq); | 76 | mask_msc_irq(irq); |
77 | if (!cpu_has_ei) | 77 | if (!cpu_has_veic) |
78 | MSCIC_WRITE(MSC01_IC_EOI, 0); | 78 | MSCIC_WRITE(MSC01_IC_EOI, 0); |
79 | } | 79 | } |
80 | 80 | ||
@@ -84,7 +84,7 @@ static void level_mask_and_ack_msc_irq(unsigned int irq) | |||
84 | static void edge_mask_and_ack_msc_irq(unsigned int irq) | 84 | static void edge_mask_and_ack_msc_irq(unsigned int irq) |
85 | { | 85 | { |
86 | mask_msc_irq(irq); | 86 | mask_msc_irq(irq); |
87 | if (!cpu_has_ei) | 87 | if (!cpu_has_veic) |
88 | MSCIC_WRITE(MSC01_IC_EOI, 0); | 88 | MSCIC_WRITE(MSC01_IC_EOI, 0); |
89 | else { | 89 | else { |
90 | u32 r; | 90 | u32 r; |
@@ -166,14 +166,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq) | |||
166 | switch (imp->im_type) { | 166 | switch (imp->im_type) { |
167 | case MSC01_IRQ_EDGE: | 167 | case MSC01_IRQ_EDGE: |
168 | irq_desc[base+n].handler = &msc_edgeirq_type; | 168 | irq_desc[base+n].handler = &msc_edgeirq_type; |
169 | if (cpu_has_ei) | 169 | if (cpu_has_veic) |
170 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); | 170 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); |
171 | else | 171 | else |
172 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); | 172 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); |
173 | break; | 173 | break; |
174 | case MSC01_IRQ_LEVEL: | 174 | case MSC01_IRQ_LEVEL: |
175 | irq_desc[base+n].handler = &msc_levelirq_type; | 175 | irq_desc[base+n].handler = &msc_levelirq_type; |
176 | if (cpu_has_ei) | 176 | if (cpu_has_veic) |
177 | MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); | 177 | MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); |
178 | else | 178 | else |
179 | MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl); | 179 | MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl); |
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
new file mode 100644
index 000000000000..8c81f3cb4e2d
--- /dev/null
+++ b/arch/mips/kernel/rtlx.c
@@ -0,0 +1,341 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can distribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License (Version 2) as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/fs.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <asm/uaccess.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/list.h> | ||
26 | #include <linux/vmalloc.h> | ||
27 | #include <linux/elf.h> | ||
28 | #include <linux/seq_file.h> | ||
29 | #include <linux/syscalls.h> | ||
30 | #include <linux/moduleloader.h> | ||
31 | #include <linux/interrupt.h> | ||
32 | #include <linux/poll.h> | ||
33 | #include <linux/sched.h> | ||
34 | #include <linux/wait.h> | ||
35 | #include <asm/mipsmtregs.h> | ||
36 | #include <asm/cacheflush.h> | ||
37 | #include <asm/atomic.h> | ||
38 | #include <asm/cpu.h> | ||
39 | #include <asm/processor.h> | ||
40 | #include <asm/system.h> | ||
41 | #include <asm/rtlx.h> | ||
42 | |||
43 | #define RTLX_MAJOR 64 | ||
44 | #define RTLX_TARG_VPE 1 | ||
45 | |||
46 | struct rtlx_info *rtlx; | ||
47 | static int major; | ||
48 | static char module_name[] = "rtlx"; | ||
49 | static inline int spacefree(int read, int write, int size); | ||
50 | |||
51 | static struct chan_waitqueues { | ||
52 | wait_queue_head_t rt_queue; | ||
53 | wait_queue_head_t lx_queue; | ||
54 | } channel_wqs[RTLX_CHANNELS]; | ||
55 | |||
56 | static struct irqaction irq; | ||
57 | static int irq_num; | ||
58 | |||
59 | extern void *vpe_get_shared(int index); | ||
60 | |||
61 | static void rtlx_dispatch(struct pt_regs *regs) | ||
62 | { | ||
63 | do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ, regs); | ||
64 | } | ||
65 | |||
66 | irqreturn_t rtlx_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
67 | { | ||
68 | irqreturn_t r = IRQ_HANDLED; | ||
69 | int i; | ||
70 | |||
71 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
72 | struct rtlx_channel *chan = &rtlx->channel[i]; | ||
73 | |||
74 | if (chan->lx_read != chan->lx_write) | ||
75 | wake_up_interruptible(&channel_wqs[i].lx_queue); | ||
76 | } | ||
77 | |||
78 | return r; | ||
79 | } | ||
80 | |||
81 | void dump_rtlx(void) | ||
82 | { | ||
83 | int i; | ||
84 | |||
85 | printk("id 0x%lx state %d\n", rtlx->id, rtlx->state); | ||
86 | |||
87 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
88 | struct rtlx_channel *chan = &rtlx->channel[i]; | ||
89 | |||
90 | printk(" rt_state %d lx_state %d buffer_size %d\n", | ||
91 | chan->rt_state, chan->lx_state, chan->buffer_size); | ||
92 | |||
93 | printk(" rt_read %d rt_write %d\n", | ||
94 | chan->rt_read, chan->rt_write); | ||
95 | |||
96 | printk(" lx_read %d lx_write %d\n", | ||
97 | chan->lx_read, chan->lx_write); | ||
98 | |||
99 | printk(" rt_buffer <%s>\n", chan->rt_buffer); | ||
100 | printk(" lx_buffer <%s>\n", chan->lx_buffer); | ||
101 | } | ||
102 | } | ||
103 | |||
104 | /* call when we have the address of the shared structure from the SP side. */ | ||
105 | static int rtlx_init(struct rtlx_info *rtlxi) | ||
106 | { | ||
107 | int i; | ||
108 | |||
109 | if (rtlxi->id != RTLX_ID) { | ||
110 | printk(KERN_WARNING "no valid RTLX id at 0x%p\n", rtlxi); | ||
111 | return (-ENOEXEC); | ||
112 | } | ||
113 | |||
114 | /* initialise the wait queues */ | ||
115 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
116 | init_waitqueue_head(&channel_wqs[i].rt_queue); | ||
117 | init_waitqueue_head(&channel_wqs[i].lx_queue); | ||
118 | } | ||
119 | |||
120 | /* set up for interrupt handling */ | ||
121 | memset(&irq, 0, sizeof(struct irqaction)); | ||
122 | |||
123 | if (cpu_has_vint) { | ||
124 | set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); | ||
125 | } | ||
126 | |||
127 | irq_num = MIPSCPU_INT_BASE + MIPS_CPU_RTLX_IRQ; | ||
128 | irq.handler = rtlx_interrupt; | ||
129 | irq.flags = SA_INTERRUPT; | ||
130 | irq.name = "RTLX"; | ||
131 | irq.dev_id = rtlx; | ||
132 | setup_irq(irq_num, &irq); | ||
133 | |||
134 | rtlx = rtlxi; | ||
135 | return (0); | ||
136 | } | ||
137 | |||
138 | /* only allow one open process at a time to open each channel */ | ||
139 | static int rtlx_open(struct inode *inode, struct file *filp) | ||
140 | { | ||
141 | int minor, ret; | ||
142 | struct rtlx_channel *chan; | ||
143 | |||
144 | /* assume only 1 device at the mo. */ | ||
145 | minor = MINOR(inode->i_rdev); | ||
146 | |||
147 | if (rtlx == NULL) { | ||
148 | struct rtlx_info **p; | ||
149 | if( (p = vpe_get_shared(RTLX_TARG_VPE)) == NULL) { | ||
150 | printk(" vpe_get_shared is NULL. Has an SP program been loaded?\n"); | ||
151 | return (-EFAULT); | ||
152 | } | ||
153 | |||
154 | if (*p == NULL) { | ||
155 | printk(" vpe_shared %p %p\n", p, *p); | ||
156 | return (-EFAULT); | ||
157 | } | ||
158 | |||
159 | if ((ret = rtlx_init(*p)) < 0) | ||
160 | return (ret); | ||
161 | } | ||
162 | |||
163 | chan = &rtlx->channel[minor]; | ||
164 | |||
165 | /* already open? */ | ||
166 | if (chan->lx_state == RTLX_STATE_OPENED) | ||
167 | return (-EBUSY); | ||
168 | |||
169 | chan->lx_state = RTLX_STATE_OPENED; | ||
170 | return (0); | ||
171 | } | ||
172 | |||
173 | static int rtlx_release(struct inode *inode, struct file *filp) | ||
174 | { | ||
175 | int minor; | ||
176 | |||
177 | minor = MINOR(inode->i_rdev); | ||
178 | rtlx->channel[minor].lx_state = RTLX_STATE_UNUSED; | ||
179 | return (0); | ||
180 | } | ||
181 | |||
182 | static unsigned int rtlx_poll(struct file *file, poll_table * wait) | ||
183 | { | ||
184 | int minor; | ||
185 | unsigned int mask = 0; | ||
186 | struct rtlx_channel *chan; | ||
187 | |||
188 | minor = MINOR(file->f_dentry->d_inode->i_rdev); | ||
189 | chan = &rtlx->channel[minor]; | ||
190 | |||
191 | poll_wait(file, &channel_wqs[minor].rt_queue, wait); | ||
192 | poll_wait(file, &channel_wqs[minor].lx_queue, wait); | ||
193 | |||
194 | /* data available to read? */ | ||
195 | if (chan->lx_read != chan->lx_write) | ||
196 | mask |= POLLIN | POLLRDNORM; | ||
197 | |||
198 | /* space to write */ | ||
199 | if (spacefree(chan->rt_read, chan->rt_write, chan->buffer_size)) | ||
200 | mask |= POLLOUT | POLLWRNORM; | ||
201 | |||
202 | return (mask); | ||
203 | } | ||
204 | |||
205 | static ssize_t rtlx_read(struct file *file, char __user * buffer, size_t count, | ||
206 | loff_t * ppos) | ||
207 | { | ||
208 | size_t fl = 0L; | ||
209 | int minor; | ||
210 | struct rtlx_channel *lx; | ||
211 | DECLARE_WAITQUEUE(wait, current); | ||
212 | |||
213 | minor = MINOR(file->f_dentry->d_inode->i_rdev); | ||
214 | lx = &rtlx->channel[minor]; | ||
215 | |||
216 | /* data available? */ | ||
217 | if (lx->lx_write == lx->lx_read) { | ||
218 | if (file->f_flags & O_NONBLOCK) | ||
219 | return (0); // -EAGAIN makes cat whinge | ||
220 | |||
221 | /* go to sleep */ | ||
222 | add_wait_queue(&channel_wqs[minor].lx_queue, &wait); | ||
223 | set_current_state(TASK_INTERRUPTIBLE); | ||
224 | |||
225 | while (lx->lx_write == lx->lx_read) | ||
226 | schedule(); | ||
227 | |||
228 | set_current_state(TASK_RUNNING); | ||
229 | remove_wait_queue(&channel_wqs[minor].lx_queue, &wait); | ||
230 | |||
231 | /* back running */ | ||
232 | } | ||
233 | |||
234 | /* find out how much in total */ | ||
235 | count = min( count, | ||
236 | (size_t)(lx->lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size); | ||
237 | |||
238 | /* then how much from the read pointer onwards */ | ||
239 | fl = min( count, (size_t)lx->buffer_size - lx->lx_read); | ||
240 | |||
241 | copy_to_user (buffer, &lx->lx_buffer[lx->lx_read], fl); | ||
242 | |||
243 | /* and if there is anything left at the beginning of the buffer */ | ||
244 | if ( count - fl ) | ||
245 | copy_to_user (buffer + fl, lx->lx_buffer, count - fl); | ||
246 | |||
247 | /* update the index */ | ||
248 | lx->lx_read += count; | ||
249 | lx->lx_read %= lx->buffer_size; | ||
250 | |||
251 | return (count); | ||
252 | } | ||
253 | |||
254 | static inline int spacefree(int read, int write, int size) | ||
255 | { | ||
256 | if (read == write) { | ||
257 | /* never fill the buffer completely, so indexes are always equal if empty | ||
258 | and only empty, or !equal if data available */ | ||
259 | return (size - 1); | ||
260 | } | ||
261 | |||
262 | return ((read + size - write) % size) - 1; | ||
263 | } | ||
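A quick userspace check of the spacefree() arithmetic above, which deliberately keeps one slot unused so that read == write can only ever mean "empty" (the buffer size is made up for the example):

#include <assert.h>

static int spacefree(int read, int write, int size)
{
	if (read == write)
		return size - 1;
	return ((read + size - write) % size) - 1;
}

int main(void)
{
	assert(spacefree(0, 0, 8) == 7);   /* empty buffer: size - 1 free   */
	assert(spacefree(0, 7, 8) == 0);   /* one slot kept unused: full    */
	assert(spacefree(3, 1, 8) == 1);   /* write may only advance to 2   */
	return 0;
}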
264 | |||
265 | static ssize_t rtlx_write(struct file *file, const char __user * buffer, | ||
266 | size_t count, loff_t * ppos) | ||
267 | { | ||
268 | int minor; | ||
269 | struct rtlx_channel *rt; | ||
270 | size_t fl; | ||
271 | DECLARE_WAITQUEUE(wait, current); | ||
272 | |||
273 | minor = MINOR(file->f_dentry->d_inode->i_rdev); | ||
274 | rt = &rtlx->channel[minor]; | ||
275 | |||
276 | /* any space left... */ | ||
277 | if (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) { | ||
278 | |||
279 | if (file->f_flags & O_NONBLOCK) | ||
280 | return (-EAGAIN); | ||
281 | |||
282 | add_wait_queue(&channel_wqs[minor].rt_queue, &wait); | ||
283 | set_current_state(TASK_INTERRUPTIBLE); | ||
284 | |||
285 | while (!spacefree(rt->rt_read, rt->rt_write, rt->buffer_size)) | ||
286 | schedule(); | ||
287 | |||
288 | set_current_state(TASK_RUNNING); | ||
289 | remove_wait_queue(&channel_wqs[minor].rt_queue, &wait); | ||
290 | } | ||
291 | |||
292 | /* total number of bytes to copy */ | ||
293 | count = min( count, (size_t)spacefree(rt->rt_read, rt->rt_write, rt->buffer_size) ); | ||
294 | |||
295 | /* first bit from write pointer to the end of the buffer, or count */ | ||
296 | fl = min(count, (size_t) rt->buffer_size - rt->rt_write); | ||
297 | |||
298 | copy_from_user(&rt->rt_buffer[rt->rt_write], buffer, fl); | ||
299 | |||
300 | /* if there's any left copy to the beginning of the buffer */ | ||
301 | if( count - fl ) | ||
302 | copy_from_user(rt->rt_buffer, buffer + fl, count - fl); | ||
303 | |||
304 | rt->rt_write += count; | ||
305 | rt->rt_write %= rt->buffer_size; | ||
306 | |||
307 | return(count); | ||
308 | } | ||
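The write path above splits each transfer into a run up to the end of the buffer and an optional wrap-around run from the start. A simplified userspace sketch of that split, ignoring the free-space check and using made-up sizes:

#include <assert.h>
#include <string.h>

int main(void)
{
	char ring[8] = {0};
	const char *msg = "abcde";
	size_t count = 5, write = 5, size = sizeof(ring);

	/* first run: from the write pointer to the end of the buffer */
	size_t fl = count < size - write ? count : size - write;   /* 3 bytes */
	memcpy(&ring[write], msg, fl);
	/* second run: whatever is left wraps to the start */
	if (count - fl)
		memcpy(ring, msg + fl, count - fl);                 /* 2 bytes */

	write = (write + count) % size;                             /* now 2  */
	assert(write == 2);
	assert(memcmp(&ring[5], "abc", 3) == 0 && memcmp(ring, "de", 2) == 0);
	return 0;
}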
309 | |||
310 | static struct file_operations rtlx_fops = { | ||
311 | .owner = THIS_MODULE, | ||
312 | .open = rtlx_open, | ||
313 | .release = rtlx_release, | ||
314 | .write = rtlx_write, | ||
315 | .read = rtlx_read, | ||
316 | .poll = rtlx_poll | ||
317 | }; | ||
318 | |||
319 | static int rtlx_module_init(void) | ||
320 | { | ||
321 | if ((major = register_chrdev(RTLX_MAJOR, module_name, &rtlx_fops)) < 0) { | ||
322 | printk("rtlx_module_init: unable to register device\n"); | ||
323 | return (-EBUSY); | ||
324 | } | ||
325 | |||
326 | if (major == 0) | ||
327 | major = RTLX_MAJOR; | ||
328 | |||
329 | return (0); | ||
330 | } | ||
331 | |||
332 | static void rtlx_module_exit(void) | ||
333 | { | ||
334 | unregister_chrdev(major, module_name); | ||
335 | } | ||
336 | |||
337 | module_init(rtlx_module_init); | ||
338 | module_exit(rtlx_module_exit); | ||
339 | MODULE_DESCRIPTION("MIPS RTLX"); | ||
340 | MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc"); | ||
341 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index b2fa607eeeb8..0a3969aa8dc6 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/smp_lock.h> | 20 | #include <linux/smp_lock.h> |
21 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
22 | #include <linux/kallsyms.h> | 22 | #include <linux/kallsyms.h> |
23 | #include <linux/bootmem.h> | ||
23 | 24 | ||
24 | #include <asm/bootinfo.h> | 25 | #include <asm/bootinfo.h> |
25 | #include <asm/branch.h> | 26 | #include <asm/branch.h> |
@@ -64,6 +65,9 @@ extern int fpu_emulator_cop1Handler(int xcptno, struct pt_regs *xcp, | |||
64 | 65 | ||
65 | void (*board_be_init)(void); | 66 | void (*board_be_init)(void); |
66 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | 67 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); |
68 | void (*board_nmi_handler_setup)(void); | ||
69 | void (*board_ejtag_handler_setup)(void); | ||
70 | void (*board_bind_eic_interrupt)(int irq, int regset); | ||
67 | 71 | ||
68 | /* | 72 | /* |
69 | * These constant is for searching for possible module text segments. | 73 | * These constant is for searching for possible module text segments. |
@@ -813,6 +817,12 @@ asmlinkage void do_reserved(struct pt_regs *regs) | |||
813 | (regs->cp0_cause & 0x7f) >> 2); | 817 | (regs->cp0_cause & 0x7f) >> 2); |
814 | } | 818 | } |
815 | 819 | ||
820 | asmlinkage void do_default_vi(struct pt_regs *regs) | ||
821 | { | ||
822 | show_regs(regs); | ||
823 | panic("Caught unexpected vectored interrupt."); | ||
824 | } | ||
825 | |||
816 | /* | 826 | /* |
817 | * Some MIPS CPUs can enable/disable for cache parity detection, but do | 827 | * Some MIPS CPUs can enable/disable for cache parity detection, but do |
818 | * it different ways. | 828 | * it different ways. |
@@ -921,7 +931,11 @@ void nmi_exception_handler(struct pt_regs *regs) | |||
921 | while(1) ; | 931 | while(1) ; |
922 | } | 932 | } |
923 | 933 | ||
934 | #define VECTORSPACING 0x100 /* for EI/VI mode */ | ||
935 | |||
936 | unsigned long ebase; | ||
924 | unsigned long exception_handlers[32]; | 937 | unsigned long exception_handlers[32]; |
938 | unsigned long vi_handlers[64]; | ||
925 | 939 | ||
926 | /* | 940 | /* |
927 | * As a side effect of the way this is implemented we're limited | 941 | * As a side effect of the way this is implemented we're limited |
@@ -935,13 +949,156 @@ void *set_except_vector(int n, void *addr) | |||
935 | 949 | ||
936 | exception_handlers[n] = handler; | 950 | exception_handlers[n] = handler; |
937 | if (n == 0 && cpu_has_divec) { | 951 | if (n == 0 && cpu_has_divec) { |
938 | *(volatile u32 *)(CAC_BASE + 0x200) = 0x08000000 | | 952 | *(volatile u32 *)(ebase + 0x200) = 0x08000000 | |
939 | (0x03ffffff & (handler >> 2)); | 953 | (0x03ffffff & (handler >> 2)); |
940 | flush_icache_range(CAC_BASE + 0x200, CAC_BASE + 0x204); | 954 | flush_icache_range(ebase + 0x200, ebase + 0x204); |
955 | } | ||
956 | return (void *)old_handler; | ||
957 | } | ||
958 | |||
959 | #ifdef CONFIG_CPU_MIPSR2 | ||
960 | /* | ||
961 | * Shadow register allocation | ||
962 | * FIXME: SMP... | ||
963 | */ | ||
964 | |||
965 | /* MIPSR2 shadow register sets */ | ||
966 | struct shadow_registers { | ||
967 | spinlock_t sr_lock; /* protects sr_allocated */ | ||
968 | int sr_supported; /* Number of shadow register sets supported */ | ||
969 | int sr_allocated; /* Bitmap of allocated shadow registers */ | ||
970 | } shadow_registers; | ||
971 | |||
972 | void mips_srs_init(void) | ||
973 | { | ||
974 | #ifdef CONFIG_CPU_MIPSR2_SRS | ||
975 | shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1; | ||
976 | printk ("%d MIPSR2 register sets available\n", shadow_registers.sr_supported); | ||
977 | #else | ||
978 | shadow_registers.sr_supported = 1; | ||
979 | #endif | ||
980 | shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */ | ||
981 | spin_lock_init(&shadow_registers.sr_lock); | ||
982 | } | ||
983 | |||
984 | int mips_srs_max(void) | ||
985 | { | ||
986 | return shadow_registers.sr_supported; | ||
987 | } | ||
988 | |||
989 | int mips_srs_alloc (void) | ||
990 | { | ||
991 | struct shadow_registers *sr = &shadow_registers; | ||
992 | unsigned long flags; | ||
993 | int set; | ||
994 | |||
995 | spin_lock_irqsave(&sr->sr_lock, flags); | ||
996 | |||
997 | for (set = 0; set < sr->sr_supported; set++) { | ||
998 | if ((sr->sr_allocated & (1 << set)) == 0) { | ||
999 | sr->sr_allocated |= 1 << set; | ||
1000 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
1001 | return set; | ||
1002 | } | ||
1003 | } | ||
1004 | |||
1005 | /* None available */ | ||
1006 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
1007 | return -1; | ||
1008 | } | ||
1009 | |||
1010 | void mips_srs_free (int set) | ||
1011 | { | ||
1012 | struct shadow_registers *sr = &shadow_registers; | ||
1013 | unsigned long flags; | ||
1014 | |||
1015 | spin_lock_irqsave(&sr->sr_lock, flags); | ||
1016 | sr->sr_allocated &= ~(1 << set); | ||
1017 | spin_unlock_irqrestore(&sr->sr_lock, flags); | ||
1018 | } | ||
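The allocator above is a simple bitmap with set 0 reserved for the kernel. A userspace model of the same logic, minus the spinlock (the number of supported sets is made up for the example):

#include <assert.h>

static int sr_supported = 4;
static int sr_allocated = 1;          /* set 0 used by the kernel */

static int srs_alloc(void)
{
	for (int set = 0; set < sr_supported; set++) {
		if (!(sr_allocated & (1 << set))) {
			sr_allocated |= 1 << set;
			return set;
		}
	}
	return -1;                    /* none available */
}

static void srs_free(int set)
{
	sr_allocated &= ~(1 << set);
}

int main(void)
{
	assert(srs_alloc() == 1);
	assert(srs_alloc() == 2);
	srs_free(1);
	assert(srs_alloc() == 1);     /* freed set is handed out again */
	return 0;
}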
1019 | |||
1020 | void *set_vi_srs_handler (int n, void *addr, int srs) | ||
1021 | { | ||
1022 | unsigned long handler; | ||
1023 | unsigned long old_handler = vi_handlers[n]; | ||
1024 | u32 *w; | ||
1025 | unsigned char *b; | ||
1026 | |||
1027 | if (!cpu_has_veic && !cpu_has_vint) | ||
1028 | BUG(); | ||
1029 | |||
1030 | if (addr == NULL) { | ||
1031 | handler = (unsigned long) do_default_vi; | ||
1032 | srs = 0; | ||
1033 | } | ||
1034 | else | ||
1035 | handler = (unsigned long) addr; | ||
1036 | vi_handlers[n] = (unsigned long) addr; | ||
1037 | |||
1038 | b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); | ||
1039 | |||
1040 | if (srs >= mips_srs_max()) | ||
1041 | panic("Shadow register set %d not supported", srs); | ||
1042 | |||
1043 | if (cpu_has_veic) { | ||
1044 | if (board_bind_eic_interrupt) | ||
1045 | board_bind_eic_interrupt (n, srs); | ||
1046 | } | ||
1047 | else if (cpu_has_vint) { | ||
1048 | /* SRSMap is only defined if shadow sets are implemented */ | ||
1049 | if (mips_srs_max() > 1) | ||
1050 | change_c0_srsmap (0xf << n*4, srs << n*4); | ||
1051 | } | ||
1052 | |||
1053 | if (srs == 0) { | ||
1054 | /* | ||
1055 | * If no shadow set is selected then use the default handler | ||
1056 | * that does normal register saving and a standard interrupt exit | ||
1057 | */ | ||
1058 | |||
1059 | extern char except_vec_vi, except_vec_vi_lui; | ||
1060 | extern char except_vec_vi_ori, except_vec_vi_end; | ||
1061 | const int handler_len = &except_vec_vi_end - &except_vec_vi; | ||
1062 | const int lui_offset = &except_vec_vi_lui - &except_vec_vi; | ||
1063 | const int ori_offset = &except_vec_vi_ori - &except_vec_vi; | ||
1064 | |||
1065 | if (handler_len > VECTORSPACING) { | ||
1066 | /* | ||
1067 | * Sigh... panicking won't help as the console | ||
1068 | * is probably not configured :( | ||
1069 | */ | ||
1070 | panic ("VECTORSPACING too small"); | ||
1071 | } | ||
1072 | |||
1073 | memcpy (b, &except_vec_vi, handler_len); | ||
1074 | w = (u32 *)(b + lui_offset); | ||
1075 | *w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff); | ||
1076 | w = (u32 *)(b + ori_offset); | ||
1077 | *w = (*w & 0xffff0000) | ((u32)handler & 0xffff); | ||
1078 | flush_icache_range((unsigned long)b, (unsigned long)(b+handler_len)); | ||
1079 | } | ||
1080 | else { | ||
1081 | /* | ||
1082 | * In other cases jump directly to the interrupt handler | ||
1083 | * | ||
1084 | * It is the handler's responsibility to save registers if required | ||
1085 | * (eg hi/lo) and return from the exception using "eret" | ||
1086 | */ | ||
1087 | w = (u32 *)b; | ||
1088 | *w++ = 0x08000000 | (((u32)handler >> 2) & 0x03ffffff); /* j handler */ | ||
1089 | *w = 0; | ||
1090 | flush_icache_range((unsigned long)b, (unsigned long)(b+8)); | ||
941 | } | 1091 | } |
1092 | |||
942 | return (void *)old_handler; | 1093 | return (void *)old_handler; |
943 | } | 1094 | } |
944 | 1095 | ||
1096 | void *set_vi_handler (int n, void *addr) | ||
1097 | { | ||
1098 | return set_vi_srs_handler (n, addr, 0); | ||
1099 | } | ||
1100 | #endif | ||
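When a non-zero shadow set is selected, set_vi_srs_handler() plants a bare "j handler" at the vector, so the 26-bit target field holds bits 27..2 of the handler address. A userspace sketch of that encoding and of how the CPU reconstructs the target (the addresses are made up for illustration):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t handler = 0x80105abc;                          /* example */
	uint32_t insn = 0x08000000 | ((handler >> 2) & 0x03ffffff);

	/* the CPU combines the target field with the top bits of the
	 * delay-slot PC (here: the word after the vector at 0x80000200) */
	uint32_t pc_plus_4 = 0x80000204;
	uint32_t target = (pc_plus_4 & 0xf0000000) | ((insn & 0x03ffffff) << 2);

	assert(target == handler);
	return 0;
}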
1101 | |||
945 | /* | 1102 | /* |
946 | * This is used by native signal handling | 1103 | * This is used by native signal handling |
947 | */ | 1104 | */ |
@@ -1016,10 +1173,18 @@ void __init per_cpu_trap_init(void) | |||
1016 | if (cpu_has_dsp) | 1173 | if (cpu_has_dsp) |
1017 | set_c0_status(ST0_MX); | 1174 | set_c0_status(ST0_MX); |
1018 | 1175 | ||
1176 | #ifdef CONFIG_CPU_MIPSR2 | ||
1177 | write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */ | ||
1178 | #endif | ||
1179 | |||
1019 | /* | 1180 | /* |
1020 | * Some MIPS CPUs have a dedicated interrupt vector which reduces the | 1181 | * Interrupt handling. |
1021 | * interrupt processing overhead. Use it where available. | ||
1022 | */ | 1182 | */ |
1183 | if (cpu_has_veic || cpu_has_vint) { | ||
1184 | write_c0_ebase (ebase); | ||
1185 | /* Setting vector spacing enables EI/VI mode */ | ||
1186 | change_c0_intctl (0x3e0, VECTORSPACING); | ||
1187 | } | ||
1023 | if (cpu_has_divec) | 1188 | if (cpu_has_divec) |
1024 | set_c0_cause(CAUSEF_IV); | 1189 | set_c0_cause(CAUSEF_IV); |
1025 | 1190 | ||
@@ -1035,13 +1200,41 @@ void __init per_cpu_trap_init(void) | |||
1035 | tlb_init(); | 1200 | tlb_init(); |
1036 | } | 1201 | } |
1037 | 1202 | ||
1203 | /* Install CPU exception handler */ | ||
1204 | void __init set_handler (unsigned long offset, void *addr, unsigned long size) | ||
1205 | { | ||
1206 | memcpy((void *)(ebase + offset), addr, size); | ||
1207 | flush_icache_range(ebase + offset, ebase + offset + size); | ||
1208 | } | ||
1209 | |||
1210 | /* Install uncached CPU exception handler */ | ||
1211 | void __init set_uncached_handler (unsigned long offset, void *addr, unsigned long size) | ||
1212 | { | ||
1213 | #ifdef CONFIG_32BIT | ||
1214 | unsigned long uncached_ebase = KSEG1ADDR(ebase); | ||
1215 | #endif | ||
1216 | #ifdef CONFIG_64BIT | ||
1217 | unsigned long uncached_ebase = TO_UNCAC(ebase); | ||
1218 | #endif | ||
1219 | |||
1220 | memcpy((void *)(uncached_ebase + offset), addr, size); | ||
1221 | } | ||
1222 | |||
1038 | void __init trap_init(void) | 1223 | void __init trap_init(void) |
1039 | { | 1224 | { |
1040 | extern char except_vec3_generic, except_vec3_r4000; | 1225 | extern char except_vec3_generic, except_vec3_r4000; |
1041 | extern char except_vec_ejtag_debug; | ||
1042 | extern char except_vec4; | 1226 | extern char except_vec4; |
1043 | unsigned long i; | 1227 | unsigned long i; |
1044 | 1228 | ||
1229 | if (cpu_has_veic || cpu_has_vint) | ||
1230 | ebase = (unsigned long) alloc_bootmem_low_pages (0x200 + VECTORSPACING*64); | ||
1231 | else | ||
1232 | ebase = CAC_BASE; | ||
1233 | |||
1234 | #ifdef CONFIG_CPU_MIPSR2 | ||
1235 | mips_srs_init(); | ||
1236 | #endif | ||
1237 | |||
1045 | per_cpu_trap_init(); | 1238 | per_cpu_trap_init(); |
1046 | 1239 | ||
1047 | /* | 1240 | /* |
@@ -1049,7 +1242,7 @@ void __init trap_init(void) | |||
1049 | * This will be overriden later as suitable for a particular | 1242 | * This will be overriden later as suitable for a particular |
1050 | * configuration. | 1243 | * configuration. |
1051 | */ | 1244 | */ |
1052 | memcpy((void *)(CAC_BASE + 0x180), &except_vec3_generic, 0x80); | 1245 | set_handler(0x180, &except_vec3_generic, 0x80); |
1053 | 1246 | ||
1054 | /* | 1247 | /* |
1055 | * Setup default vectors | 1248 | * Setup default vectors |
@@ -1061,8 +1254,8 @@ void __init trap_init(void) | |||
1061 | * Copy the EJTAG debug exception vector handler code to it's final | 1254 | * Copy the EJTAG debug exception vector handler code to it's final |
1062 | * destination. | 1255 | * destination. |
1063 | */ | 1256 | */ |
1064 | if (cpu_has_ejtag) | 1257 | if (cpu_has_ejtag && board_ejtag_handler_setup) |
1065 | memcpy((void *)(CAC_BASE + 0x300), &except_vec_ejtag_debug, 0x80); | 1258 | board_ejtag_handler_setup (); |
1066 | 1259 | ||
1067 | /* | 1260 | /* |
1068 | * Only some CPUs have the watch exceptions. | 1261 | * Only some CPUs have the watch exceptions. |
@@ -1071,11 +1264,15 @@ void __init trap_init(void) | |||
1071 | set_except_vector(23, handle_watch); | 1264 | set_except_vector(23, handle_watch); |
1072 | 1265 | ||
1073 | /* | 1266 | /* |
1074 | * Some MIPS CPUs have a dedicated interrupt vector which reduces the | 1267 | * Initialise interrupt handlers |
1075 | * interrupt processing overhead. Use it where available. | ||
1076 | */ | 1268 | */ |
1077 | if (cpu_has_divec) | 1269 | if (cpu_has_veic || cpu_has_vint) { |
1078 | memcpy((void *)(CAC_BASE + 0x200), &except_vec4, 0x8); | 1270 | int nvec = cpu_has_veic ? 64 : 8; |
1271 | for (i = 0; i < nvec; i++) | ||
1272 | set_vi_handler (i, NULL); | ||
1273 | } | ||
1274 | else if (cpu_has_divec) | ||
1275 | set_handler(0x200, &except_vec4, 0x8); | ||
1079 | 1276 | ||
1080 | /* | 1277 | /* |
1081 | * Some CPUs can enable/disable for cache parity detection, but does | 1278 | * Some CPUs can enable/disable for cache parity detection, but does |
@@ -1122,6 +1319,10 @@ void __init trap_init(void) | |||
1122 | //set_except_vector(15, handle_ndc); | 1319 | //set_except_vector(15, handle_ndc); |
1123 | } | 1320 | } |
1124 | 1321 | ||
1322 | |||
1323 | if (board_nmi_handler_setup) | ||
1324 | board_nmi_handler_setup(); | ||
1325 | |||
1125 | if (cpu_has_fpu && !cpu_has_nofpuex) | 1326 | if (cpu_has_fpu && !cpu_has_nofpuex) |
1126 | set_except_vector(15, handle_fpe); | 1327 | set_except_vector(15, handle_fpe); |
1127 | 1328 | ||
@@ -1146,5 +1347,5 @@ void __init trap_init(void) | |||
1146 | signal32_init(); | 1347 | signal32_init(); |
1147 | #endif | 1348 | #endif |
1148 | 1349 | ||
1149 | flush_icache_range(CAC_BASE, CAC_BASE + 0x400); | 1350 | flush_icache_range(ebase, ebase + 0x400); |
1150 | } | 1351 | } |
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
new file mode 100644
index 000000000000..6bf42ba08f09
--- /dev/null
+++ b/arch/mips/kernel/vpe.c
@@ -0,0 +1,1295 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can distribute it and/or modify it | ||
5 | * under the terms of the GNU General Public License (Version 2) as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope it will be useful, but WITHOUT | ||
9 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | ||
10 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | ||
11 | * for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License along | ||
14 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
15 | * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. | ||
16 | * | ||
17 | */ | ||
18 | |||
19 | /* | ||
20 | * VPE support module | ||
21 | * | ||
22 | * Provides support for loading a MIPS SP program on VPE1. | ||
23 | * The SP environment is rather simple, no TLBs. It needs to be relocatable | ||
24 | * (or partially linked). You should initialise your stack in the startup | ||
25 | * code. This loader looks for the symbol __start and sets up | ||
26 | * execution to resume from there. The MIPS SDE kit contains suitable examples. | ||
27 | * | ||
28 | * To load and run, simply cat a SP 'program file' to /dev/vpe1. | ||
29 | * i.e cat spapp >/dev/vpe1. | ||
30 | * | ||
31 | * You'll need to have the following device files. | ||
32 | * mknod /dev/vpe0 c 63 0 | ||
33 | * mknod /dev/vpe1 c 63 1 | ||
34 | */ | ||
35 | |||
36 | #include <linux/kernel.h> | ||
37 | #include <linux/module.h> | ||
38 | #include <linux/fs.h> | ||
39 | #include <linux/init.h> | ||
40 | #include <asm/uaccess.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/list.h> | ||
43 | #include <linux/vmalloc.h> | ||
44 | #include <linux/elf.h> | ||
45 | #include <linux/seq_file.h> | ||
46 | #include <linux/syscalls.h> | ||
47 | #include <linux/moduleloader.h> | ||
48 | #include <linux/interrupt.h> | ||
49 | #include <linux/poll.h> | ||
50 | #include <linux/bootmem.h> | ||
51 | #include <asm/mipsregs.h> | ||
52 | #include <asm/cacheflush.h> | ||
53 | #include <asm/atomic.h> | ||
54 | #include <asm/cpu.h> | ||
55 | #include <asm/processor.h> | ||
56 | #include <asm/system.h> | ||
57 | |||
58 | typedef void *vpe_handle; | ||
59 | |||
60 | // defined here because the kernel module loader doesn't have | ||
61 | // anything to do with it. | ||
62 | #define SHN_MIPS_SCOMMON 0xff03 | ||
63 | |||
64 | #ifndef ARCH_SHF_SMALL | ||
65 | #define ARCH_SHF_SMALL 0 | ||
66 | #endif | ||
67 | |||
68 | /* If this is set, the section belongs in the init part of the module */ | ||
69 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) | ||
70 | |||
71 | // temp number, | ||
72 | #define VPE_MAJOR 63 | ||
73 | |||
74 | static char module_name[] = "vpe"; | ||
75 | static int major = 0; | ||
76 | |||
77 | /* grab the likely amount of memory we will need. */ | ||
78 | #ifdef CONFIG_MIPS_VPE_LOADER_TOM | ||
79 | #define P_SIZE (2 * 1024 * 1024) | ||
80 | #else | ||
81 | /* add an overhead to the max kmalloc size for non-striped symbols/etc */ | ||
82 | #define P_SIZE (256 * 1024) | ||
83 | #endif | ||
84 | |||
85 | #define MAX_VPES 16 | ||
86 | |||
87 | enum vpe_state { | ||
88 | VPE_STATE_UNUSED = 0, | ||
89 | VPE_STATE_INUSE, | ||
90 | VPE_STATE_RUNNING | ||
91 | }; | ||
92 | |||
93 | enum tc_state { | ||
94 | TC_STATE_UNUSED = 0, | ||
95 | TC_STATE_INUSE, | ||
96 | TC_STATE_RUNNING, | ||
97 | TC_STATE_DYNAMIC | ||
98 | }; | ||
99 | |||
100 | struct vpe; | ||
101 | typedef struct tc { | ||
102 | enum tc_state state; | ||
103 | int index; | ||
104 | |||
105 | /* parent VPE */ | ||
106 | struct vpe *pvpe; | ||
107 | |||
108 | /* The list of TC's with this VPE */ | ||
109 | struct list_head tc; | ||
110 | |||
111 | /* The global list of tc's */ | ||
112 | struct list_head list; | ||
113 | } tc_t; | ||
114 | |||
115 | typedef struct vpe { | ||
116 | enum vpe_state state; | ||
117 | |||
118 | /* (device) minor associated with this vpe */ | ||
119 | int minor; | ||
120 | |||
121 | /* elfloader stuff */ | ||
122 | void *load_addr; | ||
123 | u32 len; | ||
124 | char *pbuffer; | ||
125 | u32 plen; | ||
126 | |||
127 | unsigned long __start; | ||
128 | |||
129 | /* tc's associated with this vpe */ | ||
130 | struct list_head tc; | ||
131 | |||
132 | /* The list of vpe's */ | ||
133 | struct list_head list; | ||
134 | |||
135 | /* shared symbol address */ | ||
136 | void *shared_ptr; | ||
137 | } vpe_t; | ||
138 | |||
139 | struct vpecontrol_ { | ||
140 | /* Virtual processing elements */ | ||
141 | struct list_head vpe_list; | ||
142 | |||
143 | /* Thread contexts */ | ||
144 | struct list_head tc_list; | ||
145 | } vpecontrol; | ||
146 | |||
147 | static void release_progmem(void *ptr); | ||
148 | static void dump_vpe(vpe_t * v); | ||
149 | extern void save_gp_address(unsigned int secbase, unsigned int rel); | ||
150 | |||
151 | /* get the vpe associated with this minor */ | ||
152 | struct vpe *get_vpe(int minor) | ||
153 | { | ||
154 | struct vpe *v; | ||
155 | |||
156 | list_for_each_entry(v, &vpecontrol.vpe_list, list) { | ||
157 | if (v->minor == minor) | ||
158 | return v; | ||
159 | } | ||
160 | |||
161 | printk(KERN_DEBUG "VPE: get_vpe minor %d not found\n", minor); | ||
162 | return NULL; | ||
163 | } | ||
164 | |||
165 | /* get the vpe associated with this minor */ | ||
166 | struct tc *get_tc(int index) | ||
167 | { | ||
168 | struct tc *t; | ||
169 | |||
170 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
171 | if (t->index == index) | ||
172 | return t; | ||
173 | } | ||
174 | |||
175 | printk(KERN_DEBUG "VPE: get_tc index %d not found\n", index); | ||
176 | |||
177 | return NULL; | ||
178 | } | ||
179 | |||
180 | struct tc *get_tc_unused(void) | ||
181 | { | ||
182 | struct tc *t; | ||
183 | |||
184 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
185 | if (t->state == TC_STATE_UNUSED) | ||
186 | return t; | ||
187 | } | ||
188 | |||
189 | printk(KERN_DEBUG "VPE: All TC's are in use\n"); | ||
190 | |||
191 | return NULL; | ||
192 | } | ||
193 | |||
194 | /* allocate a vpe and associate it with this minor (or index) */ | ||
195 | struct vpe *alloc_vpe(int minor) | ||
196 | { | ||
197 | struct vpe *v; | ||
198 | |||
199 | if ((v = kmalloc(sizeof(struct vpe), GFP_KERNEL)) == NULL) { | ||
200 | printk(KERN_WARNING "VPE: alloc_vpe no mem\n"); | ||
201 | return NULL; | ||
202 | } | ||
203 | |||
204 | memset(v, 0, sizeof(struct vpe)); | ||
205 | |||
206 | INIT_LIST_HEAD(&v->tc); | ||
207 | list_add_tail(&v->list, &vpecontrol.vpe_list); | ||
208 | |||
209 | v->minor = minor; | ||
210 | return v; | ||
211 | } | ||
212 | |||
213 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ | ||
214 | struct tc *alloc_tc(int index) | ||
215 | { | ||
216 | struct tc *t; | ||
217 | |||
218 | if ((t = kmalloc(sizeof(struct tc), GFP_KERNEL)) == NULL) { | ||
219 | printk(KERN_WARNING "VPE: alloc_tc no mem\n"); | ||
220 | return NULL; | ||
221 | } | ||
222 | |||
223 | memset(t, 0, sizeof(struct tc)); | ||
224 | |||
225 | INIT_LIST_HEAD(&t->tc); | ||
226 | list_add_tail(&t->list, &vpecontrol.tc_list); | ||
227 | |||
228 | t->index = index; | ||
229 | |||
230 | return t; | ||
231 | } | ||
232 | |||
233 | /* clean up and free everything */ | ||
234 | void release_vpe(struct vpe *v) | ||
235 | { | ||
236 | list_del(&v->list); | ||
237 | if (v->load_addr) | ||
238 | release_progmem(v); | ||
239 | kfree(v); | ||
240 | } | ||
241 | |||
242 | void dump_mtregs(void) | ||
243 | { | ||
244 | unsigned long val; | ||
245 | |||
246 | val = read_c0_config3(); | ||
247 | printk("config3 0x%lx MT %ld\n", val, | ||
248 | (val & CONFIG3_MT) >> CONFIG3_MT_SHIFT); | ||
249 | |||
250 | val = read_c0_mvpconf0(); | ||
251 | printk("mvpconf0 0x%lx, PVPE %ld PTC %ld M %ld\n", val, | ||
252 | (val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT, | ||
253 | val & MVPCONF0_PTC, (val & MVPCONF0_M) >> MVPCONF0_M_SHIFT); | ||
254 | |||
255 | val = read_c0_mvpcontrol(); | ||
256 | printk("MVPControl 0x%lx, STLB %ld VPC %ld EVP %ld\n", val, | ||
257 | (val & MVPCONTROL_STLB) >> MVPCONTROL_STLB_SHIFT, | ||
258 | (val & MVPCONTROL_VPC) >> MVPCONTROL_VPC_SHIFT, | ||
259 | (val & MVPCONTROL_EVP)); | ||
260 | |||
261 | val = read_c0_vpeconf0(); | ||
262 | printk("VPEConf0 0x%lx MVP %ld\n", val, | ||
263 | (val & VPECONF0_MVP) >> VPECONF0_MVP_SHIFT); | ||
264 | } | ||
265 | |||
266 | /* Find some VPE program space */ | ||
267 | static void *alloc_progmem(u32 len) | ||
268 | { | ||
269 | #ifdef CONFIG_MIPS_VPE_LOADER_TOM | ||
270 | /* this means you must tell linux to use less memory than you physically have */ | ||
271 | return (void *)((max_pfn * PAGE_SIZE) + KSEG0); | ||
272 | #else | ||
273 | // simple grab some mem for now | ||
274 | return kmalloc(len, GFP_KERNEL); | ||
275 | #endif | ||
276 | } | ||
277 | |||
278 | static void release_progmem(void *ptr) | ||
279 | { | ||
280 | #ifndef CONFIG_MIPS_VPE_LOADER_TOM | ||
281 | kfree(ptr); | ||
282 | #endif | ||
283 | } | ||
284 | |||
285 | /* Update size with this section: return offset. */ | ||
286 | static long get_offset(unsigned long *size, Elf_Shdr * sechdr) | ||
287 | { | ||
288 | long ret; | ||
289 | |||
290 | ret = ALIGN(*size, sechdr->sh_addralign ? : 1); | ||
291 | *size = ret + sechdr->sh_size; | ||
292 | return ret; | ||
293 | } | ||
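get_offset() above rounds the running image size up to the section's alignment and returns that as the section's offset. A userspace sketch of the same bookkeeping with made-up section sizes (ALIGN is defined locally so the example is self-contained):

#include <assert.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

int main(void)
{
	unsigned long size = 0;

	/* a 100-byte section and a 4-byte section, both 16-byte aligned */
	unsigned long off1 = ALIGN(size, 16); size = off1 + 100;  /* off1 == 0   */
	unsigned long off2 = ALIGN(size, 16); size = off2 + 4;    /* off2 == 112 */

	assert(off1 == 0 && off2 == 112 && size == 116);
	return 0;
}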
294 | |||
295 | /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld | ||
296 | might -- code, read-only data, read-write data, small data. Tally | ||
297 | sizes, and place the offsets into sh_entsize fields: high bit means it | ||
298 | belongs in init. */ | ||
299 | static void layout_sections(struct module *mod, const Elf_Ehdr * hdr, | ||
300 | Elf_Shdr * sechdrs, const char *secstrings) | ||
301 | { | ||
302 | static unsigned long const masks[][2] = { | ||
303 | /* NOTE: all executable code must be the first section | ||
304 | * in this array; otherwise modify the text_size | ||
305 | * finder in the two loops below */ | ||
306 | {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL}, | ||
307 | {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL}, | ||
308 | {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL}, | ||
309 | {ARCH_SHF_SMALL | SHF_ALLOC, 0} | ||
310 | }; | ||
311 | unsigned int m, i; | ||
312 | |||
313 | for (i = 0; i < hdr->e_shnum; i++) | ||
314 | sechdrs[i].sh_entsize = ~0UL; | ||
315 | |||
316 | for (m = 0; m < ARRAY_SIZE(masks); ++m) { | ||
317 | for (i = 0; i < hdr->e_shnum; ++i) { | ||
318 | Elf_Shdr *s = &sechdrs[i]; | ||
319 | |||
320 | // || strncmp(secstrings + s->sh_name, ".init", 5) == 0) | ||
321 | if ((s->sh_flags & masks[m][0]) != masks[m][0] | ||
322 | || (s->sh_flags & masks[m][1]) | ||
323 | || s->sh_entsize != ~0UL) | ||
324 | continue; | ||
325 | s->sh_entsize = get_offset(&mod->core_size, s); | ||
326 | } | ||
327 | |||
328 | if (m == 0) | ||
329 | mod->core_text_size = mod->core_size; | ||
330 | |||
331 | } | ||
332 | } | ||
333 | |||
334 | |||
335 | /* from module-elf32.c, but subverted a little */ | ||
336 | |||
337 | struct mips_hi16 { | ||
338 | struct mips_hi16 *next; | ||
339 | Elf32_Addr *addr; | ||
340 | Elf32_Addr value; | ||
341 | }; | ||
342 | |||
343 | static struct mips_hi16 *mips_hi16_list; | ||
344 | static unsigned int gp_offs, gp_addr; | ||
345 | |||
346 | static int apply_r_mips_none(struct module *me, uint32_t *location, | ||
347 | Elf32_Addr v) | ||
348 | { | ||
349 | return 0; | ||
350 | } | ||
351 | |||
352 | static int apply_r_mips_gprel16(struct module *me, uint32_t *location, | ||
353 | Elf32_Addr v) | ||
354 | { | ||
355 | int rel; | ||
356 | |||
357 | if( !(*location & 0xffff) ) { | ||
358 | rel = (int)v - gp_addr; | ||
359 | } | ||
360 | else { | ||
361 | /* .sbss + gp(relative) + offset */ | ||
362 | /* kludge! */ | ||
363 | rel = (int)(short)((int)v + gp_offs + | ||
364 | (int)(short)(*location & 0xffff) - gp_addr); | ||
365 | } | ||
366 | |||
367 | if( (rel > 32768) || (rel < -32768) ) { | ||
368 | printk(KERN_ERR | ||
369 | "apply_r_mips_gprel16: relative address out of range 0x%x %d\n", | ||
370 | rel, rel); | ||
371 | return -ENOEXEC; | ||
372 | } | ||
373 | |||
374 | *location = (*location & 0xffff0000) | (rel & 0xffff); | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | static int apply_r_mips_pc16(struct module *me, uint32_t *location, | ||
380 | Elf32_Addr v) | ||
381 | { | ||
382 | int rel; | ||
383 | rel = (((unsigned int)v - (unsigned int)location)); | ||
384 | rel >>= 2; // because the offset is in _instructions_ not bytes. | ||
385 | rel -= 1; // and one instruction less due to the branch delay slot. | ||
386 | |||
387 | if( (rel > 32768) || (rel < -32768) ) { | ||
388 | printk(KERN_ERR | ||
389 | "apply_r_mips_pc16: relative address out of range 0x%x\n", rel); | ||
390 | return -ENOEXEC; | ||
391 | } | ||
392 | |||
393 | *location = (*location & 0xffff0000) | (rel & 0xffff); | ||
394 | |||
395 | return 0; | ||
396 | } | ||
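The PC16 handler above converts a byte distance into an instruction count and subtracts one for the branch delay slot. A userspace check with made-up addresses:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t location = 0x1000;   /* address of the branch instruction */
	uint32_t target   = 0x1010;   /* branch target (example value)     */

	int rel = (int)(target - location);
	rel >>= 2;                    /* offset is counted in instructions */
	rel -= 1;                     /* and measured from the delay slot  */

	assert(rel == 3);             /* (0x1010 - 0x1000) / 4 - 1 */
	return 0;
}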
397 | |||
398 | static int apply_r_mips_32(struct module *me, uint32_t *location, | ||
399 | Elf32_Addr v) | ||
400 | { | ||
401 | *location += v; | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | static int apply_r_mips_26(struct module *me, uint32_t *location, | ||
407 | Elf32_Addr v) | ||
408 | { | ||
409 | if (v % 4) { | ||
410 | printk(KERN_ERR "module %s: dangerous relocation mod4\n", me->name); | ||
411 | return -ENOEXEC; | ||
412 | } | ||
413 | |||
414 | /* Not desperately convinced this is a good check of an overflow condition | ||
415 | anyway. But it gets in the way of handling undefined weak symbols which | ||
416 | we want to set to zero. | ||
417 | if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
418 | printk(KERN_ERR | ||
419 | "module %s: relocation overflow\n", | ||
420 | me->name); | ||
421 | return -ENOEXEC; | ||
422 | } | ||
423 | */ | ||
424 | |||
425 | *location = (*location & ~0x03ffffff) | | ||
426 | ((*location + (v >> 2)) & 0x03ffffff); | ||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | static int apply_r_mips_hi16(struct module *me, uint32_t *location, | ||
431 | Elf32_Addr v) | ||
432 | { | ||
433 | struct mips_hi16 *n; | ||
434 | |||
435 | /* | ||
436 | * We cannot relocate this one now because we don't know the value of | ||
437 | * the carry we need to add. Save the information, and let LO16 do the | ||
438 | * actual relocation. | ||
439 | */ | ||
440 | n = kmalloc(sizeof *n, GFP_KERNEL); | ||
441 | if (!n) | ||
442 | return -ENOMEM; | ||
443 | |||
444 | n->addr = location; | ||
445 | n->value = v; | ||
446 | n->next = mips_hi16_list; | ||
447 | mips_hi16_list = n; | ||
448 | |||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | static int apply_r_mips_lo16(struct module *me, uint32_t *location, | ||
453 | Elf32_Addr v) | ||
454 | { | ||
455 | unsigned long insnlo = *location; | ||
456 | Elf32_Addr val, vallo; | ||
457 | |||
458 | /* Sign extend the addend we extract from the lo insn. */ | ||
459 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; | ||
460 | |||
461 | if (mips_hi16_list != NULL) { | ||
462 | struct mips_hi16 *l; | ||
463 | |||
464 | l = mips_hi16_list; | ||
465 | while (l != NULL) { | ||
466 | struct mips_hi16 *next; | ||
467 | unsigned long insn; | ||
468 | |||
469 | /* | ||
470 | * The value for the HI16 had best be the same. | ||
471 | */ | ||
472 | if (v != l->value) { | ||
473 | printk("%d != %d\n", v, l->value); | ||
474 | goto out_danger; | ||
475 | } | ||
476 | |||
477 | |||
478 | /* | ||
479 | * Do the HI16 relocation. Note that we actually don't | ||
480 | * need to know anything about the LO16 itself, except | ||
481 | * where to find the low 16 bits of the addend needed | ||
482 | * by the LO16. | ||
483 | */ | ||
484 | insn = *l->addr; | ||
485 | val = ((insn & 0xffff) << 16) + vallo; | ||
486 | val += v; | ||
487 | |||
488 | /* | ||
489 | * Account for the sign extension that will happen in | ||
490 | * the low bits. | ||
491 | */ | ||
492 | val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; | ||
493 | |||
494 | insn = (insn & ~0xffff) | val; | ||
495 | *l->addr = insn; | ||
496 | |||
497 | next = l->next; | ||
498 | kfree(l); | ||
499 | l = next; | ||
500 | } | ||
501 | |||
502 | mips_hi16_list = NULL; | ||
503 | } | ||
504 | |||
505 | /* | ||
506 | * Ok, we're done with the HI16 relocs. Now deal with the LO16. | ||
507 | */ | ||
508 | val = v + vallo; | ||
509 | insnlo = (insnlo & ~0xffff) | (val & 0xffff); | ||
510 | *location = insnlo; | ||
511 | |||
512 | return 0; | ||
513 | |||
514 | out_danger: | ||
515 | printk(KERN_ERR "module %s: dangerous " "relocation\n", me->name); | ||
516 | |||
517 | return -ENOEXEC; | ||
518 | } | ||
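The HI16/LO16 pair above has to compensate for the sign extension the CPU applies to the low 16 bits at run time: if bit 15 of the final low half is set, the high half must absorb a carry. A userspace sketch of that carry handling with made-up values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t v         = 0x12348800;  /* symbol value being relocated against */
	uint32_t hi_addend = 0x0001;      /* addend taken from the lui insn       */
	uint32_t lo_addend = 0x0000;      /* sign-extended addend from the lo insn */

	uint32_t val = ((hi_addend << 16) + lo_addend) + v;
	/* carry compensation, as in apply_r_mips_lo16() */
	uint32_t hi = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
	uint32_t lo = (v + lo_addend) & 0xffff;

	/* lui hi; addiu lo reconstructs val: (hi << 16) + sign_extend(lo) */
	assert((hi << 16) + (int16_t)lo == val);
	return 0;
}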
519 | |||
520 | static int (*reloc_handlers[]) (struct module *me, uint32_t *location, | ||
521 | Elf32_Addr v) = { | ||
522 | [R_MIPS_NONE] = apply_r_mips_none, | ||
523 | [R_MIPS_32] = apply_r_mips_32, | ||
524 | [R_MIPS_26] = apply_r_mips_26, | ||
525 | [R_MIPS_HI16] = apply_r_mips_hi16, | ||
526 | [R_MIPS_LO16] = apply_r_mips_lo16, | ||
527 | [R_MIPS_GPREL16] = apply_r_mips_gprel16, | ||
528 | [R_MIPS_PC16] = apply_r_mips_pc16 | ||
529 | }; | ||
530 | |||
531 | |||
532 | int apply_relocations(Elf32_Shdr *sechdrs, | ||
533 | const char *strtab, | ||
534 | unsigned int symindex, | ||
535 | unsigned int relsec, | ||
536 | struct module *me) | ||
537 | { | ||
538 | Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr; | ||
539 | Elf32_Sym *sym; | ||
540 | uint32_t *location; | ||
541 | unsigned int i; | ||
542 | Elf32_Addr v; | ||
543 | int res; | ||
544 | |||
545 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
546 | Elf32_Word r_info = rel[i].r_info; | ||
547 | |||
548 | /* This is where to make the change */ | ||
549 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
550 | + rel[i].r_offset; | ||
551 | /* This is the symbol it is referring to */ | ||
552 | sym = (Elf32_Sym *)sechdrs[symindex].sh_addr | ||
553 | + ELF32_R_SYM(r_info); | ||
554 | |||
555 | if (!sym->st_value) { | ||
556 | printk(KERN_DEBUG "%s: undefined weak symbol %s\n", | ||
557 | me->name, strtab + sym->st_name); | ||
558 | /* just print the warning, dont barf */ | ||
559 | } | ||
560 | |||
561 | v = sym->st_value; | ||
562 | |||
563 | res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); | ||
564 | if( res ) { | ||
565 | printk(KERN_DEBUG | ||
566 | "relocation error 0x%x sym refer <%s> value 0x%x " | ||
567 | "type 0x%x r_info 0x%x\n", | ||
568 | (unsigned int)location, strtab + sym->st_name, v, | ||
569 | r_info, ELF32_R_TYPE(r_info)); | ||
570 | } | ||
571 | |||
572 | if (res) | ||
573 | return res; | ||
574 | } | ||
575 | |||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | void save_gp_address(unsigned int secbase, unsigned int rel) | ||
580 | { | ||
581 | gp_addr = secbase + rel; | ||
582 | gp_offs = gp_addr - (secbase & 0xffff0000); | ||
583 | } | ||
584 | /* end module-elf32.c */ | ||
585 | |||
586 | |||
587 | |||
588 | /* Change all symbols so that sh_value encodes the pointer directly. */ | ||
589 | static int simplify_symbols(Elf_Shdr * sechdrs, | ||
590 | unsigned int symindex, | ||
591 | const char *strtab, | ||
592 | const char *secstrings, | ||
593 | unsigned int nsecs, struct module *mod) | ||
594 | { | ||
595 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | ||
596 | unsigned long secbase, bssbase = 0; | ||
597 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | ||
598 | int ret = 0, size; | ||
599 | |||
600 | /* find the .bss section for COMMON symbols */ | ||
601 | for (i = 0; i < nsecs; i++) { | ||
602 | if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) | ||
603 | bssbase = sechdrs[i].sh_addr; | ||
604 | } | ||
605 | |||
606 | for (i = 1; i < n; i++) { | ||
607 | switch (sym[i].st_shndx) { | ||
608 | case SHN_COMMON: | ||
609 | /* Allocate space for the symbol in the .bss section. st_value is currently size. | ||
610 | We want it to have the address of the symbol. */ | ||
611 | |||
612 | size = sym[i].st_value; | ||
613 | sym[i].st_value = bssbase; | ||
614 | |||
615 | bssbase += size; | ||
616 | break; | ||
617 | |||
618 | case SHN_ABS: | ||
619 | /* Don't need to do anything */ | ||
620 | break; | ||
621 | |||
622 | case SHN_UNDEF: | ||
623 | /* ret = -ENOENT; */ | ||
624 | break; | ||
625 | |||
626 | case SHN_MIPS_SCOMMON: | ||
627 | |||
628 | printk(KERN_DEBUG | ||
629 | "simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n", | ||
630 | strtab + sym[i].st_name, sym[i].st_shndx); | ||
631 | |||
632 | // .sbss section | ||
633 | break; | ||
634 | |||
635 | default: | ||
636 | secbase = sechdrs[sym[i].st_shndx].sh_addr; | ||
637 | |||
638 | if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) { | ||
639 | save_gp_address(secbase, sym[i].st_value); | ||
640 | } | ||
641 | |||
642 | sym[i].st_value += secbase; | ||
643 | break; | ||
644 | } | ||
645 | |||
646 | } | ||
647 | |||
648 | return ret; | ||
649 | } | ||
650 | |||
651 | #ifdef DEBUG_ELFLOADER | ||
652 | static void dump_elfsymbols(Elf_Shdr * sechdrs, unsigned int symindex, | ||
653 | const char *strtab, struct module *mod) | ||
654 | { | ||
655 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | ||
656 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | ||
657 | |||
658 | printk(KERN_DEBUG "dump_elfsymbols: n %d\n", n); | ||
659 | for (i = 1; i < n; i++) { | ||
660 | printk(KERN_DEBUG " i %d name <%s> 0x%x\n", i, | ||
661 | strtab + sym[i].st_name, sym[i].st_value); | ||
662 | } | ||
663 | } | ||
664 | #endif | ||
665 | |||
666 | static void dump_tc(struct tc *t) | ||
667 | { | ||
668 | printk(KERN_WARNING "VPE: TC index %d TCStatus 0x%lx halt 0x%lx\n", | ||
669 | t->index, read_tc_c0_tcstatus(), read_tc_c0_tchalt()); | ||
670 | printk(KERN_WARNING "VPE: tcrestart 0x%lx\n", read_tc_c0_tcrestart()); | ||
671 | } | ||
672 | |||
673 | static void dump_tclist(void) | ||
674 | { | ||
675 | struct tc *t; | ||
676 | |||
677 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
678 | dump_tc(t); | ||
679 | } | ||
680 | } | ||
681 | |||
682 | /* We are prepared so configure and start the VPE... */ | ||
683 | int vpe_run(vpe_t * v) | ||
684 | { | ||
685 | unsigned long val; | ||
686 | struct tc *t; | ||
687 | |||
688 | /* check we are the Master VPE */ | ||
689 | val = read_c0_vpeconf0(); | ||
690 | if (!(val & VPECONF0_MVP)) { | ||
691 | printk(KERN_WARNING | ||
692 | "VPE: only Master VPEs are allowed to configure MT\n"); | ||
693 | return -1; | ||
694 | } | ||
695 | |||
696 | /* disable MT (using dvpe) */ | ||
697 | dvpe(); | ||
698 | |||
699 | /* Put MVPE's into 'configuration state' */ | ||
700 | write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC); | ||
701 | |||
702 | if (!list_empty(&v->tc)) { | ||
703 | /* list_entry() never returns NULL, so just take the first TC */ | ||
704 | t = list_entry(v->tc.next, struct tc, tc); | ||
705 | } else { | ||
706 | printk(KERN_WARNING "VPE: no TCs associated with VPE %d\n", | ||
707 | v->minor); | ||
708 | return -ENOEXEC; | ||
709 | } | ||
713 | |||
714 | settc(t->index); | ||
715 | |||
716 | val = read_vpe_c0_vpeconf0(); | ||
717 | |||
718 | /* should check it is halted, and not activated */ | ||
719 | if ((read_tc_c0_tcstatus() & TCSTATUS_A) || !(read_tc_c0_tchalt() & TCHALT_H)) { | ||
720 | printk(KERN_WARNING "VPE: TC %d is already doing something!\n", | ||
721 | t->index); | ||
722 | |||
723 | dump_tclist(); | ||
724 | return -ENOEXEC; | ||
725 | } | ||
726 | |||
727 | /* Write the address we want it to start running from in the TCPC register. */ | ||
728 | write_tc_c0_tcrestart((unsigned long)v->__start); | ||
729 | |||
730 | /* write the sivc_info address to tccontext */ | ||
731 | write_tc_c0_tccontext((unsigned long)0); | ||
732 | |||
733 | /* Set up the XTC bit in vpeconf0 to point at our tc */ | ||
734 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (t->index << VPECONF0_XTC_SHIFT)); | ||
735 | |||
736 | /* mark the TC as activated, not interrupt exempt and not dynamically allocatable */ | ||
737 | val = read_tc_c0_tcstatus(); | ||
738 | val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; | ||
739 | write_tc_c0_tcstatus(val); | ||
740 | |||
741 | write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); | ||
742 | |||
743 | /* set up VPE1 */ | ||
744 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); // no multiple TC's | ||
745 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); // enable this VPE | ||
746 | |||
747 | /* | ||
748 | * The sde-kit passes 'memsize' to __start in $a3, so set something | ||
749 | * here... | ||
750 | * Or set $a3 (register 7) to zero and define DFLT_STACK_SIZE and | ||
751 | * DFLT_HEAP_SIZE when you compile your program | ||
752 | */ | ||
753 | |||
754 | mttgpr(7, 0); | ||
755 | |||
756 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ | ||
757 | write_vpe_c0_config(read_c0_config()); | ||
758 | |||
759 | /* clear out any left overs from a previous program */ | ||
760 | write_vpe_c0_cause(0); | ||
761 | |||
762 | /* take system out of configuration state */ | ||
763 | write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC); | ||
764 | |||
765 | /* clear the IE, ERL, EXL and KSU bits in the VPE's c0 status register */ | ||
766 | write_vpe_c0_status(read_vpe_c0_status() & ~(ST0_ERL | ST0_KSU | ST0_IE | ST0_EXL)); | ||
767 | |||
768 | /* set it running */ | ||
769 | evpe(EVPE_ENABLE); | ||
770 | |||
771 | return 0; | ||
772 | } | ||
773 | |||
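For reference, a rough sketch of the bare-metal program this function can start on VPE1. Only the __start and vpe_shared names come from the loader (they are resolved by find_vpe_symbols() below); everything else is illustrative:

/* Sketch only: shape of a program loadable onto VPE1 by this driver. */
unsigned int vpe_shared[64];	/* area handed to Linux via vpe_get_shared() */

void __start(unsigned long a0, unsigned long a1,
	     unsigned long a2, unsigned long memsize)
{
	/* memsize arrives in $a3; vpe_run() above sets it to 0, so the
	 * program falls back on DFLT_STACK_SIZE/DFLT_HEAP_SIZE. */
	vpe_shared[0] = 0x12345678;	/* tell the Linux side we are alive */

	for (;;)
		;			/* main loop of the bare-metal program */
}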
774 | static int find_vpe_symbols(vpe_t * v, Elf_Shdr * sechdrs, | ||
775 | unsigned int symindex, const char *strtab, | ||
776 | struct module *mod) | ||
777 | { | ||
778 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | ||
779 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | ||
780 | |||
781 | for (i = 1; i < n; i++) { | ||
782 | if (strcmp(strtab + sym[i].st_name, "__start") == 0) { | ||
783 | v->__start = sym[i].st_value; | ||
784 | } | ||
785 | |||
786 | if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) { | ||
787 | v->shared_ptr = (void *)sym[i].st_value; | ||
788 | } | ||
789 | } | ||
790 | /* both symbols must have been found for the program to be usable */ | ||
791 | return (v->__start != 0 && v->shared_ptr != NULL) ? 0 : -1; | ||
792 | } | ||
793 | |||
794 | /* Allocates a VPE with some program code space (the load address), copies the contents | ||
795 | of the program buffer, performing relocations etc., and frees it when finished. | ||
796 | */ | ||
797 | int vpe_elfload(vpe_t * v) | ||
798 | { | ||
799 | Elf_Ehdr *hdr; | ||
800 | Elf_Shdr *sechdrs; | ||
801 | long err = 0; | ||
802 | char *secstrings, *strtab = NULL; | ||
803 | unsigned int len, i, symindex = 0, strindex = 0; | ||
804 | |||
805 | struct module mod; // so we can re-use the relocations code | ||
806 | |||
807 | memset(&mod, 0, sizeof(struct module)); | ||
808 | strcpy(mod.name, "VPE dummy prog module"); | ||
809 | |||
810 | hdr = (Elf_Ehdr *) v->pbuffer; | ||
811 | len = v->plen; | ||
812 | |||
813 | /* Sanity checks against insmoding binaries or wrong arch, | ||
814 | weird elf version */ | ||
815 | if (memcmp(hdr->e_ident, ELFMAG, 4) != 0 | ||
816 | || hdr->e_type != ET_REL || !elf_check_arch(hdr) | ||
817 | || hdr->e_shentsize != sizeof(*sechdrs)) { | ||
818 | printk(KERN_WARNING | ||
819 | "VPE loader: program has wrong arch or weird ELF version\n"); | ||
820 | |||
821 | return -ENOEXEC; | ||
822 | } | ||
823 | |||
824 | if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { | ||
825 | printk(KERN_ERR "VPE program length %u truncated\n", len); | ||
826 | return -ENOEXEC; | ||
827 | } | ||
828 | |||
829 | /* Convenience variables */ | ||
830 | sechdrs = (void *)hdr + hdr->e_shoff; | ||
831 | secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
832 | sechdrs[0].sh_addr = 0; | ||
833 | |||
834 | /* And these should exist, but gcc whinges if we don't init them */ | ||
835 | symindex = strindex = 0; | ||
836 | |||
837 | for (i = 1; i < hdr->e_shnum; i++) { | ||
838 | |||
839 | if (sechdrs[i].sh_type != SHT_NOBITS | ||
840 | && len < sechdrs[i].sh_offset + sechdrs[i].sh_size) { | ||
841 | printk(KERN_ERR "VPE program length %u truncated\n", | ||
842 | len); | ||
843 | return -ENOEXEC; | ||
844 | } | ||
845 | |||
846 | /* Mark all sections sh_addr with their address in the | ||
847 | temporary image. */ | ||
848 | sechdrs[i].sh_addr = (size_t) hdr + sechdrs[i].sh_offset; | ||
849 | |||
850 | /* Internal symbols and strings. */ | ||
851 | if (sechdrs[i].sh_type == SHT_SYMTAB) { | ||
852 | symindex = i; | ||
853 | strindex = sechdrs[i].sh_link; | ||
854 | strtab = (char *)hdr + sechdrs[strindex].sh_offset; | ||
855 | } | ||
856 | } | ||
857 | |||
858 | layout_sections(&mod, hdr, sechdrs, secstrings); | ||
859 | |||
860 | v->load_addr = alloc_progmem(mod.core_size); | ||
861 | memset(v->load_addr, 0, mod.core_size); | ||
862 | |||
863 | printk(KERN_INFO "VPE loader: loading to %p\n", v->load_addr); | ||
864 | |||
865 | for (i = 0; i < hdr->e_shnum; i++) { | ||
866 | void *dest; | ||
867 | |||
868 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | ||
869 | continue; | ||
870 | |||
871 | dest = v->load_addr + sechdrs[i].sh_entsize; | ||
872 | |||
873 | if (sechdrs[i].sh_type != SHT_NOBITS) | ||
874 | memcpy(dest, (void *)sechdrs[i].sh_addr, | ||
875 | sechdrs[i].sh_size); | ||
876 | /* Update sh_addr to point to copy in image. */ | ||
877 | sechdrs[i].sh_addr = (unsigned long)dest; | ||
878 | } | ||
879 | |||
880 | /* Fix up syms, so that st_value is a pointer to location. */ | ||
881 | err = | ||
882 | simplify_symbols(sechdrs, symindex, strtab, secstrings, | ||
883 | hdr->e_shnum, &mod); | ||
884 | if (err < 0) { | ||
885 | printk(KERN_WARNING "VPE: unable to simplify symbols\n"); | ||
886 | goto cleanup; | ||
887 | } | ||
888 | |||
889 | /* Now do relocations. */ | ||
890 | for (i = 1; i < hdr->e_shnum; i++) { | ||
891 | const char *strtab = (char *)sechdrs[strindex].sh_addr; | ||
892 | unsigned int info = sechdrs[i].sh_info; | ||
893 | |||
894 | /* Not a valid relocation section? */ | ||
895 | if (info >= hdr->e_shnum) | ||
896 | continue; | ||
897 | |||
898 | /* Don't bother with non-allocated sections */ | ||
899 | if (!(sechdrs[info].sh_flags & SHF_ALLOC)) | ||
900 | continue; | ||
901 | |||
902 | if (sechdrs[i].sh_type == SHT_REL) | ||
903 | err = | ||
904 | apply_relocations(sechdrs, strtab, symindex, i, &mod); | ||
905 | else if (sechdrs[i].sh_type == SHT_RELA) | ||
906 | err = apply_relocate_add(sechdrs, strtab, symindex, i, | ||
907 | &mod); | ||
908 | if (err < 0) { | ||
909 | printk(KERN_WARNING | ||
910 | "vpe_elfload: error in relocations err %ld\n", | ||
911 | err); | ||
912 | goto cleanup; | ||
913 | } | ||
914 | } | ||
915 | |||
916 | /* make sure it's physically written out */ | ||
917 | flush_icache_range((unsigned long)v->load_addr, | ||
918 | (unsigned long)v->load_addr + v->len); | ||
919 | |||
920 | if (find_vpe_symbols(v, sechdrs, symindex, strtab, &mod) < 0) { | ||
921 | printk(KERN_WARNING | ||
922 | "VPE: program doesn't contain __start or vpe_shared symbols\n"); | ||
923 | err = -ENOEXEC; | ||
924 | goto cleanup; | ||
925 | } | ||
926 | |||
927 | printk(KERN_INFO "VPE loader: ELF loaded\n"); | ||
928 | |||
929 | cleanup: | ||
930 | return err; | ||
931 | } | ||
932 | |||
933 | static void dump_vpe(vpe_t * v) | ||
934 | { | ||
935 | struct tc *t; | ||
936 | |||
937 | printk(KERN_DEBUG "VPEControl 0x%lx\n", read_vpe_c0_vpecontrol()); | ||
938 | printk(KERN_DEBUG "VPEConf0 0x%lx\n", read_vpe_c0_vpeconf0()); | ||
939 | |||
940 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
941 | dump_tc(t); | ||
942 | } | ||
943 | } | ||
944 | |||
945 | /* check that the VPE is unused and get ready to load a program */ | ||
946 | static int vpe_open(struct inode *inode, struct file *filp) | ||
947 | { | ||
948 | int minor; | ||
949 | vpe_t *v; | ||
950 | |||
951 | /* assume only one device for now */ | ||
952 | if ((minor = MINOR(inode->i_rdev)) != 1) { | ||
953 | printk(KERN_WARNING "VPE: only vpe1 is supported\n"); | ||
954 | return -ENODEV; | ||
955 | } | ||
956 | |||
957 | if ((v = get_vpe(minor)) == NULL) { | ||
958 | printk(KERN_WARNING "VPE: unable to get vpe\n"); | ||
959 | return -ENODEV; | ||
960 | } | ||
961 | |||
962 | if (v->state != VPE_STATE_UNUSED) { | ||
963 | unsigned long tmp; | ||
964 | struct tc *t; | ||
965 | |||
966 | printk(KERN_WARNING "VPE: device %d already in use\n", minor); | ||
967 | |||
968 | dvpe(); | ||
969 | dump_vpe(v); | ||
970 | |||
971 | printk(KERN_WARNING "VPE: re-initialising %d\n", minor); | ||
972 | |||
973 | release_progmem(v->load_addr); | ||
974 | |||
975 | t = get_tc(minor); | ||
976 | settc(minor); | ||
977 | tmp = read_tc_c0_tcstatus(); | ||
978 | |||
979 | /* mark not allocated and not dynamically allocatable */ | ||
980 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
981 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
982 | write_tc_c0_tcstatus(tmp); | ||
983 | |||
984 | write_tc_c0_tchalt(TCHALT_H); | ||
985 | |||
986 | } | ||
987 | |||
988 | // allocate it so when we get write ops we know it's expected. | ||
989 | v->state = VPE_STATE_INUSE; | ||
990 | |||
991 | /* this of course trashes what was there before... */ | ||
992 | v->pbuffer = vmalloc(P_SIZE); | ||
993 | v->plen = P_SIZE; | ||
994 | v->load_addr = NULL; | ||
995 | v->len = 0; | ||
996 | |||
997 | return 0; | ||
998 | } | ||
999 | |||
1000 | static int vpe_release(struct inode *inode, struct file *filp) | ||
1001 | { | ||
1002 | int minor, ret = 0; | ||
1003 | vpe_t *v; | ||
1004 | Elf_Ehdr *hdr; | ||
1005 | |||
1006 | minor = MINOR(inode->i_rdev); | ||
1007 | if ((v = get_vpe(minor)) == NULL) | ||
1008 | return -ENODEV; | ||
1009 | |||
1010 | // simple case of fire and forget, so tell the VPE to run... | ||
1011 | |||
1012 | hdr = (Elf_Ehdr *) v->pbuffer; | ||
1013 | if (memcmp(hdr->e_ident, ELFMAG, 4) == 0) { | ||
1014 | if (vpe_elfload(v) >= 0) | ||
1015 | vpe_run(v); | ||
1016 | else { | ||
1017 | printk(KERN_WARNING "VPE: ELF load failed.\n"); | ||
1018 | ret = -ENOEXEC; | ||
1019 | } | ||
1020 | } else { | ||
1021 | printk(KERN_WARNING "VPE: only ELF files are supported\n"); | ||
1022 | ret = -ENOEXEC; | ||
1023 | } | ||
1024 | |||
1025 | // cleanup any temp buffers | ||
1026 | if (v->pbuffer) | ||
1027 | vfree(v->pbuffer); | ||
1028 | v->plen = 0; | ||
1029 | return ret; | ||
1030 | } | ||
1031 | |||
1032 | static ssize_t vpe_write(struct file *file, const char __user * buffer, | ||
1033 | size_t count, loff_t * ppos) | ||
1034 | { | ||
1035 | int minor; | ||
1036 | size_t ret = count; | ||
1037 | vpe_t *v; | ||
1038 | |||
1039 | minor = MINOR(file->f_dentry->d_inode->i_rdev); | ||
1040 | if ((v = get_vpe(minor)) == NULL) | ||
1041 | return -ENODEV; | ||
1042 | |||
1043 | if (v->pbuffer == NULL) { | ||
1044 | printk(KERN_ERR "vpe_write: no pbuffer\n"); | ||
1045 | return -ENOMEM; | ||
1046 | } | ||
1047 | |||
1048 | if ((count + v->len) > v->plen) { | ||
1049 | printk(KERN_WARNING | ||
1050 | "VPE loader: ELF size too big. Perhaps strip unneeded symbols\n"); | ||
1051 | return -ENOMEM; | ||
1052 | } | ||
1053 | |||
1054 | count -= copy_from_user(v->pbuffer + v->len, buffer, count); | ||
1055 | if (!count) { | ||
1056 | printk(KERN_WARNING "vpe_write: copy_from_user failed\n"); | ||
1057 | return -EFAULT; | ||
1058 | } | ||
1059 | |||
1060 | v->len += count; | ||
1061 | return ret; | ||
1062 | } | ||
1063 | |||
1064 | static struct file_operations vpe_fops = { | ||
1065 | .owner = THIS_MODULE, | ||
1066 | .open = vpe_open, | ||
1067 | .release = vpe_release, | ||
1068 | .write = vpe_write | ||
1069 | }; | ||
1070 | |||
1071 | /* module wrapper entry points */ | ||
1072 | /* give me a vpe */ | ||
1073 | vpe_handle vpe_alloc(void) | ||
1074 | { | ||
1075 | int i; | ||
1076 | struct vpe *v; | ||
1077 | |||
1078 | /* find an unused vpe */ | ||
1079 | for (i = 1; i < MAX_VPES; i++) { | ||
1080 | if ((v = get_vpe(i)) != NULL && v->state == VPE_STATE_UNUSED) { | ||
1081 | v->state = VPE_STATE_INUSE; | ||
1082 | return v; | ||
1083 | } | ||
1084 | } | ||
1085 | return NULL; | ||
1086 | } | ||
1087 | |||
1088 | EXPORT_SYMBOL(vpe_alloc); | ||
1089 | |||
1090 | /* start running from here */ | ||
1091 | int vpe_start(vpe_handle vpe, unsigned long start) | ||
1092 | { | ||
1093 | struct vpe *v = vpe; | ||
1094 | |||
1095 | v->__start = start; | ||
1096 | return vpe_run(v); | ||
1097 | } | ||
1098 | |||
1099 | EXPORT_SYMBOL(vpe_start); | ||
1100 | |||
1101 | /* halt it for now */ | ||
1102 | int vpe_stop(vpe_handle vpe) | ||
1103 | { | ||
1104 | struct vpe *v = vpe; | ||
1105 | struct tc *t; | ||
1106 | unsigned int evpe_flags; | ||
1107 | |||
1108 | evpe_flags = dvpe(); | ||
1109 | |||
1110 | if (!list_empty(&v->tc)) { | ||
1111 | t = list_entry(v->tc.next, struct tc, tc); | ||
1112 | settc(t->index); | ||
1113 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
1114 | } | ||
1115 | |||
1116 | evpe(evpe_flags); | ||
1117 | |||
1118 | return 0; | ||
1119 | } | ||
1120 | |||
1121 | EXPORT_SYMBOL(vpe_stop); | ||
1122 | |||
1123 | /* I'm done with it, thank you */ | ||
1124 | int vpe_free(vpe_handle vpe) | ||
1125 | { | ||
1126 | struct vpe *v = vpe; | ||
1127 | struct tc *t; | ||
1128 | unsigned int evpe_flags; | ||
1129 | |||
1130 | if (list_empty(&v->tc)) | ||
1131 | return -ENOEXEC; | ||
1132 | t = list_entry(v->tc.next, struct tc, tc); | ||
1133 | |||
1134 | evpe_flags = dvpe(); | ||
1135 | |||
1136 | /* Put MVPE's into 'configuration state' */ | ||
1137 | write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC); | ||
1138 | |||
1139 | settc(t->index); | ||
1140 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
1141 | |||
1142 | /* mark the TC unallocated and halted */ | ||
1143 | write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A); | ||
1144 | write_tc_c0_tchalt(TCHALT_H); | ||
1145 | |||
1146 | v->state = VPE_STATE_UNUSED; | ||
1147 | |||
1148 | write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC); | ||
1149 | evpe(evpe_flags); | ||
1150 | |||
1151 | return 0; | ||
1152 | } | ||
1153 | |||
1154 | EXPORT_SYMBOL(vpe_free); | ||
1155 | |||
1156 | void *vpe_get_shared(int index) | ||
1157 | { | ||
1158 | struct vpe *v; | ||
1159 | |||
1160 | if ((v = get_vpe(index)) == NULL) { | ||
1161 | printk(KERN_WARNING "vpe: invalid vpe index %d\n", index); | ||
1162 | return NULL; | ||
1163 | } | ||
1164 | |||
1165 | return v->shared_ptr; | ||
1166 | } | ||
1167 | |||
1168 | EXPORT_SYMBOL(vpe_get_shared); | ||
1169 | |||
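A sketch of how another kernel module might drive the five symbols exported above, assuming the vpe_handle/vpe_* prototypes are made visible through a header; the entry address is a placeholder, since a real caller would load its own image first:

/* Sketch only: in-kernel client of the exported VPE API. */
#include <linux/module.h>
#include <linux/kernel.h>

static vpe_handle vpe;

static int __init vpe_client_init(void)
{
	vpe = vpe_alloc();
	if (vpe == NULL)
		return -ENODEV;

	/* 0x80100000 is purely illustrative */
	return vpe_start(vpe, 0x80100000);
}

static void __exit vpe_client_exit(void)
{
	vpe_stop(vpe);
	vpe_free(vpe);
}

module_init(vpe_client_init);
module_exit(vpe_client_exit);
MODULE_LICENSE("GPL");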
1170 | static int __init vpe_module_init(void) | ||
1171 | { | ||
1172 | struct vpe *v = NULL; | ||
1173 | struct tc *t; | ||
1174 | unsigned long val; | ||
1175 | int i; | ||
1176 | |||
1177 | if (!cpu_has_mipsmt) { | ||
1178 | printk(KERN_WARNING "VPE loader: not a MIPS MT capable processor\n"); | ||
1179 | return -ENODEV; | ||
1180 | } | ||
1181 | |||
1182 | if ((major = register_chrdev(VPE_MAJOR, module_name, &vpe_fops)) < 0) { | ||
1183 | printk(KERN_ERR "VPE loader: unable to register character device\n"); | ||
1184 | return -EBUSY; | ||
1185 | } | ||
1186 | |||
1187 | if (major == 0) | ||
1188 | major = VPE_MAJOR; | ||
1189 | |||
1190 | dmt(); | ||
1191 | dvpe(); | ||
1192 | |||
1193 | /* Put MVPE's into 'configuration state' */ | ||
1194 | write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_VPC); | ||
1195 | |||
1196 | /* dump_mtregs(); */ | ||
1197 | |||
1198 | INIT_LIST_HEAD(&vpecontrol.vpe_list); | ||
1199 | INIT_LIST_HEAD(&vpecontrol.tc_list); | ||
1200 | |||
1201 | val = read_c0_mvpconf0(); | ||
1202 | for (i = 0; i < ((val & MVPCONF0_PTC) + 1); i++) { | ||
1203 | t = alloc_tc(i); | ||
1204 | |||
1205 | /* VPE's */ | ||
1206 | if (i < ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1) { | ||
1207 | settc(i); | ||
1208 | |||
1209 | if ((v = alloc_vpe(i)) == NULL) { | ||
1210 | printk(KERN_WARNING "VPE: unable to allocate VPE\n"); | ||
1211 | return -ENODEV; | ||
1212 | } | ||
1213 | |||
1214 | list_add(&t->tc, &v->tc); /* add the tc to the list of this vpe's tc's. */ | ||
1215 | |||
1216 | /* deactivate all but vpe0 */ | ||
1217 | if (i != 0) { | ||
1218 | unsigned long tmp = read_vpe_c0_vpeconf0(); | ||
1219 | |||
1220 | tmp &= ~VPECONF0_VPA; | ||
1221 | |||
1222 | /* master VPE */ | ||
1223 | tmp |= VPECONF0_MVP; | ||
1224 | write_vpe_c0_vpeconf0(tmp); | ||
1225 | } | ||
1226 | |||
1227 | /* disable multi-threading with TC's */ | ||
1228 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); | ||
1229 | |||
1230 | if (i != 0) { | ||
1231 | write_vpe_c0_status((read_c0_status() & | ||
1232 | ~(ST0_IM | ST0_IE | ST0_KSU)) | ||
1233 | | ST0_CU0); | ||
1234 | |||
1235 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ | ||
1236 | write_vpe_c0_config(read_c0_config()); | ||
1237 | } | ||
1238 | |||
1239 | } | ||
1240 | |||
1241 | /* TC's */ | ||
1242 | t->pvpe = v; /* set the parent vpe */ | ||
1243 | |||
1244 | /* tc 0 will of course be running.... */ | ||
1245 | if (i == 0) | ||
1246 | t->state = TC_STATE_RUNNING; | ||
1247 | |||
1248 | if (i != 0) { | ||
1249 | unsigned long tmp; | ||
1250 | |||
1251 | settc(i); | ||
1252 | |||
1253 | /* bind a TC to each VPE; may as well put all excess TCs | ||
1254 | on the last VPE */ | ||
1255 | if (i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1)) | ||
1256 | write_tc_c0_tcbind(read_tc_c0_tcbind() | | ||
1257 | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)); | ||
1258 | else | ||
1259 | write_tc_c0_tcbind(read_tc_c0_tcbind() | i); | ||
1260 | |||
1261 | tmp = read_tc_c0_tcstatus(); | ||
1262 | |||
1263 | /* mark not allocated and not dynamically allocatable */ | ||
1264 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
1265 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
1266 | write_tc_c0_tcstatus(tmp); | ||
1267 | |||
1268 | write_tc_c0_tchalt(TCHALT_H); | ||
1269 | } | ||
1270 | } | ||
1271 | |||
1272 | /* release config state */ | ||
1273 | write_c0_mvpcontrol(read_c0_mvpcontrol() & ~MVPCONTROL_VPC); | ||
1274 | |||
1275 | return 0; | ||
1276 | } | ||
1277 | |||
1278 | static void __exit vpe_module_exit(void) | ||
1279 | { | ||
1280 | struct vpe *v, *n; | ||
1281 | |||
1282 | list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { | ||
1283 | if (v->state != VPE_STATE_UNUSED) { | ||
1284 | release_vpe(v); | ||
1285 | } | ||
1286 | } | ||
1287 | |||
1288 | unregister_chrdev(major, module_name); | ||
1289 | } | ||
1290 | |||
1291 | module_init(vpe_module_init); | ||
1292 | module_exit(vpe_module_exit); | ||
1293 | MODULE_DESCRIPTION("MIPS VPE Loader"); | ||
1294 | MODULE_AUTHOR("Elizabeth Clarke, MIPS Technologies, Inc"); | ||
1295 | MODULE_LICENSE("GPL"); | ||
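For completeness, a sketch of the user-space side of this interface: write an ELF relocatable object to the character device and close it, which triggers vpe_elfload() and vpe_run() via vpe_release(). The node name /dev/vpe1 is an assumption (any char device with major VPE_MAJOR and minor 1 will do):

/* Sketch only: user-space feeder for the VPE loader device. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char buf[4096];
	ssize_t n;
	int in, out;

	if (argc < 2) {
		fprintf(stderr, "usage: %s prog.o\n", argv[0]);
		return 1;
	}
	in = open(argv[1], O_RDONLY);		/* ELF relocatable object */
	out = open("/dev/vpe1", O_WRONLY);	/* assumed device node */
	if (in < 0 || out < 0) {
		perror("open");
		return 1;
	}
	while ((n = read(in, buf, sizeof(buf))) > 0)
		write(out, buf, n);
	/* closing the device loads and starts the program */
	close(out);
	close(in);
	return 0;
}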
diff --git a/arch/mips/mips-boards/generic/init.c b/arch/mips/mips-boards/generic/init.c index d821b13d24a0..58256ea33102 100644 --- a/arch/mips/mips-boards/generic/init.c +++ b/arch/mips/mips-boards/generic/init.c | |||
@@ -28,6 +28,8 @@ | |||
28 | #include <asm/gt64120.h> | 28 | #include <asm/gt64120.h> |
29 | #include <asm/io.h> | 29 | #include <asm/io.h> |
30 | #include <asm/system.h> | 30 | #include <asm/system.h> |
31 | #include <asm/cacheflush.h> | ||
32 | #include <asm/traps.h> | ||
31 | 33 | ||
32 | #include <asm/mips-boards/prom.h> | 34 | #include <asm/mips-boards/prom.h> |
33 | #include <asm/mips-boards/generic.h> | 35 | #include <asm/mips-boards/generic.h> |
@@ -224,6 +226,30 @@ void __init kgdb_config (void) | |||
224 | } | 226 | } |
225 | #endif | 227 | #endif |
226 | 228 | ||
229 | void __init mips_nmi_setup (void) | ||
230 | { | ||
231 | void *base; | ||
232 | extern char except_vec_nmi; | ||
233 | |||
234 | base = cpu_has_veic ? | ||
235 | (void *)(CAC_BASE + 0xa80) : | ||
236 | (void *)(CAC_BASE + 0x380); | ||
237 | memcpy(base, &except_vec_nmi, 0x80); | ||
238 | flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); | ||
239 | } | ||
240 | |||
241 | void __init mips_ejtag_setup (void) | ||
242 | { | ||
243 | void *base; | ||
244 | extern char except_vec_ejtag_debug; | ||
245 | |||
246 | base = cpu_has_veic ? | ||
247 | (void *)(CAC_BASE + 0xa00) : | ||
248 | (void *)(CAC_BASE + 0x300); | ||
249 | memcpy(base, &except_vec_ejtag_debug, 0x80); | ||
250 | flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); | ||
251 | } | ||
252 | |||
227 | void __init prom_init(void) | 253 | void __init prom_init(void) |
228 | { | 254 | { |
229 | u32 start, map, mask, data; | 255 | u32 start, map, mask, data; |
@@ -353,6 +379,9 @@ void __init prom_init(void) | |||
353 | while(1); /* We die here... */ | 379 | while(1); /* We die here... */ |
354 | } | 380 | } |
355 | #endif | 381 | #endif |
382 | board_nmi_handler_setup = mips_nmi_setup; | ||
383 | board_ejtag_handler_setup = mips_ejtag_setup; | ||
384 | |||
356 | prom_printf("\nLINUX started...\n"); | 385 | prom_printf("\nLINUX started...\n"); |
357 | prom_init_cmdline(); | 386 | prom_init_cmdline(); |
358 | prom_meminit(); | 387 | prom_meminit(); |
diff --git a/arch/mips/mips-boards/generic/memory.c b/arch/mips/mips-boards/generic/memory.c index 5ae2b43e4c2e..2c8afd77a20b 100644 --- a/arch/mips/mips-boards/generic/memory.c +++ b/arch/mips/mips-boards/generic/memory.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/bootmem.h> | 24 | #include <linux/bootmem.h> |
25 | #include <linux/string.h> | ||
25 | 26 | ||
26 | #include <asm/bootinfo.h> | 27 | #include <asm/bootinfo.h> |
27 | #include <asm/page.h> | 28 | #include <asm/page.h> |
@@ -55,18 +56,30 @@ struct prom_pmemblock * __init prom_getmdesc(void) | |||
55 | { | 56 | { |
56 | char *memsize_str; | 57 | char *memsize_str; |
57 | unsigned int memsize; | 58 | unsigned int memsize; |
59 | char cmdline[CL_SIZE], *ptr; | ||
58 | 60 | ||
59 | memsize_str = prom_getenv("memsize"); | 61 | /* Check the command line first for a memsize directive */ |
60 | if (!memsize_str) { | 62 | strcpy(cmdline, arcs_cmdline); |
61 | prom_printf("memsize not set in boot prom, set to default (32Mb)\n"); | 63 | ptr = strstr(cmdline, "memsize="); |
62 | memsize = 0x02000000; | 64 | if (ptr && (ptr != cmdline) && (*(ptr - 1) != ' ')) |
63 | } else { | 65 | ptr = strstr(ptr, " memsize="); |
66 | |||
67 | if (ptr) { | ||
68 | memsize = memparse(ptr + 8, &ptr); | ||
69 | } | ||
70 | else { | ||
71 | /* otherwise look in the environment */ | ||
72 | memsize_str = prom_getenv("memsize"); | ||
73 | if (!memsize_str) { | ||
74 | prom_printf("memsize not set in boot prom, set to default (32Mb)\n"); | ||
75 | memsize = 0x02000000; | ||
76 | } else { | ||
64 | #ifdef DEBUG | 77 | #ifdef DEBUG |
65 | prom_printf("prom_memsize = %s\n", memsize_str); | 78 | prom_printf("prom_memsize = %s\n", memsize_str); |
66 | #endif | 79 | #endif |
67 | memsize = simple_strtol(memsize_str, NULL, 0); | 80 | memsize = simple_strtol(memsize_str, NULL, 0); |
81 | } | ||
68 | } | 82 | } |
69 | |||
70 | memset(mdesc, 0, sizeof(mdesc)); | 83 | memset(mdesc, 0, sizeof(mdesc)); |
71 | 84 | ||
72 | mdesc[0].type = yamon_dontuse; | 85 | mdesc[0].type = yamon_dontuse; |
diff --git a/arch/mips/mips-boards/generic/mipsIRQ.S b/arch/mips/mips-boards/generic/mipsIRQ.S index 131f49bccb20..a397ecb872d6 100644 --- a/arch/mips/mips-boards/generic/mipsIRQ.S +++ b/arch/mips/mips-boards/generic/mipsIRQ.S | |||
@@ -29,6 +29,20 @@ | |||
29 | #include <asm/regdef.h> | 29 | #include <asm/regdef.h> |
30 | #include <asm/stackframe.h> | 30 | #include <asm/stackframe.h> |
31 | 31 | ||
32 | #ifdef CONFIG_MIPS_ATLAS | ||
33 | #include <asm/mips-boards/atlasint.h> | ||
34 | #define CASCADE_IRQ MIPSCPU_INT_ATLAS | ||
35 | #define CASCADE_DISPATCH atlas_hw0_irqdispatch | ||
36 | #endif | ||
37 | #ifdef CONFIG_MIPS_MALTA | ||
38 | #include <asm/mips-boards/maltaint.h> | ||
39 | #define CASCADE_IRQ MIPSCPU_INT_I8259A | ||
40 | #define CASCADE_DISPATCH malta_hw0_irqdispatch | ||
41 | #endif | ||
42 | #ifdef CONFIG_MIPS_SEAD | ||
43 | #include <asm/mips-boards/seadint.h> | ||
44 | #endif | ||
45 | |||
32 | /* A lot of complication here is taken away because: | 46 | /* A lot of complication here is taken away because: |
33 | * | 47 | * |
34 | * 1) We handle one interrupt and return, sitting in a loop and moving across | 48 | * 1) We handle one interrupt and return, sitting in a loop and moving across |
@@ -80,74 +94,62 @@ | |||
80 | 94 | ||
81 | mfc0 s0, CP0_CAUSE # get irq bits | 95 | mfc0 s0, CP0_CAUSE # get irq bits |
82 | mfc0 s1, CP0_STATUS # get irq mask | 96 | mfc0 s1, CP0_STATUS # get irq mask |
97 | andi s0, ST0_IM # CAUSE.CE may be non-zero! | ||
83 | and s0, s1 | 98 | and s0, s1 |
84 | 99 | ||
85 | /* First we check for r4k counter/timer IRQ. */ | 100 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) |
86 | andi a0, s0, CAUSEF_IP7 | 101 | .set mips32 |
87 | beq a0, zero, 1f | 102 | clz a0, s0 |
88 | andi a0, s0, CAUSEF_IP2 # delay slot, check hw0 interrupt | 103 | .set mips0 |
104 | negu a0 | ||
105 | addu a0, 31-CAUSEB_IP | ||
106 | bltz a0, spurious | ||
107 | #else | ||
108 | beqz s0, spurious | ||
109 | li a0, 7 | ||
89 | 110 | ||
90 | /* Wheee, a timer interrupt. */ | 111 | and t0, s0, 0xf000 |
91 | move a0, sp | 112 | sltiu t0, t0, 1 |
92 | jal mips_timer_interrupt | 113 | sll t0, 2 |
93 | nop | 114 | subu a0, t0 |
115 | sll s0, t0 | ||
94 | 116 | ||
95 | j ret_from_irq | 117 | and t0, s0, 0xc000 |
96 | nop | 118 | sltiu t0, t0, 1 |
119 | sll t0, 1 | ||
120 | subu a0, t0 | ||
121 | sll s0, t0 | ||
97 | 122 | ||
98 | 1: | 123 | and t0, s0, 0x8000 |
99 | #if defined(CONFIG_MIPS_SEAD) | 124 | sltiu t0, t0, 1 |
100 | beq a0, zero, 1f | 125 | # sll t0, 0 |
101 | andi a0, s0, CAUSEF_IP3 # delay slot, check hw1 interrupt | 126 | subu a0, t0 |
102 | #else | 127 | # sll s0, t0 |
103 | beq a0, zero, 1f # delay slot, check hw3 interrupt | ||
104 | andi a0, s0, CAUSEF_IP5 | ||
105 | #endif | 128 | #endif |
106 | 129 | ||
107 | /* Wheee, combined hardware level zero interrupt. */ | 130 | #ifdef CASCADE_IRQ |
108 | #if defined(CONFIG_MIPS_ATLAS) | 131 | li a1, CASCADE_IRQ |
109 | jal atlas_hw0_irqdispatch | 132 | bne a0, a1, 1f |
110 | #elif defined(CONFIG_MIPS_MALTA) | 133 | addu a0, MIPSCPU_INT_BASE |
111 | jal malta_hw0_irqdispatch | ||
112 | #elif defined(CONFIG_MIPS_SEAD) | ||
113 | jal sead_hw0_irqdispatch | ||
114 | #else | ||
115 | #error "MIPS board not supported\n" | ||
116 | #endif | ||
117 | move a0, sp # delay slot | ||
118 | 134 | ||
119 | j ret_from_irq | 135 | jal CASCADE_DISPATCH |
120 | nop # delay slot | 136 | move a0, sp |
121 | 137 | ||
122 | 1: | ||
123 | #if defined(CONFIG_MIPS_SEAD) | ||
124 | beq a0, zero, 1f | ||
125 | andi a0, s0, CAUSEF_IP5 # delay slot, check hw3 interrupt | ||
126 | jal sead_hw1_irqdispatch | ||
127 | move a0, sp # delay slot | ||
128 | j ret_from_irq | ||
129 | nop # delay slot | ||
130 | 1: | ||
131 | #endif | ||
132 | #if defined(CONFIG_MIPS_MALTA) | ||
133 | beq a0, zero, 1f # check hw3 (coreHI) interrupt | ||
134 | nop | ||
135 | jal corehi_irqdispatch | ||
136 | move a0, sp | ||
137 | j ret_from_irq | 138 | j ret_from_irq |
138 | nop | 139 | nop |
139 | 1: | 140 | 1: |
141 | #else | ||
142 | addu a0, MIPSCPU_INT_BASE | ||
140 | #endif | 143 | #endif |
141 | /* | 144 | |
142 | * Here by mistake? This is possible, what can happen is that by the | 145 | jal do_IRQ |
143 | * time we take the exception the IRQ pin goes low, so just leave if | 146 | move a1, sp |
144 | * this is the case. | ||
145 | */ | ||
146 | move a1,s0 | ||
147 | PRINT("Got interrupt: c0_cause = %08x\n") | ||
148 | mfc0 a1, CP0_EPC | ||
149 | PRINT("c0_epc = %08x\n") | ||
150 | 147 | ||
151 | j ret_from_irq | 148 | j ret_from_irq |
152 | nop | 149 | nop |
150 | |||
151 | |||
152 | spurious: | ||
153 | j spurious_interrupt | ||
154 | nop | ||
153 | END(mipsIRQ) | 155 | END(mipsIRQ) |
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c index 16315444dd5a..3a6f1428b2cb 100644 --- a/arch/mips/mips-boards/generic/time.c +++ b/arch/mips/mips-boards/generic/time.c | |||
@@ -31,22 +31,21 @@ | |||
31 | 31 | ||
32 | #include <asm/mipsregs.h> | 32 | #include <asm/mipsregs.h> |
33 | #include <asm/ptrace.h> | 33 | #include <asm/ptrace.h> |
34 | #include <asm/hardirq.h> | ||
35 | #include <asm/irq.h> | ||
34 | #include <asm/div64.h> | 36 | #include <asm/div64.h> |
35 | #include <asm/cpu.h> | 37 | #include <asm/cpu.h> |
36 | #include <asm/time.h> | 38 | #include <asm/time.h> |
37 | #include <asm/mc146818-time.h> | 39 | #include <asm/mc146818-time.h> |
40 | #include <asm/msc01_ic.h> | ||
38 | 41 | ||
39 | #include <asm/mips-boards/generic.h> | 42 | #include <asm/mips-boards/generic.h> |
40 | #include <asm/mips-boards/prom.h> | 43 | #include <asm/mips-boards/prom.h> |
44 | #include <asm/mips-boards/maltaint.h> | ||
45 | #include <asm/mc146818-time.h> | ||
41 | 46 | ||
42 | unsigned long cpu_khz; | 47 | unsigned long cpu_khz; |
43 | 48 | ||
44 | #if defined(CONFIG_MIPS_SEAD) | ||
45 | #define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ5) | ||
46 | #else | ||
47 | #define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5) | ||
48 | #endif | ||
49 | |||
50 | #if defined(CONFIG_MIPS_ATLAS) | 49 | #if defined(CONFIG_MIPS_ATLAS) |
51 | static char display_string[] = " LINUX ON ATLAS "; | 50 | static char display_string[] = " LINUX ON ATLAS "; |
52 | #endif | 51 | #endif |
@@ -59,20 +58,27 @@ static char display_string[] = " LINUX ON SEAD "; | |||
59 | static unsigned int display_count = 0; | 58 | static unsigned int display_count = 0; |
60 | #define MAX_DISPLAY_COUNT (sizeof(display_string) - 8) | 59 | #define MAX_DISPLAY_COUNT (sizeof(display_string) - 8) |
61 | 60 | ||
62 | #define MIPS_CPU_TIMER_IRQ (NR_IRQS-1) | ||
63 | |||
64 | static unsigned int timer_tick_count=0; | 61 | static unsigned int timer_tick_count=0; |
62 | static int mips_cpu_timer_irq; | ||
65 | 63 | ||
66 | void mips_timer_interrupt(struct pt_regs *regs) | 64 | static void mips_timer_dispatch (struct pt_regs *regs) |
67 | { | 65 | { |
66 | do_IRQ (mips_cpu_timer_irq, regs); | ||
67 | } | ||
68 | |||
69 | irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs) | ||
70 | { | ||
71 | irqreturn_t r; | ||
72 | |||
73 | r = timer_interrupt(irq, dev_id, regs); | ||
74 | |||
68 | if ((timer_tick_count++ % HZ) == 0) { | 75 | if ((timer_tick_count++ % HZ) == 0) { |
69 | mips_display_message(&display_string[display_count++]); | 76 | mips_display_message(&display_string[display_count++]); |
70 | if (display_count == MAX_DISPLAY_COUNT) | 77 | if (display_count == MAX_DISPLAY_COUNT) |
71 | display_count = 0; | 78 | display_count = 0; |
72 | |||
73 | } | 79 | } |
74 | 80 | ||
75 | ll_timer_interrupt(MIPS_CPU_TIMER_IRQ, regs); | 81 | return r; |
76 | } | 82 | } |
77 | 83 | ||
78 | /* | 84 | /* |
@@ -140,10 +146,8 @@ void __init mips_time_init(void) | |||
140 | 146 | ||
141 | local_irq_save(flags); | 147 | local_irq_save(flags); |
142 | 148 | ||
143 | #if defined(CONFIG_MIPS_ATLAS) || defined(CONFIG_MIPS_MALTA) | ||
144 | /* Set Data mode - binary. */ | 149 | /* Set Data mode - binary. */ |
145 | CMOS_WRITE(CMOS_READ(RTC_CONTROL) | RTC_DM_BINARY, RTC_CONTROL); | 150 | CMOS_WRITE(CMOS_READ(RTC_CONTROL) | RTC_DM_BINARY, RTC_CONTROL); |
146 | #endif | ||
147 | 151 | ||
148 | est_freq = estimate_cpu_frequency (); | 152 | est_freq = estimate_cpu_frequency (); |
149 | 153 | ||
@@ -157,11 +161,22 @@ void __init mips_time_init(void) | |||
157 | 161 | ||
158 | void __init mips_timer_setup(struct irqaction *irq) | 162 | void __init mips_timer_setup(struct irqaction *irq) |
159 | { | 163 | { |
164 | if (cpu_has_veic) { | ||
165 | set_vi_handler (MSC01E_INT_CPUCTR, mips_timer_dispatch); | ||
166 | mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; | ||
167 | } | ||
168 | else { | ||
169 | if (cpu_has_vint) | ||
170 | set_vi_handler (MIPSCPU_INT_CPUCTR, mips_timer_dispatch); | ||
171 | mips_cpu_timer_irq = MIPSCPU_INT_BASE + MIPSCPU_INT_CPUCTR; | ||
172 | } | ||
173 | |||
174 | |||
160 | /* we are using the cpu counter for timer interrupts */ | 175 | /* we are using the cpu counter for timer interrupts */ |
161 | irq->handler = no_action; /* we use our own handler */ | 176 | irq->handler = mips_timer_interrupt; /* we use our own handler */ |
162 | setup_irq(MIPS_CPU_TIMER_IRQ, irq); | 177 | setup_irq(mips_cpu_timer_irq, irq); |
178 | |||
163 | 179 | ||
164 | /* to generate the first timer interrupt */ | 180 | /* to generate the first timer interrupt */ |
165 | write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ); | 181 | write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ); |
166 | set_c0_status(ALLINTS); | ||
167 | } | 182 | } |
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c index dd2db35966bc..6f99a4492015 100644 --- a/arch/mips/mips-boards/malta/malta_int.c +++ b/arch/mips/mips-boards/malta/malta_int.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <linux/random.h> | 30 | #include <linux/random.h> |
31 | 31 | ||
32 | #include <asm/i8259.h> | 32 | #include <asm/i8259.h> |
33 | #include <asm/irq_cpu.h> | ||
33 | #include <asm/io.h> | 34 | #include <asm/io.h> |
34 | #include <asm/mips-boards/malta.h> | 35 | #include <asm/mips-boards/malta.h> |
35 | #include <asm/mips-boards/maltaint.h> | 36 | #include <asm/mips-boards/maltaint.h> |
@@ -37,8 +38,10 @@ | |||
37 | #include <asm/gt64120.h> | 38 | #include <asm/gt64120.h> |
38 | #include <asm/mips-boards/generic.h> | 39 | #include <asm/mips-boards/generic.h> |
39 | #include <asm/mips-boards/msc01_pci.h> | 40 | #include <asm/mips-boards/msc01_pci.h> |
41 | #include <asm/msc01_ic.h> | ||
40 | 42 | ||
41 | extern asmlinkage void mipsIRQ(void); | 43 | extern asmlinkage void mipsIRQ(void); |
44 | extern void mips_timer_interrupt(void); | ||
42 | 45 | ||
43 | static DEFINE_SPINLOCK(mips_irq_lock); | 46 | static DEFINE_SPINLOCK(mips_irq_lock); |
44 | 47 | ||
@@ -91,13 +94,13 @@ static inline int mips_pcibios_iack(void) | |||
91 | return irq; | 94 | return irq; |
92 | } | 95 | } |
93 | 96 | ||
94 | static inline int get_int(int *irq) | 97 | static inline int get_int(void) |
95 | { | 98 | { |
96 | unsigned long flags; | 99 | unsigned long flags; |
97 | 100 | int irq; | |
98 | spin_lock_irqsave(&mips_irq_lock, flags); | 101 | spin_lock_irqsave(&mips_irq_lock, flags); |
99 | 102 | ||
100 | *irq = mips_pcibios_iack(); | 103 | irq = mips_pcibios_iack(); |
101 | 104 | ||
102 | /* | 105 | /* |
103 | * IRQ7 is used to detect spurious interrupts. | 106 | * IRQ7 is used to detect spurious interrupts. |
@@ -106,73 +109,82 @@ static inline int get_int(int *irq) | |||
106 | * We can differentiate between this situation and a | 109 | * We can differentiate between this situation and a |
107 | * "Normal" IRQ7 by reading the ISR. | 110 | * "Normal" IRQ7 by reading the ISR. |
108 | */ | 111 | */ |
109 | if (*irq == 7) | 112 | if (irq == 7) |
110 | { | 113 | { |
111 | outb(PIIX4_OCW3_SEL | PIIX4_OCW3_ISR, | 114 | outb(PIIX4_OCW3_SEL | PIIX4_OCW3_ISR, |
112 | PIIX4_ICTLR1_OCW3); | 115 | PIIX4_ICTLR1_OCW3); |
113 | if (!(inb(PIIX4_ICTLR1_OCW3) & (1 << 7))) { | 116 | if (!(inb(PIIX4_ICTLR1_OCW3) & (1 << 7))) { |
114 | spin_unlock_irqrestore(&mips_irq_lock, flags); | 117 | irq = -1; /* Spurious interrupt */ |
115 | printk("We got a spurious interrupt from PIIX4.\n"); | 118 | printk("We got a spurious interrupt from PIIX4.\n"); |
116 | atomic_inc(&irq_err_count); | 119 | atomic_inc(&irq_err_count); |
117 | return -1; /* Spurious interrupt. */ | ||
118 | } | 120 | } |
119 | } | 121 | } |
120 | 122 | ||
121 | spin_unlock_irqrestore(&mips_irq_lock, flags); | 123 | spin_unlock_irqrestore(&mips_irq_lock, flags); |
122 | 124 | ||
123 | return 0; | 125 | return irq; |
124 | } | 126 | } |
125 | 127 | ||
126 | void malta_hw0_irqdispatch(struct pt_regs *regs) | 128 | void malta_hw0_irqdispatch(struct pt_regs *regs) |
127 | { | 129 | { |
128 | int irq; | 130 | int irq; |
129 | 131 | ||
130 | if (get_int(&irq)) | 132 | irq = get_int(); |
131 | return; /* interrupt has already been cleared */ | 133 | if (irq < 0) |
134 | return; /* interrupt has already been cleared */ | ||
132 | 135 | ||
133 | do_IRQ(irq, regs); | 136 | do_IRQ(MALTA_INT_BASE+irq, regs); |
134 | } | 137 | } |
135 | 138 | ||
136 | void corehi_irqdispatch(struct pt_regs *regs) | 139 | void corehi_irqdispatch(struct pt_regs *regs) |
137 | { | 140 | { |
138 | unsigned int data,datahi; | 141 | unsigned int intrcause,datalo,datahi; |
139 | 142 | unsigned int pcimstat, intisr, inten, intpol, intedge, intsteer, pcicmd, pcibadaddr; | |
140 | /* Mask out corehi interrupt. */ | ||
141 | clear_c0_status(IE_IRQ3); | ||
142 | 143 | ||
143 | printk("CoreHI interrupt, shouldn't happen, so we die here!!!\n"); | 144 | printk("CoreHI interrupt, shouldn't happen, so we die here!!!\n"); |
144 | printk("epc : %08lx\nStatus: %08lx\nCause : %08lx\nbadVaddr : %08lx\n" | 145 | printk("epc : %08lx\nStatus: %08lx\nCause : %08lx\nbadVaddr : %08lx\n" |
145 | , regs->cp0_epc, regs->cp0_status, regs->cp0_cause, regs->cp0_badvaddr); | 146 | , regs->cp0_epc, regs->cp0_status, regs->cp0_cause, regs->cp0_badvaddr); |
147 | |||
148 | /* Read all the registers and then print them as there is a | ||
149 | problem with interspersed printk's upsetting the Bonito controller. | ||
150 | Do it for the others too. | ||
151 | */ | ||
152 | |||
146 | switch(mips_revision_corid) { | 153 | switch(mips_revision_corid) { |
147 | case MIPS_REVISION_CORID_CORE_MSC: | 154 | case MIPS_REVISION_CORID_CORE_MSC: |
148 | case MIPS_REVISION_CORID_CORE_FPGA2: | 155 | case MIPS_REVISION_CORID_CORE_FPGA2: |
149 | case MIPS_REVISION_CORID_CORE_EMUL_MSC: | 156 | case MIPS_REVISION_CORID_CORE_EMUL_MSC: |
157 | ll_msc_irq(regs); | ||
150 | break; | 158 | break; |
151 | case MIPS_REVISION_CORID_QED_RM5261: | 159 | case MIPS_REVISION_CORID_QED_RM5261: |
152 | case MIPS_REVISION_CORID_CORE_LV: | 160 | case MIPS_REVISION_CORID_CORE_LV: |
153 | case MIPS_REVISION_CORID_CORE_FPGA: | 161 | case MIPS_REVISION_CORID_CORE_FPGA: |
154 | case MIPS_REVISION_CORID_CORE_FPGAR2: | 162 | case MIPS_REVISION_CORID_CORE_FPGAR2: |
155 | data = GT_READ(GT_INTRCAUSE_OFS); | 163 | intrcause = GT_READ(GT_INTRCAUSE_OFS); |
156 | printk("GT_INTRCAUSE = %08x\n", data); | 164 | datalo = GT_READ(GT_CPUERR_ADDRLO_OFS); |
157 | data = GT_READ(GT_CPUERR_ADDRLO_OFS); | ||
158 | datahi = GT_READ(GT_CPUERR_ADDRHI_OFS); | 165 | datahi = GT_READ(GT_CPUERR_ADDRHI_OFS); |
159 | printk("GT_CPUERR_ADDR = %02x%08x\n", datahi, data); | 166 | printk("GT_INTRCAUSE = %08x\n", intrcause); |
167 | printk("GT_CPUERR_ADDR = %02x%08x\n", datahi, datalo); | ||
160 | break; | 168 | break; |
161 | case MIPS_REVISION_CORID_BONITO64: | 169 | case MIPS_REVISION_CORID_BONITO64: |
162 | case MIPS_REVISION_CORID_CORE_20K: | 170 | case MIPS_REVISION_CORID_CORE_20K: |
163 | case MIPS_REVISION_CORID_CORE_EMUL_BON: | 171 | case MIPS_REVISION_CORID_CORE_EMUL_BON: |
164 | data = BONITO_INTISR; | 172 | pcibadaddr = BONITO_PCIBADADDR; |
165 | printk("BONITO_INTISR = %08x\n", data); | 173 | pcimstat = BONITO_PCIMSTAT; |
166 | data = BONITO_INTEN; | 174 | intisr = BONITO_INTISR; |
167 | printk("BONITO_INTEN = %08x\n", data); | 175 | inten = BONITO_INTEN; |
168 | data = BONITO_INTPOL; | 176 | intpol = BONITO_INTPOL; |
169 | printk("BONITO_INTPOL = %08x\n", data); | 177 | intedge = BONITO_INTEDGE; |
170 | data = BONITO_INTEDGE; | 178 | intsteer = BONITO_INTSTEER; |
171 | printk("BONITO_INTEDGE = %08x\n", data); | 179 | pcicmd = BONITO_PCICMD; |
172 | data = BONITO_INTSTEER; | 180 | printk("BONITO_INTISR = %08x\n", intisr); |
173 | printk("BONITO_INTSTEER = %08x\n", data); | 181 | printk("BONITO_INTEN = %08x\n", inten); |
174 | data = BONITO_PCICMD; | 182 | printk("BONITO_INTPOL = %08x\n", intpol); |
175 | printk("BONITO_PCICMD = %08x\n", data); | 183 | printk("BONITO_INTEDGE = %08x\n", intedge); |
184 | printk("BONITO_INTSTEER = %08x\n", intsteer); | ||
185 | printk("BONITO_PCICMD = %08x\n", pcicmd); | ||
186 | printk("BONITO_PCIBADADDR = %08x\n", pcibadaddr); | ||
187 | printk("BONITO_PCIMSTAT = %08x\n", pcimstat); | ||
176 | break; | 188 | break; |
177 | } | 189 | } |
178 | 190 | ||
@@ -180,8 +192,70 @@ void corehi_irqdispatch(struct pt_regs *regs) | |||
180 | die("CoreHi interrupt", regs); | 192 | die("CoreHi interrupt", regs); |
181 | } | 193 | } |
182 | 194 | ||
195 | static struct irqaction i8259irq = { | ||
196 | .handler = no_action, | ||
197 | .name = "XT-PIC cascade" | ||
198 | }; | ||
199 | |||
200 | static struct irqaction corehi_irqaction = { | ||
201 | .handler = no_action, | ||
202 | .name = "CoreHi" | ||
203 | }; | ||
204 | |||
205 | msc_irqmap_t __initdata msc_irqmap[] = { | ||
206 | {MSC01C_INT_TMR, MSC01_IRQ_EDGE, 0}, | ||
207 | {MSC01C_INT_PCI, MSC01_IRQ_LEVEL, 0}, | ||
208 | }; | ||
209 | int __initdata msc_nr_irqs = sizeof(msc_irqmap)/sizeof(msc_irqmap_t); | ||
210 | |||
211 | msc_irqmap_t __initdata msc_eicirqmap[] = { | ||
212 | {MSC01E_INT_SW0, MSC01_IRQ_LEVEL, 0}, | ||
213 | {MSC01E_INT_SW1, MSC01_IRQ_LEVEL, 0}, | ||
214 | {MSC01E_INT_I8259A, MSC01_IRQ_LEVEL, 0}, | ||
215 | {MSC01E_INT_SMI, MSC01_IRQ_LEVEL, 0}, | ||
216 | {MSC01E_INT_COREHI, MSC01_IRQ_LEVEL, 0}, | ||
217 | {MSC01E_INT_CORELO, MSC01_IRQ_LEVEL, 0}, | ||
218 | {MSC01E_INT_TMR, MSC01_IRQ_EDGE, 0}, | ||
219 | {MSC01E_INT_PCI, MSC01_IRQ_LEVEL, 0}, | ||
220 | {MSC01E_INT_PERFCTR, MSC01_IRQ_LEVEL, 0}, | ||
221 | {MSC01E_INT_CPUCTR, MSC01_IRQ_LEVEL, 0} | ||
222 | }; | ||
223 | int __initdata msc_nr_eicirqs = sizeof(msc_eicirqmap)/sizeof(msc_irqmap_t); | ||
224 | |||
183 | void __init arch_init_irq(void) | 225 | void __init arch_init_irq(void) |
184 | { | 226 | { |
185 | set_except_vector(0, mipsIRQ); | 227 | set_except_vector(0, mipsIRQ); |
186 | init_i8259_irqs(); | 228 | init_i8259_irqs(); |
229 | |||
230 | if (!cpu_has_veic) | ||
231 | mips_cpu_irq_init (MIPSCPU_INT_BASE); | ||
232 | |||
233 | switch(mips_revision_corid) { | ||
234 | case MIPS_REVISION_CORID_CORE_MSC: | ||
235 | case MIPS_REVISION_CORID_CORE_FPGA2: | ||
236 | case MIPS_REVISION_CORID_CORE_EMUL_MSC: | ||
237 | if (cpu_has_veic) | ||
238 | init_msc_irqs (MSC01E_INT_BASE, msc_eicirqmap, msc_nr_eicirqs); | ||
239 | else | ||
240 | init_msc_irqs (MSC01C_INT_BASE, msc_irqmap, msc_nr_irqs); | ||
241 | } | ||
242 | |||
243 | if (cpu_has_veic) { | ||
244 | set_vi_handler (MSC01E_INT_I8259A, malta_hw0_irqdispatch); | ||
245 | set_vi_handler (MSC01E_INT_COREHI, corehi_irqdispatch); | ||
246 | setup_irq (MSC01E_INT_BASE+MSC01E_INT_I8259A, &i8259irq); | ||
247 | setup_irq (MSC01E_INT_BASE+MSC01E_INT_COREHI, &corehi_irqaction); | ||
248 | } | ||
249 | else if (cpu_has_vint) { | ||
250 | set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); | ||
251 | set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch); | ||
252 | |||
253 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); | ||
254 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction); | ||
255 | } | ||
256 | else { | ||
257 | set_except_vector(0, mipsIRQ); | ||
258 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq); | ||
259 | setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction); | ||
260 | } | ||
187 | } | 261 | } |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index d3e352fb3243..6a1267ad071f 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -1228,8 +1228,7 @@ void __init ld_mmu_r4xx0(void) | |||
1228 | struct cpuinfo_mips *c = ¤t_cpu_data; | 1228 | struct cpuinfo_mips *c = ¤t_cpu_data; |
1229 | 1229 | ||
1230 | /* Default cache error handler for R4000 and R5000 family */ | 1230 | /* Default cache error handler for R4000 and R5000 family */ |
1231 | memcpy((void *)(CAC_BASE + 0x100), &except_vec2_generic, 0x80); | 1231 | set_uncached_handler (0x100, &except_vec2_generic, 0x80); |
1232 | memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_generic, 0x80); | ||
1233 | 1232 | ||
1234 | probe_pcache(); | 1233 | probe_pcache(); |
1235 | setup_scache(); | 1234 | setup_scache(); |
diff --git a/arch/mips/mm/c-sb1.c b/arch/mips/mm/c-sb1.c index e65b1bb576fd..d183dbced687 100644 --- a/arch/mips/mm/c-sb1.c +++ b/arch/mips/mm/c-sb1.c | |||
@@ -502,9 +502,8 @@ void ld_mmu_sb1(void) | |||
502 | extern char handle_vec2_sb1; | 502 | extern char handle_vec2_sb1; |
503 | 503 | ||
504 | /* Special cache error handler for SB1 */ | 504 | /* Special cache error handler for SB1 */ |
505 | memcpy((void *)(CAC_BASE + 0x100), &except_vec2_sb1, 0x80); | 505 | set_uncached_handler (0x100, &except_vec2_sb1, 0x80); |
506 | memcpy((void *)(UNCAC_BASE + 0x100), &except_vec2_sb1, 0x80); | 506 | memcpy((void *)KSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80); |
507 | memcpy((void *)CKSEG1ADDR(&handle_vec2_sb1), &handle_vec2_sb1, 0x80); | ||
508 | 507 | ||
509 | probe_cache_sizes(); | 508 | probe_cache_sizes(); |
510 | 509 | ||