Diffstat (limited to 'arch/mips/kernel')
 arch/mips/kernel/Makefile          |    3
 arch/mips/kernel/apm.c             |    2
 arch/mips/kernel/binfmt_elfn32.c   |    1
 arch/mips/kernel/binfmt_elfo32.c   |    1
 arch/mips/kernel/cpu-probe.c       |   19
 arch/mips/kernel/dma-no-isa.c      |   28
 arch/mips/kernel/genex.S           |   63
 arch/mips/kernel/head.S            |    5
 arch/mips/kernel/i8259.c           |  179
 arch/mips/kernel/irixelf.c         |   10
 arch/mips/kernel/irq-msc01.c       |   47
 arch/mips/kernel/irq-mv6434x.c     |   64
 arch/mips/kernel/irq-rm7000.c      |   61
 arch/mips/kernel/irq-rm9000.c      |   55
 arch/mips/kernel/irq.c             |   34
 arch/mips/kernel/irq_cpu.c         |   90
 arch/mips/kernel/kspd.c            |    6
 arch/mips/kernel/linux32.c         |  586
 arch/mips/kernel/machine_kexec.c   |   85
 arch/mips/kernel/mips_ksyms.c      |    2
 arch/mips/kernel/module.c          |   15
 arch/mips/kernel/relocate_kernel.S |   80
 arch/mips/kernel/reset.c           |    2
 arch/mips/kernel/rtlx.c            |    6
 arch/mips/kernel/scall32-o32.S     |    2
 arch/mips/kernel/scall64-64.S      |    2
 arch/mips/kernel/scall64-n32.S     |   16
 arch/mips/kernel/scall64-o32.S     |    2
 arch/mips/kernel/setup.c           |   87
 arch/mips/kernel/signal_n32.c      |    1
 arch/mips/kernel/smp-mt.c          |    2
 arch/mips/kernel/smp.c             |    8
 arch/mips/kernel/smtc.c            |    1
 arch/mips/kernel/sysirix.c         |   10
 arch/mips/kernel/time.c            |   66
 arch/mips/kernel/traps.c           |   72
 arch/mips/kernel/vmlinux.lds.S     |    2
 arch/mips/kernel/vpe.c             |    2
38 files changed, 589 insertions, 1128 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 6bfbbed0897e..bbbb8d7cb89b 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -45,7 +45,6 @@ obj-$(CONFIG_MIPS_APSP_KSPD) += kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
 obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
 
-obj-$(CONFIG_NO_ISA) += dma-no-isa.o
 obj-$(CONFIG_I8259) += i8259.o
 obj-$(CONFIG_IRQ_CPU) += irq_cpu.o
 obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o
@@ -67,6 +66,8 @@ obj-$(CONFIG_64BIT) += cpu-bugs64.o
 
 obj-$(CONFIG_I8253) += i8253.o
 
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+
 CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/apm.c b/arch/mips/kernel/apm.c
index 528e731049c1..ba16d07588cb 100644
--- a/arch/mips/kernel/apm.c
+++ b/arch/mips/kernel/apm.c
@@ -356,7 +356,7 @@ static int apm_open(struct inode * inode, struct file * filp)
 {
 	struct apm_user *as;
 
-	as = (struct apm_user *)kzalloc(sizeof(*as), GFP_KERNEL);
+	as = kzalloc(sizeof(*as), GFP_KERNEL);
 	if (as) {
 		/*
 		 * XXX - this is a tiny bit broken, when we consider BSD
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 4a9f1ecefaf2..9b34238d41c0 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -90,7 +90,6 @@ struct elf_prpsinfo32
 	char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
 };
 
-#define elf_addr_t u32
 #define elf_caddr_t u32
 #define init_elf_binfmt init_elfn32_binfmt
 
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index e31813779895..993f7ec70f35 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -92,7 +92,6 @@ struct elf_prpsinfo32
 	char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */
 };
 
-#define elf_addr_t u32
 #define elf_caddr_t u32
 #define init_elf_binfmt init_elf32_binfmt
 
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 8485af340ee1..442839e9578c 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -110,9 +110,8 @@ static inline void check_wait(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 
-	printk("Checking for 'wait' instruction... ");
 	if (nowait) {
-		printk (" disabled.\n");
+		printk("Wait instruction disabled.\n");
 		return;
 	}
 
@@ -120,11 +119,9 @@ static inline void check_wait(void)
 	case CPU_R3081:
 	case CPU_R3081E:
 		cpu_wait = r3081_wait;
-		printk(" available.\n");
 		break;
 	case CPU_TX3927:
 		cpu_wait = r39xx_wait;
-		printk(" available.\n");
 		break;
 	case CPU_R4200:
 /* case CPU_R4300: */
@@ -146,33 +143,23 @@ static inline void check_wait(void)
 	case CPU_74K:
 	case CPU_PR4450:
 		cpu_wait = r4k_wait;
-		printk(" available.\n");
 		break;
 	case CPU_TX49XX:
 		cpu_wait = r4k_wait_irqoff;
-		printk(" available.\n");
 		break;
 	case CPU_AU1000:
 	case CPU_AU1100:
 	case CPU_AU1500:
 	case CPU_AU1550:
 	case CPU_AU1200:
-		if (allow_au1k_wait) {
+		if (allow_au1k_wait)
 			cpu_wait = au1k_wait;
-			printk(" available.\n");
-		} else
-			printk(" unavailable.\n");
 		break;
 	case CPU_RM9000:
-		if ((c->processor_id & 0x00ff) >= 0x40) {
+		if ((c->processor_id & 0x00ff) >= 0x40)
 			cpu_wait = r4k_wait;
-			printk(" available.\n");
-		} else {
-			printk(" unavailable.\n");
-		}
 		break;
 	default:
-		printk(" unavailable.\n");
 		break;
 	}
 }
diff --git a/arch/mips/kernel/dma-no-isa.c b/arch/mips/kernel/dma-no-isa.c
deleted file mode 100644
index 6df8b07741e3..000000000000
--- a/arch/mips/kernel/dma-no-isa.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- *
- * Dummy ISA DMA functions for systems that don't have ISA but share drivers
- * with ISA such as legacy free PCI.
- */
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-
-DEFINE_SPINLOCK(dma_spin_lock);
-
-int request_dma(unsigned int dmanr, const char * device_id)
-{
-	return -EINVAL;
-}
-
-void free_dma(unsigned int dmanr)
-{
-}
-
-EXPORT_SYMBOL(dma_spin_lock);
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5baca16993d0..aacd4a005c5f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -19,6 +19,7 @@
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
 #include <asm/war.h>
+#include <asm/page.h>
 
 #define PANIC_PIC(msg) \
 	.set push; \
@@ -378,6 +379,68 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	BUILD_HANDLER dsp dsp sti silent /* #26 */
 	BUILD_HANDLER reserved reserved sti verbose /* others */
 
+	.align	5
+	LEAF(handle_ri_rdhwr_vivt)
+#ifdef CONFIG_MIPS_MT_SMTC
+	PANIC_PIC("handle_ri_rdhwr_vivt called")
+#else
+	.set	push
+	.set	noat
+	.set	noreorder
+	/* check if TLB contains a entry for EPC */
+	MFC0	k1, CP0_ENTRYHI
+	andi	k1, 0xff	/* ASID_MASK */
+	MFC0	k0, CP0_EPC
+	PTR_SRL	k0, PAGE_SHIFT + 1
+	PTR_SLL	k0, PAGE_SHIFT + 1
+	or	k1, k0
+	MTC0	k1, CP0_ENTRYHI
+	mtc0_tlbw_hazard
+	tlbp
+	tlb_probe_hazard
+	mfc0	k1, CP0_INDEX
+	.set	pop
+	bltz	k1, handle_ri	/* slow path */
+	/* fall thru */
+#endif
+	END(handle_ri_rdhwr_vivt)
+
+	LEAF(handle_ri_rdhwr)
+	.set	push
+	.set	noat
+	.set	noreorder
+	/* 0x7c03e83b: rdhwr v1,$29 */
+	MFC0	k1, CP0_EPC
+	lui	k0, 0x7c03
+	lw	k1, (k1)
+	ori	k0, 0xe83b
+	.set	reorder
+	bne	k0, k1, handle_ri	/* if not ours */
+	/* The insn is rdhwr. No need to check CAUSE.BD here. */
+	get_saved_sp	/* k1 := current_thread_info */
+	.set	noreorder
+	MFC0	k0, CP0_EPC
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+	ori	k1, _THREAD_MASK
+	xori	k1, _THREAD_MASK
+	LONG_L	v1, TI_TP_VALUE(k1)
+	LONG_ADDIU	k0, 4
+	jr	k0
+	rfe
+#else
+	LONG_ADDIU	k0, 4	/* stall on $k0 */
+	MTC0	k0, CP0_EPC
+	/* I hope three instructions between MTC0 and ERET are enough... */
+	ori	k1, _THREAD_MASK
+	xori	k1, _THREAD_MASK
+	LONG_L	v1, TI_TP_VALUE(k1)
+	.set	mips3
+	eret
+	.set	mips0
+#endif
+	.set	pop
+	END(handle_ri_rdhwr)
+
 #ifdef CONFIG_64BIT
 /* A temporary overflow handler used by check_daddi(). */
 
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index ddc1b71c9378..9a7811d13db2 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -138,7 +138,7 @@
 EXPORT(stext)	# used for profiling
 EXPORT(_stext)
 
-#if defined(CONFIG_QEMU) || defined(CONFIG_MIPS_SIM)
+#ifdef CONFIG_MIPS_SIM
 	/*
 	 * Give us a fighting chance of running if execution beings at the
 	 * kernel load address. This is needed because this platform does
@@ -250,6 +250,9 @@ NESTED(smp_bootstrap, 16, sp)
 	 */
 	page	swapper_pg_dir, _PGD_ORDER
 #ifdef CONFIG_64BIT
+#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
+	page	module_pg_dir, _PGD_ORDER
+#endif
 	page	invalid_pmd_table, _PMD_ORDER
 #endif
 	page	invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 48e3418c217b..b59a676c6d0e 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -19,9 +19,6 @@
 #include <asm/i8259.h>
 #include <asm/io.h>
 
-void enable_8259A_irq(unsigned int irq);
-void disable_8259A_irq(unsigned int irq);
-
 /*
  * This is the 'legacy' 8259A Programmable Interrupt Controller,
  * present in the majority of PC/AT boxes.
@@ -31,34 +28,16 @@ void disable_8259A_irq(unsigned int irq);
  * moves to arch independent land
  */
 
+static int i8259A_auto_eoi;
 DEFINE_SPINLOCK(i8259A_lock);
-
-static void end_8259A_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
-	    irq_desc[irq].action)
-		enable_8259A_irq(irq);
-}
-
-#define shutdown_8259A_irq disable_8259A_irq
-
+/* some platforms call this... */
 void mask_and_ack_8259A(unsigned int);
 
-static unsigned int startup_8259A_irq(unsigned int irq)
-{
-	enable_8259A_irq(irq);
-
-	return 0; /* never anything pending */
-}
-
-static struct irq_chip i8259A_irq_type = {
-	.typename = "XT-PIC",
-	.startup = startup_8259A_irq,
-	.shutdown = shutdown_8259A_irq,
-	.enable = enable_8259A_irq,
-	.disable = disable_8259A_irq,
-	.ack = mask_and_ack_8259A,
-	.end = end_8259A_irq,
+static struct irq_chip i8259A_chip = {
+	.name = "XT-PIC",
+	.mask = disable_8259A_irq,
+	.unmask = enable_8259A_irq,
+	.mask_ack = mask_and_ack_8259A,
 };
 
 /*
@@ -70,8 +49,8 @@ static struct irq_chip i8259A_irq_type = {
 */
 static unsigned int cached_irq_mask = 0xffff;
 
-#define cached_21 (cached_irq_mask)
-#define cached_A1 (cached_irq_mask >> 8)
+#define cached_master_mask (cached_irq_mask)
+#define cached_slave_mask (cached_irq_mask >> 8)
 
 void disable_8259A_irq(unsigned int irq)
 {
@@ -81,9 +60,9 @@ void disable_8259A_irq(unsigned int irq)
 	spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask |= mask;
 	if (irq & 8)
-		outb(cached_A1,0xA1);
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
-		outb(cached_21,0x21);
+		outb(cached_master_mask, PIC_MASTER_IMR);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -95,9 +74,9 @@ void enable_8259A_irq(unsigned int irq)
 	spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask &= mask;
 	if (irq & 8)
-		outb(cached_A1,0xA1);
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
-		outb(cached_21,0x21);
+		outb(cached_master_mask, PIC_MASTER_IMR);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -109,9 +88,9 @@ int i8259A_irq_pending(unsigned int irq)
 
 	spin_lock_irqsave(&i8259A_lock, flags);
 	if (irq < 8)
-		ret = inb(0x20) & mask;
+		ret = inb(PIC_MASTER_CMD) & mask;
 	else
-		ret = inb(0xA0) & (mask >> 8);
+		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	return ret;
@@ -120,7 +99,7 @@ int i8259A_irq_pending(unsigned int irq)
 void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-	irq_desc[irq].chip = &i8259A_irq_type;
+	set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
 	enable_irq(irq);
 }
 
@@ -136,14 +115,14 @@ static inline int i8259A_irq_real(unsigned int irq)
 	int irqmask = 1 << irq;
 
 	if (irq < 8) {
-		outb(0x0B,0x20);	/* ISR register */
-		value = inb(0x20) & irqmask;
-		outb(0x0A,0x20);	/* back to the IRR register */
+		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
+		value = inb(PIC_MASTER_CMD) & irqmask;
+		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
 		return value;
 	}
-	outb(0x0B,0xA0);	/* ISR register */
-	value = inb(0xA0) & (irqmask >> 8);
-	outb(0x0A,0xA0);	/* back to the IRR register */
+	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
+	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
 	return value;
 }
 
@@ -160,17 +139,19 @@ void mask_and_ack_8259A(unsigned int irq)
 
 	spin_lock_irqsave(&i8259A_lock, flags);
 	/*
-	 * Lightweight spurious IRQ detection. We do not want to overdo
-	 * spurious IRQ handling - it's usually a sign of hardware problems, so
-	 * we only do the checks we can do without slowing down good hardware
-	 * nnecesserily.
+	 * Lightweight spurious IRQ detection. We do not want
+	 * to overdo spurious IRQ handling - it's usually a sign
+	 * of hardware problems, so we only do the checks we can
+	 * do without slowing down good hardware unnecessarily.
 	 *
-	 * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting
-	 * rom the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A.
-	 * Thus we can check spurious 8259A IRQs without doing the quite slow
-	 * i8259A_irq_real() call for every IRQ. This does not cover 100% of
-	 * spurious interrupts, but should be enough to warn the user that
-	 * there is something bad going on ...
+	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
+	 * usually resulting from the 8259A-1|2 PICs) occur
+	 * even if the IRQ is masked in the 8259A. Thus we
+	 * can check spurious 8259A IRQs without doing the
+	 * quite slow i8259A_irq_real() call for every IRQ.
+	 * This does not cover 100% of spurious interrupts,
+	 * but should be enough to warn the user that there
+	 * is something bad going on ...
 	 */
 	if (cached_irq_mask & irqmask)
 		goto spurious_8259A_irq;
@@ -178,14 +159,14 @@ void mask_and_ack_8259A(unsigned int irq)
 
 handle_real_irq:
 	if (irq & 8) {
-		inb(0xA1);	/* DUMMY - (do we need this?) */
-		outb(cached_A1,0xA1);
-		outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
-		outb(0x62,0x20);	/* 'Specific EOI' to master-IRQ2 */
+		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+		outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
 	} else {
-		inb(0x21);	/* DUMMY - (do we need this?) */
-		outb(cached_21,0x21);
-		outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
+		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_master_mask, PIC_MASTER_IMR);
+		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
 #ifdef CONFIG_MIPS_MT_SMTC
 	if (irq_hwmask[irq] & ST0_IM)
@@ -206,7 +187,7 @@ spurious_8259A_irq:
 		goto handle_real_irq;
 
 	{
-		static int spurious_irq_mask = 0;
+		static int spurious_irq_mask;
 		/*
 		 * At this point we can be sure the IRQ is spurious,
 		 * lets ACK and report it. [once per IRQ]
@@ -227,13 +208,25 @@ spurious_8259A_irq:
 
 static int i8259A_resume(struct sys_device *dev)
 {
-	init_8259A(0);
+	init_8259A(i8259A_auto_eoi);
+	return 0;
+}
+
+static int i8259A_shutdown(struct sys_device *dev)
+{
+	/* Put the i8259A into a quiescent state that
+	 * the kernel initialization code can get it
+	 * out of.
+	 */
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
 	return 0;
 }
 
 static struct sysdev_class i8259_sysdev_class = {
 	set_kset_name("i8259"),
 	.resume = i8259A_resume,
+	.shutdown = i8259A_shutdown,
 };
 
 static struct sys_device device_i8259A = {
@@ -255,41 +248,41 @@ void __init init_8259A(int auto_eoi)
 {
 	unsigned long flags;
 
+	i8259A_auto_eoi = auto_eoi;
+
 	spin_lock_irqsave(&i8259A_lock, flags);
 
-	outb(0xff, 0x21);	/* mask all of 8259A-1 */
-	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 
 	/*
	 * outb_p - this has to work on a wide range of PC hardware.
	 */
-	outb_p(0x11, 0x20);	/* ICW1: select 8259A-1 init */
-	outb_p(0x00, 0x21);	/* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */
-	outb_p(0x04, 0x21);	/* 8259A-1 (the master) has a slave on IR2 */
-	if (auto_eoi)
-		outb_p(0x03, 0x21);	/* master does Auto EOI */
-	else
-		outb_p(0x01, 0x21);	/* master expects normal EOI */
+	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
+	outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
+	if (auto_eoi)	/* master does Auto EOI */
+		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+	else		/* master expects normal EOI */
+		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
 
-	outb_p(0x11, 0xA0);	/* ICW1: select 8259A-2 init */
-	outb_p(0x08, 0xA1);	/* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */
-	outb_p(0x02, 0xA1);	/* 8259A-2 is a slave on master's IR2 */
-	outb_p(0x01, 0xA1);	/* (slave's support for AEOI in flat mode
-				   is to be investigated) */
-
+	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
+	outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
+	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR); /* (slave's support for AEOI in flat mode is to be investigated) */
 	if (auto_eoi)
 		/*
-		 * in AEOI mode we just have to mask the interrupt
+		 * In AEOI mode we just have to mask the interrupt
		 * when acking.
		 */
-		i8259A_irq_type.ack = disable_8259A_irq;
+		i8259A_chip.mask_ack = disable_8259A_irq;
	else
-		i8259A_irq_type.ack = mask_and_ack_8259A;
+		i8259A_chip.mask_ack = mask_and_ack_8259A;
 
 	udelay(100);	/* wait for 8259A to initialize */
 
-	outb(cached_21, 0x21);	/* restore master IRQ mask */
-	outb(cached_A1, 0xA1);	/* restore slave IRQ mask */
+	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+	outb(cached_slave_mask, PIC_SLAVE_IMR);	/* restore slave IRQ mask */
 
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
@@ -302,11 +295,17 @@ static struct irqaction irq2 = {
 };
 
 static struct resource pic1_io_resource = {
-	.name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY
+	.name = "pic1",
+	.start = PIC_MASTER_CMD,
+	.end = PIC_MASTER_IMR,
+	.flags = IORESOURCE_BUSY
 };
 
 static struct resource pic2_io_resource = {
-	.name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY
+	.name = "pic2",
+	.start = PIC_SLAVE_CMD,
+	.end = PIC_SLAVE_IMR,
+	.flags = IORESOURCE_BUSY
 };
 
 /*
@@ -323,12 +322,8 @@ void __init init_i8259_irqs (void)
 
 	init_8259A(0);
 
-	for (i = 0; i < 16; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &i8259A_irq_type;
-	}
+	for (i = 0; i < 16; i++)
+		set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
 
-	setup_irq(2, &irq2);
+	setup_irq(PIC_CASCADE_IR, &irq2);
 }
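The i8259 conversion above, like the irq-msc01, irq-mv6434x, irq-rm7000, irq-rm9000 and irq_cpu changes that follow, replaces hand-rolled startup/shutdown/enable/disable/ack/end handlers with the generic irq_chip callbacks (mask, unmask, mask_ack) and registers them through set_irq_chip_and_handler() with the handle_level_irq flow. A minimal sketch of that pattern, assuming the 2.6.18+ generic IRQ API of this era; the controller name and register accesses here are illustrative only and not part of this patch:

#include <linux/irq.h>

static void example_mask(unsigned int irq)
{
	/* illustrative: set the mask bit for this irq in the controller */
}

static void example_unmask(unsigned int irq)
{
	/* illustrative: clear the mask bit for this irq */
}

static struct irq_chip example_chip = {	/* hypothetical controller */
	.name		= "EXAMPLE",
	.ack		= example_mask,
	.mask		= example_mask,
	.mask_ack	= example_mask,
	.unmask		= example_unmask,
};

void __init example_irq_init(unsigned int base)
{
	unsigned int i;

	/* one call replaces the old per-descriptor status/action/depth/chip setup */
	for (i = base; i < base + 8; i++)
		set_irq_chip_and_handler(i, &example_chip, handle_level_irq);
}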
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index ab12c8f01518..37cad5de515c 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -52,10 +52,6 @@ static struct linux_binfmt irix_format = {
 	irix_core_dump, PAGE_SIZE
 };
 
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
 #ifdef DEBUG
 /* Debugging routines. */
 static char *get_elf_p_type(Elf32_Word p_type)
@@ -1013,7 +1009,7 @@ static int notesize(struct memelfnote *en)
 	int sz;
 
 	sz = sizeof(struct elf_note);
-	sz += roundup(strlen(en->name), 4);
+	sz += roundup(strlen(en->name) + 1, 4);
 	sz += roundup(en->datasz, 4);
 
 	return sz;
@@ -1032,7 +1028,7 @@ static int writenote(struct memelfnote *men, struct file *file)
 {
 	struct elf_note en;
 
-	en.n_namesz = strlen(men->name);
+	en.n_namesz = strlen(men->name) + 1;
 	en.n_descsz = men->datasz;
 	en.n_type = men->type;
 
@@ -1149,7 +1145,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
 	psinfo.pr_pid = prstatus.pr_pid = current->pid;
 	psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid;
 	psinfo.pr_pgrp = prstatus.pr_pgrp = process_group(current);
-	psinfo.pr_sid = prstatus.pr_sid = current->signal->session;
+	psinfo.pr_sid = prstatus.pr_sid = process_session(current);
 	if (current->pid == current->tgid) {
 		/*
 		 * This is the record for the group leader. Add in the
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 650a80ca3741..bcaad6696082 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -45,31 +45,6 @@ static inline void unmask_msc_irq(unsigned int irq)
 }
 
 /*
- * Enables the IRQ on SOC-it
- */
-static void enable_msc_irq(unsigned int irq)
-{
-	unmask_msc_irq(irq);
-}
-
-/*
- * Initialize the IRQ on SOC-it
- */
-static unsigned int startup_msc_irq(unsigned int irq)
-{
-	unmask_msc_irq(irq);
-	return 0;
-}
-
-/*
- * Disables the IRQ on SOC-it
- */
-static void disable_msc_irq(unsigned int irq)
-{
-	mask_msc_irq(irq);
-}
-
-/*
  * Masks and ACKs an IRQ
  */
 static void level_mask_and_ack_msc_irq(unsigned int irq)
@@ -136,25 +111,23 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
 		(irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
 }
 
-#define shutdown_msc_irq disable_msc_irq
-
 struct irq_chip msc_levelirq_type = {
 	.typename = "SOC-it-Level",
-	.startup = startup_msc_irq,
-	.shutdown = shutdown_msc_irq,
-	.enable = enable_msc_irq,
-	.disable = disable_msc_irq,
 	.ack = level_mask_and_ack_msc_irq,
+	.mask = mask_msc_irq,
+	.mask_ack = level_mask_and_ack_msc_irq,
+	.unmask = unmask_msc_irq,
+	.eoi = unmask_msc_irq,
 	.end = end_msc_irq,
 };
 
 struct irq_chip msc_edgeirq_type = {
 	.typename = "SOC-it-Edge",
-	.startup =startup_msc_irq,
-	.shutdown = shutdown_msc_irq,
-	.enable = enable_msc_irq,
-	.disable = disable_msc_irq,
 	.ack = edge_mask_and_ack_msc_irq,
+	.mask = mask_msc_irq,
+	.mask_ack = edge_mask_and_ack_msc_irq,
+	.unmask = unmask_msc_irq,
+	.eoi = unmask_msc_irq,
 	.end = end_msc_irq,
 };
 
@@ -175,14 +148,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
 
 		switch (imp->im_type) {
 		case MSC01_IRQ_EDGE:
-			irq_desc[base+n].chip = &msc_edgeirq_type;
+			set_irq_chip(base+n, &msc_edgeirq_type);
 			if (cpu_has_veic)
 				MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
 			else
 				MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
 			break;
 		case MSC01_IRQ_LEVEL:
-			irq_desc[base+n].chip = &msc_levelirq_type;
+			set_irq_chip(base+n, &msc_levelirq_type);
 			if (cpu_has_veic)
 				MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
 			else
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index 37d106202b83..efbd219845b5 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -67,48 +67,6 @@ static inline void unmask_mv64340_irq(unsigned int irq)
 }
 
 /*
- * Enables the IRQ on Marvell Chip
- */
-static void enable_mv64340_irq(unsigned int irq)
-{
-	unmask_mv64340_irq(irq);
-}
-
-/*
- * Initialize the IRQ on Marvell Chip
- */
-static unsigned int startup_mv64340_irq(unsigned int irq)
-{
-	unmask_mv64340_irq(irq);
-	return 0;
-}
-
-/*
- * Disables the IRQ on Marvell Chip
- */
-static void disable_mv64340_irq(unsigned int irq)
-{
-	mask_mv64340_irq(irq);
-}
-
-/*
- * Masks and ACKs an IRQ
- */
-static void mask_and_ack_mv64340_irq(unsigned int irq)
-{
-	mask_mv64340_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_mv64340_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_mv64340_irq(irq);
-}
-
-/*
  * Interrupt handler for interrupts coming from the Marvell chip.
  * It could be built in ethernet ports etc...
  */
@@ -133,29 +91,21 @@ void ll_mv64340_irq(void)
 		do_IRQ(ls1bit32(irq_src_high) + irq_base + 32);
 }
 
-#define shutdown_mv64340_irq disable_mv64340_irq
-
 struct irq_chip mv64340_irq_type = {
 	.typename = "MV-64340",
-	.startup = startup_mv64340_irq,
-	.shutdown = shutdown_mv64340_irq,
-	.enable = enable_mv64340_irq,
-	.disable = disable_mv64340_irq,
-	.ack = mask_and_ack_mv64340_irq,
-	.end = end_mv64340_irq,
+	.ack = mask_mv64340_irq,
+	.mask = mask_mv64340_irq,
+	.mask_ack = mask_mv64340_irq,
+	.unmask = unmask_mv64340_irq,
 };
 
 void __init mv64340_irq_init(unsigned int base)
 {
 	int i;
 
-	/* Reset irq handlers pointers to NULL */
-	for (i = base; i < base + 64; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 2;
-		irq_desc[i].chip = &mv64340_irq_type;
-	}
+	for (i = base; i < base + 64; i++)
+		set_irq_chip_and_handler(i, &mv64340_irq_type,
+					 handle_level_irq);
 
 	irq_base = base;
 }
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 6b54c7109e2e..123324ba8c14 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -29,56 +29,12 @@ static inline void mask_rm7k_irq(unsigned int irq)
 	clear_c0_intcontrol(0x100 << (irq - irq_base));
 }
 
-static inline void rm7k_cpu_irq_enable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	unmask_rm7k_irq(irq);
-	local_irq_restore(flags);
-}
-
-static void rm7k_cpu_irq_disable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	mask_rm7k_irq(irq);
-	local_irq_restore(flags);
-}
-
-static unsigned int rm7k_cpu_irq_startup(unsigned int irq)
-{
-	rm7k_cpu_irq_enable(irq);
-
-	return 0;
-}
-
-#define rm7k_cpu_irq_shutdown rm7k_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for rm7k_cpu_irq_end.
- */
-static void rm7k_cpu_irq_ack(unsigned int irq)
-{
-	mask_rm7k_irq(irq);
-}
-
-static void rm7k_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_rm7k_irq(irq);
-}
-
 static struct irq_chip rm7k_irq_controller = {
 	.typename = "RM7000",
-	.startup = rm7k_cpu_irq_startup,
-	.shutdown = rm7k_cpu_irq_shutdown,
-	.enable = rm7k_cpu_irq_enable,
-	.disable = rm7k_cpu_irq_disable,
-	.ack = rm7k_cpu_irq_ack,
-	.end = rm7k_cpu_irq_end,
+	.ack = mask_rm7k_irq,
+	.mask = mask_rm7k_irq,
+	.mask_ack = mask_rm7k_irq,
+	.unmask = unmask_rm7k_irq,
 };
 
 void __init rm7k_cpu_irq_init(int base)
@@ -87,12 +43,9 @@ void __init rm7k_cpu_irq_init(int base)
 
 	clear_c0_intcontrol(0x00000f00); /* Mask all */
 
-	for (i = base; i < base + 4; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &rm7k_irq_controller;
-	}
+	for (i = base; i < base + 4; i++)
+		set_irq_chip_and_handler(i, &rm7k_irq_controller,
+					 handle_level_irq);
 
 	irq_base = base;
 }
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 62f011ba97a2..0e6f4c5349d2 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -48,15 +48,6 @@ static void rm9k_cpu_irq_disable(unsigned int irq)
 	local_irq_restore(flags);
 }
 
-static unsigned int rm9k_cpu_irq_startup(unsigned int irq)
-{
-	rm9k_cpu_irq_enable(irq);
-
-	return 0;
-}
-
-#define rm9k_cpu_irq_shutdown rm9k_cpu_irq_disable
-
 /*
  * Performance counter interrupts are global on all processors.
  */
@@ -89,40 +80,22 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
 	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
 }
 
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for rm9k_cpu_irq_end.
- */
-static void rm9k_cpu_irq_ack(unsigned int irq)
-{
-	mask_rm9k_irq(irq);
-}
-
-static void rm9k_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_rm9k_irq(irq);
-}
-
 static struct irq_chip rm9k_irq_controller = {
 	.typename = "RM9000",
-	.startup = rm9k_cpu_irq_startup,
-	.shutdown = rm9k_cpu_irq_shutdown,
-	.enable = rm9k_cpu_irq_enable,
-	.disable = rm9k_cpu_irq_disable,
-	.ack = rm9k_cpu_irq_ack,
-	.end = rm9k_cpu_irq_end,
+	.ack = mask_rm9k_irq,
+	.mask = mask_rm9k_irq,
+	.mask_ack = mask_rm9k_irq,
+	.unmask = unmask_rm9k_irq,
 };
 
 static struct irq_chip rm9k_perfcounter_irq = {
 	.typename = "RM9000",
 	.startup = rm9k_perfcounter_irq_startup,
 	.shutdown = rm9k_perfcounter_irq_shutdown,
-	.enable = rm9k_cpu_irq_enable,
-	.disable = rm9k_cpu_irq_disable,
-	.ack = rm9k_cpu_irq_ack,
-	.end = rm9k_cpu_irq_end,
+	.ack = mask_rm9k_irq,
+	.mask = mask_rm9k_irq,
+	.mask_ack = mask_rm9k_irq,
+	.unmask = unmask_rm9k_irq,
 };
 
 unsigned int rm9000_perfcount_irq;
@@ -135,15 +108,13 @@ void __init rm9k_cpu_irq_init(int base)
 
 	clear_c0_intcontrol(0x0000f000); /* Mask all */
 
-	for (i = base; i < base + 4; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &rm9k_irq_controller;
-	}
+	for (i = base; i < base + 4; i++)
+		set_irq_chip_and_handler(i, &rm9k_irq_controller,
+					 handle_level_irq);
 
 	rm9000_perfcount_irq = base + 1;
-	irq_desc[rm9000_perfcount_irq].chip = &rm9k_perfcounter_irq;
+	set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
+				 handle_level_irq);
 
 	irq_base = base;
 }
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 9b0e49d63d7b..2fe4c868a801 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -88,25 +88,6 @@ atomic_t irq_err_count;
 unsigned long irq_hwmask[NR_IRQS];
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-#undef do_IRQ
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int do_IRQ(unsigned int irq)
-{
-	irq_enter();
-
-	__DO_IRQ_SMTC_HOOK();
-	__do_IRQ(irq);
-
-	irq_exit();
-
-	return 1;
-}
-
 /*
  * Generic, controller-independent functions:
  */
@@ -136,7 +117,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -172,19 +153,6 @@ __setup("nokgdb", nokgdb);
 
 void __init init_IRQ(void)
 {
-	int i;
-
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &no_irq_chip;
-		spin_lock_init(&irq_desc[i].lock);
-#ifdef CONFIG_MIPS_MT_SMTC
-		irq_hwmask[i] = 0;
-#endif /* CONFIG_MIPS_MT_SMTC */
-	}
-
 	arch_init_irq();
 
 #ifdef CONFIG_KGDB
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 9bb21c7f2149..fcc86b96ccf6 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -50,58 +50,13 @@ static inline void mask_mips_irq(unsigned int irq)
 	irq_disable_hazard();
 }
 
-static inline void mips_cpu_irq_enable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	unmask_mips_irq(irq);
-	back_to_back_c0_hazard();
-	local_irq_restore(flags);
-}
-
-static void mips_cpu_irq_disable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	mask_mips_irq(irq);
-	back_to_back_c0_hazard();
-	local_irq_restore(flags);
-}
-
-static unsigned int mips_cpu_irq_startup(unsigned int irq)
-{
-	mips_cpu_irq_enable(irq);
-
-	return 0;
-}
-
-#define mips_cpu_irq_shutdown mips_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues. Same for mips_cpu_irq_end.
- */
-static void mips_cpu_irq_ack(unsigned int irq)
-{
-	mask_mips_irq(irq);
-}
-
-static void mips_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_mips_irq(irq);
-}
-
 static struct irq_chip mips_cpu_irq_controller = {
 	.typename = "MIPS",
-	.startup = mips_cpu_irq_startup,
-	.shutdown = mips_cpu_irq_shutdown,
-	.enable = mips_cpu_irq_enable,
-	.disable = mips_cpu_irq_disable,
-	.ack = mips_cpu_irq_ack,
-	.end = mips_cpu_irq_end,
+	.ack = mask_mips_irq,
+	.mask = mask_mips_irq,
+	.mask_ack = mask_mips_irq,
+	.unmask = unmask_mips_irq,
+	.eoi = unmask_mips_irq,
 };
 
 /*
@@ -110,8 +65,6 @@ static struct irq_chip mips_cpu_irq_controller = {
 
 #define unmask_mips_mt_irq unmask_mips_irq
 #define mask_mips_mt_irq mask_mips_irq
-#define mips_mt_cpu_irq_enable mips_cpu_irq_enable
-#define mips_mt_cpu_irq_disable mips_cpu_irq_disable
 
 static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
 {
@@ -119,13 +72,11 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
 
 	clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
 	evpe(vpflags);
-	mips_mt_cpu_irq_enable(irq);
+	unmask_mips_mt_irq(irq);
 
 	return 0;
 }
 
-#define mips_mt_cpu_irq_shutdown mips_mt_cpu_irq_disable
-
 /*
  * While we ack the interrupt interrupts are disabled and thus we don't need
  * to deal with concurrency issues. Same for mips_cpu_irq_end.
@@ -138,16 +89,14 @@ static void mips_mt_cpu_irq_ack(unsigned int irq)
 	mask_mips_mt_irq(irq);
 }
 
-#define mips_mt_cpu_irq_end mips_cpu_irq_end
-
 static struct irq_chip mips_mt_cpu_irq_controller = {
 	.typename = "MIPS",
 	.startup = mips_mt_cpu_irq_startup,
-	.shutdown = mips_mt_cpu_irq_shutdown,
-	.enable = mips_mt_cpu_irq_enable,
-	.disable = mips_mt_cpu_irq_disable,
 	.ack = mips_mt_cpu_irq_ack,
-	.end = mips_mt_cpu_irq_end,
+	.mask = mask_mips_mt_irq,
+	.mask_ack = mips_mt_cpu_irq_ack,
+	.unmask = unmask_mips_mt_irq,
+	.eoi = unmask_mips_mt_irq,
 };
 
 void __init mips_cpu_irq_init(int irq_base)
@@ -163,19 +112,12 @@ void __init mips_cpu_irq_init(int irq_base)
 	 * leave them uninitialized for other processors.
	 */
 	if (cpu_has_mipsmt)
-		for (i = irq_base; i < irq_base + 2; i++) {
-			irq_desc[i].status = IRQ_DISABLED;
-			irq_desc[i].action = NULL;
-			irq_desc[i].depth = 1;
-			irq_desc[i].chip = &mips_mt_cpu_irq_controller;
-		}
-
-	for (i = irq_base + 2; i < irq_base + 8; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &mips_cpu_irq_controller;
-	}
+		for (i = irq_base; i < irq_base + 2; i++)
+			set_irq_chip(i, &mips_mt_cpu_irq_controller);
+
+	for (i = irq_base + 2; i < irq_base + 8; i++)
+		set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
+					 handle_level_irq);
 
 	mips_cpu_irq_base = irq_base;
 }
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144c7881..5929f883e46b 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -301,7 +301,7 @@ static void sp_cleanup(void)
 	for (;;) {
 		unsigned long set;
 		i = j * __NFDBITS;
-		if (i >= fdt->max_fdset || i >= fdt->max_fds)
+		if (i >= fdt->max_fds)
 			break;
 		set = fdt->open_fds->fds_bits[j++];
 		while (set) {
@@ -319,7 +319,7 @@ static void sp_cleanup(void)
 static int channel_open = 0;
 
 /* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
 {
 	if (!channel_open) {
 		if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@ static void startwork(int vpe)
 			return;
 		}
 
-		INIT_WORK(&work, sp_work, NULL);
+		INIT_WORK(&work, sp_work);
 		queue_work(workqueue, &work);
 	} else
 		queue_work(workqueue, &work);
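The kspd.c hunks above track the 2.6.20 workqueue interface, where a work handler receives the struct work_struct itself rather than a separate void *data pointer, and per-instance data is recovered with container_of(). A short sketch of that convention, assuming the post-2.6.20 API; the sp_context wrapper and helper names are hypothetical and not taken from this patch:

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct sp_context {			/* hypothetical per-instance wrapper */
	struct work_struct work;
	int vpe;
};

static void sp_example_handler(struct work_struct *w)
{
	/* recover the enclosing object from the embedded work item */
	struct sp_context *ctx = container_of(w, struct sp_context, work);

	/* illustrative: act on ctx->vpe here */
}

static void sp_example_start(struct sp_context *ctx, int vpe)
{
	ctx->vpe = vpe;
	INIT_WORK(&ctx->work, sp_example_handler);	/* no third argument anymore */
	schedule_work(&ctx->work);
}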
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 7a3ebbeba1f3..de3fae260ff8 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -382,531 +382,6 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
 	return ret;
 }
 
-struct msgbuf32 { s32 mtype; char mtext[1]; };
-
-struct ipc_perm32
-{
-	key_t key;
-	__compat_uid_t uid;
-	__compat_gid_t gid;
-	__compat_uid_t cuid;
-	__compat_gid_t cgid;
-	compat_mode_t mode;
-	unsigned short seq;
-};
-
-struct ipc64_perm32 {
-	key_t key;
-	__compat_uid_t uid;
-	__compat_gid_t gid;
-	__compat_uid_t cuid;
-	__compat_gid_t cgid;
-	compat_mode_t mode;
-	unsigned short seq;
-	unsigned short __pad1;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct semid_ds32 {
-	struct ipc_perm32 sem_perm; /* permissions .. see ipc.h */
-	compat_time_t sem_otime; /* last semop time */
-	compat_time_t sem_ctime; /* last change time */
-	u32 sem_base; /* ptr to first semaphore in array */
-	u32 sem_pending; /* pending operations to be processed */
-	u32 sem_pending_last; /* last pending operation */
-	u32 undo; /* undo requests on this array */
-	unsigned short sem_nsems; /* no. of semaphores in array */
-};
-
-struct semid64_ds32 {
-	struct ipc64_perm32 sem_perm;
-	compat_time_t sem_otime;
-	compat_time_t sem_ctime;
-	unsigned int sem_nsems;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct msqid_ds32
-{
-	struct ipc_perm32 msg_perm;
-	u32 msg_first;
-	u32 msg_last;
-	compat_time_t msg_stime;
-	compat_time_t msg_rtime;
-	compat_time_t msg_ctime;
-	u32 wwait;
-	u32 rwait;
-	unsigned short msg_cbytes;
-	unsigned short msg_qnum;
-	unsigned short msg_qbytes;
-	compat_ipc_pid_t msg_lspid;
-	compat_ipc_pid_t msg_lrpid;
-};
-
-struct msqid64_ds32 {
-	struct ipc64_perm32 msg_perm;
-	compat_time_t msg_stime;
-	unsigned int __unused1;
-	compat_time_t msg_rtime;
-	unsigned int __unused2;
-	compat_time_t msg_ctime;
-	unsigned int __unused3;
-	unsigned int msg_cbytes;
-	unsigned int msg_qnum;
-	unsigned int msg_qbytes;
-	compat_pid_t msg_lspid;
-	compat_pid_t msg_lrpid;
-	unsigned int __unused4;
-	unsigned int __unused5;
-};
-
-struct shmid_ds32 {
-	struct ipc_perm32 shm_perm;
-	int shm_segsz;
-	compat_time_t shm_atime;
-	compat_time_t shm_dtime;
-	compat_time_t shm_ctime;
-	compat_ipc_pid_t shm_cpid;
-	compat_ipc_pid_t shm_lpid;
-	unsigned short shm_nattch;
-};
-
-struct shmid64_ds32 {
-	struct ipc64_perm32 shm_perm;
-	compat_size_t shm_segsz;
-	compat_time_t shm_atime;
-	compat_time_t shm_dtime;
-	compat_time_t shm_ctime;
-	compat_pid_t shm_cpid;
-	compat_pid_t shm_lpid;
-	unsigned int shm_nattch;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct ipc_kludge32 {
-	u32 msgp;
-	s32 msgtyp;
-};
-
-static int
-do_sys32_semctl(int first, int second, int third, void __user *uptr)
-{
-	union semun fourth;
-	u32 pad;
-	int err, err2;
-	struct semid64_ds s;
-	mm_segment_t old_fs;
-
-	if (!uptr)
-		return -EINVAL;
-	err = -EFAULT;
-	if (get_user (pad, (u32 __user *)uptr))
-		return err;
-	if ((third & ~IPC_64) == SETVAL)
-		fourth.val = (int)pad;
-	else
-		fourth.__pad = (void __user *)A(pad);
-	switch (third & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case IPC_SET:
-	case SEM_INFO:
-	case GETVAL:
-	case GETPID:
-	case GETNCNT:
-	case GETZCNT:
-	case GETALL:
-	case SETVAL:
-	case SETALL:
-		err = sys_semctl (first, second, third, fourth);
-		break;
-
-	case IPC_STAT:
-	case SEM_STAT:
-		fourth.__pad = (struct semid64_ds __user *)&s;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_semctl(first, second, third | IPC_64, fourth);
-		set_fs(old_fs);
-
-		if (third & IPC_64) {
-			struct semid64_ds32 __user *usp64 = (struct semid64_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
-		} else {
-			struct semid_ds32 __user *usp32 = (struct semid_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
-		}
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	default:
-		err = - EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static int
-do_sys32_msgsnd (int first, int second, int third, void __user *uptr)
-{
-	struct msgbuf32 __user *up = (struct msgbuf32 __user *)uptr;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (second < 0)
-		return -EINVAL;
-	p = kmalloc (second + sizeof (struct msgbuf)
-		+ 4, GFP_USER);
-	if (!p)
-		return -ENOMEM;
-	err = get_user (p->mtype, &up->mtype);
-	if (err)
-		goto out;
-	err |= __copy_from_user (p->mtext, &up->mtext, second);
-	if (err)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgsnd (first, (struct msgbuf __user *)p, second, third);
-	set_fs (old_fs);
-out:
-	kfree (p);
-
-	return err;
-}
-
-static int
-do_sys32_msgrcv (int first, int second, int msgtyp, int third,
-		 int version, void __user *uptr)
-{
-	struct msgbuf32 __user *up;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (!version) {
-		struct ipc_kludge32 __user *uipck = (struct ipc_kludge32 __user *)uptr;
-		struct ipc_kludge32 ipck;
-
-		err = -EINVAL;
-		if (!uptr)
-			goto out;
-		err = -EFAULT;
-		if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge32)))
-			goto out;
-		uptr = (void __user *)AA(ipck.msgp);
-		msgtyp = ipck.msgtyp;
-	}
-
-	if (second < 0)
-		return -EINVAL;
-	err = -ENOMEM;
-	p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
-	if (!p)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgrcv (first, (struct msgbuf __user *)p, second + 4, msgtyp, third);
-	set_fs (old_fs);
-	if (err < 0)
-		goto free_then_out;
-	up = (struct msgbuf32 __user *)uptr;
-	if (put_user (p->mtype, &up->mtype) ||
-	    __copy_to_user (&up->mtext, p->mtext, err))
-		err = -EFAULT;
-free_then_out:
-	kfree (p);
-out:
-	return err;
-}
-
-static int
-do_sys32_msgctl (int first, int second, void __user *uptr)
-{
-	int err = -EINVAL, err2;
-	struct msqid64_ds m;
-	struct msqid_ds32 __user *up32 = (struct msqid_ds32 __user *)uptr;
-	struct msqid64_ds32 __user *up64 = (struct msqid64_ds32 __user *)uptr;
-	mm_segment_t old_fs;
-
-	switch (second & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case MSG_INFO:
-		err = sys_msgctl (first, second, (struct msqid_ds __user *)uptr);
-		break;
-
-	case IPC_SET:
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up64->msg_qbytes);
-		} else {
-			if (!access_ok(VERIFY_READ, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up32->msg_qbytes);
-		}
-		if (err)
-			break;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		break;
-
-	case IPC_STAT:
-	case MSG_STAT:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up64->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up64->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up64->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up64->msg_perm.seq);
-			err2 |= __put_user(m.msg_stime, &up64->msg_stime);
-			err2 |= __put_user(m.msg_rtime, &up64->msg_rtime);
-			err2 |= __put_user(m.msg_ctime, &up64->msg_ctime);
-			err2 |= __put_user(m.msg_cbytes, &up64->msg_cbytes);
-			err2 |= __put_user(m.msg_qnum, &up64->msg_qnum);
-			err2 |= __put_user(m.msg_qbytes, &up64->msg_qbytes);
-			err2 |= __put_user(m.msg_lspid, &up64->msg_lspid);
-			err2 |= __put_user(m.msg_lrpid, &up64->msg_lrpid);
-			if (err2)
-				err = -EFAULT;
-		} else {
-			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up32->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up32->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up32->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up32->msg_perm.seq);
-			err2 |= __put_user(m.msg_stime, &up32->msg_stime);
-			err2 |= __put_user(m.msg_rtime, &up32->msg_rtime);
-			err2 |= __put_user(m.msg_ctime, &up32->msg_ctime);
-			err2 |= __put_user(m.msg_cbytes, &up32->msg_cbytes);
-			err2 |= __put_user(m.msg_qnum, &up32->msg_qnum);
-			err2 |= __put_user(m.msg_qbytes, &up32->msg_qbytes);
-			err2 |= __put_user(m.msg_lspid, &up32->msg_lspid);
-			err2 |= __put_user(m.msg_lrpid, &up32->msg_lrpid);
-			if (err2)
-				err = -EFAULT;
-		}
-		break;
-	}
-
-	return err;
-}
-
-static int
-do_sys32_shmat (int first, int second, int third, int version, void __user *uptr)
-{
-	unsigned long raddr;
-	u32 __user *uaddr = (u32 __user *)A((u32)third);
-	int err = -EINVAL;
-
-	if (version == 1)
-		return err;
-	err = do_shmat (first, uptr, second, &raddr);
-	if (err)
-		return err;
-	err = put_user (raddr, uaddr);
-	return err;
-}
-
-struct shm_info32 {
-	int used_ids;
-	u32 shm_tot, shm_rss, shm_swp;
-	u32 swap_attempts, swap_successes;
-};
-
-static int
-do_sys32_shmctl (int first, int second, void __user *uptr)
-{
-	struct shmid64_ds32 __user *up64 = (struct shmid64_ds32 __user *)uptr;
-	struct shmid_ds32 __user *up32 = (struct shmid_ds32 __user *)uptr;
-	struct shm_info32 __user *uip = (struct shm_info32 __user *)uptr;
-	int err = -EFAULT, err2;
-	struct shmid64_ds s64;
-	mm_segment_t old_fs;
-	struct shm_info si;
-	struct shmid_ds s;
-
-	switch (second & ~IPC_64) {
-	case IPC_INFO:
-		second = IPC_INFO; /* So that we don't have to translate it */
-	case IPC_RMID:
-	case SHM_LOCK:
-	case SHM_UNLOCK:
-		err = sys_shmctl(first, second, (struct shmid_ds __user *)uptr);
-		break;
-	case IPC_SET:
-		if (second & IPC_64) {
-			err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
-			err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
-			err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
-		} else {
-			err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
-			err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
-			err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
-		}
-		if (err)
-			break;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second & ~IPC_64, (struct shmid_ds __user *)&s);
-		set_fs(old_fs);
-		break;
-
-	case IPC_STAT:
-	case SHM_STAT:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_shmctl(first, second | IPC_64, (void __user *) &s64);
-		set_fs(old_fs);
-		if (err < 0)
-			break;
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
-			err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
-			err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
-			err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
-			err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
-			err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
-			err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
-			err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
-			err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
-			err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
-			err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
-			err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
-			err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
-			err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
-		} else {
-			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
-			err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
-			err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
-			err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
-			err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
-			err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
856 err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
857 err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
858 err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
859 err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
860 err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
861 err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
862 err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
863 err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
864 }
865 if (err2)
866 err = -EFAULT;
867 break;
868
869 case SHM_INFO:
870 old_fs = get_fs();
871 set_fs(KERNEL_DS);
872 err = sys_shmctl(first, second, (void __user *)&si);
873 set_fs(old_fs);
874 if (err < 0)
875 break;
876 err2 = put_user(si.used_ids, &uip->used_ids);
877 err2 |= __put_user(si.shm_tot, &uip->shm_tot);
878 err2 |= __put_user(si.shm_rss, &uip->shm_rss);
879 err2 |= __put_user(si.shm_swp, &uip->shm_swp);
880 err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
881 err2 |= __put_user (si.swap_successes, &uip->swap_successes);
882 if (err2)
883 err = -EFAULT;
884 break;
885
886 default:
887 err = -EINVAL;
888 break;
889 }
890
891 return err;
892}
893
894static int sys32_semtimedop(int semid, struct sembuf __user *tsems, int nsems,
895 const struct compat_timespec __user *timeout32)
896{
897 struct compat_timespec t32;
898 struct timespec __user *t64 = compat_alloc_user_space(sizeof(*t64));
899
900 if (copy_from_user(&t32, timeout32, sizeof(t32)))
901 return -EFAULT;
902
903 if (put_user(t32.tv_sec, &t64->tv_sec) ||
904 put_user(t32.tv_nsec, &t64->tv_nsec))
905 return -EFAULT;
906
907 return sys_semtimedop(semid, tsems, nsems, t64);
908}
909
910asmlinkage long 385asmlinkage long
911sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) 386sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
912{ 387{
@@ -918,48 +393,43 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
918 switch (call) { 393 switch (call) {
919 case SEMOP: 394 case SEMOP:
920 /* struct sembuf is the same on 32 and 64bit :)) */ 395 /* struct sembuf is the same on 32 and 64bit :)) */
921 err = sys_semtimedop (first, (struct sembuf __user *)AA(ptr), second, 396 err = sys_semtimedop(first, compat_ptr(ptr), second, NULL);
922 NULL);
923 break; 397 break;
924 case SEMTIMEDOP: 398 case SEMTIMEDOP:
925 err = sys32_semtimedop (first, (struct sembuf __user *)AA(ptr), second, 399 err = compat_sys_semtimedop(first, compat_ptr(ptr), second,
926 (const struct compat_timespec __user *)AA(fifth)); 400 compat_ptr(fifth));
927 break; 401 break;
928 case SEMGET: 402 case SEMGET:
929 err = sys_semget (first, second, third); 403 err = sys_semget(first, second, third);
930 break; 404 break;
931 case SEMCTL: 405 case SEMCTL:
932 err = do_sys32_semctl (first, second, third, 406 err = compat_sys_semctl(first, second, third, compat_ptr(ptr));
933 (void __user *)AA(ptr));
934 break; 407 break;
935
936 case MSGSND: 408 case MSGSND:
937 err = do_sys32_msgsnd (first, second, third, 409 err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
938 (void __user *)AA(ptr));
939 break; 410 break;
940 case MSGRCV: 411 case MSGRCV:
941 err = do_sys32_msgrcv (first, second, fifth, third, 412 err = compat_sys_msgrcv(first, second, fifth, third,
942 version, (void __user *)AA(ptr)); 413 version, compat_ptr(ptr));
943 break; 414 break;
944 case MSGGET: 415 case MSGGET:
945 err = sys_msgget ((key_t) first, second); 416 err = sys_msgget((key_t) first, second);
946 break; 417 break;
947 case MSGCTL: 418 case MSGCTL:
948 err = do_sys32_msgctl (first, second, (void __user *)AA(ptr)); 419 err = compat_sys_msgctl(first, second, compat_ptr(ptr));
949 break; 420 break;
950
951 case SHMAT: 421 case SHMAT:
952 err = do_sys32_shmat (first, second, third, 422 err = compat_sys_shmat(first, second, third, version,
953 version, (void __user *)AA(ptr)); 423 compat_ptr(ptr));
954 break; 424 break;
955 case SHMDT: 425 case SHMDT:
956 err = sys_shmdt ((char __user *)A(ptr)); 426 err = sys_shmdt(compat_ptr(ptr));
957 break; 427 break;
958 case SHMGET: 428 case SHMGET:
959 err = sys_shmget (first, (unsigned)second, third); 429 err = sys_shmget(first, (unsigned)second, third);
960 break; 430 break;
961 case SHMCTL: 431 case SHMCTL:
962 err = do_sys32_shmctl (first, second, (void __user *)AA(ptr)); 432 err = compat_sys_shmctl(first, second, compat_ptr(ptr));
963 break; 433 break;
964 default: 434 default:
965 err = -EINVAL; 435 err = -EINVAL;
@@ -969,18 +439,28 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
969 return err; 439 return err;
970} 440}
971 441
972asmlinkage long sys32_shmat(int shmid, char __user *shmaddr, 442#ifdef CONFIG_MIPS32_N32
973 int shmflg, int32_t __user *addr) 443asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, u32 arg)
974{ 444{
975 unsigned long raddr; 445 /* compat_sys_semctl expects a pointer to union semun */
976 int err; 446 u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
447 if (put_user(arg, uptr))
448 return -EFAULT;
449 return compat_sys_semctl(semid, semnum, cmd, uptr);
450}
977 451
978 err = do_shmat(shmid, shmaddr, shmflg, &raddr); 452asmlinkage long sysn32_msgsnd(int msqid, u32 msgp, unsigned msgsz, int msgflg)
979 if (err) 453{
980 return err; 454 return compat_sys_msgsnd(msqid, msgsz, msgflg, compat_ptr(msgp));
455}
981 456
982 return put_user(raddr, addr); 457asmlinkage long sysn32_msgrcv(int msqid, u32 msgp, size_t msgsz, int msgtyp,
458 int msgflg)
459{
460 return compat_sys_msgrcv(msqid, msgsz, msgtyp, msgflg, IPC_64,
461 compat_ptr(msgp));
983} 462}
463#endif
984 464
985struct sysctl_args32 465struct sysctl_args32
986{ 466{
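The hunk above replaces the old A()/AA() address macros with compat_ptr() when handing 32-bit user pointers to the compat_sys_* helpers. As a rough illustration of what that widening amounts to (a hedged sketch, not the kernel's actual asm-mips definition; the helper name model_compat_ptr and the sample address are made up), a 32-bit pointer from an o32/n32 task is simply extended to a full pointer, and zero- and sign-extension coincide for valid user addresses below 2 GB:

#include <stdio.h>

typedef unsigned int compat_uptr_t;	/* 32-bit user pointer as seen by a 64-bit kernel */

static void *model_compat_ptr(compat_uptr_t uptr)
{
	/* zero-extend; for user addresses below 2 GB this matches sign-extension */
	return (void *)(unsigned long)uptr;
}

int main(void)
{
	compat_uptr_t p32 = 0x7fff1234u;	/* made-up o32 user address */

	printf("widened pointer: %p\n", model_compat_ptr(p32));
	return 0;
}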
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
new file mode 100644
index 000000000000..e0ad754c7edd
--- /dev/null
+++ b/arch/mips/kernel/machine_kexec.c
@@ -0,0 +1,85 @@
1/*
2 * machine_kexec.c for kexec
3 * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
4 *
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
7 */
8
9#include <linux/kexec.h>
10#include <linux/mm.h>
11#include <linux/delay.h>
12
13#include <asm/cacheflush.h>
14#include <asm/page.h>
15
16const extern unsigned char relocate_new_kernel[];
17const extern unsigned int relocate_new_kernel_size;
18
19extern unsigned long kexec_start_address;
20extern unsigned long kexec_indirection_page;
21
22int
23machine_kexec_prepare(struct kimage *kimage)
24{
25 return 0;
26}
27
28void
29machine_kexec_cleanup(struct kimage *kimage)
30{
31}
32
33void
34machine_shutdown(void)
35{
36}
37
38void
39machine_crash_shutdown(struct pt_regs *regs)
40{
41}
42
43void
44machine_kexec(struct kimage *image)
45{
46 unsigned long reboot_code_buffer;
47 unsigned long entry;
48 unsigned long *ptr;
49
50 reboot_code_buffer =
51 (unsigned long)page_address(image->control_code_page);
52
53 kexec_start_address = image->start;
54 kexec_indirection_page = phys_to_virt(image->head & PAGE_MASK);
55
56 memcpy((void*)reboot_code_buffer, relocate_new_kernel,
57 relocate_new_kernel_size);
58
59 /*
60 * The generic kexec code builds a page list with physical
61 * addresses. They are directly accessible through KSEG0 (or
62 * CKSEG0 or XKPHYS on a 64-bit system), hence the
63 * phys_to_virt() call.
64 */
65 for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
66 ptr = (entry & IND_INDIRECTION) ?
67 phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
68 if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
69 *ptr & IND_DESTINATION)
70 *ptr = phys_to_virt(*ptr);
71 }
72
73 /*
74 * we do not want to be bothered.
75 */
76 local_irq_disable();
77
78 flush_icache_range(reboot_code_buffer,
79 reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
80
81 printk("Will call new kernel at %08lx\n", image->start);
82 printk("Bye ...\n");
83 flush_cache_all();
84 ((void (*)(void))reboot_code_buffer)();
85}
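For reference, the loop above rewrites the kexec entry list in place: every entry carrying an IND_SOURCE, IND_INDIRECTION or IND_DESTINATION flag is converted from a physical to a KSEG0 virtual address, and because phys_to_virt() only adds a constant offset, the flag bits in the low part of each entry survive the translation. Below is a minimal user-space model of that walk, not kernel code: the IND_* values mirror the bit tests in relocate_kernel.S below, while OFFSET, the page size and to_virt()/to_phys() are stand-ins for the real phys_to_virt()/virt_to_phys().

#include <stdio.h>

#define IND_DESTINATION 0x1UL
#define IND_INDIRECTION 0x2UL
#define IND_DONE        0x4UL
#define IND_SOURCE      0x8UL

#define MODEL_PAGE_SIZE 4096UL
#define MODEL_PAGE_MASK (~(MODEL_PAGE_SIZE - 1))
#define OFFSET          0x1000UL	/* stand-in translation offset */

static unsigned long to_virt(unsigned long phys) { return phys + OFFSET; }
static unsigned long to_phys(const void *virt) { return (unsigned long)virt - OFFSET; }

static unsigned long page[MODEL_PAGE_SIZE / sizeof(unsigned long)]
	__attribute__((aligned(4096)));

int main(void)
{
	unsigned long head[3];
	unsigned long *ptr, entry;

	/* a minimal list: destination page, one source page, then DONE */
	head[0] = to_phys(page) | IND_DESTINATION;
	head[1] = to_phys(page) | IND_SOURCE;
	head[2] = IND_DONE;

	/* the fixup loop, shaped like the one in machine_kexec() above */
	for (ptr = head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
		   (unsigned long *)to_virt(entry & MODEL_PAGE_MASK) : ptr + 1) {
		if (*ptr & (IND_SOURCE | IND_INDIRECTION | IND_DESTINATION))
			*ptr = to_virt(*ptr);	/* low-bit flags survive */
	}

	printf("entry 0 after fixup: %#lx (flags %#lx)\n",
	       head[0], head[0] & ~MODEL_PAGE_MASK);
	return 0;
}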
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index f44a01357ada..2ef857c3ee53 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -46,5 +46,7 @@ EXPORT_SYMBOL(__strnlen_user_nocheck_asm);
46EXPORT_SYMBOL(__strnlen_user_asm); 46EXPORT_SYMBOL(__strnlen_user_asm);
47 47
48EXPORT_SYMBOL(csum_partial); 48EXPORT_SYMBOL(csum_partial);
49EXPORT_SYMBOL(csum_partial_copy_nocheck);
50EXPORT_SYMBOL(__csum_partial_copy_user);
49 51
50EXPORT_SYMBOL(invalid_pte_table); 52EXPORT_SYMBOL(invalid_pte_table);
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index d7bf0215bc1d..cb0801437b66 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <asm/pgtable.h> /* MODULE_START */
32 33
33struct mips_hi16 { 34struct mips_hi16 {
34 struct mips_hi16 *next; 35 struct mips_hi16 *next;
@@ -43,9 +44,23 @@ static DEFINE_SPINLOCK(dbe_lock);
43 44
44void *module_alloc(unsigned long size) 45void *module_alloc(unsigned long size)
45{ 46{
47#ifdef MODULE_START
48 struct vm_struct *area;
49
50 size = PAGE_ALIGN(size);
51 if (!size)
52 return NULL;
53
54 area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
55 if (!area)
56 return NULL;
57
58 return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
59#else
46 if (size == 0) 60 if (size == 0)
47 return NULL; 61 return NULL;
48 return vmalloc(size); 62 return vmalloc(size);
63#endif
49} 64}
50 65
51/* Free memory returned from module_alloc */ 66/* Free memory returned from module_alloc */
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..a3f0d00c1334
--- /dev/null
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -0,0 +1,80 @@
1/*
2 * relocate_kernel.S for kexec
3 * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006
4 *
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
7 */
8
9#include <asm/asm.h>
10#include <asm/asmmacro.h>
11#include <asm/regdef.h>
12#include <asm/page.h>
13#include <asm/mipsregs.h>
14#include <asm/stackframe.h>
15#include <asm/addrspace.h>
16
17 .globl relocate_new_kernel
18relocate_new_kernel:
19
20 PTR_L s0, kexec_indirection_page
21 PTR_L s1, kexec_start_address
22
23process_entry:
24 PTR_L s2, (s0)
25 PTR_ADD s0, s0, SZREG
26
27 /* destination page */
28 and s3, s2, 0x1
29 beq s3, zero, 1f
30 and s4, s2, ~0x1 /* store destination addr in s4 */
31 move a0, s4
32 b process_entry
33
341:
35 /* indirection page, update s0 */
36 and s3, s2, 0x2
37 beq s3, zero, 1f
38 and s0, s2, ~0x2
39 b process_entry
40
411:
42 /* done page */
43 and s3, s2, 0x4
44 beq s3, zero, 1f
45 b done
461:
47 /* source page */
48 and s3, s2, 0x8
49 beq s3, zero, process_entry
50 and s2, s2, ~0x8
51 li s6, (1 << PAGE_SHIFT) / SZREG
52
53copy_word:
54 /* copy page word by word */
55 REG_L s5, (s2)
56 REG_S s5, (s4)
57 INT_ADD s4, s4, SZREG
58 INT_ADD s2, s2, SZREG
59 INT_SUB s6, s6, 1
60 beq s6, zero, process_entry
61 b copy_word
62 b process_entry
63
64done:
65 /* jump to kexec_start_address */
66 j s1
67
68 .globl kexec_start_address
69kexec_start_address:
70 .long 0x0
71
72 .globl kexec_indirection_page
73kexec_indirection_page:
74 .long 0x0
75
76relocate_new_kernel_end:
77
78 .globl relocate_new_kernel_size
79relocate_new_kernel_size:
80 .long relocate_new_kernel_end - relocate_new_kernel
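Read as C, the state machine above does roughly the following. This is a readability aid only, assuming PAGE_SHIFT == 12; the function name is made up, and the real stub has to stay as self-contained assembly because by the time it runs the old kernel, including anything it could call such as memcpy(), may already have been overwritten.

static void relocate_new_kernel_model(unsigned long *indirection_page,
				      void (*start)(void))
{
	unsigned long *ind = indirection_page;	/* s0: kexec_indirection_page */
	unsigned long *dst = 0;			/* s4: current destination */
	unsigned long entry;

	for (;;) {
		entry = *ind++;
		if (entry & 0x1) {		/* IND_DESTINATION */
			dst = (unsigned long *)(entry & ~0x1UL);
		} else if (entry & 0x2) {	/* IND_INDIRECTION */
			ind = (unsigned long *)(entry & ~0x2UL);
		} else if (entry & 0x4) {	/* IND_DONE */
			break;
		} else if (entry & 0x8) {	/* IND_SOURCE: copy one page */
			unsigned long *src = (unsigned long *)(entry & ~0x8UL);
			unsigned long n = (1UL << 12) / sizeof(unsigned long);

			while (n--)
				*dst++ = *src++;	/* s4 keeps advancing */
		}
	}

	start();	/* j s1: jump to kexec_start_address */
}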
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
index 621037db2290..060563a712b6 100644
--- a/arch/mips/kernel/reset.c
+++ b/arch/mips/kernel/reset.c
@@ -23,6 +23,8 @@ void (*_machine_restart)(char *command);
23void (*_machine_halt)(void); 23void (*_machine_halt)(void);
24void (*pm_power_off)(void); 24void (*pm_power_off)(void);
25 25
26EXPORT_SYMBOL(pm_power_off);
27
26void machine_restart(char *command) 28void machine_restart(char *command)
27{ 29{
28 if (_machine_restart) 30 if (_machine_restart)
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 8c8c8324f775..5a99e3e0c96d 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -415,7 +415,7 @@ static unsigned int file_poll(struct file *file, poll_table * wait)
415 int minor; 415 int minor;
416 unsigned int mask = 0; 416 unsigned int mask = 0;
417 417
418 minor = iminor(file->f_dentry->d_inode); 418 minor = iminor(file->f_path.dentry->d_inode);
419 419
420 poll_wait(file, &channel_wqs[minor].rt_queue, wait); 420 poll_wait(file, &channel_wqs[minor].rt_queue, wait);
421 poll_wait(file, &channel_wqs[minor].lx_queue, wait); 421 poll_wait(file, &channel_wqs[minor].lx_queue, wait);
@@ -437,7 +437,7 @@ static unsigned int file_poll(struct file *file, poll_table * wait)
437static ssize_t file_read(struct file *file, char __user * buffer, size_t count, 437static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
438 loff_t * ppos) 438 loff_t * ppos)
439{ 439{
440 int minor = iminor(file->f_dentry->d_inode); 440 int minor = iminor(file->f_path.dentry->d_inode);
441 441
442 /* data available? */ 442 /* data available? */
443 if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) { 443 if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
@@ -454,7 +454,7 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
454 struct rtlx_channel *rt; 454 struct rtlx_channel *rt;
455 DECLARE_WAITQUEUE(wait, current); 455 DECLARE_WAITQUEUE(wait, current);
456 456
457 minor = iminor(file->f_dentry->d_inode); 457 minor = iminor(file->f_path.dentry->d_inode);
458 rt = &rtlx->channel[minor]; 458 rt = &rtlx->channel[minor];
459 459
460 /* any space left... */ 460 /* any space left... */
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a95f37de080e..7c0b3936ba44 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -653,7 +653,7 @@ einval: li v0, -EINVAL
653 sys sys_move_pages 6 653 sys sys_move_pages 6
654 sys sys_set_robust_list 2 654 sys sys_set_robust_list 2
655 sys sys_get_robust_list 3 /* 4310 */ 655 sys sys_get_robust_list 3 /* 4310 */
656 sys sys_ni_syscall 0 656 sys sys_kexec_load 4
657 sys sys_getcpu 3 657 sys sys_getcpu 3
658 sys sys_epoll_pwait 6 658 sys sys_epoll_pwait 6
659 .endm 659 .endm
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 8fb0f60f657b..e569b846e9a3 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -468,6 +468,6 @@ sys_call_table:
468 PTR sys_move_pages 468 PTR sys_move_pages
469 PTR sys_set_robust_list 469 PTR sys_set_robust_list
470 PTR sys_get_robust_list 470 PTR sys_get_robust_list
471 PTR sys_ni_syscall /* 5270 */ 471 PTR sys_kexec_load /* 5270 */
472 PTR sys_getcpu 472 PTR sys_getcpu
473 PTR sys_epoll_pwait 473 PTR sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 0da5ca2040ff..a7bff2a54723 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -149,8 +149,8 @@ EXPORT(sysn32_call_table)
149 PTR sys_mincore 149 PTR sys_mincore
150 PTR sys_madvise 150 PTR sys_madvise
151 PTR sys_shmget 151 PTR sys_shmget
152 PTR sys32_shmat 152 PTR sys_shmat
153 PTR sys_shmctl /* 6030 */ 153 PTR compat_sys_shmctl /* 6030 */
154 PTR sys_dup 154 PTR sys_dup
155 PTR sys_dup2 155 PTR sys_dup2
156 PTR sys_pause 156 PTR sys_pause
@@ -184,12 +184,12 @@ EXPORT(sysn32_call_table)
184 PTR sys32_newuname 184 PTR sys32_newuname
185 PTR sys_semget 185 PTR sys_semget
186 PTR sys_semop 186 PTR sys_semop
187 PTR sys_semctl 187 PTR sysn32_semctl
188 PTR sys_shmdt /* 6065 */ 188 PTR sys_shmdt /* 6065 */
189 PTR sys_msgget 189 PTR sys_msgget
190 PTR sys_msgsnd 190 PTR sysn32_msgsnd
191 PTR sys_msgrcv 191 PTR sysn32_msgrcv
192 PTR sys_msgctl 192 PTR compat_sys_msgctl
193 PTR compat_sys_fcntl /* 6070 */ 193 PTR compat_sys_fcntl /* 6070 */
194 PTR sys_flock 194 PTR sys_flock
195 PTR sys_fsync 195 PTR sys_fsync
@@ -335,7 +335,7 @@ EXPORT(sysn32_call_table)
335 PTR compat_sys_fcntl64 335 PTR compat_sys_fcntl64
336 PTR sys_set_tid_address 336 PTR sys_set_tid_address
337 PTR sys_restart_syscall 337 PTR sys_restart_syscall
338 PTR sys_semtimedop /* 6215 */ 338 PTR compat_sys_semtimedop /* 6215 */
339 PTR sys_fadvise64_64 339 PTR sys_fadvise64_64
340 PTR compat_sys_statfs64 340 PTR compat_sys_statfs64
341 PTR compat_sys_fstatfs64 341 PTR compat_sys_fstatfs64
@@ -394,6 +394,6 @@ EXPORT(sysn32_call_table)
394 PTR sys_move_pages 394 PTR sys_move_pages
395 PTR compat_sys_set_robust_list 395 PTR compat_sys_set_robust_list
396 PTR compat_sys_get_robust_list 396 PTR compat_sys_get_robust_list
397 PTR sys_ni_syscall 397 PTR compat_sys_kexec_load
398 PTR sys_getcpu 398 PTR sys_getcpu
399 PTR sys_epoll_pwait 399 PTR sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index b9d00cae8b5f..e91379c1be1d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -516,7 +516,7 @@ sys_call_table:
516 PTR compat_sys_move_pages 516 PTR compat_sys_move_pages
517 PTR compat_sys_set_robust_list 517 PTR compat_sys_set_robust_list
518 PTR compat_sys_get_robust_list /* 4310 */ 518 PTR compat_sys_get_robust_list /* 4310 */
519 PTR sys_ni_syscall 519 PTR compat_sys_kexec_load
520 PTR sys_getcpu 520 PTR sys_getcpu
521 PTR sys_epoll_pwait 521 PTR sys_epoll_pwait
522 .size sys_call_table,.-sys_call_table 522 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 8f6e89697ccf..89440a0d8528 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -145,13 +145,12 @@ static int __init rd_start_early(char *p)
145 unsigned long start = memparse(p, &p); 145 unsigned long start = memparse(p, &p);
146 146
147#ifdef CONFIG_64BIT 147#ifdef CONFIG_64BIT
148 /* HACK: Guess if the sign extension was forgotten */ 148 /* Guess if the sign extension was forgotten by bootloader */
149 if (start > 0x0000000080000000 && start < 0x00000000ffffffff) 149 if (start < XKPHYS)
150 start |= 0xffffffff00000000UL; 150 start = (int)start;
151#endif 151#endif
152 initrd_start = start; 152 initrd_start = start;
153 initrd_end += start; 153 initrd_end += start;
154
155 return 0; 154 return 0;
156} 155}
157early_param("rd_start", rd_start_early); 156early_param("rd_start", rd_start_early);
@@ -159,41 +158,64 @@ early_param("rd_start", rd_start_early);
159static int __init rd_size_early(char *p) 158static int __init rd_size_early(char *p)
160{ 159{
161 initrd_end += memparse(p, &p); 160 initrd_end += memparse(p, &p);
162
163 return 0; 161 return 0;
164} 162}
165early_param("rd_size", rd_size_early); 163early_param("rd_size", rd_size_early);
166 164
165/* it returns the next free pfn after initrd */
167static unsigned long __init init_initrd(void) 166static unsigned long __init init_initrd(void)
168{ 167{
169 unsigned long tmp, end, size; 168 unsigned long end;
170 u32 *initrd_header; 169 u32 *initrd_header;
171 170
172 ROOT_DEV = Root_RAM0;
173
174 /* 171 /*
175 * Board specific code or command line parser should have 172 * Board specific code or command line parser should have
176 * already set up initrd_start and initrd_end. In these cases 173 * already set up initrd_start and initrd_end. In these cases
177 * perform sanity checks and use them if all looks good. 174 * perform sanity checks and use them if all looks good.
178 */ 175 */
179 size = initrd_end - initrd_start; 176 if (initrd_start && initrd_end > initrd_start)
180 if (initrd_end == 0 || size == 0) { 177 goto sanitize;
181 initrd_start = 0; 178
182 initrd_end = 0; 179 /*
183 } else 180 * See if initrd has been added to the kernel image by
184 return initrd_end; 181 * arch/mips/boot/addinitrd.c. In that case a header is
185 182 * prepended to initrd and is made up by 8 bytes. The fisrt
186 end = (unsigned long)&_end; 183 * prepended to initrd and is made up of 8 bytes. The first
187 tmp = PAGE_ALIGN(end) - sizeof(u32) * 2; 184 * initrd. Initrd start must be page aligned in any cases.
188 if (tmp < end) 185 * initrd. Initrd start must be page aligned in any case.
189 tmp += PAGE_SIZE; 186 initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
190 187 if (initrd_header[0] != 0x494E5244)
191 initrd_header = (u32 *)tmp; 188 goto disable;
192 if (initrd_header[0] == 0x494E5244) { 189 initrd_start = (unsigned long)(initrd_header + 2);
193 initrd_start = (unsigned long)&initrd_header[2]; 190 initrd_end = initrd_start + initrd_header[1];
194 initrd_end = initrd_start + initrd_header[1]; 191
192sanitize:
193 if (initrd_start & ~PAGE_MASK) {
194 printk(KERN_ERR "initrd start must be page aligned\n");
195 goto disable;
195 } 196 }
196 return initrd_end; 197 if (initrd_start < PAGE_OFFSET) {
198 printk(KERN_ERR "initrd start < PAGE_OFFSET\n");
199 goto disable;
200 }
201
202 /*
203 * Sanitize initrd addresses. For example firmware
204 * can't guess if they need to pass them through
205 * 64-bits values if the kernel has been built in pure
206 * 32-bit. We need also to switch from KSEG0 to XKPHYS
207 * addresses now, so the code can now safely use __pa().
208 */
209 end = __pa(initrd_end);
210 initrd_end = (unsigned long)__va(end);
211 initrd_start = (unsigned long)__va(__pa(initrd_start));
212
213 ROOT_DEV = Root_RAM0;
214 return PFN_UP(end);
215disable:
216 initrd_start = 0;
217 initrd_end = 0;
218 return 0;
197} 219}
198 220
199static void __init finalize_initrd(void) 221static void __init finalize_initrd(void)
@@ -204,12 +226,12 @@ static void __init finalize_initrd(void)
204 printk(KERN_INFO "Initrd not found or empty"); 226 printk(KERN_INFO "Initrd not found or empty");
205 goto disable; 227 goto disable;
206 } 228 }
207 if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) { 229 if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
208 printk("Initrd extends beyond end of memory"); 230 printk("Initrd extends beyond end of memory");
209 goto disable; 231 goto disable;
210 } 232 }
211 233
212 reserve_bootmem(CPHYSADDR(initrd_start), size); 234 reserve_bootmem(__pa(initrd_start), size);
213 initrd_below_start_ok = 1; 235 initrd_below_start_ok = 1;
214 236
215 printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n", 237 printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
@@ -259,8 +281,7 @@ static void __init bootmem_init(void)
259 * not selected. Once that done we can determine the low bound 281 * not selected. Once that done we can determine the low bound
260 * of usable memory. 282 * of usable memory.
261 */ 283 */
262 reserved_end = init_initrd(); 284 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
263 reserved_end = PFN_UP(CPHYSADDR(max(reserved_end, (unsigned long)&_end)));
264 285
265 /* 286 /*
266 * Find the highest page frame number we have available. 287 * Find the highest page frame number we have available.
@@ -432,10 +453,10 @@ static void __init resource_init(void)
432 if (UNCAC_BASE != IO_BASE) 453 if (UNCAC_BASE != IO_BASE)
433 return; 454 return;
434 455
435 code_resource.start = virt_to_phys(&_text); 456 code_resource.start = __pa_symbol(&_text);
436 code_resource.end = virt_to_phys(&_etext) - 1; 457 code_resource.end = __pa_symbol(&_etext) - 1;
437 data_resource.start = virt_to_phys(&_etext); 458 data_resource.start = __pa_symbol(&_etext);
438 data_resource.end = virt_to_phys(&_edata) - 1; 459 data_resource.end = __pa_symbol(&_edata) - 1;
439 460
440 /* 461 /*
441 * Request address space for all standard RAM. 462 * Request address space for all standard RAM.
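The rewritten init_initrd() above recognises the 8-byte header that arch/mips/boot/addinitrd.c glues in front of an initrd appended to the kernel image: a 32-bit magic (0x494E5244) followed by a 32-bit payload size, ending on a page boundary so the initrd itself starts page aligned. A small host-side sketch of that header format follows; it assumes you have the header-plus-initrd blob as a standalone file and that the words are stored big-endian to match a big-endian MIPS target, both of which are my assumptions rather than anything the kernel code requires.

#include <stdio.h>
#include <stdint.h>

#define ADDINITRD_MAGIC 0x494E5244u	/* reads "INRD" when stored big-endian */

static uint32_t read_be32(const unsigned char *p)
{
	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
}

int main(int argc, char **argv)
{
	unsigned char hdr[8];
	FILE *f;

	if (argc != 2 || (f = fopen(argv[1], "rb")) == NULL) {
		fprintf(stderr, "usage: %s <header+initrd blob>\n", argv[0]);
		return 1;
	}
	if (fread(hdr, 1, sizeof(hdr), f) != sizeof(hdr)) {
		fclose(f);
		return 1;
	}
	fclose(f);

	if (read_be32(hdr) != ADDINITRD_MAGIC) {
		printf("no addinitrd header found\n");
		return 1;
	}
	printf("initrd payload: %u bytes after the 8-byte header\n",
	       read_be32(hdr + 4));
	return 0;
}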
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 477c5334ec1b..a67c18555ed3 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -17,7 +17,6 @@
17 */ 17 */
18#include <linux/cache.h> 18#include <linux/cache.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/sched.h>
21#include <linux/mm.h> 20#include <linux/mm.h>
22#include <linux/smp.h> 21#include <linux/smp.h>
23#include <linux/smp_lock.h> 22#include <linux/smp_lock.h>
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 2ac19a6cbf68..1ee689c0e0c9 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -278,7 +278,9 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
278 278
279 /* need to mark IPI's as IRQ_PER_CPU */ 279 /* need to mark IPI's as IRQ_PER_CPU */
280 irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU; 280 irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
281 set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
281 irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU; 282 irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
283 set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
282} 284}
283 285
284/* 286/*
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 49db516789e0..0555fc554f65 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -172,7 +172,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
172 172
173 spin_lock(&smp_call_lock); 173 spin_lock(&smp_call_lock);
174 call_data = &data; 174 call_data = &data;
175 mb(); 175 smp_mb();
176 176
177 /* Send a message to all other CPUs and wait for them to respond */ 177 /* Send a message to all other CPUs and wait for them to respond */
178 for_each_online_cpu(i) 178 for_each_online_cpu(i)
@@ -204,7 +204,7 @@ void smp_call_function_interrupt(void)
204 * Notify initiating CPU that I've grabbed the data and am 204 * Notify initiating CPU that I've grabbed the data and am
205 * about to execute the function. 205 * about to execute the function.
206 */ 206 */
207 mb(); 207 smp_mb();
208 atomic_inc(&call_data->started); 208 atomic_inc(&call_data->started);
209 209
210 /* 210 /*
@@ -215,7 +215,7 @@ void smp_call_function_interrupt(void)
215 irq_exit(); 215 irq_exit();
216 216
217 if (wait) { 217 if (wait) {
218 mb(); 218 smp_mb();
219 atomic_inc(&call_data->finished); 219 atomic_inc(&call_data->finished);
220 } 220 }
221} 221}
@@ -271,7 +271,7 @@ void __devinit smp_prepare_boot_cpu(void)
271 * and keep control until "cpu_online(cpu)" is set. Note: cpu is 271 * and keep control until "cpu_online(cpu)" is set. Note: cpu is
272 * physical, not logical. 272 * physical, not logical.
273 */ 273 */
274int __devinit __cpu_up(unsigned int cpu) 274int __cpuinit __cpu_up(unsigned int cpu)
275{ 275{
276 struct task_struct *idle; 276 struct task_struct *idle;
277 277
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 3b78caf112f5..802febed7df5 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1009,6 +1009,7 @@ void setup_cross_vpe_interrupts(void)
1009 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); 1009 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1010 1010
1011 irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU; 1011 irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
1012 set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
1012} 1013}
1013 1014
1014/* 1015/*
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 93c74fefff76..6c2406a93f2b 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -732,7 +732,7 @@ asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs __user *buf)
732 goto out; 732 goto out;
733 } 733 }
734 734
735 error = vfs_statfs(file->f_dentry, &kbuf); 735 error = vfs_statfs(file->f_path.dentry, &kbuf);
736 if (error) 736 if (error)
737 goto out_f; 737 goto out_f;
738 738
@@ -1041,7 +1041,7 @@ asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot,
1041 unsigned long old_pos; 1041 unsigned long old_pos;
1042 long max_size = offset + len; 1042 long max_size = offset + len;
1043 1043
1044 if (max_size > file->f_dentry->d_inode->i_size) { 1044 if (max_size > file->f_path.dentry->d_inode->i_size) {
1045 old_pos = sys_lseek (fd, max_size - 1, 0); 1045 old_pos = sys_lseek (fd, max_size - 1, 0);
1046 sys_write (fd, (void __user *) "", 1); 1046 sys_write (fd, (void __user *) "", 1);
1047 sys_lseek (fd, old_pos, 0); 1047 sys_lseek (fd, old_pos, 0);
@@ -1406,7 +1406,7 @@ asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs __user *buf)
1406 error = -EBADF; 1406 error = -EBADF;
1407 goto out; 1407 goto out;
1408 } 1408 }
1409 error = vfs_statfs(file->f_dentry, &kbuf); 1409 error = vfs_statfs(file->f_path.dentry, &kbuf);
1410 if (error) 1410 if (error)
1411 goto out_f; 1411 goto out_f;
1412 1412
@@ -1526,7 +1526,7 @@ asmlinkage int irix_mmap64(struct pt_regs *regs)
1526 unsigned long old_pos; 1526 unsigned long old_pos;
1527 long max_size = off2 + len; 1527 long max_size = off2 + len;
1528 1528
1529 if (max_size > file->f_dentry->d_inode->i_size) { 1529 if (max_size > file->f_path.dentry->d_inode->i_size) {
1530 old_pos = sys_lseek (fd, max_size - 1, 0); 1530 old_pos = sys_lseek (fd, max_size - 1, 0);
1531 sys_write (fd, (void __user *) "", 1); 1531 sys_write (fd, (void __user *) "", 1);
1532 sys_lseek (fd, old_pos, 0); 1532 sys_lseek (fd, old_pos, 0);
@@ -1658,7 +1658,7 @@ asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs __user *buf)
1658 error = -EBADF; 1658 error = -EBADF;
1659 goto out; 1659 goto out;
1660 } 1660 }
1661 error = vfs_statfs(file->f_dentry, &kbuf); 1661 error = vfs_statfs(file->f_path.dentry, &kbuf);
1662 if (error) 1662 if (error)
1663 goto out_f; 1663 goto out_f;
1664 1664
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index e535f86efa2f..8aa544f73a5e 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -11,7 +11,6 @@
11 * Free Software Foundation; either version 2 of the License, or (at your 11 * Free Software Foundation; either version 2 of the License, or (at your
12 * option) any later version. 12 * option) any later version.
13 */ 13 */
14#include <linux/clocksource.h>
15#include <linux/types.h> 14#include <linux/types.h>
16#include <linux/kernel.h> 15#include <linux/kernel.h>
17#include <linux/init.h> 16#include <linux/init.h>
@@ -83,17 +82,11 @@ static void null_timer_ack(void) { /* nothing */ }
83/* 82/*
84 * Null high precision timer functions for systems lacking one. 83 * Null high precision timer functions for systems lacking one.
85 */ 84 */
86static unsigned int null_hpt_read(void) 85static cycle_t null_hpt_read(void)
87{ 86{
88 return 0; 87 return 0;
89} 88}
90 89
91static void __init null_hpt_init(void)
92{
93 /* nothing */
94}
95
96
97/* 90/*
98 * Timer ack for an R4k-compatible timer of a known frequency. 91 * Timer ack for an R4k-compatible timer of a known frequency.
99 */ 92 */
@@ -101,10 +94,8 @@ static void c0_timer_ack(void)
101{ 94{
102 unsigned int count; 95 unsigned int count;
103 96
104#ifndef CONFIG_SOC_PNX8550 /* pnx8550 resets to zero */
105 /* Ack this timer interrupt and set the next one. */ 97 /* Ack this timer interrupt and set the next one. */
106 expirelo += cycles_per_jiffy; 98 expirelo += cycles_per_jiffy;
107#endif
108 write_c0_compare(expirelo); 99 write_c0_compare(expirelo);
109 100
110 /* Check to see if we have missed any timer interrupts. */ 101 /* Check to see if we have missed any timer interrupts. */
@@ -118,7 +109,7 @@ static void c0_timer_ack(void)
118/* 109/*
119 * High precision timer functions for a R4k-compatible timer. 110 * High precision timer functions for a R4k-compatible timer.
120 */ 111 */
121static unsigned int c0_hpt_read(void) 112static cycle_t c0_hpt_read(void)
122{ 113{
123 return read_c0_count(); 114 return read_c0_count();
124} 115}
@@ -132,9 +123,6 @@ static void __init c0_hpt_timer_init(void)
132 123
133int (*mips_timer_state)(void); 124int (*mips_timer_state)(void);
134void (*mips_timer_ack)(void); 125void (*mips_timer_ack)(void);
135unsigned int (*mips_hpt_read)(void);
136void (*mips_hpt_init)(void) __initdata = null_hpt_init;
137unsigned int mips_hpt_mask = 0xffffffff;
138 126
139/* last time when xtime and rtc are sync'ed up */ 127/* last time when xtime and rtc are sync'ed up */
140static long last_rtc_update; 128static long last_rtc_update;
@@ -276,8 +264,7 @@ static struct irqaction timer_irqaction = {
276 264
277static unsigned int __init calibrate_hpt(void) 265static unsigned int __init calibrate_hpt(void)
278{ 266{
279 u64 frequency; 267 cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
280 u32 hpt_start, hpt_end, hpt_count, hz;
281 268
282 const int loops = HZ / 10; 269 const int loops = HZ / 10;
283 int log_2_loops = 0; 270 int log_2_loops = 0;
@@ -303,28 +290,23 @@ static unsigned int __init calibrate_hpt(void)
303 * during the calculated number of periods between timer 290 * during the calculated number of periods between timer
304 * interrupts. 291 * interrupts.
305 */ 292 */
306 hpt_start = mips_hpt_read(); 293 hpt_start = clocksource_mips.read();
307 do { 294 do {
308 while (mips_timer_state()); 295 while (mips_timer_state());
309 while (!mips_timer_state()); 296 while (!mips_timer_state());
310 } while (--i); 297 } while (--i);
311 hpt_end = mips_hpt_read(); 298 hpt_end = clocksource_mips.read();
312 299
313 hpt_count = (hpt_end - hpt_start) & mips_hpt_mask; 300 hpt_count = (hpt_end - hpt_start) & clocksource_mips.mask;
314 hz = HZ; 301 hz = HZ;
315 frequency = (u64)hpt_count * (u64)hz; 302 frequency = hpt_count * hz;
316 303
317 return frequency >> log_2_loops; 304 return frequency >> log_2_loops;
318} 305}
319 306
320static cycle_t read_mips_hpt(void) 307struct clocksource clocksource_mips = {
321{
322 return (cycle_t)mips_hpt_read();
323}
324
325static struct clocksource clocksource_mips = {
326 .name = "MIPS", 308 .name = "MIPS",
327 .read = read_mips_hpt, 309 .mask = 0xffffffff,
328 .is_continuous = 1, 310 .is_continuous = 1,
329}; 311};
330 312
@@ -333,7 +315,7 @@ static void __init init_mips_clocksource(void)
333 u64 temp; 315 u64 temp;
334 u32 shift; 316 u32 shift;
335 317
336 if (!mips_hpt_frequency || mips_hpt_read == null_hpt_read) 318 if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
337 return; 319 return;
338 320
339 /* Calculate a somewhat reasonable rating value */ 321 /* Calculate a somewhat reasonable rating value */
@@ -347,7 +329,6 @@ static void __init init_mips_clocksource(void)
347 } 329 }
348 clocksource_mips.shift = shift; 330 clocksource_mips.shift = shift;
349 clocksource_mips.mult = (u32)temp; 331 clocksource_mips.mult = (u32)temp;
350 clocksource_mips.mask = mips_hpt_mask;
351 332
352 clocksource_register(&clocksource_mips); 333 clocksource_register(&clocksource_mips);
353} 334}
@@ -367,32 +348,36 @@ void __init time_init(void)
367 -xtime.tv_sec, -xtime.tv_nsec); 348 -xtime.tv_sec, -xtime.tv_nsec);
368 349
369 /* Choose appropriate high precision timer routines. */ 350 /* Choose appropriate high precision timer routines. */
370 if (!cpu_has_counter && !mips_hpt_read) 351 if (!cpu_has_counter && !clocksource_mips.read)
371 /* No high precision timer -- sorry. */ 352 /* No high precision timer -- sorry. */
372 mips_hpt_read = null_hpt_read; 353 clocksource_mips.read = null_hpt_read;
373 else if (!mips_hpt_frequency && !mips_timer_state) { 354 else if (!mips_hpt_frequency && !mips_timer_state) {
374 /* A high precision timer of unknown frequency. */ 355 /* A high precision timer of unknown frequency. */
375 if (!mips_hpt_read) 356 if (!clocksource_mips.read)
376 /* No external high precision timer -- use R4k. */ 357 /* No external high precision timer -- use R4k. */
377 mips_hpt_read = c0_hpt_read; 358 clocksource_mips.read = c0_hpt_read;
378 } else { 359 } else {
379 /* We know counter frequency. Or we can get it. */ 360 /* We know counter frequency. Or we can get it. */
380 if (!mips_hpt_read) { 361 if (!clocksource_mips.read) {
381 /* No external high precision timer -- use R4k. */ 362 /* No external high precision timer -- use R4k. */
382 mips_hpt_read = c0_hpt_read; 363 clocksource_mips.read = c0_hpt_read;
383 364
384 if (!mips_timer_state) { 365 if (!mips_timer_state) {
385 /* No external timer interrupt -- use R4k. */ 366 /* No external timer interrupt -- use R4k. */
386 mips_hpt_init = c0_hpt_timer_init;
387 mips_timer_ack = c0_timer_ack; 367 mips_timer_ack = c0_timer_ack;
368 /* Calculate cache parameters. */
369 cycles_per_jiffy =
370 (mips_hpt_frequency + HZ / 2) / HZ;
371 /*
372 * This sets up the high precision
373 * timer for the first interrupt.
374 */
375 c0_hpt_timer_init();
388 } 376 }
389 } 377 }
390 if (!mips_hpt_frequency) 378 if (!mips_hpt_frequency)
391 mips_hpt_frequency = calibrate_hpt(); 379 mips_hpt_frequency = calibrate_hpt();
392 380
393 /* Calculate cache parameters. */
394 cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ;
395
396 /* Report the high precision timer rate for a reference. */ 381 /* Report the high precision timer rate for a reference. */
397 printk("Using %u.%03u MHz high precision timer.\n", 382 printk("Using %u.%03u MHz high precision timer.\n",
398 ((mips_hpt_frequency + 500) / 1000) / 1000, 383 ((mips_hpt_frequency + 500) / 1000) / 1000,
@@ -403,9 +388,6 @@ void __init time_init(void)
403 /* No timer interrupt ack (e.g. i8254). */ 388 /* No timer interrupt ack (e.g. i8254). */
404 mips_timer_ack = null_timer_ack; 389 mips_timer_ack = null_timer_ack;
405 390
406 /* This sets up the high precision timer for the first interrupt. */
407 mips_hpt_init();
408
409 /* 391 /*
410 * Call board specific timer interrupt setup. 392 * Call board specific timer interrupt setup.
411 * 393 *
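The mult/shift pair that init_mips_clocksource() fills in above follows the usual clocksource convention: elapsed nanoseconds are approximately (cycles * mult) >> shift, so mult is chosen close to (NSEC_PER_SEC << shift) / counter_frequency. A quick user-space sketch of that arithmetic; the 100 MHz frequency, the shift value and the helper name are made up for illustration and are not what the kernel computes for any particular board.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

static uint32_t pick_mult(uint32_t hz, uint32_t shift)
{
	/* mult = (NSEC_PER_SEC << shift) / hz, rounded to nearest */
	return (uint32_t)(((NSEC_PER_SEC << shift) + hz / 2) / hz);
}

int main(void)
{
	uint32_t hz = 100000000;	/* pretend the CP0 count runs at 100 MHz */
	uint32_t shift = 22;
	uint32_t mult = pick_mult(hz, shift);
	uint64_t cycles = 250000;	/* 2.5 ms worth of counter ticks */

	printf("mult=%u shift=%u -> %llu ns\n", mult, shift,
	       (unsigned long long)((cycles * mult) >> shift));
	return 0;
}

In general a larger shift gives finer mult resolution but overflows the 32-bit mult sooner for slow counters, which is why a suitable shift has to be searched for rather than fixed.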
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 9fda1b8be3a7..2a932cada244 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -54,6 +54,8 @@ extern asmlinkage void handle_dbe(void);
54extern asmlinkage void handle_sys(void); 54extern asmlinkage void handle_sys(void);
55extern asmlinkage void handle_bp(void); 55extern asmlinkage void handle_bp(void);
56extern asmlinkage void handle_ri(void); 56extern asmlinkage void handle_ri(void);
57extern asmlinkage void handle_ri_rdhwr_vivt(void);
58extern asmlinkage void handle_ri_rdhwr(void);
57extern asmlinkage void handle_cpu(void); 59extern asmlinkage void handle_cpu(void);
58extern asmlinkage void handle_ov(void); 60extern asmlinkage void handle_ov(void);
59extern asmlinkage void handle_tr(void); 61extern asmlinkage void handle_tr(void);
@@ -397,19 +399,6 @@ asmlinkage void do_be(struct pt_regs *regs)
397 force_sig(SIGBUS, current); 399 force_sig(SIGBUS, current);
398} 400}
399 401
400static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
401{
402 unsigned int __user *epc;
403
404 epc = (unsigned int __user *) regs->cp0_epc +
405 ((regs->cp0_cause & CAUSEF_BD) != 0);
406 if (!get_user(*opcode, epc))
407 return 0;
408
409 force_sig(SIGSEGV, current);
410 return 1;
411}
412
413/* 402/*
414 * ll/sc emulation 403 * ll/sc emulation
415 */ 404 */
@@ -544,8 +533,8 @@ static inline int simulate_llsc(struct pt_regs *regs)
544{ 533{
545 unsigned int opcode; 534 unsigned int opcode;
546 535
547 if (unlikely(get_insn_opcode(regs, &opcode))) 536 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
548 return -EFAULT; 537 goto out_sigsegv;
549 538
550 if ((opcode & OPCODE) == LL) { 539 if ((opcode & OPCODE) == LL) {
551 simulate_ll(regs, opcode); 540 simulate_ll(regs, opcode);
@@ -557,6 +546,10 @@ static inline int simulate_llsc(struct pt_regs *regs)
557 } 546 }
558 547
559 return -EFAULT; /* Strange things going on ... */ 548 return -EFAULT; /* Strange things going on ... */
549
550out_sigsegv:
551 force_sig(SIGSEGV, current);
552 return -EFAULT;
560} 553}
561 554
562/* 555/*
@@ -569,8 +562,8 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
569 struct thread_info *ti = task_thread_info(current); 562 struct thread_info *ti = task_thread_info(current);
570 unsigned int opcode; 563 unsigned int opcode;
571 564
572 if (unlikely(get_insn_opcode(regs, &opcode))) 565 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
573 return -EFAULT; 566 goto out_sigsegv;
574 567
575 if (unlikely(compute_return_epc(regs))) 568 if (unlikely(compute_return_epc(regs)))
576 return -EFAULT; 569 return -EFAULT;
@@ -589,6 +582,10 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
589 582
590 /* Not ours. */ 583 /* Not ours. */
591 return -EFAULT; 584 return -EFAULT;
585
586out_sigsegv:
587 force_sig(SIGSEGV, current);
588 return -EFAULT;
592} 589}
593 590
594asmlinkage void do_ov(struct pt_regs *regs) 591asmlinkage void do_ov(struct pt_regs *regs)
@@ -672,10 +669,8 @@ asmlinkage void do_bp(struct pt_regs *regs)
672 unsigned int opcode, bcode; 669 unsigned int opcode, bcode;
673 siginfo_t info; 670 siginfo_t info;
674 671
675 die_if_kernel("Break instruction in kernel code", regs); 672 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
676 673 goto out_sigsegv;
677 if (get_insn_opcode(regs, &opcode))
678 return;
679 674
680 /* 675 /*
681 * There is the ancient bug in the MIPS assemblers that the break 676 * There is the ancient bug in the MIPS assemblers that the break
@@ -696,6 +691,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
696 switch (bcode) { 691 switch (bcode) {
697 case BRK_OVERFLOW << 10: 692 case BRK_OVERFLOW << 10:
698 case BRK_DIVZERO << 10: 693 case BRK_DIVZERO << 10:
694 die_if_kernel("Break instruction in kernel code", regs);
699 if (bcode == (BRK_DIVZERO << 10)) 695 if (bcode == (BRK_DIVZERO << 10))
700 info.si_code = FPE_INTDIV; 696 info.si_code = FPE_INTDIV;
701 else 697 else
@@ -705,9 +701,16 @@ asmlinkage void do_bp(struct pt_regs *regs)
705 info.si_addr = (void __user *) regs->cp0_epc; 701 info.si_addr = (void __user *) regs->cp0_epc;
706 force_sig_info(SIGFPE, &info, current); 702 force_sig_info(SIGFPE, &info, current);
707 break; 703 break;
704 case BRK_BUG:
705 die("Kernel bug detected", regs);
706 break;
708 default: 707 default:
708 die_if_kernel("Break instruction in kernel code", regs);
709 force_sig(SIGTRAP, current); 709 force_sig(SIGTRAP, current);
710 } 710 }
711
712out_sigsegv:
713 force_sig(SIGSEGV, current);
711} 714}
712 715
713asmlinkage void do_tr(struct pt_regs *regs) 716asmlinkage void do_tr(struct pt_regs *regs)
@@ -715,10 +718,8 @@ asmlinkage void do_tr(struct pt_regs *regs)
715 unsigned int opcode, tcode = 0; 718 unsigned int opcode, tcode = 0;
716 siginfo_t info; 719 siginfo_t info;
717 720
718 die_if_kernel("Trap instruction in kernel code", regs); 721 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
719 722 goto out_sigsegv;
720 if (get_insn_opcode(regs, &opcode))
721 return;
722 723
723 /* Immediate versions don't provide a code. */ 724 /* Immediate versions don't provide a code. */
724 if (!(opcode & OPCODE)) 725 if (!(opcode & OPCODE))
@@ -733,6 +734,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
733 switch (tcode) { 734 switch (tcode) {
734 case BRK_OVERFLOW: 735 case BRK_OVERFLOW:
735 case BRK_DIVZERO: 736 case BRK_DIVZERO:
737 die_if_kernel("Trap instruction in kernel code", regs);
736 if (tcode == BRK_DIVZERO) 738 if (tcode == BRK_DIVZERO)
737 info.si_code = FPE_INTDIV; 739 info.si_code = FPE_INTDIV;
738 else 740 else
@@ -742,9 +744,16 @@ asmlinkage void do_tr(struct pt_regs *regs)
742 info.si_addr = (void __user *) regs->cp0_epc; 744 info.si_addr = (void __user *) regs->cp0_epc;
743 force_sig_info(SIGFPE, &info, current); 745 force_sig_info(SIGFPE, &info, current);
744 break; 746 break;
747 case BRK_BUG:
748 die("Kernel bug detected", regs);
749 break;
745 default: 750 default:
751 die_if_kernel("Trap instruction in kernel code", regs);
746 force_sig(SIGTRAP, current); 752 force_sig(SIGTRAP, current);
747 } 753 }
754
755out_sigsegv:
756 force_sig(SIGSEGV, current);
748} 757}
749 758
750asmlinkage void do_ri(struct pt_regs *regs) 759asmlinkage void do_ri(struct pt_regs *regs)
@@ -1423,6 +1432,15 @@ void __init set_uncached_handler (unsigned long offset, void *addr, unsigned lon
1423 memcpy((void *)(uncached_ebase + offset), addr, size); 1432 memcpy((void *)(uncached_ebase + offset), addr, size);
1424} 1433}
1425 1434
1435static int __initdata rdhwr_noopt;
1436static int __init set_rdhwr_noopt(char *str)
1437{
1438 rdhwr_noopt = 1;
1439 return 1;
1440}
1441
1442__setup("rdhwr_noopt", set_rdhwr_noopt);
1443
1426void __init trap_init(void) 1444void __init trap_init(void)
1427{ 1445{
1428 extern char except_vec3_generic, except_vec3_r4000; 1446 extern char except_vec3_generic, except_vec3_r4000;
@@ -1502,7 +1520,9 @@ void __init trap_init(void)
1502 1520
1503 set_except_vector(8, handle_sys); 1521 set_except_vector(8, handle_sys);
1504 set_except_vector(9, handle_bp); 1522 set_except_vector(9, handle_bp);
1505 set_except_vector(10, handle_ri); 1523 set_except_vector(10, rdhwr_noopt ? handle_ri :
1524 (cpu_has_vtag_icache ?
1525 handle_ri_rdhwr_vivt : handle_ri_rdhwr));
1506 set_except_vector(11, handle_cpu); 1526 set_except_vector(11, handle_cpu);
1507 set_except_vector(12, handle_ov); 1527 set_except_vector(12, handle_ov);
1508 set_except_vector(13, handle_tr); 1528 set_except_vector(13, handle_tr);
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 79f0317d84ac..cecff24cc972 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -112,6 +112,7 @@ SECTIONS
112 /* .exit.text is discarded at runtime, not link time, to deal with 112 /* .exit.text is discarded at runtime, not link time, to deal with
113 references from .rodata */ 113 references from .rodata */
114 .exit.text : { *(.exit.text) } 114 .exit.text : { *(.exit.text) }
115 .exit.data : { *(.exit.data) }
115 . = ALIGN(_PAGE_SIZE); 116 . = ALIGN(_PAGE_SIZE);
116 __initramfs_start = .; 117 __initramfs_start = .;
117 .init.ramfs : { *(.init.ramfs) } 118 .init.ramfs : { *(.init.ramfs) }
@@ -139,7 +140,6 @@ SECTIONS
139 140
140 /* Sections to be discarded */ 141 /* Sections to be discarded */
141 /DISCARD/ : { 142 /DISCARD/ : {
142 *(.exit.data)
143 *(.exitcall.exit) 143 *(.exitcall.exit)
144 144
145 /* ABI crap starts here */ 145 /* ABI crap starts here */
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 51ddd2166898..666bef484dcb 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1179,7 +1179,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
1179 size_t ret = count; 1179 size_t ret = count;
1180 struct vpe *v; 1180 struct vpe *v;
1181 1181
1182 minor = iminor(file->f_dentry->d_inode); 1182 minor = iminor(file->f_path.dentry->d_inode);
1183 if ((v = get_vpe(minor)) == NULL) 1183 if ((v = get_vpe(minor)) == NULL)
1184 return -ENODEV; 1184 return -ENODEV;
1185 1185