author		Dave Jones <davej@redhat.com>	2006-12-12 17:41:41 -0500
committer	Dave Jones <davej@redhat.com>	2006-12-12 17:41:41 -0500
commit		c4366889dda8110247be59ca41fddb82951a8c26 (patch)
tree		705c1a996bed8fd48ce94ff33ec9fd00f9b94875 /arch/mips/kernel
parent		db2fb9db5735cc532fd4fc55e94b9a3c3750378e (diff)
parent		e1036502e5263851259d147771226161e5ccc85a (diff)
Merge ../linus
Conflicts: drivers/cpufreq/cpufreq.c
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile           |    5
-rw-r--r--  arch/mips/kernel/asm-offsets.c      |    2
-rw-r--r--  arch/mips/kernel/binfmt_elfn32.c    |    1
-rw-r--r--  arch/mips/kernel/binfmt_elfo32.c    |    1
-rw-r--r--  arch/mips/kernel/cpu-probe.c        |   19
-rw-r--r--  arch/mips/kernel/dma-no-isa.c       |   28
-rw-r--r--  arch/mips/kernel/entry.S            |    3
-rw-r--r--  arch/mips/kernel/genex.S            |   63
-rw-r--r--  arch/mips/kernel/head.S             |    8
-rw-r--r--  arch/mips/kernel/i8259.c            |  179
-rw-r--r--  arch/mips/kernel/irixelf.c          |   10
-rw-r--r--  arch/mips/kernel/irq-msc01.c        |   47
-rw-r--r--  arch/mips/kernel/irq-mv6434x.c      |   64
-rw-r--r--  arch/mips/kernel/irq-rm7000.c       |   61
-rw-r--r--  arch/mips/kernel/irq-rm9000.c       |   55
-rw-r--r--  arch/mips/kernel/irq.c              |   76
-rw-r--r--  arch/mips/kernel/irq_cpu.c          |   90
-rw-r--r--  arch/mips/kernel/kspd.c             |    6
-rw-r--r--  arch/mips/kernel/linux32.c          |  582
-rw-r--r--  arch/mips/kernel/machine_kexec.c    |   85
-rw-r--r--  arch/mips/kernel/module.c           |   15
-rw-r--r--  arch/mips/kernel/process.c          |    8
-rw-r--r--  arch/mips/kernel/r4k_switch.S       |    5
-rw-r--r--  arch/mips/kernel/relocate_kernel.S  |   80
-rw-r--r--  arch/mips/kernel/reset.c            |    2
-rw-r--r--  arch/mips/kernel/rtlx.c             |    6
-rw-r--r--  arch/mips/kernel/scall32-o32.S      |    5
-rw-r--r--  arch/mips/kernel/scall64-64.S       |    3
-rw-r--r--  arch/mips/kernel/scall64-n32.S      |   19
-rw-r--r--  arch/mips/kernel/scall64-o32.S      |    5
-rw-r--r--  arch/mips/kernel/setup.c            |   93
-rw-r--r--  arch/mips/kernel/signal_n32.c       |    1
-rw-r--r--  arch/mips/kernel/smp-mt.c           |  156
-rw-r--r--  arch/mips/kernel/smp.c              |   29
-rw-r--r--  arch/mips/kernel/smtc-asm.S         |    7
-rw-r--r--  arch/mips/kernel/smtc.c             |    2
-rw-r--r--  arch/mips/kernel/stacktrace.c       |    2
-rw-r--r--  arch/mips/kernel/sysirix.c          |   10
-rw-r--r--  arch/mips/kernel/time.c             |  349
-rw-r--r--  arch/mips/kernel/topology.c         |   29
-rw-r--r--  arch/mips/kernel/traps.c            |   74
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S      |   20
-rw-r--r--  arch/mips/kernel/vpe.c              |    2

43 files changed, 820 insertions(+), 1487 deletions(-)
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index cd9cec9e39e9..bbbb8d7cb89b 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -6,7 +6,7 @@ extra-y := head.o init_task.o vmlinux.lds
 
 obj-y		+= cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 		   ptrace.o reset.o semaphore.o setup.o signal.o syscall.o \
-		   time.o traps.o unaligned.o
+		   time.o topology.o traps.o unaligned.o
 
 binfmt_irix-objs	:= irixelf.o irixinv.o irixioctl.o irixsig.o \
 			   irix5sys.o sysirix.o
@@ -45,7 +45,6 @@ obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
 obj-$(CONFIG_MIPS_VPE_APSP_API)	+= rtlx.o
 
-obj-$(CONFIG_NO_ISA)		+= dma-no-isa.o
 obj-$(CONFIG_I8259)		+= i8259.o
 obj-$(CONFIG_IRQ_CPU)		+= irq_cpu.o
 obj-$(CONFIG_IRQ_CPU_RM7K)	+= irq-rm7000.o
@@ -67,6 +66,8 @@ obj-$(CONFIG_64BIT)		+= cpu-bugs64.o
 
 obj-$(CONFIG_I8253)		+= i8253.o
 
+obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
+
 CFLAGS_cpu-bugs64.o	= $(shell if $(CC) $(CFLAGS) -Wa,-mdaddi -c -o /dev/null -xc /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
 
 EXTRA_AFLAGS := $(CFLAGS)
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index e9ce5b3721af..ff88b06f89df 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -22,7 +22,7 @@
 #define offset(string, ptr, member) \
 	__asm__("\n@@@" string "%0" : : "i" (_offset(ptr, member)))
 #define constant(string, member) \
-	__asm__("\n@@@" string "%x0" : : "ri" (member))
+	__asm__("\n@@@" string "%X0" : : "ri" (member))
 #define size(string, size) \
 	__asm__("\n@@@" string "%0" : : "i" (sizeof(size)))
 #define linefeed text("")
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 4a9f1ecefaf2..9b34238d41c0 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -90,7 +90,6 @@ struct elf_prpsinfo32
 	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
 };
 
-#define elf_addr_t	u32
 #define elf_caddr_t	u32
 #define init_elf_binfmt init_elfn32_binfmt
 
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index e31813779895..993f7ec70f35 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -92,7 +92,6 @@ struct elf_prpsinfo32
 	char	pr_psargs[ELF_PRARGSZ];	/* initial part of arg list */
 };
 
-#define elf_addr_t	u32
 #define elf_caddr_t	u32
 #define init_elf_binfmt init_elf32_binfmt
 
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 8485af340ee1..442839e9578c 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -110,9 +110,8 @@ static inline void check_wait(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
 
-	printk("Checking for 'wait' instruction... ");
 	if (nowait) {
-		printk (" disabled.\n");
+		printk("Wait instruction disabled.\n");
 		return;
 	}
 
@@ -120,11 +119,9 @@ static inline void check_wait(void)
 	case CPU_R3081:
 	case CPU_R3081E:
 		cpu_wait = r3081_wait;
-		printk(" available.\n");
 		break;
 	case CPU_TX3927:
 		cpu_wait = r39xx_wait;
-		printk(" available.\n");
 		break;
 	case CPU_R4200:
 /*	case CPU_R4300: */
@@ -146,33 +143,23 @@ static inline void check_wait(void)
 	case CPU_74K:
 	case CPU_PR4450:
 		cpu_wait = r4k_wait;
-		printk(" available.\n");
 		break;
 	case CPU_TX49XX:
 		cpu_wait = r4k_wait_irqoff;
-		printk(" available.\n");
 		break;
 	case CPU_AU1000:
 	case CPU_AU1100:
 	case CPU_AU1500:
 	case CPU_AU1550:
 	case CPU_AU1200:
-		if (allow_au1k_wait) {
+		if (allow_au1k_wait)
 			cpu_wait = au1k_wait;
-			printk(" available.\n");
-		} else
-			printk(" unavailable.\n");
 		break;
 	case CPU_RM9000:
-		if ((c->processor_id & 0x00ff) >= 0x40) {
+		if ((c->processor_id & 0x00ff) >= 0x40)
 			cpu_wait = r4k_wait;
-			printk(" available.\n");
-		} else {
-			printk(" unavailable.\n");
-		}
 		break;
 	default:
-		printk(" unavailable.\n");
 		break;
 	}
 }
diff --git a/arch/mips/kernel/dma-no-isa.c b/arch/mips/kernel/dma-no-isa.c
deleted file mode 100644
index 6df8b07741e3..000000000000
--- a/arch/mips/kernel/dma-no-isa.c
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2004 by Ralf Baechle
- *
- * Dummy ISA DMA functions for systems that don't have ISA but share drivers
- * with ISA such as legacy free PCI.
- */
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/spinlock.h>
-
-DEFINE_SPINLOCK(dma_spin_lock);
-
-int request_dma(unsigned int dmanr, const char * device_id)
-{
-	return -EINVAL;
-}
-
-void free_dma(unsigned int dmanr)
-{
-}
-
-EXPORT_SYMBOL(dma_spin_lock);
-EXPORT_SYMBOL(request_dma);
-EXPORT_SYMBOL(free_dma);
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 417c08ac76eb..f10b6a19f8bf 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -83,7 +83,10 @@ FEXPORT(syscall_exit)
 FEXPORT(restore_all)			# restore full frame
 #ifdef CONFIG_MIPS_MT_SMTC
 /* Detect and execute deferred IPI "interrupts" */
+	LONG_L	s0, TI_REGS($28)
+	LONG_S	sp, TI_REGS($28)
 	jal	deferred_smtc_ipi
+	LONG_S	s0, TI_REGS($28)
 /* Re-arm any temporarily masked interrupts not explicitly "acked" */
 	mfc0	v0, CP0_TCSTATUS
 	ori	v1, v0, TCSTATUS_IXMT
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 5baca16993d0..aacd4a005c5f 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -19,6 +19,7 @@
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>
 #include <asm/war.h>
+#include <asm/page.h>
 
 #define PANIC_PIC(msg)					\
 		.set	push;				\
@@ -378,6 +379,68 @@ NESTED(nmi_handler, PT_SIZE, sp)
 	BUILD_HANDLER dsp dsp sti silent		/* #26 */
 	BUILD_HANDLER reserved reserved sti verbose	/* others */
 
+	.align	5
+	LEAF(handle_ri_rdhwr_vivt)
+#ifdef CONFIG_MIPS_MT_SMTC
+	PANIC_PIC("handle_ri_rdhwr_vivt called")
+#else
+	.set	push
+	.set	noat
+	.set	noreorder
+	/* check if TLB contains a entry for EPC */
+	MFC0	k1, CP0_ENTRYHI
+	andi	k1, 0xff	/* ASID_MASK */
+	MFC0	k0, CP0_EPC
+	PTR_SRL	k0, PAGE_SHIFT + 1
+	PTR_SLL	k0, PAGE_SHIFT + 1
+	or	k1, k0
+	MTC0	k1, CP0_ENTRYHI
+	mtc0_tlbw_hazard
+	tlbp
+	tlb_probe_hazard
+	mfc0	k1, CP0_INDEX
+	.set	pop
+	bltz	k1, handle_ri	/* slow path */
+	/* fall thru */
+#endif
+	END(handle_ri_rdhwr_vivt)
+
+	LEAF(handle_ri_rdhwr)
+	.set	push
+	.set	noat
+	.set	noreorder
+	/* 0x7c03e83b: rdhwr v1,$29 */
+	MFC0	k1, CP0_EPC
+	lui	k0, 0x7c03
+	lw	k1, (k1)
+	ori	k0, 0xe83b
+	.set	reorder
+	bne	k0, k1, handle_ri	/* if not ours */
+	/* The insn is rdhwr.  No need to check CAUSE.BD here. */
+	get_saved_sp	/* k1 := current_thread_info */
+	.set	noreorder
+	MFC0	k0, CP0_EPC
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+	ori	k1, _THREAD_MASK
+	xori	k1, _THREAD_MASK
+	LONG_L	v1, TI_TP_VALUE(k1)
+	LONG_ADDIU	k0, 4
+	jr	k0
+	 rfe
+#else
+	LONG_ADDIU	k0, 4	/* stall on $k0 */
+	MTC0	k0, CP0_EPC
+	/* I hope three instructions between MTC0 and ERET are enough... */
+	ori	k1, _THREAD_MASK
+	xori	k1, _THREAD_MASK
+	LONG_L	v1, TI_TP_VALUE(k1)
+	.set	mips3
+	eret
+	.set	mips0
+#endif
+	.set	pop
+	END(handle_ri_rdhwr)
+
 #ifdef CONFIG_64BIT
 /* A temporary overflow handler used by check_daddi(). */
 
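The handle_ri_rdhwr fast path added above emulates exactly one instruction, "rdhwr v1, $29" (opcode word 0x7c03e83b), which reads the userlocal/TLS register on CPUs lacking RDHWR in hardware. Restated in C for clarity — a conceptual sketch only, since the real handler runs in assembly with just the k0/k1 scratch registers and, as its comment notes, assumes CAUSE.BD is clear:

#define RDHWR_V1_ULR	0x7c03e83bU	/* rdhwr v1, $29 */

static int emulate_rdhwr(struct pt_regs *regs)
{
	unsigned int insn = *(unsigned int *)regs->cp0_epc;

	if (insn != RDHWR_V1_ULR)
		return -1;	/* not ours: take the normal RI path */

	/* return the thread pointer in v1 ($3) and skip the insn */
	regs->regs[3] = (long)current_thread_info()->tp_value;
	regs->cp0_epc += 4;
	return 0;
}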
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 8c6db0fc72f0..9a7811d13db2 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -138,7 +138,7 @@
 EXPORT(stext)					# used for profiling
 EXPORT(_stext)
 
-#if defined(CONFIG_QEMU) || defined(CONFIG_MIPS_SIM)
+#ifdef CONFIG_MIPS_SIM
 	/*
 	 * Give us a fighting chance of running if execution beings at the
 	 * kernel load address.  This is needed because this platform does
@@ -189,7 +189,8 @@ NESTED(kernel_entry, 16, sp)			# kernel entry point
 
 	MTC0	zero, CP0_CONTEXT	# clear context register
 	PTR_LA	$28, init_thread_union
-	PTR_ADDIU sp, $28, _THREAD_SIZE - 32
+	PTR_LI	sp, _THREAD_SIZE - 32
+	PTR_ADDU sp, $28
 	set_saved_sp	sp, t0, t1
 	PTR_SUBU sp, 4 * SZREG		# init stack pointer
 
@@ -249,6 +250,9 @@ NESTED(smp_bootstrap, 16, sp)
 	 */
 	page	swapper_pg_dir, _PGD_ORDER
 #ifdef CONFIG_64BIT
+#if defined(CONFIG_MODULES) && !defined(CONFIG_BUILD_ELF64)
+	page	module_pg_dir, _PGD_ORDER
+#endif
 	page	invalid_pmd_table, _PMD_ORDER
 #endif
 	page	invalid_pte_table, _PTE_ORDER
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 48e3418c217b..b59a676c6d0e 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -19,9 +19,6 @@
 #include <asm/i8259.h>
 #include <asm/io.h>
 
-void enable_8259A_irq(unsigned int irq);
-void disable_8259A_irq(unsigned int irq);
-
 /*
  * This is the 'legacy' 8259A Programmable Interrupt Controller,
  * present in the majority of PC/AT boxes.
@@ -31,34 +28,16 @@ void disable_8259A_irq(unsigned int irq);
  * moves to arch independent land
  */
 
+static int i8259A_auto_eoi;
 DEFINE_SPINLOCK(i8259A_lock);
-
-static void end_8259A_irq (unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)) &&
-	    irq_desc[irq].action)
-		enable_8259A_irq(irq);
-}
-
-#define shutdown_8259A_irq disable_8259A_irq
-
+/* some platforms call this... */
 void mask_and_ack_8259A(unsigned int);
 
-static unsigned int startup_8259A_irq(unsigned int irq)
-{
-	enable_8259A_irq(irq);
-
-	return 0; /* never anything pending */
-}
-
-static struct irq_chip i8259A_irq_type = {
-	.typename = "XT-PIC",
-	.startup = startup_8259A_irq,
-	.shutdown = shutdown_8259A_irq,
-	.enable = enable_8259A_irq,
-	.disable = disable_8259A_irq,
-	.ack = mask_and_ack_8259A,
-	.end = end_8259A_irq,
-};
+static struct irq_chip i8259A_chip = {
+	.name		= "XT-PIC",
+	.mask		= disable_8259A_irq,
+	.unmask		= enable_8259A_irq,
+	.mask_ack	= mask_and_ack_8259A,
+};
 
 /*
@@ -70,8 +49,8 @@ static struct irq_chip i8259A_irq_type = {
  */
 static unsigned int cached_irq_mask = 0xffff;
 
-#define cached_21	(cached_irq_mask)
-#define cached_A1	(cached_irq_mask >> 8)
+#define cached_master_mask	(cached_irq_mask)
+#define cached_slave_mask	(cached_irq_mask >> 8)
 
 void disable_8259A_irq(unsigned int irq)
 {
@@ -81,9 +60,9 @@ void disable_8259A_irq(unsigned int irq)
 	spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask |= mask;
 	if (irq & 8)
-		outb(cached_A1,0xA1);
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
-		outb(cached_21,0x21);
+		outb(cached_master_mask, PIC_MASTER_IMR);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -95,9 +74,9 @@ void enable_8259A_irq(unsigned int irq)
 	spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask &= mask;
 	if (irq & 8)
-		outb(cached_A1,0xA1);
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
 	else
-		outb(cached_21,0x21);
+		outb(cached_master_mask, PIC_MASTER_IMR);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
@@ -109,9 +88,9 @@ int i8259A_irq_pending(unsigned int irq)
 
 	spin_lock_irqsave(&i8259A_lock, flags);
 	if (irq < 8)
-		ret = inb(0x20) & mask;
+		ret = inb(PIC_MASTER_CMD) & mask;
 	else
-		ret = inb(0xA0) & (mask >> 8);
+		ret = inb(PIC_SLAVE_CMD) & (mask >> 8);
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 
 	return ret;
@@ -120,7 +99,7 @@ int i8259A_irq_pending(unsigned int irq)
 void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-	irq_desc[irq].chip = &i8259A_irq_type;
+	set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
 	enable_irq(irq);
 }
 
@@ -136,14 +115,14 @@ static inline int i8259A_irq_real(unsigned int irq)
 	int irqmask = 1 << irq;
 
 	if (irq < 8) {
-		outb(0x0B,0x20);		/* ISR register */
-		value = inb(0x20) & irqmask;
-		outb(0x0A,0x20);		/* back to the IRR register */
+		outb(0x0B,PIC_MASTER_CMD);	/* ISR register */
+		value = inb(PIC_MASTER_CMD) & irqmask;
+		outb(0x0A,PIC_MASTER_CMD);	/* back to the IRR register */
 		return value;
 	}
-	outb(0x0B,0xA0);		/* ISR register */
-	value = inb(0xA0) & (irqmask >> 8);
-	outb(0x0A,0xA0);		/* back to the IRR register */
+	outb(0x0B,PIC_SLAVE_CMD);	/* ISR register */
+	value = inb(PIC_SLAVE_CMD) & (irqmask >> 8);
+	outb(0x0A,PIC_SLAVE_CMD);	/* back to the IRR register */
 	return value;
 }
 
@@ -160,17 +139,19 @@ void mask_and_ack_8259A(unsigned int irq)
 
 	spin_lock_irqsave(&i8259A_lock, flags);
 	/*
-	 * Lightweight spurious IRQ detection. We do not want to overdo
-	 * spurious IRQ handling - it's usually a sign of hardware problems, so
-	 * we only do the checks we can do without slowing down good hardware
-	 * unnecesserily.
+	 * Lightweight spurious IRQ detection. We do not want
+	 * to overdo spurious IRQ handling - it's usually a sign
+	 * of hardware problems, so we only do the checks we can
+	 * do without slowing down good hardware unnecessarily.
 	 *
-	 * Note that IRQ7 and IRQ15 (the two spurious IRQs usually resulting
-	 * from the 8259A-1|2 PICs) occur even if the IRQ is masked in the 8259A.
-	 * Thus we can check spurious 8259A IRQs without doing the quite slow
-	 * i8259A_irq_real() call for every IRQ. This does not cover 100% of
-	 * spurious interrupts, but should be enough to warn the user that
-	 * there is something bad going on ...
+	 * Note that IRQ7 and IRQ15 (the two spurious IRQs
+	 * usually resulting from the 8259A-1|2 PICs) occur
+	 * even if the IRQ is masked in the 8259A.  Thus we
+	 * can check spurious 8259A IRQs without doing the
+	 * quite slow i8259A_irq_real() call for every IRQ.
+	 * This does not cover 100% of spurious interrupts,
+	 * but should be enough to warn the user that there
+	 * is something bad going on ...
 	 */
 	if (cached_irq_mask & irqmask)
 		goto spurious_8259A_irq;
@@ -178,14 +159,14 @@ void mask_and_ack_8259A(unsigned int irq)
 
 handle_real_irq:
 	if (irq & 8) {
-		inb(0xA1);		/* DUMMY - (do we need this?) */
-		outb(cached_A1,0xA1);
-		outb(0x60+(irq&7),0xA0);/* 'Specific EOI' to slave */
-		outb(0x62,0x20);	/* 'Specific EOI' to master-IRQ2 */
+		inb(PIC_SLAVE_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_slave_mask, PIC_SLAVE_IMR);
+		outb(0x60+(irq&7),PIC_SLAVE_CMD);/* 'Specific EOI' to slave */
+		outb(0x60+PIC_CASCADE_IR,PIC_MASTER_CMD); /* 'Specific EOI' to master-IRQ2 */
 	} else {
-		inb(0x21);		/* DUMMY - (do we need this?) */
-		outb(cached_21,0x21);
-		outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
+		inb(PIC_MASTER_IMR);	/* DUMMY - (do we need this?) */
+		outb(cached_master_mask, PIC_MASTER_IMR);
+		outb(0x60+irq,PIC_MASTER_CMD);	/* 'Specific EOI to master */
 	}
 #ifdef CONFIG_MIPS_MT_SMTC
 	if (irq_hwmask[irq] & ST0_IM)
@@ -206,7 +187,7 @@ spurious_8259A_irq:
 		goto handle_real_irq;
 
 	{
-		static int spurious_irq_mask = 0;
+		static int spurious_irq_mask;
 		/*
 		 * At this point we can be sure the IRQ is spurious,
 		 * lets ACK and report it. [once per IRQ]
@@ -227,13 +208,25 @@ spurious_8259A_irq:
 
 static int i8259A_resume(struct sys_device *dev)
 {
-	init_8259A(0);
+	init_8259A(i8259A_auto_eoi);
+	return 0;
+}
+
+static int i8259A_shutdown(struct sys_device *dev)
+{
+	/* Put the i8259A into a quiescent state that
+	 * the kernel initialization code can get it
+	 * out of.
+	 */
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-1 */
 	return 0;
 }
 
 static struct sysdev_class i8259_sysdev_class = {
 	set_kset_name("i8259"),
 	.resume = i8259A_resume,
+	.shutdown = i8259A_shutdown,
 };
 
 static struct sys_device device_i8259A = {
@@ -255,41 +248,41 @@ void __init init_8259A(int auto_eoi)
 {
 	unsigned long flags;
 
+	i8259A_auto_eoi = auto_eoi;
+
 	spin_lock_irqsave(&i8259A_lock, flags);
 
-	outb(0xff, 0x21);	/* mask all of 8259A-1 */
-	outb(0xff, 0xA1);	/* mask all of 8259A-2 */
+	outb(0xff, PIC_MASTER_IMR);	/* mask all of 8259A-1 */
+	outb(0xff, PIC_SLAVE_IMR);	/* mask all of 8259A-2 */
 
 	/*
 	 * outb_p - this has to work on a wide range of PC hardware.
 	 */
-	outb_p(0x11, 0x20);	/* ICW1: select 8259A-1 init */
-	outb_p(0x00, 0x21);	/* ICW2: 8259A-1 IR0-7 mapped to 0x00-0x07 */
-	outb_p(0x04, 0x21);	/* 8259A-1 (the master) has a slave on IR2 */
-	if (auto_eoi)
-		outb_p(0x03, 0x21);	/* master does Auto EOI */
-	else
-		outb_p(0x01, 0x21);	/* master expects normal EOI */
+	outb_p(0x11, PIC_MASTER_CMD);	/* ICW1: select 8259A-1 init */
+	outb_p(I8259A_IRQ_BASE + 0, PIC_MASTER_IMR);	/* ICW2: 8259A-1 IR0 mapped to I8259A_IRQ_BASE + 0x00 */
+	outb_p(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);	/* 8259A-1 (the master) has a slave on IR2 */
+	if (auto_eoi)	/* master does Auto EOI */
+		outb_p(MASTER_ICW4_DEFAULT | PIC_ICW4_AEOI, PIC_MASTER_IMR);
+	else		/* master expects normal EOI */
+		outb_p(MASTER_ICW4_DEFAULT, PIC_MASTER_IMR);
 
-	outb_p(0x11, 0xA0);	/* ICW1: select 8259A-2 init */
-	outb_p(0x08, 0xA1);	/* ICW2: 8259A-2 IR0-7 mapped to 0x08-0x0f */
-	outb_p(0x02, 0xA1);	/* 8259A-2 is a slave on master's IR2 */
-	outb_p(0x01, 0xA1);	/* (slave's support for AEOI in flat mode
-				    is to be investigated) */
+	outb_p(0x11, PIC_SLAVE_CMD);	/* ICW1: select 8259A-2 init */
+	outb_p(I8259A_IRQ_BASE + 8, PIC_SLAVE_IMR);	/* ICW2: 8259A-2 IR0 mapped to I8259A_IRQ_BASE + 0x08 */
+	outb_p(PIC_CASCADE_IR, PIC_SLAVE_IMR);	/* 8259A-2 is a slave on master's IR2 */
+	outb_p(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);	/* (slave's support for AEOI in flat mode is to be investigated) */
 
 	if (auto_eoi)
 		/*
-		 * in AEOI mode we just have to mask the interrupt
+		 * In AEOI mode we just have to mask the interrupt
 		 * when acking.
 		 */
-		i8259A_irq_type.ack = disable_8259A_irq;
+		i8259A_chip.mask_ack = disable_8259A_irq;
 	else
-		i8259A_irq_type.ack = mask_and_ack_8259A;
+		i8259A_chip.mask_ack = mask_and_ack_8259A;
 
 	udelay(100);	/* wait for 8259A to initialize */
 
-	outb(cached_21, 0x21);	/* restore master IRQ mask */
-	outb(cached_A1, 0xA1);	/* restore slave IRQ mask */
+	outb(cached_master_mask, PIC_MASTER_IMR); /* restore master IRQ mask */
+	outb(cached_slave_mask, PIC_SLAVE_IMR);	  /* restore slave IRQ mask */
 
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 }
@@ -302,11 +295,17 @@ static struct irqaction irq2 = {
 };
 
 static struct resource pic1_io_resource = {
-	.name = "pic1", .start = 0x20, .end = 0x21, .flags = IORESOURCE_BUSY
+	.name = "pic1",
+	.start = PIC_MASTER_CMD,
+	.end = PIC_MASTER_IMR,
+	.flags = IORESOURCE_BUSY
 };
 
 static struct resource pic2_io_resource = {
-	.name = "pic2", .start = 0xa0, .end = 0xa1, .flags = IORESOURCE_BUSY
+	.name = "pic2",
+	.start = PIC_SLAVE_CMD,
+	.end = PIC_SLAVE_IMR,
+	.flags = IORESOURCE_BUSY
 };
 
 /*
@@ -323,12 +322,8 @@ void __init init_i8259_irqs (void)
 
 	init_8259A(0);
 
-	for (i = 0; i < 16; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &i8259A_irq_type;
-	}
+	for (i = 0; i < 16; i++)
+		set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
 
-	setup_irq(2, &irq2);
+	setup_irq(PIC_CASCADE_IR, &irq2);
 }
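The i8259.c conversion above is the pattern this merge applies to every MIPS interrupt controller: the old handler model (startup/shutdown/enable/disable/ack/end callbacks poked into irq_desc[] by hand) gives way to an irq_chip that supplies only mask/unmask/ack operations, with the generic handle_level_irq flow driving them via set_irq_chip_and_handler(). A minimal sketch of the new-style registration follows; the chip and function names are illustrative, not part of this patch:

static void example_mask(unsigned int irq)
{
	/* set the controller's mask bit for this irq */
}

static void example_unmask(unsigned int irq)
{
	/* clear the controller's mask bit for this irq */
}

static struct irq_chip example_chip = {
	.name		= "EXAMPLE",
	.ack		= example_mask,
	.mask		= example_mask,
	.mask_ack	= example_mask,
	.unmask		= example_unmask,
};

static void __init example_irq_init(unsigned int base)
{
	int i;

	/* replaces the old open-coded irq_desc[i] initialisation loop */
	for (i = base; i < base + 8; i++)
		set_irq_chip_and_handler(i, &example_chip, handle_level_irq);
}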
diff --git a/arch/mips/kernel/irixelf.c b/arch/mips/kernel/irixelf.c
index ab12c8f01518..37cad5de515c 100644
--- a/arch/mips/kernel/irixelf.c
+++ b/arch/mips/kernel/irixelf.c
@@ -52,10 +52,6 @@ static struct linux_binfmt irix_format = {
 	irix_core_dump, PAGE_SIZE
 };
 
-#ifndef elf_addr_t
-#define elf_addr_t unsigned long
-#endif
-
 #ifdef DEBUG
 /* Debugging routines. */
 static char *get_elf_p_type(Elf32_Word p_type)
@@ -1013,7 +1009,7 @@ static int notesize(struct memelfnote *en)
 	int sz;
 
 	sz = sizeof(struct elf_note);
-	sz += roundup(strlen(en->name), 4);
+	sz += roundup(strlen(en->name) + 1, 4);
 	sz += roundup(en->datasz, 4);
 
 	return sz;
@@ -1032,7 +1028,7 @@ static int writenote(struct memelfnote *men, struct file *file)
 {
 	struct elf_note en;
 
-	en.n_namesz = strlen(men->name);
+	en.n_namesz = strlen(men->name) + 1;
 	en.n_descsz = men->datasz;
 	en.n_type = men->type;
 
@@ -1149,7 +1145,7 @@ static int irix_core_dump(long signr, struct pt_regs * regs, struct file *file)
 	psinfo.pr_pid = prstatus.pr_pid = current->pid;
 	psinfo.pr_ppid = prstatus.pr_ppid = current->parent->pid;
 	psinfo.pr_pgrp = prstatus.pr_pgrp = process_group(current);
-	psinfo.pr_sid = prstatus.pr_sid = current->signal->session;
+	psinfo.pr_sid = prstatus.pr_sid = process_session(current);
 	if (current->pid == current->tgid) {
 		/*
 		 * This is the record for the group leader.  Add in the
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 650a80ca3741..bcaad6696082 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -45,31 +45,6 @@ static inline void unmask_msc_irq(unsigned int irq)
 }
 
 /*
- * Enables the IRQ on SOC-it
- */
-static void enable_msc_irq(unsigned int irq)
-{
-	unmask_msc_irq(irq);
-}
-
-/*
- * Initialize the IRQ on SOC-it
- */
-static unsigned int startup_msc_irq(unsigned int irq)
-{
-	unmask_msc_irq(irq);
-	return 0;
-}
-
-/*
- * Disables the IRQ on SOC-it
- */
-static void disable_msc_irq(unsigned int irq)
-{
-	mask_msc_irq(irq);
-}
-
-/*
  * Masks and ACKs an IRQ
  */
 static void level_mask_and_ack_msc_irq(unsigned int irq)
@@ -136,25 +111,23 @@ msc_bind_eic_interrupt (unsigned int irq, unsigned int set)
 		 (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
 }
 
-#define shutdown_msc_irq	disable_msc_irq
-
 struct irq_chip msc_levelirq_type = {
 	.typename = "SOC-it-Level",
-	.startup = startup_msc_irq,
-	.shutdown = shutdown_msc_irq,
-	.enable = enable_msc_irq,
-	.disable = disable_msc_irq,
 	.ack = level_mask_and_ack_msc_irq,
+	.mask = mask_msc_irq,
+	.mask_ack = level_mask_and_ack_msc_irq,
+	.unmask = unmask_msc_irq,
+	.eoi = unmask_msc_irq,
 	.end = end_msc_irq,
 };
 
 struct irq_chip msc_edgeirq_type = {
 	.typename = "SOC-it-Edge",
-	.startup =startup_msc_irq,
-	.shutdown = shutdown_msc_irq,
-	.enable = enable_msc_irq,
-	.disable = disable_msc_irq,
 	.ack = edge_mask_and_ack_msc_irq,
+	.mask = mask_msc_irq,
+	.mask_ack = edge_mask_and_ack_msc_irq,
+	.unmask = unmask_msc_irq,
+	.eoi = unmask_msc_irq,
 	.end = end_msc_irq,
 };
 
@@ -175,14 +148,14 @@ void __init init_msc_irqs(unsigned int base, msc_irqmap_t *imp, int nirq)
 
 		switch (imp->im_type) {
 		case MSC01_IRQ_EDGE:
-			irq_desc[base+n].chip = &msc_edgeirq_type;
+			set_irq_chip(base+n, &msc_edgeirq_type);
 			if (cpu_has_veic)
 				MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
 			else
 				MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
 			break;
 		case MSC01_IRQ_LEVEL:
-			irq_desc[base+n].chip = &msc_levelirq_type;
+			set_irq_chip(base+n, &msc_levelirq_type);
 			if (cpu_has_veic)
 				MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
 			else
diff --git a/arch/mips/kernel/irq-mv6434x.c b/arch/mips/kernel/irq-mv6434x.c
index 37d106202b83..efbd219845b5 100644
--- a/arch/mips/kernel/irq-mv6434x.c
+++ b/arch/mips/kernel/irq-mv6434x.c
@@ -67,48 +67,6 @@ static inline void unmask_mv64340_irq(unsigned int irq)
 }
 
 /*
- * Enables the IRQ on Marvell Chip
- */
-static void enable_mv64340_irq(unsigned int irq)
-{
-	unmask_mv64340_irq(irq);
-}
-
-/*
- * Initialize the IRQ on Marvell Chip
- */
-static unsigned int startup_mv64340_irq(unsigned int irq)
-{
-	unmask_mv64340_irq(irq);
-	return 0;
-}
-
-/*
- * Disables the IRQ on Marvell Chip
- */
-static void disable_mv64340_irq(unsigned int irq)
-{
-	mask_mv64340_irq(irq);
-}
-
-/*
- * Masks and ACKs an IRQ
- */
-static void mask_and_ack_mv64340_irq(unsigned int irq)
-{
-	mask_mv64340_irq(irq);
-}
-
-/*
- * End IRQ processing
- */
-static void end_mv64340_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_mv64340_irq(irq);
-}
-
-/*
  * Interrupt handler for interrupts coming from the Marvell chip.
  * It could be built in ethernet ports etc...
  */
@@ -133,29 +91,21 @@ void ll_mv64340_irq(void)
 		do_IRQ(ls1bit32(irq_src_high) + irq_base + 32);
 }
 
-#define shutdown_mv64340_irq	disable_mv64340_irq
-
 struct irq_chip mv64340_irq_type = {
 	.typename = "MV-64340",
-	.startup = startup_mv64340_irq,
-	.shutdown = shutdown_mv64340_irq,
-	.enable = enable_mv64340_irq,
-	.disable = disable_mv64340_irq,
-	.ack = mask_and_ack_mv64340_irq,
-	.end = end_mv64340_irq,
+	.ack = mask_mv64340_irq,
+	.mask = mask_mv64340_irq,
+	.mask_ack = mask_mv64340_irq,
+	.unmask = unmask_mv64340_irq,
 };
 
 void __init mv64340_irq_init(unsigned int base)
 {
 	int i;
 
-	/* Reset irq handlers pointers to NULL */
-	for (i = base; i < base + 64; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = 0;
-		irq_desc[i].depth = 2;
-		irq_desc[i].chip = &mv64340_irq_type;
-	}
+	for (i = base; i < base + 64; i++)
+		set_irq_chip_and_handler(i, &mv64340_irq_type,
+					 handle_level_irq);
 
 	irq_base = base;
 }
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 6b54c7109e2e..123324ba8c14 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -29,56 +29,12 @@ static inline void mask_rm7k_irq(unsigned int irq)
 	clear_c0_intcontrol(0x100 << (irq - irq_base));
 }
 
-static inline void rm7k_cpu_irq_enable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	unmask_rm7k_irq(irq);
-	local_irq_restore(flags);
-}
-
-static void rm7k_cpu_irq_disable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	mask_rm7k_irq(irq);
-	local_irq_restore(flags);
-}
-
-static unsigned int rm7k_cpu_irq_startup(unsigned int irq)
-{
-	rm7k_cpu_irq_enable(irq);
-
-	return 0;
-}
-
-#define rm7k_cpu_irq_shutdown	rm7k_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues.  Same for rm7k_cpu_irq_end.
- */
-static void rm7k_cpu_irq_ack(unsigned int irq)
-{
-	mask_rm7k_irq(irq);
-}
-
-static void rm7k_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_rm7k_irq(irq);
-}
-
 static struct irq_chip rm7k_irq_controller = {
 	.typename = "RM7000",
-	.startup = rm7k_cpu_irq_startup,
-	.shutdown = rm7k_cpu_irq_shutdown,
-	.enable = rm7k_cpu_irq_enable,
-	.disable = rm7k_cpu_irq_disable,
-	.ack = rm7k_cpu_irq_ack,
-	.end = rm7k_cpu_irq_end,
+	.ack = mask_rm7k_irq,
+	.mask = mask_rm7k_irq,
+	.mask_ack = mask_rm7k_irq,
+	.unmask = unmask_rm7k_irq,
 };
 
 void __init rm7k_cpu_irq_init(int base)
@@ -87,12 +43,9 @@ void __init rm7k_cpu_irq_init(int base)
 
 	clear_c0_intcontrol(0x00000f00);		/* Mask all */
 
-	for (i = base; i < base + 4; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &rm7k_irq_controller;
-	}
+	for (i = base; i < base + 4; i++)
+		set_irq_chip_and_handler(i, &rm7k_irq_controller,
+					 handle_level_irq);
 
 	irq_base = base;
 }
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index 62f011ba97a2..0e6f4c5349d2 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -48,15 +48,6 @@ static void rm9k_cpu_irq_disable(unsigned int irq)
 	local_irq_restore(flags);
 }
 
-static unsigned int rm9k_cpu_irq_startup(unsigned int irq)
-{
-	rm9k_cpu_irq_enable(irq);
-
-	return 0;
-}
-
-#define rm9k_cpu_irq_shutdown	rm9k_cpu_irq_disable
-
 /*
  * Performance counter interrupts are global on all processors.
  */
@@ -89,40 +80,22 @@ static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
 	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 0, 1);
 }
 
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues.  Same for rm9k_cpu_irq_end.
- */
-static void rm9k_cpu_irq_ack(unsigned int irq)
-{
-	mask_rm9k_irq(irq);
-}
-
-static void rm9k_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_rm9k_irq(irq);
-}
-
 static struct irq_chip rm9k_irq_controller = {
 	.typename = "RM9000",
-	.startup = rm9k_cpu_irq_startup,
-	.shutdown = rm9k_cpu_irq_shutdown,
-	.enable = rm9k_cpu_irq_enable,
-	.disable = rm9k_cpu_irq_disable,
-	.ack = rm9k_cpu_irq_ack,
-	.end = rm9k_cpu_irq_end,
+	.ack = mask_rm9k_irq,
+	.mask = mask_rm9k_irq,
+	.mask_ack = mask_rm9k_irq,
+	.unmask = unmask_rm9k_irq,
 };
 
 static struct irq_chip rm9k_perfcounter_irq = {
 	.typename = "RM9000",
 	.startup = rm9k_perfcounter_irq_startup,
 	.shutdown = rm9k_perfcounter_irq_shutdown,
-	.enable = rm9k_cpu_irq_enable,
-	.disable = rm9k_cpu_irq_disable,
-	.ack = rm9k_cpu_irq_ack,
-	.end = rm9k_cpu_irq_end,
+	.ack = mask_rm9k_irq,
+	.mask = mask_rm9k_irq,
+	.mask_ack = mask_rm9k_irq,
+	.unmask = unmask_rm9k_irq,
 };
 
 unsigned int rm9000_perfcount_irq;
@@ -135,15 +108,13 @@ void __init rm9k_cpu_irq_init(int base)
 
 	clear_c0_intcontrol(0x0000f000);		/* Mask all */
 
-	for (i = base; i < base + 4; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &rm9k_irq_controller;
-	}
+	for (i = base; i < base + 4; i++)
+		set_irq_chip_and_handler(i, &rm9k_irq_controller,
+					 handle_level_irq);
 
 	rm9000_perfcount_irq = base + 1;
-	irq_desc[rm9000_perfcount_irq].chip = &rm9k_perfcounter_irq;
+	set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
+				 handle_level_irq);
 
 	irq_base = base;
 }
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index dd24434392b6..2fe4c868a801 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -26,6 +26,48 @@
 #include <asm/system.h>
 #include <asm/uaccess.h>
 
+static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
+
+int __devinit allocate_irqno(void)
+{
+	int irq;
+
+again:
+	irq = find_first_zero_bit(irq_map, NR_IRQS);
+
+	if (irq >= NR_IRQS)
+		return -ENOSPC;
+
+	if (test_and_set_bit(irq, irq_map))
+		goto again;
+
+	return irq;
+}
+
+EXPORT_SYMBOL_GPL(allocate_irqno);
+
+/*
+ * Allocate the 16 legacy interrupts for i8259 devices.  This happens early
+ * in the kernel initialization so treating allocation failure as BUG() is
+ * ok.
+ */
+void __init alloc_legacy_irqno(void)
+{
+	int i;
+
+	for (i = 0; i <= 16; i++)
+		BUG_ON(test_and_set_bit(i, irq_map));
+}
+
+void __devinit free_irqno(unsigned int irq)
+{
+	smp_mb__before_clear_bit();
+	clear_bit(irq, irq_map);
+	smp_mb__after_clear_bit();
+}
+
+EXPORT_SYMBOL_GPL(free_irqno);
+
 /*
  * 'what should we do if we get a hw irq event on an illegal vector'.
  * each architecture has to answer this themselves.
@@ -46,25 +88,6 @@ atomic_t irq_err_count;
 unsigned long irq_hwmask[NR_IRQS];
 #endif /* CONFIG_MIPS_MT_SMTC */
 
-#undef do_IRQ
-
-/*
- * do_IRQ handles all normal device IRQ's (the special
- * SMP cross-CPU interrupts have their own specific
- * handlers).
- */
-asmlinkage unsigned int do_IRQ(unsigned int irq)
-{
-	irq_enter();
-
-	__DO_IRQ_SMTC_HOOK();
-	__do_IRQ(irq);
-
-	irq_exit();
-
-	return 1;
-}
-
 /*
  * Generic, controller-independent functions:
  */
@@ -94,7 +117,7 @@ int show_interrupts(struct seq_file *p, void *v)
 		for_each_online_cpu(j)
 			seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
 #endif
-		seq_printf(p, " %14s", irq_desc[i].chip->typename);
+		seq_printf(p, " %14s", irq_desc[i].chip->name);
 		seq_printf(p, " %s", action->name);
 
 		for (action=action->next; action; action = action->next)
@@ -130,19 +153,6 @@ __setup("nokgdb", nokgdb);
 
 void __init init_IRQ(void)
 {
-	int i;
-
-	for (i = 0; i < NR_IRQS; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &no_irq_chip;
-		spin_lock_init(&irq_desc[i].lock);
-#ifdef CONFIG_MIPS_MT_SMTC
-		irq_hwmask[i] = 0;
-#endif /* CONFIG_MIPS_MT_SMTC */
-	}
-
 	arch_init_irq();
 
 #ifdef CONFIG_KGDB
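The irq.c hunk above introduces a small bitmap allocator for dynamic IRQ numbers. Note the shape of allocate_irqno(): find_first_zero_bit() is not atomic against concurrent allocators, so the candidate is claimed with test_and_set_bit() and the search retried on a race — that is what the "again:" loop does. A sketch of how a platform might use the pair; the caller shown is hypothetical, not from this patch:

static int example_probe(void)
{
	int irq = allocate_irqno();

	if (irq < 0)		/* -ENOSPC: all NR_IRQS numbers taken */
		return irq;

	/* ... set_irq_chip_and_handler(irq, ...); request_irq(irq, ...); ... */

	return irq;
}

/* and on teardown: free_irqno(irq); */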
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 9bb21c7f2149..fcc86b96ccf6 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -50,58 +50,13 @@ static inline void mask_mips_irq(unsigned int irq)
 	irq_disable_hazard();
 }
 
-static inline void mips_cpu_irq_enable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	unmask_mips_irq(irq);
-	back_to_back_c0_hazard();
-	local_irq_restore(flags);
-}
-
-static void mips_cpu_irq_disable(unsigned int irq)
-{
-	unsigned long flags;
-
-	local_irq_save(flags);
-	mask_mips_irq(irq);
-	back_to_back_c0_hazard();
-	local_irq_restore(flags);
-}
-
-static unsigned int mips_cpu_irq_startup(unsigned int irq)
-{
-	mips_cpu_irq_enable(irq);
-
-	return 0;
-}
-
-#define	mips_cpu_irq_shutdown	mips_cpu_irq_disable
-
-/*
- * While we ack the interrupt interrupts are disabled and thus we don't need
- * to deal with concurrency issues.  Same for mips_cpu_irq_end.
- */
-static void mips_cpu_irq_ack(unsigned int irq)
-{
-	mask_mips_irq(irq);
-}
-
-static void mips_cpu_irq_end(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS)))
-		unmask_mips_irq(irq);
-}
-
 static struct irq_chip mips_cpu_irq_controller = {
 	.typename	= "MIPS",
-	.startup	= mips_cpu_irq_startup,
-	.shutdown	= mips_cpu_irq_shutdown,
-	.enable		= mips_cpu_irq_enable,
-	.disable	= mips_cpu_irq_disable,
-	.ack		= mips_cpu_irq_ack,
-	.end		= mips_cpu_irq_end,
+	.ack		= mask_mips_irq,
+	.mask		= mask_mips_irq,
+	.mask_ack	= mask_mips_irq,
+	.unmask		= unmask_mips_irq,
+	.eoi		= unmask_mips_irq,
 };
 
 /*
@@ -110,8 +65,6 @@ static struct irq_chip mips_cpu_irq_controller = {
 
 #define unmask_mips_mt_irq	unmask_mips_irq
 #define mask_mips_mt_irq	mask_mips_irq
-#define mips_mt_cpu_irq_enable	mips_cpu_irq_enable
-#define mips_mt_cpu_irq_disable	mips_cpu_irq_disable
 
 static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
 {
@@ -119,13 +72,11 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
 
 	clear_c0_cause(0x100 << (irq - mips_cpu_irq_base));
 	evpe(vpflags);
-	mips_mt_cpu_irq_enable(irq);
+	unmask_mips_mt_irq(irq);
 
 	return 0;
 }
 
-#define mips_mt_cpu_irq_shutdown	mips_mt_cpu_irq_disable
-
 /*
  * While we ack the interrupt interrupts are disabled and thus we don't need
  * to deal with concurrency issues.  Same for mips_cpu_irq_end.
@@ -138,16 +89,14 @@ static void mips_mt_cpu_irq_ack(unsigned int irq)
 	mask_mips_mt_irq(irq);
 }
 
-#define mips_mt_cpu_irq_end mips_cpu_irq_end
-
 static struct irq_chip mips_mt_cpu_irq_controller = {
 	.typename	= "MIPS",
 	.startup	= mips_mt_cpu_irq_startup,
-	.shutdown	= mips_mt_cpu_irq_shutdown,
-	.enable		= mips_mt_cpu_irq_enable,
-	.disable	= mips_mt_cpu_irq_disable,
 	.ack		= mips_mt_cpu_irq_ack,
-	.end		= mips_mt_cpu_irq_end,
+	.mask		= mask_mips_mt_irq,
+	.mask_ack	= mips_mt_cpu_irq_ack,
+	.unmask		= unmask_mips_mt_irq,
+	.eoi		= unmask_mips_mt_irq,
 };
 
 void __init mips_cpu_irq_init(int irq_base)
@@ -163,19 +112,12 @@ void __init mips_cpu_irq_init(int irq_base)
 	 * leave them uninitialized for other processors.
 	 */
 	if (cpu_has_mipsmt)
-		for (i = irq_base; i < irq_base + 2; i++) {
-			irq_desc[i].status = IRQ_DISABLED;
-			irq_desc[i].action = NULL;
-			irq_desc[i].depth = 1;
-			irq_desc[i].chip = &mips_mt_cpu_irq_controller;
-		}
-
-	for (i = irq_base + 2; i < irq_base + 8; i++) {
-		irq_desc[i].status = IRQ_DISABLED;
-		irq_desc[i].action = NULL;
-		irq_desc[i].depth = 1;
-		irq_desc[i].chip = &mips_cpu_irq_controller;
-	}
+		for (i = irq_base; i < irq_base + 2; i++)
+			set_irq_chip(i, &mips_mt_cpu_irq_controller);
+
+	for (i = irq_base + 2; i < irq_base + 8; i++)
+		set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
+					 handle_level_irq);
 
 	mips_cpu_irq_base = irq_base;
 }
diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
index f06a144c7881..5929f883e46b 100644
--- a/arch/mips/kernel/kspd.c
+++ b/arch/mips/kernel/kspd.c
@@ -301,7 +301,7 @@ static void sp_cleanup(void)
 	for (;;) {
 		unsigned long set;
 		i = j * __NFDBITS;
-		if (i >= fdt->max_fdset || i >= fdt->max_fds)
+		if (i >= fdt->max_fds)
 			break;
 		set = fdt->open_fds->fds_bits[j++];
 		while (set) {
@@ -319,7 +319,7 @@ static void sp_cleanup(void)
 static int channel_open = 0;
 
 /* the work handler */
-static void sp_work(void *data)
+static void sp_work(struct work_struct *unused)
 {
 	if (!channel_open) {
 		if( rtlx_open(RTLX_CHANNEL_SYSIO, 1) != 0) {
@@ -354,7 +354,7 @@ static void startwork(int vpe)
 			return;
 		}
 
-		INIT_WORK(&work, sp_work, NULL);
+		INIT_WORK(&work, sp_work);
 		queue_work(workqueue, &work);
 	} else
 		queue_work(workqueue, &work);
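The kspd.c hunk follows the 2.6.20 workqueue API change: INIT_WORK() drops its third argument, and work handlers now receive the struct work_struct itself, recovering any private data with container_of() on an embedding structure. A sketch of the general pattern — the context structure here is illustrative, not part of this patch:

struct example_ctx {
	int			channel;
	struct work_struct	work;
};

static void example_work(struct work_struct *work)
{
	struct example_ctx *ctx = container_of(work, struct example_ctx, work);

	/* ... use ctx->channel ... */
}

/* setup: INIT_WORK(&ctx->work, example_work); queue_work(wq, &ctx->work); */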
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
index 53f4171fc188..b061c9aa6302 100644
--- a/arch/mips/kernel/linux32.c
+++ b/arch/mips/kernel/linux32.c
@@ -382,531 +382,6 @@ asmlinkage int sys32_sched_rr_get_interval(compat_pid_t pid,
 	return ret;
 }
 
-struct msgbuf32 { s32 mtype; char mtext[1]; };
-
-struct ipc_perm32
-{
-	key_t key;
-	__compat_uid_t uid;
-	__compat_gid_t gid;
-	__compat_uid_t cuid;
-	__compat_gid_t cgid;
-	compat_mode_t mode;
-	unsigned short seq;
-};
-
-struct ipc64_perm32 {
-	key_t key;
-	__compat_uid_t uid;
-	__compat_gid_t gid;
-	__compat_uid_t cuid;
-	__compat_gid_t cgid;
-	compat_mode_t mode;
-	unsigned short seq;
-	unsigned short __pad1;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct semid_ds32 {
-	struct ipc_perm32 sem_perm;	/* permissions .. see ipc.h */
-	compat_time_t sem_otime;	/* last semop time */
-	compat_time_t sem_ctime;	/* last change time */
-	u32 sem_base;			/* ptr to first semaphore in array */
-	u32 sem_pending;		/* pending operations to be processed */
-	u32 sem_pending_last;		/* last pending operation */
-	u32 undo;			/* undo requests on this array */
-	unsigned short sem_nsems;	/* no. of semaphores in array */
-};
-
-struct semid64_ds32 {
-	struct ipc64_perm32 sem_perm;
-	compat_time_t sem_otime;
-	compat_time_t sem_ctime;
-	unsigned int sem_nsems;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct msqid_ds32
-{
-	struct ipc_perm32 msg_perm;
-	u32 msg_first;
-	u32 msg_last;
-	compat_time_t msg_stime;
-	compat_time_t msg_rtime;
-	compat_time_t msg_ctime;
-	u32 wwait;
-	u32 rwait;
-	unsigned short msg_cbytes;
-	unsigned short msg_qnum;
-	unsigned short msg_qbytes;
-	compat_ipc_pid_t msg_lspid;
-	compat_ipc_pid_t msg_lrpid;
-};
-
-struct msqid64_ds32 {
-	struct ipc64_perm32 msg_perm;
-	compat_time_t msg_stime;
-	unsigned int __unused1;
-	compat_time_t msg_rtime;
-	unsigned int __unused2;
-	compat_time_t msg_ctime;
-	unsigned int __unused3;
-	unsigned int msg_cbytes;
-	unsigned int msg_qnum;
-	unsigned int msg_qbytes;
-	compat_pid_t msg_lspid;
-	compat_pid_t msg_lrpid;
-	unsigned int __unused4;
-	unsigned int __unused5;
-};
-
-struct shmid_ds32 {
-	struct ipc_perm32 shm_perm;
-	int shm_segsz;
-	compat_time_t shm_atime;
-	compat_time_t shm_dtime;
-	compat_time_t shm_ctime;
-	compat_ipc_pid_t shm_cpid;
-	compat_ipc_pid_t shm_lpid;
-	unsigned short shm_nattch;
-};
-
-struct shmid64_ds32 {
-	struct ipc64_perm32 shm_perm;
-	compat_size_t shm_segsz;
-	compat_time_t shm_atime;
-	compat_time_t shm_dtime;
-	compat_time_t shm_ctime;
-	compat_pid_t shm_cpid;
-	compat_pid_t shm_lpid;
-	unsigned int shm_nattch;
-	unsigned int __unused1;
-	unsigned int __unused2;
-};
-
-struct ipc_kludge32 {
-	u32 msgp;
-	s32 msgtyp;
-};
-
-static int
-do_sys32_semctl(int first, int second, int third, void __user *uptr)
-{
-	union semun fourth;
-	u32 pad;
-	int err, err2;
-	struct semid64_ds s;
-	mm_segment_t old_fs;
-
-	if (!uptr)
-		return -EINVAL;
-	err = -EFAULT;
-	if (get_user (pad, (u32 __user *)uptr))
-		return err;
-	if ((third & ~IPC_64) == SETVAL)
-		fourth.val = (int)pad;
-	else
-		fourth.__pad = (void __user *)A(pad);
-	switch (third & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case IPC_SET:
-	case SEM_INFO:
-	case GETVAL:
-	case GETPID:
-	case GETNCNT:
-	case GETZCNT:
-	case GETALL:
-	case SETVAL:
-	case SETALL:
-		err = sys_semctl (first, second, third, fourth);
-		break;
-
-	case IPC_STAT:
-	case SEM_STAT:
-		fourth.__pad = (struct semid64_ds __user *)&s;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_semctl(first, second, third | IPC_64, fourth);
-		set_fs(old_fs);
-
-		if (third & IPC_64) {
-			struct semid64_ds32 __user *usp64 = (struct semid64_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp64, sizeof(*usp64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp64->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp64->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp64->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp64->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp64->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp64->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp64->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp64->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp64->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp64->sem_nsems);
-		} else {
-			struct semid_ds32 __user *usp32 = (struct semid_ds32 __user *) A(pad);
-
-			if (!access_ok(VERIFY_WRITE, usp32, sizeof(*usp32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(s.sem_perm.key, &usp32->sem_perm.key);
-			err2 |= __put_user(s.sem_perm.uid, &usp32->sem_perm.uid);
-			err2 |= __put_user(s.sem_perm.gid, &usp32->sem_perm.gid);
-			err2 |= __put_user(s.sem_perm.cuid, &usp32->sem_perm.cuid);
-			err2 |= __put_user(s.sem_perm.cgid, &usp32->sem_perm.cgid);
-			err2 |= __put_user(s.sem_perm.mode, &usp32->sem_perm.mode);
-			err2 |= __put_user(s.sem_perm.seq, &usp32->sem_perm.seq);
-			err2 |= __put_user(s.sem_otime, &usp32->sem_otime);
-			err2 |= __put_user(s.sem_ctime, &usp32->sem_ctime);
-			err2 |= __put_user(s.sem_nsems, &usp32->sem_nsems);
-		}
-		if (err2)
-			err = -EFAULT;
-		break;
-
-	default:
-		err = - EINVAL;
-		break;
-	}
-
-	return err;
-}
-
-static int
-do_sys32_msgsnd (int first, int second, int third, void __user *uptr)
-{
-	struct msgbuf32 __user *up = (struct msgbuf32 __user *)uptr;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (second < 0)
-		return -EINVAL;
-	p = kmalloc (second + sizeof (struct msgbuf)
-		     + 4, GFP_USER);
-	if (!p)
-		return -ENOMEM;
-	err = get_user (p->mtype, &up->mtype);
-	if (err)
-		goto out;
-	err |= __copy_from_user (p->mtext, &up->mtext, second);
-	if (err)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgsnd (first, (struct msgbuf __user *)p, second, third);
-	set_fs (old_fs);
-out:
-	kfree (p);
-
-	return err;
-}
-
-static int
-do_sys32_msgrcv (int first, int second, int msgtyp, int third,
-		 int version, void __user *uptr)
-{
-	struct msgbuf32 __user *up;
-	struct msgbuf *p;
-	mm_segment_t old_fs;
-	int err;
-
-	if (!version) {
-		struct ipc_kludge32 __user *uipck = (struct ipc_kludge32 __user *)uptr;
-		struct ipc_kludge32 ipck;
-
-		err = -EINVAL;
-		if (!uptr)
-			goto out;
-		err = -EFAULT;
-		if (copy_from_user (&ipck, uipck, sizeof (struct ipc_kludge32)))
-			goto out;
-		uptr = (void __user *)AA(ipck.msgp);
-		msgtyp = ipck.msgtyp;
-	}
-
-	if (second < 0)
-		return -EINVAL;
-	err = -ENOMEM;
-	p = kmalloc (second + sizeof (struct msgbuf) + 4, GFP_USER);
-	if (!p)
-		goto out;
-	old_fs = get_fs ();
-	set_fs (KERNEL_DS);
-	err = sys_msgrcv (first, (struct msgbuf __user *)p, second + 4, msgtyp, third);
-	set_fs (old_fs);
-	if (err < 0)
-		goto free_then_out;
-	up = (struct msgbuf32 __user *)uptr;
-	if (put_user (p->mtype, &up->mtype) ||
-	    __copy_to_user (&up->mtext, p->mtext, err))
-		err = -EFAULT;
-free_then_out:
-	kfree (p);
-out:
-	return err;
-}
-
-static int
-do_sys32_msgctl (int first, int second, void __user *uptr)
-{
-	int err = -EINVAL, err2;
-	struct msqid64_ds m;
-	struct msqid_ds32 __user *up32 = (struct msqid_ds32 __user *)uptr;
-	struct msqid64_ds32 __user *up64 = (struct msqid64_ds32 __user *)uptr;
-	mm_segment_t old_fs;
-
-	switch (second & ~IPC_64) {
-	case IPC_INFO:
-	case IPC_RMID:
-	case MSG_INFO:
-		err = sys_msgctl (first, second, (struct msqid_ds __user *)uptr);
-		break;
-
-	case IPC_SET:
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up64->msg_qbytes);
-		} else {
-			if (!access_ok(VERIFY_READ, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err = __get_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err |= __get_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err |= __get_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err |= __get_user(m.msg_qbytes, &up32->msg_qbytes);
-		}
-		if (err)
-			break;
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		break;
-
-	case IPC_STAT:
-	case MSG_STAT:
-		old_fs = get_fs();
-		set_fs(KERNEL_DS);
-		err = sys_msgctl(first, second | IPC_64, (struct msqid_ds __user *)&m);
-		set_fs(old_fs);
-		if (second & IPC_64) {
-			if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up64->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up64->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up64->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up64->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up64->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up64->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up64->msg_perm.seq);
-			err2 |= __put_user(m.msg_stime, &up64->msg_stime);
-			err2 |= __put_user(m.msg_rtime, &up64->msg_rtime);
-			err2 |= __put_user(m.msg_ctime, &up64->msg_ctime);
-			err2 |= __put_user(m.msg_cbytes, &up64->msg_cbytes);
-			err2 |= __put_user(m.msg_qnum, &up64->msg_qnum);
-			err2 |= __put_user(m.msg_qbytes, &up64->msg_qbytes);
-			err2 |= __put_user(m.msg_lspid, &up64->msg_lspid);
-			err2 |= __put_user(m.msg_lrpid, &up64->msg_lrpid);
-			if (err2)
-				err = -EFAULT;
-		} else {
-			if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
-				err = -EFAULT;
-				break;
-			}
-			err2 = __put_user(m.msg_perm.key, &up32->msg_perm.key);
-			err2 |= __put_user(m.msg_perm.uid, &up32->msg_perm.uid);
-			err2 |= __put_user(m.msg_perm.gid, &up32->msg_perm.gid);
-			err2 |= __put_user(m.msg_perm.cuid, &up32->msg_perm.cuid);
-			err2 |= __put_user(m.msg_perm.cgid, &up32->msg_perm.cgid);
-			err2 |= __put_user(m.msg_perm.mode, &up32->msg_perm.mode);
-			err2 |= __put_user(m.msg_perm.seq, &up32->msg_perm.seq);
741 err2 |= __put_user(m.msg_stime, &up32->msg_stime);
742 err2 |= __put_user(m.msg_rtime, &up32->msg_rtime);
743 err2 |= __put_user(m.msg_ctime, &up32->msg_ctime);
744 err2 |= __put_user(m.msg_cbytes, &up32->msg_cbytes);
745 err2 |= __put_user(m.msg_qnum, &up32->msg_qnum);
746 err2 |= __put_user(m.msg_qbytes, &up32->msg_qbytes);
747 err2 |= __put_user(m.msg_lspid, &up32->msg_lspid);
748 err2 |= __put_user(m.msg_lrpid, &up32->msg_lrpid);
749 if (err2)
750 err = -EFAULT;
751 }
752 break;
753 }
754
755 return err;
756}
757
758static int
759do_sys32_shmat (int first, int second, int third, int version, void __user *uptr)
760{
761 unsigned long raddr;
762 u32 __user *uaddr = (u32 __user *)A((u32)third);
763 int err = -EINVAL;
764
765 if (version == 1)
766 return err;
767 err = do_shmat (first, uptr, second, &raddr);
768 if (err)
769 return err;
770 err = put_user (raddr, uaddr);
771 return err;
772}
773
774struct shm_info32 {
775 int used_ids;
776 u32 shm_tot, shm_rss, shm_swp;
777 u32 swap_attempts, swap_successes;
778};
779
780static int
781do_sys32_shmctl (int first, int second, void __user *uptr)
782{
783 struct shmid64_ds32 __user *up64 = (struct shmid64_ds32 __user *)uptr;
784 struct shmid_ds32 __user *up32 = (struct shmid_ds32 __user *)uptr;
785 struct shm_info32 __user *uip = (struct shm_info32 __user *)uptr;
786 int err = -EFAULT, err2;
787 struct shmid64_ds s64;
788 mm_segment_t old_fs;
789 struct shm_info si;
790 struct shmid_ds s;
791
792 switch (second & ~IPC_64) {
793 case IPC_INFO:
794 second = IPC_INFO; /* So that we don't have to translate it */
795 case IPC_RMID:
796 case SHM_LOCK:
797 case SHM_UNLOCK:
798 err = sys_shmctl(first, second, (struct shmid_ds __user *)uptr);
799 break;
800 case IPC_SET:
801 if (second & IPC_64) {
802 err = get_user(s.shm_perm.uid, &up64->shm_perm.uid);
803 err |= get_user(s.shm_perm.gid, &up64->shm_perm.gid);
804 err |= get_user(s.shm_perm.mode, &up64->shm_perm.mode);
805 } else {
806 err = get_user(s.shm_perm.uid, &up32->shm_perm.uid);
807 err |= get_user(s.shm_perm.gid, &up32->shm_perm.gid);
808 err |= get_user(s.shm_perm.mode, &up32->shm_perm.mode);
809 }
810 if (err)
811 break;
812 old_fs = get_fs();
813 set_fs(KERNEL_DS);
814 err = sys_shmctl(first, second & ~IPC_64, (struct shmid_ds __user *)&s);
815 set_fs(old_fs);
816 break;
817
818 case IPC_STAT:
819 case SHM_STAT:
820 old_fs = get_fs();
821 set_fs(KERNEL_DS);
822 err = sys_shmctl(first, second | IPC_64, (void __user *) &s64);
823 set_fs(old_fs);
824 if (err < 0)
825 break;
826 if (second & IPC_64) {
827 if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) {
828 err = -EFAULT;
829 break;
830 }
831 err2 = __put_user(s64.shm_perm.key, &up64->shm_perm.key);
832 err2 |= __put_user(s64.shm_perm.uid, &up64->shm_perm.uid);
833 err2 |= __put_user(s64.shm_perm.gid, &up64->shm_perm.gid);
834 err2 |= __put_user(s64.shm_perm.cuid, &up64->shm_perm.cuid);
835 err2 |= __put_user(s64.shm_perm.cgid, &up64->shm_perm.cgid);
836 err2 |= __put_user(s64.shm_perm.mode, &up64->shm_perm.mode);
837 err2 |= __put_user(s64.shm_perm.seq, &up64->shm_perm.seq);
838 err2 |= __put_user(s64.shm_atime, &up64->shm_atime);
839 err2 |= __put_user(s64.shm_dtime, &up64->shm_dtime);
840 err2 |= __put_user(s64.shm_ctime, &up64->shm_ctime);
841 err2 |= __put_user(s64.shm_segsz, &up64->shm_segsz);
842 err2 |= __put_user(s64.shm_nattch, &up64->shm_nattch);
843 err2 |= __put_user(s64.shm_cpid, &up64->shm_cpid);
844 err2 |= __put_user(s64.shm_lpid, &up64->shm_lpid);
845 } else {
846 if (!access_ok(VERIFY_WRITE, up32, sizeof(*up32))) {
847 err = -EFAULT;
848 break;
849 }
850 err2 = __put_user(s64.shm_perm.key, &up32->shm_perm.key);
851 err2 |= __put_user(s64.shm_perm.uid, &up32->shm_perm.uid);
852 err2 |= __put_user(s64.shm_perm.gid, &up32->shm_perm.gid);
853 err2 |= __put_user(s64.shm_perm.cuid, &up32->shm_perm.cuid);
854 err2 |= __put_user(s64.shm_perm.cgid, &up32->shm_perm.cgid);
855 err2 |= __put_user(s64.shm_perm.mode, &up32->shm_perm.mode);
856 err2 |= __put_user(s64.shm_perm.seq, &up32->shm_perm.seq);
857 err2 |= __put_user(s64.shm_atime, &up32->shm_atime);
858 err2 |= __put_user(s64.shm_dtime, &up32->shm_dtime);
859 err2 |= __put_user(s64.shm_ctime, &up32->shm_ctime);
860 err2 |= __put_user(s64.shm_segsz, &up32->shm_segsz);
861 err2 |= __put_user(s64.shm_nattch, &up32->shm_nattch);
862 err2 |= __put_user(s64.shm_cpid, &up32->shm_cpid);
863 err2 |= __put_user(s64.shm_lpid, &up32->shm_lpid);
864 }
865 if (err2)
866 err = -EFAULT;
867 break;
868
869 case SHM_INFO:
870 old_fs = get_fs();
871 set_fs(KERNEL_DS);
872 err = sys_shmctl(first, second, (void __user *)&si);
873 set_fs(old_fs);
874 if (err < 0)
875 break;
876 err2 = put_user(si.used_ids, &uip->used_ids);
877 err2 |= __put_user(si.shm_tot, &uip->shm_tot);
878 err2 |= __put_user(si.shm_rss, &uip->shm_rss);
879 err2 |= __put_user(si.shm_swp, &uip->shm_swp);
880 err2 |= __put_user(si.swap_attempts, &uip->swap_attempts);
881 err2 |= __put_user (si.swap_successes, &uip->swap_successes);
882 if (err2)
883 err = -EFAULT;
884 break;
885
886 default:
887 err = -EINVAL;
888 break;
889 }
890
891 return err;
892}
893
894static int sys32_semtimedop(int semid, struct sembuf __user *tsems, int nsems,
895 const struct compat_timespec __user *timeout32)
896{
897 struct compat_timespec t32;
898 struct timespec __user *t64 = compat_alloc_user_space(sizeof(*t64));
899
900 if (copy_from_user(&t32, timeout32, sizeof(t32)))
901 return -EFAULT;
902
903 if (put_user(t32.tv_sec, &t64->tv_sec) ||
904 put_user(t32.tv_nsec, &t64->tv_nsec))
905 return -EFAULT;
906
907 return sys_semtimedop(semid, tsems, nsems, t64);
908}
909
910asmlinkage long 385asmlinkage long
911sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth) 386sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
912{ 387{
@@ -918,48 +393,43 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
918 switch (call) { 393 switch (call) {
919 case SEMOP: 394 case SEMOP:
920 /* struct sembuf is the same on 32 and 64bit :)) */ 395 /* struct sembuf is the same on 32 and 64bit :)) */
921 err = sys_semtimedop (first, (struct sembuf __user *)AA(ptr), second, 396 err = sys_semtimedop(first, compat_ptr(ptr), second, NULL);
922 NULL);
923 break; 397 break;
924 case SEMTIMEDOP: 398 case SEMTIMEDOP:
925 err = sys32_semtimedop (first, (struct sembuf __user *)AA(ptr), second, 399 err = compat_sys_semtimedop(first, compat_ptr(ptr), second,
926 (const struct compat_timespec __user *)AA(fifth)); 400 compat_ptr(fifth));
927 break; 401 break;
928 case SEMGET: 402 case SEMGET:
929 err = sys_semget (first, second, third); 403 err = sys_semget(first, second, third);
930 break; 404 break;
931 case SEMCTL: 405 case SEMCTL:
932 err = do_sys32_semctl (first, second, third, 406 err = compat_sys_semctl(first, second, third, compat_ptr(ptr));
933 (void __user *)AA(ptr));
934 break; 407 break;
935
936 case MSGSND: 408 case MSGSND:
937 err = do_sys32_msgsnd (first, second, third, 409 err = compat_sys_msgsnd(first, second, third, compat_ptr(ptr));
938 (void __user *)AA(ptr));
939 break; 410 break;
940 case MSGRCV: 411 case MSGRCV:
941 err = do_sys32_msgrcv (first, second, fifth, third, 412 err = compat_sys_msgrcv(first, second, fifth, third,
942 version, (void __user *)AA(ptr)); 413 version, compat_ptr(ptr));
943 break; 414 break;
944 case MSGGET: 415 case MSGGET:
945 err = sys_msgget ((key_t) first, second); 416 err = sys_msgget((key_t) first, second);
946 break; 417 break;
947 case MSGCTL: 418 case MSGCTL:
948 err = do_sys32_msgctl (first, second, (void __user *)AA(ptr)); 419 err = compat_sys_msgctl(first, second, compat_ptr(ptr));
949 break; 420 break;
950
951 case SHMAT: 421 case SHMAT:
952 err = do_sys32_shmat (first, second, third, 422 err = compat_sys_shmat(first, second, third, version,
953 version, (void __user *)AA(ptr)); 423 compat_ptr(ptr));
954 break; 424 break;
955 case SHMDT: 425 case SHMDT:
956 err = sys_shmdt ((char __user *)A(ptr)); 426 err = sys_shmdt(compat_ptr(ptr));
957 break; 427 break;
958 case SHMGET: 428 case SHMGET:
959 err = sys_shmget (first, (unsigned)second, third); 429 err = sys_shmget(first, (unsigned)second, third);
960 break; 430 break;
961 case SHMCTL: 431 case SHMCTL:
962 err = do_sys32_shmctl (first, second, (void __user *)AA(ptr)); 432 err = compat_sys_shmctl(first, second, compat_ptr(ptr));
963 break; 433 break;
964 default: 434 default:
965 err = -EINVAL; 435 err = -EINVAL;
@@ -969,18 +439,16 @@ sys32_ipc (u32 call, int first, int second, int third, u32 ptr, u32 fifth)
969 return err; 439 return err;
970} 440}
971 441
972asmlinkage long sys32_shmat(int shmid, char __user *shmaddr, 442#ifdef CONFIG_MIPS32_N32
973 int shmflg, int32_t __user *addr) 443asmlinkage long sysn32_semctl(int semid, int semnum, int cmd, union semun arg)
974{ 444{
975 unsigned long raddr; 445 /* compat_sys_semctl expects a pointer to union semun */
976 int err; 446 u32 __user *uptr = compat_alloc_user_space(sizeof(u32));
977 447 if (put_user(ptr_to_compat(arg.__pad), uptr))
978 err = do_shmat(shmid, shmaddr, shmflg, &raddr); 448 return -EFAULT;
979 if (err) 449 return compat_sys_semctl(semid, semnum, cmd, uptr);
980 return err;
981
982 return put_user(raddr, addr);
983} 450}
451#endif
984 452
985struct sysctl_args32 453struct sysctl_args32
986{ 454{
@@ -1055,7 +523,9 @@ asmlinkage long sys32_newuname(struct new_utsname __user * name)
1055asmlinkage int sys32_personality(unsigned long personality) 523asmlinkage int sys32_personality(unsigned long personality)
1056{ 524{
1057 int ret; 525 int ret;
1058 if (current->personality == PER_LINUX32 && personality == PER_LINUX) 526 personality &= 0xffffffff;
527 if (personality(current->personality) == PER_LINUX32 &&
528 personality == PER_LINUX)
1059 personality = PER_LINUX32; 529 personality = PER_LINUX32;
1060 ret = sys_personality(personality); 530 ret = sys_personality(personality);
1061 if (ret == PER_LINUX32) 531 if (ret == PER_LINUX32)
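
For context on the conversion above: the removed helpers marshalled 32-bit user pointers by hand with the A()/AA() macros, whose (int) cast sign-extends a 32-bit value into a 64-bit address; compat_ptr() does the same job. A minimal userspace sketch of that conversion (the helper name is illustrative, and the sign-extending convention is assumed from the old macros):

#include <stdio.h>
#include <stdint.h>

/* Rebuild a 64-bit address from a 32-bit compat user pointer by
 * sign extension, as the old AA() macro's (int) cast did. */
static uint64_t compat_to_native(uint32_t uptr)
{
        return (uint64_t)(int64_t)(int32_t)uptr;
}

int main(void)
{
        /* Low user addresses come through unchanged... */
        printf("%#llx\n", (unsigned long long)compat_to_native(0x7fff0000u));
        /* ...while values with the top bit set sign-extend. */
        printf("%#llx\n", (unsigned long long)compat_to_native(0x80010000u));
        return 0;
}
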
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
new file mode 100644
index 000000000000..e0ad754c7edd
--- /dev/null
+++ b/arch/mips/kernel/machine_kexec.c
@@ -0,0 +1,85 @@
1/*
2 * machine_kexec.c for kexec
3 * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
4 *
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
7 */
8
9#include <linux/kexec.h>
10#include <linux/mm.h>
11#include <linux/delay.h>
12
13#include <asm/cacheflush.h>
14#include <asm/page.h>
15
16const extern unsigned char relocate_new_kernel[];
17const extern unsigned int relocate_new_kernel_size;
18
19extern unsigned long kexec_start_address;
20extern unsigned long kexec_indirection_page;
21
22int
23machine_kexec_prepare(struct kimage *kimage)
24{
25 return 0;
26}
27
28void
29machine_kexec_cleanup(struct kimage *kimage)
30{
31}
32
33void
34machine_shutdown(void)
35{
36}
37
38void
39machine_crash_shutdown(struct pt_regs *regs)
40{
41}
42
43void
44machine_kexec(struct kimage *image)
45{
46 unsigned long reboot_code_buffer;
47 unsigned long entry;
48 unsigned long *ptr;
49
50 reboot_code_buffer =
51 (unsigned long)page_address(image->control_code_page);
52
53 kexec_start_address = image->start;
54 kexec_indirection_page = phys_to_virt(image->head & PAGE_MASK);
55
56 memcpy((void*)reboot_code_buffer, relocate_new_kernel,
57 relocate_new_kernel_size);
58
59 /*
60 * The generic kexec code builds a page list with physical
 61 * addresses. They are directly accessible through KSEG0 (or
 62 * CKSEG0 or XKPHYS on a 64-bit system), hence the
 63 * phys_to_virt() call.
64 */
65 for (ptr = &image->head; (entry = *ptr) && !(entry &IND_DONE);
66 ptr = (entry & IND_INDIRECTION) ?
67 phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
68 if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
69 *ptr & IND_DESTINATION)
70 *ptr = phys_to_virt(*ptr);
71 }
72
73 /*
74 * we do not want to be bothered.
75 */
76 local_irq_disable();
77
78 flush_icache_range(reboot_code_buffer,
79 reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE);
80
81 printk("Will call new kernel at %08x\n", image->start);
82 printk("Bye ...\n");
83 flush_cache_all();
84 ((void (*)(void))reboot_code_buffer)();
85}
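
The loop in machine_kexec() walks the generic kexec page list, where the low bits of each entry encode its type (IND_DESTINATION 0x1, IND_INDIRECTION 0x2, IND_DONE 0x4, IND_SOURCE 0x8, from linux/kexec.h). A userspace simulation of the same traversal, with the phys_to_virt() conversion elided since everything here is already a virtual address:

#include <stdio.h>

#define IND_DESTINATION 0x1
#define IND_INDIRECTION 0x2
#define IND_DONE        0x4
#define IND_SOURCE      0x8

/* Follow an indirection chain until IND_DONE; an indirection entry
 * points at the next block of entries, everything else advances
 * within the current block. */
static void walk(unsigned long *ptr)
{
        unsigned long entry;

        for (; (entry = *ptr) && !(entry & IND_DONE);
             ptr = (entry & IND_INDIRECTION) ?
                     (unsigned long *)(entry & ~0xfUL) : ptr + 1)
                printf("entry %#lx, flag %#lx\n",
                       entry & ~0xfUL, entry & 0xfUL);
}

int main(void)
{
        static _Alignas(16) unsigned long tail[2];
        unsigned long head[2];

        tail[0] = 0x2000 | IND_SOURCE;          /* fake source page */
        tail[1] = IND_DONE;
        head[0] = 0x1000 | IND_DESTINATION;     /* fake destination */
        head[1] = (unsigned long)tail | IND_INDIRECTION;
        walk(head);
        return 0;
}
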
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index d7bf0215bc1d..cb0801437b66 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -29,6 +29,7 @@
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/module.h> 30#include <linux/module.h>
31#include <linux/spinlock.h> 31#include <linux/spinlock.h>
32#include <asm/pgtable.h> /* MODULE_START */
32 33
33struct mips_hi16 { 34struct mips_hi16 {
34 struct mips_hi16 *next; 35 struct mips_hi16 *next;
@@ -43,9 +44,23 @@ static DEFINE_SPINLOCK(dbe_lock);
43 44
44void *module_alloc(unsigned long size) 45void *module_alloc(unsigned long size)
45{ 46{
47#ifdef MODULE_START
48 struct vm_struct *area;
49
50 size = PAGE_ALIGN(size);
51 if (!size)
52 return NULL;
53
54 area = __get_vm_area(size, VM_ALLOC, MODULE_START, MODULE_END);
55 if (!area)
56 return NULL;
57
58 return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL);
59#else
46 if (size == 0) 60 if (size == 0)
47 return NULL; 61 return NULL;
48 return vmalloc(size); 62 return vmalloc(size);
63#endif
49} 64}
50 65
51/* Free memory returned from module_alloc */ 66/* Free memory returned from module_alloc */
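
One plausible reading of the MODULE_START change above: MIPS j/jal can only reach targets inside the same 256 MB segment, so allocating module text from a dedicated window next to the kernel keeps direct jumps (and HI16/LO16 relocations) in range. A small illustrative check of that reachability rule (not kernel code):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* A MIPS 'j' keeps the upper bits of PC+4 and supplies only bits
 * 27..2 of the target, so both addresses must agree above bit 27. */
static bool j_reachable(uint64_t from, uint64_t to)
{
        return ((from + 4) & ~0x0fffffffULL) == (to & ~0x0fffffffULL);
}

int main(void)
{
        printf("%d\n", j_reachable(0xffffffff80001000ULL,
                                   0xffffffff8ff00000ULL));     /* 1 */
        printf("%d\n", j_reachable(0xffffffff80001000ULL,
                                   0xffffffffc0000000ULL));     /* 0 */
        return 0;
}
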
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 9f307eb1a31e..ec8209f3a0c6 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -358,10 +358,8 @@ static int __init frame_info_init(void)
358 unsigned long size = 0; 358 unsigned long size = 0;
359#ifdef CONFIG_KALLSYMS 359#ifdef CONFIG_KALLSYMS
360 unsigned long ofs; 360 unsigned long ofs;
361 char *modname;
362 char namebuf[KSYM_NAME_LEN + 1];
363 361
364 kallsyms_lookup((unsigned long)schedule, &size, &ofs, &modname, namebuf); 362 kallsyms_lookup_size_offset((unsigned long)schedule, &size, &ofs);
365#endif 363#endif
366 schedule_mfi.func = schedule; 364 schedule_mfi.func = schedule;
367 schedule_mfi.func_size = size; 365 schedule_mfi.func_size = size;
@@ -403,8 +401,6 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
403{ 401{
404 unsigned long stack_page; 402 unsigned long stack_page;
405 struct mips_frame_info info; 403 struct mips_frame_info info;
406 char *modname;
407 char namebuf[KSYM_NAME_LEN + 1];
408 unsigned long size, ofs; 404 unsigned long size, ofs;
409 int leaf; 405 int leaf;
410 extern void ret_from_irq(void); 406 extern void ret_from_irq(void);
@@ -433,7 +429,7 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
433 } 429 }
434 return 0; 430 return 0;
435 } 431 }
436 if (!kallsyms_lookup(pc, &size, &ofs, &modname, namebuf)) 432 if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
437 return 0; 433 return 0;
438 /* 434 /*
 439 * Return ra if an exception occurred at the first instruction 435 * Return ra if an exception occurred at the first instruction
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d5c8b82fed72..cc566cf12246 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -85,7 +85,12 @@
85 move $28, a2 85 move $28, a2
86 cpu_restore_nonscratch a1 86 cpu_restore_nonscratch a1
87 87
88#if (_THREAD_SIZE - 32) < 0x10000
88 PTR_ADDIU t0, $28, _THREAD_SIZE - 32 89 PTR_ADDIU t0, $28, _THREAD_SIZE - 32
90#else
91 PTR_LI t0, _THREAD_SIZE - 32
92 PTR_ADDU t0, $28
93#endif
89 set_saved_sp t0, t1, t2 94 set_saved_sp t0, t1, t2
90#ifdef CONFIG_MIPS_MT_SMTC 95#ifdef CONFIG_MIPS_MT_SMTC
91 /* Read-modify-writes of Status must be atomic on a VPE */ 96 /* Read-modify-writes of Status must be atomic on a VPE */
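
The new #if above exists because ADDIU encodes a 16-bit immediate; once _THREAD_SIZE - 32 no longer fits, the constant has to be materialized with PTR_LI and added with PTR_ADDU. A quick sketch of the fits-in-an-addiu-immediate test (illustrative only; the kernel simply compares against 0x10000 at preprocessing time):

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* ADDIU's immediate field is 16 bits (sign-extended), so a constant
 * is encodable only if it survives a round-trip through int16_t. */
static bool fits_addiu(long imm)
{
        return (int16_t)imm == imm;
}

int main(void)
{
        printf("%d\n", fits_addiu(0x2000 - 32));        /* 8K stacks: 1 */
        printf("%d\n", fits_addiu(0x10000 - 32));       /* 64K stacks: 0 */
        return 0;
}
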
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
new file mode 100644
index 000000000000..a3f0d00c1334
--- /dev/null
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -0,0 +1,80 @@
1/*
2 * relocate_kernel.S for kexec
3 * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006
4 *
5 * This source code is licensed under the GNU General Public License,
6 * Version 2. See the file COPYING for more details.
7 */
8
9#include <asm/asm.h>
10#include <asm/asmmacro.h>
11#include <asm/regdef.h>
12#include <asm/page.h>
13#include <asm/mipsregs.h>
14#include <asm/stackframe.h>
15#include <asm/addrspace.h>
16
17 .globl relocate_new_kernel
18relocate_new_kernel:
19
20 PTR_L s0, kexec_indirection_page
21 PTR_L s1, kexec_start_address
22
23process_entry:
24 PTR_L s2, (s0)
25 PTR_ADD s0, s0, SZREG
26
27 /* destination page */
28 and s3, s2, 0x1
29 beq s3, zero, 1f
30 and s4, s2, ~0x1 /* store destination addr in s4 */
31 move a0, s4
32 b process_entry
33
341:
35 /* indirection page, update s0 */
36 and s3, s2, 0x2
37 beq s3, zero, 1f
38 and s0, s2, ~0x2
39 b process_entry
40
411:
42 /* done page */
43 and s3, s2, 0x4
44 beq s3, zero, 1f
45 b done
461:
47 /* source page */
48 and s3, s2, 0x8
49 beq s3, zero, process_entry
50 and s2, s2, ~0x8
51 li s6, (1 << PAGE_SHIFT) / SZREG
52
53copy_word:
54 /* copy page word by word */
55 REG_L s5, (s2)
56 REG_S s5, (s4)
57 INT_ADD s4, s4, SZREG
58 INT_ADD s2, s2, SZREG
59 INT_SUB s6, s6, 1
60 beq s6, zero, process_entry
61 b copy_word
62 b process_entry
63
64done:
65 /* jump to kexec_start_address */
66 j s1
67
68 .globl kexec_start_address
69kexec_start_address:
70 .long 0x0
71
72 .globl kexec_indirection_page
73kexec_indirection_page:
74 .long 0x0
75
76relocate_new_kernel_end:
77
78 .globl relocate_new_kernel_size
79relocate_new_kernel_size:
80 .long relocate_new_kernel_end - relocate_new_kernel
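
For readability, a C rendering of the copy_word loop above (illustrative; the assembly works in SZREG-sized register words and runs while the MMU still maps the old kernel):

#include <stddef.h>

#define PAGE_SZ 4096    /* stand-in for 1 << PAGE_SHIFT */

/* Copy one source page to the current destination, one
 * register-sized word at a time, as copy_word does. */
static void copy_page_words(unsigned long *dst, const unsigned long *src)
{
        size_t n = PAGE_SZ / sizeof(unsigned long);

        while (n--)
                *dst++ = *src++;
}

int main(void)
{
        static unsigned long a[PAGE_SZ / sizeof(unsigned long)];
        static unsigned long b[PAGE_SZ / sizeof(unsigned long)];

        a[0] = 42;
        copy_page_words(b, a);
        return b[0] == 42 ? 0 : 1;
}
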
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
index 621037db2290..060563a712b6 100644
--- a/arch/mips/kernel/reset.c
+++ b/arch/mips/kernel/reset.c
@@ -23,6 +23,8 @@ void (*_machine_restart)(char *command);
23void (*_machine_halt)(void); 23void (*_machine_halt)(void);
24void (*pm_power_off)(void); 24void (*pm_power_off)(void);
25 25
26EXPORT_SYMBOL(pm_power_off);
27
26void machine_restart(char *command) 28void machine_restart(char *command)
27{ 29{
28 if (_machine_restart) 30 if (_machine_restart)
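
With pm_power_off now exported, platform code built as a module can install a power-off handler. A hypothetical sketch (the board name and the register poke are made up):

#include <linux/module.h>
#include <linux/pm.h>

static void myboard_power_off(void)     /* hypothetical handler */
{
        /* write to the board's power controller here */
}

static int __init myboard_init(void)
{
        pm_power_off = myboard_power_off;
        return 0;
}

static void __exit myboard_exit(void)
{
        pm_power_off = NULL;
}

module_init(myboard_init);
module_exit(myboard_exit);
MODULE_LICENSE("GPL");
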
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 8c8c8324f775..5a99e3e0c96d 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -415,7 +415,7 @@ static unsigned int file_poll(struct file *file, poll_table * wait)
415 int minor; 415 int minor;
416 unsigned int mask = 0; 416 unsigned int mask = 0;
417 417
418 minor = iminor(file->f_dentry->d_inode); 418 minor = iminor(file->f_path.dentry->d_inode);
419 419
420 poll_wait(file, &channel_wqs[minor].rt_queue, wait); 420 poll_wait(file, &channel_wqs[minor].rt_queue, wait);
421 poll_wait(file, &channel_wqs[minor].lx_queue, wait); 421 poll_wait(file, &channel_wqs[minor].lx_queue, wait);
@@ -437,7 +437,7 @@ static unsigned int file_poll(struct file *file, poll_table * wait)
437static ssize_t file_read(struct file *file, char __user * buffer, size_t count, 437static ssize_t file_read(struct file *file, char __user * buffer, size_t count,
438 loff_t * ppos) 438 loff_t * ppos)
439{ 439{
440 int minor = iminor(file->f_dentry->d_inode); 440 int minor = iminor(file->f_path.dentry->d_inode);
441 441
442 /* data available? */ 442 /* data available? */
443 if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) { 443 if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) {
@@ -454,7 +454,7 @@ static ssize_t file_write(struct file *file, const char __user * buffer,
454 struct rtlx_channel *rt; 454 struct rtlx_channel *rt;
455 DECLARE_WAITQUEUE(wait, current); 455 DECLARE_WAITQUEUE(wait, current);
456 456
457 minor = iminor(file->f_dentry->d_inode); 457 minor = iminor(file->f_path.dentry->d_inode);
458 rt = &rtlx->channel[minor]; 458 rt = &rtlx->channel[minor];
459 459
460 /* any space left... */ 460 /* any space left... */
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 61362e6fa9ec..7c0b3936ba44 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -652,7 +652,10 @@ einval: li v0, -EINVAL
652 sys sys_vmsplice 4 652 sys sys_vmsplice 4
653 sys sys_move_pages 6 653 sys sys_move_pages 6
654 sys sys_set_robust_list 2 654 sys sys_set_robust_list 2
655 sys sys_get_robust_list 3 655 sys sys_get_robust_list 3 /* 4310 */
656 sys sys_kexec_load 4
657 sys sys_getcpu 3
658 sys sys_epoll_pwait 6
656 .endm 659 .endm
657 660
658 /* We pre-compute the number of _instruction_ bytes needed to 661 /* We pre-compute the number of _instruction_ bytes needed to
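
The three new o32 entries follow sys_get_robust_list, which the table comment places at 4310, so sys_kexec_load, sys_getcpu and sys_epoll_pwait should land at 4311-4313. A small userspace check of the getcpu slot (the number is inferred from table order; verify against asm/unistd.h before relying on it):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        unsigned int cpu = 0, node = 0;

        /* 4312 is an assumption: o32 base 4000 plus table position */
        long ret = syscall(4312, &cpu, &node, NULL);

        if (ret == 0)
                printf("running on cpu %u, node %u\n", cpu, node);
        return ret ? 1 : 0;
}
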
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 6c7b5ed0ea6e..e569b846e9a3 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -468,3 +468,6 @@ sys_call_table:
468 PTR sys_move_pages 468 PTR sys_move_pages
469 PTR sys_set_robust_list 469 PTR sys_set_robust_list
470 PTR sys_get_robust_list 470 PTR sys_get_robust_list
471 PTR sys_kexec_load /* 5270 */
472 PTR sys_getcpu
473 PTR sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 6d9f18727ac5..34567d81f940 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -149,8 +149,8 @@ EXPORT(sysn32_call_table)
149 PTR sys_mincore 149 PTR sys_mincore
150 PTR sys_madvise 150 PTR sys_madvise
151 PTR sys_shmget 151 PTR sys_shmget
152 PTR sys32_shmat 152 PTR sys_shmat
153 PTR sys_shmctl /* 6030 */ 153 PTR compat_sys_shmctl /* 6030 */
154 PTR sys_dup 154 PTR sys_dup
155 PTR sys_dup2 155 PTR sys_dup2
156 PTR sys_pause 156 PTR sys_pause
@@ -184,12 +184,12 @@ EXPORT(sysn32_call_table)
184 PTR sys32_newuname 184 PTR sys32_newuname
185 PTR sys_semget 185 PTR sys_semget
186 PTR sys_semop 186 PTR sys_semop
187 PTR sys_semctl 187 PTR sysn32_semctl
188 PTR sys_shmdt /* 6065 */ 188 PTR sys_shmdt /* 6065 */
189 PTR sys_msgget 189 PTR sys_msgget
190 PTR sys_msgsnd 190 PTR compat_sys_msgsnd
191 PTR sys_msgrcv 191 PTR compat_sys_msgrcv
192 PTR sys_msgctl 192 PTR compat_sys_msgctl
193 PTR compat_sys_fcntl /* 6070 */ 193 PTR compat_sys_fcntl /* 6070 */
194 PTR sys_flock 194 PTR sys_flock
195 PTR sys_fsync 195 PTR sys_fsync
@@ -280,7 +280,7 @@ EXPORT(sysn32_call_table)
280 PTR sys_sync 280 PTR sys_sync
281 PTR sys_acct 281 PTR sys_acct
282 PTR sys32_settimeofday 282 PTR sys32_settimeofday
283 PTR sys_mount /* 6160 */ 283 PTR compat_sys_mount /* 6160 */
284 PTR sys_umount 284 PTR sys_umount
285 PTR sys_swapon 285 PTR sys_swapon
286 PTR sys_swapoff 286 PTR sys_swapoff
@@ -335,7 +335,7 @@ EXPORT(sysn32_call_table)
335 PTR compat_sys_fcntl64 335 PTR compat_sys_fcntl64
336 PTR sys_set_tid_address 336 PTR sys_set_tid_address
337 PTR sys_restart_syscall 337 PTR sys_restart_syscall
338 PTR sys_semtimedop /* 6215 */ 338 PTR compat_sys_semtimedop /* 6215 */
339 PTR sys_fadvise64_64 339 PTR sys_fadvise64_64
340 PTR compat_sys_statfs64 340 PTR compat_sys_statfs64
341 PTR compat_sys_fstatfs64 341 PTR compat_sys_fstatfs64
@@ -394,3 +394,6 @@ EXPORT(sysn32_call_table)
394 PTR sys_move_pages 394 PTR sys_move_pages
395 PTR compat_sys_set_robust_list 395 PTR compat_sys_set_robust_list
396 PTR compat_sys_get_robust_list 396 PTR compat_sys_get_robust_list
397 PTR compat_sys_kexec_load
398 PTR sys_getcpu
399 PTR sys_epoll_pwait
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 2e6d0673163e..e91379c1be1d 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -226,7 +226,7 @@ sys_call_table:
226 PTR sys_ni_syscall /* was sys_stat */ 226 PTR sys_ni_syscall /* was sys_stat */
227 PTR sys_lseek 227 PTR sys_lseek
228 PTR sys_getpid /* 4020 */ 228 PTR sys_getpid /* 4020 */
229 PTR sys_mount 229 PTR compat_sys_mount
230 PTR sys_oldumount 230 PTR sys_oldumount
231 PTR sys_setuid 231 PTR sys_setuid
232 PTR sys_getuid 232 PTR sys_getuid
@@ -516,4 +516,7 @@ sys_call_table:
516 PTR compat_sys_move_pages 516 PTR compat_sys_move_pages
517 PTR compat_sys_set_robust_list 517 PTR compat_sys_set_robust_list
518 PTR compat_sys_get_robust_list /* 4310 */ 518 PTR compat_sys_get_robust_list /* 4310 */
519 PTR compat_sys_kexec_load
520 PTR sys_getcpu
521 PTR sys_epoll_pwait
519 .size sys_call_table,.-sys_call_table 522 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index fdbb508661c5..89440a0d8528 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -145,13 +145,12 @@ static int __init rd_start_early(char *p)
145 unsigned long start = memparse(p, &p); 145 unsigned long start = memparse(p, &p);
146 146
147#ifdef CONFIG_64BIT 147#ifdef CONFIG_64BIT
 148 /* HACK: Guess if the sign extension was forgotten */ 148 /* Guess if the sign extension was forgotten by the bootloader */
149 if (start > 0x0000000080000000 && start < 0x00000000ffffffff) 149 if (start < XKPHYS)
150 start |= 0xffffffff00000000UL; 150 start = (int)start;
151#endif 151#endif
152 initrd_start = start; 152 initrd_start = start;
153 initrd_end += start; 153 initrd_end += start;
154
155 return 0; 154 return 0;
156} 155}
157early_param("rd_start", rd_start_early); 156early_param("rd_start", rd_start_early);
@@ -159,41 +158,64 @@ early_param("rd_start", rd_start_early);
159static int __init rd_size_early(char *p) 158static int __init rd_size_early(char *p)
160{ 159{
161 initrd_end += memparse(p, &p); 160 initrd_end += memparse(p, &p);
162
163 return 0; 161 return 0;
164} 162}
165early_param("rd_size", rd_size_early); 163early_param("rd_size", rd_size_early);
166 164
 165/* Returns the next free PFN after the initrd */
167static unsigned long __init init_initrd(void) 166static unsigned long __init init_initrd(void)
168{ 167{
169 unsigned long tmp, end, size; 168 unsigned long end;
170 u32 *initrd_header; 169 u32 *initrd_header;
171 170
172 ROOT_DEV = Root_RAM0;
173
174 /* 171 /*
175 * Board specific code or command line parser should have 172 * Board specific code or command line parser should have
176 * already set up initrd_start and initrd_end. In these cases 173 * already set up initrd_start and initrd_end. In these cases
 177 * perform sanity checks and use them if all looks good. 174 * perform sanity checks and use them if all looks good.
178 */ 175 */
179 size = initrd_end - initrd_start; 176 if (initrd_start && initrd_end > initrd_start)
180 if (initrd_end == 0 || size == 0) { 177 goto sanitize;
181 initrd_start = 0; 178
182 initrd_end = 0; 179 /*
183 } else 180 * See if initrd has been added to the kernel image by
184 return initrd_end; 181 * arch/mips/boot/addinitrd.c. In that case a header is
185 182 * prepended to initrd and is made up by 8 bytes. The fisrt
186 end = (unsigned long)&_end; 183 * word is a magic number and the second one is the size of
187 tmp = PAGE_ALIGN(end) - sizeof(u32) * 2; 184 * initrd. Initrd start must be page aligned in any cases.
188 if (tmp < end) 185 */
189 tmp += PAGE_SIZE; 186 initrd_header = __va(PAGE_ALIGN(__pa_symbol(&_end) + 8)) - 8;
190 187 if (initrd_header[0] != 0x494E5244)
191 initrd_header = (u32 *)tmp; 188 goto disable;
192 if (initrd_header[0] == 0x494E5244) { 189 initrd_start = (unsigned long)(initrd_header + 2);
193 initrd_start = (unsigned long)&initrd_header[2]; 190 initrd_end = initrd_start + initrd_header[1];
194 initrd_end = initrd_start + initrd_header[1]; 191
192sanitize:
193 if (initrd_start & ~PAGE_MASK) {
194 printk(KERN_ERR "initrd start must be page aligned\n");
195 goto disable;
195 } 196 }
196 return initrd_end; 197 if (initrd_start < PAGE_OFFSET) {
198 printk(KERN_ERR "initrd start < PAGE_OFFSET\n");
199 goto disable;
200 }
201
202 /*
203 * Sanitize initrd addresses. For example firmware
204 * can't guess if they need to pass them through
205 * 64-bits values if the kernel has been built in pure
206 * 32-bit. We need also to switch from KSEG0 to XKPHYS
207 * addresses now, so the code can now safely use __pa().
208 */
209 end = __pa(initrd_end);
210 initrd_end = (unsigned long)__va(end);
211 initrd_start = (unsigned long)__va(__pa(initrd_start));
212
213 ROOT_DEV = Root_RAM0;
214 return PFN_UP(end);
215disable:
216 initrd_start = 0;
217 initrd_end = 0;
218 return 0;
197} 219}
198 220
199static void __init finalize_initrd(void) 221static void __init finalize_initrd(void)
@@ -204,12 +226,12 @@ static void __init finalize_initrd(void)
204 printk(KERN_INFO "Initrd not found or empty"); 226 printk(KERN_INFO "Initrd not found or empty");
205 goto disable; 227 goto disable;
206 } 228 }
207 if (CPHYSADDR(initrd_end) > PFN_PHYS(max_low_pfn)) { 229 if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
208 printk("Initrd extends beyond end of memory"); 230 printk("Initrd extends beyond end of memory");
209 goto disable; 231 goto disable;
210 } 232 }
211 233
212 reserve_bootmem(CPHYSADDR(initrd_start), size); 234 reserve_bootmem(__pa(initrd_start), size);
213 initrd_below_start_ok = 1; 235 initrd_below_start_ok = 1;
214 236
215 printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n", 237 printk(KERN_INFO "Initial ramdisk at: 0x%lx (%lu bytes)\n",
@@ -223,7 +245,11 @@ disable:
223 245
224#else /* !CONFIG_BLK_DEV_INITRD */ 246#else /* !CONFIG_BLK_DEV_INITRD */
225 247
226#define init_initrd() 0 248static unsigned long __init init_initrd(void)
249{
250 return 0;
251}
252
227#define finalize_initrd() do {} while (0) 253#define finalize_initrd() do {} while (0)
228 254
229#endif 255#endif
@@ -255,8 +281,7 @@ static void __init bootmem_init(void)
255 * not selected. Once that done we can determine the low bound 281 * not selected. Once that done we can determine the low bound
256 * of usable memory. 282 * of usable memory.
257 */ 283 */
258 reserved_end = init_initrd(); 284 reserved_end = max(init_initrd(), PFN_UP(__pa_symbol(&_end)));
259 reserved_end = PFN_UP(CPHYSADDR(max(reserved_end, (unsigned long)&_end)));
260 285
261 /* 286 /*
262 * Find the highest page frame number we have available. 287 * Find the highest page frame number we have available.
@@ -428,10 +453,10 @@ static void __init resource_init(void)
428 if (UNCAC_BASE != IO_BASE) 453 if (UNCAC_BASE != IO_BASE)
429 return; 454 return;
430 455
431 code_resource.start = virt_to_phys(&_text); 456 code_resource.start = __pa_symbol(&_text);
432 code_resource.end = virt_to_phys(&_etext) - 1; 457 code_resource.end = __pa_symbol(&_etext) - 1;
433 data_resource.start = virt_to_phys(&_etext); 458 data_resource.start = __pa_symbol(&_etext);
434 data_resource.end = virt_to_phys(&_edata) - 1; 459 data_resource.end = __pa_symbol(&_edata) - 1;
435 460
436 /* 461 /*
437 * Request address space for all standard RAM. 462 * Request address space for all standard RAM.
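
The initrd header that init_initrd() now parses is the 8-byte block prepended by arch/mips/boot/addinitrd: a magic word (0x494E5244, "INRD" in ASCII) followed by the payload size. A minimal userspace sketch of the same check:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint32_t blob[4] = { 0x494E5244u, 8u, 0xdeadbeefu, 0xcafef00du };

        if (blob[0] != 0x494E5244u)
                return 1;       /* no header: leave initrd disabled */
        /* The payload starts right after the two header words, as in
         * initrd_start = (unsigned long)(initrd_header + 2); */
        printf("initrd payload: %u bytes at %p\n", blob[1],
               (void *)&blob[2]);
        return 0;
}
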
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 477c5334ec1b..a67c18555ed3 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -17,7 +17,6 @@
17 */ 17 */
18#include <linux/cache.h> 18#include <linux/cache.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/sched.h>
21#include <linux/mm.h> 20#include <linux/mm.h>
22#include <linux/smp.h> 21#include <linux/smp.h>
23#include <linux/smp_lock.h> 22#include <linux/smp_lock.h>
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 3b5f3b632622..1ee689c0e0c9 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -140,15 +140,90 @@ static struct irqaction irq_call = {
140 .name = "IPI_call" 140 .name = "IPI_call"
141}; 141};
142 142
143static void __init smp_copy_vpe_config(void)
144{
145 write_vpe_c0_status(
146 (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
147
148 /* set config to be the same as vpe0, particularly kseg0 coherency alg */
149 write_vpe_c0_config( read_c0_config());
150
151 /* make sure there are no software interrupts pending */
152 write_vpe_c0_cause(0);
153
154 /* Propagate Config7 */
155 write_vpe_c0_config7(read_c0_config7());
156
157 write_vpe_c0_count(read_c0_count());
158}
159
160static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
161 unsigned int ncpu)
162{
163 if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
164 return ncpu;
165
166 /* Deactivate all but VPE 0 */
167 if (tc != 0) {
168 unsigned long tmp = read_vpe_c0_vpeconf0();
169
170 tmp &= ~VPECONF0_VPA;
171
172 /* master VPE */
173 tmp |= VPECONF0_MVP;
174 write_vpe_c0_vpeconf0(tmp);
175
176 /* Record this as available CPU */
177 cpu_set(tc, phys_cpu_present_map);
178 __cpu_number_map[tc] = ++ncpu;
179 __cpu_logical_map[ncpu] = tc;
180 }
181
182 /* Disable multi-threading with TC's */
183 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
184
185 if (tc != 0)
186 smp_copy_vpe_config();
187
188 return ncpu;
189}
190
191static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
192{
193 unsigned long tmp;
194
195 if (!tc)
196 return;
197
 198 /* bind a TC to each VPE; may as well put all excess TCs
 199 on the last VPE */
200 if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
201 write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
202 else {
203 write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
204
205 /* and set XTC */
206 write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
207 }
208
209 tmp = read_tc_c0_tcstatus();
210
211 /* mark not allocated and not dynamically allocatable */
212 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
213 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
214 write_tc_c0_tcstatus(tmp);
215
216 write_tc_c0_tchalt(TCHALT_H);
217}
218
143/* 219/*
144 * Common setup before any secondaries are started 220 * Common setup before any secondaries are started
 145 * Make sure all CPUs are in a sensible state before we boot any of the 221 * Make sure all CPUs are in a sensible state before we boot any of the
 146 * secondaries 222 * secondaries
147 */ 223 */
148void plat_smp_setup(void) 224void __init plat_smp_setup(void)
149{ 225{
150 unsigned long val; 226 unsigned int mvpconf0, ntc, tc, ncpu = 0;
151 int i, num;
152 227
153#ifdef CONFIG_MIPS_MT_FPAFF 228#ifdef CONFIG_MIPS_MT_FPAFF
154 /* If we have an FPU, enroll ourselves in the FPU-full mask */ 229 /* If we have an FPU, enroll ourselves in the FPU-full mask */
@@ -167,75 +242,16 @@ void plat_smp_setup(void)
167 /* Put MVPE's into 'configuration state' */ 242 /* Put MVPE's into 'configuration state' */
168 set_c0_mvpcontrol(MVPCONTROL_VPC); 243 set_c0_mvpcontrol(MVPCONTROL_VPC);
169 244
170 val = read_c0_mvpconf0(); 245 mvpconf0 = read_c0_mvpconf0();
246 ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
171 247
172 /* we'll always have more TC's than VPE's, so loop setting everything 248 /* we'll always have more TC's than VPE's, so loop setting everything
173 to a sensible state */ 249 to a sensible state */
174 for (i = 0, num = 0; i <= ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT); i++) { 250 for (tc = 0; tc <= ntc; tc++) {
175 settc(i); 251 settc(tc);
176
177 /* VPE's */
178 if (i <= ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) {
179
180 /* deactivate all but vpe0 */
181 if (i != 0) {
182 unsigned long tmp = read_vpe_c0_vpeconf0();
183
184 tmp &= ~VPECONF0_VPA;
185
186 /* master VPE */
187 tmp |= VPECONF0_MVP;
188 write_vpe_c0_vpeconf0(tmp);
189
190 /* Record this as available CPU */
191 cpu_set(i, phys_cpu_present_map);
192 __cpu_number_map[i] = ++num;
193 __cpu_logical_map[num] = i;
194 }
195
196 /* disable multi-threading with TC's */
197 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
198
199 if (i != 0) {
200 write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
201 252
202 /* set config to be the same as vpe0, particularly kseg0 coherency alg */ 253 smp_tc_init(tc, mvpconf0);
203 write_vpe_c0_config( read_c0_config()); 254 ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
204
205 /* make sure there are no software interrupts pending */
206 write_vpe_c0_cause(0);
207
208 /* Propagate Config7 */
209 write_vpe_c0_config7(read_c0_config7());
210 }
211
212 }
213
214 /* TC's */
215
216 if (i != 0) {
217 unsigned long tmp;
218
219 /* bind a TC to each VPE, May as well put all excess TC's
220 on the last VPE */
221 if ( i >= (((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1) )
222 write_tc_c0_tcbind(read_tc_c0_tcbind() | ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) );
223 else {
224 write_tc_c0_tcbind( read_tc_c0_tcbind() | i);
225
226 /* and set XTC */
227 write_vpe_c0_vpeconf0( read_vpe_c0_vpeconf0() | (i << VPECONF0_XTC_SHIFT));
228 }
229
230 tmp = read_tc_c0_tcstatus();
231
232 /* mark not allocated and not dynamically allocatable */
233 tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
234 tmp |= TCSTATUS_IXMT; /* interrupt exempt */
235 write_tc_c0_tcstatus(tmp);
236
237 write_tc_c0_tchalt(TCHALT_H);
238 }
239 } 255 }
240 256
241 /* Release config state */ 257 /* Release config state */
@@ -243,7 +259,7 @@ void plat_smp_setup(void)
243 259
244 /* We'll wait until starting the secondaries before starting MVPE */ 260 /* We'll wait until starting the secondaries before starting MVPE */
245 261
246 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num); 262 printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
247} 263}
248 264
249void __init plat_prepare_cpus(unsigned int max_cpus) 265void __init plat_prepare_cpus(unsigned int max_cpus)
@@ -262,7 +278,9 @@ void __init plat_prepare_cpus(unsigned int max_cpus)
262 278
263 /* need to mark IPI's as IRQ_PER_CPU */ 279 /* need to mark IPI's as IRQ_PER_CPU */
264 irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU; 280 irq_desc[cpu_ipi_resched_irq].status |= IRQ_PER_CPU;
281 set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
265 irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU; 282 irq_desc[cpu_ipi_call_irq].status |= IRQ_PER_CPU;
283 set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
266} 284}
267 285
268/* 286/*
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index db80957ada89..f2a8701e414d 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -172,7 +172,7 @@ int smp_call_function (void (*func) (void *info), void *info, int retry,
172 172
173 spin_lock(&smp_call_lock); 173 spin_lock(&smp_call_lock);
174 call_data = &data; 174 call_data = &data;
175 mb(); 175 smp_mb();
176 176
177 /* Send a message to all other CPUs and wait for them to respond */ 177 /* Send a message to all other CPUs and wait for them to respond */
178 for_each_online_cpu(i) 178 for_each_online_cpu(i)
@@ -204,7 +204,7 @@ void smp_call_function_interrupt(void)
204 * Notify initiating CPU that I've grabbed the data and am 204 * Notify initiating CPU that I've grabbed the data and am
205 * about to execute the function. 205 * about to execute the function.
206 */ 206 */
207 mb(); 207 smp_mb();
208 atomic_inc(&call_data->started); 208 atomic_inc(&call_data->started);
209 209
210 /* 210 /*
@@ -215,7 +215,7 @@ void smp_call_function_interrupt(void)
215 irq_exit(); 215 irq_exit();
216 216
217 if (wait) { 217 if (wait) {
218 mb(); 218 smp_mb();
219 atomic_inc(&call_data->finished); 219 atomic_inc(&call_data->finished);
220 } 220 }
221} 221}
@@ -463,28 +463,5 @@ void flush_tlb_one(unsigned long vaddr)
463 smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr); 463 smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
464} 464}
465 465
466static DEFINE_PER_CPU(struct cpu, cpu_devices);
467
468static int __init topology_init(void)
469{
470 int i, ret;
471
472#ifdef CONFIG_NUMA
473 for_each_online_node(i)
474 register_one_node(i);
475#endif /* CONFIG_NUMA */
476
477 for_each_present_cpu(i) {
478 ret = register_cpu(&per_cpu(cpu_devices, i), i);
479 if (ret)
480 printk(KERN_WARNING "topology_init: register_cpu %d "
481 "failed (%d)\n", i, ret);
482 }
483
484 return 0;
485}
486
487subsys_initcall(topology_init);
488
489EXPORT_SYMBOL(flush_tlb_page); 466EXPORT_SYMBOL(flush_tlb_page);
490EXPORT_SYMBOL(flush_tlb_one); 467EXPORT_SYMBOL(flush_tlb_one);
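
The mb() to smp_mb() change narrows the barrier to SMP ordering only, so UP builds can compile it away. A C11 analogue of the handshake being ordered (single-threaded here just to be runnable; on real hardware the initiator and receiver run on different CPUs):

#include <stdatomic.h>
#include <stdio.h>

struct call_data { void (*func)(void *); void *info; };

static struct call_data *call_data;
static atomic_int started;

static void initiator(struct call_data *d)
{
        call_data = d;
        atomic_thread_fence(memory_order_seq_cst);      /* ~ smp_mb() */
        /* ...IPIs would be sent to the other CPUs here... */
}

static void receiver(void)
{
        struct call_data *d = call_data;

        atomic_thread_fence(memory_order_seq_cst);      /* ~ smp_mb() */
        atomic_fetch_add(&started, 1);  /* signal the data was seen */
        d->func(d->info);
}

static void hello(void *info)
{
        printf("%s\n", (const char *)info);
}

int main(void)
{
        struct call_data d = { hello, "called" };

        initiator(&d);
        receiver();
        return 0;
}
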
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
index 1cb9441f1474..921207c4a83c 100644
--- a/arch/mips/kernel/smtc-asm.S
+++ b/arch/mips/kernel/smtc-asm.S
@@ -101,7 +101,9 @@ FEXPORT(__smtc_ipi_vector)
101 lw t0,PT_PADSLOT5(sp) 101 lw t0,PT_PADSLOT5(sp)
102 /* Argument from sender passed in stack pad slot 4 */ 102 /* Argument from sender passed in stack pad slot 4 */
103 lw a0,PT_PADSLOT4(sp) 103 lw a0,PT_PADSLOT4(sp)
104 PTR_LA ra, _ret_from_irq 104 LONG_L s0, TI_REGS($28)
105 LONG_S sp, TI_REGS($28)
106 PTR_LA ra, ret_from_irq
105 jr t0 107 jr t0
106 108
107/* 109/*
@@ -119,7 +121,10 @@ LEAF(self_ipi)
119 subu t1,sp,PT_SIZE 121 subu t1,sp,PT_SIZE
120 sw ra,PT_EPC(t1) 122 sw ra,PT_EPC(t1)
121 sw a0,PT_PADSLOT4(t1) 123 sw a0,PT_PADSLOT4(t1)
124 LONG_L s0, TI_REGS($28)
125 LONG_S sp, TI_REGS($28)
122 la t2,ipi_decode 126 la t2,ipi_decode
127 LONG_S s0, TI_REGS($28)
123 sw t2,PT_PADSLOT5(t1) 128 sw t2,PT_PADSLOT5(t1)
124 /* Save pre-disable value of TCStatus */ 129 /* Save pre-disable value of TCStatus */
125 sw t0,PT_TCSTATUS(t1) 130 sw t0,PT_TCSTATUS(t1)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index cc1f7474f7d7..802febed7df5 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -476,6 +476,7 @@ void mipsmt_prepare_cpus(void)
476 write_vpe_c0_compare(0); 476 write_vpe_c0_compare(0);
477 /* Propagate Config7 */ 477 /* Propagate Config7 */
478 write_vpe_c0_config7(read_c0_config7()); 478 write_vpe_c0_config7(read_c0_config7());
479 write_vpe_c0_count(read_c0_count());
479 } 480 }
480 /* enable multi-threading within VPE */ 481 /* enable multi-threading within VPE */
481 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); 482 write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -1008,6 +1009,7 @@ void setup_cross_vpe_interrupts(void)
1008 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ)); 1009 setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
1009 1010
1010 irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU; 1011 irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
1012 set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
1011} 1013}
1012 1014
1013/* 1015/*
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
index 4aabe526a68e..a586aba337a7 100644
--- a/arch/mips/kernel/stacktrace.c
+++ b/arch/mips/kernel/stacktrace.c
@@ -57,7 +57,7 @@ static void save_context_stack(struct stack_trace *trace,
57 pc = unwind_stack(task, &sp, pc, &ra); 57 pc = unwind_stack(task, &sp, pc, &ra);
58 } while (pc); 58 } while (pc);
59#else 59#else
60 save_raw_context_stack(sp); 60 save_raw_context_stack(trace, sp);
61#endif 61#endif
62} 62}
63 63
diff --git a/arch/mips/kernel/sysirix.c b/arch/mips/kernel/sysirix.c
index 93c74fefff76..6c2406a93f2b 100644
--- a/arch/mips/kernel/sysirix.c
+++ b/arch/mips/kernel/sysirix.c
@@ -732,7 +732,7 @@ asmlinkage int irix_fstatfs(unsigned int fd, struct irix_statfs __user *buf)
732 goto out; 732 goto out;
733 } 733 }
734 734
735 error = vfs_statfs(file->f_dentry, &kbuf); 735 error = vfs_statfs(file->f_path.dentry, &kbuf);
736 if (error) 736 if (error)
737 goto out_f; 737 goto out_f;
738 738
@@ -1041,7 +1041,7 @@ asmlinkage unsigned long irix_mmap32(unsigned long addr, size_t len, int prot,
1041 unsigned long old_pos; 1041 unsigned long old_pos;
1042 long max_size = offset + len; 1042 long max_size = offset + len;
1043 1043
1044 if (max_size > file->f_dentry->d_inode->i_size) { 1044 if (max_size > file->f_path.dentry->d_inode->i_size) {
1045 old_pos = sys_lseek (fd, max_size - 1, 0); 1045 old_pos = sys_lseek (fd, max_size - 1, 0);
1046 sys_write (fd, (void __user *) "", 1); 1046 sys_write (fd, (void __user *) "", 1);
1047 sys_lseek (fd, old_pos, 0); 1047 sys_lseek (fd, old_pos, 0);
@@ -1406,7 +1406,7 @@ asmlinkage int irix_fstatvfs(int fd, struct irix_statvfs __user *buf)
1406 error = -EBADF; 1406 error = -EBADF;
1407 goto out; 1407 goto out;
1408 } 1408 }
1409 error = vfs_statfs(file->f_dentry, &kbuf); 1409 error = vfs_statfs(file->f_path.dentry, &kbuf);
1410 if (error) 1410 if (error)
1411 goto out_f; 1411 goto out_f;
1412 1412
@@ -1526,7 +1526,7 @@ asmlinkage int irix_mmap64(struct pt_regs *regs)
1526 unsigned long old_pos; 1526 unsigned long old_pos;
1527 long max_size = off2 + len; 1527 long max_size = off2 + len;
1528 1528
1529 if (max_size > file->f_dentry->d_inode->i_size) { 1529 if (max_size > file->f_path.dentry->d_inode->i_size) {
1530 old_pos = sys_lseek (fd, max_size - 1, 0); 1530 old_pos = sys_lseek (fd, max_size - 1, 0);
1531 sys_write (fd, (void __user *) "", 1); 1531 sys_write (fd, (void __user *) "", 1);
1532 sys_lseek (fd, old_pos, 0); 1532 sys_lseek (fd, old_pos, 0);
@@ -1658,7 +1658,7 @@ asmlinkage int irix_fstatvfs64(int fd, struct irix_statvfs __user *buf)
1658 error = -EBADF; 1658 error = -EBADF;
1659 goto out; 1659 goto out;
1660 } 1660 }
1661 error = vfs_statfs(file->f_dentry, &kbuf); 1661 error = vfs_statfs(file->f_path.dentry, &kbuf);
1662 if (error) 1662 if (error)
1663 goto out_f; 1663 goto out_f;
1664 1664
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index debe86c2f691..11aab6d6bfe5 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -67,15 +67,9 @@ int (*rtc_mips_set_time)(unsigned long) = null_rtc_set_time;
67int (*rtc_mips_set_mmss)(unsigned long); 67int (*rtc_mips_set_mmss)(unsigned long);
68 68
69 69
70/* usecs per counter cycle, shifted to left by 32 bits */
71static unsigned int sll32_usecs_per_cycle;
72
73/* how many counter cycles in a jiffy */ 70/* how many counter cycles in a jiffy */
74static unsigned long cycles_per_jiffy __read_mostly; 71static unsigned long cycles_per_jiffy __read_mostly;
75 72
76/* Cycle counter value at the previous timer interrupt.. */
77static unsigned int timerhi, timerlo;
78
79/* expirelo is the count value for next CPU timer interrupt */ 73/* expirelo is the count value for next CPU timer interrupt */
80static unsigned int expirelo; 74static unsigned int expirelo;
81 75
@@ -88,17 +82,11 @@ static void null_timer_ack(void) { /* nothing */ }
88/* 82/*
89 * Null high precision timer functions for systems lacking one. 83 * Null high precision timer functions for systems lacking one.
90 */ 84 */
91static unsigned int null_hpt_read(void) 85static cycle_t null_hpt_read(void)
92{ 86{
93 return 0; 87 return 0;
94} 88}
95 89
96static void null_hpt_init(unsigned int count)
97{
98 /* nothing */
99}
100
101
102/* 90/*
103 * Timer ack for an R4k-compatible timer of a known frequency. 91 * Timer ack for an R4k-compatible timer of a known frequency.
104 */ 92 */
@@ -123,191 +111,20 @@ static void c0_timer_ack(void)
123/* 111/*
124 * High precision timer functions for a R4k-compatible timer. 112 * High precision timer functions for a R4k-compatible timer.
125 */ 113 */
126static unsigned int c0_hpt_read(void) 114static cycle_t c0_hpt_read(void)
127{ 115{
128 return read_c0_count(); 116 return read_c0_count();
129} 117}
130 118
131/* For use solely as a high precision timer. */
132static void c0_hpt_init(unsigned int count)
133{
134 write_c0_count(read_c0_count() - count);
135}
136
137/* For use both as a high precision timer and an interrupt source. */ 119/* For use both as a high precision timer and an interrupt source. */
138static void c0_hpt_timer_init(unsigned int count) 120static void __init c0_hpt_timer_init(void)
139{ 121{
140 count = read_c0_count() - count; 122 expirelo = read_c0_count() + cycles_per_jiffy;
141 expirelo = (count / cycles_per_jiffy + 1) * cycles_per_jiffy;
142 write_c0_count(expirelo - cycles_per_jiffy);
143 write_c0_compare(expirelo); 123 write_c0_compare(expirelo);
144 write_c0_count(count);
145} 124}
146 125
147int (*mips_timer_state)(void); 126int (*mips_timer_state)(void);
148void (*mips_timer_ack)(void); 127void (*mips_timer_ack)(void);
149unsigned int (*mips_hpt_read)(void);
150void (*mips_hpt_init)(unsigned int);
151
152/*
153 * Gettimeoffset routines. These routines returns the time duration
154 * since last timer interrupt in usecs.
155 *
156 * If the exact CPU counter frequency is known, use fixed_rate_gettimeoffset.
157 * Otherwise use calibrate_gettimeoffset()
158 *
159 * If the CPU does not have the counter register, you can either supply
160 * your own gettimeoffset() routine, or use null_gettimeoffset(), which
161 * gives the same resolution as HZ.
162 */
163
164static unsigned long null_gettimeoffset(void)
165{
166 return 0;
167}
168
169
170/* The function pointer to one of the gettimeoffset funcs. */
171unsigned long (*do_gettimeoffset)(void) = null_gettimeoffset;
172
173
174static unsigned long fixed_rate_gettimeoffset(void)
175{
176 u32 count;
177 unsigned long res;
178
179 /* Get last timer tick in absolute kernel time */
180 count = mips_hpt_read();
181
182 /* .. relative to previous jiffy (32 bits is enough) */
183 count -= timerlo;
184
185 __asm__("multu %1,%2"
186 : "=h" (res)
187 : "r" (count), "r" (sll32_usecs_per_cycle)
188 : "lo", GCC_REG_ACCUM);
189
190 /*
191 * Due to possible jiffies inconsistencies, we need to check
192 * the result so that we'll get a timer that is monotonic.
193 */
194 if (res >= USECS_PER_JIFFY)
195 res = USECS_PER_JIFFY - 1;
196
197 return res;
198}
199
200
201/*
202 * Cached "1/(clocks per usec) * 2^32" value.
203 * It has to be recalculated once each jiffy.
204 */
205static unsigned long cached_quotient;
206
207/* Last jiffy when calibrate_divXX_gettimeoffset() was called. */
208static unsigned long last_jiffies;
209
210/*
211 * This is moved from dec/time.c:do_ioasic_gettimeoffset() by Maciej.
212 */
213static unsigned long calibrate_div32_gettimeoffset(void)
214{
215 u32 count;
216 unsigned long res, tmp;
217 unsigned long quotient;
218
219 tmp = jiffies;
220
221 quotient = cached_quotient;
222
223 if (last_jiffies != tmp) {
224 last_jiffies = tmp;
225 if (last_jiffies != 0) {
226 unsigned long r0;
227 do_div64_32(r0, timerhi, timerlo, tmp);
228 do_div64_32(quotient, USECS_PER_JIFFY,
229 USECS_PER_JIFFY_FRAC, r0);
230 cached_quotient = quotient;
231 }
232 }
233
234 /* Get last timer tick in absolute kernel time */
235 count = mips_hpt_read();
236
237 /* .. relative to previous jiffy (32 bits is enough) */
238 count -= timerlo;
239
240 __asm__("multu %1,%2"
241 : "=h" (res)
242 : "r" (count), "r" (quotient)
243 : "lo", GCC_REG_ACCUM);
244
245 /*
246 * Due to possible jiffies inconsistencies, we need to check
247 * the result so that we'll get a timer that is monotonic.
248 */
249 if (res >= USECS_PER_JIFFY)
250 res = USECS_PER_JIFFY - 1;
251
252 return res;
253}
254
255static unsigned long calibrate_div64_gettimeoffset(void)
256{
257 u32 count;
258 unsigned long res, tmp;
259 unsigned long quotient;
260
261 tmp = jiffies;
262
263 quotient = cached_quotient;
264
265 if (last_jiffies != tmp) {
266 last_jiffies = tmp;
267 if (last_jiffies) {
268 unsigned long r0;
269 __asm__(".set push\n\t"
270 ".set mips3\n\t"
271 "lwu %0,%3\n\t"
272 "dsll32 %1,%2,0\n\t"
273 "or %1,%1,%0\n\t"
274 "ddivu $0,%1,%4\n\t"
275 "mflo %1\n\t"
276 "dsll32 %0,%5,0\n\t"
277 "or %0,%0,%6\n\t"
278 "ddivu $0,%0,%1\n\t"
279 "mflo %0\n\t"
280 ".set pop"
281 : "=&r" (quotient), "=&r" (r0)
282 : "r" (timerhi), "m" (timerlo),
283 "r" (tmp), "r" (USECS_PER_JIFFY),
284 "r" (USECS_PER_JIFFY_FRAC)
285 : "hi", "lo", GCC_REG_ACCUM);
286 cached_quotient = quotient;
287 }
288 }
289
290 /* Get last timer tick in absolute kernel time */
291 count = mips_hpt_read();
292
293 /* .. relative to previous jiffy (32 bits is enough) */
294 count -= timerlo;
295
296 __asm__("multu %1,%2"
297 : "=h" (res)
298 : "r" (count), "r" (quotient)
299 : "lo", GCC_REG_ACCUM);
300
301 /*
302 * Due to possible jiffies inconsistencies, we need to check
303 * the result so that we'll get a timer that is monotonic.
304 */
305 if (res >= USECS_PER_JIFFY)
306 res = USECS_PER_JIFFY - 1;
307
308 return res;
309}
310
311 128
312/* last time when xtime and rtc are sync'ed up */ 129/* last time when xtime and rtc are sync'ed up */
313static long last_rtc_update; 130static long last_rtc_update;
@@ -334,18 +151,10 @@ void local_timer_interrupt(int irq, void *dev_id)
334 */ 151 */
335irqreturn_t timer_interrupt(int irq, void *dev_id) 152irqreturn_t timer_interrupt(int irq, void *dev_id)
336{ 153{
337 unsigned long j;
338 unsigned int count;
339
340 write_seqlock(&xtime_lock); 154 write_seqlock(&xtime_lock);
341 155
342 count = mips_hpt_read();
343 mips_timer_ack(); 156 mips_timer_ack();
344 157
345 /* Update timerhi/timerlo for intra-jiffy calibration. */
346 timerhi += count < timerlo; /* Wrap around */
347 timerlo = count;
348
349 /* 158 /*
350 * call the generic timer interrupt handling 159 * call the generic timer interrupt handling
351 */ 160 */
@@ -368,47 +177,6 @@ irqreturn_t timer_interrupt(int irq, void *dev_id)
368 } 177 }
369 } 178 }
370 179
371 /*
372 * If jiffies has overflown in this timer_interrupt, we must
373 * update the timer[hi]/[lo] to make fast gettimeoffset funcs
374 * quotient calc still valid. -arca
375 *
376 * The first timer interrupt comes late as interrupts are
377 * enabled long after timers are initialized. Therefore the
378 * high precision timer is fast, leading to wrong gettimeoffset()
379 * calculations. We deal with it by setting it based on the
380 * number of its ticks between the second and the third interrupt.
381 * That is still somewhat imprecise, but it's a good estimate.
382 * --macro
383 */
384 j = jiffies;
385 if (j < 4) {
386 static unsigned int prev_count;
387 static int hpt_initialized;
388
389 switch (j) {
390 case 0:
391 timerhi = timerlo = 0;
392 mips_hpt_init(count);
393 break;
394 case 2:
395 prev_count = count;
396 break;
397 case 3:
398 if (!hpt_initialized) {
399 unsigned int c3 = 3 * (count - prev_count);
400
401 timerhi = 0;
402 timerlo = c3;
403 mips_hpt_init(count - c3);
404 hpt_initialized = 1;
405 }
406 break;
407 default:
408 break;
409 }
410 }
411
412 write_sequnlock(&xtime_lock); 180 write_sequnlock(&xtime_lock);
413 181
414 /* 182 /*
@@ -476,12 +244,11 @@ asmlinkage void ll_local_timer_interrupt(int irq)
476 * 1) board_time_init() - 244 * 1) board_time_init() -
477 * a) (optional) set up RTC routines, 245 * a) (optional) set up RTC routines,
478 * b) (optional) calibrate and set the mips_hpt_frequency 246 * b) (optional) calibrate and set the mips_hpt_frequency
479 * (only needed if you intend to use fixed_rate_gettimeoffset 247 * (only needed if you intend to use the cpu counter as timer interrupt
480 * or use the cpu counter as timer interrupt source) 248 * source)
481 * 2) setup xtime based on rtc_mips_get_time(). 249 * 2) setup xtime based on rtc_mips_get_time().
483 * 3) choose an appropriate gettimeoffset routine. 250 * 3) calculate a couple of cached variables for later use
484 * 4) calculate a couple of cached variables for later use 251 * 4) plat_timer_setup() -
484 * 5) plat_timer_setup() -
485 * a) (optional) over-write any choices made above by time_init(). 252 * a) (optional) over-write any choices made above by time_init().
486 * b) machine specific code should setup the timer irqaction. 253 * b) machine specific code should setup the timer irqaction.
487 * c) enable the timer interrupt 254 * c) enable the timer interrupt
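A hypothetical board hookup following the sequence above could look like this sketch; the myboard_* names and MYBOARD_TIMER_IRQ are invented for illustration and appear nowhere in this patch:

	/* 1b) counter rate known up front, so no calibration is needed;
	 * board_time_init is normally assigned in the board's early
	 * setup code.
	 */
	static void __init myboard_time_init(void)
	{
		mips_hpt_frequency = 200000000;	/* 200 MHz CPU counter */
	}

	/* 4b/4c) register and enable the timer interrupt */
	void __init plat_timer_setup(struct irqaction *irq)
	{
		setup_irq(MYBOARD_TIMER_IRQ, irq);
	}
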
@@ -499,8 +266,7 @@ static struct irqaction timer_irqaction = {
499 266
500static unsigned int __init calibrate_hpt(void) 267static unsigned int __init calibrate_hpt(void)
501{ 268{
502 u64 frequency; 269 cycle_t frequency, hpt_start, hpt_end, hpt_count, hz;
503 u32 hpt_start, hpt_end, hpt_count, hz;
504 270
505 const int loops = HZ / 10; 271 const int loops = HZ / 10;
506 int log_2_loops = 0; 272 int log_2_loops = 0;
@@ -526,20 +292,49 @@ static unsigned int __init calibrate_hpt(void)
526 * during the calculated number of periods between timer 292 * during the calculated number of periods between timer
527 * interrupts. 293 * interrupts.
528 */ 294 */
529 hpt_start = mips_hpt_read(); 295 hpt_start = clocksource_mips.read();
530 do { 296 do {
531 while (mips_timer_state()); 297 while (mips_timer_state());
532 while (!mips_timer_state()); 298 while (!mips_timer_state());
533 } while (--i); 299 } while (--i);
534 hpt_end = mips_hpt_read(); 300 hpt_end = clocksource_mips.read();
535 301
536 hpt_count = hpt_end - hpt_start; 302 hpt_count = (hpt_end - hpt_start) & clocksource_mips.mask;
537 hz = HZ; 303 hz = HZ;
538 frequency = (u64)hpt_count * (u64)hz; 304 frequency = hpt_count * hz;
539 305
540 return frequency >> log_2_loops; 306 return frequency >> log_2_loops;
541} 307}
542 308
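As a worked example of the arithmetic (numbers invented for illustration): with HZ = 1000 and the loop count rounded to 2^log_2_loops = 64 timer periods, a counter that advances by hpt_count = 64000000 across those periods gives frequency = 64000000 * 1000 >> 6 = 10^9, i.e. a 1 GHz timer.
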
309struct clocksource clocksource_mips = {
310 .name = "MIPS",
311 .mask = 0xffffffff,
312 .is_continuous = 1,
313};
314
315static void __init init_mips_clocksource(void)
316{
317 u64 temp;
318 u32 shift;
319
320 if (!mips_hpt_frequency || clocksource_mips.read == null_hpt_read)
321 return;
322
323 /* Calculate a somewhat reasonable rating value */
324 clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
325 /* Find a shift value */
326 for (shift = 32; shift > 0; shift--) {
327 temp = (u64) NSEC_PER_SEC << shift;
328 do_div(temp, mips_hpt_frequency);
329 if ((temp >> 32) == 0)
330 break;
331 }
332 clocksource_mips.shift = shift;
333 clocksource_mips.mult = (u32)temp;
334
335 clocksource_register(&clocksource_mips);
336}
337
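The registered mult/shift pair turns cycle deltas into nanoseconds with one multiply and one shift: ns = (cycles * mult) >> shift. A standalone sketch of the selection loop, outside the kernel:

	#include <stdint.h>

	/* Largest shift such that (NSEC_PER_SEC << shift) / freq
	 * still fits in 32 bits -- mirrors init_mips_clocksource().
	 */
	static void pick_mult_shift(uint32_t freq, uint32_t *mult,
				    uint32_t *shift)
	{
		uint64_t temp = 0;
		uint32_t s;

		for (s = 32; s > 0; s--) {
			temp = ((uint64_t) 1000000000 << s) / freq;
			if ((temp >> 32) == 0)
				break;
		}
		*shift = s;
		*mult = (uint32_t) temp;
	}

For mips_hpt_frequency = 100 MHz this settles on shift = 28 and mult = 2684354560, so a delta of 100 cycles converts to (100 * 2684354560) >> 28 = 1000 ns, and the rating works out to 200 + 100000000 / 10000000 = 210.
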
543void __init time_init(void) 338void __init time_init(void)
544{ 339{
545 if (board_time_init) 340 if (board_time_init)
@@ -555,59 +350,36 @@ void __init time_init(void)
555 -xtime.tv_sec, -xtime.tv_nsec); 350 -xtime.tv_sec, -xtime.tv_nsec);
556 351
557 /* Choose appropriate high precision timer routines. */ 352 /* Choose appropriate high precision timer routines. */
558 if (!cpu_has_counter && !mips_hpt_read) { 353 if (!cpu_has_counter && !clocksource_mips.read)
559 /* No high precision timer -- sorry. */ 354 /* No high precision timer -- sorry. */
560 mips_hpt_read = null_hpt_read; 355 clocksource_mips.read = null_hpt_read;
561 mips_hpt_init = null_hpt_init; 356 else if (!mips_hpt_frequency && !mips_timer_state) {
562 } else if (!mips_hpt_frequency && !mips_timer_state) {
563 /* A high precision timer of unknown frequency. */ 357 /* A high precision timer of unknown frequency. */
564 if (!mips_hpt_read) { 358 if (!clocksource_mips.read)
565 /* No external high precision timer -- use R4k. */ 359 /* No external high precision timer -- use R4k. */
566 mips_hpt_read = c0_hpt_read; 360 clocksource_mips.read = c0_hpt_read;
567 mips_hpt_init = c0_hpt_init;
568 }
569
570 if (cpu_has_mips32r1 || cpu_has_mips32r2 ||
571 (current_cpu_data.isa_level == MIPS_CPU_ISA_I) ||
572 (current_cpu_data.isa_level == MIPS_CPU_ISA_II))
573 /*
574 * We need to calibrate the counter but we don't have
575 * 64-bit division.
576 */
577 do_gettimeoffset = calibrate_div32_gettimeoffset;
578 else
579 /*
580 * We need to calibrate the counter but we *do* have
581 * 64-bit division.
582 */
583 do_gettimeoffset = calibrate_div64_gettimeoffset;
584 } else { 361 } else {
585 /* We know the counter frequency, or we can get it. */ 362 /* We know the counter frequency, or we can get it. */
586 if (!mips_hpt_read) { 363 if (!clocksource_mips.read) {
587 /* No external high precision timer -- use R4k. */ 364 /* No external high precision timer -- use R4k. */
588 mips_hpt_read = c0_hpt_read; 365 clocksource_mips.read = c0_hpt_read;
589 366
590 if (mips_timer_state) 367 if (!mips_timer_state) {
591 mips_hpt_init = c0_hpt_init;
592 else {
593 /* No external timer interrupt -- use R4k. */ 368 /* No external timer interrupt -- use R4k. */
594 mips_hpt_init = c0_hpt_timer_init;
595 mips_timer_ack = c0_timer_ack; 369 mips_timer_ack = c0_timer_ack;
370 /* Calculate cache parameters. */
371 cycles_per_jiffy =
372 (mips_hpt_frequency + HZ / 2) / HZ;
373 /*
374 * This sets up the high precision
375 * timer for the first interrupt.
376 */
377 c0_hpt_timer_init();
596 } 378 }
597 } 379 }
598 if (!mips_hpt_frequency) 380 if (!mips_hpt_frequency)
599 mips_hpt_frequency = calibrate_hpt(); 381 mips_hpt_frequency = calibrate_hpt();
600 382
601 do_gettimeoffset = fixed_rate_gettimeoffset;
602
603 /* Calculate cache parameters. */
604 cycles_per_jiffy = (mips_hpt_frequency + HZ / 2) / HZ;
605
606 /* sll32_usecs_per_cycle = 10^6 * 2^32 / mips_counter_freq */
607 do_div64_32(sll32_usecs_per_cycle,
608 1000000, mips_hpt_frequency / 2,
609 mips_hpt_frequency);
610
611 /* Report the high precision timer rate for a reference. */ 383 /* Report the high precision timer rate for a reference. */
612 printk("Using %u.%03u MHz high precision timer.\n", 384 printk("Using %u.%03u MHz high precision timer.\n",
613 ((mips_hpt_frequency + 500) / 1000) / 1000, 385 ((mips_hpt_frequency + 500) / 1000) / 1000,
@@ -618,9 +390,6 @@ void __init time_init(void)
618 /* No timer interrupt ack (e.g. i8254). */ 390 /* No timer interrupt ack (e.g. i8254). */
619 mips_timer_ack = null_timer_ack; 391 mips_timer_ack = null_timer_ack;
620 392
621 /* This sets up the high precision timer for the first interrupt. */
622 mips_hpt_init(mips_hpt_read());
623
624 /* 393 /*
625 * Call board specific timer interrupt setup. 394 * Call board specific timer interrupt setup.
626 * 395 *
@@ -633,6 +402,8 @@ void __init time_init(void)
633 * is not invoked accidentally. 402 * is not invoked accidentally.
634 */ 403 */
635 plat_timer_setup(&timer_irqaction); 404 plat_timer_setup(&timer_irqaction);
405
406 init_mips_clocksource();
636} 407}
637 408
638#define FEBRUARY 2 409#define FEBRUARY 2
diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
new file mode 100644
index 000000000000..660e44ed44d7
--- /dev/null
+++ b/arch/mips/kernel/topology.c
@@ -0,0 +1,29 @@
1#include <linux/cpu.h>
2#include <linux/cpumask.h>
3#include <linux/init.h>
4#include <linux/node.h>
5#include <linux/nodemask.h>
6#include <linux/percpu.h>
7
8static DEFINE_PER_CPU(struct cpu, cpu_devices);
9
10static int __init topology_init(void)
11{
12 int i, ret;
13
14#ifdef CONFIG_NUMA
15 for_each_online_node(i)
16 register_one_node(i);
17#endif /* CONFIG_NUMA */
18
19 for_each_present_cpu(i) {
20 ret = register_cpu(&per_cpu(cpu_devices, i), i);
21 if (ret)
22 printk(KERN_WARNING "topology_init: register_cpu %d "
23 "failed (%d)\n", i, ret);
24 }
25
26 return 0;
27}
28
29subsys_initcall(topology_init);
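Once this subsys initcall has run, every present CPU is visible under /sys/devices/system/cpu/ and, with CONFIG_NUMA, every online node under /sys/devices/system/node/; generic interfaces such as cpufreq attach to these device nodes.
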
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cce8313ec27d..2a932cada244 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -54,6 +54,8 @@ extern asmlinkage void handle_dbe(void);
54extern asmlinkage void handle_sys(void); 54extern asmlinkage void handle_sys(void);
55extern asmlinkage void handle_bp(void); 55extern asmlinkage void handle_bp(void);
56extern asmlinkage void handle_ri(void); 56extern asmlinkage void handle_ri(void);
57extern asmlinkage void handle_ri_rdhwr_vivt(void);
58extern asmlinkage void handle_ri_rdhwr(void);
57extern asmlinkage void handle_cpu(void); 59extern asmlinkage void handle_cpu(void);
58extern asmlinkage void handle_ov(void); 60extern asmlinkage void handle_ov(void);
59extern asmlinkage void handle_tr(void); 61extern asmlinkage void handle_tr(void);
@@ -397,19 +399,6 @@ asmlinkage void do_be(struct pt_regs *regs)
397 force_sig(SIGBUS, current); 399 force_sig(SIGBUS, current);
398} 400}
399 401
400static inline int get_insn_opcode(struct pt_regs *regs, unsigned int *opcode)
401{
402 unsigned int __user *epc;
403
404 epc = (unsigned int __user *) regs->cp0_epc +
405 ((regs->cp0_cause & CAUSEF_BD) != 0);
406 if (!get_user(*opcode, epc))
407 return 0;
408
409 force_sig(SIGSEGV, current);
410 return 1;
411}
412
413/* 402/*
414 * ll/sc emulation 403 * ll/sc emulation
415 */ 404 */
@@ -544,8 +533,8 @@ static inline int simulate_llsc(struct pt_regs *regs)
544{ 533{
545 unsigned int opcode; 534 unsigned int opcode;
546 535
547 if (unlikely(get_insn_opcode(regs, &opcode))) 536 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
548 return -EFAULT; 537 goto out_sigsegv;
549 538
550 if ((opcode & OPCODE) == LL) { 539 if ((opcode & OPCODE) == LL) {
551 simulate_ll(regs, opcode); 540 simulate_ll(regs, opcode);
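The open-coded get_insn_opcode() helper is replaced by a plain get_user() at exception_epc(regs), which yields the address of the faulting instruction itself; its logic is roughly (a sketch of the <asm/branch.h> helper):

	static inline unsigned long exception_epc(struct pt_regs *regs)
	{
		/* in a branch-delay slot the faulting instruction sits at EPC + 4 */
		if (!(regs->cp0_cause & CAUSEF_BD))
			return regs->cp0_epc;
		return regs->cp0_epc + 4;
	}

The SIGSEGV for an unreadable EPC is now raised at the callers' out_sigsegv labels, added in the following hunks, rather than inside the accessor.
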
@@ -557,6 +546,10 @@ static inline int simulate_llsc(struct pt_regs *regs)
557 } 546 }
558 547
559 return -EFAULT; /* Strange things going on ... */ 548 return -EFAULT; /* Strange things going on ... */
549
550out_sigsegv:
551 force_sig(SIGSEGV, current);
552 return -EFAULT;
560} 553}
561 554
562/* 555/*
@@ -569,8 +562,8 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
569 struct thread_info *ti = task_thread_info(current); 562 struct thread_info *ti = task_thread_info(current);
570 unsigned int opcode; 563 unsigned int opcode;
571 564
572 if (unlikely(get_insn_opcode(regs, &opcode))) 565 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
573 return -EFAULT; 566 goto out_sigsegv;
574 567
575 if (unlikely(compute_return_epc(regs))) 568 if (unlikely(compute_return_epc(regs)))
576 return -EFAULT; 569 return -EFAULT;
@@ -589,6 +582,10 @@ static inline int simulate_rdhwr(struct pt_regs *regs)
589 582
590 /* Not ours. */ 583 /* Not ours. */
591 return -EFAULT; 584 return -EFAULT;
585
586out_sigsegv:
587 force_sig(SIGSEGV, current);
588 return -EFAULT;
592} 589}
593 590
594asmlinkage void do_ov(struct pt_regs *regs) 591asmlinkage void do_ov(struct pt_regs *regs)
@@ -672,10 +669,8 @@ asmlinkage void do_bp(struct pt_regs *regs)
672 unsigned int opcode, bcode; 669 unsigned int opcode, bcode;
673 siginfo_t info; 670 siginfo_t info;
674 671
675 die_if_kernel("Break instruction in kernel code", regs); 672 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
676 673 goto out_sigsegv;
677 if (get_insn_opcode(regs, &opcode))
678 return;
679 674
680 /* 675 /*
681 * There is an ancient bug in the MIPS assemblers that the break 676
@@ -696,6 +691,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
696 switch (bcode) { 691 switch (bcode) {
697 case BRK_OVERFLOW << 10: 692 case BRK_OVERFLOW << 10:
698 case BRK_DIVZERO << 10: 693 case BRK_DIVZERO << 10:
694 die_if_kernel("Break instruction in kernel code", regs);
699 if (bcode == (BRK_DIVZERO << 10)) 695 if (bcode == (BRK_DIVZERO << 10))
700 info.si_code = FPE_INTDIV; 696 info.si_code = FPE_INTDIV;
701 else 697 else
@@ -705,9 +701,17 @@ asmlinkage void do_bp(struct pt_regs *regs)
705 info.si_addr = (void __user *) regs->cp0_epc; 701 info.si_addr = (void __user *) regs->cp0_epc;
706 force_sig_info(SIGFPE, &info, current); 702 force_sig_info(SIGFPE, &info, current);
707 break; 703 break;
704 case BRK_BUG:
705 die("Kernel bug detected", regs);
706 break;
708 default: 707 default:
708 die_if_kernel("Break instruction in kernel code", regs);
709 force_sig(SIGTRAP, current); 709 force_sig(SIGTRAP, current);
710 } 710 }
 711	return;
 712
 713out_sigsegv:
 714	force_sig(SIGSEGV, current);
711} 715}
712 715
713asmlinkage void do_tr(struct pt_regs *regs) 716asmlinkage void do_tr(struct pt_regs *regs)
@@ -715,10 +718,8 @@ asmlinkage void do_tr(struct pt_regs *regs)
715 unsigned int opcode, tcode = 0; 718 unsigned int opcode, tcode = 0;
716 siginfo_t info; 719 siginfo_t info;
717 720
718 die_if_kernel("Trap instruction in kernel code", regs); 721 if (get_user(opcode, (unsigned int __user *) exception_epc(regs)))
719 722 goto out_sigsegv;
720 if (get_insn_opcode(regs, &opcode))
721 return;
722 723
723 /* Immediate versions don't provide a code. */ 724 /* Immediate versions don't provide a code. */
724 if (!(opcode & OPCODE)) 725 if (!(opcode & OPCODE))
@@ -733,6 +734,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
733 switch (tcode) { 734 switch (tcode) {
734 case BRK_OVERFLOW: 735 case BRK_OVERFLOW:
735 case BRK_DIVZERO: 736 case BRK_DIVZERO:
737 die_if_kernel("Trap instruction in kernel code", regs);
736 if (tcode == BRK_DIVZERO) 738 if (tcode == BRK_DIVZERO)
737 info.si_code = FPE_INTDIV; 739 info.si_code = FPE_INTDIV;
738 else 740 else
@@ -742,9 +744,17 @@ asmlinkage void do_tr(struct pt_regs *regs)
742 info.si_addr = (void __user *) regs->cp0_epc; 744 info.si_addr = (void __user *) regs->cp0_epc;
743 force_sig_info(SIGFPE, &info, current); 745 force_sig_info(SIGFPE, &info, current);
744 break; 746 break;
747 case BRK_BUG:
748 die("Kernel bug detected", regs);
749 break;
745 default: 750 default:
751 die_if_kernel("Trap instruction in kernel code", regs);
746 force_sig(SIGTRAP, current); 752 force_sig(SIGTRAP, current);
747 } 753 }
754
755out_sigsegv:
756 force_sig(SIGSEGV, current);
748} 757}
749 758
750asmlinkage void do_ri(struct pt_regs *regs) 759asmlinkage void do_ri(struct pt_regs *regs)
@@ -1111,7 +1120,7 @@ static struct shadow_registers {
1111static void mips_srs_init(void) 1120static void mips_srs_init(void)
1112{ 1121{
1113 shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1; 1122 shadow_registers.sr_supported = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
1114 printk(KERN_INFO "%d MIPSR2 register sets available\n", 1123 printk(KERN_INFO "%ld MIPSR2 register sets available\n",
1115 shadow_registers.sr_supported); 1124 shadow_registers.sr_supported);
1116 shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */ 1125 shadow_registers.sr_allocated = 1; /* Set 0 used by kernel */
1117} 1126}
@@ -1423,6 +1432,15 @@ void __init set_uncached_handler (unsigned long offset, void *addr, unsigned lon
1423 memcpy((void *)(uncached_ebase + offset), addr, size); 1432 memcpy((void *)(uncached_ebase + offset), addr, size);
1424} 1433}
1425 1434
1435static int __initdata rdhwr_noopt;
1436static int __init set_rdhwr_noopt(char *str)
1437{
1438 rdhwr_noopt = 1;
1439 return 1;
1440}
1441
1442__setup("rdhwr_noopt", set_rdhwr_noopt);
1443
1426void __init trap_init(void) 1444void __init trap_init(void)
1427{ 1445{
1428 extern char except_vec3_generic, except_vec3_r4000; 1446 extern char except_vec3_generic, except_vec3_r4000;
@@ -1502,7 +1520,9 @@ void __init trap_init(void)
1502 1520
1503 set_except_vector(8, handle_sys); 1521 set_except_vector(8, handle_sys);
1504 set_except_vector(9, handle_bp); 1522 set_except_vector(9, handle_bp);
1505 set_except_vector(10, handle_ri); 1523 set_except_vector(10, rdhwr_noopt ? handle_ri :
1524 (cpu_has_vtag_icache ?
1525 handle_ri_rdhwr_vivt : handle_ri_rdhwr));
1506 set_except_vector(11, handle_cpu); 1526 set_except_vector(11, handle_cpu);
1507 set_except_vector(12, handle_ov); 1527 set_except_vector(12, handle_ov);
1508 set_except_vector(13, handle_tr); 1528 set_except_vector(13, handle_tr);
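The new handle_ri_rdhwr fast path emulates the RDHWR instruction (used by userland, e.g. to fetch the TLS thread pointer) straight from the exception vector, with a separate handle_ri_rdhwr_vivt variant for CPUs with virtually tagged instruction caches; booting with rdhwr_noopt on the kernel command line falls back to the generic handle_ri handler.
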
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 0bb9cd889456..cecff24cc972 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -50,6 +50,16 @@ SECTIONS
50 /* writeable */ 50 /* writeable */
51 .data : { /* Data */ 51 .data : { /* Data */
52 . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ 52 . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
53 /*
54 * This ALIGN is needed as a workaround for a gcc bug up to 4.1 which
55 * limits the maximum alignment to at most 32kB and results in the following
56 * warning:
57 *
58 * CC arch/mips/kernel/init_task.o
59 * arch/mips/kernel/init_task.c:30: warning: alignment of ‘init_thread_union’
60 * is greater than maximum object file alignment. Using 32768
61 */
62 . = ALIGN(_PAGE_SIZE);
53 *(.data.init_task) 63 *(.data.init_task)
54 64
55 *(.data) 65 *(.data)
@@ -91,13 +101,7 @@ SECTIONS
91 101
92 __initcall_start = .; 102 __initcall_start = .;
93 .initcall.init : { 103 .initcall.init : {
94 *(.initcall1.init) 104 INITCALLS
95 *(.initcall2.init)
96 *(.initcall3.init)
97 *(.initcall4.init)
98 *(.initcall5.init)
99 *(.initcall6.init)
100 *(.initcall7.init)
101 } 105 }
102 __initcall_end = .; 106 __initcall_end = .;
103 107
@@ -108,6 +112,7 @@ SECTIONS
108 /* .exit.text is discarded at runtime, not link time, to deal with 112 /* .exit.text is discarded at runtime, not link time, to deal with
109 references from .rodata */ 113 references from .rodata */
110 .exit.text : { *(.exit.text) } 114 .exit.text : { *(.exit.text) }
115 .exit.data : { *(.exit.data) }
111 . = ALIGN(_PAGE_SIZE); 116 . = ALIGN(_PAGE_SIZE);
112 __initramfs_start = .; 117 __initramfs_start = .;
113 .init.ramfs : { *(.init.ramfs) } 118 .init.ramfs : { *(.init.ramfs) }
@@ -135,7 +140,6 @@ SECTIONS
135 140
136 /* Sections to be discarded */ 141 /* Sections to be discarded */
137 /DISCARD/ : { 142 /DISCARD/ : {
138 *(.exit.data)
139 *(.exitcall.exit) 143 *(.exitcall.exit)
140 144
141 /* ABI crap starts here */ 145 /* ABI crap starts here */
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 51ddd2166898..666bef484dcb 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -1179,7 +1179,7 @@ static ssize_t vpe_write(struct file *file, const char __user * buffer,
1179 size_t ret = count; 1179 size_t ret = count;
1180 struct vpe *v; 1180 struct vpe *v;
1181 1181
1182 minor = iminor(file->f_dentry->d_inode); 1182 minor = iminor(file->f_path.dentry->d_inode);
1183 if ((v = get_vpe(minor)) == NULL) 1183 if ((v = get_vpe(minor)) == NULL)
1184 return -ENODEV; 1184 return -ENODEV;
1185 1185