author		Jiri Kosina <jkosina@suse.cz>	2011-04-26 04:22:15 -0400
committer	Jiri Kosina <jkosina@suse.cz>	2011-04-26 04:22:59 -0400
commit		07f9479a40cc778bc1462ada11f95b01360ae4ff (patch)
tree		0676cf38df3844004bb3ebfd99dfa67a4a8998f5 /arch/mips/kernel
parent		9d5e6bdb3013acfb311ab407eeca0b6a6a3dedbf (diff)
parent		cd2e49e90f1cae7726c9a2c54488d881d7f1cd1c (diff)
Merge branch 'master' into for-next
Fast-forwarded to current state of Linus' tree as there are patches to be applied for files that didn't exist on the old branch.
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--	arch/mips/kernel/cpu-bugs64.c		2
-rw-r--r--	arch/mips/kernel/i8259.c		43
-rw-r--r--	arch/mips/kernel/irq-gic.c		45
-rw-r--r--	arch/mips/kernel/irq-gt641xx.c		30
-rw-r--r--	arch/mips/kernel/irq-msc01.c		63
-rw-r--r--	arch/mips/kernel/irq-rm7000.c		20
-rw-r--r--	arch/mips/kernel/irq-rm9000.c		53
-rw-r--r--	arch/mips/kernel/irq.c			51
-rw-r--r--	arch/mips/kernel/irq_cpu.c		50
-rw-r--r--	arch/mips/kernel/irq_txx9.c		32
-rw-r--r--	arch/mips/kernel/perf_event_mipsxx.c	2
-rw-r--r--	arch/mips/kernel/process.c		2
-rw-r--r--	arch/mips/kernel/scall32-o32.S		4
-rw-r--r--	arch/mips/kernel/scall64-64.S		4
-rw-r--r--	arch/mips/kernel/scall64-n32.S		4
-rw-r--r--	arch/mips/kernel/scall64-o32.S		4
-rw-r--r--	arch/mips/kernel/smp-mt.c		2
-rw-r--r--	arch/mips/kernel/smtc.c			15
-rw-r--r--	arch/mips/kernel/time.c			2
-rw-r--r--	arch/mips/kernel/vpe.c			2
20 files changed, 195 insertions, 235 deletions
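Nearly every hunk in the irq-related files below applies the same genirq conversion pattern: irq_chip callbacks that used to take a raw unsigned int irq now receive a struct irq_data * and read the number from d->irq, the chip methods gain an irq_ prefix (.mask becomes .irq_mask, the obsolete .end hook disappears), and the set_irq_*() helpers become irq_set_*(). The following is only a minimal sketch of the converted shape; the controller name, register hooks and IRQ base are hypothetical and not taken from this tree:

#include <linux/init.h>
#include <linux/irq.h>	/* struct irq_chip, struct irq_data, irq_set_chip_and_handler() */

#define MY_IRQ_BASE	0	/* illustrative base, not from this patch */

static void my_chip_mask(struct irq_data *d)
{
	/* The hardware IRQ number is now derived from irq_data. */
	unsigned int irq = d->irq - MY_IRQ_BASE;

	/* A real driver would poke its mask register for "irq" here. */
	(void)irq;
}

static void my_chip_unmask(struct irq_data *d)
{
	unsigned int irq = d->irq - MY_IRQ_BASE;

	/* A real driver would poke its unmask register for "irq" here. */
	(void)irq;
}

static struct irq_chip my_chip = {
	.name		= "MY-CHIP",
	.irq_mask	= my_chip_mask,		/* was .mask */
	.irq_mask_ack	= my_chip_mask,		/* was .mask_ack */
	.irq_unmask	= my_chip_unmask,	/* was .unmask */
	.irq_eoi	= my_chip_unmask,	/* was .eoi; the old .end hook is gone */
};

static void __init my_chip_irq_init(void)
{
	int i;

	/* was set_irq_chip_and_handler() */
	for (i = MY_IRQ_BASE; i < MY_IRQ_BASE + 8; i++)
		irq_set_chip_and_handler(i, &my_chip, handle_level_irq);
}

The i8259, GIC, GT641xx, MSC01, RM7000/RM9000, CPU and TXx9 chips below are all converted to exactly this shape.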
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index b8bb8ba60869..f305ca14351b 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -73,7 +73,7 @@ static inline void mult_sh_align_mod(long *v1, long *v2, long *w,
 	: "0" (5), "1" (8), "2" (5));
 	align_mod(align, mod);
 	/*
-	 * The trailing nop is needed to fullfill the two-instruction
+	 * The trailing nop is needed to fulfill the two-instruction
 	 * requirement between reading hi/lo and staring a mult/div.
 	 * Leaving it out may cause gas insert a nop itself breaking
 	 * the desired alignment of the next chunk.
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index c58176cc796b..c018696765d4 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -31,19 +31,19 @@
 
 static int i8259A_auto_eoi = -1;
 DEFINE_RAW_SPINLOCK(i8259A_lock);
-static void disable_8259A_irq(unsigned int irq);
-static void enable_8259A_irq(unsigned int irq);
-static void mask_and_ack_8259A(unsigned int irq);
+static void disable_8259A_irq(struct irq_data *d);
+static void enable_8259A_irq(struct irq_data *d);
+static void mask_and_ack_8259A(struct irq_data *d);
 static void init_8259A(int auto_eoi);
 
 static struct irq_chip i8259A_chip = {
 	.name = "XT-PIC",
-	.mask = disable_8259A_irq,
-	.disable = disable_8259A_irq,
-	.unmask = enable_8259A_irq,
-	.mask_ack = mask_and_ack_8259A,
+	.irq_mask = disable_8259A_irq,
+	.irq_disable = disable_8259A_irq,
+	.irq_unmask = enable_8259A_irq,
+	.irq_mask_ack = mask_and_ack_8259A,
 #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
-	.set_affinity = plat_set_irq_affinity,
+	.irq_set_affinity = plat_set_irq_affinity,
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
 };
 
@@ -59,12 +59,11 @@ static unsigned int cached_irq_mask = 0xffff;
 #define cached_master_mask	(cached_irq_mask)
 #define cached_slave_mask	(cached_irq_mask >> 8)
 
-static void disable_8259A_irq(unsigned int irq)
+static void disable_8259A_irq(struct irq_data *d)
 {
-	unsigned int mask;
+	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
 	unsigned long flags;
 
-	irq -= I8259A_IRQ_BASE;
 	mask = 1 << irq;
 	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask |= mask;
@@ -75,12 +74,11 @@ static void disable_8259A_irq(unsigned int irq)
 	raw_spin_unlock_irqrestore(&i8259A_lock, flags);
 }
 
-static void enable_8259A_irq(unsigned int irq)
+static void enable_8259A_irq(struct irq_data *d)
 {
-	unsigned int mask;
+	unsigned int mask, irq = d->irq - I8259A_IRQ_BASE;
 	unsigned long flags;
 
-	irq -= I8259A_IRQ_BASE;
 	mask = ~(1 << irq);
 	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	cached_irq_mask &= mask;
@@ -112,7 +110,7 @@ int i8259A_irq_pending(unsigned int irq)
 void make_8259A_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-	set_irq_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
+	irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
 	enable_irq(irq);
 }
 
@@ -145,12 +143,11 @@ static inline int i8259A_irq_real(unsigned int irq)
  * first, _then_ send the EOI, and the order of EOI
  * to the two 8259s is important!
  */
-static void mask_and_ack_8259A(unsigned int irq)
+static void mask_and_ack_8259A(struct irq_data *d)
 {
-	unsigned int irqmask;
+	unsigned int irqmask, irq = d->irq - I8259A_IRQ_BASE;
 	unsigned long flags;
 
-	irq -= I8259A_IRQ_BASE;
 	irqmask = 1 << irq;
 	raw_spin_lock_irqsave(&i8259A_lock, flags);
 	/*
@@ -290,9 +287,9 @@ static void init_8259A(int auto_eoi)
 		 * In AEOI mode we just have to mask the interrupt
 		 * when acking.
 		 */
-		i8259A_chip.mask_ack = disable_8259A_irq;
+		i8259A_chip.irq_mask_ack = disable_8259A_irq;
 	else
-		i8259A_chip.mask_ack = mask_and_ack_8259A;
+		i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
 
 	udelay(100);	/* wait for 8259A to initialize */
 
@@ -339,8 +336,8 @@ void __init init_i8259_irqs(void)
 	init_8259A(0);
 
 	for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
-		set_irq_chip_and_handler(i, &i8259A_chip, handle_level_irq);
-		set_irq_probe(i);
+		irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
+		irq_set_probe(i);
 	}
 
 	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
index 1774271af848..0c527f652196 100644
--- a/arch/mips/kernel/irq-gic.c
+++ b/arch/mips/kernel/irq-gic.c
@@ -87,17 +87,10 @@ unsigned int gic_get_int(void)
 	return i;
 }
 
-static unsigned int gic_irq_startup(unsigned int irq)
+static void gic_irq_ack(struct irq_data *d)
 {
-	irq -= _irqbase;
-	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
-	GIC_SET_INTR_MASK(irq);
-	return 0;
-}
+	unsigned int irq = d->irq - _irqbase;
 
-static void gic_irq_ack(unsigned int irq)
-{
-	irq -= _irqbase;
 	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
 	GIC_CLR_INTR_MASK(irq);
 
@@ -105,16 +98,16 @@ static void gic_irq_ack(unsigned int irq)
 	GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
 }
 
-static void gic_mask_irq(unsigned int irq)
+static void gic_mask_irq(struct irq_data *d)
 {
-	irq -= _irqbase;
+	unsigned int irq = d->irq - _irqbase;
 	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
 	GIC_CLR_INTR_MASK(irq);
 }
 
-static void gic_unmask_irq(unsigned int irq)
+static void gic_unmask_irq(struct irq_data *d)
 {
-	irq -= _irqbase;
+	unsigned int irq = d->irq - _irqbase;
 	pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
 	GIC_SET_INTR_MASK(irq);
 }
@@ -123,13 +116,14 @@ static void gic_unmask_irq(unsigned int irq)
 
 static DEFINE_SPINLOCK(gic_lock);
 
-static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
+			    bool force)
 {
+	unsigned int irq = d->irq - _irqbase;
 	cpumask_t tmp = CPU_MASK_NONE;
 	unsigned long flags;
 	int i;
 
-	irq -= _irqbase;
 	pr_debug("%s(%d) called\n", __func__, irq);
 	cpumask_and(&tmp, cpumask, cpu_online_mask);
 	if (cpus_empty(tmp))
@@ -147,23 +141,22 @@ static int gic_set_affinity(unsigned int irq, const struct cpumask *cpumask)
 		set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
 
 	}
-	cpumask_copy(irq_desc[irq].affinity, cpumask);
+	cpumask_copy(d->affinity, cpumask);
 	spin_unlock_irqrestore(&gic_lock, flags);
 
-	return 0;
+	return IRQ_SET_MASK_OK_NOCOPY;
 }
 #endif
 
 static struct irq_chip gic_irq_controller = {
 	.name = "MIPS GIC",
-	.startup = gic_irq_startup,
-	.ack = gic_irq_ack,
-	.mask = gic_mask_irq,
-	.mask_ack = gic_mask_irq,
-	.unmask = gic_unmask_irq,
-	.eoi = gic_unmask_irq,
+	.irq_ack = gic_irq_ack,
+	.irq_mask = gic_mask_irq,
+	.irq_mask_ack = gic_mask_irq,
+	.irq_unmask = gic_unmask_irq,
+	.irq_eoi = gic_unmask_irq,
 #ifdef CONFIG_SMP
-	.set_affinity = gic_set_affinity,
+	.irq_set_affinity = gic_set_affinity,
 #endif
 };
 
@@ -236,7 +229,7 @@ static void __init gic_basic_init(int numintrs, int numvpes,
 	vpe_local_setup(numvpes);
 
 	for (i = _irqbase; i < (_irqbase + numintrs); i++)
-		set_irq_chip(i, &gic_irq_controller);
+		irq_set_chip(i, &gic_irq_controller);
 }
 
 void __init gic_init(unsigned long gic_base_addr,
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
index 42ef81461bfc..883fc6cead36 100644
--- a/arch/mips/kernel/irq-gt641xx.c
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -29,64 +29,64 @@
 
 static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
 
-static void ack_gt641xx_irq(unsigned int irq)
+static void ack_gt641xx_irq(struct irq_data *d)
 {
 	unsigned long flags;
 	u32 cause;
 
 	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
 	cause = GT_READ(GT_INTRCAUSE_OFS);
-	cause &= ~GT641XX_IRQ_TO_BIT(irq);
+	cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRCAUSE_OFS, cause);
 	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }
 
-static void mask_gt641xx_irq(unsigned int irq)
+static void mask_gt641xx_irq(struct irq_data *d)
 {
 	unsigned long flags;
 	u32 mask;
 
 	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
 	mask = GT_READ(GT_INTRMASK_OFS);
-	mask &= ~GT641XX_IRQ_TO_BIT(irq);
+	mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRMASK_OFS, mask);
 	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }
 
-static void mask_ack_gt641xx_irq(unsigned int irq)
+static void mask_ack_gt641xx_irq(struct irq_data *d)
 {
 	unsigned long flags;
 	u32 cause, mask;
 
 	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
 	mask = GT_READ(GT_INTRMASK_OFS);
-	mask &= ~GT641XX_IRQ_TO_BIT(irq);
+	mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRMASK_OFS, mask);
 
 	cause = GT_READ(GT_INTRCAUSE_OFS);
-	cause &= ~GT641XX_IRQ_TO_BIT(irq);
+	cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRCAUSE_OFS, cause);
 	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }
 
-static void unmask_gt641xx_irq(unsigned int irq)
+static void unmask_gt641xx_irq(struct irq_data *d)
 {
 	unsigned long flags;
 	u32 mask;
 
 	raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
 	mask = GT_READ(GT_INTRMASK_OFS);
-	mask |= GT641XX_IRQ_TO_BIT(irq);
+	mask |= GT641XX_IRQ_TO_BIT(d->irq);
 	GT_WRITE(GT_INTRMASK_OFS, mask);
 	raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
 }
 
 static struct irq_chip gt641xx_irq_chip = {
 	.name = "GT641xx",
-	.ack = ack_gt641xx_irq,
-	.mask = mask_gt641xx_irq,
-	.mask_ack = mask_ack_gt641xx_irq,
-	.unmask = unmask_gt641xx_irq,
+	.irq_ack = ack_gt641xx_irq,
+	.irq_mask = mask_gt641xx_irq,
+	.irq_mask_ack = mask_ack_gt641xx_irq,
+	.irq_unmask = unmask_gt641xx_irq,
 };
 
 void gt641xx_irq_dispatch(void)
@@ -126,6 +126,6 @@ void __init gt641xx_irq_init(void)
 	 * bit31: logical or of bits[25:1].
 	 */
 	for (i = 1; i < 30; i++)
-		set_irq_chip_and_handler(GT641XX_IRQ_BASE + i,
+		irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
 					 &gt641xx_irq_chip, handle_level_irq);
 }
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 6a8cd28133d5..0c6afeed89d2 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -28,8 +28,10 @@ static unsigned long _icctrl_msc;
 static unsigned int irq_base;
 
 /* mask off an interrupt */
-static inline void mask_msc_irq(unsigned int irq)
+static inline void mask_msc_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < (irq_base + 32))
 		MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
 	else
@@ -37,8 +39,10 @@ static inline void mask_msc_irq(unsigned int irq)
 }
 
 /* unmask an interrupt */
-static inline void unmask_msc_irq(unsigned int irq)
+static inline void unmask_msc_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
+
 	if (irq < (irq_base + 32))
 		MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
 	else
@@ -48,9 +52,11 @@ static inline void unmask_msc_irq(unsigned int irq)
 /*
  * Masks and ACKs an IRQ
  */
-static void level_mask_and_ack_msc_irq(unsigned int irq)
+static void level_mask_and_ack_msc_irq(struct irq_data *d)
 {
-	mask_msc_irq(irq);
+	unsigned int irq = d->irq;
+
+	mask_msc_irq(d);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
 	/* This actually needs to be a call into platform code */
@@ -60,9 +66,11 @@ static void level_mask_and_ack_msc_irq(unsigned int irq)
 /*
  * Masks and ACKs an IRQ
  */
-static void edge_mask_and_ack_msc_irq(unsigned int irq)
+static void edge_mask_and_ack_msc_irq(struct irq_data *d)
 {
-	mask_msc_irq(irq);
+	unsigned int irq = d->irq;
+
+	mask_msc_irq(d);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
 	else {
@@ -75,15 +83,6 @@ static void edge_mask_and_ack_msc_irq(unsigned int irq)
 }
 
 /*
- * End IRQ processing
- */
-static void end_msc_irq(unsigned int irq)
-{
-	if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
-		unmask_msc_irq(irq);
-}
-
-/*
  * Interrupt handler for interrupts coming from SOC-it.
  */
 void ll_msc_irq(void)
@@ -107,22 +106,20 @@ static void msc_bind_eic_interrupt(int irq, int set)
 
 static struct irq_chip msc_levelirq_type = {
 	.name = "SOC-it-Level",
-	.ack = level_mask_and_ack_msc_irq,
-	.mask = mask_msc_irq,
-	.mask_ack = level_mask_and_ack_msc_irq,
-	.unmask = unmask_msc_irq,
-	.eoi = unmask_msc_irq,
-	.end = end_msc_irq,
+	.irq_ack = level_mask_and_ack_msc_irq,
+	.irq_mask = mask_msc_irq,
+	.irq_mask_ack = level_mask_and_ack_msc_irq,
+	.irq_unmask = unmask_msc_irq,
+	.irq_eoi = unmask_msc_irq,
 };
 
 static struct irq_chip msc_edgeirq_type = {
 	.name = "SOC-it-Edge",
-	.ack = edge_mask_and_ack_msc_irq,
-	.mask = mask_msc_irq,
-	.mask_ack = edge_mask_and_ack_msc_irq,
-	.unmask = unmask_msc_irq,
-	.eoi = unmask_msc_irq,
-	.end = end_msc_irq,
+	.irq_ack = edge_mask_and_ack_msc_irq,
+	.irq_mask = mask_msc_irq,
+	.irq_mask_ack = edge_mask_and_ack_msc_irq,
+	.irq_unmask = unmask_msc_irq,
+	.irq_eoi = unmask_msc_irq,
 };
 
 
@@ -140,16 +137,20 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
 
 	switch (imp->im_type) {
 	case MSC01_IRQ_EDGE:
-		set_irq_chip_and_handler_name(irqbase + n,
-			&msc_edgeirq_type, handle_edge_irq, "edge");
+		irq_set_chip_and_handler_name(irqbase + n,
+					      &msc_edgeirq_type,
+					      handle_edge_irq,
+					      "edge");
 		if (cpu_has_veic)
 			MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
 		else
 			MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
 		break;
 	case MSC01_IRQ_LEVEL:
-		set_irq_chip_and_handler_name(irqbase+n,
-			&msc_levelirq_type, handle_level_irq, "level");
+		irq_set_chip_and_handler_name(irqbase + n,
+					      &msc_levelirq_type,
+					      handle_level_irq,
+					      "level");
 		if (cpu_has_veic)
 			MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
 		else
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c
index 9731e8b47862..a8a8977d5887 100644
--- a/arch/mips/kernel/irq-rm7000.c
+++ b/arch/mips/kernel/irq-rm7000.c
@@ -18,23 +18,23 @@
 #include <asm/mipsregs.h>
 #include <asm/system.h>
 
-static inline void unmask_rm7k_irq(unsigned int irq)
+static inline void unmask_rm7k_irq(struct irq_data *d)
 {
-	set_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
+	set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
 }
 
-static inline void mask_rm7k_irq(unsigned int irq)
+static inline void mask_rm7k_irq(struct irq_data *d)
 {
-	clear_c0_intcontrol(0x100 << (irq - RM7K_CPU_IRQ_BASE));
+	clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE));
 }
 
 static struct irq_chip rm7k_irq_controller = {
 	.name = "RM7000",
-	.ack = mask_rm7k_irq,
-	.mask = mask_rm7k_irq,
-	.mask_ack = mask_rm7k_irq,
-	.unmask = unmask_rm7k_irq,
-	.eoi = unmask_rm7k_irq
+	.irq_ack = mask_rm7k_irq,
+	.irq_mask = mask_rm7k_irq,
+	.irq_mask_ack = mask_rm7k_irq,
+	.irq_unmask = unmask_rm7k_irq,
+	.irq_eoi = unmask_rm7k_irq
 };
 
 void __init rm7k_cpu_irq_init(void)
@@ -45,6 +45,6 @@ void __init rm7k_cpu_irq_init(void)
 	clear_c0_intcontrol(0x00000f00);	/* Mask all */
 
 	for (i = base; i < base + 4; i++)
-		set_irq_chip_and_handler(i, &rm7k_irq_controller,
+		irq_set_chip_and_handler(i, &rm7k_irq_controller,
 					 handle_percpu_irq);
 }
diff --git a/arch/mips/kernel/irq-rm9000.c b/arch/mips/kernel/irq-rm9000.c
index b7e4025b58a8..38874a4b9255 100644
--- a/arch/mips/kernel/irq-rm9000.c
+++ b/arch/mips/kernel/irq-rm9000.c
@@ -19,22 +19,22 @@
 #include <asm/mipsregs.h>
 #include <asm/system.h>
 
-static inline void unmask_rm9k_irq(unsigned int irq)
+static inline void unmask_rm9k_irq(struct irq_data *d)
 {
-	set_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
+	set_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
 }
 
-static inline void mask_rm9k_irq(unsigned int irq)
+static inline void mask_rm9k_irq(struct irq_data *d)
 {
-	clear_c0_intcontrol(0x1000 << (irq - RM9K_CPU_IRQ_BASE));
+	clear_c0_intcontrol(0x1000 << (d->irq - RM9K_CPU_IRQ_BASE));
 }
 
-static inline void rm9k_cpu_irq_enable(unsigned int irq)
+static inline void rm9k_cpu_irq_enable(struct irq_data *d)
 {
 	unsigned long flags;
 
 	local_irq_save(flags);
-	unmask_rm9k_irq(irq);
+	unmask_rm9k_irq(d);
 	local_irq_restore(flags);
 }
 
@@ -43,50 +43,47 @@ static inline void rm9k_cpu_irq_enable(unsigned int irq)
  */
 static void local_rm9k_perfcounter_irq_startup(void *args)
 {
-	unsigned int irq = (unsigned int) args;
-
-	rm9k_cpu_irq_enable(irq);
+	rm9k_cpu_irq_enable(args);
 }
 
-static unsigned int rm9k_perfcounter_irq_startup(unsigned int irq)
+static unsigned int rm9k_perfcounter_irq_startup(struct irq_data *d)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_startup, (void *) irq, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_startup, d, 1);
 
 	return 0;
 }
 
 static void local_rm9k_perfcounter_irq_shutdown(void *args)
 {
-	unsigned int irq = (unsigned int) args;
 	unsigned long flags;
 
 	local_irq_save(flags);
-	mask_rm9k_irq(irq);
+	mask_rm9k_irq(args);
 	local_irq_restore(flags);
 }
 
-static void rm9k_perfcounter_irq_shutdown(unsigned int irq)
+static void rm9k_perfcounter_irq_shutdown(struct irq_data *d)
 {
-	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, (void *) irq, 1);
+	on_each_cpu(local_rm9k_perfcounter_irq_shutdown, d, 1);
 }
 
 static struct irq_chip rm9k_irq_controller = {
 	.name = "RM9000",
-	.ack = mask_rm9k_irq,
-	.mask = mask_rm9k_irq,
-	.mask_ack = mask_rm9k_irq,
-	.unmask = unmask_rm9k_irq,
-	.eoi = unmask_rm9k_irq
+	.irq_ack = mask_rm9k_irq,
+	.irq_mask = mask_rm9k_irq,
+	.irq_mask_ack = mask_rm9k_irq,
+	.irq_unmask = unmask_rm9k_irq,
+	.irq_eoi = unmask_rm9k_irq
 };
 
 static struct irq_chip rm9k_perfcounter_irq = {
 	.name = "RM9000",
-	.startup = rm9k_perfcounter_irq_startup,
-	.shutdown = rm9k_perfcounter_irq_shutdown,
-	.ack = mask_rm9k_irq,
-	.mask = mask_rm9k_irq,
-	.mask_ack = mask_rm9k_irq,
-	.unmask = unmask_rm9k_irq,
+	.irq_startup = rm9k_perfcounter_irq_startup,
+	.irq_shutdown = rm9k_perfcounter_irq_shutdown,
+	.irq_ack = mask_rm9k_irq,
+	.irq_mask = mask_rm9k_irq,
+	.irq_mask_ack = mask_rm9k_irq,
+	.irq_unmask = unmask_rm9k_irq,
 };
 
 unsigned int rm9000_perfcount_irq;
@@ -101,10 +98,10 @@ void __init rm9k_cpu_irq_init(void)
 	clear_c0_intcontrol(0x0000f000);	/* Mask all */
 
 	for (i = base; i < base + 4; i++)
-		set_irq_chip_and_handler(i, &rm9k_irq_controller,
+		irq_set_chip_and_handler(i, &rm9k_irq_controller,
 					 handle_level_irq);
 
 	rm9000_perfcount_irq = base + 1;
-	set_irq_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
+	irq_set_chip_and_handler(rm9000_perfcount_irq, &rm9k_perfcounter_irq,
 				 handle_percpu_irq);
 }
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 4f93db58a79e..9b734d74ae8e 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -81,48 +81,9 @@ void ack_bad_irq(unsigned int irq)
 
 atomic_t irq_err_count;
 
-/*
- * Generic, controller-independent functions:
- */
-
-int show_interrupts(struct seq_file *p, void *v)
+int arch_show_interrupts(struct seq_file *p, int prec)
 {
-	int i = *(loff_t *) v, j;
-	struct irqaction * action;
-	unsigned long flags;
-
-	if (i == 0) {
-		seq_printf(p, " ");
-		for_each_online_cpu(j)
-			seq_printf(p, "CPU%d ", j);
-		seq_putc(p, '\n');
-	}
-
-	if (i < NR_IRQS) {
-		raw_spin_lock_irqsave(&irq_desc[i].lock, flags);
-		action = irq_desc[i].action;
-		if (!action)
-			goto skip;
-		seq_printf(p, "%3d: ", i);
-#ifndef CONFIG_SMP
-		seq_printf(p, "%10u ", kstat_irqs(i));
-#else
-		for_each_online_cpu(j)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
-#endif
-		seq_printf(p, " %14s", irq_desc[i].chip->name);
-		seq_printf(p, " %s", action->name);
-
-		for (action=action->next; action; action = action->next)
-			seq_printf(p, ", %s", action->name);
-
-		seq_putc(p, '\n');
-skip:
-		raw_spin_unlock_irqrestore(&irq_desc[i].lock, flags);
-	} else if (i == NR_IRQS) {
-		seq_putc(p, '\n');
-		seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
-	}
+	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 	return 0;
 }
 
@@ -141,7 +102,7 @@ void __init init_IRQ(void)
 #endif
 
 	for (i = 0; i < NR_IRQS; i++)
-		set_irq_noprobe(i);
+		irq_set_noprobe(i);
 
 	arch_init_irq();
 
@@ -183,8 +144,8 @@ void __irq_entry do_IRQ(unsigned int irq)
 {
 	irq_enter();
 	check_stack_overflow();
-	__DO_IRQ_SMTC_HOOK(irq);
-	generic_handle_irq(irq);
+	if (!smtc_handle_on_other_cpu(irq))
+		generic_handle_irq(irq);
 	irq_exit();
 }
 
@@ -197,7 +158,7 @@ void __irq_entry do_IRQ(unsigned int irq)
 void __irq_entry do_IRQ_no_affinity(unsigned int irq)
 {
 	irq_enter();
-	__NO_AFFINITY_IRQ_SMTC_HOOK(irq);
+	smtc_im_backstop(irq);
 	generic_handle_irq(irq);
 	irq_exit();
 }
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index 0262abe09121..6e71b284f6c9 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -37,42 +37,38 @@
 #include <asm/mipsmtregs.h>
 #include <asm/system.h>
 
-static inline void unmask_mips_irq(unsigned int irq)
+static inline void unmask_mips_irq(struct irq_data *d)
 {
-	set_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+	set_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
 	irq_enable_hazard();
 }
 
-static inline void mask_mips_irq(unsigned int irq)
+static inline void mask_mips_irq(struct irq_data *d)
 {
-	clear_c0_status(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+	clear_c0_status(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
 	irq_disable_hazard();
 }
 
 static struct irq_chip mips_cpu_irq_controller = {
 	.name = "MIPS",
-	.ack = mask_mips_irq,
-	.mask = mask_mips_irq,
-	.mask_ack = mask_mips_irq,
-	.unmask = unmask_mips_irq,
-	.eoi = unmask_mips_irq,
+	.irq_ack = mask_mips_irq,
+	.irq_mask = mask_mips_irq,
+	.irq_mask_ack = mask_mips_irq,
+	.irq_unmask = unmask_mips_irq,
+	.irq_eoi = unmask_mips_irq,
 };
 
 /*
  * Basically the same as above but taking care of all the MT stuff
  */
 
-#define unmask_mips_mt_irq	unmask_mips_irq
-#define mask_mips_mt_irq	mask_mips_irq
-
-static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
+static unsigned int mips_mt_cpu_irq_startup(struct irq_data *d)
 {
 	unsigned int vpflags = dvpe();
 
-	clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+	clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
 	evpe(vpflags);
-	unmask_mips_mt_irq(irq);
-
+	unmask_mips_irq(d);
 	return 0;
 }
 
@@ -80,22 +76,22 @@ static unsigned int mips_mt_cpu_irq_startup(unsigned int irq)
  * While we ack the interrupt interrupts are disabled and thus we don't need
  * to deal with concurrency issues. Same for mips_cpu_irq_end.
  */
-static void mips_mt_cpu_irq_ack(unsigned int irq)
+static void mips_mt_cpu_irq_ack(struct irq_data *d)
 {
 	unsigned int vpflags = dvpe();
-	clear_c0_cause(0x100 << (irq - MIPS_CPU_IRQ_BASE));
+	clear_c0_cause(0x100 << (d->irq - MIPS_CPU_IRQ_BASE));
 	evpe(vpflags);
-	mask_mips_mt_irq(irq);
+	mask_mips_irq(d);
 }
 
 static struct irq_chip mips_mt_cpu_irq_controller = {
 	.name = "MIPS",
-	.startup = mips_mt_cpu_irq_startup,
-	.ack = mips_mt_cpu_irq_ack,
-	.mask = mask_mips_mt_irq,
-	.mask_ack = mips_mt_cpu_irq_ack,
-	.unmask = unmask_mips_mt_irq,
-	.eoi = unmask_mips_mt_irq,
+	.irq_startup = mips_mt_cpu_irq_startup,
+	.irq_ack = mips_mt_cpu_irq_ack,
+	.irq_mask = mask_mips_irq,
+	.irq_mask_ack = mips_mt_cpu_irq_ack,
+	.irq_unmask = unmask_mips_irq,
+	.irq_eoi = unmask_mips_irq,
 };
 
 void __init mips_cpu_irq_init(void)
@@ -113,10 +109,10 @@ void __init mips_cpu_irq_init(void)
 	 */
 	if (cpu_has_mipsmt)
 		for (i = irq_base; i < irq_base + 2; i++)
-			set_irq_chip_and_handler(i, &mips_mt_cpu_irq_controller,
+			irq_set_chip_and_handler(i, &mips_mt_cpu_irq_controller,
 						 handle_percpu_irq);
 
 	for (i = irq_base + 2; i < irq_base + 8; i++)
-		set_irq_chip_and_handler(i, &mips_cpu_irq_controller,
+		irq_set_chip_and_handler(i, &mips_cpu_irq_controller,
 					 handle_percpu_irq);
 }
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
index 95a96f69172d..b0c55b50218e 100644
--- a/arch/mips/kernel/irq_txx9.c
+++ b/arch/mips/kernel/irq_txx9.c
@@ -63,9 +63,9 @@ static struct {
 	unsigned char mode;
 } txx9irq[TXx9_MAX_IR] __read_mostly;
 
-static void txx9_irq_unmask(unsigned int irq)
+static void txx9_irq_unmask(struct irq_data *d)
 {
-	unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+	unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
 	u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16 ) / 2];
 	int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
 
@@ -79,9 +79,9 @@ static void txx9_irq_unmask(unsigned int irq)
 #endif
 }
 
-static inline void txx9_irq_mask(unsigned int irq)
+static inline void txx9_irq_mask(struct irq_data *d)
 {
-	unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+	unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
 	u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
 	int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
 
@@ -99,19 +99,19 @@ static inline void txx9_irq_mask(unsigned int irq)
 #endif
 }
 
-static void txx9_irq_mask_ack(unsigned int irq)
+static void txx9_irq_mask_ack(struct irq_data *d)
 {
-	unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+	unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
 
-	txx9_irq_mask(irq);
+	txx9_irq_mask(d);
 	/* clear edge detection */
 	if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
 		__raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
 }
 
-static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
+static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
 {
-	unsigned int irq_nr = irq - TXX9_IRQ_BASE;
+	unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
 	u32 cr;
 	u32 __iomem *crp;
 	int ofs;
@@ -139,11 +139,11 @@ static int txx9_irq_set_type(unsigned int irq, unsigned int flow_type)
 
 static struct irq_chip txx9_irq_chip = {
 	.name = "TXX9",
-	.ack = txx9_irq_mask_ack,
-	.mask = txx9_irq_mask,
-	.mask_ack = txx9_irq_mask_ack,
-	.unmask = txx9_irq_unmask,
-	.set_type = txx9_irq_set_type,
+	.irq_ack = txx9_irq_mask_ack,
+	.irq_mask = txx9_irq_mask,
+	.irq_mask_ack = txx9_irq_mask_ack,
+	.irq_unmask = txx9_irq_unmask,
+	.irq_set_type = txx9_irq_set_type,
 };
 
 void __init txx9_irq_init(unsigned long baseaddr)
@@ -154,8 +154,8 @@ void __init txx9_irq_init(unsigned long baseaddr)
 	for (i = 0; i < TXx9_MAX_IR; i++) {
 		txx9irq[i].level = 4;	/* middle level */
 		txx9irq[i].mode = TXx9_IRCR_LOW;
-		set_irq_chip_and_handler(TXX9_IRQ_BASE + i,
-			&txx9_irq_chip, handle_level_irq);
+		irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
+					 handle_level_irq);
 	}
 
 	/* mask all IRC interrupts */
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index d9a7db78ed62..75266ff4cc33 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -721,7 +721,7 @@ static void mipsxx_pmu_start(void)
 
 /*
  * MIPS performance counters can be per-TC. The control registers can
- * not be directly accessed accross CPUs. Hence if we want to do global
+ * not be directly accessed across CPUs. Hence if we want to do global
  * control, we need cross CPU calls. on_each_cpu() can help us, but we
  * can not make sure this function is called with interrupts enabled. So
  * here we pause local counters and then grab a rwlock and leave the
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ae167df73ddd..d2112d3cf115 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -410,7 +410,7 @@ unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
 	if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
 		return 0;
 	/*
-	 * Return ra if an exception occured at the first instruction
+	 * Return ra if an exception occurred at the first instruction
 	 */
 	if (unlikely(ofs == 0)) {
 		pc = *ra;
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index fbaabad0e6e2..7f5468b38d4c 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -586,6 +586,10 @@ einval: li v0, -ENOSYS
 	sys	sys_fanotify_init	2
 	sys	sys_fanotify_mark	6
 	sys	sys_prlimit64		4
+	sys	sys_name_to_handle_at	5
+	sys	sys_open_by_handle_at	3	/* 4340 */
+	sys	sys_clock_adjtime	2
+	sys	sys_syncfs		1
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 3f4179283207..a2e1fcbc41dc 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -425,4 +425,8 @@ sys_call_table:
 	PTR	sys_fanotify_init		/* 5295 */
 	PTR	sys_fanotify_mark
 	PTR	sys_prlimit64
+	PTR	sys_name_to_handle_at
+	PTR	sys_open_by_handle_at
+	PTR	sys_clock_adjtime		/* 5300 */
+	PTR	sys_syncfs
 	.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index f08ece6d8acc..b2c7624995b8 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -425,4 +425,8 @@ EXPORT(sysn32_call_table)
 	PTR	sys_fanotify_init		/* 6300 */
 	PTR	sys_fanotify_mark
 	PTR	sys_prlimit64
+	PTR	sys_name_to_handle_at
+	PTR	sys_open_by_handle_at
+	PTR	compat_sys_clock_adjtime	/* 6305 */
+	PTR	sys_syncfs
 	.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 78d768a3e19d..049a9c8c49a0 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -543,4 +543,8 @@ sys_call_table:
 	PTR	sys_fanotify_init
 	PTR	sys_32_fanotify_mark
 	PTR	sys_prlimit64
+	PTR	sys_name_to_handle_at
+	PTR	compat_sys_open_by_handle_at	/* 4340 */
+	PTR	compat_sys_clock_adjtime
+	PTR	sys_syncfs
 	.size sys_call_table,.-sys_call_table
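The four syscall tables above gain the same four entries (name_to_handle_at, open_by_handle_at, clock_adjtime and syncfs); the trailing comments pin their numbers, e.g. open_by_handle_at is 4340 in the o32 table, which puts syncfs at 4342 there. A hedged userspace sketch of calling the newly wired-up syncfs by number when the C library has no wrapper yet; the 4342 fallback is inferred from that comment and is illustrative only:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_syncfs
#define __NR_syncfs 4342	/* o32 numbering inferred from the table above */
#endif

int main(void)
{
	int fd = open("/", O_RDONLY);	/* any fd on the target filesystem */
	long ret;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	ret = syscall(__NR_syncfs, fd);	/* flush the filesystem containing "/" */
	if (ret < 0)
		perror("syncfs");
	close(fd);
	return ret < 0;
}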
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index c0e81418ba21..1ec56e635d04 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -120,7 +120,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
 
 	local_irq_save(flags);
 
-	vpflags = dvpe();	/* cant access the other CPU's registers whilst MVPE enabled */
+	vpflags = dvpe();	/* can't access the other CPU's registers whilst MVPE enabled */
 
 	switch (action) {
 	case SMP_CALL_FUNCTION:
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 39c08254b0f1..5a88cc4ccd5a 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -677,8 +677,9 @@ void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
 	 */
 }
 
-void smtc_forward_irq(unsigned int irq)
+void smtc_forward_irq(struct irq_data *d)
 {
+	unsigned int irq = d->irq;
 	int target;
 
 	/*
@@ -692,7 +693,7 @@ void smtc_forward_irq(unsigned int irq)
 	 * and efficiency, we just pick the easiest one to find.
 	 */
 
-	target = cpumask_first(irq_desc[irq].affinity);
+	target = cpumask_first(d->affinity);
 
 	/*
 	 * We depend on the platform code to have correctly processed
@@ -707,12 +708,10 @@ void smtc_forward_irq(unsigned int irq)
 	 */
 
 	/* If no one is eligible, service locally */
-	if (target >= NR_CPUS) {
+	if (target >= NR_CPUS)
 		do_IRQ_no_affinity(irq);
-		return;
-	}
-
-	smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
+	else
+		smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
 }
 
 #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
@@ -1147,7 +1146,7 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
 
 	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
 
-	set_irq_handler(cpu_ipi_irq, handle_percpu_irq);
+	irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
 }
 
 /*
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index fb7497405510..1083ad4e1017 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -102,7 +102,7 @@ static __init int cpu_has_mfc0_count_bug(void)
 	case CPU_R4400SC:
 	case CPU_R4400MC:
 		/*
-		 * The published errata for the R4400 upto 3.0 say the CPU
+		 * The published errata for the R4400 up to 3.0 say the CPU
 		 * has the mfc0 from count bug.
 		 */
 		if ((current_cpu_data.processor_id & 0xff) <= 0x30)
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index ab52b7cf3b6b..dbb6b408f001 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -19,7 +19,7 @@
  * VPE support module
  *
  * Provides support for loading a MIPS SP program on VPE1.
- * The SP enviroment is rather simple, no tlb's. It needs to be relocatable
+ * The SP environment is rather simple, no tlb's. It needs to be relocatable
  * (or partially linked). You should initialise your stack in the startup
  * code. This loader looks for the symbol __start and sets up
  * execution to resume from there. The MIPS SDE kit contains suitable examples.