author		Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 13:51:43 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2008-04-28 13:51:43 -0400
commit		e31a94ed371c70855eb30b77c490d6d85dd4da26 (patch)
tree		58d9f1a75a22319f97731db8d9ac07b78a8d8aaf /arch/mips/kernel
parent		9d9ad4b51d2b29b5bbeb4011f5e76f7538119cf9 (diff)
parent		fcbd3b4b92efe29b59df16b910138cf43683be88 (diff)
Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus: (45 commits)
  [MIPS] Pb1200/DBAu1200: move platform code to its proper place
  [MIPS] Fix handling of trap and breakpoint instructions
  [MIPS] Pb1200: do register SMC 91C111
  [MIPS] DBAu1200: fix bad SMC 91C111 resource size
  [NET] Kconfig: Rename MIKROTIK_RB500 -> MIKROTIK_RB532
  [MIPS] IP27: Fix build bug due to missing include
  [MIPS] Fix some sparse warnings on traps.c and irq-msc01.c
  [MIPS] cevt-gt641xx: Kill unnecessary include
  [MIPS] DS1287: Add clockevent driver
  [MIPS] add DECstation I/O ASIC clocksource
  [MIPS] rbtx4938: minor cleanup
  [MIPS] Alchemy: kill unused PCI_IRQ_TABLE_LOOKUP macro
  [MIPS] rbtx4938: misc cleanups
  [MIPS] jmr3927: use generic txx9 gpio
  [MIPS] rbhma4500: use generic txx9 gpio
  [MIPS] generic txx9 gpio support
  [MIPS] make fallback gpio.h gpiolib-friendly
  [MIPS] unexport null_perf_irq() and make it static
  [MIPS] unexport rtc_mips_set_time()
  [MIPS] unexport copy_from_user_page()
  ...
Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/Makefile         |    8
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c    |  129
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c   |    2
-rw-r--r--  arch/mips/kernel/cpu-probe.c      |   21
-rw-r--r--  arch/mips/kernel/csrc-ioasic.c    |   65
-rw-r--r--  arch/mips/kernel/gpio_txx9.c      |   87
-rw-r--r--  arch/mips/kernel/irq-gic.c        |  295
-rw-r--r--  arch/mips/kernel/irq-msc01.c      |   10
-rw-r--r--  arch/mips/kernel/signal-common.h  |    2
-rw-r--r--  arch/mips/kernel/smp-cmp.c        |  265
-rw-r--r--  arch/mips/kernel/smp-mt.c         |  143
-rw-r--r--  arch/mips/kernel/smp.c            |    4
-rw-r--r--  arch/mips/kernel/smtc.c           |   11
-rw-r--r--  arch/mips/kernel/spram.c          |  221
-rw-r--r--  arch/mips/kernel/sync-r4k.c       |  159
-rw-r--r--  arch/mips/kernel/time.c           |    5
-rw-r--r--  arch/mips/kernel/traps.c          |  213
17 files changed, 1421 insertions, 219 deletions
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 6fcdb6fda2e2..45545be3eb86 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,12 +10,15 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
+obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)	+= cevt-sb1250.o
 obj-$(CONFIG_CEVT_TXX9)	+= cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
+obj-$(CONFIG_CSRC_IOASIC)	+= csrc-ioasic.o
 obj-$(CONFIG_CSRC_R4K)		+= csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
+obj-$(CONFIG_SYNC_R4K)		+= sync-r4k.o
 
 binfmt_irix-objs	:= irixelf.o irixinv.o irixioctl.o irixsig.o	\
 			   irix5sys.o sysirix.o
@@ -50,6 +53,8 @@ obj-$(CONFIG_MIPS_MT) += mips-mt.o
 obj-$(CONFIG_MIPS_MT_FPAFF)	+= mips-mt-fpaff.o
 obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o
 obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
+obj-$(CONFIG_MIPS_CMP)		+= smp-cmp.o
+obj-$(CONFIG_CPU_MIPSR2)	+= spram.o
 
 obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
@@ -62,6 +67,7 @@ obj-$(CONFIG_IRQ_CPU_RM9K) += irq-rm9000.o
 obj-$(CONFIG_MIPS_BOARDS_GEN)	+= irq-msc01.o
 obj-$(CONFIG_IRQ_TXX9)		+= irq_txx9.o
 obj-$(CONFIG_IRQ_GT641XX)	+= irq-gt641xx.o
+obj-$(CONFIG_IRQ_GIC)		+= irq-gic.o
 
 obj-$(CONFIG_32BIT)		+= scall32-o32.o
 obj-$(CONFIG_64BIT)		+= scall64-64.o
@@ -77,6 +83,8 @@ obj-$(CONFIG_64BIT) += cpu-bugs64.o
 
 obj-$(CONFIG_I8253)		+= i8253.o
 
+obj-$(CONFIG_GPIO_TXX9)	+= gpio_txx9.o
+
 obj-$(CONFIG_KEXEC)		+= machine_kexec.o relocate_kernel.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
 
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
new file mode 100644
index 000000000000..df4acb68bfb5
--- /dev/null
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -0,0 +1,129 @@
1/*
2 * DS1287 clockevent driver
3 *
4 * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include <linux/clockchips.h>
21#include <linux/init.h>
22#include <linux/interrupt.h>
23#include <linux/mc146818rtc.h>
24
25#include <asm/time.h>
26
27int ds1287_timer_state(void)
28{
29 return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
30}
31
32int ds1287_set_base_clock(unsigned int hz)
33{
34 u8 rate;
35
36 switch (hz) {
37 case 128:
38 rate = 0x9;
39 break;
40 case 256:
41 rate = 0x8;
42 break;
43 case 1024:
44 rate = 0x6;
45 break;
46 default:
47 return -EINVAL;
48 }
49
50 CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A);
51
52 return 0;
53}
54
55static int ds1287_set_next_event(unsigned long delta,
56 struct clock_event_device *evt)
57{
58 return -EINVAL;
59}
60
61static void ds1287_set_mode(enum clock_event_mode mode,
62 struct clock_event_device *evt)
63{
64 u8 val;
65
66 spin_lock(&rtc_lock);
67
68 val = CMOS_READ(RTC_REG_B);
69
70 switch (mode) {
71 case CLOCK_EVT_MODE_PERIODIC:
72 val |= RTC_PIE;
73 break;
74 default:
75 val &= ~RTC_PIE;
76 break;
77 }
78
79 CMOS_WRITE(val, RTC_REG_B);
80
81 spin_unlock(&rtc_lock);
82}
83
84static void ds1287_event_handler(struct clock_event_device *dev)
85{
86}
87
88static struct clock_event_device ds1287_clockevent = {
89 .name = "ds1287",
90 .features = CLOCK_EVT_FEAT_PERIODIC,
91 .cpumask = CPU_MASK_CPU0,
92 .set_next_event = ds1287_set_next_event,
93 .set_mode = ds1287_set_mode,
94 .event_handler = ds1287_event_handler,
95};
96
97static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
98{
99 struct clock_event_device *cd = &ds1287_clockevent;
100
101 /* Ack the RTC interrupt. */
102 CMOS_READ(RTC_REG_C);
103
104 cd->event_handler(cd);
105
106 return IRQ_HANDLED;
107}
108
109static struct irqaction ds1287_irqaction = {
110 .handler = ds1287_interrupt,
111 .flags = IRQF_DISABLED | IRQF_PERCPU,
112 .name = "ds1287",
113};
114
115int __init ds1287_clockevent_init(int irq)
116{
117 struct clock_event_device *cd;
118
119 cd = &ds1287_clockevent;
120 cd->rating = 100;
121 cd->irq = irq;
122 clockevent_set_clock(cd, 32768);
123 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
124 cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
125
126 clockevents_register_device(&ds1287_clockevent);
127
128 return setup_irq(irq, &ds1287_irqaction);
129}
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
index c36772631fe0..6e2f58520afb 100644
--- a/arch/mips/kernel/cevt-gt641xx.c
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -25,8 +25,6 @@
 #include <asm/gt64120.h>
 #include <asm/time.h>
 
-#include <irq.h>
-
 static DEFINE_SPINLOCK(gt641xx_timer_lock);
 static unsigned int gt641xx_base_clock;
 
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 89c3304cb93c..335a6ae3d594 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -169,6 +169,7 @@ static inline void check_wait(void)
 
 	case CPU_24K:
 	case CPU_34K:
+	case CPU_1004K:
 		cpu_wait = r4k_wait;
 		if (read_c0_config7() & MIPS_CONF7_WII)
 			cpu_wait = r4k_wait_irqoff;
@@ -675,6 +676,12 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
 	return;
 }
 
+#ifdef CONFIG_CPU_MIPSR2
+extern void spram_config(void);
+#else
+static inline void spram_config(void) {}
+#endif
+
 static inline void cpu_probe_mips(struct cpuinfo_mips *c)
 {
 	decode_configs(c);
@@ -711,7 +718,12 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c)
 	case PRID_IMP_74K:
 		c->cputype = CPU_74K;
 		break;
+	case PRID_IMP_1004K:
+		c->cputype = CPU_1004K;
+		break;
 	}
+
+	spram_config();
 }
 
 static inline void cpu_probe_alchemy(struct cpuinfo_mips *c)
@@ -778,7 +790,7 @@ static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c)
 	}
 }
 
-static inline void cpu_probe_philips(struct cpuinfo_mips *c)
+static inline void cpu_probe_nxp(struct cpuinfo_mips *c)
 {
 	decode_configs(c);
 	switch (c->processor_id & 0xff00) {
@@ -787,7 +799,7 @@ static inline void cpu_probe_philips(struct cpuinfo_mips *c)
 		c->isa_level = MIPS_CPU_ISA_M32R1;
 		break;
 	default:
-		panic("Unknown Philips Core!"); /* REVISIT: die? */
+		panic("Unknown NXP Core!"); /* REVISIT: die? */
 		break;
 	}
 }
@@ -876,6 +888,7 @@ static __cpuinit const char *cpu_to_name(struct cpuinfo_mips *c)
 	case CPU_24K:	name = "MIPS 24K";	break;
 	case CPU_25KF:	name = "MIPS 25Kf";	break;
 	case CPU_34K:	name = "MIPS 34K";	break;
+	case CPU_1004K:	name = "MIPS 1004K";	break;
 	case CPU_74K:	name = "MIPS 74K";	break;
 	case CPU_VR4111: name = "NEC VR4111";	break;
 	case CPU_VR4121: name = "NEC VR4121";	break;
@@ -925,8 +938,8 @@ __cpuinit void cpu_probe(void)
 	case PRID_COMP_SANDCRAFT:
 		cpu_probe_sandcraft(c);
 		break;
-	case PRID_COMP_PHILIPS:
-		cpu_probe_philips(c);
+	case PRID_COMP_NXP:
+		cpu_probe_nxp(c);
 		break;
 	default:
 		c->cputype = CPU_UNKNOWN;
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
new file mode 100644
index 000000000000..1d5f63cf8997
--- /dev/null
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -0,0 +1,65 @@
1/*
2 * DEC I/O ASIC's counter clocksource
3 *
4 * Copyright (C) 2008 Yoichi Yuasa <yoichi_yuasa@tripeaks.co.jp>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19 */
20#include <linux/clocksource.h>
21#include <linux/init.h>
22
23#include <asm/ds1287.h>
24#include <asm/time.h>
25#include <asm/dec/ioasic.h>
26#include <asm/dec/ioasic_addrs.h>
27
28static cycle_t dec_ioasic_hpt_read(void)
29{
30 return ioasic_read(IO_REG_FCTR);
31}
32
33static struct clocksource clocksource_dec = {
34 .name = "dec-ioasic",
35 .read = dec_ioasic_hpt_read,
36 .mask = CLOCKSOURCE_MASK(32),
37 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
38};
39
40void __init dec_ioasic_clocksource_init(void)
41{
42 unsigned int freq;
43 u32 start, end;
44 int i = HZ / 10;
45
46
47 while (!ds1287_timer_state())
48 ;
49
50 start = dec_ioasic_hpt_read();
51
52 while (i--)
53 while (!ds1287_timer_state())
54 ;
55
56 end = dec_ioasic_hpt_read();
57
58 freq = (end - start) * 10;
59 printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
60
61 clocksource_dec.rating = 200 + freq / 10000000;
62 clocksource_set_clock(&clocksource_dec, freq);
63
64 clocksource_register(&clocksource_dec);
65}
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
new file mode 100644
index 000000000000..b1436a857998
--- /dev/null
+++ b/arch/mips/kernel/gpio_txx9.c
@@ -0,0 +1,87 @@
1/*
2 * A gpio chip driver for TXx9 SoCs
3 *
4 * Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/init.h>
12#include <linux/spinlock.h>
13#include <linux/gpio.h>
14#include <linux/errno.h>
15#include <linux/io.h>
16#include <asm/txx9pio.h>
17
18static DEFINE_SPINLOCK(txx9_gpio_lock);
19
20static struct txx9_pio_reg __iomem *txx9_pioptr;
21
22static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
23{
24 return __raw_readl(&txx9_pioptr->din) & (1 << offset);
25}
26
27static void txx9_gpio_set_raw(unsigned int offset, int value)
28{
29 u32 val;
30 val = __raw_readl(&txx9_pioptr->dout);
31 if (value)
32 val |= 1 << offset;
33 else
34 val &= ~(1 << offset);
35 __raw_writel(val, &txx9_pioptr->dout);
36}
37
38static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
39 int value)
40{
41 unsigned long flags;
42 spin_lock_irqsave(&txx9_gpio_lock, flags);
43 txx9_gpio_set_raw(offset, value);
44 mmiowb();
45 spin_unlock_irqrestore(&txx9_gpio_lock, flags);
46}
47
48static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
49{
50 spin_lock_irq(&txx9_gpio_lock);
51 __raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
52 &txx9_pioptr->dir);
53 mmiowb();
54 spin_unlock_irq(&txx9_gpio_lock);
55 return 0;
56}
57
58static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
59 int value)
60{
61 spin_lock_irq(&txx9_gpio_lock);
62 txx9_gpio_set_raw(offset, value);
63 __raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
64 &txx9_pioptr->dir);
65 mmiowb();
66 spin_unlock_irq(&txx9_gpio_lock);
67 return 0;
68}
69
70static struct gpio_chip txx9_gpio_chip = {
71 .get = txx9_gpio_get,
72 .set = txx9_gpio_set,
73 .direction_input = txx9_gpio_dir_in,
74 .direction_output = txx9_gpio_dir_out,
75 .label = "TXx9",
76};
77
78int __init txx9_gpio_init(unsigned long baseaddr,
79 unsigned int base, unsigned int num)
80{
81 txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
82 if (!txx9_pioptr)
83 return -ENODEV;
84 txx9_gpio_chip.base = base;
85 txx9_gpio_chip.ngpio = num;
86 return gpiochip_add(&txx9_gpio_chip);
87}
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
new file mode 100644
index 000000000000..f0a4bb19e096
--- /dev/null
+++ b/arch/mips/kernel/irq-gic.c
@@ -0,0 +1,295 @@
1#undef DEBUG
2
3#include <linux/bitmap.h>
4#include <linux/init.h>
5
6#include <asm/io.h>
7#include <asm/gic.h>
8#include <asm/gcmpregs.h>
9#include <asm/mips-boards/maltaint.h>
10#include <asm/irq.h>
11#include <linux/hardirq.h>
12#include <asm-generic/bitops/find.h>
13
14
15static unsigned long _gic_base;
16static unsigned int _irqbase, _mapsize, numvpes, numintrs;
17static struct gic_intr_map *_intrmap;
18
19static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
20static struct gic_pending_regs pending_regs[NR_CPUS];
21static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
22
23#define gic_wedgeb2bok 0 /*
24 * Can GIC handle b2b writes to wedge register?
25 */
26#if gic_wedgeb2bok == 0
27static DEFINE_SPINLOCK(gic_wedgeb2b_lock);
28#endif
29
30void gic_send_ipi(unsigned int intr)
31{
32#if gic_wedgeb2bok == 0
33 unsigned long flags;
34#endif
35 pr_debug("CPU%d: %s status %08x\n", smp_processor_id(), __func__,
36 read_c0_status());
37 if (!gic_wedgeb2bok)
38 spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
39 GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
40 if (!gic_wedgeb2bok) {
41 (void) GIC_REG(SHARED, GIC_SH_CONFIG);
42 spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
43 }
44}
45
46/* This is Malta specific and needs to be exported */
47static void vpe_local_setup(unsigned int numvpes)
48{
49 int i;
50 unsigned long timer_interrupt = 5, perf_interrupt = 5;
51 unsigned int vpe_ctl;
52
53 /*
54 * Setup the default performance counter timer interrupts
55 * for all VPEs
56 */
57 for (i = 0; i < numvpes; i++) {
58 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
59
60 /* Are Interrupts locally routable? */
61 GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
62 if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
63 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
64 GIC_MAP_TO_PIN_MSK | timer_interrupt);
65
66 if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
67 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
68 GIC_MAP_TO_PIN_MSK | perf_interrupt);
69 }
70}
71
72unsigned int gic_get_int(void)
73{
74 unsigned int i;
75 unsigned long *pending, *intrmask, *pcpu_mask;
76 unsigned long *pending_abs, *intrmask_abs;
77
78 /* Get per-cpu bitmaps */
79 pending = pending_regs[smp_processor_id()].pending;
80 intrmask = intrmask_regs[smp_processor_id()].intrmask;
81 pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
82
83 pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
84 GIC_SH_PEND_31_0_OFS);
85 intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
86 GIC_SH_MASK_31_0_OFS);
87
88 for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
89 GICREAD(*pending_abs, pending[i]);
90 GICREAD(*intrmask_abs, intrmask[i]);
91 pending_abs++;
92 intrmask_abs++;
93 }
94
95 bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
96 bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
97
98 i = find_first_bit(pending, GIC_NUM_INTRS);
99
100 pr_debug("CPU%d: %s pend=%d\n", smp_processor_id(), __func__, i);
101
102 return i;
103}
104
105static unsigned int gic_irq_startup(unsigned int irq)
106{
107 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
108 irq -= _irqbase;
109 /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
110 GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
111 1 << (irq % 32));
112 return 0;
113}
114
115static void gic_irq_ack(unsigned int irq)
116{
117#if gic_wedgeb2bok == 0
118 unsigned long flags;
119#endif
120 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
121 irq -= _irqbase;
122 GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
123 1 << (irq % 32));
124
125 if (_intrmap[irq].trigtype == GIC_TRIG_EDGE) {
126 if (!gic_wedgeb2bok)
127 spin_lock_irqsave(&gic_wedgeb2b_lock, flags);
128 GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
129 if (!gic_wedgeb2bok) {
130 (void) GIC_REG(SHARED, GIC_SH_CONFIG);
131 spin_unlock_irqrestore(&gic_wedgeb2b_lock, flags);
132 }
133 }
134}
135
136static void gic_mask_irq(unsigned int irq)
137{
138 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
139 irq -= _irqbase;
140 /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
141 GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_RMASK_31_0_OFS + (irq / 32))),
142 1 << (irq % 32));
143}
144
145static void gic_unmask_irq(unsigned int irq)
146{
147 pr_debug("CPU%d: %s: irq%d\n", smp_processor_id(), __func__, irq);
148 irq -= _irqbase;
149 /* FIXME: this is wrong for !GICISWORDLITTLEENDIAN */
150 GICWRITE(GIC_REG_ADDR(SHARED, (GIC_SH_SMASK_31_0_OFS + (irq / 32))),
151 1 << (irq % 32));
152}
153
154#ifdef CONFIG_SMP
155
156static DEFINE_SPINLOCK(gic_lock);
157
158static void gic_set_affinity(unsigned int irq, cpumask_t cpumask)
159{
160 cpumask_t tmp = CPU_MASK_NONE;
161 unsigned long flags;
162 int i;
163
164 pr_debug(KERN_DEBUG "%s called\n", __func__);
165 irq -= _irqbase;
166
167 cpus_and(tmp, cpumask, cpu_online_map);
168 if (cpus_empty(tmp))
169 return;
170
171 /* Assumption : cpumask refers to a single CPU */
172 spin_lock_irqsave(&gic_lock, flags);
173 for (;;) {
174 /* Re-route this IRQ */
175 GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
176
177 /*
178 * FIXME: assumption that _intrmap is ordered and has no holes
179 */
180
181 /* Update the intr_map */
182 _intrmap[irq].cpunum = first_cpu(tmp);
183
184 /* Update the pcpu_masks */
185 for (i = 0; i < NR_CPUS; i++)
186 clear_bit(irq, pcpu_masks[i].pcpu_mask);
187 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
188
189 }
190 irq_desc[irq].affinity = cpumask;
191 spin_unlock_irqrestore(&gic_lock, flags);
192
193}
194#endif
195
196static struct irq_chip gic_irq_controller = {
197 .name = "MIPS GIC",
198 .startup = gic_irq_startup,
199 .ack = gic_irq_ack,
200 .mask = gic_mask_irq,
201 .mask_ack = gic_mask_irq,
202 .unmask = gic_unmask_irq,
203 .eoi = gic_unmask_irq,
204#ifdef CONFIG_SMP
205 .set_affinity = gic_set_affinity,
206#endif
207};
208
209static void __init setup_intr(unsigned int intr, unsigned int cpu,
210 unsigned int pin, unsigned int polarity, unsigned int trigtype)
211{
212 /* Setup Intr to Pin mapping */
213 if (pin & GIC_MAP_TO_NMI_MSK) {
214 GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
215 /* FIXME: hack to route NMI to all cpu's */
216 for (cpu = 0; cpu < NR_CPUS; cpu += 32) {
217 GICWRITE(GIC_REG_ADDR(SHARED,
218 GIC_SH_MAP_TO_VPE_REG_OFF(intr, cpu)),
219 0xffffffff);
220 }
221 } else {
222 GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
223 GIC_MAP_TO_PIN_MSK | pin);
224 /* Setup Intr to CPU mapping */
225 GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
226 }
227
228 /* Setup Intr Polarity */
229 GIC_SET_POLARITY(intr, polarity);
230
231 /* Setup Intr Trigger Type */
232 GIC_SET_TRIGGER(intr, trigtype);
233
234 /* Init Intr Masks */
235 GIC_SET_INTR_MASK(intr, 0);
236}
237
238static void __init gic_basic_init(void)
239{
240 unsigned int i, cpu;
241
242 /* Setup defaults */
243 for (i = 0; i < GIC_NUM_INTRS; i++) {
244 GIC_SET_POLARITY(i, GIC_POL_POS);
245 GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
246 GIC_SET_INTR_MASK(i, 0);
247 }
248
249 /* Setup specifics */
250 for (i = 0; i < _mapsize; i++) {
251 cpu = _intrmap[i].cpunum;
252 if (cpu == X)
253 continue;
254
255 setup_intr(_intrmap[i].intrnum,
256 _intrmap[i].cpunum,
257 _intrmap[i].pin,
258 _intrmap[i].polarity,
259 _intrmap[i].trigtype);
260 /* Initialise per-cpu Interrupt software masks */
261 if (_intrmap[i].ipiflag)
262 set_bit(_intrmap[i].intrnum, pcpu_masks[cpu].pcpu_mask);
263 }
264
265 vpe_local_setup(numvpes);
266
267 for (i = _irqbase; i < (_irqbase + numintrs); i++)
268 set_irq_chip(i, &gic_irq_controller);
269}
270
271void __init gic_init(unsigned long gic_base_addr,
272 unsigned long gic_addrspace_size,
273 struct gic_intr_map *intr_map, unsigned int intr_map_size,
274 unsigned int irqbase)
275{
276 unsigned int gicconfig;
277
278 _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
279 gic_addrspace_size);
280 _irqbase = irqbase;
281 _intrmap = intr_map;
282 _mapsize = intr_map_size;
283
284 GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
285 numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
286 GIC_SH_CONFIG_NUMINTRS_SHF;
287 numintrs = ((numintrs + 1) * 8);
288
289 numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
290 GIC_SH_CONFIG_NUMVPES_SHF;
291
292 pr_debug("%s called\n", __func__);
293
294 gic_basic_init();
295}
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 4edc7e451d91..963c16d266ab 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -17,6 +17,7 @@
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/msc01_ic.h>
+#include <asm/traps.h>
 
 static unsigned long _icctrl_msc;
 #define MSC01_IC_REG_BASE	_icctrl_msc
@@ -98,14 +99,13 @@ void ll_msc_irq(void)
 	}
 }
 
-void
-msc_bind_eic_interrupt(unsigned int irq, unsigned int set)
+static void msc_bind_eic_interrupt(int irq, int set)
 {
 	MSCIC_WRITE(MSC01_IC_RAMW,
 		    (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
 }
 
-struct irq_chip msc_levelirq_type = {
+static struct irq_chip msc_levelirq_type = {
 	.name = "SOC-it-Level",
 	.ack = level_mask_and_ack_msc_irq,
 	.mask = mask_msc_irq,
@@ -115,7 +115,7 @@ struct irq_chip msc_levelirq_type = {
 	.end = end_msc_irq,
 };
 
-struct irq_chip msc_edgeirq_type = {
+static struct irq_chip msc_edgeirq_type = {
 	.name = "SOC-it-Edge",
 	.ack = edge_mask_and_ack_msc_irq,
 	.mask = mask_msc_irq,
@@ -128,8 +128,6 @@ struct irq_chip msc_edgeirq_type = {
 
 void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
 {
-	extern void (*board_bind_eic_interrupt)(unsigned int irq, unsigned int regset);
-
 	_icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
 
 	/* Reset interrupt controller - initialises all registers to 0 */
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index c0faabd52010..6c8e8c4246f7 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -14,7 +14,7 @@
 /* #define DEBUG_SIG */
 
 #ifdef DEBUG_SIG
-#  define DEBUGP(fmt, args...) printk("%s: " fmt, __FUNCTION__ , ##args)
+#  define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
 #else
 #  define DEBUGP(fmt, args...)
 #endif
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
new file mode 100644
index 000000000000..ca476c4f62a5
--- /dev/null
+++ b/arch/mips/kernel/smp-cmp.c
@@ -0,0 +1,265 @@
1/*
2 * This program is free software; you can distribute it and/or modify it
3 * under the terms of the GNU General Public License (Version 2) as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope it will be useful, but WITHOUT
7 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
8 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
9 * for more details.
10 *
11 * You should have received a copy of the GNU General Public License along
12 * with this program; if not, write to the Free Software Foundation, Inc.,
13 * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
14 *
15 * Copyright (C) 2007 MIPS Technologies, Inc.
16 * Chris Dearman (chris@mips.com)
17 */
18
19#undef DEBUG
20
21#include <linux/kernel.h>
22#include <linux/sched.h>
23#include <linux/cpumask.h>
24#include <linux/interrupt.h>
25#include <linux/compiler.h>
26
27#include <asm/atomic.h>
28#include <asm/cacheflush.h>
29#include <asm/cpu.h>
30#include <asm/processor.h>
31#include <asm/system.h>
32#include <asm/hardirq.h>
33#include <asm/mmu_context.h>
34#include <asm/smp.h>
35#include <asm/time.h>
36#include <asm/mipsregs.h>
37#include <asm/mipsmtregs.h>
38#include <asm/mips_mt.h>
39
40/*
41 * Crude manipulation of the CPU masks to control which
42 * which CPU's are brought online during initialisation
43 *
44 * Beware... this needs to be called after CPU discovery
45 * but before CPU bringup
46 */
47static int __init allowcpus(char *str)
48{
49 cpumask_t cpu_allow_map;
50 char buf[256];
51 int len;
52
53 cpus_clear(cpu_allow_map);
54 if (cpulist_parse(str, cpu_allow_map) == 0) {
55 cpu_set(0, cpu_allow_map);
56 cpus_and(cpu_possible_map, cpu_possible_map, cpu_allow_map);
57 len = cpulist_scnprintf(buf, sizeof(buf)-1, cpu_possible_map);
58 buf[len] = '\0';
59 pr_debug("Allowable CPUs: %s\n", buf);
60 return 1;
61 } else
62 return 0;
63}
64__setup("allowcpus=", allowcpus);
65
66static void ipi_call_function(unsigned int cpu)
67{
68 unsigned int action = 0;
69
70 pr_debug("CPU%d: %s cpu %d status %08x\n",
71 smp_processor_id(), __func__, cpu, read_c0_status());
72
73 switch (cpu) {
74 case 0:
75 action = GIC_IPI_EXT_INTR_CALLFNC_VPE0;
76 break;
77 case 1:
78 action = GIC_IPI_EXT_INTR_CALLFNC_VPE1;
79 break;
80 case 2:
81 action = GIC_IPI_EXT_INTR_CALLFNC_VPE2;
82 break;
83 case 3:
84 action = GIC_IPI_EXT_INTR_CALLFNC_VPE3;
85 break;
86 }
87 gic_send_ipi(action);
88}
89
90
91static void ipi_resched(unsigned int cpu)
92{
93 unsigned int action = 0;
94
95 pr_debug("CPU%d: %s cpu %d status %08x\n",
96 smp_processor_id(), __func__, cpu, read_c0_status());
97
98 switch (cpu) {
99 case 0:
100 action = GIC_IPI_EXT_INTR_RESCHED_VPE0;
101 break;
102 case 1:
103 action = GIC_IPI_EXT_INTR_RESCHED_VPE1;
104 break;
105 case 2:
106 action = GIC_IPI_EXT_INTR_RESCHED_VPE2;
107 break;
108 case 3:
109 action = GIC_IPI_EXT_INTR_RESCHED_VPE3;
110 break;
111 }
112 gic_send_ipi(action);
113}
114
115/*
116 * FIXME: This isn't restricted to CMP
117 * The SMVP kernel could use GIC interrupts if available
118 */
119void cmp_send_ipi_single(int cpu, unsigned int action)
120{
121 unsigned long flags;
122
123 local_irq_save(flags);
124
125 switch (action) {
126 case SMP_CALL_FUNCTION:
127 ipi_call_function(cpu);
128 break;
129
130 case SMP_RESCHEDULE_YOURSELF:
131 ipi_resched(cpu);
132 break;
133 }
134
135 local_irq_restore(flags);
136}
137
138static void cmp_send_ipi_mask(cpumask_t mask, unsigned int action)
139{
140 unsigned int i;
141
142 for_each_cpu_mask(i, mask)
143 cmp_send_ipi_single(i, action);
144}
145
146static void cmp_init_secondary(void)
147{
148 struct cpuinfo_mips *c = &current_cpu_data;
149
150 /* Assume GIC is present */
151 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | STATUSF_IP6 |
152 STATUSF_IP7);
153
154 /* Enable per-cpu interrupts: platform specific */
155
156 c->core = (read_c0_ebase() >> 1) & 0xff;
157#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
158 c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & TCBIND_CURVPE;
159#endif
160#ifdef CONFIG_MIPS_MT_SMTC
161 c->tc_id = (read_c0_tcbind() >> TCBIND_CURTC_SHIFT) & TCBIND_CURTC;
162#endif
163}
164
165static void cmp_smp_finish(void)
166{
167 pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
168
169 /* CDFIXME: remove this? */
170 write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
171
172#ifdef CONFIG_MIPS_MT_FPAFF
173 /* If we have an FPU, enroll ourselves in the FPU-full mask */
174 if (cpu_has_fpu)
175 cpu_set(smp_processor_id(), mt_fpu_cpumask);
176#endif /* CONFIG_MIPS_MT_FPAFF */
177
178 local_irq_enable();
179}
180
181static void cmp_cpus_done(void)
182{
183 pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
184}
185
186/*
187 * Setup the PC, SP, and GP of a secondary processor and start it running
188 * smp_bootstrap is the place to resume from
189 * __KSTK_TOS(idle) is apparently the stack pointer
190 * (unsigned long)idle->thread_info the gp
191 */
192static void cmp_boot_secondary(int cpu, struct task_struct *idle)
193{
194 struct thread_info *gp = task_thread_info(idle);
195 unsigned long sp = __KSTK_TOS(idle);
196 unsigned long pc = (unsigned long)&smp_bootstrap;
197 unsigned long a0 = 0;
198
199 pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
200 __func__, cpu);
201
202#if 0
203 /* Needed? */
204 flush_icache_range((unsigned long)gp,
205 (unsigned long)(gp + sizeof(struct thread_info)));
206#endif
207
208 amon_cpu_start(cpu, pc, sp, gp, a0);
209}
210
211/*
212 * Common setup before any secondaries are started
213 */
214void __init cmp_smp_setup(void)
215{
216 int i;
217 int ncpu = 0;
218
219 pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
220
221#ifdef CONFIG_MIPS_MT_FPAFF
222 /* If we have an FPU, enroll ourselves in the FPU-full mask */
223 if (cpu_has_fpu)
224 cpu_set(0, mt_fpu_cpumask);
225#endif /* CONFIG_MIPS_MT_FPAFF */
226
227 for (i = 1; i < NR_CPUS; i++) {
228 if (amon_cpu_avail(i)) {
229 cpu_set(i, phys_cpu_present_map);
230 __cpu_number_map[i] = ++ncpu;
231 __cpu_logical_map[ncpu] = i;
232 }
233 }
234
235 if (cpu_has_mipsmt) {
236 unsigned int nvpe, mvpconf0 = read_c0_mvpconf0();
237
238 nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
239 smp_num_siblings = nvpe;
240 }
241 pr_info("Detected %i available secondary CPU(s)\n", ncpu);
242}
243
244void __init cmp_prepare_cpus(unsigned int max_cpus)
245{
246 pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
247 smp_processor_id(), __func__, max_cpus);
248
249 /*
250 * FIXME: some of these options are per-system, some per-core and
251 * some per-cpu
252 */
253 mips_mt_set_cpuoptions();
254}
255
256struct plat_smp_ops cmp_smp_ops = {
257 .send_ipi_single = cmp_send_ipi_single,
258 .send_ipi_mask = cmp_send_ipi_mask,
259 .init_secondary = cmp_init_secondary,
260 .smp_finish = cmp_smp_finish,
261 .cpus_done = cmp_cpus_done,
262 .boot_secondary = cmp_boot_secondary,
263 .smp_setup = cmp_smp_setup,
264 .prepare_cpus = cmp_prepare_cpus,
265};
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 89e6f6aa5166..87a1816c1f45 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -36,110 +36,7 @@
 #include <asm/mipsmtregs.h>
 #include <asm/mips_mt.h>
 
-#define MIPS_CPU_IPI_RESCHED_IRQ 0
-#define MIPS_CPU_IPI_CALL_IRQ 1
-
-static int cpu_ipi_resched_irq, cpu_ipi_call_irq;
-
-#if 0
-static void dump_mtregisters(int vpe, int tc)
-{
-	printk("vpe %d tc %d\n", vpe, tc);
-
-	settc(tc);
-
-	printk(" c0 status 0x%lx\n", read_vpe_c0_status());
-	printk(" vpecontrol 0x%lx\n", read_vpe_c0_vpecontrol());
-	printk(" vpeconf0 0x%lx\n", read_vpe_c0_vpeconf0());
-	printk(" tcstatus 0x%lx\n", read_tc_c0_tcstatus());
-	printk(" tcrestart 0x%lx\n", read_tc_c0_tcrestart());
-	printk(" tcbind 0x%lx\n", read_tc_c0_tcbind());
-	printk(" tchalt 0x%lx\n", read_tc_c0_tchalt());
-}
-#endif
-
-void __init sanitize_tlb_entries(void)
-{
-	int i, tlbsiz;
-	unsigned long mvpconf0, ncpu;
-
-	if (!cpu_has_mipsmt)
-		return;
-
-	/* Enable VPC */
-	set_c0_mvpcontrol(MVPCONTROL_VPC);
-
-	back_to_back_c0_hazard();
-
-	/* Disable TLB sharing */
-	clear_c0_mvpcontrol(MVPCONTROL_STLB);
-
-	mvpconf0 = read_c0_mvpconf0();
-
-	printk(KERN_INFO "MVPConf0 0x%lx TLBS %lx PTLBE %ld\n", mvpconf0,
-		   (mvpconf0 & MVPCONF0_TLBS) >> MVPCONF0_TLBS_SHIFT,
-		   (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT);
-
-	tlbsiz = (mvpconf0 & MVPCONF0_PTLBE) >> MVPCONF0_PTLBE_SHIFT;
-	ncpu = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
-
-	printk(" tlbsiz %d ncpu %ld\n", tlbsiz, ncpu);
-
-	if (tlbsiz > 0) {
-		/* share them out across the vpe's */
-		tlbsiz /= ncpu;
-
-		printk(KERN_INFO "setting Config1.MMU_size to %d\n", tlbsiz);
-
-		for (i = 0; i < ncpu; i++) {
-			settc(i);
-
-			if (i == 0)
-				write_c0_config1((read_c0_config1() & ~(0x3f << 25)) | (tlbsiz << 25));
-			else
-				write_vpe_c0_config1((read_vpe_c0_config1() & ~(0x3f << 25)) |
-						   (tlbsiz << 25));
-		}
-	}
-
-	clear_c0_mvpcontrol(MVPCONTROL_VPC);
-}
-
-static void ipi_resched_dispatch(void)
-{
-	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
-}
-
-static void ipi_call_dispatch(void)
-{
-	do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
-}
-
-static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
-{
-	return IRQ_HANDLED;
-}
-
-static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
-{
-	smp_call_function_interrupt();
-
-	return IRQ_HANDLED;
-}
-
-static struct irqaction irq_resched = {
-	.handler	= ipi_resched_interrupt,
-	.flags		= IRQF_DISABLED|IRQF_PERCPU,
-	.name		= "IPI_resched"
-};
-
-static struct irqaction irq_call = {
-	.handler	= ipi_call_interrupt,
-	.flags		= IRQF_DISABLED|IRQF_PERCPU,
-	.name		= "IPI_call"
-};
-
-static void __init smp_copy_vpe_config(void)
+static void __init smvp_copy_vpe_config(void)
 {
 	write_vpe_c0_status(
 		(read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
@@ -156,7 +53,7 @@ static void __init smp_copy_vpe_config(void)
 	write_vpe_c0_count(read_c0_count());
 }
 
-static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 					 unsigned int ncpu)
 {
 	if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
@@ -182,12 +79,12 @@ static unsigned int __init smp_vpe_init(unsigned int tc, unsigned int mvpconf0,
 	write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
 
 	if (tc != 0)
-		smp_copy_vpe_config();
+		smvp_copy_vpe_config();
 
 	return ncpu;
 }
 
-static void __init smp_tc_init(unsigned int tc, unsigned int mvpconf0)
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
 {
 	unsigned long tmp;
 
@@ -254,15 +151,20 @@ static void vsmp_send_ipi_mask(cpumask_t mask, unsigned int action)
 
 static void __cpuinit vsmp_init_secondary(void)
 {
-	/* Enable per-cpu interrupts */
+	extern int gic_present;
 
 	/* This is Malta specific: IPI,performance and timer inetrrupts */
-	write_c0_status((read_c0_status() & ~ST0_IM ) |
-		(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP6 | STATUSF_IP7));
+	if (gic_present)
+		change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
+				 STATUSF_IP6 | STATUSF_IP7);
+	else
+		change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+				 STATUSF_IP6 | STATUSF_IP7);
 }
 
 static void __cpuinit vsmp_smp_finish(void)
 {
+	/* CDFIXME: remove this? */
 	write_c0_compare(read_c0_count() + (8* mips_hpt_frequency/HZ));
 
 #ifdef CONFIG_MIPS_MT_FPAFF
@@ -323,7 +225,7 @@ static void __cpuinit vsmp_boot_secondary(int cpu, struct task_struct *idle)
 /*
  * Common setup before any secondaries are started
  * Make sure all CPU's are in a sensible state before we boot any of the
- * secondarys
+ * secondaries
  */
 static void __init vsmp_smp_setup(void)
 {
@@ -356,8 +258,8 @@ static void __init vsmp_smp_setup(void)
 	for (tc = 0; tc <= ntc; tc++) {
 		settc(tc);
 
-		smp_tc_init(tc, mvpconf0);
-		ncpu = smp_vpe_init(tc, mvpconf0, ncpu);
+		smvp_tc_init(tc, mvpconf0);
+		ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
 	}
 
 	/* Release config state */
@@ -371,21 +273,6 @@ static void __init vsmp_smp_setup(void)
 static void __init vsmp_prepare_cpus(unsigned int max_cpus)
 {
 	mips_mt_set_cpuoptions();
-
-	/* set up ipi interrupts */
-	if (cpu_has_vint) {
-		set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
-		set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
-	}
-
-	cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
-	cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ;
-
-	setup_irq(cpu_ipi_resched_irq, &irq_resched);
-	setup_irq(cpu_ipi_call_irq, &irq_call);
-
-	set_irq_handler(cpu_ipi_resched_irq, handle_percpu_irq);
-	set_irq_handler(cpu_ipi_call_irq, handle_percpu_irq);
 }
 
 struct plat_smp_ops vsmp_smp_ops = {
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 9d41dab90a80..33780cc61ce9 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -35,6 +35,7 @@
 #include <asm/atomic.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
+#include <asm/r4k-timer.h>
 #include <asm/system.h>
 #include <asm/mmu_context.h>
 #include <asm/time.h>
@@ -125,6 +126,8 @@ asmlinkage __cpuinit void start_secondary(void)
 
 	cpu_set(cpu, cpu_callin_map);
 
+	synchronise_count_slave();
+
 	cpu_idle();
 }
 
@@ -287,6 +290,7 @@ void smp_send_stop(void)
 void __init smp_cpus_done(unsigned int max_cpus)
 {
 	mp_ops->cpus_done();
+	synchronise_count_master();
 }
 
 /* called from main before smp_init() */
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index b42e71c71119..3e863186cd22 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -174,14 +174,6 @@ static int clock_hang_reported[NR_CPUS];
 
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 
-/* Initialize shared TLB - the should probably migrate to smtc_setup_cpus() */
-
-void __init sanitize_tlb_entries(void)
-{
-	printk("Deprecated sanitize_tlb_entries() invoked\n");
-}
-
-
 /*
  * Configure shared TLB - VPC configuration bit must be set by caller
  */
@@ -339,7 +331,8 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 	/* In general, all TCs should have the same cpu_data indications */
 	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
 	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
-	if (cpu_data[0].cputype == CPU_34K)
+	if (cpu_data[0].cputype == CPU_34K ||
+	    cpu_data[0].cputype == CPU_1004K)
 		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
 	cpu_data[cpu].vpe_id = vpe;
 	cpu_data[cpu].tc_id = tc;
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
new file mode 100644
index 000000000000..6ddb507a87ef
--- /dev/null
+++ b/arch/mips/kernel/spram.c
@@ -0,0 +1,221 @@
1/*
2 * MIPS SPRAM support
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Copyright (C) 2007, 2008 MIPS Technologies, Inc.
10 */
11#include <linux/init.h>
12#include <linux/kernel.h>
13#include <linux/ptrace.h>
14#include <linux/stddef.h>
15
16#include <asm/cpu.h>
17#include <asm/fpu.h>
18#include <asm/mipsregs.h>
19#include <asm/system.h>
20#include <asm/r4kcache.h>
21#include <asm/hazards.h>
22
23/*
24 * These definitions are correct for the 24K/34K/74K SPRAM sample
25 * implementation. The 4KS interpreted the tags differently...
26 */
27#define SPRAM_TAG0_ENABLE 0x00000080
28#define SPRAM_TAG0_PA_MASK 0xfffff000
29#define SPRAM_TAG1_SIZE_MASK 0xfffff000
30
31#define SPRAM_TAG_STRIDE 8
32
33#define ERRCTL_SPRAM (1 << 28)
34
35/* errctl access */
36#define read_c0_errctl(x) read_c0_ecc(x)
37#define write_c0_errctl(x) write_c0_ecc(x)
38
39/*
40 * Different semantics to the set_c0_* function built by __BUILD_SET_C0
41 */
42static __cpuinit unsigned int bis_c0_errctl(unsigned int set)
43{
44 unsigned int res;
45 res = read_c0_errctl();
46 write_c0_errctl(res | set);
47 return res;
48}
49
50static __cpuinit void ispram_store_tag(unsigned int offset, unsigned int data)
51{
52 unsigned int errctl;
53
54 /* enable SPRAM tag access */
55 errctl = bis_c0_errctl(ERRCTL_SPRAM);
56 ehb();
57
58 write_c0_taglo(data);
59 ehb();
60
61 cache_op(Index_Store_Tag_I, CKSEG0|offset);
62 ehb();
63
64 write_c0_errctl(errctl);
65 ehb();
66}
67
68
69static __cpuinit unsigned int ispram_load_tag(unsigned int offset)
70{
71 unsigned int data;
72 unsigned int errctl;
73
74 /* enable SPRAM tag access */
75 errctl = bis_c0_errctl(ERRCTL_SPRAM);
76 ehb();
77 cache_op(Index_Load_Tag_I, CKSEG0 | offset);
78 ehb();
79 data = read_c0_taglo();
80 ehb();
81 write_c0_errctl(errctl);
82 ehb();
83
84 return data;
85}
86
87static __cpuinit void dspram_store_tag(unsigned int offset, unsigned int data)
88{
89 unsigned int errctl;
90
91 /* enable SPRAM tag access */
92 errctl = bis_c0_errctl(ERRCTL_SPRAM);
93 ehb();
94 write_c0_dtaglo(data);
95 ehb();
96 cache_op(Index_Store_Tag_D, CKSEG0 | offset);
97 ehb();
98 write_c0_errctl(errctl);
99 ehb();
100}
101
102
103static __cpuinit unsigned int dspram_load_tag(unsigned int offset)
104{
105 unsigned int data;
106 unsigned int errctl;
107
108 errctl = bis_c0_errctl(ERRCTL_SPRAM);
109 ehb();
110 cache_op(Index_Load_Tag_D, CKSEG0 | offset);
111 ehb();
112 data = read_c0_dtaglo();
113 ehb();
114 write_c0_errctl(errctl);
115 ehb();
116
117 return data;
118}
119
120static __cpuinit void probe_spram(char *type,
121 unsigned int base,
122 unsigned int (*read)(unsigned int),
123 void (*write)(unsigned int, unsigned int))
124{
125 unsigned int firstsize = 0, lastsize = 0;
126 unsigned int firstpa = 0, lastpa = 0, pa = 0;
127 unsigned int offset = 0;
128 unsigned int size, tag0, tag1;
129 unsigned int enabled;
130 int i;
131
132 /*
133 * The limit is arbitrary but avoids the loop running away if
134 * the SPRAM tags are implemented differently
135 */
136
137 for (i = 0; i < 8; i++) {
138 tag0 = read(offset);
139 tag1 = read(offset+SPRAM_TAG_STRIDE);
140 pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n",
141 type, i, tag0, tag1);
142
143 size = tag1 & SPRAM_TAG1_SIZE_MASK;
144
145 if (size == 0)
146 break;
147
148 if (i != 0) {
149 /* tags may repeat... */
150 if ((pa == firstpa && size == firstsize) ||
151 (pa == lastpa && size == lastsize))
152 break;
153 }
154
155 /* Align base with size */
156 base = (base + size - 1) & ~(size-1);
157
158 /* reprogram the base address base address and enable */
159 tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
160 write(offset, tag0);
161
162 base += size;
163
164 /* reread the tag */
165 tag0 = read(offset);
166 pa = tag0 & SPRAM_TAG0_PA_MASK;
167 enabled = tag0 & SPRAM_TAG0_ENABLE;
168
169 if (i == 0) {
170 firstpa = pa;
171 firstsize = size;
172 }
173
174 lastpa = pa;
175 lastsize = size;
176
177 if (strcmp(type, "DSPRAM") == 0) {
178 unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
179 unsigned int v;
180#define TDAT 0x5a5aa5a5
181 vp[0] = TDAT;
182 vp[1] = ~TDAT;
183
184 mb();
185
186 v = vp[0];
187 if (v != TDAT)
188 printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
189 vp, TDAT, v);
190 v = vp[1];
191 if (v != ~TDAT)
192 printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
193 vp+1, ~TDAT, v);
194 }
195
196 pr_info("%s%d: PA=%08x,Size=%08x%s\n",
197 type, i, pa, size, enabled ? ",enabled" : "");
198 offset += 2 * SPRAM_TAG_STRIDE;
199 }
200}
201
202__cpuinit void spram_config(void)
203{
204 struct cpuinfo_mips *c = &current_cpu_data;
205 unsigned int config0;
206
207 switch (c->cputype) {
208 case CPU_24K:
209 case CPU_34K:
210 case CPU_74K:
211 config0 = read_c0_config();
212 /* FIXME: addresses are Malta specific */
213 if (config0 & (1<<24)) {
214 probe_spram("ISPRAM", 0x1c000000,
215 &ispram_load_tag, &ispram_store_tag);
216 }
217 if (config0 & (1<<23))
218 probe_spram("DSPRAM", 0x1c100000,
219 &dspram_load_tag, &dspram_store_tag);
220 }
221}
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644
index 000000000000..9021108eb9c1
--- /dev/null
+++ b/arch/mips/kernel/sync-r4k.c
@@ -0,0 +1,159 @@
1/*
2 * Count register synchronisation.
3 *
4 * All CPUs will have their count registers synchronised to the CPU0 expirelo
5 * value. This can cause a small timewarp for CPU0. All other CPU's should
6 * not have done anything significant (but they may have had interrupts
7 * enabled briefly - prom_smp_finish() should not be responsible for enabling
8 * interrupts...)
9 *
10 * FIXME: broken for SMTC
11 */
12
13#include <linux/kernel.h>
14#include <linux/init.h>
15#include <linux/irqflags.h>
16#include <linux/r4k-timer.h>
17
18#include <asm/atomic.h>
19#include <asm/barrier.h>
20#include <asm/cpumask.h>
21#include <asm/mipsregs.h>
22
23static atomic_t __initdata count_start_flag = ATOMIC_INIT(0);
24static atomic_t __initdata count_count_start = ATOMIC_INIT(0);
25static atomic_t __initdata count_count_stop = ATOMIC_INIT(0);
26
27#define COUNTON 100
28#define NR_LOOPS 5
29
30void __init synchronise_count_master(void)
31{
32 int i;
33 unsigned long flags;
34 unsigned int initcount;
35 int nslaves;
36
37#ifdef CONFIG_MIPS_MT_SMTC
38 /*
39 * SMTC needs to synchronise per VPE, not per CPU
40 * ignore for now
41 */
42 return;
43#endif
44
45 pr_info("Checking COUNT synchronization across %u CPUs: ",
46 num_online_cpus());
47
48 local_irq_save(flags);
49
50 /*
51 * Notify the slaves that it's time to start
52 */
53 atomic_set(&count_start_flag, 1);
54 smp_wmb();
55
56 /* Count will be initialised to expirelo for all CPU's */
57 initcount = expirelo;
58
59 /*
60 * We loop a few times to get a primed instruction cache,
61 * then the last pass is more or less synchronised and
62 * the master and slaves each set their cycle counters to a known
63 * value all at once. This reduces the chance of having random offsets
64 * between the processors, and guarantees that the maximum
65 * delay between the cycle counters is never bigger than
66 * the latency of information-passing (cachelines) between
67 * two CPUs.
68 */
69
70 nslaves = num_online_cpus()-1;
71 for (i = 0; i < NR_LOOPS; i++) {
72 /* slaves loop on '!= ncpus' */
73 while (atomic_read(&count_count_start) != nslaves)
74 mb();
75 atomic_set(&count_count_stop, 0);
76 smp_wmb();
77
78 /* this lets the slaves write their count register */
79 atomic_inc(&count_count_start);
80
81 /*
82 * Everyone initialises count in the last loop:
83 */
84 if (i == NR_LOOPS-1)
85 write_c0_count(initcount);
86
87 /*
88 * Wait for all slaves to leave the synchronization point:
89 */
90 while (atomic_read(&count_count_stop) != nslaves)
91 mb();
92 atomic_set(&count_count_start, 0);
93 smp_wmb();
94 atomic_inc(&count_count_stop);
95 }
96 /* Arrange for an interrupt in a short while */
97 write_c0_compare(read_c0_count() + COUNTON);
98
99 local_irq_restore(flags);
100
101 /*
102 * i386 code reported the skew here, but the
103 * count registers were almost certainly out of sync
104 * so no point in alarming people
105 */
106 printk("done.\n");
107}
108
109void __init synchronise_count_slave(void)
110{
111 int i;
112 unsigned long flags;
113 unsigned int initcount;
114 int ncpus;
115
116#ifdef CONFIG_MIPS_MT_SMTC
117 /*
118 * SMTC needs to synchronise per VPE, not per CPU
119 * ignore for now
120 */
121 return;
122#endif
123
124 local_irq_save(flags);
125
126 /*
127 * Not every cpu is online at the time this gets called,
128 * so we first wait for the master to say everyone is ready
129 */
130
131 while (!atomic_read(&count_start_flag))
132 mb();
133
134 /* Count will be initialised to expirelo for all CPU's */
135 initcount = expirelo;
136
137 ncpus = num_online_cpus();
138 for (i = 0; i < NR_LOOPS; i++) {
139 atomic_inc(&count_count_start);
140 while (atomic_read(&count_count_start) != ncpus)
141 mb();
142
143 /*
144 * Everyone initialises count in the last loop:
145 */
146 if (i == NR_LOOPS-1)
147 write_c0_count(initcount);
148
149 atomic_inc(&count_count_stop);
150 while (atomic_read(&count_count_stop) != ncpus)
151 mb();
152 }
153 /* Arrange for an interrupt in a short while */
154 write_c0_compare(read_c0_count() + COUNTON);
155
156 local_irq_restore(flags);
157}
158#undef NR_LOOPS
159#endif
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index b45a7093ca2d..1f467d534642 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -38,7 +38,6 @@ int __weak rtc_mips_set_time(unsigned long sec)
 {
 	return 0;
 }
-EXPORT_SYMBOL(rtc_mips_set_time);
 
 int __weak rtc_mips_set_mmss(unsigned long nowtime)
 {
@@ -50,13 +49,11 @@ int update_persistent_clock(struct timespec now)
 	return rtc_mips_set_mmss(now.tv_sec);
 }
 
-int null_perf_irq(void)
+static int null_perf_irq(void)
 {
 	return 0;
 }
 
-EXPORT_SYMBOL(null_perf_irq);
-
 int (*perf_irq)(void) = null_perf_irq;
 
 EXPORT_SYMBOL(perf_irq);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 984c0d0a7b4d..cb8b0e2c7954 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -22,6 +22,7 @@
 #include <linux/kallsyms.h>
 #include <linux/bootmem.h>
 #include <linux/interrupt.h>
+#include <linux/ptrace.h>
 
 #include <asm/bootinfo.h>
 #include <asm/branch.h>
@@ -80,19 +81,22 @@ void (*board_bind_eic_interrupt)(int irq, int regset);
 
 static void show_raw_backtrace(unsigned long reg29)
 {
-	unsigned long *sp = (unsigned long *)reg29;
+	unsigned long *sp = (unsigned long *)(reg29 & ~3);
 	unsigned long addr;
 
 	printk("Call Trace:");
 #ifdef CONFIG_KALLSYMS
 	printk("\n");
 #endif
-	while (!kstack_end(sp)) {
-		addr = *sp++;
-		if (__kernel_text_address(addr))
-			print_ip_sym(addr);
+#define IS_KVA01(a) ((((unsigned int)a) & 0xc0000000) == 0x80000000)
+	if (IS_KVA01(sp)) {
+		while (!kstack_end(sp)) {
+			addr = *sp++;
+			if (__kernel_text_address(addr))
+				print_ip_sym(addr);
+		}
+		printk("\n");
 	}
-	printk("\n");
 }
 
 #ifdef CONFIG_KALLSYMS
@@ -192,16 +196,19 @@ EXPORT_SYMBOL(dump_stack);
192static void show_code(unsigned int __user *pc) 196static void show_code(unsigned int __user *pc)
193{ 197{
194 long i; 198 long i;
199 unsigned short __user *pc16 = NULL;
195 200
196 printk("\nCode:"); 201 printk("\nCode:");
197 202
203 if ((unsigned long)pc & 1)
204 pc16 = (unsigned short __user *)((unsigned long)pc & ~1);
198 for(i = -3 ; i < 6 ; i++) { 205 for(i = -3 ; i < 6 ; i++) {
199 unsigned int insn; 206 unsigned int insn;
200 if (__get_user(insn, pc + i)) { 207 if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) {
201 printk(" (Bad address in epc)\n"); 208 printk(" (Bad address in epc)\n");
202 break; 209 break;
203 } 210 }
204 printk("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>')); 211 printk("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>'));
205 } 212 }
206} 213}
207 214
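
The reworked show_code() keys off bit 0 of the EPC: MIPS16 code runs with the low address bit set, so the dumper switches to halfword fetches and prints 4 hex digits instead of 8 by passing the field width through printk's %0*x. A small stand-alone sketch of that selection logic, assuming a fake instruction buffer in place of __get_user() and made-up opcode words:

	#include <stdio.h>
	#include <stdint.h>

	/* pretend instruction memory */
	static const uint32_t mem32[] = { 0x3c02dead, 0x3442beef };
	static const uint16_t mem16[] = { 0x6a02, 0xf123 };

	static void show_code(uintptr_t pc)
	{
		const uint16_t *pc16 = NULL;
		int i;

		if (pc & 1)		/* MIPS16: low bit of EPC is set */
			pc16 = (const uint16_t *)(pc & ~(uintptr_t)1);

		printf("Code:");
		for (i = 0; i < 2; i++) {
			unsigned int insn = pc16 ? pc16[i]
						 : ((const uint32_t *)pc)[i];

			/* 4 hex digits for 16-bit opcodes, 8 for 32-bit ones */
			printf("%c%0*x%c", i ? ' ' : '<',
			       pc16 ? 4 : 8, insn, i ? ' ' : '>');
		}
		printf("\n");
	}

	int main(void)
	{
		show_code((uintptr_t)mem32);		/* 32-bit decode */
		show_code((uintptr_t)mem16 | 1);	/* MIPS16 decode */
		return 0;
	}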
@@ -311,10 +318,21 @@ void show_regs(struct pt_regs *regs)
311 318
312void show_registers(const struct pt_regs *regs) 319void show_registers(const struct pt_regs *regs)
313{ 320{
321 const int field = 2 * sizeof(unsigned long);
322
314 __show_regs(regs); 323 __show_regs(regs);
315 print_modules(); 324 print_modules();
316 printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n", 325 printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
317 current->comm, task_pid_nr(current), current_thread_info(), current); 326 current->comm, current->pid, current_thread_info(), current,
327 field, current_thread_info()->tp_value);
328 if (cpu_has_userlocal) {
329 unsigned long tls;
330
331 tls = read_c0_userlocal();
332 if (tls != current_thread_info()->tp_value)
333 printk("*HwTLS: %0*lx\n", field, tls);
334 }
335
318 show_stacktrace(current, regs); 336 show_stacktrace(current, regs);
319 show_code((unsigned int __user *) regs->cp0_epc); 337 show_code((unsigned int __user *) regs->cp0_epc);
320 printk("\n"); 338 printk("\n");
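
show_registers() now prints the saved TLS pointer (thread_info->tp_value) and, on CPUs with the UserLocal register, cross-checks it against the live CP0 value, printing *HwTLS on a mismatch. UserLocal is the same register userland reads through RDHWR $29; below is a hypothetical, MIPS-only sketch of that read, which only works once HWREna bit 29 is enabled or with kernel RDHWR emulation (see the noulri hunk further down). It is an illustration, not code from this patch.

	/* MIPS32r2-only sketch: read the UserLocal (TLS) register the way
	 * userland TLS code does. */
	#include <stdio.h>

	static unsigned long read_userlocal(void)
	{
		unsigned long v;

		__asm__(".set push\n\t"
			".set mips32r2\n\t"
			"rdhwr %0, $29\n\t"
			".set pop" : "=r" (v));
		return v;
	}

	int main(void)
	{
		printf("UserLocal = %#lx\n", read_userlocal());
		return 0;
	}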
@@ -657,35 +675,24 @@ asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
657 force_sig_info(SIGFPE, &info, current); 675 force_sig_info(SIGFPE, &info, current);
658} 676}
659 677
660asmlinkage void do_bp(struct pt_regs *regs) 678static void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
679 const char *str)
661{ 680{
662 unsigned int opcode, bcode;
663 siginfo_t info; 681 siginfo_t info;
664 682 char b[40];
665 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
666 goto out_sigsegv;
667
668 /*
669	 * There is an ancient bug in the MIPS assemblers whereby the break
670	 * code starts at bit 16 instead of bit 6 in the opcode.
671	 * Gas is bug-compatible, but not always, grrr...
672	 * We handle both cases with a simple heuristic. --macro
673 */
674 bcode = ((opcode >> 6) & ((1 << 20) - 1));
675 if (bcode < (1 << 10))
676 bcode <<= 10;
677 683
678 /* 684 /*
679 * (A short test says that IRIX 5.3 sends SIGTRAP for all break 685 * A short test says that IRIX 5.3 sends SIGTRAP for all trap
680 * insns, even for break codes that indicate arithmetic failures. 686 * insns, even for trap and break codes that indicate arithmetic
681 * Weird ...) 687 * failures. Weird ...
682 * But should we continue the brokenness??? --macro 688 * But should we continue the brokenness??? --macro
683 */ 689 */
684 switch (bcode) { 690 switch (code) {
685 case BRK_OVERFLOW << 10: 691 case BRK_OVERFLOW:
686 case BRK_DIVZERO << 10: 692 case BRK_DIVZERO:
687 die_if_kernel("Break instruction in kernel code", regs); 693 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
688 if (bcode == (BRK_DIVZERO << 10)) 694 die_if_kernel(b, regs);
695 if (code == BRK_DIVZERO)
689 info.si_code = FPE_INTDIV; 696 info.si_code = FPE_INTDIV;
690 else 697 else
691 info.si_code = FPE_INTOVF; 698 info.si_code = FPE_INTOVF;
@@ -695,12 +702,34 @@ asmlinkage void do_bp(struct pt_regs *regs)
695 force_sig_info(SIGFPE, &info, current); 702 force_sig_info(SIGFPE, &info, current);
696 break; 703 break;
697 case BRK_BUG: 704 case BRK_BUG:
698 die("Kernel bug detected", regs); 705 die_if_kernel("Kernel bug detected", regs);
706 force_sig(SIGTRAP, current);
699 break; 707 break;
700 default: 708 default:
701 die_if_kernel("Break instruction in kernel code", regs); 709 scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
710 die_if_kernel(b, regs);
702 force_sig(SIGTRAP, current); 711 force_sig(SIGTRAP, current);
703 } 712 }
713}
714
715asmlinkage void do_bp(struct pt_regs *regs)
716{
717 unsigned int opcode, bcode;
718
719 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
720 goto out_sigsegv;
721
722 /*
723	 * There is an ancient bug in the MIPS assemblers whereby the break
724	 * code starts at bit 16 instead of bit 6 in the opcode.
725	 * Gas is bug-compatible, but not always, grrr...
726	 * We handle both cases with a simple heuristic. --macro
727 */
728 bcode = ((opcode >> 6) & ((1 << 20) - 1));
729 if (bcode >= (1 << 10))
730 bcode >>= 10;
731
732 do_trap_or_bp(regs, bcode, "Break");
704 return; 733 return;
705 734
706out_sigsegv: 735out_sigsegv:
@@ -710,7 +739,6 @@ out_sigsegv:
710asmlinkage void do_tr(struct pt_regs *regs) 739asmlinkage void do_tr(struct pt_regs *regs)
711{ 740{
712 unsigned int opcode, tcode = 0; 741 unsigned int opcode, tcode = 0;
713 siginfo_t info;
714 742
715 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs))) 743 if (__get_user(opcode, (unsigned int __user *) exception_epc(regs)))
716 goto out_sigsegv; 744 goto out_sigsegv;
@@ -719,32 +747,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
719 if (!(opcode & OPCODE)) 747 if (!(opcode & OPCODE))
720 tcode = ((opcode >> 6) & ((1 << 10) - 1)); 748 tcode = ((opcode >> 6) & ((1 << 10) - 1));
721 749
722 /* 750 do_trap_or_bp(regs, tcode, "Trap");
723 * (A short test says that IRIX 5.3 sends SIGTRAP for all trap
724 * insns, even for trap codes that indicate arithmetic failures.
725 * Weird ...)
726 * But should we continue the brokenness??? --macro
727 */
728 switch (tcode) {
729 case BRK_OVERFLOW:
730 case BRK_DIVZERO:
731 die_if_kernel("Trap instruction in kernel code", regs);
732 if (tcode == BRK_DIVZERO)
733 info.si_code = FPE_INTDIV;
734 else
735 info.si_code = FPE_INTOVF;
736 info.si_signo = SIGFPE;
737 info.si_errno = 0;
738 info.si_addr = (void __user *) regs->cp0_epc;
739 force_sig_info(SIGFPE, &info, current);
740 break;
741 case BRK_BUG:
742 die("Kernel bug detected", regs);
743 break;
744 default:
745 die_if_kernel("Trap instruction in kernel code", regs);
746 force_sig(SIGTRAP, current);
747 }
748 return; 751 return;
749 752
750out_sigsegv: 753out_sigsegv:
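
The do_bp()/do_tr() rework funnels both exceptions through the new do_trap_or_bp() and normalises the break code to its unshifted form: the architectural field starts at bit 6, but ancient assemblers (and bug-compatible gas) place small codes at bit 16, i.e. shifted left by 10. A stand-alone sketch of that normalisation, using made-up opcode values and an arbitrary small code such as the kernel's BRK_DIVZERO:

	#include <stdio.h>

	#define DEMO_CODE 7	/* a small break code for the demo */

	/* Extract the 20-bit code field of a BREAK opcode and fold the two
	 * historical encodings (code at bit 6 vs. code at bit 16) together. */
	static unsigned int break_code(unsigned int opcode)
	{
		unsigned int bcode = (opcode >> 6) & ((1 << 20) - 1);

		if (bcode >= (1 << 10))		/* old-assembler encoding */
			bcode >>= 10;
		return bcode;
	}

	int main(void)
	{
		/* BREAK (function 0x0d) with the code at bit 6 ... */
		unsigned int new_style = (DEMO_CODE << 6) | 0x0d;
		/* ... and with the code at bit 16 */
		unsigned int old_style = (DEMO_CODE << 16) | 0x0d;

		printf("new-style -> %u, old-style -> %u\n",
		       break_code(new_style), break_code(old_style));
		return 0;
	}

Both encodings decode to the same value, which is why do_trap_or_bp() can compare against unshifted BRK_* constants for break and trap instructions alike.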
@@ -985,6 +988,21 @@ asmlinkage void do_reserved(struct pt_regs *regs)
985 (regs->cp0_cause & 0x7f) >> 2); 988 (regs->cp0_cause & 0x7f) >> 2);
986} 989}
987 990
991static int __initdata l1parity = 1;
992static int __init nol1parity(char *s)
993{
994 l1parity = 0;
995 return 1;
996}
997__setup("nol1par", nol1parity);
998static int __initdata l2parity = 1;
999static int __init nol2parity(char *s)
1000{
1001 l2parity = 0;
1002 return 1;
1003}
1004__setup("nol2par", nol2parity);
1005
988/* 1006/*
989 * Some MIPS CPUs can enable/disable for cache parity detection, but do 1007 * Some MIPS CPUs can enable/disable for cache parity detection, but do
990 * it different ways. 1008 * it different ways.
@@ -994,6 +1012,62 @@ static inline void parity_protection_init(void)
994 switch (current_cpu_type()) { 1012 switch (current_cpu_type()) {
995 case CPU_24K: 1013 case CPU_24K:
996 case CPU_34K: 1014 case CPU_34K:
1015 case CPU_74K:
1016 case CPU_1004K:
1017 {
1018#define ERRCTL_PE 0x80000000
1019#define ERRCTL_L2P 0x00800000
1020 unsigned long errctl;
1021 unsigned int l1parity_present, l2parity_present;
1022
1023 errctl = read_c0_ecc();
1024 errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
1025
1026 /* probe L1 parity support */
1027 write_c0_ecc(errctl | ERRCTL_PE);
1028 back_to_back_c0_hazard();
1029 l1parity_present = (read_c0_ecc() & ERRCTL_PE);
1030
1031 /* probe L2 parity support */
1032 write_c0_ecc(errctl|ERRCTL_L2P);
1033 back_to_back_c0_hazard();
1034 l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
1035
1036 if (l1parity_present && l2parity_present) {
1037 if (l1parity)
1038 errctl |= ERRCTL_PE;
1039 if (l1parity ^ l2parity)
1040 errctl |= ERRCTL_L2P;
1041 } else if (l1parity_present) {
1042 if (l1parity)
1043 errctl |= ERRCTL_PE;
1044 } else if (l2parity_present) {
1045 if (l2parity)
1046 errctl |= ERRCTL_L2P;
1047 } else {
1048 /* No parity available */
1049 }
1050
1051 printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
1052
1053 write_c0_ecc(errctl);
1054 back_to_back_c0_hazard();
1055 errctl = read_c0_ecc();
1056 printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
1057
1058 if (l1parity_present)
1059 printk(KERN_INFO "Cache parity protection %sabled\n",
1060 (errctl & ERRCTL_PE) ? "en" : "dis");
1061
1062 if (l2parity_present) {
1063 if (l1parity_present && l1parity)
1064 errctl ^= ERRCTL_L2P;
1065 printk(KERN_INFO "L2 cache parity protection %sabled\n",
1066 (errctl & ERRCTL_L2P) ? "en" : "dis");
1067 }
1068 }
1069 break;
1070
997 case CPU_5KC: 1071 case CPU_5KC:
998 write_c0_ecc(0x80000000); 1072 write_c0_ecc(0x80000000);
999 back_to_back_c0_hazard(); 1073 back_to_back_c0_hazard();
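
The new 24K/34K/74K/1004K case probes which parity controls actually exist by writing each ErrCtl bit and reading it back after a hazard barrier: a bit that sticks is implemented, one that reads back as zero is not, and only then are the nol1par/nol2par command-line choices applied. A stand-alone model of that write-then-read-back probe, with a fake register standing in for read_c0_ecc()/write_c0_ecc() and an assumed set of implemented bits:

	#include <stdio.h>

	#define ERRCTL_PE  0x80000000u		/* L1 parity enable */
	#define ERRCTL_L2P 0x00800000u		/* L2 parity enable */

	/* pretend this CPU implements L1 parity control but not L2 */
	#define IMPLEMENTED_BITS ERRCTL_PE

	static unsigned int fake_errctl;

	static void write_errctl(unsigned int v)
	{
		fake_errctl = v & IMPLEMENTED_BITS;	/* unimplemented bits read 0 */
	}

	static unsigned int read_errctl(void)
	{
		return fake_errctl;
	}

	int main(void)
	{
		unsigned int errctl = read_errctl() & ~(ERRCTL_PE | ERRCTL_L2P);
		unsigned int l1_present, l2_present;

		write_errctl(errctl | ERRCTL_PE);	/* probe L1 parity support */
		l1_present = read_errctl() & ERRCTL_PE;

		write_errctl(errctl | ERRCTL_L2P);	/* probe L2 parity support */
		l2_present = read_errctl() & ERRCTL_L2P;

		printf("L1 parity %s, L2 parity %s\n",
		       l1_present ? "present" : "absent",
		       l2_present ? "present" : "absent");
		return 0;
	}

Note the l1parity ^ l2parity twist in the real code: when both controls are present it appears to treat ERRCTL_L2P as "L2 setting differs from L1", which is also why the readback path flips the bit before printing the L2 status.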
@@ -1306,6 +1380,17 @@ int cp0_compare_irq;
1306int cp0_perfcount_irq; 1380int cp0_perfcount_irq;
1307EXPORT_SYMBOL_GPL(cp0_perfcount_irq); 1381EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
1308 1382
1383static int __cpuinitdata noulri;
1384
1385static int __init ulri_disable(char *s)
1386{
1387 pr_info("Disabling ulri\n");
1388 noulri = 1;
1389
1390 return 1;
1391}
1392__setup("noulri", ulri_disable);
1393
1309void __cpuinit per_cpu_trap_init(void) 1394void __cpuinit per_cpu_trap_init(void)
1310{ 1395{
1311 unsigned int cpu = smp_processor_id(); 1396 unsigned int cpu = smp_processor_id();
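
The noulri flag feeds the per_cpu_trap_init() hunk that follows: on MIPS R2 the kernel now always programs HWREna, exposing the low four hardware registers (CPUNum, SYNCI_Step, CC, CCRes) to RDHWR and, unless noulri was given on the command line, also bit 29 so userland can read UserLocal (the TLS pointer) without trapping. A small sketch of that mask computation, with hypothetical parameters standing in for cpu_has_userlocal and the boot flag:

	#include <stdio.h>

	#define HWRENA_BASE	0x0000000fu	/* CPUNum, SYNCI_Step, CC, CCRes */
	#define HWRENA_ULR	(1u << 29)	/* UserLocal readable via RDHWR $29 */

	static unsigned int hwrena_mask(int has_userlocal, int noulri)
	{
		unsigned int enable = HWRENA_BASE;

		if (!noulri && has_userlocal)
			enable |= HWRENA_ULR;
		return enable;
	}

	int main(void)
	{
		printf("default: %#010x\n", hwrena_mask(1, 0));
		printf("noulri:  %#010x\n", hwrena_mask(1, 1));
		printf("no ULR:  %#010x\n", hwrena_mask(0, 0));
		return 0;
	}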
@@ -1342,16 +1427,14 @@ void __cpuinit per_cpu_trap_init(void)
1342 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, 1427 change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
1343 status_set); 1428 status_set);
1344 1429
1345#ifdef CONFIG_CPU_MIPSR2
1346 if (cpu_has_mips_r2) { 1430 if (cpu_has_mips_r2) {
1347 unsigned int enable = 0x0000000f; 1431 unsigned int enable = 0x0000000f;
1348 1432
1349 if (cpu_has_userlocal) 1433 if (!noulri && cpu_has_userlocal)
1350 enable |= (1 << 29); 1434 enable |= (1 << 29);
1351 1435
1352 write_c0_hwrena(enable); 1436 write_c0_hwrena(enable);
1353 } 1437 }
1354#endif
1355 1438
1356#ifdef CONFIG_MIPS_MT_SMTC 1439#ifdef CONFIG_MIPS_MT_SMTC
1357 if (!secondaryTC) { 1440 if (!secondaryTC) {