author		Russell King <rmk@dyn-67.arm.linux.org.uk>	2008-10-14 17:24:51 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2008-10-14 17:24:51 -0400
commit		7e69a8c4d06b7ecb874f571e82b715a9f79bc3c4 (patch)
tree		0248fb8f7a3e445cc3c744252abeecabb9205c05 /arch/mips
parent		b6825d2df55aa7d7341c715b577b73a6a03dc944 (diff)
parent		d5120ae72a066b18f98e0c45ce73262f58030851 (diff)
Merge branch 's3c-move' into devel

Conflicts:

	arch/arm/mach-versatile/core.c
Diffstat (limited to 'arch/mips')
-rw-r--r--  arch/mips/Kconfig                  |  53
-rw-r--r--  arch/mips/au1000/common/gpio.c     |   6
-rw-r--r--  arch/mips/kernel/Makefile          |   1
-rw-r--r--  arch/mips/kernel/cevt-r4k.c        | 173
-rw-r--r--  arch/mips/kernel/cevt-smtc.c       | 321
-rw-r--r--  arch/mips/kernel/cpu-probe.c       |  26
-rw-r--r--  arch/mips/kernel/entry.S           |  10
-rw-r--r--  arch/mips/kernel/genex.S           |  41
-rw-r--r--  arch/mips/kernel/head.S            |   1
-rw-r--r--  arch/mips/kernel/kgdb.c            |   3
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c   |   2
-rw-r--r--  arch/mips/kernel/process.c         |  19
-rw-r--r--  arch/mips/kernel/ptrace.c          |   2
-rw-r--r--  arch/mips/kernel/smtc.c            | 260
-rw-r--r--  arch/mips/kernel/traps.c           |  28
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S     |   1
-rw-r--r--  arch/mips/lib/csum_partial.S       |  21
-rw-r--r--  arch/mips/mti-malta/Makefile       |   2
-rw-r--r--  arch/mips/mti-malta/malta-smtc.c   |   9
-rw-r--r--  arch/mips/pci/Makefile             |   1
-rw-r--r--  arch/mips/pci/pci-bcm47xx.c        |  60
-rw-r--r--  arch/mips/pci/pci-ip27.c           |  40
-rw-r--r--  arch/mips/sibyte/swarm/Makefile    |   3
-rw-r--r--  arch/mips/sibyte/swarm/platform.c  |  81
24 files changed, 819 insertions(+), 345 deletions(-)
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 49896a2a1d72..1e06d233fa83 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -211,6 +211,7 @@ config MIPS_MALTA
 	select SYS_SUPPORTS_64BIT_KERNEL
 	select SYS_SUPPORTS_BIG_ENDIAN
 	select SYS_SUPPORTS_LITTLE_ENDIAN
+	select SYS_SUPPORTS_MIPS_CMP if BROKEN	# because SYNC_R4K is broken
 	select SYS_SUPPORTS_MULTITHREADING
 	select SYS_SUPPORTS_SMARTMIPS
 	help
@@ -1403,7 +1404,6 @@ config MIPS_MT_SMTC
 	depends on CPU_MIPS32_R2
 	#depends on CPU_MIPS64_R2		# once there is hardware ...
 	depends on SYS_SUPPORTS_MULTITHREADING
-	select GENERIC_CLOCKEVENTS_BROADCAST
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select MIPS_MT
@@ -1451,32 +1451,17 @@ config MIPS_VPE_LOADER
 	  Includes a loader for loading an elf relocatable object
 	  onto another VPE and running it.

-config MIPS_MT_SMTC_INSTANT_REPLAY
-	bool "Low-latency Dispatch of Deferred SMTC IPIs"
-	depends on MIPS_MT_SMTC && !PREEMPT
-	default y
-	help
-	  SMTC pseudo-interrupts between TCs are deferred and queued
-	  if the target TC is interrupt-inhibited (IXMT). In the first
-	  SMTC prototypes, these queued IPIs were serviced on return
-	  to user mode, or on entry into the kernel idle loop. The
-	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
-	  processing, which adds runtime overhead (hence the option to turn
-	  it off), but ensures that IPIs are handled promptly even under
-	  heavy I/O interrupt load.
-
 config MIPS_MT_SMTC_IM_BACKSTOP
 	bool "Use per-TC register bits as backstop for inhibited IM bits"
 	depends on MIPS_MT_SMTC
-	default y
+	default n
 	help
 	  To support multiple TC microthreads acting as "CPUs" within
 	  a VPE, VPE-wide interrupt mask bits must be specially manipulated
 	  during interrupt handling. To support legacy drivers and interrupt
 	  controller management code, SMTC has a "backstop" to track and
 	  if necessary restore the interrupt mask. This has some performance
-	  impact on interrupt service overhead. Disable it only if you know
-	  what you are doing.
+	  impact on interrupt service overhead.

 config MIPS_MT_SMTC_IRQAFF
 	bool "Support IRQ affinity API"
@@ -1486,10 +1471,8 @@ config MIPS_MT_SMTC_IRQAFF
 	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
 	  for SMTC Linux kernel. Requires platform support, of which
 	  an example can be found in the MIPS kernel i8259 and Malta
-	  platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
-	  be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
-	  interrupt dispatch, and should be used only if you know what
-	  you are doing.
+	  platform code. Adds some overhead to interrupt dispatch, and
+	  should be used only if you know what you are doing.

 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
1495 bool "Load VPE program into memory hidden from linux" 1478 bool "Load VPE program into memory hidden from linux"
@@ -1517,6 +1500,18 @@ config MIPS_APSP_KSPD
 	  "exit" syscall notifying other kernel modules the SP program is
 	  exiting. You probably want to say yes here.

+config MIPS_CMP
+	bool "MIPS CMP framework support"
+	depends on SYS_SUPPORTS_MIPS_CMP
+	select SYNC_R4K if BROKEN
+	select SYS_SUPPORTS_SMP
+	select SYS_SUPPORTS_SCHED_SMT if SMP
+	select WEAK_ORDERING
+	default n
+	help
+	  This is a placeholder option for the GCMP work. It will need to
+	  be handled differently...
+
 config SB1_PASS_1_WORKAROUNDS
 	bool
 	depends on CPU_SB1_PASS_1
@@ -1693,6 +1688,9 @@ config SMP
 config SMP_UP
 	bool

+config SYS_SUPPORTS_MIPS_CMP
+	bool
+
 config SYS_SUPPORTS_SMP
 	bool

@@ -1740,17 +1738,6 @@ config NR_CPUS
 	  performance should round up your number of processors to the next
 	  power of two.

-config MIPS_CMP
-	bool "MIPS CMP framework support"
-	depends on SMP
-	select SYNC_R4K
-	select SYS_SUPPORTS_SCHED_SMT
-	select WEAK_ORDERING
-	default n
-	help
-	  This is a placeholder option for the GCMP work. It will need to
-	  be handled differently...
-
 source "kernel/time/Kconfig"

 #
diff --git a/arch/mips/au1000/common/gpio.c b/arch/mips/au1000/common/gpio.c
index b485d94ce8a5..e660ddd611c4 100644
--- a/arch/mips/au1000/common/gpio.c
+++ b/arch/mips/au1000/common/gpio.c
@@ -48,7 +48,7 @@ static void au1xxx_gpio2_write(unsigned gpio, int value)
 {
 	gpio -= AU1XXX_GPIO_BASE;

-	gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | (value << gpio);
+	gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
 }

 static int au1xxx_gpio2_direction_input(unsigned gpio)
@@ -61,7 +61,8 @@ static int au1xxx_gpio2_direction_input(unsigned gpio)
 static int au1xxx_gpio2_direction_output(unsigned gpio, int value)
 {
 	gpio -= AU1XXX_GPIO_BASE;
-	gpio2->dir = (0x01 << gpio) | (value << gpio);
+	gpio2->dir |= 0x01 << gpio;
+	gpio2->output = (GPIO2_OUTPUT_ENABLE_MASK << gpio) | ((!!value) << gpio);
 	return 0;
 }

@@ -90,6 +91,7 @@ static int au1xxx_gpio1_direction_input(unsigned gpio)
 static int au1xxx_gpio1_direction_output(unsigned gpio, int value)
 {
 	gpio1->trioutclr = (0x01 & gpio);
+	au1xxx_gpio1_write(gpio, value);
 	return 0;
 }

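The `!!value` normalization added in both gpio2 helpers guards against callers that pass any non-zero integer as `value`; without it, `value << gpio` can set an unrelated output bit. A standalone C sketch of the difference (not part of the patch; `enable_mask` stands in for GPIO2_OUTPUT_ENABLE_MASK and the sample values are made up):

#include <stdio.h>

/* Model of the fixed au1xxx_gpio2_write() bit math: a non-zero 'value'
 * such as 0x4 would corrupt a neighbouring bit unless collapsed to 0/1. */
static unsigned int output_word(unsigned int enable_mask, int gpio, int value)
{
	return (enable_mask << gpio) | ((!!value) << gpio);	/* !! yields 0 or 1 */
}

int main(void)
{
	/* With value = 4 and gpio = 2, the old form (value << gpio)
	 * sets bit 4 instead of the intended bit 2. */
	printf("fixed: %#x\n", output_word(0x10000, 2, 4));	/* bit 2 set */
	printf("buggy: %#x\n", (0x10000 << 2) | (4 << 2));	/* bit 4 set */
	return 0;
}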
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f93974797..25775cb54000 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)	+= cevt-sb1250.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0d..4a4c59f2737a 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
 
 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et. al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC

 static int mips_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
 	unsigned int cnt;
 	int res;

-#ifdef CONFIG_MIPS_MT_SMTC
-	{
-	unsigned long flags, vpflags;
-	local_irq_save(flags);
-	vpflags = dvpe();
-#endif
 	cnt = read_c0_count();
 	cnt += delta;
 	write_c0_compare(cnt);
 	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
-	evpe(vpflags);
-	local_irq_restore(flags);
-	}
-#endif
 	return res;
 }

-static void mips_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+			 struct clock_event_device *evt)
 {
 	/* Nothing to do ...  */
 }

-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;

-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
-	write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC

-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
-	/*
-	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-	 * and we can't reliably determine if a counter interrupt has also
-	 * happened (!r2) then don't check for a timer interrupt.
-	 */
-	return (cp0_perfcount_irq < 0) &&
-		perf_irq() == IRQ_HANDLED &&
-		!r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
 	const int r2 = cpu_has_mips_r2;
 	struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 	 * interrupt.  Being the paranoiacs we are we check anyway.
 	 */
 	if (!r2 || (read_c0_cause() & (1 << 30))) {
-		c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
-		if (cpu_data[cpu].vpe_id)
-			goto out;
-		cpu = 0;
-#endif
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 	}
@@ -107,65 +78,16 @@ out:
 	return IRQ_HANDLED;
 }

-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
 	.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
-	.flags = IRQF_DISABLED,
-#else
 	.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
 	.name = "timer",
 };

-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
-	unsigned int cpu;
-
-	for_each_cpu_mask(cpu, mask)
-		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
-	//uint64_t mips_freq = mips_hpt_frequency;
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *cd;
-
-	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
-	cd->name		= "SMTC";
-	cd->features		= CLOCK_EVT_FEAT_DUMMY;
-
-	/* Calculate the min / max delta */
-	cd->mult	= 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-	cd->shift		= 0; //32;
-	cd->max_delta_ns	= 0; //clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns	= 0; //clockevent_delta2ns(0x30, cd);
-
-	cd->rating		= 200;
-	cd->irq			= 17; //-1;
-//	if (cpu)
-//		cd->cpumask	= CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-//	else
-		cd->cpumask	= cpumask_of_cpu(cpu);
-
-	cd->set_mode		= smtc_set_mode;
-
-	cd->broadcast		= mips_broadcast;
-
-	clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
 {
 }

@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
 	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
 }

-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register.  4 times the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+	do { \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+	} while (0)
+
+int c0_compare_int_usable(void)
 {
 	unsigned int delta;
 	unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
 	 */
 	if (c0_compare_int_pending()) {
 		write_c0_compare(read_c0_count());
-		irq_disable_hazard();
+		compare_change_hazard();
 		if (c0_compare_int_pending())
 			return 0;
 	}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
 		cnt = read_c0_count();
 		cnt += delta;
 		write_c0_compare(cnt);
-		irq_disable_hazard();
+		compare_change_hazard();
 		if ((int)(read_c0_count() - cnt) < 0)
 			break;
 		/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
 	while ((int)(read_c0_count() - cnt) <= 0)
 		;	/* Wait for expiry  */

+	compare_change_hazard();
 	if (!c0_compare_int_pending())
 		return 0;

 	write_c0_compare(read_c0_count());
-	irq_disable_hazard();
+	compare_change_hazard();
 	if (c0_compare_int_pending())
 		return 0;

@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
 	return 1;
 }

+#ifndef CONFIG_MIPS_MT_SMTC
+
 int __cpuinit mips_clockevent_init(void)
 {
 	uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
 	if (!cpu_has_counter || !mips_hpt_frequency)
 		return -ENXIO;

-#ifdef CONFIG_MIPS_MT_SMTC
-	setup_smtc_dummy_clockevent_device();
-
-	/*
-	 * On SMTC we only register VPE0's compare interrupt as clockevent
-	 * device.
-	 */
-	if (cpu)
-		return 0;
-#endif
-
 	if (!c0_compare_int_usable())
 		return -ENXIO;

@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
 
 	cd->rating		= 300;
 	cd->irq			= irq;
-#ifdef CONFIG_MIPS_MT_SMTC
-	cd->cpumask		= CPU_MASK_ALL;
-#else
 	cd->cpumask		= cpumask_of_cpu(cpu);
-#endif
 	cd->set_next_event	= mips_next_event;
-	cd->set_mode		= mips_set_mode;
+	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;

 	clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
 
 	cp0_timer_irq_installed = 1;

-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
-	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
 	setup_irq(irq, &c0_compare_irqaction);
-#endif

 	return 0;
 }
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
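The `-ETIME` convention in `mips_next_event()` above tells the clockevents core to retry when Count has already raced past the freshly written Compare value. A toy model of that check (plain C, hypothetical counter values, not kernel code):

#include <stdio.h>
#include <errno.h>

static unsigned int count;			/* stands in for read_c0_count() */

/* Program Compare = Count + delta; if the free-running counter has
 * already passed it by the time we re-read, the interrupt may never
 * fire, so report -ETIME and let the core pick a larger delta. */
static int next_event(unsigned long delta, unsigned int count_after_write)
{
	unsigned int cnt = count + delta;	/* new Compare value */
	count = count_after_write;		/* counter kept ticking meanwhile */
	return ((int)(count - cnt) > 0) ? -ETIME : 0;
}

int main(void)
{
	count = 1000;
	printf("%d\n", next_event(500, 1200));	/* 0: Compare still ahead */
	count = 1000;
	printf("%d\n", next_event(50, 1200));	/* -ETIME: already passed */
	return 0;
}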
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 000000000000..5162fe4b5952
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * Variant clock event timer support for SMTC on MIPS 34K, 1004K
+ * or other MIPS MT cores.
+ *
+ * Notes on SMTC Support:
+ *
+ * SMTC has multiple microthread TCs pretending to be Linux CPUs.
+ * But there's only one Count/Compare pair per VPE, and Compare
+ * interrupts are taken opportunistically by available TCs
+ * bound to the VPE with the Count register.  The new timer
+ * framework provides for global broadcasts, but we really
+ * want VPE-level multicasts for best behavior.  So instead
+ * of invoking the high-level clock-event broadcast code,
+ * this version of SMTC support uses the historical SMTC
+ * multicast mechanisms "under the hood", appearing to the
+ * generic clock layer as if the interrupts are per-CPU.
+ *
+ * The approach taken here is to maintain a set of NR_CPUS
+ * virtual timers, and track which "CPU" needs to be alerted
+ * at each event.
+ *
+ * It's unlikely that we'll see a MIPS MT core with more than
+ * 2 VPEs, but we *know* that we won't need to handle more
+ * VPEs than we have "CPUs".  So NCPUs arrays of NCPUs elements
+ * is always going to be overkill, but always going to be enough.
+ */
+
+unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
+static int smtc_nextinvpe[NR_CPUS];
+
+/*
+ * Timestamps stored are absolute values to be programmed
+ * into Count register.  Valid timestamps will never be zero.
+ * If a Zero Count value is actually calculated, it is converted
+ * to be a 1, which will introduce one or two CPU cycles of error
+ * roughly once every four billion events, which at 1000 HZ means
+ * about once every 50 days.  If that's actually a problem, one
+ * could alternate squashing 0 to 1 and to -1.
+ */
+
+#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
+#define ISVALID(x) ((x) != 0L)
+
+/*
+ * Time comparison is subtle, as it's really truncated
+ * modular arithmetic.
+ */
+
+#define IS_SOONER(a, b, reference) \
+    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
+
+/*
+ * CATCHUP_INCREMENT, used when the function falls behind the counter.
+ * Could be an increasing function instead of a constant.
+ */
+
+#define CATCHUP_INCREMENT 64
+
+static int mips_next_event(unsigned long delta,
+				struct clock_event_device *evt)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	unsigned long timestamp, reference, previous;
+	unsigned long nextcomp = 0L;
+	int vpe = current_cpu_data.vpe_id;
+	int cpu = smp_processor_id();
+	local_irq_save(flags);
+	mtflags = dmt();
+
+	/*
+	 * Maintain the per-TC virtual timer
+	 * and program the per-VPE shared Count register
+	 * as appropriate here...
+	 */
+	reference = (unsigned long)read_c0_count();
+	timestamp = MAKEVALID(reference + delta);
+	/*
+	 * To really model the clock, we have to catch the case
+	 * where the current next-in-VPE timestamp is the old
+	 * timestamp for the calling CPU, but the new value is
+	 * in fact later.  In that case, we have to do a full
+	 * scan and discover the new next-in-VPE CPU id and
+	 * timestamp.
+	 */
+	previous = smtc_nexttime[vpe][cpu];
+	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
+	    && IS_SOONER(previous, timestamp, reference)) {
+		int i;
+		int soonest = cpu;
+
+		/*
+		 * Update timestamp array here, so that new
+		 * value gets considered along with those of
+		 * other virtual CPUs on the VPE.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+		for_each_online_cpu(i) {
+			if (ISVALID(smtc_nexttime[vpe][i])
+			    && IS_SOONER(smtc_nexttime[vpe][i],
+				smtc_nexttime[vpe][soonest], reference)) {
+				soonest = i;
+			}
+		}
+		smtc_nextinvpe[vpe] = soonest;
+		nextcomp = smtc_nexttime[vpe][soonest];
+	/*
+	 * Otherwise, we don't have to process the whole array rank,
+	 * we just have to see if the event horizon has gotten closer.
+	 */
+	} else {
+		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
+		    IS_SOONER(timestamp,
+			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
+			smtc_nextinvpe[vpe] = cpu;
+			nextcomp = timestamp;
+		}
+		/*
+		 * Since next-in-VPE may be the same as the executing
+		 * virtual CPU, we update the array *after* checking
+		 * its value.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+	}
+
+	/*
+	 * It may be that, in fact, we don't need to update Compare,
+	 * but if we do, we want to make sure we didn't fall into
+	 * a crack just behind Count.
+	 */
+	if (ISVALID(nextcomp)) {
+		write_c0_compare(nextcomp);
+		ehb();
+		/*
+		 * We never return an error, we just make sure
+		 * that we trigger the handlers as quickly as
+		 * we can if we fell behind.
+		 */
+		while ((nextcomp - (unsigned long)read_c0_count())
+			> (unsigned long)LONG_MAX) {
+			nextcomp += CATCHUP_INCREMENT;
+			write_c0_compare(nextcomp);
+			ehb();
+		}
+	}
+	emt(mtflags);
+	local_irq_restore(flags);
+	return 0;
+}
+
+
+void smtc_distribute_timer(int vpe)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	int cpu;
+	struct clock_event_device *cd;
+	unsigned long nextstamp = 0L;
+	unsigned long reference;
+
+
+repeat:
+	for_each_online_cpu(cpu) {
+		/*
+		 * Find virtual CPUs within the current VPE who have
+		 * unserviced timer requests whose time is now past.
+		 */
+		local_irq_save(flags);
+		mtflags = dmt();
+		if (cpu_data[cpu].vpe_id == vpe &&
+		    ISVALID(smtc_nexttime[vpe][cpu])) {
+			reference = (unsigned long)read_c0_count();
+			if ((smtc_nexttime[vpe][cpu] - reference)
+				 > (unsigned long)LONG_MAX) {
+				smtc_nexttime[vpe][cpu] = 0L;
+				emt(mtflags);
+				local_irq_restore(flags);
+				/*
+				 * We don't send IPIs to ourself.
+				 */
+				if (cpu != smp_processor_id()) {
+					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+				} else {
+					cd = &per_cpu(mips_clockevent_device, cpu);
+					cd->event_handler(cd);
+				}
+			} else {
+				/* Local to VPE but Valid Time not yet reached. */
+				if (!ISVALID(nextstamp) ||
+				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
+				    reference)) {
+					smtc_nextinvpe[vpe] = cpu;
+					nextstamp = smtc_nexttime[vpe][cpu];
+				}
+				emt(mtflags);
+				local_irq_restore(flags);
+			}
+		} else {
+			emt(mtflags);
+			local_irq_restore(flags);
+
+		}
+	}
+	/* Reprogram for interrupt at next soonest timestamp for VPE */
+	if (ISVALID(nextstamp)) {
+		write_c0_compare(nextstamp);
+		ehb();
+		if ((nextstamp - (unsigned long)read_c0_count())
+			> (unsigned long)LONG_MAX)
+			goto repeat;
+	}
+}
+
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+
+	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
+	handle_perf_irq(1);
+
+	if (read_c0_cause() & (1 << 30)) {
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
+		smtc_distribute_timer(cpu_data[cpu].vpe_id);
+	}
+	return IRQ_HANDLED;
+}
+
+
+int __cpuinit mips_clockevent_init(void)
+{
+	uint64_t mips_freq = mips_hpt_frequency;
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd;
+	unsigned int irq;
+	int i;
+	int j;
+
+	if (!cpu_has_counter || !mips_hpt_frequency)
+		return -ENXIO;
+	if (cpu == 0) {
+		for (i = 0; i < num_possible_cpus(); i++) {
+			smtc_nextinvpe[i] = 0;
+			for (j = 0; j < num_possible_cpus(); j++)
+				smtc_nexttime[i][j] = 0L;
+		}
+		/*
+		 * SMTC also can't have the usability test
+		 * run by secondary TCs once Compare is in use.
+		 */
+		if (!c0_compare_int_usable())
+			return -ENXIO;
+	}
+
+	/*
+	 * With vectored interrupts things are getting platform specific.
+	 * get_c0_compare_int is a hook to allow a platform to return the
+	 * interrupt number of its liking.
+	 */
+	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+	if (get_c0_compare_int)
+		irq = get_c0_compare_int();
+
+	cd = &per_cpu(mips_clockevent_device, cpu);
+
+	cd->name		= "MIPS";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+
+	/* Calculate the min / max delta */
+	cd->mult		= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+	cd->shift		= 32;
+	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
+	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
+
+	cd->rating		= 300;
+	cd->irq			= irq;
+	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->set_next_event	= mips_next_event;
+	cd->set_mode		= mips_set_clock_mode;
+	cd->event_handler	= mips_event_handler;
+
+	clockevents_register_device(cd);
+
+	/*
+	 * On SMTC we only want to do the data structure
+	 * initialization and IRQ setup once.
+	 */
+	if (cpu)
+		return 0;
+	/*
+	 * And we need the hwmask associated with the c0_compare
+	 * vector to be initialized.
+	 */
+	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+	if (cp0_timer_irq_installed)
+		return 0;
+
+	cp0_timer_irq_installed = 1;
+
+	setup_irq(irq, &c0_compare_irqaction);
+
+	return 0;
+}
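The wrap-safe ordering trick behind `IS_SOONER()` is worth seeing in isolation: subtracting a common reference first makes the comparison correct even across a 32-bit Count wrap, where a naive `a < b` on raw timestamps would not be. A minimal sketch with invented values (the kernel macro casts to `unsigned long`, which is 32-bit on MIPS32; `uint32_t` keeps the wrap explicit on any host):

#include <stdio.h>
#include <stdint.h>

/* Distance-from-reference comparison, modeled on the patch's macro. */
#define IS_SOONER(a, b, reference) \
	(((uint32_t)((a) - (reference))) < ((uint32_t)((b) - (reference))))

int main(void)
{
	uint32_t ref = 0xfffffff0u;	/* current Count, just before wrap */
	uint32_t a = 0x00000010u;	/* 0x20 ticks ahead, already wrapped */
	uint32_t b = 0x00000100u;	/* further ahead still */

	printf("%d\n", IS_SOONER(a, b, ref));	/* 1: a expires first */
	printf("%d\n", IS_SOONER(b, a, ref));	/* 0 */
	/* A direct compare would call 'a' earlier than 'ref' numerically,
	 * even though 'a' is in the future; the subtraction avoids that. */
	return 0;
}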
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 335a6ae3d594..e621fda8ab37 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -45,18 +45,7 @@ static void r39xx_wait(void)
 	local_irq_enable();
 }

-/*
- * There is a race when WAIT instruction executed with interrupt
- * enabled.
- * But it is implementation-dependent wheter the pipelie restarts when
- * a non-enabled interrupt is requested.
- */
-static void r4k_wait(void)
-{
-	__asm__("	.set	mips3			\n"
-		"	wait				\n"
-		"	.set	mips0			\n");
-}
+extern void r4k_wait(void);

 /*
  * This variant is preferable as it allows testing need_resched and going to
@@ -65,14 +54,18 @@ static void r4k_wait(void)
  * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
  * using this version a gamble.
  */
-static void r4k_wait_irqoff(void)
+void r4k_wait_irqoff(void)
 {
 	local_irq_disable();
 	if (!need_resched())
-		__asm__("	.set	mips3		\n"
+		__asm__("	.set	push		\n"
+			"	.set	mips3		\n"
 			"	wait			\n"
-			"	.set	mips0		\n");
+			"	.set	pop		\n");
 	local_irq_enable();
+	__asm__("	.globl __pastwait	\n"
+		"__pastwait:			\n");
+	return;
 }

@@ -128,7 +121,7 @@ static int __init wait_disable(char *s)
 
 __setup("nowait", wait_disable);

-static inline void check_wait(void)
+void __init check_wait(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;

@@ -242,7 +235,6 @@ static inline void check_errata(void)
 
 void __init check_bugs32(void)
 {
-	check_wait();
 	check_errata();
 }

diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e29598ae939d..ffa331029e08 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)
 
 FEXPORT(restore_all)			# restore full frame
 #ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
-	LONG_L	s0, TI_REGS($28)
-	LONG_S	sp, TI_REGS($28)
-	jal	deferred_smtc_ipi
-	LONG_S	s0, TI_REGS($28)
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 /* Re-arm any temporarily masked interrupts not explicitly "acked" */
 	mfc0	v0, CP0_TCSTATUS
@@ -112,6 +107,11 @@ FEXPORT(restore_all) # restore full frame
 	xor	t0, t0, t3
 	mtc0	t0, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
+/* Detect and execute deferred IPI "interrupts" */
+	LONG_L	s0, TI_REGS($28)
+	LONG_S	sp, TI_REGS($28)
+	jal	deferred_smtc_ipi
+	LONG_S	s0, TI_REGS($28)
 #endif /* CONFIG_MIPS_MT_SMTC */
 	.set	noat
 	RESTORE_TEMP
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index c6ada98ee042..01dcbe38fa01 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -20,6 +20,7 @@
 #include <asm/stackframe.h>
 #include <asm/war.h>
 #include <asm/page.h>
+#include <asm/thread_info.h>

 #define PANIC_PIC(msg)					\
 		.set	push;				\
@@ -126,7 +127,42 @@ handle_vcei:
 
 	__FINIT

+	.align	5	/* 32 byte rollback region */
+LEAF(r4k_wait)
+	.set	push
+	.set	noreorder
+	/* start of rollback region */
+	LONG_L	t0, TI_FLAGS($28)
+	nop
+	andi	t0, _TIF_NEED_RESCHED
+	bnez	t0, 1f
+	 nop
+	 nop
+	 nop
+	.set	mips3
+	wait
+	/* end of rollback region (the region size must be a power of two) */
+	.set	pop
+1:
+	jr	ra
+	END(r4k_wait)
+
+	.macro	BUILD_ROLLBACK_PROLOGUE handler
+	FEXPORT(rollback_\handler)
+	.set	push
+	.set	noat
+	MFC0	k0, CP0_EPC
+	PTR_LA	k1, r4k_wait
+	ori	k0, 0x1f	/* 32 byte rollback region */
+	xori	k0, 0x1f
+	bne	k0, k1, 9f
+	 MTC0	k0, CP0_EPC
+9:
+	.set	pop
+	.endm
+
 	.align	5
+BUILD_ROLLBACK_PROLOGUE handle_int
 NESTED(handle_int, PT_SIZE, sp)
 #ifdef CONFIG_TRACE_IRQFLAGS
 	/*
@@ -201,6 +237,7 @@ NESTED(except_vec_ejtag_debug, 0, sp)
  * This prototype is copied to ebase + n*IntCtl.VS and patched
  * to invoke the handler
  */
+BUILD_ROLLBACK_PROLOGUE except_vec_vi
 NESTED(except_vec_vi, 0, sp)
 	SAVE_SOME
 	SAVE_AT
@@ -245,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
 	and	t0, a0, t1
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 	mfc0	t2, CP0_TCCONTEXT
-	or	t0, t0, t2
-	mtc0	t0, CP0_TCCONTEXT
+	or	t2, t0, t2
+	mtc0	t2, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
 	xor	t1, t1, t0
 	mtc0	t1, CP0_STATUS
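The `ori`/`xori` pair in `BUILD_ROLLBACK_PROLOGUE` rounds EPC down to the 32-byte rollback boundary without needing a scratch register for a mask. The same arithmetic in C, with a made-up address:

#include <stdio.h>

int main(void)
{
	unsigned long epc = 0x80105a6cUL;		/* hypothetical EPC inside the wait region */
	unsigned long rounded = (epc | 0x1f) ^ 0x1f;	/* equivalent to epc & ~0x1fUL */

	printf("%#lx -> %#lx\n", epc, rounded);		/* 0x80105a6c -> 0x80105a60 */
	return 0;
}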
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 361364501d34..492a0a8d70fb 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -22,6 +22,7 @@
 #include <asm/irqflags.h>
 #include <asm/regdef.h>
 #include <asm/page.h>
+#include <asm/pgtable-bits.h>
 #include <asm/mipsregs.h>
 #include <asm/stackframe.h>

diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
index 8f6d58ede33c..6e152c80cd4a 100644
--- a/arch/mips/kernel/kgdb.c
+++ b/arch/mips/kernel/kgdb.c
@@ -236,8 +236,7 @@ int kgdb_arch_handle_exception(int vector, int signo, int err_code,
 
 		atomic_set(&kgdb_cpu_doing_single_step, -1);
 		if (remcom_in_buffer[0] == 's')
-			if (kgdb_contthread)
-				atomic_set(&kgdb_cpu_doing_single_step, cpu);
+			atomic_set(&kgdb_cpu_doing_single_step, cpu);

 		return 0;
 	}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index df4d3f2f740c..dc9eb72ed9de 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
 /*
  * FPU Use Factor empirically derived from experiments on 34K
  */
-#define FPUSEFACTOR 333
+#define FPUSEFACTOR 2000

 static __init int mt_fp_affinity_init(void)
 {
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index b16facd9ea8e..22fc19bbe87f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick(1);
 		while (!need_resched()) {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_MIPS_MT_SMTC
 			extern void smtc_idle_loop_hook(void);

 			smtc_idle_loop_hook();
@@ -145,17 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 	 */
 	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
 	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
-	clear_tsk_thread_flag(p, TIF_USEDFPU);

-#ifdef CONFIG_MIPS_MT_FPAFF
+#ifdef CONFIG_MIPS_MT_SMTC
 	/*
-	 * FPU affinity support is cleaner if we track the
-	 * user-visible CPU affinity from the very beginning.
-	 * The generic cpus_allowed mask will already have
-	 * been copied from the parent before copy_thread
-	 * is invoked.
+	 * SMTC restores TCStatus after Status, and the CU bits
+	 * are aliased there.
 	 */
-	p->thread.user_cpus_allowed = p->cpus_allowed;
+	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
+#endif
+	clear_tsk_thread_flag(p, TIF_USEDFPU);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	clear_tsk_thread_flag(p, TIF_FPUBOUND);
 #endif /* CONFIG_MIPS_MT_FPAFF */

 	if (clone_flags & CLONE_SETTLS)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a5..96ffc9c6d194 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case FPC_EIR: {	/* implementation / version register */
 		unsigned int flags;
 #ifdef CONFIG_MIPS_MT_SMTC
-		unsigned int irqflags;
+		unsigned long irqflags;
 		unsigned int mtflags;
 #endif /* CONFIG_MIPS_MT_SMTC */

diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a516286532ab..897fb2b4751c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,4 +1,21 @@
1/* Copyright (C) 2004 Mips Technologies, Inc */ 1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
15 *
16 * Copyright (C) 2004 Mips Technologies, Inc
17 * Copyright (C) 2008 Kevin D. Kissell
18 */
2 19
3#include <linux/clockchips.h> 20#include <linux/clockchips.h>
4#include <linux/kernel.h> 21#include <linux/kernel.h>
@@ -21,7 +38,6 @@
 #include <asm/time.h>
 #include <asm/addrspace.h>
 #include <asm/smtc.h>
-#include <asm/smtc_ipi.h>
 #include <asm/smtc_proc.h>

 /*
27/* 43/*
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS];
58 74
59asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; 75asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
60 76
61/*
62 * Clock interrupt "latch" buffers, per "CPU"
63 */
64
65static atomic_t ipi_timer_latch[NR_CPUS];
66 77
67/* 78/*
68 * Number of InterProcessor Interrupt (IPI) message buffers to allocate 79 * Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS];
 
 #define IPIBUF_PER_CPU 4

-static struct smtc_ipi_q IPIQ[NR_CPUS];
+struct smtc_ipi_q IPIQ[NR_CPUS];
 static struct smtc_ipi_q freeIPIq;

@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void)
  * phys_cpu_present_map and the logical/physical mappings.
  */

-int __init mipsmt_build_cpu_map(int start_cpu_slot)
+int __init smtc_build_cpu_map(int start_cpu_slot)
 {
 	int i, ntcs;

@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
 			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
 			| TCSTATUS_A);
-	write_tc_c0_tccontext(0);
+	/*
+	 * TCContext gets an offset from the base of the IPIQ array
+	 * to be used in low-level code to detect the presence of
+	 * an active IPI queue
+	 */
+	write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
 	/* Bind tc to vpe */
 	write_tc_c0_tcbind(vpe);
 	/* In general, all TCs should have the same cpu_data indications */
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
 	cpu_data[cpu].vpe_id = vpe;
 	cpu_data[cpu].tc_id = tc;
+	/* Multi-core SMTC hasn't been tested, but be prepared */
+	cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
 }

+/*
+ * Tweak to get Count registers in as close a sync as possible.
+ * Value seems good for 34K-class cores.
+ */
+
+#define CP0_SKEW 8

-void mipsmt_prepare_cpus(void)
+void smtc_prepare_cpus(int cpus)
 {
 	int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
 	unsigned long flags;
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void)
 		IPIQ[i].head = IPIQ[i].tail = NULL;
 		spin_lock_init(&IPIQ[i].lock);
 		IPIQ[i].depth = 0;
-		atomic_set(&ipi_timer_latch[i], 0);
 	}

 	/* cpu_data index starts at zero */
 	cpu = 0;
 	cpu_data[cpu].vpe_id = 0;
 	cpu_data[cpu].tc_id = 0;
+	cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
 	cpu++;

 	/* Report on boot-time options */
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void)
 			write_vpe_c0_compare(0);
 			/* Propagate Config7 */
 			write_vpe_c0_config7(read_c0_config7());
-			write_vpe_c0_count(read_c0_count());
+			write_vpe_c0_count(read_c0_count() + CP0_SKEW);
+			ehb();
 		}
 		/* enable multi-threading within VPE */
 		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void)
 void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 {
 	extern u32 kernelsp[NR_CPUS];
-	long flags;
+	unsigned long flags;
 	int mtflags;

 	LOCK_MT_PRA();
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 
 void smtc_init_secondary(void)
 {
-	/*
-	 * Start timer on secondary VPEs if necessary.
-	 * plat_timer_setup has already have been invoked by init/main
-	 * on "boot" TC.  Like per_cpu_trap_init() hack, this assumes that
-	 * SMTC init code assigns TCs consdecutively and in ascending order
-	 * to across available VPEs.
-	 */
-	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
-	    ((read_c0_tcbind() & TCBIND_CURVPE)
-	    != cpu_data[smp_processor_id() - 1].vpe_id)){
-		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
-	}
-
 	local_irq_enable();
 }

 void smtc_smp_finish(void)
 {
+	int cpu = smp_processor_id();
+
+	/*
+	 * Lowest-numbered CPU per VPE starts a clock tick.
+	 * Like per_cpu_trap_init() hack, this assumes that
+	 * SMTC init code assigns TCs consecutively and
+	 * in ascending order across available VPEs.
+	 */
+	if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
+		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+
 	printk("TC %d going on-line as CPU %d\n",
 		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 }
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 {
 	int tcstatus;
 	struct smtc_ipi *pipi;
-	long flags;
+	unsigned long flags;
 	int mtflags;
+	unsigned long tcrestart;
+	extern void r4k_wait_irqoff(void), __pastwait(void);

 	if (cpu == smp_processor_id()) {
 		printk("Cannot Send IPI to self!\n");
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 	pipi->arg = (void *)action;
 	pipi->dest = cpu;
 	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
-		if (type == SMTC_CLOCK_TICK)
-			atomic_inc(&ipi_timer_latch[cpu]);
 		/* If not on same VPE, enqueue and send cross-VPE interrupt */
 		smtc_ipi_nq(&IPIQ[cpu], pipi);
 		LOCK_CORE_PRA();
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 
 	if ((tcstatus & TCSTATUS_IXMT) != 0) {
 		/*
-		 * Spin-waiting here can deadlock,
-		 * so we queue the message for the target TC.
+		 * If we're in the irq-off version of the wait
+		 * loop, we need to force exit from the wait and
+		 * do a direct post of the IPI.
+		 */
+		if (cpu_wait == r4k_wait_irqoff) {
+			tcrestart = read_tc_c0_tcrestart();
+			if (tcrestart >= (unsigned long)r4k_wait_irqoff
+			    && tcrestart < (unsigned long)__pastwait) {
+				write_tc_c0_tcrestart(__pastwait);
+				tcstatus &= ~TCSTATUS_IXMT;
+				write_tc_c0_tcstatus(tcstatus);
+				goto postdirect;
+			}
+		}
+		/*
+		 * Otherwise we queue the message for the target TC
+		 * to pick up when he does a local_irq_restore()
 		 */
 		write_tc_c0_tchalt(0);
 		UNLOCK_CORE_PRA();
-		/* Try to reduce redundant timer interrupt messages */
-		if (type == SMTC_CLOCK_TICK) {
-			if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
-				smtc_ipi_nq(&freeIPIq, pipi);
-				return;
-			}
-		}
 		smtc_ipi_nq(&IPIQ[cpu], pipi);
 	} else {
-		if (type == SMTC_CLOCK_TICK)
-			atomic_inc(&ipi_timer_latch[cpu]);
+postdirect:
 		post_direct_ipi(cpu, pipi);
 		write_tc_c0_tchalt(0);
 		UNLOCK_CORE_PRA();
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void)
 	smp_call_function_interrupt();
 }

-DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);

 void ipi_decode(struct smtc_ipi *pipi)
 {
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
 	struct clock_event_device *cd;
 	void *arg_copy = pipi->arg;
 	int type_copy = pipi->type;
-	int ticks;

 	smtc_ipi_nq(&freeIPIq, pipi);
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
 		irq_enter();
 		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
-		cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-		ticks = atomic_read(&ipi_timer_latch[cpu]);
-		atomic_sub(ticks, &ipi_timer_latch[cpu]);
-		while (ticks) {
-			cd->event_handler(cd);
-			ticks--;
-		}
+		cd = &per_cpu(mips_clockevent_device, cpu);
+		cd->event_handler(cd);
 		irq_exit();
 		break;

@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi)
 	}
 }

+/*
+ * Similar to smtc_ipi_replay(), but invoked from context restore,
+ * so it reuses the current exception frame rather than setting up
+ * a new one with self_ipi.
+ */
+
 void deferred_smtc_ipi(void)
 {
-	struct smtc_ipi *pipi;
-	unsigned long flags;
-/* DEBUG */
-	int q = smp_processor_id();
+	int cpu = smp_processor_id();

 	/*
 	 * Test is not atomic, but much faster than a dequeue,
 	 * and the vast majority of invocations will have a null queue.
+	 * If IRQs were disabled when this was called, then any IPIs queued
+	 * after we test last will be taken on the next irq_enable/restore.
+	 * If interrupts were enabled, then any IPIs added after the
+	 * last test will be taken directly.
 	 */
-	if (IPIQ[q].head != NULL) {
-		while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
-			/* ipi_decode() should be called with interrupts off */
-			local_irq_save(flags);
+
+	while (IPIQ[cpu].head != NULL) {
+		struct smtc_ipi_q *q = &IPIQ[cpu];
+		struct smtc_ipi *pipi;
+		unsigned long flags;
+
+		/*
+		 * It may be possible we'll come in with interrupts
+		 * already enabled.
+		 */
+		local_irq_save(flags);
+
+		spin_lock(&q->lock);
+		pipi = __smtc_ipi_dq(q);
+		spin_unlock(&q->lock);
+		if (pipi != NULL)
 			ipi_decode(pipi);
-			local_irq_restore(flags);
-		}
+		/*
+		 * The use of the __raw_local restore isn't
+		 * as obviously necessary here as in smtc_ipi_replay(),
+		 * but it's more efficient, given that we're already
+		 * running down the IPI queue.
+		 */
+		__raw_local_irq_restore(flags);
 	}
 }
960 1007
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
 	struct smtc_ipi *pipi;
 	unsigned long tcstatus;
 	int sent;
-	long flags;
+	unsigned long flags;
 	unsigned int mtflags;
 	unsigned int vpflags;

@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
 
 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
- *
- * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
- * called with interrupts disabled.  We do rely on interrupts being disabled
- * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
- * result in a recursive call to raw_local_irq_restore().
  */

-static void __smtc_ipi_replay(void)
+ /*
+  * smtc_ipi_replay is called from raw_local_irq_restore
+  */
+
+void smtc_ipi_replay(void)
 {
 	unsigned int cpu = smp_processor_id();

 	/*
 	 * To the extent that we've ever turned interrupts off,
 	 * we may have accumulated deferred IPIs.  This is subtle.
-	 * If we use the smtc_ipi_qdepth() macro, we'll get an
-	 * exact number - but we'll also disable interrupts
-	 * and create a window of failure where a new IPI gets
-	 * queued after we test the depth but before we re-enable
-	 * interrupts. So long as IXMT never gets set, however,
 	 * we should be OK:  If we pick up something and dispatch
 	 * it here, that's great. If we see nothing, but concurrent
 	 * with this operation, another TC sends us an IPI, IXMT
 	 * is clear, and we'll handle it as a real pseudo-interrupt
-	 * and not a pseudo-pseudo interrupt.
+	 * and not a pseudo-pseudo interrupt.  The important thing
+	 * is to do the last check for queued message *after* the
+	 * re-enabling of interrupts.
 	 */
-	if (IPIQ[cpu].depth > 0) {
-		while (1) {
-			struct smtc_ipi_q *q = &IPIQ[cpu];
-			struct smtc_ipi *pipi;
-			extern void self_ipi(struct smtc_ipi *);
-
-			spin_lock(&q->lock);
-			pipi = __smtc_ipi_dq(q);
-			spin_unlock(&q->lock);
-			if (!pipi)
-				break;
+	while (IPIQ[cpu].head != NULL) {
+		struct smtc_ipi_q *q = &IPIQ[cpu];
+		struct smtc_ipi *pipi;
+		unsigned long flags;
+
+		/*
+		 * It's just possible we'll come in with interrupts
+		 * already enabled.
+		 */
+		local_irq_save(flags);
+
+		spin_lock(&q->lock);
+		pipi = __smtc_ipi_dq(q);
+		spin_unlock(&q->lock);
+		/*
+		 ** But use a raw restore here to avoid recursion.
+		 */
+		__raw_local_irq_restore(flags);

+		if (pipi) {
 			self_ipi(pipi);
 			smtc_cpu_stats[cpu].selfipis++;
 		}
 	}
 }

-void smtc_ipi_replay(void)
-{
-	raw_local_irq_disable();
-	__smtc_ipi_replay();
-}
-
 EXPORT_SYMBOL(smtc_ipi_replay);

 void smtc_idle_loop_hook(void)
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void)
 		}
 	}

-	/*
-	 * Now that we limit outstanding timer IPIs, check for hung TC
-	 */
-	for (tc = 0; tc < NR_CPUS; tc++) {
-		/* Don't check ourself - we'll dequeue IPIs just below */
-		if ((tc != smp_processor_id()) &&
-		    atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
-			if (clock_hang_reported[tc] == 0) {
-				pdb_msg += sprintf(pdb_msg,
-					"TC %d looks hung with timer latch at %d\n",
-					tc, atomic_read(&ipi_timer_latch[tc]));
-				clock_hang_reported[tc]++;
-			}
-		}
-	}
 	emt(mtflags);
 	local_irq_restore(flags);
 	if (pdb_msg != &id_ho_db_msg[0])
 		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */

-	/*
-	 * Replay any accumulated deferred IPIs. If "Instant Replay"
-	 * is in use, there should never be any.
-	 */
-#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-	{
-		unsigned long flags;
-
-		local_irq_save(flags);
-		__smtc_ipi_replay();
-		local_irq_restore(flags);
-	}
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
+	smtc_ipi_replay();
 }

@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void)
1242 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); 1260 printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
1243 } 1261 }
1244 smtc_ipi_qdump(); 1262 smtc_ipi_qdump();
1245 printk("Timer IPI Backlogs:\n");
1246 for (i=0; i < NR_CPUS; i++) {
1247 printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
1248 }
1249 printk("%d Recoveries of \"stolen\" FPU\n", 1263 printk("%d Recoveries of \"stolen\" FPU\n",
1250 atomic_read(&smtc_fpu_recoveries)); 1264 atomic_read(&smtc_fpu_recoveries));
1251} 1265}
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 6bee29097a56..b602ac6eb47d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -46,6 +46,9 @@
46#include <asm/types.h> 46#include <asm/types.h>
47#include <asm/stacktrace.h> 47#include <asm/stacktrace.h>
48 48
49extern void check_wait(void);
50extern asmlinkage void r4k_wait(void);
51extern asmlinkage void rollback_handle_int(void);
49extern asmlinkage void handle_int(void); 52extern asmlinkage void handle_int(void);
50extern asmlinkage void handle_tlbm(void); 53extern asmlinkage void handle_tlbm(void);
51extern asmlinkage void handle_tlbl(void); 54extern asmlinkage void handle_tlbl(void);
@@ -822,8 +825,10 @@ static void mt_ase_fp_affinity(void)
822 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { 825 if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
823 cpumask_t tmask; 826 cpumask_t tmask;
824 827
825 cpus_and(tmask, current->thread.user_cpus_allowed, 828 current->thread.user_cpus_allowed
826 mt_fpu_cpumask); 829 = current->cpus_allowed;
830 cpus_and(tmask, current->cpus_allowed,
831 mt_fpu_cpumask);
827 set_cpus_allowed(current, tmask); 832 set_cpus_allowed(current, tmask);
828 set_thread_flag(TIF_FPUBOUND); 833 set_thread_flag(TIF_FPUBOUND);
829 } 834 }
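The affinity fix above snapshots the live cpus_allowed into thread.user_cpus_allowed before narrowing the task to the FPU-equipped VPEs; the old code intersected against a possibly stale user_cpus_allowed, silently discarding affinity changes made since the thread was last FPU-bound. A hedged sketch of the counterpart path (the restore logic lives in mips-mt-fpaff.c, outside this hunk, and may differ in detail):

	/* Sketch only: when the task leaves FPU-bound state, the mask
	 * saved above is what should come back. */
	if (test_and_clear_thread_flag(TIF_FPUBOUND))
		set_cpus_allowed(current, current->thread.user_cpus_allowed);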
@@ -1251,6 +1256,9 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1251 1256
1252 extern char except_vec_vi, except_vec_vi_lui; 1257 extern char except_vec_vi, except_vec_vi_lui;
1253 extern char except_vec_vi_ori, except_vec_vi_end; 1258 extern char except_vec_vi_ori, except_vec_vi_end;
1259 extern char rollback_except_vec_vi;
1260 char *vec_start = (cpu_wait == r4k_wait) ?
1261 &rollback_except_vec_vi : &except_vec_vi;
1254#ifdef CONFIG_MIPS_MT_SMTC 1262#ifdef CONFIG_MIPS_MT_SMTC
1255 /* 1263 /*
1256 * We need to provide the SMTC vectored interrupt handler 1264 * We need to provide the SMTC vectored interrupt handler
@@ -1258,11 +1266,11 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1258 * Status.IM bit to be masked before going there. 1266 * Status.IM bit to be masked before going there.
1259 */ 1267 */
1260 extern char except_vec_vi_mori; 1268 extern char except_vec_vi_mori;
1261 const int mori_offset = &except_vec_vi_mori - &except_vec_vi; 1269 const int mori_offset = &except_vec_vi_mori - vec_start;
1262#endif /* CONFIG_MIPS_MT_SMTC */ 1270#endif /* CONFIG_MIPS_MT_SMTC */
1263 const int handler_len = &except_vec_vi_end - &except_vec_vi; 1271 const int handler_len = &except_vec_vi_end - vec_start;
1264 const int lui_offset = &except_vec_vi_lui - &except_vec_vi; 1272 const int lui_offset = &except_vec_vi_lui - vec_start;
1265 const int ori_offset = &except_vec_vi_ori - &except_vec_vi; 1273 const int ori_offset = &except_vec_vi_ori - vec_start;
1266 1274
1267 if (handler_len > VECTORSPACING) { 1275 if (handler_len > VECTORSPACING) {
1268 /* 1276 /*
@@ -1272,7 +1280,7 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
1272 panic("VECTORSPACING too small"); 1280 panic("VECTORSPACING too small");
1273 } 1281 }
1274 1282
1275 memcpy(b, &except_vec_vi, handler_len); 1283 memcpy(b, vec_start, handler_len);
1276#ifdef CONFIG_MIPS_MT_SMTC 1284#ifdef CONFIG_MIPS_MT_SMTC
1277 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */ 1285 BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
1278 1286
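With vec_start selecting between the rollback and plain stubs, every offset is measured from the stub actually being copied, so the lui/ori patch points remain valid for either flavour. For orientation, a hedged sketch of what offset-based immediate patching looks like (the real store sequence is outside this hunk and may differ):

	unsigned long handler = (unsigned long)addr;
	u32 *w = (u32 *)(b + lui_offset);

	/* Splice the 32-bit handler address into the copied stub's
	 * lui/ori immediates located by the offsets computed above. */
	*w = (*w & 0xffff0000) | ((handler >> 16) & 0xffff);
	w = (u32 *)(b + ori_offset);
	*w = (*w & 0xffff0000) | (handler & 0xffff);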
@@ -1554,6 +1562,10 @@ void __init trap_init(void)
1554 extern char except_vec3_generic, except_vec3_r4000; 1562 extern char except_vec3_generic, except_vec3_r4000;
1555 extern char except_vec4; 1563 extern char except_vec4;
1556 unsigned long i; 1564 unsigned long i;
1565 int rollback;
1566
1567 check_wait();
1568 rollback = (cpu_wait == r4k_wait);
1557 1569
1558#if defined(CONFIG_KGDB) 1570#if defined(CONFIG_KGDB)
1559 if (kgdb_early_setup) 1571 if (kgdb_early_setup)
@@ -1618,7 +1630,7 @@ void __init trap_init(void)
1618 if (board_be_init) 1630 if (board_be_init)
1619 board_be_init(); 1631 board_be_init();
1620 1632
1621 set_except_vector(0, handle_int); 1633 set_except_vector(0, rollback ? rollback_handle_int : handle_int);
1622 set_except_vector(1, handle_tlbm); 1634 set_except_vector(1, handle_tlbm);
1623 set_except_vector(2, handle_tlbl); 1635 set_except_vector(2, handle_tlbl);
1624 set_except_vector(3, handle_tlbs); 1636 set_except_vector(3, handle_tlbs);
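check_wait() settles which idle instruction the CPU will use; when it picks r4k_wait, the rollback flavour of the interrupt entry is installed. The idea, as a conceptual sketch rather than the kernel's actual assembly: an interrupt that lands between the idle loop's final need_resched check and the wait instruction must rewind EPC so the check is redone, otherwise the wakeup would be slept through.

	/* Conceptual only; r4k_wait_begin/r4k_wait_end are hypothetical
	 * labels bracketing the check-then-wait sequence. */
	if (regs->cp0_epc >= (unsigned long)r4k_wait_begin &&
	    regs->cp0_epc <  (unsigned long)r4k_wait_end)
		regs->cp0_epc = (unsigned long)r4k_wait_begin; /* redo the check */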
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index b5470ceb418b..afb119f35682 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -36,6 +36,7 @@ SECTIONS
36 SCHED_TEXT 36 SCHED_TEXT
37 LOCK_TEXT 37 LOCK_TEXT
38 KPROBES_TEXT 38 KPROBES_TEXT
39 *(.text.*)
39 *(.fixup) 40 *(.fixup)
40 *(.gnu.warning) 41 *(.gnu.warning)
41 } :text = 0 42 } :text = 0
diff --git a/arch/mips/lib/csum_partial.S b/arch/mips/lib/csum_partial.S
index 8d7784122c14..edac9892c51a 100644
--- a/arch/mips/lib/csum_partial.S
+++ b/arch/mips/lib/csum_partial.S
@@ -39,12 +39,14 @@
39#ifdef USE_DOUBLE 39#ifdef USE_DOUBLE
40 40
41#define LOAD ld 41#define LOAD ld
42#define LOAD32 lwu
42#define ADD daddu 43#define ADD daddu
43#define NBYTES 8 44#define NBYTES 8
44 45
45#else 46#else
46 47
47#define LOAD lw 48#define LOAD lw
49#define LOAD32 lw
48#define ADD addu 50#define ADD addu
49#define NBYTES 4 51#define NBYTES 4
50 52
@@ -60,6 +62,14 @@
60 ADD sum, v1; \ 62 ADD sum, v1; \
61 .set pop 63 .set pop
62 64
65#define ADDC32(sum,reg) \
66 .set push; \
67 .set noat; \
68 addu sum, reg; \
69 sltu v1, sum, reg; \
70 addu sum, v1; \
71 .set pop
72
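ADDC32 is the 32-bit end-around-carry counterpart of ADDC: on USE_DOUBLE kernels the running sum is 64 bits wide, but a value that is architecturally a 32-bit checksum (such as the caller's partial sum in a2) must fold its carry at bit 31, not bit 63. A self-contained C equivalent of the addu/sltu/addu sequence:

	#include <stdint.h>

	/* Add with end-around carry at 32 bits.  If the addition wraps,
	 * sum < reg afterwards, and the carry is folded back into bit 0
	 * -- exactly what addu/sltu/addu compute. */
	static inline uint32_t addc32(uint32_t sum, uint32_t reg)
	{
		sum += reg;
		if (sum < reg)
			sum += 1;
		return sum;
	}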
63#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \ 73#define CSUM_BIGCHUNK1(src, offset, sum, _t0, _t1, _t2, _t3) \
64 LOAD _t0, (offset + UNIT(0))(src); \ 74 LOAD _t0, (offset + UNIT(0))(src); \
65 LOAD _t1, (offset + UNIT(1))(src); \ 75 LOAD _t1, (offset + UNIT(1))(src); \
@@ -132,7 +142,7 @@ LEAF(csum_partial)
132 beqz t8, .Lqword_align 142 beqz t8, .Lqword_align
133 andi t8, src, 0x8 143 andi t8, src, 0x8
134 144
135 lw t0, 0x00(src) 145 LOAD32 t0, 0x00(src)
136 LONG_SUBU a1, a1, 0x4 146 LONG_SUBU a1, a1, 0x4
137 ADDC(sum, t0) 147 ADDC(sum, t0)
138 PTR_ADDU src, src, 0x4 148 PTR_ADDU src, src, 0x4
@@ -211,7 +221,7 @@ LEAF(csum_partial)
211 LONG_SRL t8, t8, 0x2 221 LONG_SRL t8, t8, 0x2
212 222
213.Lend_words: 223.Lend_words:
214 lw t0, (src) 224 LOAD32 t0, (src)
215 LONG_SUBU t8, t8, 0x1 225 LONG_SUBU t8, t8, 0x1
216 ADDC(sum, t0) 226 ADDC(sum, t0)
217 .set reorder /* DADDI_WAR */ 227 .set reorder /* DADDI_WAR */
@@ -230,6 +240,9 @@ LEAF(csum_partial)
230 /* Still a full word to go */ 240 /* Still a full word to go */
231 ulw t1, (src) 241 ulw t1, (src)
232 PTR_ADDIU src, 4 242 PTR_ADDIU src, 4
243#ifdef USE_DOUBLE
 244 dsll t1, t1, 32 /* clear the lower 32 bits */
245#endif
233 ADDC(sum, t1) 246 ADDC(sum, t1)
234 247
2351: move t1, zero 2481: move t1, zero
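The dsll above covers a 64-bit subtlety: MIPS64 word loads, including the ulw used for this unaligned tail, sign-extend into the upper 32 bits, so a word with bit 31 set would smear 0xffffffff into the top half of the 64-bit accumulator. Shifting the word up by 32 discards the extension, and since end-around-carry folding works modulo 2^32 - 1 when reducing 64 bits to 32, a chunk contributes the same final sum from the high half as from the low. A small C illustration (assuming the load behaves like a sign-extending int32_t read):

	#include <stdint.h>

	static inline uint64_t word_for_sum64(int32_t w) /* sign-extended load */
	{
		/* dsll t1, t1, 32: keep the 32 payload bits, drop the
		 * sign-extension bits, zero the low half. */
		return (uint64_t)(uint32_t)w << 32;
	}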
@@ -280,7 +293,7 @@ LEAF(csum_partial)
2801: 2931:
281 .set reorder 294 .set reorder
282 /* Add the passed partial csum. */ 295 /* Add the passed partial csum. */
283 ADDC(sum, a2) 296 ADDC32(sum, a2)
284 jr ra 297 jr ra
285 .set noreorder 298 .set noreorder
286 END(csum_partial) 299 END(csum_partial)
@@ -681,7 +694,7 @@ EXC( sb t0, NBYTES-2(dst), .Ls_exc)
681 .set pop 694 .set pop
6821: 6951:
683 .set reorder 696 .set reorder
684 ADDC(sum, psum) 697 ADDC32(sum, psum)
685 jr ra 698 jr ra
686 .set noreorder 699 .set noreorder
687 700
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index 3b7dd722c32a..cef2db8d2225 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o
15obj-$(CONFIG_PCI) += malta-pci.o 15obj-$(CONFIG_PCI) += malta-pci.o
16 16
17# FIXME FIXME FIXME 17# FIXME FIXME FIXME
18obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o 18obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
19 19
20EXTRA_CFLAGS += -Werror 20EXTRA_CFLAGS += -Werror
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c
index 5ea705e49454..f84a46a8ae6e 100644
--- a/arch/mips/mti-malta/malta-smtc.c
+++ b/arch/mips/mti-malta/malta-smtc.c
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void)
84 84
85static void __init msmtc_smp_setup(void) 85static void __init msmtc_smp_setup(void)
86{ 86{
87 mipsmt_build_cpu_map(0); 87 /*
 88 * We won't get the definitive value until
 89 * we've run smtc_prepare_cpus later, but
 90 * we appear to need an upper bound now.
91 */
92 smp_num_siblings = smtc_build_cpu_map(0);
88} 93}
89 94
90static void __init msmtc_prepare_cpus(unsigned int max_cpus) 95static void __init msmtc_prepare_cpus(unsigned int max_cpus)
91{ 96{
92 mipsmt_prepare_cpus(); 97 smtc_prepare_cpus(max_cpus);
93} 98}
94 99
95struct plat_smp_ops msmtc_smp_ops = { 100struct plat_smp_ops msmtc_smp_ops = {
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 15e01aec37fd..c8c32f417b6c 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_SOC_TX3927) += ops-tx3927.o
15obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o 15obj-$(CONFIG_PCI_VR41XX) += ops-vr41xx.o pci-vr41xx.o
16obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o 16obj-$(CONFIG_MARKEINS) += ops-emma2rh.o pci-emma2rh.o fixup-emma2rh.o
17obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o 17obj-$(CONFIG_PCI_TX4927) += ops-tx4927.o
18obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
18 19
19# 20#
20# These are still pretty much in the old state, watch, go blind. 21# These are still pretty much in the old state, watch, go blind.
diff --git a/arch/mips/pci/pci-bcm47xx.c b/arch/mips/pci/pci-bcm47xx.c
new file mode 100644
index 000000000000..bea9b6cdfdbf
--- /dev/null
+++ b/arch/mips/pci/pci-bcm47xx.c
@@ -0,0 +1,60 @@
1/*
2 * Copyright (C) 2008 Aurelien Jarno <aurelien@aurel32.net>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 *
9 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
10 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
12 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
13 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
14 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
15 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
16 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
17 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
18 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 675 Mass Ave, Cambridge, MA 02139, USA.
23 */
24
25#include <linux/types.h>
26#include <linux/pci.h>
27#include <linux/ssb/ssb.h>
28
29int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
30{
31 return 0;
32}
33
34int pcibios_plat_dev_init(struct pci_dev *dev)
35{
36 int res;
37 u8 slot, pin;
38
39 res = ssb_pcibios_plat_dev_init(dev);
40 if (res < 0) {
41 printk(KERN_ALERT "PCI: Failed to init device %s\n",
42 pci_name(dev));
43 return res;
44 }
45
46 pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
47 slot = PCI_SLOT(dev->devfn);
48 res = ssb_pcibios_map_irq(dev, slot, pin);
49
50 /* IRQ-0 and IRQ-1 are software interrupts. */
51 if (res < 2) {
52 printk(KERN_ALERT "PCI: Failed to map IRQ of device %s\n",
53 pci_name(dev));
54 return res;
55 }
56
57 dev->irq = res;
58 return 0;
59}
60
diff --git a/arch/mips/pci/pci-ip27.c b/arch/mips/pci/pci-ip27.c
index bd78368c82bf..f97ab1461012 100644
--- a/arch/mips/pci/pci-ip27.c
+++ b/arch/mips/pci/pci-ip27.c
@@ -143,25 +143,47 @@ int __cpuinit bridge_probe(nasid_t nasid, int widget_id, int masterwid)
143 */ 143 */
144int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) 144int __devinit pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
145{ 145{
146 return 0;
147}
148
149/* Most MIPS systems have straightforward swizzling needs. */
150static inline u8 bridge_swizzle(u8 pin, u8 slot)
151{
152 return (((pin - 1) + slot) % 4) + 1;
153}
154
155static inline struct pci_dev *bridge_root_dev(struct pci_dev *dev)
156{
157 while (dev->bus->parent) {
158 /* Move up the chain of bridges. */
159 dev = dev->bus->self;
160 }
161
162 return dev;
163}
164
165/* Do platform specific device initialization at pci_enable_device() time */
166int pcibios_plat_dev_init(struct pci_dev *dev)
167{
146 struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus); 168 struct bridge_controller *bc = BRIDGE_CONTROLLER(dev->bus);
147 int irq = bc->pci_int[slot]; 169 struct pci_dev *rdev = bridge_root_dev(dev);
170 int slot = PCI_SLOT(rdev->devfn);
171 int irq;
148 172
173 irq = bc->pci_int[slot];
149 if (irq == -1) { 174 if (irq == -1) {
150 irq = bc->pci_int[slot] = request_bridge_irq(bc); 175 irq = request_bridge_irq(bc);
151 if (irq < 0) 176 if (irq < 0)
152 panic("Can't allocate interrupt for PCI device %s\n", 177 return irq;
153 pci_name(dev)); 178
179 bc->pci_int[slot] = irq;
154 } 180 }
155 181
156 irq_to_bridge[irq] = bc; 182 irq_to_bridge[irq] = bc;
157 irq_to_slot[irq] = slot; 183 irq_to_slot[irq] = slot;
158 184
159 return irq; 185 dev->irq = irq;
160}
161 186
162/* Do platform specific device initialization at pci_enable_device() time */
163int pcibios_plat_dev_init(struct pci_dev *dev)
164{
165 return 0; 187 return 0;
166} 188}
167 189
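Two things change above: IRQ assignment moves from pcibios_map_irq() to pcibios_plat_dev_init(), so a bridge interrupt is allocated lazily at pci_enable_device() time and a failed request_bridge_irq() now propagates an error instead of panicking; and routing starts from the root device below the bridge, with the conventional swizzle available for rotation (its caller sits outside this hunk). A self-contained check of the swizzle arithmetic:

	#include <stdio.h>

	static unsigned char bridge_swizzle(unsigned char pin, unsigned char slot)
	{
		return (((pin - 1) + slot) % 4) + 1;
	}

	int main(void)
	{
		unsigned char slot;

		/* INTA (pin 1) from slots 0..3 lands on lines 1, 2, 3, 4:
		 * one line per slot, spreading load across the bridge. */
		for (slot = 0; slot < 4; slot++)
			printf("slot %u INTA -> line %u\n", slot,
			       bridge_swizzle(1, slot));
		return 0;
	}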
diff --git a/arch/mips/sibyte/swarm/Makefile b/arch/mips/sibyte/swarm/Makefile
index f18ba9201bbc..7b45f199d92a 100644
--- a/arch/mips/sibyte/swarm/Makefile
+++ b/arch/mips/sibyte/swarm/Makefile
@@ -1,3 +1,4 @@
1obj-y := setup.o rtc_xicor1241.o rtc_m41t81.o 1obj-y := platform.o setup.o rtc_xicor1241.o \
2 rtc_m41t81.o
2 3
3obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o 4obj-$(CONFIG_I2C_BOARDINFO) += swarm-i2c.o
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
new file mode 100644
index 000000000000..dd0e5b9b64e8
--- /dev/null
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -0,0 +1,81 @@
1#include <linux/err.h>
2#include <linux/kernel.h>
3#include <linux/init.h>
4#include <linux/io.h>
5#include <linux/platform_device.h>
6#include <linux/ata_platform.h>
7
8#include <asm/sibyte/board.h>
9#include <asm/sibyte/sb1250_genbus.h>
10#include <asm/sibyte/sb1250_regs.h>
11
12#define DRV_NAME "pata-swarm"
13
14#define SWARM_IDE_SHIFT 5
15#define SWARM_IDE_BASE 0x1f0
16#define SWARM_IDE_CTRL 0x3f6
17
18static struct resource swarm_pata_resource[] = {
19 {
20 .name = "Swarm GenBus IDE",
21 .flags = IORESOURCE_MEM,
22 }, {
23 .name = "Swarm GenBus IDE",
24 .flags = IORESOURCE_MEM,
25 }, {
26 .name = "Swarm GenBus IDE",
27 .flags = IORESOURCE_IRQ,
28 .start = K_INT_GB_IDE,
29 .end = K_INT_GB_IDE,
30 },
31};
32
33static struct pata_platform_info pata_platform_data = {
34 .ioport_shift = SWARM_IDE_SHIFT,
35};
36
37static struct platform_device swarm_pata_device = {
38 .name = "pata_platform",
39 .id = -1,
40 .resource = swarm_pata_resource,
41 .num_resources = ARRAY_SIZE(swarm_pata_resource),
42 .dev = {
43 .platform_data = &pata_platform_data,
44 .coherent_dma_mask = ~0, /* grumble */
45 },
46};
47
48static int __init swarm_pata_init(void)
49{
50 u8 __iomem *base;
51 phys_t offset, size;
52 struct resource *r;
53
54 if (!SIBYTE_HAVE_IDE)
55 return -ENODEV;
56
57 base = ioremap(A_IO_EXT_BASE, 0x800);
58 offset = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_START_ADDR, IDE_CS));
59 size = __raw_readq(base + R_IO_EXT_REG(R_IO_EXT_MULT_SIZE, IDE_CS));
60 iounmap(base);
61
62 offset = G_IO_START_ADDR(offset) << S_IO_ADDRBASE;
63 size = (G_IO_MULT_SIZE(size) + 1) << S_IO_REGSIZE;
64 if (offset < A_PHYS_GENBUS || offset >= A_PHYS_GENBUS_END) {
65 pr_info(DRV_NAME ": PATA interface at GenBus disabled\n");
66
67 return -EBUSY;
68 }
69
70 pr_info(DRV_NAME ": PATA interface at GenBus slot %i\n", IDE_CS);
71
72 r = swarm_pata_resource;
73 r[0].start = offset + (SWARM_IDE_BASE << SWARM_IDE_SHIFT);
74 r[0].end = offset + ((SWARM_IDE_BASE + 8) << SWARM_IDE_SHIFT) - 1;
75 r[1].start = offset + (SWARM_IDE_CTRL << SWARM_IDE_SHIFT);
76 r[1].end = offset + ((SWARM_IDE_CTRL + 1) << SWARM_IDE_SHIFT) - 1;
77
78 return platform_device_register(&swarm_pata_device);
79}
80
81device_initcall(swarm_pata_init);
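The resource windows follow pata_platform's addressing rule: with .ioport_shift = 5, register N of a block starting at BASE sits at offset + ((BASE + N) << 5), hence the "+ 8" spanning the eight-register command block and the "+ 1" for the single control register. A self-contained check of the arithmetic (offset taken as 0 here; at runtime it is the probed GenBus window base):

	#include <stdio.h>

	#define SWARM_IDE_SHIFT	5
	#define SWARM_IDE_BASE	0x1f0
	#define SWARM_IDE_CTRL	0x3f6

	int main(void)
	{
		unsigned long offset = 0;	/* GenBus window base */

		printf("cmd block: %#lx-%#lx\n",
		       offset + ((unsigned long)SWARM_IDE_BASE << SWARM_IDE_SHIFT),
		       offset + (((unsigned long)SWARM_IDE_BASE + 8) << SWARM_IDE_SHIFT) - 1);
		printf("ctl reg:   %#lx-%#lx\n",
		       offset + ((unsigned long)SWARM_IDE_CTRL << SWARM_IDE_SHIFT),
		       offset + (((unsigned long)SWARM_IDE_CTRL + 1) << SWARM_IDE_SHIFT) - 1);
		return 0;
	}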