36 files changed, 856 insertions(+), 451 deletions(-)
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index c2371c5a98f9..48a3955f05fc 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -77,7 +77,8 @@ documentation files are also added which explain how to use the feature.
 When a kernel change causes the interface that the kernel exposes to
 userspace to change, it is recommended that you send the information or
 a patch to the manual pages explaining the change to the manual pages
-maintainer at mtk.manpages@gmail.com.
+maintainer at mtk.manpages@gmail.com, and CC the list
+linux-api@vger.kernel.org.
 
 Here is a list of files that are in the kernel source tree that are
 required reading:
diff --git a/Documentation/SubmitChecklist b/Documentation/SubmitChecklist
index da10e0714241..21f0795af20f 100644
--- a/Documentation/SubmitChecklist
+++ b/Documentation/SubmitChecklist
@@ -67,6 +67,8 @@ kernel patches.
 
 19: All new userspace interfaces are documented in Documentation/ABI/.
     See Documentation/ABI/README for more information.
+    Patches that change userspace interfaces should be CCed to
+    linux-api@vger.kernel.org.
 
 20: Check that it all passes `make headers_check'.
 
diff --git a/MAINTAINERS b/MAINTAINERS
index 3596d1782264..8dae4555f10e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1198,9 +1198,7 @@ M:	hpa@zytor.com
 S:	Maintained
 
 CPUSETS
-P:	Paul Jackson
 P:	Paul Menage
-M:	pj@sgi.com
 M:	menage@google.com
 L:	linux-kernel@vger.kernel.org
 W:	http://www.bullopensource.org/cpuset/
@@ -2706,6 +2704,7 @@ MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
 P:	Michael Kerrisk
 M:	mtk.manpages@gmail.com
 W:	http://www.kernel.org/doc/man-pages
+L:	linux-man@vger.kernel.org
 S:	Supported
 
 MARVELL LIBERTAS WIRELESS DRIVER
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 49896a2a1d72..c930b8ceb418 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1403,7 +1403,6 @@ config MIPS_MT_SMTC
 	depends on CPU_MIPS32_R2
 	#depends on CPU_MIPS64_R2		# once there is hardware ...
 	depends on SYS_SUPPORTS_MULTITHREADING
-	select GENERIC_CLOCKEVENTS_BROADCAST
 	select CPU_MIPSR2_IRQ_VI
 	select CPU_MIPSR2_IRQ_EI
 	select MIPS_MT
@@ -1451,32 +1450,17 @@ config MIPS_VPE_LOADER
 	  Includes a loader for loading an elf relocatable object
 	  onto another VPE and running it.
 
-config MIPS_MT_SMTC_INSTANT_REPLAY
-	bool "Low-latency Dispatch of Deferred SMTC IPIs"
-	depends on MIPS_MT_SMTC && !PREEMPT
-	default y
-	help
-	  SMTC pseudo-interrupts between TCs are deferred and queued
-	  if the target TC is interrupt-inhibited (IXMT). In the first
-	  SMTC prototypes, these queued IPIs were serviced on return
-	  to user mode, or on entry into the kernel idle loop. The
-	  INSTANT_REPLAY option dispatches them as part of local_irq_restore()
-	  processing, which adds runtime overhead (hence the option to turn
-	  it off), but ensures that IPIs are handled promptly even under
-	  heavy I/O interrupt load.
-
 config MIPS_MT_SMTC_IM_BACKSTOP
 	bool "Use per-TC register bits as backstop for inhibited IM bits"
 	depends on MIPS_MT_SMTC
-	default y
+	default n
 	help
 	  To support multiple TC microthreads acting as "CPUs" within
 	  a VPE, VPE-wide interrupt mask bits must be specially manipulated
 	  during interrupt handling. To support legacy drivers and interrupt
 	  controller management code, SMTC has a "backstop" to track and
 	  if necessary restore the interrupt mask. This has some performance
-	  impact on interrupt service overhead. Disable it only if you know
-	  what you are doing.
+	  impact on interrupt service overhead.
 
 config MIPS_MT_SMTC_IRQAFF
 	bool "Support IRQ affinity API"
@@ -1486,10 +1470,8 @@ config MIPS_MT_SMTC_IRQAFF
 	  Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
 	  for SMTC Linux kernel. Requires platform support, of which
 	  an example can be found in the MIPS kernel i8259 and Malta
-	  platform code. It is recommended that MIPS_MT_SMTC_INSTANT_REPLAY
-	  be enabled if MIPS_MT_SMTC_IRQAFF is used. Adds overhead to
-	  interrupt dispatch, and should be used only if you know what
-	  you are doing.
+	  platform code. Adds some overhead to interrupt dispatch, and
+	  should be used only if you know what you are doing.
 
 config MIPS_VPE_LOADER_TOM
 	bool "Load VPE program into memory hidden from linux"
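The MIPS_MT_SMTC_INSTANT_REPLAY option disappears because its behavior becomes unconditional: deferred IPIs are now always replayed when interrupts are re-enabled, via smtc_ipi_replay() (reworked in the smtc.c hunks further down, whose comment notes it is called from raw_local_irq_restore). A hedged sketch of that control flow; the function name here is hypothetical and the actual asm/irqflags.h glue is not part of this patch:

	/* Sketch only: restore CP0 interrupt state, then drain any
	 * IPIs that were deferred while this TC was masked. */
	static inline void smtc_aware_irq_restore(unsigned long flags)
	{
		__raw_local_irq_restore(flags);
		smtc_ipi_replay();
	}
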
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 706f93974797..25775cb54000 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -10,6 +10,7 @@ obj-y += cpu-probe.o branch.o entry.o genex.o irq.o process.o \
 
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= cevt-smtc.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)	+= cevt-sb1250.o
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 24a2d907aa0d..4a4c59f2737a 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -12,6 +12,14 @@
 
 #include <asm/smtc_ipi.h>
 #include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * The SMTC Kernel for the 34K, 1004K, et al. replaces several
+ * of these routines with SMTC-specific variants.
+ */
+
+#ifndef CONFIG_MIPS_MT_SMTC
 
 static int mips_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
@@ -19,60 +27,27 @@ static int mips_next_event(unsigned long delta,
 	unsigned int cnt;
 	int res;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	{
-	unsigned long flags, vpflags;
-	local_irq_save(flags);
-	vpflags = dvpe();
-#endif
 	cnt = read_c0_count();
 	cnt += delta;
 	write_c0_compare(cnt);
 	res = ((int)(read_c0_count() - cnt) > 0) ? -ETIME : 0;
-#ifdef CONFIG_MIPS_MT_SMTC
-	evpe(vpflags);
-	local_irq_restore(flags);
-	}
-#endif
 	return res;
 }
 
-static void mips_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+void mips_set_clock_mode(enum clock_event_mode mode,
+			 struct clock_event_device *evt)
 {
 	/* Nothing to do ...  */
 }
 
-static DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
-static int cp0_timer_irq_installed;
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
 
-/*
- * Timer ack for an R4k-compatible timer of a known frequency.
- */
-static void c0_timer_ack(void)
-{
-	write_c0_compare(read_c0_compare());
-}
+#ifndef CONFIG_MIPS_MT_SMTC
 
-/*
- * Possibly handle a performance counter interrupt.
- * Return true if the timer interrupt should not be checked
- */
-static inline int handle_perf_irq(int r2)
-{
-	/*
-	 * The performance counter overflow interrupt may be shared with the
-	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
-	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
-	 * and we can't reliably determine if a counter interrupt has also
-	 * happened (!r2) then don't check for a timer interrupt.
-	 */
-	return (cp0_perfcount_irq < 0) &&
-		perf_irq() == IRQ_HANDLED &&
-		!r2;
-}
-
-static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 {
 	const int r2 = cpu_has_mips_r2;
 	struct clock_event_device *cd;
@@ -93,12 +68,8 @@ static irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
 	 * interrupt.  Being the paranoiacs we are we check anyway.
 	 */
 	if (!r2 || (read_c0_cause() & (1 << 30))) {
-		c0_timer_ack();
-#ifdef CONFIG_MIPS_MT_SMTC
-		if (cpu_data[cpu].vpe_id)
-			goto out;
-		cpu = 0;
-#endif
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
 		cd = &per_cpu(mips_clockevent_device, cpu);
 		cd->event_handler(cd);
 	}
@@ -107,65 +78,16 @@ out:
 	return IRQ_HANDLED;
 }
 
-static struct irqaction c0_compare_irqaction = {
+#endif /* Not CONFIG_MIPS_MT_SMTC */
+
+struct irqaction c0_compare_irqaction = {
 	.handler = c0_compare_interrupt,
-#ifdef CONFIG_MIPS_MT_SMTC
-	.flags = IRQF_DISABLED,
-#else
 	.flags = IRQF_DISABLED | IRQF_PERCPU,
-#endif
 	.name = "timer",
 };
 
-#ifdef CONFIG_MIPS_MT_SMTC
-DEFINE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
-
-static void smtc_set_mode(enum clock_event_mode mode,
-			  struct clock_event_device *evt)
-{
-}
-
-static void mips_broadcast(cpumask_t mask)
-{
-	unsigned int cpu;
-
-	for_each_cpu_mask(cpu, mask)
-		smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-}
-
-static void setup_smtc_dummy_clockevent_device(void)
-{
-	//uint64_t mips_freq = mips_hpt_frequency;
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *cd;
 
-	cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-
-	cd->name		= "SMTC";
-	cd->features		= CLOCK_EVT_FEAT_DUMMY;
-
-	/* Calculate the min / max delta */
-	cd->mult	= 0; //div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
-	cd->shift		= 0; //32;
-	cd->max_delta_ns	= 0; //clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns	= 0; //clockevent_delta2ns(0x30, cd);
-
-	cd->rating		= 200;
-	cd->irq			= 17; //-1;
-//	if (cpu)
-//		cd->cpumask	= CPU_MASK_ALL; // cpumask_of_cpu(cpu);
-//	else
-	cd->cpumask		= cpumask_of_cpu(cpu);
-
-	cd->set_mode		= smtc_set_mode;
-
-	cd->broadcast		= mips_broadcast;
-
-	clockevents_register_device(cd);
-}
-#endif
-
-static void mips_event_handler(struct clock_event_device *dev)
+void mips_event_handler(struct clock_event_device *dev)
 {
 }
 
@@ -177,7 +99,23 @@ static int c0_compare_int_pending(void)
 	return (read_c0_cause() >> cp0_compare_irq) & 0x100;
 }
 
-static int c0_compare_int_usable(void)
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so a single execution hazard barrier may not be enough to give
+ * it time to clear as seen in the Cause register.  4 times the
+ * pipeline depth seems reasonably conservative, and empirically
+ * works better in configurations with high CPU/bus clock ratios.
+ */
+
+#define compare_change_hazard() \
+	do { \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+		irq_disable_hazard(); \
+	} while (0)
+
+int c0_compare_int_usable(void)
 {
 	unsigned int delta;
 	unsigned int cnt;
@@ -187,7 +125,7 @@ static int c0_compare_int_usable(void)
 	 */
 	if (c0_compare_int_pending()) {
 		write_c0_compare(read_c0_count());
-		irq_disable_hazard();
+		compare_change_hazard();
 		if (c0_compare_int_pending())
 			return 0;
 	}
@@ -196,7 +134,7 @@ static int c0_compare_int_usable(void)
 		cnt = read_c0_count();
 		cnt += delta;
 		write_c0_compare(cnt);
-		irq_disable_hazard();
+		compare_change_hazard();
 		if ((int)(read_c0_count() - cnt) < 0)
 		    break;
 		/* increase delta if the timer was already expired */
@@ -205,11 +143,12 @@ static int c0_compare_int_usable(void)
 	while ((int)(read_c0_count() - cnt) <= 0)
 		;	/* Wait for expiry  */
 
+	compare_change_hazard();
 	if (!c0_compare_int_pending())
 		return 0;
 
 	write_c0_compare(read_c0_count());
-	irq_disable_hazard();
+	compare_change_hazard();
 	if (c0_compare_int_pending())
 		return 0;
 
@@ -219,6 +158,8 @@ static int c0_compare_int_usable(void)
 	return 1;
 }
 
+#ifndef CONFIG_MIPS_MT_SMTC
+
 int __cpuinit mips_clockevent_init(void)
 {
 	uint64_t mips_freq = mips_hpt_frequency;
@@ -229,17 +170,6 @@ int __cpuinit mips_clockevent_init(void)
 	if (!cpu_has_counter || !mips_hpt_frequency)
 		return -ENXIO;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-	setup_smtc_dummy_clockevent_device();
-
-	/*
-	 * On SMTC we only register VPE0's compare interrupt as clockevent
-	 * device.
-	 */
-	if (cpu)
-		return 0;
-#endif
-
 	if (!c0_compare_int_usable())
 		return -ENXIO;
 
@@ -265,13 +195,9 @@ int __cpuinit mips_clockevent_init(void)
 
 	cd->rating		= 300;
 	cd->irq			= irq;
-#ifdef CONFIG_MIPS_MT_SMTC
-	cd->cpumask		= CPU_MASK_ALL;
-#else
 	cd->cpumask		= cpumask_of_cpu(cpu);
-#endif
 	cd->set_next_event	= mips_next_event;
-	cd->set_mode		= mips_set_mode;
+	cd->set_mode		= mips_set_clock_mode;
 	cd->event_handler	= mips_event_handler;
 
 	clockevents_register_device(cd);
@@ -281,12 +207,9 @@ int __cpuinit mips_clockevent_init(void)
 
 	cp0_timer_irq_installed = 1;
 
-#ifdef CONFIG_MIPS_MT_SMTC
-#define CPUCTR_IMASKBIT (0x100 << cp0_compare_irq)
-	setup_irq_smtc(irq, &c0_compare_irqaction, CPUCTR_IMASKBIT);
-#else
 	setup_irq(irq, &c0_compare_irqaction);
-#endif
 
 	return 0;
 }
+
+#endif /* Not CONFIG_MIPS_MT_SMTC */
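The new compare_change_hazard() above is just four back-to-back execution-hazard barriers, used everywhere the Compare register is rewritten before Cause is re-sampled. A hedged sketch of the pattern it protects, reusing the helpers from this file; the function name c0_ack_and_check is hypothetical, but the body mirrors the sequence used in c0_compare_int_usable():

	/* Illustration only: after writing Compare, the pending bit in
	 * Cause may be latched outside the core and take several cycles
	 * to deassert, so let the state settle before sampling it. */
	static int c0_ack_and_check(void)
	{
		write_c0_compare(read_c0_compare());	/* ack the timer */
		compare_change_hazard();		/* let Cause settle */
		return c0_compare_int_pending();	/* now safe to test */
	}
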
diff --git a/arch/mips/kernel/cevt-smtc.c b/arch/mips/kernel/cevt-smtc.c
new file mode 100644
index 000000000000..5162fe4b5952
--- /dev/null
+++ b/arch/mips/kernel/cevt-smtc.c
@@ -0,0 +1,321 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+
+#include <asm/smtc_ipi.h>
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+/*
+ * Variant clock event timer support for SMTC on MIPS 34K, 1004K
+ * or other MIPS MT cores.
+ *
+ * Notes on SMTC Support:
+ *
+ * SMTC has multiple microthread TCs pretending to be Linux CPUs.
+ * But there's only one Count/Compare pair per VPE, and Compare
+ * interrupts are taken opportunistically by available TCs
+ * bound to the VPE with the Count register.  The new timer
+ * framework provides for global broadcasts, but we really
+ * want VPE-level multicasts for best behavior.  So instead
+ * of invoking the high-level clock-event broadcast code,
+ * this version of SMTC support uses the historical SMTC
+ * multicast mechanisms "under the hood", appearing to the
+ * generic clock layer as if the interrupts are per-CPU.
+ *
+ * The approach taken here is to maintain a set of NR_CPUS
+ * virtual timers, and track which "CPU" needs to be alerted
+ * at each event.
+ *
+ * It's unlikely that we'll see a MIPS MT core with more than
+ * 2 VPEs, but we *know* that we won't need to handle more
+ * VPEs than we have "CPUs".  So NR_CPUS arrays of NR_CPUS elements
+ * are always going to be overkill, but always going to be enough.
+ */
+
+unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
+static int smtc_nextinvpe[NR_CPUS];
+
+/*
+ * Timestamps stored are absolute values to be programmed
+ * into Count register.  Valid timestamps will never be zero.
+ * If a Zero Count value is actually calculated, it is converted
+ * to be a 1, which will introduce one or two CPU cycles of error
+ * roughly once every four billion events, which at 1000 HZ means
+ * about once every 50 days.  If that's actually a problem, one
+ * could alternate squashing 0 to 1 and to -1.
+ */
+
+#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
+#define ISVALID(x) ((x) != 0L)
+
+/*
+ * Time comparison is subtle, as it's really truncated
+ * modular arithmetic.
+ */
+
+#define IS_SOONER(a, b, reference) \
+    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
+
+/*
+ * CATCHUP_INCREMENT, used when the function falls behind the counter.
+ * Could be an increasing function instead of a constant.
+ */
+
+#define CATCHUP_INCREMENT 64
+
+static int mips_next_event(unsigned long delta,
+				struct clock_event_device *evt)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	unsigned long timestamp, reference, previous;
+	unsigned long nextcomp = 0L;
+	int vpe = current_cpu_data.vpe_id;
+	int cpu = smp_processor_id();
+	local_irq_save(flags);
+	mtflags = dmt();
+
+	/*
+	 * Maintain the per-TC virtual timer
+	 * and program the per-VPE shared Count register
+	 * as appropriate here...
+	 */
+	reference = (unsigned long)read_c0_count();
+	timestamp = MAKEVALID(reference + delta);
+	/*
+	 * To really model the clock, we have to catch the case
+	 * where the current next-in-VPE timestamp is the old
+	 * timestamp for the calling CPU, but the new value is
+	 * in fact later.  In that case, we have to do a full
+	 * scan and discover the new next-in-VPE CPU id and
+	 * timestamp.
+	 */
+	previous = smtc_nexttime[vpe][cpu];
+	if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
+	    && IS_SOONER(previous, timestamp, reference)) {
+		int i;
+		int soonest = cpu;
+
+		/*
+		 * Update timestamp array here, so that new
+		 * value gets considered along with those of
+		 * other virtual CPUs on the VPE.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+		for_each_online_cpu(i) {
+			if (ISVALID(smtc_nexttime[vpe][i])
+			    && IS_SOONER(smtc_nexttime[vpe][i],
+				smtc_nexttime[vpe][soonest], reference)) {
+				soonest = i;
+			}
+		}
+		smtc_nextinvpe[vpe] = soonest;
+		nextcomp = smtc_nexttime[vpe][soonest];
+	/*
+	 * Otherwise, we don't have to process the whole array rank,
+	 * we just have to see if the event horizon has gotten closer.
+	 */
+	} else {
+		if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
+		    IS_SOONER(timestamp,
+			smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
+			smtc_nextinvpe[vpe] = cpu;
+			nextcomp = timestamp;
+		}
+		/*
+		 * Since next-in-VPE may be the same as the executing
+		 * virtual CPU, we update the array *after* checking
+		 * its value.
+		 */
+		smtc_nexttime[vpe][cpu] = timestamp;
+	}
+
+	/*
+	 * It may be that, in fact, we don't need to update Compare,
+	 * but if we do, we want to make sure we didn't fall into
+	 * a crack just behind Count.
+	 */
+	if (ISVALID(nextcomp)) {
+		write_c0_compare(nextcomp);
+		ehb();
+		/*
+		 * We never return an error, we just make sure
+		 * that we trigger the handlers as quickly as
+		 * we can if we fell behind.
+		 */
+		while ((nextcomp - (unsigned long)read_c0_count())
+			> (unsigned long)LONG_MAX) {
+			nextcomp += CATCHUP_INCREMENT;
+			write_c0_compare(nextcomp);
+			ehb();
+		}
+	}
+	emt(mtflags);
+	local_irq_restore(flags);
+	return 0;
+}
+
+
+void smtc_distribute_timer(int vpe)
+{
+	unsigned long flags;
+	unsigned int mtflags;
+	int cpu;
+	struct clock_event_device *cd;
+	unsigned long nextstamp = 0L;
+	unsigned long reference;
+
+
+repeat:
+	for_each_online_cpu(cpu) {
+		/*
+		 * Find virtual CPUs within the current VPE who have
+		 * unserviced timer requests whose time is now past.
+		 */
+		local_irq_save(flags);
+		mtflags = dmt();
+		if (cpu_data[cpu].vpe_id == vpe &&
+		    ISVALID(smtc_nexttime[vpe][cpu])) {
+			reference = (unsigned long)read_c0_count();
+			if ((smtc_nexttime[vpe][cpu] - reference)
+				 > (unsigned long)LONG_MAX) {
+				smtc_nexttime[vpe][cpu] = 0L;
+				emt(mtflags);
+				local_irq_restore(flags);
+				/*
+				 * We don't send IPIs to ourself.
+				 */
+				if (cpu != smp_processor_id()) {
+					smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+				} else {
+					cd = &per_cpu(mips_clockevent_device, cpu);
+					cd->event_handler(cd);
+				}
+			} else {
+				/* Local to VPE but Valid Time not yet reached. */
+				if (!ISVALID(nextstamp) ||
+				    IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
+				    reference)) {
+					smtc_nextinvpe[vpe] = cpu;
+					nextstamp = smtc_nexttime[vpe][cpu];
+				}
+				emt(mtflags);
+				local_irq_restore(flags);
+			}
+		} else {
+			emt(mtflags);
+			local_irq_restore(flags);
+
+		}
+	}
+	/* Reprogram for interrupt at next soonest timestamp for VPE */
+	if (ISVALID(nextstamp)) {
+		write_c0_compare(nextstamp);
+		ehb();
+		if ((nextstamp - (unsigned long)read_c0_count())
+			> (unsigned long)LONG_MAX)
+			goto repeat;
+	}
+}
+
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+	int cpu = smp_processor_id();
+
+	/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
+	handle_perf_irq(1);
+
+	if (read_c0_cause() & (1 << 30)) {
+		/* Clear Count/Compare Interrupt */
+		write_c0_compare(read_c0_compare());
+		smtc_distribute_timer(cpu_data[cpu].vpe_id);
+	}
+	return IRQ_HANDLED;
+}
+
+
+int __cpuinit mips_clockevent_init(void)
+{
+	uint64_t mips_freq = mips_hpt_frequency;
+	unsigned int cpu = smp_processor_id();
+	struct clock_event_device *cd;
+	unsigned int irq;
+	int i;
+	int j;
+
+	if (!cpu_has_counter || !mips_hpt_frequency)
+		return -ENXIO;
+	if (cpu == 0) {
+		for (i = 0; i < num_possible_cpus(); i++) {
+			smtc_nextinvpe[i] = 0;
+			for (j = 0; j < num_possible_cpus(); j++)
+				smtc_nexttime[i][j] = 0L;
+		}
+		/*
+		 * SMTC also can't have the usability test
+		 * run by secondary TCs once Compare is in use.
+		 */
+		if (!c0_compare_int_usable())
+			return -ENXIO;
+	}
+
+	/*
+	 * With vectored interrupts things are getting platform specific.
+	 * get_c0_compare_int is a hook to allow a platform to return the
+	 * interrupt number of its liking.
+	 */
+	irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+	if (get_c0_compare_int)
+		irq = get_c0_compare_int();
+
+	cd = &per_cpu(mips_clockevent_device, cpu);
+
+	cd->name		= "MIPS";
+	cd->features		= CLOCK_EVT_FEAT_ONESHOT;
+
+	/* Calculate the min / max delta */
+	cd->mult	= div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
+	cd->shift		= 32;
+	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
+	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
+
+	cd->rating		= 300;
+	cd->irq			= irq;
+	cd->cpumask		= cpumask_of_cpu(cpu);
+	cd->set_next_event	= mips_next_event;
+	cd->set_mode		= mips_set_clock_mode;
+	cd->event_handler	= mips_event_handler;
+
+	clockevents_register_device(cd);
+
+	/*
+	 * On SMTC we only want to do the data structure
+	 * initialization and IRQ setup once.
+	 */
+	if (cpu)
+		return 0;
+	/*
+	 * And we need the hwmask associated with the c0_compare
+	 * vector to be initialized.
+	 */
+	irq_hwmask[irq] = (0x100 << cp0_compare_irq);
+	if (cp0_timer_irq_installed)
+		return 0;
+
+	cp0_timer_irq_installed = 1;
+
+	setup_irq(irq, &c0_compare_irqaction);
+
+	return 0;
+}
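The IS_SOONER() comparison above relies on unsigned modular arithmetic so it stays correct when Count wraps. A small standalone check of that property (hypothetical test program, not part of the patch):

	#include <assert.h>
	#include <limits.h>

	#define IS_SOONER(a, b, reference) \
	    (((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))

	int main(void)
	{
		unsigned long now = ULONG_MAX - 10;	/* Count just before wrap */
		unsigned long a = ULONG_MAX - 5;	/* due in 5 ticks */
		unsigned long b = 20;			/* due in 31 ticks, past the wrap */

		assert(IS_SOONER(a, b, now));	/* a naive a < b would say the opposite */
		assert(!IS_SOONER(b, a, now));
		return 0;
	}

The same trick appears in mips_next_event() and smtc_distribute_timer(): a difference (next - count) greater than LONG_MAX means the unsigned subtraction has gone "negative", i.e. the timestamp is already in the past.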
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 11c92dc53791..e621fda8ab37 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -54,14 +54,18 @@ extern void r4k_wait(void);
  * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
  * using this version a gamble.
  */
-static void r4k_wait_irqoff(void)
+void r4k_wait_irqoff(void)
 {
 	local_irq_disable();
 	if (!need_resched())
-		__asm__("	.set	mips3		\n"
+		__asm__("	.set	push		\n"
+			"	.set	mips3		\n"
 			"	wait			\n"
-			"	.set	mips0		\n");
+			"	.set	pop		\n");
 	local_irq_enable();
+	__asm__("	.globl __pastwait	\n"
+		"__pastwait:			\n");
+	return;
 }
 
 /*
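The new __pastwait label gives the IPI code a way to recognize a TC parked in this wait loop: any restart PC in the half-open range [r4k_wait_irqoff, __pastwait) is inside it. A sketch of that test, assuming the same externs that the smtc.c hunk below declares; the helper name tc_parked_in_wait is hypothetical, and smtc_send_ipi() open-codes the check:

	extern void r4k_wait_irqoff(void), __pastwait(void);

	static int tc_parked_in_wait(unsigned long tcrestart)
	{
		return tcrestart >= (unsigned long)r4k_wait_irqoff &&
		       tcrestart <  (unsigned long)__pastwait;
	}

Rewriting the blocked TC's TCRestart to __pastwait then forces it out of the loop, so the IPI can be posted directly instead of being queued behind a TC that may never wake.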
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index e29598ae939d..ffa331029e08 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -79,11 +79,6 @@ FEXPORT(syscall_exit)
 
 FEXPORT(restore_all)			# restore full frame
 #ifdef CONFIG_MIPS_MT_SMTC
-/* Detect and execute deferred IPI "interrupts" */
-	LONG_L	s0, TI_REGS($28)
-	LONG_S	sp, TI_REGS($28)
-	jal	deferred_smtc_ipi
-	LONG_S	s0, TI_REGS($28)
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 /* Re-arm any temporarily masked interrupts not explicitly "acked" */
 	mfc0	v0, CP0_TCSTATUS
@@ -112,6 +107,11 @@ FEXPORT(restore_all)			# restore full frame
 	xor	t0, t0, t3
 	mtc0	t0, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
+/* Detect and execute deferred IPI "interrupts" */
+	LONG_L	s0, TI_REGS($28)
+	LONG_S	sp, TI_REGS($28)
+	jal	deferred_smtc_ipi
+	LONG_S	s0, TI_REGS($28)
 #endif /* CONFIG_MIPS_MT_SMTC */
 	.set	noat
 	RESTORE_TEMP
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index f886dd7f708e..01dcbe38fa01 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -282,8 +282,8 @@ NESTED(except_vec_vi_handler, 0, sp)
 	and	t0, a0, t1
 #ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
 	mfc0	t2, CP0_TCCONTEXT
-	or	t0, t0, t2
-	mtc0	t0, CP0_TCCONTEXT
+	or	t2, t0, t2
+	mtc0	t2, CP0_TCCONTEXT
 #endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
 	xor	t1, t1, t0
 	mtc0	t1, CP0_STATUS
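The genex.S change is a register-clobber fix: t0 still holds the IM bits just masked off and is needed by the xor/mtc0 pair that updates Status, so the backstop accumulation must OR into t2 instead. A hypothetical C model of the difference (the values are made up for illustration):

	#include <assert.h>

	int main(void)
	{
		unsigned t1 = 0xff00;		/* IM field of Status */
		unsigned a0 = 0x4500;		/* interrupts being inhibited now */
		unsigned tccontext = 0x0200;	/* bits already backstopped */
		unsigned t0 = a0 & t1;		/* new bits to inhibit: 0x4500 */

		/* Fixed sequence: accumulate into t2, leaving t0 intact. */
		unsigned t2 = t0 | tccontext;
		tccontext = t2;
		assert((t1 ^ t0) == 0xba00);	/* only the new bits leave Status */

		/* The old "or t0, t0, t2" would leave t0 == 0x4700, so the
		 * following xor would also clear the 0x0200 bit that was only
		 * supposed to be remembered in the backstop. */
		return 0;
	}
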
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index df4d3f2f740c..dc9eb72ed9de 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -159,7 +159,7 @@ __setup("fpaff=", fpaff_thresh);
 /*
  * FPU Use Factor empirically derived from experiments on 34K
  */
-#define FPUSEFACTOR 333
+#define FPUSEFACTOR 2000
 
 static __init int mt_fp_affinity_init(void)
 {
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index ce7684335a41..22fc19bbe87f 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -55,7 +55,7 @@ void __noreturn cpu_idle(void)
 	while (1) {
 		tick_nohz_stop_sched_tick(1);
 		while (!need_resched()) {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
+#ifdef CONFIG_MIPS_MT_SMTC
 			extern void smtc_idle_loop_hook(void);
 
 			smtc_idle_loop_hook();
@@ -145,19 +145,18 @@ int copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
 	 */
 	p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
 	childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
+
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC restores TCStatus after Status, and the CU bits
+	 * are aliased there.
+	 */
+	childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
+#endif
 	clear_tsk_thread_flag(p, TIF_USEDFPU);
 
 #ifdef CONFIG_MIPS_MT_FPAFF
 	clear_tsk_thread_flag(p, TIF_FPUBOUND);
-
-	/*
-	 * FPU affinity support is cleaner if we track the
-	 * user-visible CPU affinity from the very beginning.
-	 * The generic cpus_allowed mask will already have
-	 * been copied from the parent before copy_thread
-	 * is invoked.
-	 */
-	p->thread.user_cpus_allowed = p->cpus_allowed;
 #endif /* CONFIG_MIPS_MT_FPAFF */
 
 	if (clone_flags & CLONE_SETTLS)
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index 35234b92b9a5..96ffc9c6d194 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -238,7 +238,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
 	case FPC_EIR: {	/* implementation / version register */
 		unsigned int flags;
 #ifdef CONFIG_MIPS_MT_SMTC
-		unsigned int irqflags;
+		unsigned long irqflags;
 		unsigned int mtflags;
 #endif /* CONFIG_MIPS_MT_SMTC */
 
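The irqflags fix here recurs throughout the patch (smtc_boot_secondary(), smtc_send_ipi(), and ipi_interrupt() below all move from long or int to unsigned long): interrupt-state words must be unsigned long, the type the local_irq_save()/local_irq_restore() pair traffics in. The canonical shape, for reference:

	unsigned long flags;	/* not int, not plain long */

	local_irq_save(flags);
	/* ... critical section ... */
	local_irq_restore(flags);

A narrower or signed type risks truncation or sign-extension surprises on 64-bit kernels, which is why these declarations are being tightened even where the code happened to work.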
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c index a516286532ab..897fb2b4751c 100644 --- a/arch/mips/kernel/smtc.c +++ b/arch/mips/kernel/smtc.c | |||
@@ -1,4 +1,21 @@ | |||
1 | /* Copyright (C) 2004 Mips Technologies, Inc */ | 1 | /* |
2 | * This program is free software; you can redistribute it and/or | ||
3 | * modify it under the terms of the GNU General Public License | ||
4 | * as published by the Free Software Foundation; either version 2 | ||
5 | * of the License, or (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) 2004 Mips Technologies, Inc | ||
17 | * Copyright (C) 2008 Kevin D. Kissell | ||
18 | */ | ||
2 | 19 | ||
3 | #include <linux/clockchips.h> | 20 | #include <linux/clockchips.h> |
4 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
@@ -21,7 +38,6 @@ | |||
21 | #include <asm/time.h> | 38 | #include <asm/time.h> |
22 | #include <asm/addrspace.h> | 39 | #include <asm/addrspace.h> |
23 | #include <asm/smtc.h> | 40 | #include <asm/smtc.h> |
24 | #include <asm/smtc_ipi.h> | ||
25 | #include <asm/smtc_proc.h> | 41 | #include <asm/smtc_proc.h> |
26 | 42 | ||
27 | /* | 43 | /* |
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS]; | |||
58 | 74 | ||
59 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; | 75 | asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS]; |
60 | 76 | ||
61 | /* | ||
62 | * Clock interrupt "latch" buffers, per "CPU" | ||
63 | */ | ||
64 | |||
65 | static atomic_t ipi_timer_latch[NR_CPUS]; | ||
66 | 77 | ||
67 | /* | 78 | /* |
68 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate | 79 | * Number of InterProcessor Interrupt (IPI) message buffers to allocate |
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS]; | |||
70 | 81 | ||
71 | #define IPIBUF_PER_CPU 4 | 82 | #define IPIBUF_PER_CPU 4 |
72 | 83 | ||
73 | static struct smtc_ipi_q IPIQ[NR_CPUS]; | 84 | struct smtc_ipi_q IPIQ[NR_CPUS]; |
74 | static struct smtc_ipi_q freeIPIq; | 85 | static struct smtc_ipi_q freeIPIq; |
75 | 86 | ||
76 | 87 | ||
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void) | |||
282 | * phys_cpu_present_map and the logical/physical mappings. | 293 | * phys_cpu_present_map and the logical/physical mappings. |
283 | */ | 294 | */ |
284 | 295 | ||
285 | int __init mipsmt_build_cpu_map(int start_cpu_slot) | 296 | int __init smtc_build_cpu_map(int start_cpu_slot) |
286 | { | 297 | { |
287 | int i, ntcs; | 298 | int i, ntcs; |
288 | 299 | ||
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
325 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() | 336 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() |
326 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) | 337 | & ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT)) |
327 | | TCSTATUS_A); | 338 | | TCSTATUS_A); |
328 | write_tc_c0_tccontext(0); | 339 | /* |
340 | * TCContext gets an offset from the base of the IPIQ array | ||
341 | * to be used in low-level code to detect the presence of | ||
342 | * an active IPI queue | ||
343 | */ | ||
344 | write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16); | ||
329 | /* Bind tc to vpe */ | 345 | /* Bind tc to vpe */ |
330 | write_tc_c0_tcbind(vpe); | 346 | write_tc_c0_tcbind(vpe); |
331 | /* In general, all TCs should have the same cpu_data indications */ | 347 | /* In general, all TCs should have the same cpu_data indications */ |
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu) | |||
336 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; | 352 | cpu_data[cpu].options &= ~MIPS_CPU_FPU; |
337 | cpu_data[cpu].vpe_id = vpe; | 353 | cpu_data[cpu].vpe_id = vpe; |
338 | cpu_data[cpu].tc_id = tc; | 354 | cpu_data[cpu].tc_id = tc; |
355 | /* Multi-core SMTC hasn't been tested, but be prepared */ | ||
356 | cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff; | ||
339 | } | 357 | } |
340 | 358 | ||
359 | /* | ||
360 | * Tweak to get Count registes in as close a sync as possible. | ||
361 | * Value seems good for 34K-class cores. | ||
362 | */ | ||
363 | |||
364 | #define CP0_SKEW 8 | ||
341 | 365 | ||
342 | void mipsmt_prepare_cpus(void) | 366 | void smtc_prepare_cpus(int cpus) |
343 | { | 367 | { |
344 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; | 368 | int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu; |
345 | unsigned long flags; | 369 | unsigned long flags; |
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void) | |||
363 | IPIQ[i].head = IPIQ[i].tail = NULL; | 387 | IPIQ[i].head = IPIQ[i].tail = NULL; |
364 | spin_lock_init(&IPIQ[i].lock); | 388 | spin_lock_init(&IPIQ[i].lock); |
365 | IPIQ[i].depth = 0; | 389 | IPIQ[i].depth = 0; |
366 | atomic_set(&ipi_timer_latch[i], 0); | ||
367 | } | 390 | } |
368 | 391 | ||
369 | /* cpu_data index starts at zero */ | 392 | /* cpu_data index starts at zero */ |
370 | cpu = 0; | 393 | cpu = 0; |
371 | cpu_data[cpu].vpe_id = 0; | 394 | cpu_data[cpu].vpe_id = 0; |
372 | cpu_data[cpu].tc_id = 0; | 395 | cpu_data[cpu].tc_id = 0; |
396 | cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff; | ||
373 | cpu++; | 397 | cpu++; |
374 | 398 | ||
375 | /* Report on boot-time options */ | 399 | /* Report on boot-time options */ |
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void) | |||
484 | write_vpe_c0_compare(0); | 508 | write_vpe_c0_compare(0); |
485 | /* Propagate Config7 */ | 509 | /* Propagate Config7 */ |
486 | write_vpe_c0_config7(read_c0_config7()); | 510 | write_vpe_c0_config7(read_c0_config7()); |
487 | write_vpe_c0_count(read_c0_count()); | 511 | write_vpe_c0_count(read_c0_count() + CP0_SKEW); |
512 | ehb(); | ||
488 | } | 513 | } |
489 | /* enable multi-threading within VPE */ | 514 | /* enable multi-threading within VPE */ |
490 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); | 515 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE); |
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void) | |||
556 | void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | 581 | void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) |
557 | { | 582 | { |
558 | extern u32 kernelsp[NR_CPUS]; | 583 | extern u32 kernelsp[NR_CPUS]; |
559 | long flags; | 584 | unsigned long flags; |
560 | int mtflags; | 585 | int mtflags; |
561 | 586 | ||
562 | LOCK_MT_PRA(); | 587 | LOCK_MT_PRA(); |
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle) | |||
585 | 610 | ||
586 | void smtc_init_secondary(void) | 611 | void smtc_init_secondary(void) |
587 | { | 612 | { |
588 | /* | ||
589 | * Start timer on secondary VPEs if necessary. | ||
590 | * plat_timer_setup has already have been invoked by init/main | ||
591 | * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that | ||
592 | * SMTC init code assigns TCs consdecutively and in ascending order | ||
593 | * to across available VPEs. | ||
594 | */ | ||
595 | if (((read_c0_tcbind() & TCBIND_CURTC) != 0) && | ||
596 | ((read_c0_tcbind() & TCBIND_CURVPE) | ||
597 | != cpu_data[smp_processor_id() - 1].vpe_id)){ | ||
598 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
599 | } | ||
600 | |||
601 | local_irq_enable(); | 613 | local_irq_enable(); |
602 | } | 614 | } |
603 | 615 | ||
604 | void smtc_smp_finish(void) | 616 | void smtc_smp_finish(void) |
605 | { | 617 | { |
618 | int cpu = smp_processor_id(); | ||
619 | |||
620 | /* | ||
621 | * Lowest-numbered CPU per VPE starts a clock tick. | ||
622 | * Like per_cpu_trap_init() hack, this assumes that | ||
623 | * SMTC init code assigns TCs consdecutively and | ||
624 | * in ascending order across available VPEs. | ||
625 | */ | ||
626 | if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id)) | ||
627 | write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); | ||
628 | |||
606 | printk("TC %d going on-line as CPU %d\n", | 629 | printk("TC %d going on-line as CPU %d\n", |
607 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); | 630 | cpu_data[smp_processor_id()].tc_id, smp_processor_id()); |
608 | } | 631 | } |
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
753 | { | 776 | { |
754 | int tcstatus; | 777 | int tcstatus; |
755 | struct smtc_ipi *pipi; | 778 | struct smtc_ipi *pipi; |
756 | long flags; | 779 | unsigned long flags; |
757 | int mtflags; | 780 | int mtflags; |
781 | unsigned long tcrestart; | ||
782 | extern void r4k_wait_irqoff(void), __pastwait(void); | ||
758 | 783 | ||
759 | if (cpu == smp_processor_id()) { | 784 | if (cpu == smp_processor_id()) { |
760 | printk("Cannot Send IPI to self!\n"); | 785 | printk("Cannot Send IPI to self!\n"); |
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
771 | pipi->arg = (void *)action; | 796 | pipi->arg = (void *)action; |
772 | pipi->dest = cpu; | 797 | pipi->dest = cpu; |
773 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { | 798 | if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) { |
774 | if (type == SMTC_CLOCK_TICK) | ||
775 | atomic_inc(&ipi_timer_latch[cpu]); | ||
776 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ | 799 | /* If not on same VPE, enqueue and send cross-VPE interrupt */ |
777 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 800 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
778 | LOCK_CORE_PRA(); | 801 | LOCK_CORE_PRA(); |
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action) | |||
800 | 823 | ||
801 | if ((tcstatus & TCSTATUS_IXMT) != 0) { | 824 | if ((tcstatus & TCSTATUS_IXMT) != 0) { |
802 | /* | 825 | /* |
803 | * Spin-waiting here can deadlock, | 826 | * If we're in the the irq-off version of the wait |
804 | * so we queue the message for the target TC. | 827 | * loop, we need to force exit from the wait and |
828 | * do a direct post of the IPI. | ||
829 | */ | ||
830 | if (cpu_wait == r4k_wait_irqoff) { | ||
831 | tcrestart = read_tc_c0_tcrestart(); | ||
832 | if (tcrestart >= (unsigned long)r4k_wait_irqoff | ||
833 | && tcrestart < (unsigned long)__pastwait) { | ||
834 | write_tc_c0_tcrestart(__pastwait); | ||
835 | tcstatus &= ~TCSTATUS_IXMT; | ||
836 | write_tc_c0_tcstatus(tcstatus); | ||
837 | goto postdirect; | ||
838 | } | ||
839 | } | ||
840 | /* | ||
841 | * Otherwise we queue the message for the target TC | ||
842 | * to pick up when he does a local_irq_restore() | ||
805 | */ | 843 | */ |
806 | write_tc_c0_tchalt(0); | 844 | write_tc_c0_tchalt(0); |
807 | UNLOCK_CORE_PRA(); | 845 | UNLOCK_CORE_PRA(); |
808 | /* Try to reduce redundant timer interrupt messages */ | ||
809 | if (type == SMTC_CLOCK_TICK) { | ||
810 | if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){ | ||
811 | smtc_ipi_nq(&freeIPIq, pipi); | ||
812 | return; | ||
813 | } | ||
814 | } | ||
815 | smtc_ipi_nq(&IPIQ[cpu], pipi); | 846 | smtc_ipi_nq(&IPIQ[cpu], pipi); |
816 | } else { | 847 | } else { |
817 | if (type == SMTC_CLOCK_TICK) | 848 | postdirect: |
818 | atomic_inc(&ipi_timer_latch[cpu]); | ||
819 | post_direct_ipi(cpu, pipi); | 849 | post_direct_ipi(cpu, pipi); |
820 | write_tc_c0_tchalt(0); | 850 | write_tc_c0_tchalt(0); |
821 | UNLOCK_CORE_PRA(); | 851 | UNLOCK_CORE_PRA(); |
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void) | |||
883 | smp_call_function_interrupt(); | 913 | smp_call_function_interrupt(); |
884 | } | 914 | } |
885 | 915 | ||
886 | DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device); | 916 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); |
887 | 917 | ||
888 | void ipi_decode(struct smtc_ipi *pipi) | 918 | void ipi_decode(struct smtc_ipi *pipi) |
889 | { | 919 | { |
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
891 | struct clock_event_device *cd; | 921 | struct clock_event_device *cd; |
892 | void *arg_copy = pipi->arg; | 922 | void *arg_copy = pipi->arg; |
893 | int type_copy = pipi->type; | 923 | int type_copy = pipi->type; |
894 | int ticks; | ||
895 | |||
896 | smtc_ipi_nq(&freeIPIq, pipi); | 924 | smtc_ipi_nq(&freeIPIq, pipi); |
897 | switch (type_copy) { | 925 | switch (type_copy) { |
898 | case SMTC_CLOCK_TICK: | 926 | case SMTC_CLOCK_TICK: |
899 | irq_enter(); | 927 | irq_enter(); |
900 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; | 928 | kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++; |
901 | cd = &per_cpu(smtc_dummy_clockevent_device, cpu); | 929 | cd = &per_cpu(mips_clockevent_device, cpu); |
902 | ticks = atomic_read(&ipi_timer_latch[cpu]); | 930 | cd->event_handler(cd); |
903 | atomic_sub(ticks, &ipi_timer_latch[cpu]); | ||
904 | while (ticks) { | ||
905 | cd->event_handler(cd); | ||
906 | ticks--; | ||
907 | } | ||
908 | irq_exit(); | 931 | irq_exit(); |
909 | break; | 932 | break; |
910 | 933 | ||
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi) | |||
937 | } | 960 | } |
938 | } | 961 | } |
939 | 962 | ||
963 | /* | ||
964 | * Similar to smtc_ipi_replay(), but invoked from context restore, | ||
965 | * so it reuses the current exception frame rather than set up a | ||
966 | * new one with self_ipi. | ||
967 | */ | ||
968 | |||
940 | void deferred_smtc_ipi(void) | 969 | void deferred_smtc_ipi(void) |
941 | { | 970 | { |
942 | struct smtc_ipi *pipi; | 971 | int cpu = smp_processor_id(); |
943 | unsigned long flags; | ||
944 | /* DEBUG */ | ||
945 | int q = smp_processor_id(); | ||
946 | 972 | ||
947 | /* | 973 | /* |
948 | * Test is not atomic, but much faster than a dequeue, | 974 | * Test is not atomic, but much faster than a dequeue, |
949 | * and the vast majority of invocations will have a null queue. | 975 | * and the vast majority of invocations will have a null queue. |
976 | * If irq_disabled when this was called, then any IPIs queued | ||
977 | * after we test last will be taken on the next irq_enable/restore. | ||
978 | * If interrupts were enabled, then any IPIs added after the | ||
979 | * last test will be taken directly. | ||
950 | */ | 980 | */ |
951 | if (IPIQ[q].head != NULL) { | 981 | |
952 | while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) { | 982 | while (IPIQ[cpu].head != NULL) { |
953 | /* ipi_decode() should be called with interrupts off */ | 983 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
954 | local_irq_save(flags); | 984 | struct smtc_ipi *pipi; |
985 | unsigned long flags; | ||
986 | |||
987 | /* | ||
988 | * It may be possible we'll come in with interrupts | ||
989 | * already enabled. | ||
990 | */ | ||
991 | local_irq_save(flags); | ||
992 | |||
993 | spin_lock(&q->lock); | ||
994 | pipi = __smtc_ipi_dq(q); | ||
995 | spin_unlock(&q->lock); | ||
996 | if (pipi != NULL) | ||
955 | ipi_decode(pipi); | 997 | ipi_decode(pipi); |
956 | local_irq_restore(flags); | 998 | /* |
957 | } | 999 | * The use of the __raw_local restore isn't |
1000 | * as obviously necessary here as in smtc_ipi_replay(), | ||
1001 | * but it's more efficient, given that we're already | ||
1002 | * running down the IPI queue. | ||
1003 | */ | ||
1004 | __raw_local_irq_restore(flags); | ||
958 | } | 1005 | } |
959 | } | 1006 | } |
960 | 1007 | ||
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm) | |||
975 | struct smtc_ipi *pipi; | 1022 | struct smtc_ipi *pipi; |
976 | unsigned long tcstatus; | 1023 | unsigned long tcstatus; |
977 | int sent; | 1024 | int sent; |
978 | long flags; | 1025 | unsigned long flags; |
979 | unsigned int mtflags; | 1026 | unsigned int mtflags; |
980 | unsigned int vpflags; | 1027 | unsigned int vpflags; |
981 | 1028 | ||
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe) | |||
1066 | 1113 | ||
1067 | /* | 1114 | /* |
1068 | * SMTC-specific hacks invoked from elsewhere in the kernel. | 1115 | * SMTC-specific hacks invoked from elsewhere in the kernel. |
1069 | * | ||
1070 | * smtc_ipi_replay is called from raw_local_irq_restore which is only ever | ||
1071 | * called with interrupts disabled. We do rely on interrupts being disabled | ||
1072 | * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would | ||
1073 | * result in a recursive call to raw_local_irq_restore(). | ||
1074 | */ | 1116 | */ |
1075 | 1117 | ||
1076 | static void __smtc_ipi_replay(void) | 1118 | /* |
1119 | * smtc_ipi_replay is called from raw_local_irq_restore | ||
1120 | */ | ||
1121 | |||
1122 | void smtc_ipi_replay(void) | ||
1077 | { | 1123 | { |
1078 | unsigned int cpu = smp_processor_id(); | 1124 | unsigned int cpu = smp_processor_id(); |
1079 | 1125 | ||
1080 | /* | 1126 | /* |
1081 | * To the extent that we've ever turned interrupts off, | 1127 | * To the extent that we've ever turned interrupts off, |
1082 | * we may have accumulated deferred IPIs. This is subtle. | 1128 | * we may have accumulated deferred IPIs. This is subtle. |
1083 | * If we use the smtc_ipi_qdepth() macro, we'll get an | ||
1084 | * exact number - but we'll also disable interrupts | ||
1085 | * and create a window of failure where a new IPI gets | ||
1086 | * queued after we test the depth but before we re-enable | ||
1087 | * interrupts. So long as IXMT never gets set, however, | ||
1088 | * we should be OK: If we pick up something and dispatch | 1129 | * we should be OK: If we pick up something and dispatch |
1089 | * it here, that's great. If we see nothing, but concurrent | 1130 | * it here, that's great. If we see nothing, but concurrent |
1090 | * with this operation, another TC sends us an IPI, IXMT | 1131 | * with this operation, another TC sends us an IPI, IXMT |
1091 | * is clear, and we'll handle it as a real pseudo-interrupt | 1132 | * is clear, and we'll handle it as a real pseudo-interrupt |
1092 | * and not a pseudo-pseudo interrupt. | 1133 | * and not a pseudo-pseudo interrupt. The important thing |
1134 | * is to do the last check for a queued message *after* the | ||
1135 | * re-enabling of interrupts. | ||
1093 | */ | 1136 | */ |
1094 | if (IPIQ[cpu].depth > 0) { | 1137 | while (IPIQ[cpu].head != NULL) { |
1095 | while (1) { | 1138 | struct smtc_ipi_q *q = &IPIQ[cpu]; |
1096 | struct smtc_ipi_q *q = &IPIQ[cpu]; | 1139 | struct smtc_ipi *pipi; |
1097 | struct smtc_ipi *pipi; | 1140 | unsigned long flags; |
1098 | extern void self_ipi(struct smtc_ipi *); | 1141 | |
1099 | 1142 | /* | |
1100 | spin_lock(&q->lock); | 1143 | * It's just possible we'll come in with interrupts |
1101 | pipi = __smtc_ipi_dq(q); | 1144 | * already enabled. |
1102 | spin_unlock(&q->lock); | 1145 | */ |
1103 | if (!pipi) | 1146 | local_irq_save(flags); |
1104 | break; | 1147 | |
1148 | spin_lock(&q->lock); | ||
1149 | pipi = __smtc_ipi_dq(q); | ||
1150 | spin_unlock(&q->lock); | ||
1151 | /* | ||
1152 | * But use a raw restore here to avoid recursion. | ||
1153 | */ | ||
1154 | __raw_local_irq_restore(flags); | ||
1105 | 1155 | ||
1156 | if (pipi) { | ||
1106 | self_ipi(pipi); | 1157 | self_ipi(pipi); |
1107 | smtc_cpu_stats[cpu].selfipis++; | 1158 | smtc_cpu_stats[cpu].selfipis++; |
1108 | } | 1159 | } |
1109 | } | 1160 | } |
1110 | } | 1161 | } |
1111 | 1162 | ||
1112 | void smtc_ipi_replay(void) | ||
1113 | { | ||
1114 | raw_local_irq_disable(); | ||
1115 | __smtc_ipi_replay(); | ||
1116 | } | ||
1117 | |||
1118 | EXPORT_SYMBOL(smtc_ipi_replay); | 1163 | EXPORT_SYMBOL(smtc_ipi_replay); |
1119 | 1164 | ||
1120 | void smtc_idle_loop_hook(void) | 1165 | void smtc_idle_loop_hook(void) |
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void) | |||
1193 | } | 1238 | } |
1194 | } | 1239 | } |
1195 | 1240 | ||
1196 | /* | ||
1197 | * Now that we limit outstanding timer IPIs, check for hung TC | ||
1198 | */ | ||
1199 | for (tc = 0; tc < NR_CPUS; tc++) { | ||
1200 | /* Don't check ourself - we'll dequeue IPIs just below */ | ||
1201 | if ((tc != smp_processor_id()) && | ||
1202 | atomic_read(&ipi_timer_latch[tc]) > timerq_limit) { | ||
1203 | if (clock_hang_reported[tc] == 0) { | ||
1204 | pdb_msg += sprintf(pdb_msg, | ||
1205 | "TC %d looks hung with timer latch at %d\n", | ||
1206 | tc, atomic_read(&ipi_timer_latch[tc])); | ||
1207 | clock_hang_reported[tc]++; | ||
1208 | } | ||
1209 | } | ||
1210 | } | ||
1211 | emt(mtflags); | 1241 | emt(mtflags); |
1212 | local_irq_restore(flags); | 1242 | local_irq_restore(flags); |
1213 | if (pdb_msg != &id_ho_db_msg[0]) | 1243 | if (pdb_msg != &id_ho_db_msg[0]) |
1214 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); | 1244 | printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg); |
1215 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ | 1245 | #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */ |
1216 | 1246 | ||
1217 | /* | 1247 | smtc_ipi_replay(); |
1218 | * Replay any accumulated deferred IPIs. If "Instant Replay" | ||
1219 | * is in use, there should never be any. | ||
1220 | */ | ||
1221 | #ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY | ||
1222 | { | ||
1223 | unsigned long flags; | ||
1224 | |||
1225 | local_irq_save(flags); | ||
1226 | __smtc_ipi_replay(); | ||
1227 | local_irq_restore(flags); | ||
1228 | } | ||
1229 | #endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */ | ||
1230 | } | 1248 | } |
1231 | 1249 | ||
1232 | void smtc_soft_dump(void) | 1250 | void smtc_soft_dump(void) |
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void) | |||
1242 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); | 1260 | printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis); |
1243 | } | 1261 | } |
1244 | smtc_ipi_qdump(); | 1262 | smtc_ipi_qdump(); |
1245 | printk("Timer IPI Backlogs:\n"); | ||
1246 | for (i=0; i < NR_CPUS; i++) { | ||
1247 | printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i])); | ||
1248 | } | ||
1249 | printk("%d Recoveries of \"stolen\" FPU\n", | 1263 | printk("%d Recoveries of \"stolen\" FPU\n", |
1250 | atomic_read(&smtc_fpu_recoveries)); | 1264 | atomic_read(&smtc_fpu_recoveries)); |
1251 | } | 1265 | } |
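
Note: both reworked loops above follow the same deferred-IPI replay pattern: dequeue under the lock with interrupts masked, restore with the raw (non-replaying) variant, and keep looping on the queue head so the final emptiness test happens after interrupts are back on. A minimal sketch of that pattern, with simplified stand-ins for the SMTC queue types and for self_ipi()/ipi_decode():

/*
 * Minimal sketch of the deferred-IPI replay pattern.  The queue types
 * are simplified stand-ins for struct smtc_ipi_q and struct smtc_ipi;
 * dispatch() stands in for self_ipi()/ipi_decode().
 */
#include <linux/spinlock.h>

struct ipi_msg { struct ipi_msg *next; };
struct ipi_queue { struct ipi_msg *head; spinlock_t lock; };

extern void dispatch(struct ipi_msg *m);	/* hypothetical */

static struct ipi_msg *dequeue(struct ipi_queue *q)
{
	struct ipi_msg *m = q->head;

	if (m)
		q->head = m->next;
	return m;
}

static void replay_deferred(struct ipi_queue *q)
{
	/*
	 * Re-testing q->head *after* each raw restore is what closes
	 * the window in which a concurrently queued IPI could be
	 * missed before returning to WAIT or user mode.
	 */
	while (q->head != NULL) {
		struct ipi_msg *m;
		unsigned long flags;

		local_irq_save(flags);	/* we may arrive with IRQs on */
		spin_lock(&q->lock);
		m = dequeue(q);
		spin_unlock(&q->lock);
		/*
		 * Raw restore: the ordinary raw_local_irq_restore()
		 * would call back into the replay code and recurse.
		 */
		__raw_local_irq_restore(flags);

		if (m)
			dispatch(m);
	}
}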
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c index 5fd0cd020af5..b602ac6eb47d 100644 --- a/arch/mips/kernel/traps.c +++ b/arch/mips/kernel/traps.c | |||
@@ -825,8 +825,10 @@ static void mt_ase_fp_affinity(void) | |||
825 | if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { | 825 | if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) { |
826 | cpumask_t tmask; | 826 | cpumask_t tmask; |
827 | 827 | ||
828 | cpus_and(tmask, current->thread.user_cpus_allowed, | 828 | current->thread.user_cpus_allowed |
829 | mt_fpu_cpumask); | 829 | = current->cpus_allowed; |
830 | cpus_and(tmask, current->cpus_allowed, | ||
831 | mt_fpu_cpumask); | ||
830 | set_cpus_allowed(current, tmask); | 832 | set_cpus_allowed(current, tmask); |
831 | set_thread_flag(TIF_FPUBOUND); | 833 | set_thread_flag(TIF_FPUBOUND); |
832 | } | 834 | } |
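
Note: the traps.c hunk snapshots the full affinity mask into the MIPS-specific thread.user_cpus_allowed field before narrowing cpus_allowed to FPU-capable CPUs, so the user's original mask survives the temporary binding. In outline, using the cpumask API of this era:

#include <linux/cpumask.h>
#include <linux/sched.h>

static void bind_to_fpu_cpus(cpumask_t fpu_mask)
{
	cpumask_t tmask;

	/* Snapshot the user-requested affinity first so it can be
	 * reinstated once the FPU binding is dropped. */
	current->thread.user_cpus_allowed = current->cpus_allowed;
	cpus_and(tmask, current->cpus_allowed, fpu_mask);
	set_cpus_allowed(current, tmask);
}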
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile index 3b7dd722c32a..cef2db8d2225 100644 --- a/arch/mips/mti-malta/Makefile +++ b/arch/mips/mti-malta/Makefile | |||
@@ -15,6 +15,6 @@ obj-$(CONFIG_EARLY_PRINTK) += malta-console.o | |||
15 | obj-$(CONFIG_PCI) += malta-pci.o | 15 | obj-$(CONFIG_PCI) += malta-pci.o |
16 | 16 | ||
17 | # FIXME FIXME FIXME | 17 | # FIXME FIXME FIXME |
18 | obj-$(CONFIG_MIPS_MT_SMTC) += malta_smtc.o | 18 | obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o |
19 | 19 | ||
20 | EXTRA_CFLAGS += -Werror | 20 | EXTRA_CFLAGS += -Werror |
diff --git a/arch/mips/mti-malta/malta-smtc.c b/arch/mips/mti-malta/malta-smtc.c index 5ea705e49454..f84a46a8ae6e 100644 --- a/arch/mips/mti-malta/malta-smtc.c +++ b/arch/mips/mti-malta/malta-smtc.c | |||
@@ -84,12 +84,17 @@ static void msmtc_cpus_done(void) | |||
84 | 84 | ||
85 | static void __init msmtc_smp_setup(void) | 85 | static void __init msmtc_smp_setup(void) |
86 | { | 86 | { |
87 | mipsmt_build_cpu_map(0); | 87 | /* |
88 | * we won't get the definitive value until | ||
89 | * we've run smtc_prepare_cpus later, but | ||
90 | * we would appear to need an upper bound now. | ||
91 | */ | ||
92 | smp_num_siblings = smtc_build_cpu_map(0); | ||
88 | } | 93 | } |
89 | 94 | ||
90 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) | 95 | static void __init msmtc_prepare_cpus(unsigned int max_cpus) |
91 | { | 96 | { |
92 | mipsmt_prepare_cpus(); | 97 | smtc_prepare_cpus(max_cpus); |
93 | } | 98 | } |
94 | 99 | ||
95 | struct plat_smp_ops msmtc_smp_ops = { | 100 | struct plat_smp_ops msmtc_smp_ops = { |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index ca114fe46ffb..06acb1a18bbc 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -169,6 +169,8 @@ void init_cpu_timer(void) | |||
169 | 169 | ||
170 | static void clock_comparator_interrupt(__u16 code) | 170 | static void clock_comparator_interrupt(__u16 code) |
171 | { | 171 | { |
172 | if (S390_lowcore.clock_comparator == -1ULL) | ||
173 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
172 | } | 174 | } |
173 | 175 | ||
174 | static void etr_timing_alert(struct etr_irq_parm *); | 176 | static void etr_timing_alert(struct etr_irq_parm *); |
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index fc6ab6094df8..0953cee05efc 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -1,14 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/lib/delay.c | ||
3 | * Precise Delay Loops for S390 | 2 | * Precise Delay Loops for S390 |
4 | * | 3 | * |
5 | * S390 version | 4 | * Copyright IBM Corp. 1999,2008 |
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, |
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 6 | * Heiko Carstens <heiko.carstens@de.ibm.com>, |
8 | * | ||
9 | * Derived from "arch/i386/lib/delay.c" | ||
10 | * Copyright (C) 1993 Linus Torvalds | ||
11 | * Copyright (C) 1997 Martin Mares <mj@atrey.karlin.mff.cuni.cz> | ||
12 | */ | 7 | */ |
13 | 8 | ||
14 | #include <linux/sched.h> | 9 | #include <linux/sched.h> |
@@ -29,30 +24,31 @@ void __delay(unsigned long loops) | |||
29 | asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); | 24 | asm volatile("0: brct %0,0b" : : "d" ((loops/2) + 1)); |
30 | } | 25 | } |
31 | 26 | ||
32 | /* | 27 | static void __udelay_disabled(unsigned long usecs) |
33 | * Waits for 'usecs' microseconds using the TOD clock comparator. | ||
34 | */ | ||
35 | void __udelay(unsigned long usecs) | ||
36 | { | 28 | { |
37 | u64 end, time, old_cc = 0; | 29 | unsigned long mask, cr0, cr0_saved; |
38 | unsigned long flags, cr0, mask, dummy; | 30 | u64 clock_saved; |
39 | int irq_context; | ||
40 | 31 | ||
41 | irq_context = in_interrupt(); | 32 | clock_saved = local_tick_disable(); |
42 | if (!irq_context) | 33 | set_clock_comparator(get_clock() + ((u64) usecs << 12)); |
43 | local_bh_disable(); | 34 | __ctl_store(cr0_saved, 0, 0); |
44 | local_irq_save(flags); | 35 | cr0 = (cr0_saved & 0xffff00e0) | 0x00000800; |
45 | if (raw_irqs_disabled_flags(flags)) { | 36 | __ctl_load(cr0, 0, 0); |
46 | old_cc = local_tick_disable(); | 37 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; |
47 | S390_lowcore.clock_comparator = -1ULL; | 38 | trace_hardirqs_on(); |
48 | __ctl_store(cr0, 0, 0); | 39 | __load_psw_mask(mask); |
49 | dummy = (cr0 & 0xffff00e0) | 0x00000800; | 40 | local_irq_disable(); |
50 | __ctl_load(dummy , 0, 0); | 41 | __ctl_load(cr0_saved, 0, 0); |
51 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT; | 42 | local_tick_enable(clock_saved); |
52 | } else | 43 | set_clock_comparator(S390_lowcore.clock_comparator); |
53 | mask = psw_kernel_bits | PSW_MASK_WAIT | | 44 | } |
54 | PSW_MASK_EXT | PSW_MASK_IO; | ||
55 | 45 | ||
46 | static void __udelay_enabled(unsigned long usecs) | ||
47 | { | ||
48 | unsigned long mask; | ||
49 | u64 end, time; | ||
50 | |||
51 | mask = psw_kernel_bits | PSW_MASK_WAIT | PSW_MASK_EXT | PSW_MASK_IO; | ||
56 | end = get_clock() + ((u64) usecs << 12); | 52 | end = get_clock() + ((u64) usecs << 12); |
57 | do { | 53 | do { |
58 | time = end < S390_lowcore.clock_comparator ? | 54 | time = end < S390_lowcore.clock_comparator ? |
@@ -62,13 +58,37 @@ void __udelay(unsigned long usecs) | |||
62 | __load_psw_mask(mask); | 58 | __load_psw_mask(mask); |
63 | local_irq_disable(); | 59 | local_irq_disable(); |
64 | } while (get_clock() < end); | 60 | } while (get_clock() < end); |
61 | set_clock_comparator(S390_lowcore.clock_comparator); | ||
62 | } | ||
65 | 63 | ||
66 | if (raw_irqs_disabled_flags(flags)) { | 64 | /* |
67 | __ctl_load(cr0, 0, 0); | 65 | * Waits for 'usecs' microseconds using the TOD clock comparator. |
68 | local_tick_enable(old_cc); | 66 | */ |
67 | void __udelay(unsigned long usecs) | ||
68 | { | ||
69 | unsigned long flags; | ||
70 | |||
71 | preempt_disable(); | ||
72 | local_irq_save(flags); | ||
73 | if (in_irq()) { | ||
74 | __udelay_disabled(usecs); | ||
75 | goto out; | ||
76 | } | ||
77 | if (in_softirq()) { | ||
78 | if (raw_irqs_disabled_flags(flags)) | ||
79 | __udelay_disabled(usecs); | ||
80 | else | ||
81 | __udelay_enabled(usecs); | ||
82 | goto out; | ||
69 | } | 83 | } |
70 | if (!irq_context) | 84 | if (raw_irqs_disabled_flags(flags)) { |
85 | local_bh_disable(); | ||
86 | __udelay_disabled(usecs); | ||
71 | _local_bh_enable(); | 87 | _local_bh_enable(); |
72 | set_clock_comparator(S390_lowcore.clock_comparator); | 88 | goto out; |
89 | } | ||
90 | __udelay_enabled(usecs); | ||
91 | out: | ||
73 | local_irq_restore(flags); | 92 | local_irq_restore(flags); |
93 | preempt_enable(); | ||
74 | } | 94 | } |
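
Note: the restructured __udelay() now picks a strategy from the calling context instead of one flag test: hard-irq context always takes the fully disabled variant, softirq context chooses by whether interrupts were already off, and process context with interrupts off shuts out softirqs first. Both helpers restore S390_lowcore.clock_comparator on exit, which pairs with the time.c hunk writing the -1ULL "disabled" sentinel back to the hardware comparator when a stray interrupt arrives. The dispatch logic, condensed into straight-line form (same behavior as the goto version above):

void __udelay(unsigned long usecs)
{
	unsigned long flags;

	preempt_disable();
	local_irq_save(flags);
	if (in_irq()) {
		/* hard-irq context: always the fully disabled variant */
		__udelay_disabled(usecs);
	} else if (in_softirq()) {
		if (raw_irqs_disabled_flags(flags))
			__udelay_disabled(usecs);
		else
			__udelay_enabled(usecs);
	} else if (raw_irqs_disabled_flags(flags)) {
		/* process context with IRQs off: keep softirqs out too */
		local_bh_disable();
		__udelay_disabled(usecs);
		_local_bh_enable();
	} else {
		__udelay_enabled(usecs);
	}
	local_irq_restore(flags);
	preempt_enable();
}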
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c index 084109507c9f..8dd3336efd7e 100644 --- a/drivers/acpi/glue.c +++ b/drivers/acpi/glue.c | |||
@@ -165,8 +165,11 @@ static int acpi_bind_one(struct device *dev, acpi_handle handle) | |||
165 | "firmware_node"); | 165 | "firmware_node"); |
166 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, | 166 | ret = sysfs_create_link(&acpi_dev->dev.kobj, &dev->kobj, |
167 | "physical_node"); | 167 | "physical_node"); |
168 | if (acpi_dev->wakeup.flags.valid) | 168 | if (acpi_dev->wakeup.flags.valid) { |
169 | device_set_wakeup_capable(dev, true); | 169 | device_set_wakeup_capable(dev, true); |
170 | device_set_wakeup_enable(dev, | ||
171 | acpi_dev->wakeup.state.enabled); | ||
172 | } | ||
170 | } | 173 | } |
171 | 174 | ||
172 | return 0; | 175 | return 0; |
diff --git a/drivers/acpi/sleep/proc.c b/drivers/acpi/sleep/proc.c index 4ebbba2b6b19..bf5b04de02d1 100644 --- a/drivers/acpi/sleep/proc.c +++ b/drivers/acpi/sleep/proc.c | |||
@@ -377,6 +377,14 @@ acpi_system_wakeup_device_seq_show(struct seq_file *seq, void *offset) | |||
377 | return 0; | 377 | return 0; |
378 | } | 378 | } |
379 | 379 | ||
380 | static void physical_device_enable_wakeup(struct acpi_device *adev) | ||
381 | { | ||
382 | struct device *dev = acpi_get_physical_device(adev->handle); | ||
383 | |||
384 | if (dev && device_can_wakeup(dev)) | ||
385 | device_set_wakeup_enable(dev, adev->wakeup.state.enabled); | ||
386 | } | ||
387 | |||
380 | static ssize_t | 388 | static ssize_t |
381 | acpi_system_write_wakeup_device(struct file *file, | 389 | acpi_system_write_wakeup_device(struct file *file, |
382 | const char __user * buffer, | 390 | const char __user * buffer, |
@@ -411,6 +419,7 @@ acpi_system_write_wakeup_device(struct file *file, | |||
411 | } | 419 | } |
412 | } | 420 | } |
413 | if (found_dev) { | 421 | if (found_dev) { |
422 | physical_device_enable_wakeup(found_dev); | ||
414 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { | 423 | list_for_each_safe(node, next, &acpi_wakeup_device_list) { |
415 | struct acpi_device *dev = container_of(node, | 424 | struct acpi_device *dev = container_of(node, |
416 | struct | 425 | struct |
@@ -428,6 +437,7 @@ acpi_system_write_wakeup_device(struct file *file, | |||
428 | dev->pnp.bus_id, found_dev->pnp.bus_id); | 437 | dev->pnp.bus_id, found_dev->pnp.bus_id); |
429 | dev->wakeup.state.enabled = | 438 | dev->wakeup.state.enabled = |
430 | found_dev->wakeup.state.enabled; | 439 | found_dev->wakeup.state.enabled; |
440 | physical_device_enable_wakeup(dev); | ||
431 | } | 441 | } |
432 | } | 442 | } |
433 | } | 443 | } |
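
Note: both ACPI hunks push the ACPI-side wakeup state down to the bound physical device through the generic wakeup API. Roughly, combining the two call sites into one hypothetical helper (acpi_get_physical_device() returns the struct device bound to the handle, or NULL):

static void sync_wakeup_state(struct acpi_device *adev)
{
	struct device *dev = acpi_get_physical_device(adev->handle);

	if (!dev)
		return;
	/* Mark the device wakeup-capable, then mirror the ACPI
	 * enabled/disabled state onto it. */
	device_set_wakeup_capable(dev, true);
	if (device_can_wakeup(dev))
		device_set_wakeup_enable(dev, adev->wakeup.state.enabled);
}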
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c index 94df91771243..0778d99aea7c 100644 --- a/drivers/dma/dw_dmac.c +++ b/drivers/dma/dw_dmac.c | |||
@@ -364,7 +364,7 @@ static void dw_dma_tasklet(unsigned long data) | |||
364 | int i; | 364 | int i; |
365 | 365 | ||
366 | status_block = dma_readl(dw, RAW.BLOCK); | 366 | status_block = dma_readl(dw, RAW.BLOCK); |
367 | status_xfer = dma_readl(dw, RAW.BLOCK); | 367 | status_xfer = dma_readl(dw, RAW.XFER); |
368 | status_err = dma_readl(dw, RAW.ERROR); | 368 | status_err = dma_readl(dw, RAW.ERROR); |
369 | 369 | ||
370 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", | 370 | dev_vdbg(dw->dma.dev, "tasklet: status_block=%x status_err=%x\n", |
diff --git a/drivers/leds/leds-fsg.c b/drivers/leds/leds-fsg.c index be0e12144b8b..34935155c1c0 100644 --- a/drivers/leds/leds-fsg.c +++ b/drivers/leds/leds-fsg.c | |||
@@ -161,6 +161,16 @@ static int fsg_led_probe(struct platform_device *pdev) | |||
161 | { | 161 | { |
162 | int ret; | 162 | int ret; |
163 | 163 | ||
164 | /* Map the LED chip select address space */ | ||
165 | latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512); | ||
166 | if (!latch_address) { | ||
167 | ret = -ENOMEM; | ||
168 | goto failremap; | ||
169 | } | ||
170 | |||
171 | latch_value = 0xffff; | ||
172 | *latch_address = latch_value; | ||
173 | |||
164 | ret = led_classdev_register(&pdev->dev, &fsg_wlan_led); | 174 | ret = led_classdev_register(&pdev->dev, &fsg_wlan_led); |
165 | if (ret < 0) | 175 | if (ret < 0) |
166 | goto failwlan; | 176 | goto failwlan; |
@@ -185,20 +195,8 @@ static int fsg_led_probe(struct platform_device *pdev) | |||
185 | if (ret < 0) | 195 | if (ret < 0) |
186 | goto failring; | 196 | goto failring; |
187 | 197 | ||
188 | /* Map the LED chip select address space */ | ||
189 | latch_address = (unsigned short *) ioremap(IXP4XX_EXP_BUS_BASE(2), 512); | ||
190 | if (!latch_address) { | ||
191 | ret = -ENOMEM; | ||
192 | goto failremap; | ||
193 | } | ||
194 | |||
195 | latch_value = 0xffff; | ||
196 | *latch_address = latch_value; | ||
197 | |||
198 | return ret; | 198 | return ret; |
199 | 199 | ||
200 | failremap: | ||
201 | led_classdev_unregister(&fsg_ring_led); | ||
202 | failring: | 200 | failring: |
203 | led_classdev_unregister(&fsg_sync_led); | 201 | led_classdev_unregister(&fsg_sync_led); |
204 | failsync: | 202 | failsync: |
@@ -210,14 +208,14 @@ static int fsg_led_probe(struct platform_device *pdev) | |||
210 | failwan: | 208 | failwan: |
211 | led_classdev_unregister(&fsg_wlan_led); | 209 | led_classdev_unregister(&fsg_wlan_led); |
212 | failwlan: | 210 | failwlan: |
211 | iounmap(latch_address); | ||
212 | failremap: | ||
213 | 213 | ||
214 | return ret; | 214 | return ret; |
215 | } | 215 | } |
216 | 216 | ||
217 | static int fsg_led_remove(struct platform_device *pdev) | 217 | static int fsg_led_remove(struct platform_device *pdev) |
218 | { | 218 | { |
219 | iounmap(latch_address); | ||
220 | |||
221 | led_classdev_unregister(&fsg_wlan_led); | 219 | led_classdev_unregister(&fsg_wlan_led); |
222 | led_classdev_unregister(&fsg_wan_led); | 220 | led_classdev_unregister(&fsg_wan_led); |
223 | led_classdev_unregister(&fsg_sata_led); | 221 | led_classdev_unregister(&fsg_sata_led); |
@@ -225,6 +223,8 @@ static int fsg_led_remove(struct platform_device *pdev) | |||
225 | led_classdev_unregister(&fsg_sync_led); | 223 | led_classdev_unregister(&fsg_sync_led); |
226 | led_classdev_unregister(&fsg_ring_led); | 224 | led_classdev_unregister(&fsg_ring_led); |
227 | 225 | ||
226 | iounmap(latch_address); | ||
227 | |||
228 | return 0; | 228 | return 0; |
229 | } | 229 | } |
230 | 230 | ||
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c index 146c06972863..f508729123b5 100644 --- a/drivers/leds/leds-pca955x.c +++ b/drivers/leds/leds-pca955x.c | |||
@@ -248,11 +248,10 @@ static int __devinit pca955x_probe(struct i2c_client *client, | |||
248 | const struct i2c_device_id *id) | 248 | const struct i2c_device_id *id) |
249 | { | 249 | { |
250 | struct pca955x_led *pca955x; | 250 | struct pca955x_led *pca955x; |
251 | int i; | ||
252 | int err = -ENODEV; | ||
253 | struct pca955x_chipdef *chip; | 251 | struct pca955x_chipdef *chip; |
254 | struct i2c_adapter *adapter; | 252 | struct i2c_adapter *adapter; |
255 | struct led_platform_data *pdata; | 253 | struct led_platform_data *pdata; |
254 | int i, err; | ||
256 | 255 | ||
257 | chip = &pca955x_chipdefs[id->driver_data]; | 256 | chip = &pca955x_chipdefs[id->driver_data]; |
258 | adapter = to_i2c_adapter(client->dev.parent); | 257 | adapter = to_i2c_adapter(client->dev.parent); |
@@ -282,43 +281,41 @@ static int __devinit pca955x_probe(struct i2c_client *client, | |||
282 | } | 281 | } |
283 | } | 282 | } |
284 | 283 | ||
284 | pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL); | ||
285 | if (!pca955x) | ||
286 | return -ENOMEM; | ||
287 | |||
288 | i2c_set_clientdata(client, pca955x); | ||
289 | |||
285 | for (i = 0; i < chip->bits; i++) { | 290 | for (i = 0; i < chip->bits; i++) { |
286 | pca955x = kzalloc(sizeof(struct pca955x_led), GFP_KERNEL); | 291 | pca955x[i].chipdef = chip; |
287 | if (!pca955x) { | 292 | pca955x[i].client = client; |
288 | err = -ENOMEM; | 293 | pca955x[i].led_num = i; |
289 | goto exit; | ||
290 | } | ||
291 | 294 | ||
292 | pca955x->chipdef = chip; | ||
293 | pca955x->client = client; | ||
294 | pca955x->led_num = i; | ||
295 | /* Platform data can specify LED names and default triggers */ | 295 | /* Platform data can specify LED names and default triggers */ |
296 | if (pdata) { | 296 | if (pdata) { |
297 | if (pdata->leds[i].name) | 297 | if (pdata->leds[i].name) |
298 | snprintf(pca955x->name, 32, "pca955x:%s", | 298 | snprintf(pca955x[i].name, |
299 | pdata->leds[i].name); | 299 | sizeof(pca955x[i].name), "pca955x:%s", |
300 | pdata->leds[i].name); | ||
300 | if (pdata->leds[i].default_trigger) | 301 | if (pdata->leds[i].default_trigger) |
301 | pca955x->led_cdev.default_trigger = | 302 | pca955x[i].led_cdev.default_trigger = |
302 | pdata->leds[i].default_trigger; | 303 | pdata->leds[i].default_trigger; |
303 | } else { | 304 | } else { |
304 | snprintf(pca955x->name, 32, "pca955x:%d", i); | 305 | snprintf(pca955x[i].name, sizeof(pca955x[i].name), |
306 | "pca955x:%d", i); | ||
305 | } | 307 | } |
306 | spin_lock_init(&pca955x->lock); | ||
307 | 308 | ||
308 | pca955x->led_cdev.name = pca955x->name; | 309 | spin_lock_init(&pca955x[i].lock); |
309 | pca955x->led_cdev.brightness_set = | ||
310 | pca955x_led_set; | ||
311 | 310 | ||
312 | /* | 311 | pca955x[i].led_cdev.name = pca955x[i].name; |
313 | * Client data is a pointer to the _first_ pca955x_led | 312 | pca955x[i].led_cdev.brightness_set = pca955x_led_set; |
314 | * struct | ||
315 | */ | ||
316 | if (i == 0) | ||
317 | i2c_set_clientdata(client, pca955x); | ||
318 | 313 | ||
319 | INIT_WORK(&(pca955x->work), pca955x_led_work); | 314 | INIT_WORK(&pca955x[i].work, pca955x_led_work); |
320 | 315 | ||
321 | led_classdev_register(&client->dev, &(pca955x->led_cdev)); | 316 | err = led_classdev_register(&client->dev, &pca955x[i].led_cdev); |
317 | if (err < 0) | ||
318 | goto exit; | ||
322 | } | 319 | } |
323 | 320 | ||
324 | /* Turn off LEDs */ | 321 | /* Turn off LEDs */ |
@@ -336,23 +333,32 @@ static int __devinit pca955x_probe(struct i2c_client *client, | |||
336 | pca955x_write_psc(client, 1, 0); | 333 | pca955x_write_psc(client, 1, 0); |
337 | 334 | ||
338 | return 0; | 335 | return 0; |
336 | |||
339 | exit: | 337 | exit: |
338 | while (i--) { | ||
339 | led_classdev_unregister(&pca955x[i].led_cdev); | ||
340 | cancel_work_sync(&pca955x[i].work); | ||
341 | } | ||
342 | |||
343 | kfree(pca955x); | ||
344 | i2c_set_clientdata(client, NULL); | ||
345 | |||
340 | return err; | 346 | return err; |
341 | } | 347 | } |
342 | 348 | ||
343 | static int __devexit pca955x_remove(struct i2c_client *client) | 349 | static int __devexit pca955x_remove(struct i2c_client *client) |
344 | { | 350 | { |
345 | struct pca955x_led *pca955x = i2c_get_clientdata(client); | 351 | struct pca955x_led *pca955x = i2c_get_clientdata(client); |
346 | int leds = pca955x->chipdef->bits; | ||
347 | int i; | 352 | int i; |
348 | 353 | ||
349 | for (i = 0; i < leds; i++) { | 354 | for (i = 0; i < pca955x->chipdef->bits; i++) { |
350 | led_classdev_unregister(&(pca955x->led_cdev)); | 355 | led_classdev_unregister(&pca955x[i].led_cdev); |
351 | cancel_work_sync(&(pca955x->work)); | 356 | cancel_work_sync(&pca955x[i].work); |
352 | kfree(pca955x); | ||
353 | pca955x = pca955x + 1; | ||
354 | } | 357 | } |
355 | 358 | ||
359 | kfree(pca955x); | ||
360 | i2c_set_clientdata(client, NULL); | ||
361 | |||
356 | return 0; | 362 | return 0; |
357 | } | 363 | } |
358 | 364 | ||
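
Note: the pca955x rework trades per-LED kzalloc() calls for one array allocation, so the error path becomes a reverse walk over the LEDs that registered successfully and remove() frees everything with a single kfree(). The shape of it, with a simplified LED struct and a stub brightness hook:

#include <linux/leds.h>
#include <linux/slab.h>

struct my_led {
	struct led_classdev cdev;
	struct work_struct work;
	char name[32];
};

static void my_set(struct led_classdev *cdev, enum led_brightness b)
{
	/* hardware access elided in this sketch */
}

static int register_leds(struct i2c_client *client, int nleds)
{
	struct my_led *leds;
	int i, err;

	leds = kzalloc(sizeof(*leds) * nleds, GFP_KERNEL);
	if (!leds)
		return -ENOMEM;
	i2c_set_clientdata(client, leds);

	for (i = 0; i < nleds; i++) {
		snprintf(leds[i].name, sizeof(leds[i].name), "led:%d", i);
		leds[i].cdev.name = leds[i].name;
		leds[i].cdev.brightness_set = my_set;
		err = led_classdev_register(&client->dev, &leds[i].cdev);
		if (err < 0)
			goto unwind;
	}
	return 0;

unwind:
	while (i--) {			/* undo only what succeeded */
		led_classdev_unregister(&leds[i].cdev);
		cancel_work_sync(&leds[i].work);
	}
	kfree(leds);
	i2c_set_clientdata(client, NULL);
	return err;
}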
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c index 0b6095ba3ce9..bcd2bc477af2 100644 --- a/drivers/net/e1000e/ich8lan.c +++ b/drivers/net/e1000e/ich8lan.c | |||
@@ -396,7 +396,7 @@ static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw) | |||
396 | u32 extcnf_ctrl; | 396 | u32 extcnf_ctrl; |
397 | u32 timeout = PHY_CFG_TIMEOUT; | 397 | u32 timeout = PHY_CFG_TIMEOUT; |
398 | 398 | ||
399 | WARN_ON(preempt_count()); | 399 | might_sleep(); |
400 | 400 | ||
401 | if (!mutex_trylock(&nvm_mutex)) { | 401 | if (!mutex_trylock(&nvm_mutex)) { |
402 | WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n", | 402 | WARN(1, KERN_ERR "e1000e mutex contention. Owned by pid %d\n", |
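
Note: replacing WARN_ON(preempt_count()) with might_sleep() is the idiomatic annotation for "this function may block": it documents the fact in the source and, with sleep-in-atomic debugging enabled, complains in any atomic context rather than only when preemption happens to be disabled. For instance (cfg_mutex is a hypothetical lock):

static DEFINE_MUTEX(cfg_mutex);		/* hypothetical lock */

static void grab_config_lock(void)
{
	might_sleep();		/* we may block on the mutex below */
	mutex_lock(&cfg_mutex);
}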
diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c index f118252f3a9f..52e2743b04ec 100644 --- a/drivers/rtc/rtc-dev.c +++ b/drivers/rtc/rtc-dev.c | |||
@@ -422,6 +422,12 @@ done: | |||
422 | return err; | 422 | return err; |
423 | } | 423 | } |
424 | 424 | ||
425 | static int rtc_dev_fasync(int fd, struct file *file, int on) | ||
426 | { | ||
427 | struct rtc_device *rtc = file->private_data; | ||
428 | return fasync_helper(fd, file, on, &rtc->async_queue); | ||
429 | } | ||
430 | |||
425 | static int rtc_dev_release(struct inode *inode, struct file *file) | 431 | static int rtc_dev_release(struct inode *inode, struct file *file) |
426 | { | 432 | { |
427 | struct rtc_device *rtc = file->private_data; | 433 | struct rtc_device *rtc = file->private_data; |
@@ -434,16 +440,13 @@ static int rtc_dev_release(struct inode *inode, struct file *file) | |||
434 | if (rtc->ops->release) | 440 | if (rtc->ops->release) |
435 | rtc->ops->release(rtc->dev.parent); | 441 | rtc->ops->release(rtc->dev.parent); |
436 | 442 | ||
443 | if (file->f_flags & FASYNC) | ||
444 | rtc_dev_fasync(-1, file, 0); | ||
445 | |||
437 | clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); | 446 | clear_bit_unlock(RTC_DEV_BUSY, &rtc->flags); |
438 | return 0; | 447 | return 0; |
439 | } | 448 | } |
440 | 449 | ||
441 | static int rtc_dev_fasync(int fd, struct file *file, int on) | ||
442 | { | ||
443 | struct rtc_device *rtc = file->private_data; | ||
444 | return fasync_helper(fd, file, on, &rtc->async_queue); | ||
445 | } | ||
446 | |||
447 | static const struct file_operations rtc_dev_fops = { | 450 | static const struct file_operations rtc_dev_fops = { |
448 | .owner = THIS_MODULE, | 451 | .owner = THIS_MODULE, |
449 | .llseek = no_llseek, | 452 | .llseek = no_llseek, |
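
Note: defining rtc_dev_fasync() ahead of release() lets the release path detach any async notification the caller left behind; the conventional teardown is fasync_helper(-1, file, 0, ...) whenever FASYNC is still set on the file. The generic pairing looks like this (my_async_queue stands in for the per-device queue):

static struct fasync_struct *my_async_queue;	/* hypothetical */

static int my_fasync(int fd, struct file *file, int on)
{
	return fasync_helper(fd, file, on, &my_async_queue);
}

static int my_release(struct inode *inode, struct file *file)
{
	/* Drop the async notification entry before the file goes away. */
	if (file->f_flags & FASYNC)
		my_fasync(-1, file, 0);
	return 0;
}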
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index 1679e2f91c94..a0b6b46e7466 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
@@ -447,51 +447,36 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | |||
447 | { | 447 | { |
448 | char s[80]; | 448 | char s[80]; |
449 | 449 | ||
450 | sprintf(s, "%s sc:%x ", cdev->dev.bus_id, irq_ptr->schid.sch_no); | 450 | sprintf(s, "qdio: %s ", dev_name(&cdev->dev)); |
451 | |||
452 | switch (irq_ptr->qib.qfmt) { | 451 | switch (irq_ptr->qib.qfmt) { |
453 | case QDIO_QETH_QFMT: | 452 | case QDIO_QETH_QFMT: |
454 | sprintf(s + strlen(s), "OSADE "); | 453 | sprintf(s + strlen(s), "OSA "); |
455 | break; | 454 | break; |
456 | case QDIO_ZFCP_QFMT: | 455 | case QDIO_ZFCP_QFMT: |
457 | sprintf(s + strlen(s), "ZFCP "); | 456 | sprintf(s + strlen(s), "ZFCP "); |
458 | break; | 457 | break; |
459 | case QDIO_IQDIO_QFMT: | 458 | case QDIO_IQDIO_QFMT: |
460 | sprintf(s + strlen(s), "HiperSockets "); | 459 | sprintf(s + strlen(s), "HS "); |
461 | break; | 460 | break; |
462 | } | 461 | } |
463 | sprintf(s + strlen(s), "using: "); | 462 | sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no); |
464 | 463 | sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr)); | |
465 | if (!is_thinint_irq(irq_ptr)) | 464 | sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0); |
466 | sprintf(s + strlen(s), "no"); | 465 | sprintf(s + strlen(s), "PCI:%d ", |
467 | sprintf(s + strlen(s), "AdapterInterrupts "); | 466 | (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0); |
468 | if (!(irq_ptr->sch_token != 0)) | 467 | sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd); |
469 | sprintf(s + strlen(s), "no"); | 468 | sprintf(s + strlen(s), "SIGA:"); |
470 | sprintf(s + strlen(s), "QEBSM "); | 469 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " "); |
471 | if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED)) | 470 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " "); |
472 | sprintf(s + strlen(s), "no"); | 471 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " "); |
473 | sprintf(s + strlen(s), "OutboundPCI "); | 472 | sprintf(s + strlen(s), "%s", |
474 | if (!css_general_characteristics.aif_tdd) | 473 | (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " "); |
475 | sprintf(s + strlen(s), "no"); | 474 | sprintf(s + strlen(s), "%s", |
476 | sprintf(s + strlen(s), "TDD\n"); | 475 | (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " "); |
477 | printk(KERN_INFO "qdio: %s", s); | 476 | sprintf(s + strlen(s), "%s", |
478 | 477 | (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); | |
479 | memset(s, 0, sizeof(s)); | ||
480 | sprintf(s, "%s SIGA required: ", cdev->dev.bus_id); | ||
481 | if (irq_ptr->siga_flag.input) | ||
482 | sprintf(s + strlen(s), "Read "); | ||
483 | if (irq_ptr->siga_flag.output) | ||
484 | sprintf(s + strlen(s), "Write "); | ||
485 | if (irq_ptr->siga_flag.sync) | ||
486 | sprintf(s + strlen(s), "Sync "); | ||
487 | if (!irq_ptr->siga_flag.no_sync_ti) | ||
488 | sprintf(s + strlen(s), "SyncAI "); | ||
489 | if (!irq_ptr->siga_flag.no_sync_out_ti) | ||
490 | sprintf(s + strlen(s), "SyncOutAI "); | ||
491 | if (!irq_ptr->siga_flag.no_sync_out_pci) | ||
492 | sprintf(s + strlen(s), "SyncOutPCI"); | ||
493 | sprintf(s + strlen(s), "\n"); | 478 | sprintf(s + strlen(s), "\n"); |
494 | printk(KERN_INFO "qdio: %s", s); | 479 | printk(KERN_INFO "%s", s); |
495 | } | 480 | } |
496 | 481 | ||
497 | int __init qdio_setup_init(void) | 482 | int __init qdio_setup_init(void) |
diff --git a/drivers/spi/orion_spi.c b/drivers/spi/orion_spi.c index c4eaacd6e553..b872bfaf4bd2 100644 --- a/drivers/spi/orion_spi.c +++ b/drivers/spi/orion_spi.c | |||
@@ -427,7 +427,7 @@ static int orion_spi_transfer(struct spi_device *spi, struct spi_message *m) | |||
427 | goto msg_rejected; | 427 | goto msg_rejected; |
428 | } | 428 | } |
429 | 429 | ||
430 | if (t->speed_hz < orion_spi->min_speed) { | 430 | if (t->speed_hz && t->speed_hz < orion_spi->min_speed) { |
431 | dev_err(&spi->dev, | 431 | dev_err(&spi->dev, |
432 | "message rejected : " | 432 | "message rejected : " |
433 | "device min speed (%d Hz) exceeds " | 433 | "device min speed (%d Hz) exceeds " |
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index c6299e8a041d..9cbff84b787d 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c | |||
@@ -2400,11 +2400,15 @@ static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch) | |||
2400 | 2400 | ||
2401 | if (!fbcon_is_inactive(vc, info)) { | 2401 | if (!fbcon_is_inactive(vc, info)) { |
2402 | if (ops->blank_state != blank) { | 2402 | if (ops->blank_state != blank) { |
2403 | int ret = 1; | ||
2404 | |||
2403 | ops->blank_state = blank; | 2405 | ops->blank_state = blank; |
2404 | fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW); | 2406 | fbcon_cursor(vc, blank ? CM_ERASE : CM_DRAW); |
2405 | ops->cursor_flash = (!blank); | 2407 | ops->cursor_flash = (!blank); |
2406 | 2408 | ||
2407 | if (fb_blank(info, blank)) | 2409 | if (info->fbops->fb_blank) |
2410 | ret = info->fbops->fb_blank(blank, info); | ||
2411 | if (ret) | ||
2408 | fbcon_generic_blank(vc, info, blank); | 2412 | fbcon_generic_blank(vc, info, blank); |
2409 | } | 2413 | } |
2410 | 2414 | ||
diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h new file mode 100644 index 000000000000..fa4328f9124f --- /dev/null +++ b/include/asm-mips/cevt-r4k.h | |||
@@ -0,0 +1,46 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2008 Kevin D. Kissell | ||
7 | */ | ||
8 | |||
9 | /* | ||
10 | * Definitions used for common event timer implementation | ||
11 | * for MIPS 4K-type processors and their MIPS MT variants. | ||
12 | * Avoids unsightly extern declarations in C files. | ||
13 | */ | ||
14 | #ifndef __ASM_CEVT_R4K_H | ||
15 | #define __ASM_CEVT_R4K_H | ||
16 | |||
17 | DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device); | ||
18 | |||
19 | void mips_event_handler(struct clock_event_device *dev); | ||
20 | int c0_compare_int_usable(void); | ||
21 | void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *); | ||
22 | irqreturn_t c0_compare_interrupt(int, void *); | ||
23 | |||
24 | extern struct irqaction c0_compare_irqaction; | ||
25 | extern int cp0_timer_irq_installed; | ||
26 | |||
27 | /* | ||
28 | * Possibly handle a performance counter interrupt. | ||
29 | * Return true if the timer interrupt should not be checked | ||
30 | */ | ||
31 | |||
32 | static inline int handle_perf_irq(int r2) | ||
33 | { | ||
34 | /* | ||
35 | * The performance counter overflow interrupt may be shared with the | ||
36 | * timer interrupt (cp0_perfcount_irq < 0). If it is and a | ||
37 | * performance counter has overflowed (perf_irq() == IRQ_HANDLED) | ||
38 | * and we can't reliably determine if a counter interrupt has also | ||
39 | * happened (!r2) then don't check for a timer interrupt. | ||
40 | */ | ||
41 | return (cp0_perfcount_irq < 0) && | ||
42 | perf_irq() == IRQ_HANDLED && | ||
43 | !r2; | ||
44 | } | ||
45 | |||
46 | #endif /* __ASM_CEVT_R4K_H */ | ||
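
Note: handle_perf_irq() is meant to run at the top of the c0 compare handler: a nonzero return means a shared perf-counter interrupt was consumed and the timer cannot safely be checked on this pass. A simplified caller, after the pattern of the real cevt-r4k handler:

/* Simplified caller; the real handler does more bookkeeping. */
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
	const int r2 = cpu_has_mips_r2;	/* R2 cores can identify the source */
	struct clock_event_device *cd;

	/*
	 * If a shared perf-counter interrupt was consumed and we cannot
	 * tell whether a timer tick is also pending, do not touch the
	 * timer state on this pass.
	 */
	if (handle_perf_irq(r2))
		return IRQ_HANDLED;

	cd = &per_cpu(mips_clockevent_device, smp_processor_id());
	cd->event_handler(cd);
	return IRQ_HANDLED;
}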
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h index 881e8866501d..701ec0ba8fa9 100644 --- a/include/asm-mips/irqflags.h +++ b/include/asm-mips/irqflags.h | |||
@@ -38,8 +38,17 @@ __asm__( | |||
38 | " .set pop \n" | 38 | " .set pop \n" |
39 | " .endm"); | 39 | " .endm"); |
40 | 40 | ||
41 | extern void smtc_ipi_replay(void); | ||
42 | |||
41 | static inline void raw_local_irq_enable(void) | 43 | static inline void raw_local_irq_enable(void) |
42 | { | 44 | { |
45 | #ifdef CONFIG_MIPS_MT_SMTC | ||
46 | /* | ||
47 | * SMTC kernel needs to do a software replay of queued | ||
48 | * IPIs, at the cost of call overhead on each local_irq_enable() | ||
49 | */ | ||
50 | smtc_ipi_replay(); | ||
51 | #endif | ||
43 | __asm__ __volatile__( | 52 | __asm__ __volatile__( |
44 | "raw_local_irq_enable" | 53 | "raw_local_irq_enable" |
45 | : /* no outputs */ | 54 | : /* no outputs */ |
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void) | |||
47 | : "memory"); | 56 | : "memory"); |
48 | } | 57 | } |
49 | 58 | ||
59 | |||
50 | /* | 60 | /* |
51 | * For cli() we have to insert nops to make sure that the new value | 61 | * For cli() we have to insert nops to make sure that the new value |
52 | * has actually arrived in the status register before the end of this | 62 | * has actually arrived in the status register before the end of this |
@@ -185,15 +195,14 @@ __asm__( | |||
185 | " .set pop \n" | 195 | " .set pop \n" |
186 | " .endm \n"); | 196 | " .endm \n"); |
187 | 197 | ||
188 | extern void smtc_ipi_replay(void); | ||
189 | 198 | ||
190 | static inline void raw_local_irq_restore(unsigned long flags) | 199 | static inline void raw_local_irq_restore(unsigned long flags) |
191 | { | 200 | { |
192 | unsigned long __tmp1; | 201 | unsigned long __tmp1; |
193 | 202 | ||
194 | #ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY | 203 | #ifdef CONFIG_MIPS_MT_SMTC |
195 | /* | 204 | /* |
196 | * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred | 205 | * SMTC kernel needs to do a software replay of queued |
197 | * IPIs, at the cost of branch and call overhead on each | 206 | * IPIs, at the cost of branch and call overhead on each |
198 | * local_irq_restore() | 207 | * local_irq_restore() |
199 | */ | 208 | */ |
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags) | |||
208 | : "memory"); | 217 | : "memory"); |
209 | } | 218 | } |
210 | 219 | ||
220 | static inline void __raw_local_irq_restore(unsigned long flags) | ||
221 | { | ||
222 | unsigned long __tmp1; | ||
223 | |||
224 | __asm__ __volatile__( | ||
225 | "raw_local_irq_restore\t%0" | ||
226 | : "=r" (__tmp1) | ||
227 | : "0" (flags) | ||
228 | : "memory"); | ||
229 | } | ||
230 | |||
211 | static inline int raw_irqs_disabled_flags(unsigned long flags) | 231 | static inline int raw_irqs_disabled_flags(unsigned long flags) |
212 | { | 232 | { |
213 | #ifdef CONFIG_MIPS_MT_SMTC | 233 | #ifdef CONFIG_MIPS_MT_SMTC |
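
Note: the point of the second, double-underscore restore variant shows up when the two hunks are read together: with CONFIG_MIPS_MT_SMTC, both raw_local_irq_enable() and raw_local_irq_restore() replay deferred IPIs, so code running inside the replay path must restore flags through a variant that skips the replay or it would recurse. Schematically (arch_restore_flags() is a hypothetical stand-in for the asm body):

static inline void raw_local_irq_restore(unsigned long flags)
{
#ifdef CONFIG_MIPS_MT_SMTC
	smtc_ipi_replay();		/* may itself save/restore flags */
#endif
	arch_restore_flags(flags);	/* hypothetical: the asm body */
}

static inline void __raw_local_irq_restore(unsigned long flags)
{
	/* No replay: safe to call from inside smtc_ipi_replay(). */
	arch_restore_flags(flags);
}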
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h index a46f8e258e6b..979866000da4 100644 --- a/include/asm-mips/mipsregs.h +++ b/include/asm-mips/mipsregs.h | |||
@@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set) \ | |||
1462 | { \ | 1462 | { \ |
1463 | unsigned int res; \ | 1463 | unsigned int res; \ |
1464 | unsigned int omt; \ | 1464 | unsigned int omt; \ |
1465 | unsigned int flags; \ | 1465 | unsigned long flags; \ |
1466 | \ | 1466 | \ |
1467 | local_irq_save(flags); \ | 1467 | local_irq_save(flags); \ |
1468 | omt = __dmt(); \ | 1468 | omt = __dmt(); \ |
@@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear) \ | |||
1480 | { \ | 1480 | { \ |
1481 | unsigned int res; \ | 1481 | unsigned int res; \ |
1482 | unsigned int omt; \ | 1482 | unsigned int omt; \ |
1483 | unsigned int flags; \ | 1483 | unsigned long flags; \ |
1484 | \ | 1484 | \ |
1485 | local_irq_save(flags); \ | 1485 | local_irq_save(flags); \ |
1486 | omt = __dmt(); \ | 1486 | omt = __dmt(); \ |
@@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new) \ | |||
1498 | { \ | 1498 | { \ |
1499 | unsigned int res; \ | 1499 | unsigned int res; \ |
1500 | unsigned int omt; \ | 1500 | unsigned int omt; \ |
1501 | unsigned int flags; \ | 1501 | unsigned long flags; \ |
1502 | \ | 1502 | \ |
1503 | local_irq_save(flags); \ | 1503 | local_irq_save(flags); \ |
1504 | \ | 1504 | \ |
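
Note: the mipsregs.h hunks fix a classic type bug: local_irq_save() stores the whole status word and its argument must be unsigned long, never unsigned int, or the saved flags can be truncated on 64-bit kernels and corrupt state on restore. The required idiom:

static void guarded_update(unsigned int *val)
{
	unsigned long flags;	/* must be unsigned long */

	local_irq_save(flags);
	(*val)++;		/* critical section */
	local_irq_restore(flags);
}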
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h index 3639b28f80db..ea60bf08dcb0 100644 --- a/include/asm-mips/smtc.h +++ b/include/asm-mips/smtc.h | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include <asm/mips_mt.h> | 8 | #include <asm/mips_mt.h> |
9 | #include <asm/smtc_ipi.h> | ||
9 | 10 | ||
10 | /* | 11 | /* |
11 | * System-wide SMTC status information | 12 | * System-wide SMTC status information |
@@ -38,14 +39,15 @@ struct mm_struct; | |||
38 | struct task_struct; | 39 | struct task_struct; |
39 | 40 | ||
40 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); | 41 | void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu); |
41 | 42 | void self_ipi(struct smtc_ipi *); | |
42 | void smtc_flush_tlb_asid(unsigned long asid); | 43 | void smtc_flush_tlb_asid(unsigned long asid); |
43 | extern int mipsmt_build_cpu_map(int startslot); | 44 | extern int smtc_build_cpu_map(int startslot); |
44 | extern void mipsmt_prepare_cpus(void); | 45 | extern void smtc_prepare_cpus(int cpus); |
45 | extern void smtc_smp_finish(void); | 46 | extern void smtc_smp_finish(void); |
46 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); | 47 | extern void smtc_boot_secondary(int cpu, struct task_struct *t); |
47 | extern void smtc_cpus_done(void); | 48 | extern void smtc_cpus_done(void); |
48 | 49 | ||
50 | |||
49 | /* | 51 | /* |
50 | * Sharing the TLB between multiple VPEs means that the | 52 | * Sharing the TLB between multiple VPEs means that the |
51 | * "random" index selection function is not allowed to | 53 | * "random" index selection function is not allowed to |
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h index 051e1af0bb95..4c37c4e5f72e 100644 --- a/include/asm-mips/stackframe.h +++ b/include/asm-mips/stackframe.h | |||
@@ -297,14 +297,31 @@ | |||
297 | #ifdef CONFIG_MIPS_MT_SMTC | 297 | #ifdef CONFIG_MIPS_MT_SMTC |
298 | .set mips32r2 | 298 | .set mips32r2 |
299 | /* | 299 | /* |
300 | * This may not really be necessary if ints are already | 300 | * We need to make sure the read-modify-write |
301 | * inhibited here. | 301 | * of Status below isn't perturbed by an interrupt |
302 | * or cross-TC access, so we need to do at least a DMT, | ||
303 | * protected by an interrupt-inhibit. But setting IXMT | ||
304 | * also creates a few-cycle window where an IPI could | ||
305 | * be queued and not be detected before potentially | ||
306 | * returning to a WAIT or user-mode loop. It must be | ||
307 | * replayed. | ||
308 | * | ||
309 | * We're in the middle of a context switch, and | ||
310 | * we can't dispatch it directly without trashing | ||
311 | * some registers, so we'll try to detect this unlikely | ||
312 | * case and program a software interrupt in the VPE, | ||
313 | * as would be done for a cross-VPE IPI. To accommodate | ||
314 | * the handling of that case, we're doing a DVPE instead | ||
315 | * of just a DMT here to protect against other threads. | ||
316 | * This is a lot of cruft to cover a tiny window. | ||
317 | * If you can find a better design, implement it! | ||
318 | * | ||
302 | */ | 319 | */ |
303 | mfc0 v0, CP0_TCSTATUS | 320 | mfc0 v0, CP0_TCSTATUS |
304 | ori v0, TCSTATUS_IXMT | 321 | ori v0, TCSTATUS_IXMT |
305 | mtc0 v0, CP0_TCSTATUS | 322 | mtc0 v0, CP0_TCSTATUS |
306 | _ehb | 323 | _ehb |
307 | DMT 5 # dmt a1 | 324 | DVPE 5 # dvpe a1 |
308 | jal mips_ihb | 325 | jal mips_ihb |
309 | #endif /* CONFIG_MIPS_MT_SMTC */ | 326 | #endif /* CONFIG_MIPS_MT_SMTC */ |
310 | mfc0 a0, CP0_STATUS | 327 | mfc0 a0, CP0_STATUS |
@@ -325,17 +342,50 @@ | |||
325 | */ | 342 | */ |
326 | LONG_L v1, PT_TCSTATUS(sp) | 343 | LONG_L v1, PT_TCSTATUS(sp) |
327 | _ehb | 344 | _ehb |
328 | mfc0 v0, CP0_TCSTATUS | 345 | mfc0 a0, CP0_TCSTATUS |
329 | andi v1, TCSTATUS_IXMT | 346 | andi v1, TCSTATUS_IXMT |
330 | /* We know that TCStatua.IXMT should be set from above */ | 347 | bnez v1, 0f |
331 | xori v0, v0, TCSTATUS_IXMT | 348 | |
332 | or v0, v0, v1 | 349 | /* |
333 | mtc0 v0, CP0_TCSTATUS | 350 | * We'd like to detect any IPIs queued in the tiny window |
334 | _ehb | 351 | * above and request a software interrupt to service them |
335 | andi a1, a1, VPECONTROL_TE | 352 | * when we ERET. |
353 | * | ||
354 | * Computing the offset into the IPIQ array of the executing | ||
355 | * TC's IPI queue in-line would be tedious. We use part of | ||
356 | * the TCContext register to hold 16 bits of offset that we | ||
357 | * can add in-line to find the queue head. | ||
358 | */ | ||
359 | mfc0 v0, CP0_TCCONTEXT | ||
360 | la a2, IPIQ | ||
361 | srl v0, v0, 16 | ||
362 | addu a2, a2, v0 | ||
363 | LONG_L v0, 0(a2) | ||
364 | beqz v0, 0f | ||
365 | /* | ||
366 | * If we have a queue, provoke dispatch within the VPE by setting C_SW1 | ||
367 | */ | ||
368 | mfc0 v0, CP0_CAUSE | ||
369 | ori v0, v0, C_SW1 | ||
370 | mtc0 v0, CP0_CAUSE | ||
371 | 0: | ||
372 | /* | ||
373 | * This test should really never branch but | ||
374 | * let's be prudent here. Having atomized | ||
375 | * the shared register modifications, we can | ||
376 | * now EVPE, and must do so before interrupts | ||
377 | * are potentially re-enabled. | ||
378 | */ | ||
379 | andi a1, a1, MVPCONTROL_EVP | ||
336 | beqz a1, 1f | 380 | beqz a1, 1f |
337 | emt | 381 | evpe |
338 | 1: | 382 | 1: |
383 | /* We know that TCStatus.IXMT should be set from above */ | ||
384 | xori a0, a0, TCSTATUS_IXMT | ||
385 | or a0, a0, v1 | ||
386 | mtc0 a0, CP0_TCSTATUS | ||
387 | _ehb | ||
388 | |||
339 | .set mips0 | 389 | .set mips0 |
340 | #endif /* CONFIG_MIPS_MT_SMTC */ | 390 | #endif /* CONFIG_MIPS_MT_SMTC */ |
341 | LONG_L v1, PT_EPC(sp) | 391 | LONG_L v1, PT_EPC(sp) |
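
Note: the new restore-path assembly is dense; in rough C terms it does the following (every helper name here is a descriptive stand-in for one of the instructions above, not a real kernel symbol):

/* Rough C outline of the SMTC restore path. */
void smtc_restore_outline(void)
{
	set_tcstatus_ixmt();	/* inhibit this TC's interrupts */
	dvpe();			/* quiesce the whole VPE, not just a DMT */
	/* ... restore Status and the other shared state ... */

	/*
	 * TCContext's upper 16 bits cache this TC's offset into
	 * IPIQ[], so the queue head can be found without computing
	 * the offset in-line.
	 */
	if (ipiq_head_at(read_tccontext() >> 16) != NULL)
		set_cause_sw1();	/* replay via soft IRQ at ERET */

	evpe();			/* re-enable the VPE before IRQs return */
	restore_saved_ixmt();	/* put IXMT back as the frame had it */
}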
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h index 5da9794b2d78..b106fd8e0d5c 100644 --- a/include/linux/stacktrace.h +++ b/include/linux/stacktrace.h | |||
@@ -1,6 +1,8 @@ | |||
1 | #ifndef __LINUX_STACKTRACE_H | 1 | #ifndef __LINUX_STACKTRACE_H |
2 | #define __LINUX_STACKTRACE_H | 2 | #define __LINUX_STACKTRACE_H |
3 | 3 | ||
4 | struct task_struct; | ||
5 | |||
4 | #ifdef CONFIG_STACKTRACE | 6 | #ifdef CONFIG_STACKTRACE |
5 | struct stack_trace { | 7 | struct stack_trace { |
6 | unsigned int nr_entries, max_entries; | 8 | unsigned int nr_entries, max_entries; |
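
Note: the stacktrace.h change is the standard fix for a header that uses a type only through pointers: forward-declare struct task_struct instead of including sched.h, and the prototypes keep compiling in any inclusion order. In general:

/* header.h: the full definition is not needed when the type is
 * only used through pointers in prototypes. */
struct task_struct;

void inspect(struct task_struct *tsk);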
diff --git a/init/main.c b/init/main.c index f6f7042331dc..3820323c4c84 100644 --- a/init/main.c +++ b/init/main.c | |||
@@ -708,7 +708,7 @@ int do_one_initcall(initcall_t fn) | |||
708 | int result; | 708 | int result; |
709 | 709 | ||
710 | if (initcall_debug) { | 710 | if (initcall_debug) { |
711 | print_fn_descriptor_symbol("calling %s\n", fn); | 711 | printk("calling %pF\n", fn); |
712 | t0 = ktime_get(); | 712 | t0 = ktime_get(); |
713 | } | 713 | } |
714 | 714 | ||
@@ -718,8 +718,8 @@ int do_one_initcall(initcall_t fn) | |||
718 | t1 = ktime_get(); | 718 | t1 = ktime_get(); |
719 | delta = ktime_sub(t1, t0); | 719 | delta = ktime_sub(t1, t0); |
720 | 720 | ||
721 | print_fn_descriptor_symbol("initcall %s", fn); | 721 | printk("initcall %pF returned %d after %Ld msecs\n", |
722 | printk(" returned %d after %Ld msecs\n", result, | 722 | fn, result, |
723 | (unsigned long long) delta.tv64 >> 20); | 723 | (unsigned long long) delta.tv64 >> 20); |
724 | } | 724 | } |
725 | 725 | ||
@@ -737,8 +737,7 @@ int do_one_initcall(initcall_t fn) | |||
737 | local_irq_enable(); | 737 | local_irq_enable(); |
738 | } | 738 | } |
739 | if (msgbuf[0]) { | 739 | if (msgbuf[0]) { |
740 | print_fn_descriptor_symbol(KERN_WARNING "initcall %s", fn); | 740 | printk("initcall %pF returned with %s\n", fn, msgbuf); |
741 | printk(" returned with %s\n", msgbuf); | ||
742 | } | 741 | } |
743 | 742 | ||
744 | return result; | 743 | return result; |
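
Note: the init/main.c hunks move from print_fn_descriptor_symbol() to the %pF printk extension, which resolves a function pointer to its symbol name (dereferencing function descriptors on ABIs that use them, e.g. ia64 and ppc64) and lets each message go out in a single printk(). For example:

static void report_initcall(initcall_t fn, int result)
{
	/* %pF prints "symbol+offset/size", dereferencing function
	 * descriptors on ABIs that use them. */
	printk(KERN_INFO "initcall %pF returned %d\n", fn, result);
}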