Diffstat (limited to 'include/asm-mips')

 include/asm-mips/cevt-r4k.h   | 46
 include/asm-mips/irqflags.h   | 26
 include/asm-mips/mipsregs.h   |  6
 include/asm-mips/pgtable-32.h |  2
 include/asm-mips/smtc.h       |  8
 include/asm-mips/stackframe.h | 72

 6 files changed, 139 insertions(+), 21 deletions(-)
diff --git a/include/asm-mips/cevt-r4k.h b/include/asm-mips/cevt-r4k.h
new file mode 100644
index 000000000000..fa4328f9124f
--- /dev/null
+++ b/include/asm-mips/cevt-r4k.h
@@ -0,0 +1,46 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
+
+/*
+ * Definitions used for common event timer implementation
+ * for MIPS 4K-type processors and their MIPS MT variants.
+ * Avoids unsightly extern declarations in C files.
+ */
+#ifndef __ASM_CEVT_R4K_H
+#define __ASM_CEVT_R4K_H
+
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+
+void mips_event_handler(struct clock_event_device *dev);
+int c0_compare_int_usable(void);
+void mips_set_clock_mode(enum clock_event_mode, struct clock_event_device *);
+irqreturn_t c0_compare_interrupt(int, void *);
+
+extern struct irqaction c0_compare_irqaction;
+extern int cp0_timer_irq_installed;
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked
+ */
+
+static inline int handle_perf_irq(int r2)
+{
+	/*
+	 * The performance counter overflow interrupt may be shared with the
+	 * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+	 * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+	 * and we can't reliably determine if a counter interrupt has also
+	 * happened (!r2) then don't check for a timer interrupt.
+	 */
+	return (cp0_perfcount_irq < 0) &&
+		perf_irq() == IRQ_HANDLED &&
+		!r2;
+}
+
+#endif /* __ASM_CEVT_R4K_H */
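For context on how handle_perf_irq() is meant to be consumed: the R4K clockevent interrupt handler checks it before acking Count/Compare. The sketch below is modelled on the corresponding handler in arch/mips/kernel/cevt-r4k.c, but is an illustration of the intended flow rather than the committed code:

	irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
	{
		const int r2 = cpu_has_mips_r2;	/* R2 cores can identify the source */
		struct clock_event_device *cd;

		/*
		 * A shared perf-counter overflow may have been serviced; if
		 * we can't rule out that it was the only interrupt source,
		 * stop here rather than misreport a timer tick.
		 */
		if (handle_perf_irq(r2))
			return IRQ_HANDLED;

		/* On R2, double-check Cause.TI before acking the timer */
		if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
			write_c0_compare(read_c0_compare());	/* ack the tick */
			cd = &__get_cpu_var(mips_clockevent_device);
			cd->event_handler(cd);
		}
		return IRQ_HANDLED;
	}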
diff --git a/include/asm-mips/irqflags.h b/include/asm-mips/irqflags.h
index 881e8866501d..701ec0ba8fa9 100644
--- a/include/asm-mips/irqflags.h
+++ b/include/asm-mips/irqflags.h
@@ -38,8 +38,17 @@ __asm__(
38 " .set pop \n" 38 " .set pop \n"
39 " .endm"); 39 " .endm");
40 40
41extern void smtc_ipi_replay(void);
42
41static inline void raw_local_irq_enable(void) 43static inline void raw_local_irq_enable(void)
42{ 44{
45#ifdef CONFIG_MIPS_MT_SMTC
46 /*
47 * SMTC kernel needs to do a software replay of queued
48 * IPIs, at the cost of call overhead on each local_irq_enable()
49 */
50 smtc_ipi_replay();
51#endif
43 __asm__ __volatile__( 52 __asm__ __volatile__(
44 "raw_local_irq_enable" 53 "raw_local_irq_enable"
45 : /* no outputs */ 54 : /* no outputs */
@@ -47,6 +56,7 @@ static inline void raw_local_irq_enable(void)
 		: "memory");
 }
 
+
 /*
  * For cli() we have to insert nops to make sure that the new value
  * has actually arrived in the status register before the end of this
@@ -185,15 +195,14 @@ __asm__(
185 " .set pop \n" 195 " .set pop \n"
186 " .endm \n"); 196 " .endm \n");
187 197
188extern void smtc_ipi_replay(void);
189 198
190static inline void raw_local_irq_restore(unsigned long flags) 199static inline void raw_local_irq_restore(unsigned long flags)
191{ 200{
192 unsigned long __tmp1; 201 unsigned long __tmp1;
193 202
194#ifdef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY 203#ifdef CONFIG_MIPS_MT_SMTC
195 /* 204 /*
196 * CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY does prompt replay of deferred 205 * SMTC kernel needs to do a software replay of queued
197 * IPIs, at the cost of branch and call overhead on each 206 * IPIs, at the cost of branch and call overhead on each
198 * local_irq_restore() 207 * local_irq_restore()
199 */ 208 */
@@ -208,6 +217,17 @@ static inline void raw_local_irq_restore(unsigned long flags)
 		: "memory");
 }
 
+static inline void __raw_local_irq_restore(unsigned long flags)
+{
+	unsigned long __tmp1;
+
+	__asm__ __volatile__(
+		"raw_local_irq_restore\t%0"
+		: "=r" (__tmp1)
+		: "0" (flags)
+		: "memory");
+}
+
 static inline int raw_irqs_disabled_flags(unsigned long flags)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
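The net effect of the irqflags.h changes is that every interrupt-enabling path on an SMTC kernel first drains the per-TC IPI queue, and the new __raw_local_irq_restore() variant skips that replay hook so the replay loop itself can restore flags without recursing. What smtc_ipi_replay() has to do is roughly the following (an assumed sketch; the real function lives in arch/mips/kernel/smtc.c, built on the IPIQ[] queues, smtc_ipi_dq() from smtc_ipi.h, and the self_ipi() declared elsewhere in this patch):

	void smtc_ipi_replay(void)
	{
		unsigned long flags;
		struct smtc_ipi *pipi;

		/*
		 * Dequeue-and-dispatch must itself run with interrupts off,
		 * and must restore them with __raw_local_irq_restore() so
		 * we don't recurse back into this function.
		 */
		local_irq_save(flags);
		while ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL)
			self_ipi(pipi);	/* redirect to this TC's IPI handler */
		__raw_local_irq_restore(flags);
	}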
diff --git a/include/asm-mips/mipsregs.h b/include/asm-mips/mipsregs.h
index a46f8e258e6b..979866000da4 100644
--- a/include/asm-mips/mipsregs.h
+++ b/include/asm-mips/mipsregs.h
@@ -1462,7 +1462,7 @@ set_c0_##name(unsigned int set) \
 {									\
 	unsigned int res;						\
 	unsigned int omt;						\
-	unsigned int flags;						\
+	unsigned long flags;						\
 									\
 	local_irq_save(flags);						\
 	omt = __dmt();							\
@@ -1480,7 +1480,7 @@ clear_c0_##name(unsigned int clear) \
 {									\
 	unsigned int res;						\
 	unsigned int omt;						\
-	unsigned int flags;						\
+	unsigned long flags;						\
 									\
 	local_irq_save(flags);						\
 	omt = __dmt();							\
@@ -1498,7 +1498,7 @@ change_c0_##name(unsigned int change, unsigned int new) \
 {									\
 	unsigned int res;						\
 	unsigned int omt;						\
-	unsigned int flags;						\
+	unsigned long flags;						\
 									\
 	local_irq_save(flags);						\
 									\
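The three mipsregs.h hunks are a plain type fix: the generic irqflags API traffics in unsigned long, so saving the Status word in an unsigned int truncates it on a 64-bit kernel and trips the typecheck() that local_irq_save() performs. Spelled out, each of these MT-safe accessors expands to roughly the shape below (an illustrative expansion, not the literal macro output):

	static inline unsigned int set_c0_status(unsigned int set)
	{
		unsigned int res;
		unsigned int omt;
		unsigned long flags;	/* was unsigned int: wrong width */

		local_irq_save(flags);	/* expects an unsigned long lvalue */
		omt = __dmt();		/* quiesce the other MT threads */
		res = read_c0_status();
		write_c0_status(res | set);
		__emt(omt);		/* restore the previous MT state */
		local_irq_restore(flags);

		return res;
	}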
diff --git a/include/asm-mips/pgtable-32.h b/include/asm-mips/pgtable-32.h
index 4396e9ffd418..55813d6150c7 100644
--- a/include/asm-mips/pgtable-32.h
+++ b/include/asm-mips/pgtable-32.h
@@ -57,7 +57,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 #define PMD_ORDER	1
 #define PTE_ORDER	0
 
-#define PTRS_PER_PGD	((PAGE_SIZE << PGD_ORDER) / sizeof(pgd_t))
+#define PTRS_PER_PGD	(USER_PTRS_PER_PGD * 2)
 #define PTRS_PER_PTE	((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
 
 #define USER_PTRS_PER_PGD	(0x80000000UL/PGDIR_SIZE)
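Taken with the surrounding definitions, the new PTRS_PER_PGD sizes the page directory by the address space it must map rather than by sizeof(pgd_t): user space is the low 2 GB and the kernel half is the same size, so the table needs exactly twice USER_PTRS_PER_PGD entries whatever the PTE width, which the old PAGE_SIZE-based formula did not guarantee. For the common 32-bit configuration with 4 KB pages (PGDIR_SHIFT == 22; other configurations scale the same way) the arithmetic works out as:

	PGDIR_SIZE        = 1 << PGDIR_SHIFT  = 4 MB mapped per PGD entry
	USER_PTRS_PER_PGD = 0x80000000 / 4 MB = 512 entries  (low 2 GB, user)
	PTRS_PER_PGD      = 512 * 2           = 1024 entries (full 4 GB)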
diff --git a/include/asm-mips/smtc.h b/include/asm-mips/smtc.h
index 3639b28f80db..ea60bf08dcb0 100644
--- a/include/asm-mips/smtc.h
+++ b/include/asm-mips/smtc.h
@@ -6,6 +6,7 @@
  */
 
 #include <asm/mips_mt.h>
+#include <asm/smtc_ipi.h>
 
 /*
  * System-wide SMTC status information
@@ -38,14 +39,15 @@ struct mm_struct;
 struct task_struct;
 
 void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
-
+void self_ipi(struct smtc_ipi *);
 void smtc_flush_tlb_asid(unsigned long asid);
-extern int mipsmt_build_cpu_map(int startslot);
-extern void mipsmt_prepare_cpus(void);
+extern int smtc_build_cpu_map(int startslot);
+extern void smtc_prepare_cpus(int cpus);
 extern void smtc_smp_finish(void);
 extern void smtc_boot_secondary(int cpu, struct task_struct *t);
 extern void smtc_cpus_done(void);
 
+
 /*
  * Sharing the TLB between multiple VPEs means that the
  * "random" index selection function is not allowed to
diff --git a/include/asm-mips/stackframe.h b/include/asm-mips/stackframe.h
index 051e1af0bb95..4c37c4e5f72e 100644
--- a/include/asm-mips/stackframe.h
+++ b/include/asm-mips/stackframe.h
@@ -297,14 +297,31 @@
 #ifdef CONFIG_MIPS_MT_SMTC
 	.set	mips32r2
 	/*
-	 * This may not really be necessary if ints are already
-	 * inhibited here.
+	 * We need to make sure the read-modify-write
+	 * of Status below isn't perturbed by an interrupt
+	 * or cross-TC access, so we need to do at least a DMT,
+	 * protected by an interrupt-inhibit. But setting IXMT
+	 * also creates a few-cycle window where an IPI could
+	 * be queued and not be detected before potentially
+	 * returning to a WAIT or user-mode loop. It must be
+	 * replayed.
+	 *
+	 * We're in the middle of a context switch, and
+	 * we can't dispatch it directly without trashing
+	 * some registers, so we'll try to detect this unlikely
+	 * case and program a software interrupt in the VPE,
+	 * as would be done for a cross-VPE IPI. To accommodate
+	 * the handling of that case, we're doing a DVPE instead
+	 * of just a DMT here to protect against other threads.
+	 * This is a lot of cruft to cover a tiny window.
+	 * If you can find a better design, implement it!
+	 *
 	 */
 	mfc0	v0, CP0_TCSTATUS
 	ori	v0, TCSTATUS_IXMT
 	mtc0	v0, CP0_TCSTATUS
 	_ehb
-	DMT	5				# dmt	a1
+	DVPE	5				# dvpe	a1
 	jal	mips_ihb
 #endif /* CONFIG_MIPS_MT_SMTC */
 	mfc0	a0, CP0_STATUS
@@ -325,17 +342,50 @@
  */
 	LONG_L	v1, PT_TCSTATUS(sp)
 	_ehb
-	mfc0	v0, CP0_TCSTATUS
+	mfc0	a0, CP0_TCSTATUS
 	andi	v1, TCSTATUS_IXMT
-	/* We know that TCStatua.IXMT should be set from above */
-	xori	v0, v0, TCSTATUS_IXMT
-	or	v0, v0, v1
-	mtc0	v0, CP0_TCSTATUS
-	_ehb
-	andi	a1, a1, VPECONTROL_TE
+	bnez	v1, 0f
+
+/*
+ * We'd like to detect any IPIs queued in the tiny window
+ * above and request a software interrupt to service them
+ * when we ERET.
+ *
+ * Computing the offset into the IPIQ array of the executing
+ * TC's IPI queue in-line would be tedious. We use part of
+ * the TCContext register to hold 16 bits of offset that we
+ * can add in-line to find the queue head.
+ */
+	mfc0	v0, CP0_TCCONTEXT
+	la	a2, IPIQ
+	srl	v0, v0, 16
+	addu	a2, a2, v0
+	LONG_L	v0, 0(a2)
+	beqz	v0, 0f
+/*
+ * If we have a queue, provoke dispatch within the VPE by setting C_SW1
+ */
+	mfc0	v0, CP0_CAUSE
+	ori	v0, v0, C_SW1
+	mtc0	v0, CP0_CAUSE
+0:
+	/*
+	 * This test should really never branch but
+	 * let's be prudent here. Having atomized
+	 * the shared register modifications, we can
+	 * now EVPE, and must do so before interrupts
+	 * are potentially re-enabled.
+	 */
+	andi	a1, a1, MVPCONTROL_EVP
 	beqz	a1, 1f
-	emt
+	evpe
 1:
+	/* We know that TCStatus.IXMT should be set from above */
+	xori	a0, a0, TCSTATUS_IXMT
+	or	a0, a0, v1
+	mtc0	a0, CP0_TCSTATUS
+	_ehb
+
 	.set	mips0
 #endif /* CONFIG_MIPS_MT_SMTC */
 	LONG_L	v1, PT_EPC(sp)
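The queue lookup in that restore path only works if something has already parked each TC's byte offset into IPIQ[] in the upper half of its TCContext register. That setup belongs to the C side of this patch (arch/mips/kernel/smtc.c, when each TC is initialized); a hedged sketch of the idea, using a hypothetical helper name, looks like:

	static void smtc_setup_ipiq_offset(int tc, int cpu)	/* hypothetical name */
	{
		settc(tc);	/* select the target TC for the tc_c0 accessors */

		/*
		 * Stash cpu * sizeof(struct smtc_ipi_q) in bits 31:16 of
		 * TCContext. The RESTORE_SOME path above can then do
		 *	mfc0 v0, CP0_TCCONTEXT ; srl v0, v0, 16
		 *	la a2, IPIQ ; addu a2, a2, v0
		 * and load IPIQ[cpu].head without any in-line multiplies.
		 */
		write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
	}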