| author | Ingo Molnar <mingo@elte.hu> | 2008-10-12 06:39:30 -0400 | 
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2008-10-12 06:39:50 -0400 | 
| commit | 4c7145a1ec1bb789d5f07e47510e8bda546a7c4a (patch) | |
| tree | e2767b77e5413473a3bba302237f4669a203f183 /arch/mips/kernel/smtc.c | |
| parent | 74e91604b2452c15bbe72d77b37cf47ed0310d13 (diff) | |
| parent | fd048088306656824958e7783ffcee27e241b361 (diff) | |
Merge branch 'linus' into x86/spinlocks
Done to prevent this failure of an Octopus merge:
 Added arch/arm/include/asm/byteorder.h in both, but differently.
 ERROR: Merge conflict in arch/arm/include/asm/byteorder.h
 Auto-merging include/asm-x86/spinlock.h
 ERROR: Merge conflict in include/asm-x86/spinlock.h
 fatal: merge program failed
Diffstat (limited to 'arch/mips/kernel/smtc.c')
| -rw-r--r-- | arch/mips/kernel/smtc.c | 260 | 
1 file changed, 137 insertions(+), 123 deletions(-)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index a516286532ab..897fb2b4751c 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,4 +1,21 @@
-/* Copyright (C) 2004 Mips Technologies, Inc */
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Copyright (C) 2004 Mips Technologies, Inc
+ * Copyright (C) 2008 Kevin D. Kissell
+ */
 
 #include <linux/clockchips.h>
 #include <linux/kernel.h>
@@ -21,7 +38,6 @@
 #include <asm/time.h>
 #include <asm/addrspace.h>
 #include <asm/smtc.h>
-#include <asm/smtc_ipi.h>
 #include <asm/smtc_proc.h>
 
 /*
@@ -58,11 +74,6 @@ unsigned long irq_hwmask[NR_IRQS];
 
 asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
 
-/*
- * Clock interrupt "latch" buffers, per "CPU"
- */
-
-static atomic_t ipi_timer_latch[NR_CPUS];
 
 /*
  * Number of InterProcessor Interrupt (IPI) message buffers to allocate
@@ -70,7 +81,7 @@ static atomic_t ipi_timer_latch[NR_CPUS];
 
 #define IPIBUF_PER_CPU 4
 
-static struct smtc_ipi_q IPIQ[NR_CPUS];
+struct smtc_ipi_q IPIQ[NR_CPUS];
 static struct smtc_ipi_q freeIPIq;
 
 
@@ -282,7 +293,7 @@ static void smtc_configure_tlb(void)
  * phys_cpu_present_map and the logical/physical mappings.
  */
 
-int __init mipsmt_build_cpu_map(int start_cpu_slot)
+int __init smtc_build_cpu_map(int start_cpu_slot)
 {
 	int i, ntcs;
 
@@ -325,7 +336,12 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
 			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
 			| TCSTATUS_A);
-	write_tc_c0_tccontext(0);
+	/*
+	 * TCContext gets an offset from the base of the IPIQ array
+	 * to be used in low-level code to detect the presence of
+	 * an active IPI queue
+	 */
+	write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
 	/* Bind tc to vpe */
 	write_tc_c0_tcbind(vpe);
 	/* In general, all TCs should have the same cpu_data indications */
@@ -336,10 +352,18 @@ static void smtc_tc_setup(int vpe, int tc, int cpu)
 		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
 	cpu_data[cpu].vpe_id = vpe;
 	cpu_data[cpu].tc_id = tc;
+	/* Multi-core SMTC hasn't been tested, but be prepared */
+	cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
 }
 
+/*
+ * Tweak to get Count registes in as close a sync as possible.
+ * Value seems good for 34K-class cores.
+ */
+
+#define CP0_SKEW 8
 
-void mipsmt_prepare_cpus(void)
+void smtc_prepare_cpus(int cpus)
 {
 	int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
 	unsigned long flags;
@@ -363,13 +387,13 @@ void mipsmt_prepare_cpus(void)
 		IPIQ[i].head = IPIQ[i].tail = NULL;
 		spin_lock_init(&IPIQ[i].lock);
 		IPIQ[i].depth = 0;
-		atomic_set(&ipi_timer_latch[i], 0);
 	}
 
 	/* cpu_data index starts at zero */
 	cpu = 0;
 	cpu_data[cpu].vpe_id = 0;
 	cpu_data[cpu].tc_id = 0;
+	cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
 	cpu++;
 
 	/* Report on boot-time options */
@@ -484,7 +508,8 @@ void mipsmt_prepare_cpus(void)
 			write_vpe_c0_compare(0);
 			/* Propagate Config7 */
 			write_vpe_c0_config7(read_c0_config7());
-			write_vpe_c0_count(read_c0_count());
+			write_vpe_c0_count(read_c0_count() + CP0_SKEW);
+			ehb();
 		}
 		/* enable multi-threading within VPE */
 		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
@@ -556,7 +581,7 @@ void mipsmt_prepare_cpus(void)
 void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 {
 	extern u32 kernelsp[NR_CPUS];
-	long flags;
+	unsigned long flags;
 	int mtflags;
 
 	LOCK_MT_PRA();
@@ -585,24 +610,22 @@ void __cpuinit smtc_boot_secondary(int cpu, struct task_struct *idle)
 
 void smtc_init_secondary(void)
 {
-	/*
-	 * Start timer on secondary VPEs if necessary.
-	 * plat_timer_setup has already have been invoked by init/main
-	 * on "boot" TC. Like per_cpu_trap_init() hack, this assumes that
-	 * SMTC init code assigns TCs consdecutively and in ascending order
-	 * to across available VPEs.
-	 */
-	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
-	    ((read_c0_tcbind() & TCBIND_CURVPE)
-	    != cpu_data[smp_processor_id() - 1].vpe_id)){
-		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
-	}
-
 	local_irq_enable();
 }
 
 void smtc_smp_finish(void)
 {
+	int cpu = smp_processor_id();
+
+	/*
+	 * Lowest-numbered CPU per VPE starts a clock tick.
+	 * Like per_cpu_trap_init() hack, this assumes that
+	 * SMTC init code assigns TCs consdecutively and
+	 * in ascending order across available VPEs.
+	 */
+	if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
+		write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
+
 	printk("TC %d going on-line as CPU %d\n",
 	       cpu_data[smp_processor_id()].tc_id, smp_processor_id());
 }
@@ -753,8 +776,10 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 {
 	int tcstatus;
 	struct smtc_ipi *pipi;
-	long flags;
+	unsigned long flags;
 	int mtflags;
+	unsigned long tcrestart;
+	extern void r4k_wait_irqoff(void), __pastwait(void);
 
 	if (cpu == smp_processor_id()) {
 		printk("Cannot Send IPI to self!\n");
@@ -771,8 +796,6 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 	pipi->arg = (void *)action;
 	pipi->dest = cpu;
 	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
-		if (type == SMTC_CLOCK_TICK)
-			atomic_inc(&ipi_timer_latch[cpu]);
 		/* If not on same VPE, enqueue and send cross-VPE interrupt */
 		smtc_ipi_nq(&IPIQ[cpu], pipi);
 		LOCK_CORE_PRA();
@@ -800,22 +823,29 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
 
 	if ((tcstatus & TCSTATUS_IXMT) != 0) {
 		/*
-		 * Spin-waiting here can deadlock,
-		 * so we queue the message for the target TC.
+		 * If we're in the the irq-off version of the wait
+		 * loop, we need to force exit from the wait and
+		 * do a direct post of the IPI.
+		 */
+		if (cpu_wait == r4k_wait_irqoff) {
+			tcrestart = read_tc_c0_tcrestart();
+			if (tcrestart >= (unsigned long)r4k_wait_irqoff
+			    && tcrestart < (unsigned long)__pastwait) {
+				write_tc_c0_tcrestart(__pastwait);
+				tcstatus &= ~TCSTATUS_IXMT;
+				write_tc_c0_tcstatus(tcstatus);
+				goto postdirect;
+			}
+		}
+		/*
+		 * Otherwise we queue the message for the target TC
+		 * to pick up when he does a local_irq_restore()
 		 */
 		write_tc_c0_tchalt(0);
 		UNLOCK_CORE_PRA();
-		/* Try to reduce redundant timer interrupt messages */
-		if (type == SMTC_CLOCK_TICK) {
-			if (atomic_postincrement(&ipi_timer_latch[cpu])!=0){
-				smtc_ipi_nq(&freeIPIq, pipi);
-				return;
-			}
-		}
 		smtc_ipi_nq(&IPIQ[cpu], pipi);
 	} else {
-		if (type == SMTC_CLOCK_TICK)
-			atomic_inc(&ipi_timer_latch[cpu]);
+postdirect:
 		post_direct_ipi(cpu, pipi);
 		write_tc_c0_tchalt(0);
 		UNLOCK_CORE_PRA();
@@ -883,7 +913,7 @@ static void ipi_call_interrupt(void)
 	smp_call_function_interrupt();
 }
 
-DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
 
 void ipi_decode(struct smtc_ipi *pipi)
 {
@@ -891,20 +921,13 @@ void ipi_decode(struct smtc_ipi *pipi)
 	struct clock_event_device *cd;
 	void *arg_copy = pipi->arg;
 	int type_copy = pipi->type;
-	int ticks;
-
 	smtc_ipi_nq(&freeIPIq, pipi);
 	switch (type_copy) {
 	case SMTC_CLOCK_TICK:
 		irq_enter();
 		kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
-		cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
-		ticks = atomic_read(&ipi_timer_latch[cpu]);
-		atomic_sub(ticks, &ipi_timer_latch[cpu]);
-		while (ticks) {
-			cd->event_handler(cd);
-			ticks--;
-		}
+		cd = &per_cpu(mips_clockevent_device, cpu);
+		cd->event_handler(cd);
 		irq_exit();
 		break;
 
@@ -937,24 +960,48 @@ void ipi_decode(struct smtc_ipi *pipi)
 	}
 }
 
+/*
+ * Similar to smtc_ipi_replay(), but invoked from context restore,
+ * so it reuses the current exception frame rather than set up a
+ * new one with self_ipi.
+ */
+
 void deferred_smtc_ipi(void)
 {
-	struct smtc_ipi *pipi;
-	unsigned long flags;
-/* DEBUG */
-	int q = smp_processor_id();
+	int cpu = smp_processor_id();
 
 	/*
 	 * Test is not atomic, but much faster than a dequeue,
 	 * and the vast majority of invocations will have a null queue.
+	 * If irq_disabled when this was called, then any IPIs queued
+	 * after we test last will be taken on the next irq_enable/restore.
+	 * If interrupts were enabled, then any IPIs added after the
+	 * last test will be taken directly.
 	 */
-	if (IPIQ[q].head != NULL) {
-		while((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
-			/* ipi_decode() should be called with interrupts off */
-			local_irq_save(flags);
+
+	while (IPIQ[cpu].head != NULL) {
+		struct smtc_ipi_q *q = &IPIQ[cpu];
+		struct smtc_ipi *pipi;
+		unsigned long flags;
+
+		/*
+		 * It may be possible we'll come in with interrupts
+		 * already enabled.
+		 */
+		local_irq_save(flags);
+
+		spin_lock(&q->lock);
+		pipi = __smtc_ipi_dq(q);
+		spin_unlock(&q->lock);
+		if (pipi != NULL)
 			ipi_decode(pipi);
-			local_irq_restore(flags);
-		}
+		/*
+		 * The use of the __raw_local restore isn't
+		 * as obviously necessary here as in smtc_ipi_replay(),
+		 * but it's more efficient, given that we're already
+		 * running down the IPI queue.
+		 */
+		__raw_local_irq_restore(flags);
 	}
 }
 
@@ -975,7 +1022,7 @@ static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
 	struct smtc_ipi *pipi;
 	unsigned long tcstatus;
 	int sent;
-	long flags;
+	unsigned long flags;
 	unsigned int mtflags;
 	unsigned int vpflags;
 
@@ -1066,55 +1113,53 @@ static void setup_cross_vpe_interrupts(unsigned int nvpe)
 
 /*
  * SMTC-specific hacks invoked from elsewhere in the kernel.
- *
- * smtc_ipi_replay is called from raw_local_irq_restore which is only ever
- * called with interrupts disabled. We do rely on interrupts being disabled
- * here because using spin_lock_irqsave()/spin_unlock_irqrestore() would
- * result in a recursive call to raw_local_irq_restore().
  */
 
-static void __smtc_ipi_replay(void)
+/*
+ * smtc_ipi_replay is called from raw_local_irq_restore
+ */
+
+void smtc_ipi_replay(void)
 {
 	unsigned int cpu = smp_processor_id();
 
 	/*
 	 * To the extent that we've ever turned interrupts off,
 	 * we may have accumulated deferred IPIs. This is subtle.
-	 * If we use the smtc_ipi_qdepth() macro, we'll get an
-	 * exact number - but we'll also disable interrupts
-	 * and create a window of failure where a new IPI gets
-	 * queued after we test the depth but before we re-enable
-	 * interrupts. So long as IXMT never gets set, however,
 	 * we should be OK: If we pick up something and dispatch
 	 * it here, that's great. If we see nothing, but concurrent
 	 * with this operation, another TC sends us an IPI, IXMT
 	 * is clear, and we'll handle it as a real pseudo-interrupt
-	 * and not a pseudo-pseudo interrupt.
+	 * and not a pseudo-pseudo interrupt. The important thing
+	 * is to do the last check for queued message *after* the
+	 * re-enabling of interrupts.
 	 */
-	if (IPIQ[cpu].depth > 0) {
-		while (1) {
-			struct smtc_ipi_q *q = &IPIQ[cpu];
-			struct smtc_ipi *pipi;
-			extern void self_ipi(struct smtc_ipi *);
-
-			spin_lock(&q->lock);
-			pipi = __smtc_ipi_dq(q);
-			spin_unlock(&q->lock);
-			if (!pipi)
-				break;
+	while (IPIQ[cpu].head != NULL) {
+		struct smtc_ipi_q *q = &IPIQ[cpu];
+		struct smtc_ipi *pipi;
+		unsigned long flags;
+
+		/*
+		 * It's just possible we'll come in with interrupts
+		 * already enabled.
+		 */
+		local_irq_save(flags);
+
+		spin_lock(&q->lock);
+		pipi = __smtc_ipi_dq(q);
+		spin_unlock(&q->lock);
+		/*
+		 ** But use a raw restore here to avoid recursion.
+		 */
+		__raw_local_irq_restore(flags);
 
+		if (pipi) {
 			self_ipi(pipi);
 			smtc_cpu_stats[cpu].selfipis++;
 		}
 	}
 }
 
-void smtc_ipi_replay(void)
-{
-	raw_local_irq_disable();
-	__smtc_ipi_replay();
-}
-
 EXPORT_SYMBOL(smtc_ipi_replay);
 
 void smtc_idle_loop_hook(void)
@@ -1193,40 +1238,13 @@ void smtc_idle_loop_hook(void)
 		}
 	}
 
-	/*
-	 * Now that we limit outstanding timer IPIs, check for hung TC
-	 */
-	for (tc = 0; tc < NR_CPUS; tc++) {
-		/* Don't check ourself - we'll dequeue IPIs just below */
-		if ((tc != smp_processor_id()) &&
-		    atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
-			if (clock_hang_reported[tc] == 0) {
-				pdb_msg += sprintf(pdb_msg,
-					"TC %d looks hung with timer latch at %d\n",
-					tc, atomic_read(&ipi_timer_latch[tc]));
-				clock_hang_reported[tc]++;
-			}
-		}
-	}
 	emt(mtflags);
 	local_irq_restore(flags);
 	if (pdb_msg != &id_ho_db_msg[0])
 		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
 #endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
 
-	/*
-	 * Replay any accumulated deferred IPIs. If "Instant Replay"
-	 * is in use, there should never be any.
-	 */
-#ifndef CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY
-	{
-		unsigned long flags;
-
-		local_irq_save(flags);
-		__smtc_ipi_replay();
-		local_irq_restore(flags);
-	}
-#endif /* CONFIG_MIPS_MT_SMTC_INSTANT_REPLAY */
+	smtc_ipi_replay();
 }
 
 void smtc_soft_dump(void)
@@ -1242,10 +1260,6 @@ void smtc_soft_dump(void)
 		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
 	}
 	smtc_ipi_qdump();
-	printk("Timer IPI Backlogs:\n");
-	for (i=0; i < NR_CPUS; i++) {
-		printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
-	}
 	printk("%d Recoveries of \"stolen\" FPU\n",
 	       atomic_read(&smtc_fpu_recoveries));
 }
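
The core pattern this patch adopts in smtc_ipi_replay() and deferred_smtc_ipi() is a drain loop over the per-CPU IPI queue: test the queue head without the lock, take the lock only for the actual dequeue, and re-check the head after every message so work queued concurrently is still picked up. The sketch below is an illustrative, self-contained userspace analogue of that loop, not the kernel code: ipi_msg, ipi_queue, ipi_nq, ipi_dq, and ipi_replay are simplified stand-ins, a pthread mutex substitutes for the kernel spinlock, and the interrupt masking the real code also performs (local_irq_save()/__raw_local_irq_restore()) is omitted.

```c
/* Sketch only: mimics the drain-loop shape, with simplified types. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct ipi_msg {
	int type;
	struct ipi_msg *next;
};

struct ipi_queue {
	struct ipi_msg *head, *tail;
	pthread_mutex_t lock;
};

/* Enqueue one message at the tail, under the queue lock. */
static void ipi_nq(struct ipi_queue *q, struct ipi_msg *m)
{
	pthread_mutex_lock(&q->lock);
	m->next = NULL;
	if (q->tail)
		q->tail->next = m;
	else
		q->head = m;
	q->tail = m;
	pthread_mutex_unlock(&q->lock);
}

/* Dequeue one message; the lock is held only for the list surgery. */
static struct ipi_msg *ipi_dq(struct ipi_queue *q)
{
	struct ipi_msg *m;

	pthread_mutex_lock(&q->lock);
	m = q->head;
	if (m != NULL) {
		q->head = m->next;
		if (q->head == NULL)
			q->tail = NULL;
	}
	pthread_mutex_unlock(&q->lock);
	return m;
}

/* Analogue of the new drain loop: keep going while the (unlocked)
 * head test sees work, so messages added between iterations are
 * taken on a later pass rather than silently dropped. */
static void ipi_replay(struct ipi_queue *q)
{
	while (q->head != NULL) {
		struct ipi_msg *m = ipi_dq(q);

		if (m != NULL) {
			printf("handled IPI type %d\n", m->type);
			free(m);
		}
	}
}

int main(void)
{
	struct ipi_queue q = { NULL, NULL, PTHREAD_MUTEX_INITIALIZER };

	for (int i = 0; i < 3; i++) {
		struct ipi_msg *m = malloc(sizeof(*m));
		m->type = i;
		ipi_nq(&q, m);
	}
	ipi_replay(&q);
	return 0;
}
```

The unlocked head test before each dequeue mirrors the reasoning in the patched comments: an exact depth count is unnecessary, because anything enqueued after the final test is handled by the next interrupt-enable or replay path rather than lost.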
