author     Ralf Baechle <ralf@linux-mips.org>   2007-10-11 18:46:09 -0400
committer  Ralf Baechle <ralf@linux-mips.org>   2007-10-11 18:46:09 -0400
commit     ea5804015c0ce67741eb4b156a071fb4f415345f (patch)
tree       bc4da66c0d9a531a17159b1cb001ad8e96d24487 /arch/mips/kernel/smtc.c
parent     7bcf7717b6a047c272410d0cd00213185fe6b99d (diff)
[MIPS] Dyntick support for SMTC:
The kernel currently only supports broadcasting of the timer interrupt from a single timer, not multicasting into two multicast groups of processors. So the mechanism implemented for SMTC works by broadcasting the cp0 compare interrupt on VPE 0 and ignoring it on any additional VPEs.

Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
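In short, the patch turns the old per-CPU tick counter into an atomic "latch": whoever sends an SMTC_CLOCK_TICK IPI increments the target CPU's latch, and the receiving CPU drains the latch by invoking its clockevent handler once per latched tick, so ticks that accumulate while a TC is halted or busy are replayed rather than lost. The stand-alone sketch below illustrates that latch-and-replay pattern; it is only an illustration of the idea (hypothetical function names, C11 atomics in place of the kernel's atomic_t, plain function calls in place of real IPIs), not the kernel code itself:

    /*
     * Illustrative sketch of the latch-and-replay scheme (not kernel code):
     * a per-CPU atomic counter of pending clock ticks, incremented on the
     * sending side and drained on the receiving side.
     */
    #include <stdatomic.h>
    #include <stdio.h>

    #define NR_CPUS 4                       /* arbitrary, for the example */

    static atomic_int ipi_timer_latch[NR_CPUS];  /* pending ticks per CPU */

    /* stand-in for the per-CPU dummy clock_event_device handler */
    static void clockevent_handler(int cpu)
    {
            printf("cpu %d: tick\n", cpu);
    }

    /* sender side: note one more pending tick, then post the IPI */
    static void send_clock_tick(int cpu)
    {
            atomic_fetch_add(&ipi_timer_latch[cpu], 1);
            /* ...enqueue and post the SMTC_CLOCK_TICK IPI here... */
    }

    /* receiver side: replay every tick that piled up while we were away */
    static void handle_clock_tick(int cpu)
    {
            int ticks = atomic_load(&ipi_timer_latch[cpu]);

            atomic_fetch_sub(&ipi_timer_latch[cpu], ticks);
            while (ticks--)
                    clockevent_handler(cpu);
    }

    int main(void)
    {
            send_clock_tick(1);
            send_clock_tick(1);     /* second tick accumulates in the latch */
            handle_clock_tick(1);   /* both ticks are replayed here */
            return 0;
    }

Run as-is, this prints two ticks for CPU 1 even though the handler was entered only once; that catch-up behaviour is what the new ipi_decode() code in the diff below implements.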
Diffstat (limited to 'arch/mips/kernel/smtc.c')
-rw-r--r--  arch/mips/kernel/smtc.c | 67
1 file changed, 32 insertions(+), 35 deletions(-)
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
index 137183bba54f..4d1ac9692dcd 100644
--- a/arch/mips/kernel/smtc.c
+++ b/arch/mips/kernel/smtc.c
@@ -1,5 +1,6 @@
 /* Copyright (C) 2004 Mips Technologies, Inc */
 
+#include <linux/clockchips.h>
 #include <linux/kernel.h>
 #include <linux/sched.h>
 #include <linux/cpumask.h>
@@ -62,7 +63,7 @@ asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
  * Clock interrupt "latch" buffers, per "CPU"
  */
 
-unsigned int ipi_timer_latch[NR_CPUS];
+static atomic_t ipi_timer_latch[NR_CPUS];
 
 /*
  * Number of InterProcessor Interupt (IPI) message buffers to allocate
@@ -296,8 +297,10 @@ int __init mipsmt_build_cpu_map(int start_cpu_slot)
                 __cpu_number_map[i] = i;
                 __cpu_logical_map[i] = i;
         }
+#ifdef CONFIG_MIPS_MT_FPAFF
         /* Initialize map of CPUs with FPUs */
         cpus_clear(mt_fpu_cpumask);
+#endif
 
         /* One of those TC's is the one booting, and not a secondary... */
         printk("%i available secondary CPU TC(s)\n", i - 1);
@@ -359,7 +362,7 @@ void mipsmt_prepare_cpus(void)
                 IPIQ[i].head = IPIQ[i].tail = NULL;
                 spin_lock_init(&IPIQ[i].lock);
                 IPIQ[i].depth = 0;
-                ipi_timer_latch[i] = 0;
+                atomic_set(&ipi_timer_latch[i], 0);
         }
 
         /* cpu_data index starts at zero */
@@ -482,10 +485,12 @@ void mipsmt_prepare_cpus(void)
 
         /* Set up coprocessor affinity CPU mask(s) */
 
+#ifdef CONFIG_MIPS_MT_FPAFF
         for (tc = 0; tc < ntc; tc++) {
                 if (cpu_data[tc].options & MIPS_CPU_FPU)
                         cpu_set(tc, mt_fpu_cpumask);
         }
+#endif
 
         /* set up ipi interrupts... */
 
@@ -702,7 +707,7 @@ static void smtc_ipi_qdump(void)
  * be done with the atomic.h primitives). And since this is
  * MIPS MT, we can assume that we have LL/SC.
  */
-static __inline__ int atomic_postincrement(unsigned int *pv)
+static inline int atomic_postincrement(atomic_t *v)
 {
         unsigned long result;
 
@@ -714,8 +719,8 @@ static __inline__ int atomic_postincrement(unsigned int *pv)
         "       sc      %1, %2                  \n"
         "       beqz    %1, 1b                  \n"
         __WEAK_LLSC_MB
-        : "=&r" (result), "=&r" (temp), "=m" (*pv)
-        : "m" (*pv)
+        : "=&r" (result), "=&r" (temp), "=m" (v->counter)
+        : "m" (v->counter)
         : "memory");
 
         return result;
@@ -743,6 +748,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
         pipi->arg = (void *)action;
         pipi->dest = cpu;
         if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+                if (type == SMTC_CLOCK_TICK)
+                        atomic_inc(&ipi_timer_latch[cpu]);
                 /* If not on same VPE, enqueue and send cross-VPE interupt */
                 smtc_ipi_nq(&IPIQ[cpu], pipi);
                 LOCK_CORE_PRA();
@@ -784,6 +791,8 @@ void smtc_send_ipi(int cpu, int type, unsigned int action)
                 }
                 smtc_ipi_nq(&IPIQ[cpu], pipi);
         } else {
+                if (type == SMTC_CLOCK_TICK)
+                        atomic_inc(&ipi_timer_latch[cpu]);
                 post_direct_ipi(cpu, pipi);
                 write_tc_c0_tchalt(0);
                 UNLOCK_CORE_PRA();
@@ -801,6 +810,7 @@ static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
         unsigned long tcrestart;
         extern u32 kernelsp[NR_CPUS];
         extern void __smtc_ipi_vector(void);
+//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
 
         /* Extract Status, EPC from halted TC */
         tcstatus = read_tc_c0_tcstatus();
@@ -851,25 +861,31 @@ static void ipi_call_interrupt(void)
         smp_call_function_interrupt();
 }
 
+DECLARE_PER_CPU(struct clock_event_device, smtc_dummy_clockevent_device);
+
 void ipi_decode(struct smtc_ipi *pipi)
 {
+        unsigned int cpu = smp_processor_id();
+        struct clock_event_device *cd;
         void *arg_copy = pipi->arg;
         int type_copy = pipi->type;
-        int dest_copy = pipi->dest;
+        int ticks;
 
         smtc_ipi_nq(&freeIPIq, pipi);
         switch (type_copy) {
         case SMTC_CLOCK_TICK:
                 irq_enter();
-                kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + cp0_compare_irq]++;
-                /* Invoke Clock "Interrupt" */
-                ipi_timer_latch[dest_copy] = 0;
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
-                clock_hang_reported[dest_copy] = 0;
-#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
-                local_timer_interrupt(0, NULL);
+                kstat_this_cpu.irqs[MIPS_CPU_IRQ_BASE + 1]++;
+                cd = &per_cpu(smtc_dummy_clockevent_device, cpu);
+                ticks = atomic_read(&ipi_timer_latch[cpu]);
+                atomic_sub(ticks, &ipi_timer_latch[cpu]);
+                while (ticks) {
+                        cd->event_handler(cd);
+                        ticks--;
+                }
                 irq_exit();
                 break;
+
         case LINUX_SMP_IPI:
                 switch ((int)arg_copy) {
                 case SMP_RESCHEDULE_YOURSELF:
@@ -921,25 +937,6 @@ void deferred_smtc_ipi(void)
 }
 
 /*
- * Send clock tick to all TCs except the one executing the funtion
- */
-
-void smtc_timer_broadcast(void)
-{
-        int cpu;
-        int myTC = cpu_data[smp_processor_id()].tc_id;
-        int myVPE = cpu_data[smp_processor_id()].vpe_id;
-
-        smtc_cpu_stats[smp_processor_id()].timerints++;
-
-        for_each_online_cpu(cpu) {
-                if (cpu_data[cpu].vpe_id == myVPE &&
-                    cpu_data[cpu].tc_id != myTC)
-                        smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
-        }
-}
-
-/*
  * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
  * set via cross-VPE MTTR manipulation of the Cause register. It would be
  * in some regards preferable to have external logic for "doorbell" hardware
@@ -1180,11 +1177,11 @@ void smtc_idle_loop_hook(void)
         for (tc = 0; tc < NR_CPUS; tc++) {
                 /* Don't check ourself - we'll dequeue IPIs just below */
                 if ((tc != smp_processor_id()) &&
-                    ipi_timer_latch[tc] > timerq_limit) {
+                    atomic_read(&ipi_timer_latch[tc]) > timerq_limit) {
                         if (clock_hang_reported[tc] == 0) {
                                 pdb_msg += sprintf(pdb_msg,
                                         "TC %d looks hung with timer latch at %d\n",
-                                        tc, ipi_timer_latch[tc]);
+                                        tc, atomic_read(&ipi_timer_latch[tc]));
                                 clock_hang_reported[tc]++;
                         }
                 }
@@ -1225,7 +1222,7 @@ void smtc_soft_dump(void)
         smtc_ipi_qdump();
         printk("Timer IPI Backlogs:\n");
         for (i=0; i < NR_CPUS; i++) {
-                printk("%d: %d\n", i, ipi_timer_latch[i]);
+                printk("%d: %d\n", i, atomic_read(&ipi_timer_latch[i]));
         }
         printk("%d Recoveries of \"stolen\" FPU\n",
                 atomic_read(&smtc_fpu_recoveries));