author		Luke Browning <lukebr@linux.vnet.ibm.com>	2008-04-28 00:32:34 -0400
committer	Jeremy Kerr <jk@ozlabs.org>			2008-05-04 23:33:43 -0400
commit		7a2142002f29a7b398c49da9bdec712dc57087c7 (patch)
tree		bd15498cbf76cabda2d42868ffc6936738d5c578 /arch/powerpc/platforms
parent		093c16bf9b107433643cbf0843ca7808df90823b (diff)
[POWERPC] spufs: try to route SPU interrupts to local node
Currently, we re-route SPU interrupts to the current cpu, which may be on
a remote node. In the case of time slicing, all spu interrupts will end up
routed to the same cpu, where the spusched_tick occurs.

This change routes mfc interrupts to the cpu where the controlling thread
last ran, provided that cpu is on the same node as the spu (otherwise
don't reroute interrupts).

This should improve performance and provide a more predictable environment
for processing spu exceptions. In the past we have seen concurrent delivery
of spu exceptions to two cpus. This eliminates that concern.

Signed-off-by: Luke Browning <lukebr@linux.vnet.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
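To make the new guard easier to follow, here is a minimal user-space sketch of
the logic the patch adds to cpu_affinity_set(): compute and write a route only
when the target cpu's node matches the spu's node. Everything suffixed _stub
is a hypothetical stand-in for a kernel helper, the two-cpus-per-node layout
is invented for illustration, and the cpumask intersection used in the real
patch is simplified to a direct node comparison.

	#include <inttypes.h>
	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel helpers; the
	 * two-cpus-per-node mapping is invented purely for illustration. */
	static int cpu_to_node_stub(int cpu)
	{
		return cpu / 2;
	}

	static uint64_t iic_get_target_id_stub(int cpu)
	{
		return (uint64_t)cpu;
	}

	/* Mirror of the patched cpu_affinity_set() logic: compute a route
	 * only when the cpu shares a node with the spu, otherwise leave the
	 * existing route in place. (The real patch compares cpumasks; a
	 * direct node comparison is used here for brevity.) */
	static int compute_route(int spu_node, int cpu, uint64_t *route)
	{
		if (cpu_to_node_stub(cpu) != spu_node)
			return -1;	/* remote node: don't reroute */

		/* Pack the target id into three 16-bit fields of INT_Route,
		 * presumably one per interrupt class, exactly as the shifts
		 * in the diff below do. */
		uint64_t target = iic_get_target_id_stub(cpu);
		*route = target << 48 | target << 32 | target << 16;
		return 0;
	}

	int main(void)
	{
		uint64_t route;

		if (compute_route(0, 1, &route) == 0)	/* cpu 1, node 0 */
			printf("route = 0x%016" PRIx64 "\n", route);
		if (compute_route(0, 3, &route) != 0)	/* cpu 3, node 1 */
			printf("cpu 3 is remote; route left unchanged\n");
		return 0;
	}

Note that returning early for a remote cpu leaves whatever route was
programmed last, which matches the commit message's "otherwise don't reroute
interrupts" fallback.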
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--	arch/powerpc/platforms/cell/spu_priv1_mmio.c	16
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c	4
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h	1
-rw-r--r--	arch/powerpc/platforms/cell/spufs/switch.c	8
4 files changed, 26 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 67fa7247b80a..906a0a2a9fe1 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -28,6 +28,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/device.h>
+#include <linux/sched.h>
 
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
@@ -75,8 +76,19 @@ static u64 int_stat_get(struct spu *spu, int class)
 
 static void cpu_affinity_set(struct spu *spu, int cpu)
 {
-	u64 target = iic_get_target_id(cpu);
-	u64 route = target << 48 | target << 32 | target << 16;
+	u64 target;
+	u64 route;
+
+	if (nr_cpus_node(spu->node)) {
+		cpumask_t spumask = node_to_cpumask(spu->node);
+		cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
+
+		if (!cpus_intersects(spumask, cpumask))
+			return;
+	}
+
+	target = iic_get_target_id(cpu);
+	route = target << 48 | target << 32 | target << 16;
 	out_be64(&spu->priv1->int_route_RW, route);
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 62280c292aac..1df7d6d152c7 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -140,6 +140,9 @@ void __spu_update_sched_info(struct spu_context *ctx)
 	 * if it is timesliced or preempted.
 	 */
 	ctx->cpus_allowed = current->cpus_allowed;
+
+	/* Save the current cpu id for spu interrupt routing. */
+	ctx->last_ran = raw_smp_processor_id();
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -243,7 +246,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 	spu_restore(&ctx->csa, spu);
 	spu->timestamp = jiffies;
-	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
 	ctx->state = SPU_STATE_RUNNABLE;
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 7312745b7540..dc3a215a6a22 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -121,6 +121,7 @@ struct spu_context {
 	cpumask_t cpus_allowed;
 	int policy;
 	int prio;
+	int last_ran;
 
 	/* statistics */
 	struct {
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 9748808b0f19..47c658051fcb 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1704,6 +1704,13 @@ static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
 	eieio();
 }
 
+static inline void set_int_route(struct spu_state *csa, struct spu *spu)
+{
+	struct spu_context *ctx = spu->ctx;
+
+	spu_cpu_affinity_set(spu, ctx->last_ran);
+}
+
 static inline void restore_other_spu_access(struct spu_state *csa,
 					    struct spu *spu)
 {
@@ -2014,6 +2021,7 @@ static void restore_csa(struct spu_state *next, struct spu *spu)
 	check_ppuint_mb_stat(next, spu);	/* Step 67. */
 	spu_invalidate_slbs(spu);		/* Modified Step 68. */
 	restore_mfc_sr1(next, spu);		/* Step 69. */
+	set_int_route(next, spu);		/* NEW */
 	restore_other_spu_access(next, spu);	/* Step 70. */
 	restore_spu_runcntl(next, spu);		/* Step 71. */
 	restore_mfc_cntl(next, spu);		/* Step 72. */