 arch/powerpc/platforms/cell/spu_priv1_mmio.c | 16 ++++++++++++++--
 arch/powerpc/platforms/cell/spufs/sched.c    |  4 +++-
 arch/powerpc/platforms/cell/spufs/spufs.h    |  1 +
 arch/powerpc/platforms/cell/spufs/switch.c   |  8 ++++++++
 4 files changed, 26 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 67fa7247b80a..906a0a2a9fe1 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -28,6 +28,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/device.h>
+#include <linux/sched.h>
 
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
@@ -75,8 +76,19 @@ static u64 int_stat_get(struct spu *spu, int class)
 
 static void cpu_affinity_set(struct spu *spu, int cpu)
 {
-        u64 target = iic_get_target_id(cpu);
-        u64 route = target << 48 | target << 32 | target << 16;
+        u64 target;
+        u64 route;
+
+        if (nr_cpus_node(spu->node)) {
+                cpumask_t spumask = node_to_cpumask(spu->node);
+                cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
+
+                if (!cpus_intersects(spumask, cpumask))
+                        return;
+        }
+
+        target = iic_get_target_id(cpu);
+        route = target << 48 | target << 32 | target << 16;
         out_be64(&spu->priv1->int_route_RW, route);
 }
 
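The unchanged core of cpu_affinity_set() replicates the IIC target id returned by iic_get_target_id() into three 16-bit slots of the value written to int_route_RW; reading the shifts above, this presumably selects the routing target for SPU interrupt classes 0, 1 and 2. The standalone sketch below is not part of the patch: build_int_route() and the per-class field interpretation are assumptions made only to illustrate the resulting bit pattern.

/*
 * Illustrative userspace sketch only (not from the patch): shows the
 * route value that cpu_affinity_set() builds from a single IIC target
 * id. The "one 16-bit slot per interrupt class" reading is an
 * assumption based on the shifts used above.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t build_int_route(uint64_t target)
{
        /* Replicate the target into the class 0, 1 and 2 slots. */
        return target << 48 | target << 32 | target << 16;
}

int main(void)
{
        /* Hypothetical target id, standing in for iic_get_target_id(). */
        uint64_t target = 0x11;

        /* Prints: route = 0x0011001100110000 */
        printf("route = 0x%016llx\n",
               (unsigned long long)build_int_route(target));
        return 0;
}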
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 62280c292aac..1df7d6d152c7 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -140,6 +140,9 @@ void __spu_update_sched_info(struct spu_context *ctx)
          * if it is timesliced or preempted.
          */
         ctx->cpus_allowed = current->cpus_allowed;
+
+        /* Save the current cpu id for spu interrupt routing. */
+        ctx->last_ran = raw_smp_processor_id();
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -243,7 +246,6 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
         spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
         spu_restore(&ctx->csa, spu);
         spu->timestamp = jiffies;
-        spu_cpu_affinity_set(spu, raw_smp_processor_id());
         spu_switch_notify(spu, ctx);
         ctx->state = SPU_STATE_RUNNABLE;
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 7312745b7540..dc3a215a6a22 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -121,6 +121,7 @@ struct spu_context {
         cpumask_t cpus_allowed;
         int policy;
         int prio;
+        int last_ran;
 
         /* statistics */
         struct {
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index 9748808b0f19..47c658051fcb 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -1704,6 +1704,13 @@ static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
         eieio();
 }
 
+static inline void set_int_route(struct spu_state *csa, struct spu *spu)
+{
+        struct spu_context *ctx = spu->ctx;
+
+        spu_cpu_affinity_set(spu, ctx->last_ran);
+}
+
 static inline void restore_other_spu_access(struct spu_state *csa,
                                             struct spu *spu)
 {
@@ -2014,6 +2021,7 @@ static void restore_csa(struct spu_state *next, struct spu *spu)
         check_ppuint_mb_stat(next, spu);        /* Step 67. */
         spu_invalidate_slbs(spu);               /* Modified Step 68. */
         restore_mfc_sr1(next, spu);             /* Step 69. */
+        set_int_route(next, spu);               /* NEW */
         restore_other_spu_access(next, spu);    /* Step 70. */
         restore_spu_runcntl(next, spu);         /* Step 71. */
         restore_mfc_cntl(next, spu);            /* Step 72. */
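Taken together, the change moves SPU interrupt routing out of spu_bind_context(), which executed on whatever CPU happened to perform the bind, and into the context-restore path: the scheduler records the CPU a context last ran on in ctx->last_ran, and restore_csa() routes the SPU's interrupts to that CPU through the new set_int_route(), while cpu_affinity_set() now declines to update the route when the target CPU's node shares no CPUs with the SPU's node. The userspace mock below sketches that control flow under simplified assumptions: every *_mock name is invented, and the cpumask intersection test is reduced to a plain same-node comparison.

/*
 * Standalone mock of the patch's control flow (assumptions only, not
 * kernel code): the scheduler side records the last CPU, the restore
 * side routes interrupts to it, and routing is skipped when that CPU
 * is not on the SPU's node.
 */
#include <stdio.h>

struct mock_spu { int node; int routed_to_cpu; };
struct mock_ctx { int last_ran; };

/* Pretend topology: CPUs 0-1 on node 0, CPUs 2-3 on node 1. */
static int cpu_to_node_mock(int cpu) { return cpu / 2; }

/* Mirrors the new guard in cpu_affinity_set(): only route within the SPU's node. */
static void cpu_affinity_set_mock(struct mock_spu *spu, int cpu)
{
        if (cpu_to_node_mock(cpu) != spu->node)
                return;
        spu->routed_to_cpu = cpu;
}

/* Scheduler side: remember where the context last ran (__spu_update_sched_info). */
static void update_sched_info_mock(struct mock_ctx *ctx, int current_cpu)
{
        ctx->last_ran = current_cpu;
}

/* Restore side: equivalent of set_int_route() called from restore_csa(). */
static void set_int_route_mock(struct mock_spu *spu, struct mock_ctx *ctx)
{
        cpu_affinity_set_mock(spu, ctx->last_ran);
}

int main(void)
{
        struct mock_spu spu = { .node = 0, .routed_to_cpu = -1 };
        struct mock_ctx ctx;

        update_sched_info_mock(&ctx, 1);        /* context last ran on CPU 1 (node 0) */
        set_int_route_mock(&spu, &ctx);
        printf("routed to CPU %d\n", spu.routed_to_cpu);        /* 1 */

        update_sched_info_mock(&ctx, 3);        /* CPU 3 is on node 1 */
        set_int_route_mock(&spu, &ctx);
        printf("routed to CPU %d\n", spu.routed_to_cpu);        /* unchanged: still 1 */
        return 0;
}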