author		Andre Detsch <adetsch@br.ibm.com>	2007-07-20 15:39:33 -0400
committer	Arnd Bergmann <arnd@klappe.arndb.de>	2007-07-20 15:41:50 -0400
commit		27ec41d3a1d4df2b7cd190e93aad22ab86a72aa1 (patch)
tree		6c9d5af3fc3c3cfbef390eb34caf4dc7e7a3913e /arch
parent		e840cfe6814d6f13ecb86cff7097ad7259df502e (diff)
[CELL] spufs: add spu stats in sysfs and ctx stat file in spufs
This patch exports per-context statistics in spufs, as well as spu
statistics in sysfs.

It was formed by merging:
"spufs: add spu stats in sysfs"            From: Christoph Hellwig
"spufs: add stat file to spufs"            From: Christoph Hellwig
"spufs: fix libassist accounting"          From: Jeremy Kerr
"spusched: fix spu utilization statistics" From: Luke Browning

and some adjustments by myself, after suggestions on cbe-oss-dev.
Having separate patches was making the review process harder than it
should be, as we ended up integrating spu and ctx statistics accounting
much more tightly than in the first implementation.

Signed-off-by: Andre Detsch <adetsch@br.ibm.com>
Signed-off-by: Jeremy Kerr <jk@ozlabs.org>
Signed-off-by: Arnd Bergmann <arnd.bergmann@de.ibm.com>
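For reference, both the new per-context stat file and the per-spu sysfs stat
file emit one line of thirteen whitespace-separated fields, matching the
seq_printf()/sprintf() format strings in the diff below: a state name, four
times in milliseconds, and eight event counters. A minimal sketch of a
userspace reader follows; the /spu/myctx/stat path is a hypothetical mount
point, and the last five field names are inferred from the context statistics
struct rather than spelled out in the visible hunks.

/*
 * Minimal sketch of a userspace reader for the new ctx "stat" file.
 * Field order follows the format strings in the diff below; the path
 * is a hypothetical spufs mount point, and the last five field names
 * are inferred, not confirmed by the visible hunks.
 */
#include <stdio.h>

int main(void)
{
	char state[16];
	unsigned long long user_ms, system_ms, iowait_ms, loaded_ms;
	unsigned long long vol_cs, invol_cs, slb_flt, hash_flt;
	unsigned long long min_flt, maj_flt, class2_intr, libassist;
	FILE *f = fopen("/spu/myctx/stat", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%15s %llu %llu %llu %llu %llu %llu %llu %llu "
		      "%llu %llu %llu %llu",
		   state, &user_ms, &system_ms, &iowait_ms, &loaded_ms,
		   &vol_cs, &invol_cs, &slb_flt, &hash_flt,
		   &min_flt, &maj_flt, &class2_intr, &libassist) == 13)
		printf("state=%s user=%llums system=%llums iowait=%llums\n",
		       state, user_ms, system_ms, iowait_ms);
	fclose(f);
	return 0;
}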
Diffstat (limited to 'arch')
-rw-r--r--	arch/powerpc/platforms/cell/spu_base.c      | 24
-rw-r--r--	arch/powerpc/platforms/cell/spufs/context.c |  3
-rw-r--r--	arch/powerpc/platforms/cell/spufs/fault.c   |  8
-rw-r--r--	arch/powerpc/platforms/cell/spufs/file.c    | 32
-rw-r--r--	arch/powerpc/platforms/cell/spufs/run.c     | 10
-rw-r--r--	arch/powerpc/platforms/cell/spufs/sched.c   | 22
-rw-r--r--	arch/powerpc/platforms/cell/spufs/spufs.h   | 63
7 files changed, 89 insertions, 73 deletions
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index c563066e640d..caaf2bf78cad 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -553,6 +553,7 @@ static int __init create_spu(void *data)
 	int ret;
 	static int number;
 	unsigned long flags;
+	struct timespec ts;
 
 	ret = -ENOMEM;
 	spu = kzalloc(sizeof (*spu), GFP_KERNEL);
@@ -586,8 +587,9 @@ static int __init create_spu(void *data)
 	spin_unlock_irqrestore(&spu_list_lock, flags);
 	mutex_unlock(&spu_mutex);
 
-	spu->stats.utilization_state = SPU_UTIL_IDLE;
-	spu->stats.tstamp = jiffies;
+	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
+	ktime_get_ts(&ts);
+	spu->stats.tstamp = timespec_to_ns(&ts);
 
 	goto out;
 
@@ -608,12 +610,20 @@ static const char *spu_state_names[] = {
 static unsigned long long spu_acct_time(struct spu *spu,
 		enum spu_utilization_state state)
 {
+	struct timespec ts;
 	unsigned long long time = spu->stats.times[state];
 
-	if (spu->stats.utilization_state == state)
-		time += jiffies - spu->stats.tstamp;
+	/*
+	 * If the spu is idle or the context is stopped, utilization
+	 * statistics are not updated.  Apply the time delta from the
+	 * last recorded state of the spu.
+	 */
+	if (spu->stats.util_state == state) {
+		ktime_get_ts(&ts);
+		time += timespec_to_ns(&ts) - spu->stats.tstamp;
+	}
 
-	return jiffies_to_msecs(time);
+	return time / NSEC_PER_MSEC;
 }
 
 
@@ -623,11 +633,11 @@ static ssize_t spu_stat_show(struct sys_device *sysdev, char *buf)
 
 	return sprintf(buf, "%s %llu %llu %llu %llu "
 		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
-		spu_state_names[spu->stats.utilization_state],
+		spu_state_names[spu->stats.util_state],
 		spu_acct_time(spu, SPU_UTIL_USER),
 		spu_acct_time(spu, SPU_UTIL_SYSTEM),
 		spu_acct_time(spu, SPU_UTIL_IOWAIT),
-		spu_acct_time(spu, SPU_UTIL_IDLE),
+		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
 		spu->stats.vol_ctx_switch,
 		spu->stats.invol_ctx_switch,
 		spu->stats.slb_flt,
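The spu_base.c changes above replace jiffies-based accounting with monotonic
nanosecond timestamps: ktime_get_ts() plus timespec_to_ns() at every state
switch, and a plain division by NSEC_PER_MSEC when reporting. Below is a
self-contained userspace analogue of that pattern, with
clock_gettime(CLOCK_MONOTONIC) standing in for ktime_get_ts(); all names are
illustrative, not kernel API.

#include <stdio.h>
#include <time.h>

#define NSEC_PER_MSEC 1000000ULL

enum util_state {
	UTIL_USER,
	UTIL_SYSTEM,
	UTIL_IOWAIT,
	UTIL_IDLE_LOADED,
	UTIL_MAX
};

struct stats {
	enum util_state util_state;
	unsigned long long tstamp;          /* ns at last state switch */
	unsigned long long times[UTIL_MAX]; /* accumulated ns per state */
};

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

/* Mirrors spu_acct_time(): if the queried state is still current, the
 * open interval since the last switch has not been folded into times[]
 * yet, so charge it before reporting in milliseconds. */
static unsigned long long acct_time_ms(const struct stats *s,
				       enum util_state state)
{
	unsigned long long time = s->times[state];

	if (s->util_state == state)
		time += now_ns() - s->tstamp;
	return time / NSEC_PER_MSEC;
}

static void switch_state(struct stats *s, enum util_state new_state)
{
	unsigned long long curtime = now_ns();

	s->times[s->util_state] += curtime - s->tstamp;
	s->util_state = new_state;
	s->tstamp = curtime;
}

int main(void)
{
	struct stats s = { .util_state = UTIL_IDLE_LOADED, .tstamp = now_ns() };

	switch_state(&s, UTIL_USER);
	/* ... spu runs user code here ... */
	switch_state(&s, UTIL_IDLE_LOADED);
	printf("user: %llu ms\n", acct_time_ms(&s, UTIL_USER));
	return 0;
}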
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c
index 6d7bd60f5380..0e5e55f53c8b 100644
--- a/arch/powerpc/platforms/cell/spufs/context.c
+++ b/arch/powerpc/platforms/cell/spufs/context.c
@@ -59,8 +59,7 @@ struct spu_context *alloc_spu_context(struct spu_gang *gang)
 		spu_gang_add_ctx(gang, ctx);
 	ctx->cpus_allowed = current->cpus_allowed;
 	spu_set_timeslice(ctx);
-	ctx->stats.execution_state = SPUCTX_UTIL_USER;
-	ctx->stats.tstamp = jiffies;
+	ctx->stats.util_state = SPU_UTIL_IDLE_LOADED;
 
 	atomic_inc(&nr_spu_contexts);
 	goto out;
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index f53a07437472..917eab4be486 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -179,16 +179,14 @@ int spufs_handle_class1(struct spu_context *ctx)
 	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
 		return 0;
 
-	spuctx_switch_state(ctx, SPUCTX_UTIL_IOWAIT);
+	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);
 
 	pr_debug("ctx %p: ea %016lx, dsisr %016lx state %d\n", ctx, ea,
 		dsisr, ctx->state);
 
 	ctx->stats.hash_flt++;
-	if (ctx->state == SPU_STATE_RUNNABLE) {
+	if (ctx->state == SPU_STATE_RUNNABLE)
 		ctx->spu->stats.hash_flt++;
-		spu_switch_state(ctx->spu, SPU_UTIL_IOWAIT);
-	}
 
 	/* we must not hold the lock when entering spu_handle_mm_fault */
 	spu_release(ctx);
@@ -226,7 +224,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	} else
 		spufs_handle_dma_error(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);
 
-	spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);
+	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(spufs_handle_class1);
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
index fe164112b3d0..9351db9472d9 100644
--- a/arch/powerpc/platforms/cell/spufs/file.c
+++ b/arch/powerpc/platforms/cell/spufs/file.c
@@ -2079,14 +2079,26 @@ static const char *ctx_state_names[] = {
 };
 
 static unsigned long long spufs_acct_time(struct spu_context *ctx,
-		enum spuctx_execution_state state)
+		enum spu_utilization_state state)
 {
-	unsigned long time = ctx->stats.times[state];
+	struct timespec ts;
+	unsigned long long time = ctx->stats.times[state];
 
-	if (ctx->stats.execution_state == state)
-		time += jiffies - ctx->stats.tstamp;
+	/*
+	 * In general, utilization statistics are updated by the controlling
+	 * thread as the spu context moves through various well defined
+	 * state transitions, but if the context is lazily loaded its
+	 * utilization statistics are not updated as the controlling thread
+	 * is not tightly coupled with the execution of the spu context.  We
+	 * calculate and apply the time delta from the last recorded state
+	 * of the spu context.
+	 */
+	if (ctx->spu && ctx->stats.util_state == state) {
+		ktime_get_ts(&ts);
+		time += timespec_to_ns(&ts) - ctx->stats.tstamp;
+	}
 
-	return jiffies_to_msecs(time);
+	return time / NSEC_PER_MSEC;
 }
 
 static unsigned long long spufs_slb_flts(struct spu_context *ctx)
@@ -2121,11 +2133,11 @@ static int spufs_show_stat(struct seq_file *s, void *private)
 	spu_acquire(ctx);
 	seq_printf(s, "%s %llu %llu %llu %llu "
 		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
-		ctx_state_names[ctx->stats.execution_state],
-		spufs_acct_time(ctx, SPUCTX_UTIL_USER),
-		spufs_acct_time(ctx, SPUCTX_UTIL_SYSTEM),
-		spufs_acct_time(ctx, SPUCTX_UTIL_IOWAIT),
-		spufs_acct_time(ctx, SPUCTX_UTIL_LOADED),
+		ctx_state_names[ctx->stats.util_state],
+		spufs_acct_time(ctx, SPU_UTIL_USER),
+		spufs_acct_time(ctx, SPU_UTIL_SYSTEM),
+		spufs_acct_time(ctx, SPU_UTIL_IOWAIT),
+		spufs_acct_time(ctx, SPU_UTIL_IDLE_LOADED),
 		ctx->stats.vol_ctx_switch,
 		ctx->stats.invol_ctx_switch,
 		spufs_slb_flts(ctx),
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index 58ae13b7de84..8c91b3f93152 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -126,6 +126,8 @@ out:
 
 static int spu_run_init(struct spu_context *ctx, u32 * npc)
 {
+	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
+
 	if (ctx->flags & SPU_CREATE_ISOLATE) {
 		unsigned long runcntl;
 
@@ -151,6 +153,8 @@ static int spu_run_init(struct spu_context *ctx, u32 * npc)
 		ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE);
 	}
 
+	spuctx_switch_state(ctx, SPU_UTIL_USER);
+
 	return 0;
 }
 
156 160
@@ -161,6 +165,8 @@ static int spu_run_fini(struct spu_context *ctx, u32 * npc,
 
 	*status = ctx->ops->status_read(ctx);
 	*npc = ctx->ops->npc_read(ctx);
+
+	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 	spu_release(ctx);
 
 	if (signal_pending(current))
@@ -328,6 +334,9 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 		ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, &status));
 		if (unlikely(ret))
 			break;
+
+		spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
+
 		if ((status & SPU_STATUS_STOPPED_BY_STOP) &&
 		    (status >> SPU_STOP_STATUS_SHIFT == 0x2104)) {
 			ret = spu_process_callback(ctx);
@@ -356,6 +365,7 @@ long spufs_run_spu(struct file *file, struct spu_context *ctx,
 	    (ctx->state == SPU_STATE_RUNNABLE))
 		ctx->stats.libassist++;
 
+
 	ctx->ops->master_stop(ctx);
 	ret = spu_run_fini(ctx, npc, &status);
 	spu_yield(ctx);
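Taken together, the run.c hunks give the context a well-defined utilization
state at each stage of a run. The sequence, assembled from the hunks above
(summary annotation only, not kernel code):

/*
 *   spufs_run_spu()
 *     spu_run_init()       -> SPU_UTIL_SYSTEM       kernel-side setup
 *                          -> SPU_UTIL_USER         spu runs user code
 *     spufs_wait() wakeup  -> SPU_UTIL_SYSTEM       stop/callback handling
 *     spu_run_fini()       -> SPU_UTIL_IDLE_LOADED  stopped, still loaded
 */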
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index fe789308dd1e..ecd9e95116ad 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -229,6 +229,7 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 {
 	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
 		 spu->number, spu->node);
+	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
 	ctx->stats.slb_flt_base = spu->stats.slb_flt;
 	ctx->stats.class2_intr_base = spu->stats.class2_intr;
@@ -251,7 +252,8 @@ static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
 	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
 	ctx->state = SPU_STATE_RUNNABLE;
-	spu_switch_state(spu, SPU_UTIL_SYSTEM);
+
+	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
 }
 
 /**
@@ -263,8 +265,7 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 {
 	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
 		 spu->pid, spu->number, spu->node);
-
-	spu_switch_state(spu, SPU_UTIL_IDLE);
+	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
 
 	spu_switch_notify(spu, NULL);
 	spu_unmap_mappings(ctx);
@@ -279,7 +280,6 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 	spu_associate_mm(spu, NULL);
 	spu->pid = 0;
 	ctx->ops = &spu_backing_ops;
-	ctx->spu = NULL;
 	spu->flags = 0;
 	spu->ctx = NULL;
 
@@ -287,6 +287,10 @@ static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
 		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
 	ctx->stats.class2_intr +=
 		(spu->stats.class2_intr - ctx->stats.class2_intr_base);
+
+	/* This maps the underlying spu state to idle */
+	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
+	ctx->spu = NULL;
 }
 
 /**
@@ -455,8 +459,6 @@ static struct spu *find_victim(struct spu_context *ctx)
  */
 int spu_activate(struct spu_context *ctx, unsigned long flags)
 {
-	spuctx_switch_state(ctx, SPUCTX_UTIL_SYSTEM);
-
 	do {
 		struct spu *spu;
 
@@ -551,7 +553,6 @@ static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
 void spu_deactivate(struct spu_context *ctx)
 {
 	__spu_deactivate(ctx, 1, MAX_PRIO);
-	spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
 }
 
 /**
@@ -566,12 +567,7 @@ void spu_yield(struct spu_context *ctx)
 {
 	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
 		mutex_lock(&ctx->state_mutex);
-		if (__spu_deactivate(ctx, 0, MAX_PRIO))
-			spuctx_switch_state(ctx, SPUCTX_UTIL_USER);
-		else {
-			spuctx_switch_state(ctx, SPUCTX_UTIL_LOADED);
-			spu_switch_state(ctx->spu, SPU_UTIL_USER);
-		}
+		__spu_deactivate(ctx, 0, MAX_PRIO);
 		mutex_unlock(&ctx->state_mutex);
 	}
 }
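The bind/unbind hunks above also show the base/delta idiom used for the fault
and interrupt counters: the per-spu counters are running totals shared by
successive contexts, so each context snapshots them at bind time and folds in
the difference at unbind. A small sketch of the idiom; struct and function
names here are made up for the example.

/*
 * Base/delta counter accounting, as in spu_bind_context() and
 * spu_unbind_context().  Illustrative types only.
 */
struct spu_hw_counters { unsigned long long slb_flt, class2_intr; };

struct ctx_counters {
	unsigned long long slb_flt, class2_intr;           /* context totals */
	unsigned long long slb_flt_base, class2_intr_base; /* values at bind */
};

static void ctx_bind_snapshot(struct ctx_counters *c,
			      const struct spu_hw_counters *hw)
{
	c->slb_flt_base = hw->slb_flt;
	c->class2_intr_base = hw->class2_intr;
}

static void ctx_unbind_fold(struct ctx_counters *c,
			    const struct spu_hw_counters *hw)
{
	c->slb_flt += hw->slb_flt - c->slb_flt_base;
	c->class2_intr += hw->class2_intr - c->class2_intr_base;
}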
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 34d5f9f8b4ae..fdace9284378 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -40,19 +40,6 @@ enum {
 struct spu_context_ops;
 struct spu_gang;
 
-/*
- * This is the state for spu utilization reporting to userspace.
- * Because this state is visible to userspace it must never change and needs
- * to be kept strictly separate from any internal state kept by the kernel.
- */
-enum spuctx_execution_state {
-	SPUCTX_UTIL_USER = 0,
-	SPUCTX_UTIL_SYSTEM,
-	SPUCTX_UTIL_IOWAIT,
-	SPUCTX_UTIL_LOADED,
-	SPUCTX_UTIL_MAX
-};
-
 struct spu_context {
 	struct spu *spu;		  /* pointer to a physical SPU */
 	struct spu_state csa;		  /* SPU context save area. */
@@ -104,9 +91,9 @@ struct spu_context {
 	/* statistics */
 	struct {
 		/* updates protected by ctx->state_mutex */
-		enum spuctx_execution_state execution_state;
-		unsigned long tstamp;	/* time of last ctx switch */
-		unsigned long times[SPUCTX_UTIL_MAX];
+		enum spu_utilization_state util_state;
+		unsigned long long tstamp;	/* time of last state switch */
+		unsigned long long times[SPU_UTIL_MAX];
 		unsigned long long vol_ctx_switch;
 		unsigned long long invol_ctx_switch;
 		unsigned long long min_flt;
@@ -293,30 +280,34 @@ extern int spufs_coredump_num_notes;
  * line.
  */
 static inline void spuctx_switch_state(struct spu_context *ctx,
-		enum spuctx_execution_state new_state)
+		enum spu_utilization_state new_state)
 {
-	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
+	unsigned long long curtime;
+	signed long long delta;
+	struct timespec ts;
+	struct spu *spu;
+	enum spu_utilization_state old_state;
 
-	if (ctx->stats.execution_state != new_state) {
-		unsigned long curtime = jiffies;
+	ktime_get_ts(&ts);
+	curtime = timespec_to_ns(&ts);
+	delta = curtime - ctx->stats.tstamp;
 
-		ctx->stats.times[ctx->stats.execution_state] +=
-			curtime - ctx->stats.tstamp;
-		ctx->stats.tstamp = curtime;
-		ctx->stats.execution_state = new_state;
-	}
-}
-
-static inline void spu_switch_state(struct spu *spu,
-		enum spuctx_execution_state new_state)
-{
-	if (spu->stats.utilization_state != new_state) {
-		unsigned long curtime = jiffies;
-
-		spu->stats.times[spu->stats.utilization_state] +=
-			curtime - spu->stats.tstamp;
-		spu->stats.tstamp = curtime;
-		spu->stats.utilization_state = new_state;
-	}
-}
+	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
+	WARN_ON(delta < 0);
+
+	spu = ctx->spu;
+	old_state = ctx->stats.util_state;
+	ctx->stats.util_state = new_state;
+	ctx->stats.tstamp = curtime;
+
+	/*
+	 * Update the physical SPU utilization statistics.
+	 */
+	if (spu) {
+		ctx->stats.times[old_state] += delta;
+		spu->stats.times[old_state] += delta;
+		spu->stats.util_state = new_state;
+		spu->stats.tstamp = curtime;
+	}
+}
 
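The reworked spuctx_switch_state() is now the single point where both context
and physical spu statistics advance, and time only accumulates while a spu is
attached, which is exactly why spufs_acct_time() above has to add the
still-open interval for a lazily loaded context. A compact model of the
transition logic follows; the types are illustrative, and the small helpers
are redefined here so the block stands alone.

/*
 * Compact model of the reworked spuctx_switch_state(): one timestamped
 * transition charges the elapsed delta for the old state to both the
 * context and, when one is attached, the physical spu.  A context with
 * no spu attached accumulates nothing.
 */
#include <time.h>

enum util_state { UTIL_USER, UTIL_SYSTEM, UTIL_IOWAIT, UTIL_IDLE_LOADED, UTIL_MAX };

struct spu_stats {
	enum util_state util_state;
	unsigned long long tstamp;
	unsigned long long times[UTIL_MAX];
};

struct ctx_stats {
	struct spu_stats *spu;	/* NULL while the context is not loaded */
	enum util_state util_state;
	unsigned long long tstamp;
	unsigned long long times[UTIL_MAX];
};

static unsigned long long now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (unsigned long long)ts.tv_sec * 1000000000ULL + ts.tv_nsec;
}

static void ctx_switch_state(struct ctx_stats *ctx, enum util_state new_state)
{
	unsigned long long curtime = now_ns();
	unsigned long long delta = curtime - ctx->tstamp;
	enum util_state old_state = ctx->util_state;

	ctx->util_state = new_state;
	ctx->tstamp = curtime;

	/* Only charge time while a physical spu is attached. */
	if (ctx->spu) {
		ctx->times[old_state] += delta;
		ctx->spu->times[old_state] += delta;
		ctx->spu->util_state = new_state;
		ctx->spu->tstamp = curtime;
	}
}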