author		Vivek Goyal <vgoyal@redhat.com>	2011-05-19 15:38:30 -0400
committer	Jens Axboe <jaxboe@fusionio.com>	2011-05-20 14:34:53 -0400
commit		f0bdc8cdd9a2bcc2c84ae2a1fdbff4188b354d8d (patch)
tree		6d34ecec4cfc29c6032a7b6ce1acbb61088d1787 /block/blk-cgroup.c
parent		575969a0dd3fe65c6556bcb8f87c42303326ea55 (diff)
blk-cgroup: Make cgroup stat reset path blkg->lock free for dispatch stats
The dispatch stats update path is now lock free, but resetting these stats still takes blkg->stats_lock and depends on it. Since the stats are per cpu, we should be able to reset them on each cpu without any locks, at least on 64-bit arches.

On 32-bit arches there is a small race because 64-bit updates are not atomic: in the presence of other writers, a reader might not see a 0 value after a stat has been reset and might instead observe some intermediate value.

One could write more complicated code to close this race, e.g. sending IPIs to other cpus so they reset their stats locally, and resetting the stats of offline cpus directly. I am not taking that path right now because stat reset is more of a debug feature, the race can happen only on 32-bit arches, and the possibility of hitting it is small. I will fix it if it becomes a real problem; for the time being I am going for code simplicity.

Signed-off-by: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <jaxboe@fusionio.com>
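For context, the lock-free update path this builds on (made 32-bit safe earlier in this series) wraps each per-cpu 64-bit update in a u64_stats_sync write section so that 32-bit readers can detect and retry torn loads. Below is a minimal sketch of that updater/reader pairing; the function names are illustrative, and a struct blkio_group_stats_cpu with a u64_stats_sync member named syncp is assumed, not copied from the actual blk-cgroup code:

	/*
	 * Sketch only: assumes the u64_stats_sync scheme used by the
	 * per-cpu dispatch stats update path. Illustrative names.
	 */
	static void blkio_add_sectors_cpu(struct blkio_group_stats_cpu *stats_cpu,
					  u64 sectors)
	{
		u64_stats_update_begin(&stats_cpu->syncp);
		stats_cpu->sectors += sectors;
		u64_stats_update_end(&stats_cpu->syncp);
	}

	static u64 blkio_read_sectors_cpu(struct blkio_group_stats_cpu *stats_cpu)
	{
		unsigned int start;
		u64 val;

		/* Retry if a 64-bit update was in flight during the read. */
		do {
			start = u64_stats_fetch_begin(&stats_cpu->syncp);
			val = stats_cpu->sectors;
		} while (u64_stats_fetch_retry(&stats_cpu->syncp, start));

		return val;
	}

The reset added by this patch stores 0 from a foreign cpu without entering the write-side section, which is exactly why a concurrent updater on a 32-bit arch can leave an intermediate value behind.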
Diffstat (limited to 'block/blk-cgroup.c')
-rw-r--r--	block/blk-cgroup.c	| 28 ++++++++++++++++++++++++++++
1 file changed, 28 insertions(+), 0 deletions(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 3622518e1c23..e41cc6f2ccc1 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -537,6 +537,30 @@ struct blkio_group *blkiocg_lookup_group(struct blkio_cgroup *blkcg, void *key)
 }
 EXPORT_SYMBOL_GPL(blkiocg_lookup_group);
 
+static void blkio_reset_stats_cpu(struct blkio_group *blkg)
+{
+	struct blkio_group_stats_cpu *stats_cpu;
+	int i, j, k;
+	/*
+	 * Note: On a 64-bit arch this should not be an issue. This has the
+	 * possibility of returning some inconsistent value on a 32-bit arch,
+	 * as a 64-bit update on 32-bit is not atomic. Taking care of this
+	 * corner case makes the code very complicated, like sending IPIs to
+	 * cpus and taking care of stats of offline cpus etc.
+	 *
+	 * Reset stats is anyway more of a debug feature and this sounds like
+	 * a corner case. So I am not complicating the code until and
+	 * unless this becomes a real issue.
+	 */
+	for_each_possible_cpu(i) {
+		stats_cpu = per_cpu_ptr(blkg->stats_cpu, i);
+		stats_cpu->sectors = 0;
+		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
+			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
+				stats_cpu->stat_arr_cpu[j][k] = 0;
+	}
+}
+
 static int
 blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 {
@@ -581,7 +605,11 @@ blkiocg_reset_stats(struct cgroup *cgroup, struct cftype *cftype, u64 val)
 		}
 #endif
 		spin_unlock(&blkg->stats_lock);
+
+		/* Reset per-cpu stats which don't take blkg->stats_lock */
+		blkio_reset_stats_cpu(blkg);
 	}
+
 	spin_unlock_irq(&blkcg->lock);
 	return 0;
 }
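For completeness, here is a hypothetical sketch of the IPI-based alternative the commit message and the in-code comment reject. The function names are invented for illustration; this is not code from the series:

	/*
	 * Rejected-alternative sketch: reset each online cpu's copy on that
	 * cpu via IPI so the 64-bit stores stay cpu-local, then clear
	 * offline cpus' copies directly. Illustrative names only.
	 */
	static void blkio_reset_one_stats_cpu(struct blkio_group_stats_cpu *stats_cpu)
	{
		int j, k;

		stats_cpu->sectors = 0;
		for (j = 0; j < BLKIO_STAT_CPU_NR; j++)
			for (k = 0; k < BLKIO_STAT_TOTAL; k++)
				stats_cpu->stat_arr_cpu[j][k] = 0;
	}

	/* IPI handler: runs on each online cpu with the blkg as argument. */
	static void blkio_reset_stats_cpu_ipi(void *info)
	{
		struct blkio_group *blkg = info;

		blkio_reset_one_stats_cpu(this_cpu_ptr(blkg->stats_cpu));
	}

	static void blkio_reset_stats_cpu_strict(struct blkio_group *blkg)
	{
		int cpu;

		/* Reset every online cpu's copy locally; wait for completion. */
		on_each_cpu(blkio_reset_stats_cpu_ipi, blkg, 1);

		/* Offline cpus have no concurrent updaters; clear directly. */
		for_each_possible_cpu(cpu)
			if (!cpu_online(cpu))
				blkio_reset_one_stats_cpu(
					per_cpu_ptr(blkg->stats_cpu, cpu));
	}

As the message argues, this buys little: it adds complexity (and would still need care around cpu hotplug between the on_each_cpu() pass and the offline sweep) to close a window that exists only on 32-bit for a debug-oriented path, which is why the patch opts for the simple unsynchronized clear.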