aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--block/blk-cgroup.c27
-rw-r--r--block/blk-cgroup.h2
2 files changed, 24 insertions, 5 deletions
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 34bfcefdd924..3622518e1c23 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -400,14 +400,25 @@ void blkiocg_update_dispatch_stats(struct blkio_group *blkg,
 			uint64_t bytes, bool direction, bool sync)
 {
 	struct blkio_group_stats_cpu *stats_cpu;
+	unsigned long flags;
+
+	/*
+	 * Disabling interrupts to provide mutual exclusion between two
+	 * writes on same cpu. It probably is not needed for 64bit. Not
+	 * optimizing that case yet.
+	 */
+	local_irq_save(flags);
 
 	stats_cpu = this_cpu_ptr(blkg->stats_cpu);
 
+	u64_stats_update_begin(&stats_cpu->syncp);
 	stats_cpu->sectors += bytes >> 9;
 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICED],
 			1, direction, sync);
 	blkio_add_stat(stats_cpu->stat_arr_cpu[BLKIO_STAT_CPU_SERVICE_BYTES],
 			bytes, direction, sync);
+	u64_stats_update_end(&stats_cpu->syncp);
+	local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(blkiocg_update_dispatch_stats);
 
@@ -622,15 +633,21 @@ static uint64_t blkio_read_stat_cpu(struct blkio_group *blkg,
 {
 	int cpu;
 	struct blkio_group_stats_cpu *stats_cpu;
-	uint64_t val = 0;
+	u64 val = 0, tval;
 
 	for_each_possible_cpu(cpu) {
+		unsigned int start;
 		stats_cpu = per_cpu_ptr(blkg->stats_cpu, cpu);
 
-		if (type == BLKIO_STAT_CPU_SECTORS)
-			val += stats_cpu->sectors;
-		else
-			val += stats_cpu->stat_arr_cpu[type][sub_type];
+		do {
+			start = u64_stats_fetch_begin(&stats_cpu->syncp);
+			if (type == BLKIO_STAT_CPU_SECTORS)
+				tval = stats_cpu->sectors;
+			else
+				tval = stats_cpu->stat_arr_cpu[type][sub_type];
+		} while(u64_stats_fetch_retry(&stats_cpu->syncp, start));
+
+		val += tval;
 	}
 
 	return val;
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index fd730a24b491..262226798093 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -14,6 +14,7 @@
  */
 
 #include <linux/cgroup.h>
+#include <linux/u64_stats_sync.h>
 
 enum blkio_policy_id {
 	BLKIO_POLICY_PROP = 0,	/* Proportional Bandwidth division */
@@ -154,6 +155,7 @@ struct blkio_group_stats {
 struct blkio_group_stats_cpu {
 	uint64_t sectors;
 	uint64_t stat_arr_cpu[BLKIO_STAT_CPU_NR][BLKIO_STAT_TOTAL];
+	struct u64_stats_sync syncp;
 };
 
 struct blkio_group {