aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/staging
diff options
context:
space:
mode:
authorSeth Jennings <sjenning@linux.vnet.ibm.com>2011-12-30 11:42:15 -0500
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>2012-02-08 17:14:14 -0500
commit2a4830110b90deb4ee99b1ab8c8ebb120f27c0c8 (patch)
tree642f3ddca90439ff7fff2b76474319de2525807c /drivers/staging
parenta9d3c9e3c575ee09c905e07ae7cc1d52e2548d05 (diff)
staging: zcache: fix serialization bug in zv stats
In a multithreaded workload, the zv_curr_dist_counts and zv_cumul_dist_counts statistics are being corrupted because the increments and decrements in zv_create and zv_free are not atomic. This patch converts these statistics and their corresponding increments/decrements/reads to atomic operations. Signed-off-by: Seth Jennings <sjenning@linux.vnet.ibm.com> Acked-by: Dan Magenheimer <dan.magenheimer@oracle.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/staging')
-rw-r--r--drivers/staging/zcache/zcache-main.c14
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index ce07087750a..ef7c52bb1df 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -654,8 +654,8 @@ static unsigned int zv_max_zsize = (PAGE_SIZE / 8) * 7;
654 */ 654 */
655static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5; 655static unsigned int zv_max_mean_zsize = (PAGE_SIZE / 8) * 5;
656 656
657static unsigned long zv_curr_dist_counts[NCHUNKS]; 657static atomic_t zv_curr_dist_counts[NCHUNKS];
658static unsigned long zv_cumul_dist_counts[NCHUNKS]; 658static atomic_t zv_cumul_dist_counts[NCHUNKS];
659 659
660static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id, 660static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
661 struct tmem_oid *oid, uint32_t index, 661 struct tmem_oid *oid, uint32_t index,
@@ -674,8 +674,8 @@ static struct zv_hdr *zv_create(struct xv_pool *xvpool, uint32_t pool_id,
674 &page, &offset, ZCACHE_GFP_MASK); 674 &page, &offset, ZCACHE_GFP_MASK);
675 if (unlikely(ret)) 675 if (unlikely(ret))
676 goto out; 676 goto out;
677 zv_curr_dist_counts[chunks]++; 677 atomic_inc(&zv_curr_dist_counts[chunks]);
678 zv_cumul_dist_counts[chunks]++; 678 atomic_inc(&zv_cumul_dist_counts[chunks]);
679 zv = kmap_atomic(page, KM_USER0) + offset; 679 zv = kmap_atomic(page, KM_USER0) + offset;
680 zv->index = index; 680 zv->index = index;
681 zv->oid = *oid; 681 zv->oid = *oid;
@@ -697,7 +697,7 @@ static void zv_free(struct xv_pool *xvpool, struct zv_hdr *zv)
697 697
698 ASSERT_SENTINEL(zv, ZVH); 698 ASSERT_SENTINEL(zv, ZVH);
699 BUG_ON(chunks >= NCHUNKS); 699 BUG_ON(chunks >= NCHUNKS);
700 zv_curr_dist_counts[chunks]--; 700 atomic_dec(&zv_curr_dist_counts[chunks]);
701 size -= sizeof(*zv); 701 size -= sizeof(*zv);
702 BUG_ON(size == 0); 702 BUG_ON(size == 0);
703 INVERT_SENTINEL(zv, ZVH); 703 INVERT_SENTINEL(zv, ZVH);
@@ -737,7 +737,7 @@ static int zv_curr_dist_counts_show(char *buf)
737 char *p = buf; 737 char *p = buf;
738 738
739 for (i = 0; i < NCHUNKS; i++) { 739 for (i = 0; i < NCHUNKS; i++) {
740 n = zv_curr_dist_counts[i]; 740 n = atomic_read(&zv_curr_dist_counts[i]);
741 p += sprintf(p, "%lu ", n); 741 p += sprintf(p, "%lu ", n);
742 chunks += n; 742 chunks += n;
743 sum_total_chunks += i * n; 743 sum_total_chunks += i * n;
@@ -753,7 +753,7 @@ static int zv_cumul_dist_counts_show(char *buf)
753 char *p = buf; 753 char *p = buf;
754 754
755 for (i = 0; i < NCHUNKS; i++) { 755 for (i = 0; i < NCHUNKS; i++) {
756 n = zv_cumul_dist_counts[i]; 756 n = atomic_read(&zv_cumul_dist_counts[i]);
757 p += sprintf(p, "%lu ", n); 757 p += sprintf(p, "%lu ", n);
758 chunks += n; 758 chunks += n;
759 sum_total_chunks += i * n; 759 sum_total_chunks += i * n;