author     Weijie Yang <weijie.yang@samsung.com>            2014-08-06 19:08:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-08-06 21:01:23 -0400
commit     d2d5e762c8990c4031890e03565983a05febd64a
tree       9d5a8dd0fe39334fbace4b8ea76f39dd9ecbd45c /drivers/block/zram
parent     023b409f9dac4cdea3322009f2e592068558690c
zram: replace global tb_lock with fine grain lock
Currently, we use an rwlock, tb_lock, to protect concurrent access to the
whole zram meta table. However, according to the actual access model,
there is only a small chance that upper users access the same
table[index], so the current lock granularity is too coarse. The idea of
this optimization is to narrow the lock granularity from the whole meta
table to a per-entry lock (table -> table[index]), so that we still
protect concurrent access to the same table[index] while allowing
maximum concurrency.

With this in mind, several kinds of locks that could serve as a
per-entry lock were tested and compared.

Test environment: x86-64 Intel Core2 Q8400, 4GB system memory,
Ubuntu 12.04, kernel v3.15.0-rc3 as base, zram with 4 max_comp_streams
LZO.

iozone test:
iozone -t 4 -R -r 16K -s 200M -I +Z
(1GB zram with ext4 filesystem, average of 10 runs, KB/s)

 Test            base      CAS       spinlock  rwlock    bit_spinlock
 ---------------------------------------------------------------------
 Initial write   1381094   1425435   1422860   1423075   1421521
 Rewrite         1529479   1641199   1668762   1672855   1654910
 Read            8468009   11324979  11305569  11117273  10997202
 Re-read         8467476   11260914  11248059  11145336  10906486
 Reverse Read    6821393   8106334   8282174   8279195   8109186
 Stride read     7191093   8994306   9153982   8961224   9004434
 Random read     7156353   8957932   9167098   8980465   8940476
 Mixed workload  4172747   5680814   5927825   5489578   5972253
 Random write    1483044   1605588   1594329   1600453   1596010
 Pwrite          1276644   1303108   1311612   1314228   1300960
 Pread           4324337   4632869   4618386   4457870   4500166

To increase the chance of concurrent access to the same table[index],
set zram to a small disksize (10MB) and run the threads with a large
loop count.

fio test:
fio --bs=32k --randrepeat=1 --randseed=100 --refill_buffers
--scramble_buffers=1 --direct=1 --loops=3000 --numjobs=4
--filename=/dev/zram0 --name=seq-write --rw=write --stonewall
--name=seq-read --rw=read --stonewall --name=seq-readwrite --rw=rw
--stonewall --name=rand-readwrite --rw=randrw --stonewall
(10MB zram raw block device, average of 10 runs, KB/s)

 Test       base      CAS       spinlock  rwlock    bit_spinlock
 ----------------------------------------------------------------
 seq-write  933789    999357    1003298   995961    1001958
 seq-read   5634130   6577930   6380861   6243912   6230006
 seq-rw     1405687   1638117   1640256   1633903   1634459
 rand-rw    1386119   1614664   1617211   1609267   1612471

All the optimization methods outperform the base; however, it is hard
to say which one is the most appropriate. On the other hand, zram is
mostly used on small embedded systems, so we do not want to increase
the memory footprint at all.

This patch picks the bit_spinlock method and packs the object size and
page flags into a single unsigned long field, table.value, so that no
memory overhead is added on either 32-bit or 64-bit systems.

Finally, even though the different kinds of locks perform differently,
the difference can be ignored here: if zram is used as a swap device,
the swap subsystem already prevents concurrent access to the same swap
slot; if zram is used as a block device with a filesystem on top, the
filesystem and the page cache also mostly prevent concurrent access to
the same block.
Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Reviewed-by: Davidlohr Bueso <davidlohr@hp.com>
Signed-off-by: Weijie Yang <weijie.yang@samsung.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Jerome Marchand <jmarchan@redhat.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
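[Editor's note] As an illustration of the approach, here is a minimal
userspace sketch of the per-entry bit-spinlock idea, not the kernel
implementation: it assumes GCC/Clang __atomic builtins, and
entry_lock()/entry_unlock() are hypothetical stand-ins for the kernel's
bit_spin_lock()/bit_spin_unlock() on the ZRAM_ACCESS bit of each entry's
value word.

/*
 * Userspace model of the per-entry lock, assuming GCC/Clang __atomic
 * builtins. The kernel's bit_spin_lock() additionally disables
 * preemption; this sketch only shows the core idea: one bit of the
 * entry's value word doubles as the lock, so no extra field is
 * needed per entry.
 */
#include <stdio.h>

#define ZRAM_ACCESS	26	/* illustrative lock-bit position (ZRAM_FLAG_SHIFT + 2) */

static void entry_lock(unsigned long *value)
{
	/* Spin until we are the thread that flips the bit from 0 to 1. */
	while (__atomic_fetch_or(value, 1UL << ZRAM_ACCESS,
				 __ATOMIC_ACQUIRE) & (1UL << ZRAM_ACCESS))
		;	/* busy-wait; the kernel would cpu_relax() here */
}

static void entry_unlock(unsigned long *value)
{
	__atomic_fetch_and(value, ~(1UL << ZRAM_ACCESS), __ATOMIC_RELEASE);
}

int main(void)
{
	unsigned long value = 0;

	entry_lock(&value);
	/* ... read or update this entry's handle/size safely ... */
	entry_unlock(&value);
	printf("lock bit clear again: %d\n", !(value >> ZRAM_ACCESS & 1));
	return 0;
}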
Diffstat (limited to 'drivers/block/zram')
 -rw-r--r--  drivers/block/zram/zram_drv.c  69
 -rw-r--r--  drivers/block/zram/zram_drv.h  24
 2 files changed, 60 insertions, 33 deletions
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 40743972eaf7..dfa4024c448a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -183,19 +183,32 @@ static ssize_t comp_algorithm_store(struct device *dev,
 static int zram_test_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	return meta->table[index].flags & BIT(flag);
+	return meta->table[index].value & BIT(flag);
 }
 
 static void zram_set_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	meta->table[index].flags |= BIT(flag);
+	meta->table[index].value |= BIT(flag);
 }
 
 static void zram_clear_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
-	meta->table[index].flags &= ~BIT(flag);
+	meta->table[index].value &= ~BIT(flag);
+}
+
+static size_t zram_get_obj_size(struct zram_meta *meta, u32 index)
+{
+	return meta->table[index].value & (BIT(ZRAM_FLAG_SHIFT) - 1);
+}
+
+static void zram_set_obj_size(struct zram_meta *meta,
+					u32 index, size_t size)
+{
+	unsigned long flags = meta->table[index].value >> ZRAM_FLAG_SHIFT;
+
+	meta->table[index].value = (flags << ZRAM_FLAG_SHIFT) | size;
 }
 
 static inline int is_partial_io(struct bio_vec *bvec)
@@ -255,7 +268,6 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
 		goto free_table;
 	}
 
-	rwlock_init(&meta->tb_lock);
 	return meta;
 
 free_table:
@@ -304,7 +316,12 @@ static void handle_zero_page(struct bio_vec *bvec)
 	flush_dcache_page(page);
 }
 
-/* NOTE: caller should hold meta->tb_lock with write-side */
+
+/*
+ * To protect concurrent access to the same index entry, the caller
+ * should hold this table index entry's bit_spinlock to indicate
+ * that this index entry is being accessed.
+ */
 static void zram_free_page(struct zram *zram, size_t index)
 {
 	struct zram_meta *meta = zram->meta;
@@ -324,11 +341,12 @@ static void zram_free_page(struct zram *zram, size_t index)
 
 	zs_free(meta->mem_pool, handle);
 
-	atomic64_sub(meta->table[index].size, &zram->stats.compr_data_size);
+	atomic64_sub(zram_get_obj_size(meta, index),
+			&zram->stats.compr_data_size);
 	atomic64_dec(&zram->stats.pages_stored);
 
 	meta->table[index].handle = 0;
-	meta->table[index].size = 0;
+	zram_set_obj_size(meta, index, 0);
 }
 
 static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
@@ -339,12 +357,12 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	unsigned long handle;
 	size_t size;
 
-	read_lock(&meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	handle = meta->table[index].handle;
-	size = meta->table[index].size;
+	size = zram_get_obj_size(meta, index);
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
-		read_unlock(&meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		clear_page(mem);
 		return 0;
 	}
@@ -355,7 +373,7 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	else
 		ret = zcomp_decompress(zram->comp, cmem, size, mem);
 	zs_unmap_object(meta->mem_pool, handle);
-	read_unlock(&meta->tb_lock);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret)) {
@@ -376,14 +394,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;
 
-	read_lock(&meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
-		read_unlock(&meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		handle_zero_page(bvec);
 		return 0;
 	}
-	read_unlock(&meta->tb_lock);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	if (is_partial_io(bvec))
 		/* Use a temporary buffer to decompress the page */
@@ -461,10 +479,10 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	if (page_zero_filled(uncmem)) {
 		kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
-		write_lock(&zram->meta->tb_lock);
+		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
 		zram_set_flag(meta, index, ZRAM_ZERO);
-		write_unlock(&zram->meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 		atomic64_inc(&zram->stats.zero_pages);
 		ret = 0;
@@ -514,12 +532,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
-	write_lock(&zram->meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	zram_free_page(zram, index);
 
 	meta->table[index].handle = handle;
-	meta->table[index].size = clen;
-	write_unlock(&zram->meta->tb_lock);
+	zram_set_obj_size(meta, index, clen);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_data_size);
@@ -560,6 +578,7 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 			 int offset, struct bio *bio)
 {
 	size_t n = bio->bi_iter.bi_size;
+	struct zram_meta *meta = zram->meta;
 
 	/*
 	 * zram manages data in physical block size units. Because logical block
@@ -580,13 +599,9 @@ static void zram_bio_discard(struct zram *zram, u32 index,
 	}
 
 	while (n >= PAGE_SIZE) {
-		/*
-		 * Discard request can be large so the lock hold times could be
-		 * lengthy. So take the lock once per page.
-		 */
-		write_lock(&zram->meta->tb_lock);
+		bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 		zram_free_page(zram, index);
-		write_unlock(&zram->meta->tb_lock);
+		bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 		index++;
 		n -= PAGE_SIZE;
 	}
@@ -821,9 +836,9 @@ static void zram_slot_free_notify(struct block_device *bdev,
 	zram = bdev->bd_disk->private_data;
 	meta = zram->meta;
 
-	write_lock(&meta->tb_lock);
+	bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
 	zram_free_page(zram, index);
-	write_unlock(&meta->tb_lock);
+	bit_spin_unlock(ZRAM_ACCESS, &meta->table[index].value);
 	atomic64_inc(&zram->stats.notify_free);
 }
 
diff --git a/drivers/block/zram/zram_drv.h b/drivers/block/zram/zram_drv.h
index c8161bd8969c..5b0afde729cd 100644
--- a/drivers/block/zram/zram_drv.h
+++ b/drivers/block/zram/zram_drv.h
@@ -50,10 +50,24 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
 #define ZRAM_SECTOR_PER_LOGICAL_BLOCK	\
 	(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))
 
-/* Flags for zram pages (table[page_no].flags) */
+
+/*
+ * The lower ZRAM_FLAG_SHIFT bits of table.value hold the object
+ * size (excluding header); the higher bits hold the
+ * zram_pageflags.
+ *
+ * zram is mainly used for memory efficiency, so we want to keep
+ * the memory footprint small: squeezing the object size and the
+ * page flags into a single field avoids growing
+ * struct zram_table_entry.
+ */
+#define ZRAM_FLAG_SHIFT 24
+
+/* Flags for zram pages (table[page_no].value) */
 enum zram_pageflags {
 	/* Page consists entirely of zeros */
-	ZRAM_ZERO,
+	ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
+	ZRAM_ACCESS,	/* page is now being accessed */
 
 	__NR_ZRAM_PAGEFLAGS,
 };
@@ -63,9 +77,8 @@ enum zram_pageflags {
 /* Allocated for each disk page */
 struct zram_table_entry {
 	unsigned long handle;
-	u16 size;	/* object size (excluding header) */
-	u8 flags;
-} __aligned(4);
+	unsigned long value;
+};
 
 struct zram_stats {
 	atomic64_t compr_data_size;	/* compressed size of pages stored */
@@ -80,7 +93,6 @@ struct zram_stats {
 };
 
 struct zram_meta {
-	rwlock_t tb_lock;	/* protect table */
 	struct zram_table_entry *table;
 	struct zs_pool *mem_pool;
 };
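
[Editor's note] For reference, the size/flags packing introduced in
zram_drv.h above can be exercised as a standalone userspace program.
This sketch reuses the patch's ZRAM_FLAG_SHIFT layout; get_obj_size()
and set_obj_size() are local stand-ins for the kernel helpers
zram_get_obj_size()/zram_set_obj_size().

/*
 * Standalone demonstration of the table[index].value packing: the low
 * ZRAM_FLAG_SHIFT bits hold the object size, the high bits hold the
 * page flags, so one unsigned long replaces the old size + flags pair.
 */
#include <assert.h>
#include <stddef.h>

#define BIT(n)		(1UL << (n))
#define ZRAM_FLAG_SHIFT	24

enum zram_pageflags {
	ZRAM_ZERO = ZRAM_FLAG_SHIFT + 1,
	ZRAM_ACCESS,
};

static size_t get_obj_size(unsigned long value)
{
	return value & (BIT(ZRAM_FLAG_SHIFT) - 1);
}

static unsigned long set_obj_size(unsigned long value, size_t size)
{
	unsigned long flags = value >> ZRAM_FLAG_SHIFT;

	/* Keep the flag bits, replace only the size bits. */
	return (flags << ZRAM_FLAG_SHIFT) | size;
}

int main(void)
{
	unsigned long value = 0;

	value |= BIT(ZRAM_ZERO);		/* set a page flag */
	value = set_obj_size(value, 3172);	/* store a compressed size */

	assert(get_obj_size(value) == 3172);	/* size unaffected by the flag */
	assert(value & BIT(ZRAM_ZERO));		/* flag survives the size update */
	return 0;
}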