| author | Minchan Kim <minchan@kernel.org> | 2014-01-30 18:46:03 -0500 |
|---|---|---|
| committer | Linus Torvalds <torvalds@linux-foundation.org> | 2014-01-30 19:56:55 -0500 |
| commit | 92967471b67163bb1654e9b7fe99449ab70a4aaa (patch) | |
| tree | 440e10b86af31f9752585f2472a382d0b36611d4 /drivers/block/zram/zram_drv.c | |
| parent | deb0bdeb2f3d6b81d37fc778316dae46b6daab56 (diff) | |
zram: introduce zram->tb_lock
Currently, the zram table is protected by zram->lock, but that is a rather
coarse-grained lock and it hurts scalability.

Let's use our own rwlock instead of depending on zram->lock. This patch
adds the new locking, so it will obviously be slower for now, but it is
preparation for removing the coarse-grained rw_semaphore (i.e., zram->lock),
which is the hurdle for zram scalability.

The final patch in this series will remove the lock from the read path and
replace the rw_semaphore with a mutex in the write path. As a bonus, we can
drop the pending slot-free mess in the next patch.
Signed-off-by: Minchan Kim <minchan@kernel.org>
Cc: Nitin Gupta <ngupta@vflare.org>
Tested-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
Cc: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'drivers/block/zram/zram_drv.c')
-rw-r--r-- | drivers/block/zram/zram_drv.c | 26
1 file changed, 21 insertions, 5 deletions
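Before the diff itself, here is a minimal userspace sketch of the locking protocol the patch introduces. This is an analogue, not kernel code: `pthread_rwlock_t` stands in for the kernel `rwlock_t`, and the struct layout plus the `read_page()`/`write_page()` helpers are simplified, hypothetical stand-ins for `struct zram_meta`, `zram_decompress_page()`, and `zram_bvec_write()`.

```c
/*
 * Userspace sketch of the meta->tb_lock protocol (simplified analogue,
 * not the real zram code).
 */
#include <pthread.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#define NPAGES    1024
#define PAGE_SIZE 4096

struct table_entry {
	char *handle;		/* stand-in for a zsmalloc handle; NULL == empty slot */
	uint16_t size;		/* compressed length */
};

struct meta {
	pthread_rwlock_t tb_lock;	/* protects every table[] entry */
	struct table_entry table[NPAGES];
};

/*
 * Read side, mirroring zram_decompress_page(): snapshot handle/size under
 * the lock and keep holding it while the object is used, so a concurrent
 * writer cannot free the handle underneath us.
 */
static void read_page(struct meta *m, uint32_t index, char *out)
{
	pthread_rwlock_rdlock(&m->tb_lock);
	char *handle = m->table[index].handle;
	uint16_t size = m->table[index].size;

	if (!handle) {
		pthread_rwlock_unlock(&m->tb_lock);
		memset(out, 0, PAGE_SIZE);	/* unbacked slot reads as zeroes */
		return;
	}
	memcpy(out, handle, size);		/* stand-in for decompression */
	pthread_rwlock_unlock(&m->tb_lock);
}

/*
 * Write side, mirroring zram_bvec_write(): freeing the old object and
 * installing the new one form a single critical section on the exclusive
 * side of the lock, so readers never observe a half-updated entry.
 */
static void write_page(struct meta *m, uint32_t index,
		       char *new_handle, uint16_t size)
{
	pthread_rwlock_wrlock(&m->tb_lock);
	free(m->table[index].handle);	/* stand-in for zram_free_page() */
	m->table[index].handle = new_handle;
	m->table[index].size = size;
	pthread_rwlock_unlock(&m->tb_lock);
}
```

The point of the sketch is not performance (the commit message concedes this step adds overhead) but the shape of the protocol: per-table read/write locking is what later allows the coarse zram->lock to be dropped from the read path.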
```diff
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 9ab884999420..24e6426cf258 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -140,6 +140,7 @@ static ssize_t mem_used_total_show(struct device *dev,
 	return sprintf(buf, "%llu\n", val);
 }
 
+/* flag operations needs meta->tb_lock */
 static int zram_test_flag(struct zram_meta *meta, u32 index,
 			enum zram_pageflags flag)
 {
@@ -228,6 +229,7 @@ static struct zram_meta *zram_meta_alloc(u64 disksize)
 		goto free_table;
 	}
 
+	rwlock_init(&meta->tb_lock);
 	return meta;
 
 free_table:
@@ -280,6 +282,7 @@ static void handle_zero_page(struct bio_vec *bvec)
 	flush_dcache_page(page);
 }
 
+/* NOTE: caller should hold meta->tb_lock with write-side */
 static void zram_free_page(struct zram *zram, size_t index)
 {
 	struct zram_meta *meta = zram->meta;
@@ -319,20 +322,26 @@ static int zram_decompress_page(struct zram *zram, char *mem, u32 index)
 	size_t clen = PAGE_SIZE;
 	unsigned char *cmem;
 	struct zram_meta *meta = zram->meta;
-	unsigned long handle = meta->table[index].handle;
+	unsigned long handle;
+	u16 size;
+
+	read_lock(&meta->tb_lock);
+	handle = meta->table[index].handle;
+	size = meta->table[index].size;
 
 	if (!handle || zram_test_flag(meta, index, ZRAM_ZERO)) {
+		read_unlock(&meta->tb_lock);
 		clear_page(mem);
 		return 0;
 	}
 
 	cmem = zs_map_object(meta->mem_pool, handle, ZS_MM_RO);
-	if (meta->table[index].size == PAGE_SIZE)
+	if (size == PAGE_SIZE)
 		copy_page(mem, cmem);
 	else
-		ret = lzo1x_decompress_safe(cmem, meta->table[index].size,
-				mem, &clen);
+		ret = lzo1x_decompress_safe(cmem, size, mem, &clen);
 	zs_unmap_object(meta->mem_pool, handle);
+	read_unlock(&meta->tb_lock);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -353,11 +362,14 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	struct zram_meta *meta = zram->meta;
 	page = bvec->bv_page;
 
+	read_lock(&meta->tb_lock);
 	if (unlikely(!meta->table[index].handle) ||
 			zram_test_flag(meta, index, ZRAM_ZERO)) {
+		read_unlock(&meta->tb_lock);
 		handle_zero_page(bvec);
 		return 0;
 	}
+	read_unlock(&meta->tb_lock);
 
 	if (is_partial_io(bvec))
 		/* Use a temporary buffer to decompress the page */
@@ -433,10 +445,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	if (page_zero_filled(uncmem)) {
 		kunmap_atomic(user_mem);
 		/* Free memory associated with this sector now. */
+		write_lock(&zram->meta->tb_lock);
 		zram_free_page(zram, index);
+		zram_set_flag(meta, index, ZRAM_ZERO);
+		write_unlock(&zram->meta->tb_lock);
 
 		atomic_inc(&zram->stats.pages_zero);
-		zram_set_flag(meta, index, ZRAM_ZERO);
 		ret = 0;
 		goto out;
 	}
@@ -486,10 +500,12 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	 * Free memory associated with this sector
 	 * before overwriting unused sectors.
 	 */
+	write_lock(&zram->meta->tb_lock);
 	zram_free_page(zram, index);
 
 	meta->table[index].handle = handle;
 	meta->table[index].size = clen;
+	write_unlock(&zram->meta->tb_lock);
 
 	/* Update stats */
 	atomic64_add(clen, &zram->stats.compr_size);
```