author      Nitin Gupta <ngupta@vflare.org>         2010-08-09 13:26:48 -0400
committer   Greg Kroah-Hartman <gregkh@suse.de>     2010-08-31 18:36:36 -0400
commit      484875adbc473041b7cf4ef0cb3f56a2ae44a448 (patch)
tree        803fcaee49e52441fe3812f49e11d004769eccbf /drivers/staging/zram/zram_drv.c
parent      e98419c23b1a189c932775f7833e94cb5230a16b (diff)
Staging: zram: Remove need for explicit device initialization
Currently, the user has to explicitly write a positive value to the initstate sysfs node before the device can be used. This write triggers allocation of per-device metadata such as the memory pool, the table array and so on.

We do not pre-initialize all zram devices, since the 'table' array, which maps disk blocks to compressed chunks, takes a considerable amount of memory (8 bytes per page). Pre-initializing all devices would therefore be quite wasteful if only a few, or none, of the devices are actually used.

This explicit device initialization from the user is an odd requirement and can easily be avoided. The device is now initialized when the first write is done to it.

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
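The core of the change is a lazy-initialization pattern: the first write to an uninitialized device allocates its metadata under a per-device mutex, while reads from an uninitialized device simply complete the bio successfully (as if the block were empty). The following condensed, kernel-style sketch illustrates that pattern using the names introduced by the patch; the struct field layout and the elided allocation steps are assumptions for illustration, not the exact driver code.

/* Condensed sketch only: field layout is assumed, see zram_drv.h for the real struct. */
struct zram {
        int init_done;            /* non-zero once per-device metadata exists */
        struct mutex init_lock;   /* serializes lazy init against device reset */
        /* ... compressed-memory pool, table array, stats, disksize ... */
};

int zram_init_device(struct zram *zram)
{
        mutex_lock(&zram->init_lock);
        if (zram->init_done) {
                /* Another writer raced us and already set the device up. */
                mutex_unlock(&zram->init_lock);
                return 0;
        }
        /* ... set disksize, allocate the table array and memory pool ... */
        zram->init_done = 1;
        mutex_unlock(&zram->init_lock);
        return 0;
}

static int zram_write(struct zram *zram, struct bio *bio)
{
        /* The first write to the device triggers metadata allocation. */
        if (unlikely(!zram->init_done)) {
                int ret = zram_init_device(zram);
                if (ret)
                        return ret;
        }
        /* ... compress and store each bio segment as usual ... */
        return 0;
}

Re-checking init_done under init_lock makes a concurrent writer that loses the race a harmless no-op, which is also why zram_init_device now returns 0 instead of -EBUSY for an already-initialized device.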
Diffstat (limited to 'drivers/staging/zram/zram_drv.c')
-rw-r--r--   drivers/staging/zram/zram_drv.c   35
1 file changed, 23 insertions(+), 12 deletions(-)
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 3f698a5fc68..c5f84ee3d28 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -207,9 +207,15 @@ static int zram_read(struct zram *zram, struct bio *bio)
        u32 index;
        struct bio_vec *bvec;

-       zram_stat64_inc(zram, &zram->stats.num_reads);
+       if (unlikely(!zram->init_done)) {
+               set_bit(BIO_UPTODATE, &bio->bi_flags);
+               bio_endio(bio, 0);
+               return 0;
+       }

+       zram_stat64_inc(zram, &zram->stats.num_reads);
        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
+
        bio_for_each_segment(bvec, bio, i) {
                int ret;
                size_t clen;
@@ -275,16 +281,20 @@ out:

 static int zram_write(struct zram *zram, struct bio *bio)
 {
-       int i;
+       int i, ret;
        u32 index;
        struct bio_vec *bvec;

-       zram_stat64_inc(zram, &zram->stats.num_writes);
+       if (unlikely(!zram->init_done)) {
+               ret = zram_init_device(zram);
+               if (ret)
+                       goto out;
+       }

+       zram_stat64_inc(zram, &zram->stats.num_writes);
        index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;

        bio_for_each_segment(bvec, bio, i) {
-               int ret;
                u32 offset;
                size_t clen;
                struct zobj_header *zheader;
@@ -425,11 +435,6 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
        int ret = 0;
        struct zram *zram = queue->queuedata;

-       if (unlikely(!zram->init_done)) {
-               bio_io_error(bio);
-               return 0;
-       }
-
        if (!valid_io_request(zram, bio)) {
                zram_stat64_inc(zram, &zram->stats.invalid_io);
                bio_io_error(bio);
@@ -453,7 +458,7 @@ void zram_reset_device(struct zram *zram)
 {
        size_t index;

-       /* Do not accept any new I/O request */
+       mutex_lock(&zram->init_lock);
        zram->init_done = 0;

        /* Free various per-device buffers */
@@ -490,6 +495,7 @@ void zram_reset_device(struct zram *zram)
        memset(&zram->stats, 0, sizeof(zram->stats));

        zram->disksize = 0;
+       mutex_unlock(&zram->init_lock);
 }

 int zram_init_device(struct zram *zram)
@@ -497,9 +503,11 @@ int zram_init_device(struct zram *zram)
        int ret;
        size_t num_pages;

+       mutex_lock(&zram->init_lock);
+
        if (zram->init_done) {
-               pr_info("Device already initialized!\n");
-               return -EBUSY;
+               mutex_unlock(&zram->init_lock);
+               return 0;
        }

        zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);
@@ -542,11 +550,13 @@ int zram_init_device(struct zram *zram)
        }

        zram->init_done = 1;
+       mutex_unlock(&zram->init_lock);

        pr_debug("Initialization done!\n");
        return 0;

 fail:
+       mutex_unlock(&zram->init_lock);
        zram_reset_device(zram);

        pr_err("Initialization failed: err=%d\n", ret);
@@ -572,6 +582,7 @@ static int create_device(struct zram *zram, int device_id)
        int ret = 0;

        mutex_init(&zram->lock);
+       mutex_init(&zram->init_lock);
        spin_lock_init(&zram->stat64_lock);

        zram->queue = blk_alloc_queue(GFP_KERNEL);