author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 12:40:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 12:40:26 -0400
commit	9f3938346a5c1fa504647670edb5fea5756cfb00 (patch)
tree	7cf6d24d6b076c8db8571494984924cac03703a2 /drivers/staging/zram
parent	69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff)
parent	317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff)
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic() [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...
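Every commit in the series applies the same mechanical transformation: the manual KM_* slot argument is dropped from both kmap_atomic() and kunmap_atomic(). A minimal before/after sketch of the shape of that conversion (the helpers below are illustrative, not taken from any of the converted drivers; the two forms obviously cannot coexist in a single tree):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Before the series: the caller had to pick a KM_* slot by hand. */
	static void zero_page_old(struct page *page)
	{
		void *mem = kmap_atomic(page, KM_USER0);

		memset(mem, 0, PAGE_SIZE);
		kunmap_atomic(mem, KM_USER0);
	}

	/* After: the slot is managed implicitly on a per-CPU stack. */
	static void zero_page_new(struct page *page)
	{
		void *mem = kmap_atomic(page);

		memset(mem, 0, PAGE_SIZE);
		kunmap_atomic(mem);
	}

The zram hunks below are a representative slice of the series: every call site loses its slot argument, and nothing else changes.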
Diffstat (limited to 'drivers/staging/zram')
-rw-r--r--	drivers/staging/zram/zram_drv.c	32
1 file changed, 16 insertions(+), 16 deletions(-)
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 7f138196b3c..685d612a627 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -175,9 +175,9 @@ static void handle_zero_page(struct bio_vec *bvec)
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -188,12 +188,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
 	struct page *page = bvec->bv_page;
 	unsigned char *user_mem, *cmem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
-	cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
+	user_mem = kmap_atomic(page);
+	cmem = kmap_atomic(zram->table[index].handle);
 
 	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-	kunmap_atomic(cmem, KM_USER1);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -242,7 +242,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		}
 	}
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	if (!is_partial_io(bvec))
 		uncmem = user_mem;
 	clen = PAGE_SIZE;
@@ -260,7 +260,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	}
 
 	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -292,7 +292,7 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 	/* Page is stored uncompressed since it's incompressible */
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 		memcpy(mem, cmem, PAGE_SIZE);
-		kunmap_atomic(cmem, KM_USER0);
+		kunmap_atomic(cmem);
 		return 0;
 	}
 
@@ -351,7 +351,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	    zram_test_flag(zram, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 
 	if (is_partial_io(bvec))
 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -360,7 +360,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		uncmem = user_mem;
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem, KM_USER0);
+		kunmap_atomic(user_mem);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
@@ -372,7 +372,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       zram->compress_workmem);
 
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
 
@@ -400,8 +400,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_inc(&zram->stats.pages_expand);
 		handle = page_store;
-		src = kmap_atomic(page, KM_USER0);
-		cmem = kmap_atomic(page_store, KM_USER1);
+		src = kmap_atomic(page);
+		cmem = kmap_atomic(page_store);
 		goto memstore;
 	}
 
@@ -427,8 +427,8 @@ memstore:
 	memcpy(cmem, src, clen);
 
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-		kunmap_atomic(cmem, KM_USER1);
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(cmem);
+		kunmap_atomic(src);
 	} else {
 		zs_unmap_object(zram->mem_pool, handle);
 	}
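One subtlety worth noting in the two-page hunks above: with the explicit KM_* slots gone, kmap_atomic() mappings live on an implicit per-CPU stack, so nested mappings must be released in the reverse order they were taken. handle_uncompressed_page() and the memstore path already follow that discipline (cmem, mapped last, is unmapped first). A minimal sketch of the rule, with an illustrative helper name (for real code the kernel already provides copy_highpage() for this job):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Copy one (possibly highmem) page into another. */
	static void copy_page_atomic(struct page *dst_page, struct page *src_page)
	{
		void *dst = kmap_atomic(dst_page);	/* mapped first */
		void *src = kmap_atomic(src_page);	/* mapped second */

		memcpy(dst, src, PAGE_SIZE);

		kunmap_atomic(src);	/* unmapped first: LIFO order */
		kunmap_atomic(dst);	/* outer mapping released last */
	}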