author     Cong Wang <amwang@redhat.com>            2011-11-25 10:14:25 -0500
committer  Cong Wang <xiyou.wangcong@gmail.com>     2012-03-20 09:48:20 -0400
commit     ba82fe2e691146a92a25c85b611b436dc20f7825 (patch)
tree       51304ae3094e34cd350be30cdac1d97a453245f9
parent     e3debd27636cd49a7424b63cc193141f58f37207 (diff)
zram: remove the second argument of k[un]map_atomic()
Acked-by: Greg Kroah-Hartman <gregkh@suse.de>
Signed-off-by: Cong Wang <amwang@redhat.com>
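
The kernel's atomic kmap API no longer takes an explicit km_type slot: kmap_atomic() now pushes the mapping onto a small per-CPU slot stack and kunmap_atomic() pops it, so callers drop the KM_USER0/KM_USER1 argument and simply release mappings in the reverse order they were taken, as the call sites below already do. A minimal before/after sketch of the calling convention (illustrative only, not taken from this patch; src_page, dst_page, src and dst are placeholder names):

	/* Old interface: the caller had to pick a km_type slot per nesting level. */
	src = kmap_atomic(src_page, KM_USER0);
	dst = kmap_atomic(dst_page, KM_USER1);
	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(dst, KM_USER1);
	kunmap_atomic(src, KM_USER0);

	/* New interface: slots are managed implicitly; unmap in reverse map order. */
	src = kmap_atomic(src_page);
	dst = kmap_atomic(dst_page);
	memcpy(dst, src, PAGE_SIZE);
	kunmap_atomic(dst);
	kunmap_atomic(src);
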
-rw-r--r--   drivers/staging/zram/xvmalloc.c | 39
-rw-r--r--   drivers/staging/zram/zram_drv.c | 44
2 files changed, 41 insertions, 42 deletions
diff --git a/drivers/staging/zram/xvmalloc.c b/drivers/staging/zram/xvmalloc.c
index 1f9c5082b6d5..93ba8e9407aa 100644
--- a/drivers/staging/zram/xvmalloc.c
+++ b/drivers/staging/zram/xvmalloc.c
@@ -56,17 +56,17 @@ static void clear_flag(struct block_header *block, enum blockflags flag)
  * This is called from xv_malloc/xv_free path, so it
  * needs to be fast.
  */
-static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
+static void *get_ptr_atomic(struct page *page, u16 offset)
 {
 	unsigned char *base;
 
-	base = kmap_atomic(page, type);
+	base = kmap_atomic(page);
 	return base + offset;
 }
 
-static void put_ptr_atomic(void *ptr, enum km_type type)
+static void put_ptr_atomic(void *ptr)
 {
-	kunmap_atomic(ptr, type);
+	kunmap_atomic(ptr);
 }
 
 static u32 get_blockprev(struct block_header *block)
@@ -202,10 +202,10 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
 
 	if (block->link.next_page) {
 		nextblock = get_ptr_atomic(block->link.next_page,
-					block->link.next_offset, KM_USER1);
+					block->link.next_offset);
 		nextblock->link.prev_page = page;
 		nextblock->link.prev_offset = offset;
-		put_ptr_atomic(nextblock, KM_USER1);
+		put_ptr_atomic(nextblock);
 		/* If there was a next page then the free bits are set. */
 		return;
 	}
@@ -225,18 +225,18 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
 
 	if (block->link.prev_page) {
 		tmpblock = get_ptr_atomic(block->link.prev_page,
-					block->link.prev_offset, KM_USER1);
+					block->link.prev_offset);
 		tmpblock->link.next_page = block->link.next_page;
 		tmpblock->link.next_offset = block->link.next_offset;
-		put_ptr_atomic(tmpblock, KM_USER1);
+		put_ptr_atomic(tmpblock);
 	}
 
 	if (block->link.next_page) {
 		tmpblock = get_ptr_atomic(block->link.next_page,
-					block->link.next_offset, KM_USER1);
+					block->link.next_offset);
 		tmpblock->link.prev_page = block->link.prev_page;
 		tmpblock->link.prev_offset = block->link.prev_offset;
-		put_ptr_atomic(tmpblock, KM_USER1);
+		put_ptr_atomic(tmpblock);
 	}
 
 	/* Is this block is at the head of the freelist? */
@@ -249,11 +249,10 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
 	if (pool->freelist[slindex].page) {
 		struct block_header *tmpblock;
 		tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
-					pool->freelist[slindex].offset,
-					KM_USER1);
+					pool->freelist[slindex].offset);
 		tmpblock->link.prev_page = NULL;
 		tmpblock->link.prev_offset = 0;
-		put_ptr_atomic(tmpblock, KM_USER1);
+		put_ptr_atomic(tmpblock);
 	} else {
 		/* This freelist bucket is empty */
 		__clear_bit(slindex % BITS_PER_LONG,
@@ -284,7 +283,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
 	stat_inc(&pool->total_pages);
 
 	spin_lock(&pool->lock);
-	block = get_ptr_atomic(page, 0, KM_USER0);
+	block = get_ptr_atomic(page, 0);
 
 	block->size = PAGE_SIZE - XV_ALIGN;
 	set_flag(block, BLOCK_FREE);
@@ -293,7 +292,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
 
 	insert_block(pool, page, 0, block);
 
-	put_ptr_atomic(block, KM_USER0);
+	put_ptr_atomic(block);
 	spin_unlock(&pool->lock);
 
 	return 0;
@@ -375,7 +374,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 		return -ENOMEM;
 	}
 
-	block = get_ptr_atomic(*page, *offset, KM_USER0);
+	block = get_ptr_atomic(*page, *offset);
 
 	remove_block(pool, *page, *offset, block, index);
 
@@ -405,7 +404,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 	block->size = origsize;
 	clear_flag(block, BLOCK_FREE);
 
-	put_ptr_atomic(block, KM_USER0);
+	put_ptr_atomic(block);
 	spin_unlock(&pool->lock);
 
 	*offset += XV_ALIGN;
@@ -426,7 +425,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 
 	spin_lock(&pool->lock);
 
-	page_start = get_ptr_atomic(page, 0, KM_USER0);
+	page_start = get_ptr_atomic(page, 0);
 	block = (struct block_header *)((char *)page_start + offset);
 
 	/* Catch double free bugs */
@@ -468,7 +467,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 
 	/* No used objects in this page. Free it. */
 	if (block->size == PAGE_SIZE - XV_ALIGN) {
-		put_ptr_atomic(page_start, KM_USER0);
+		put_ptr_atomic(page_start);
 		spin_unlock(&pool->lock);
 
 		__free_page(page);
@@ -486,7 +485,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 		set_blockprev(tmpblock, offset);
 	}
 
-	put_ptr_atomic(page_start, KM_USER0);
+	put_ptr_atomic(page_start);
 	spin_unlock(&pool->lock);
 }
 EXPORT_SYMBOL_GPL(xv_free);
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 2a2a92d389e6..1cf68ad20fa6 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -161,9 +161,9 @@ static void zram_free_page(struct zram *zram, size_t index)
 		goto out;
 	}
 
-	obj = kmap_atomic(page, KM_USER0) + offset;
+	obj = kmap_atomic(page) + offset;
 	clen = xv_get_object_size(obj) - sizeof(struct zobj_header);
-	kunmap_atomic(obj, KM_USER0);
+	kunmap_atomic(obj);
 
 	xv_free(zram->mem_pool, page, offset);
 	if (clen <= PAGE_SIZE / 2)
@@ -182,9 +182,9 @@ static void handle_zero_page(struct bio_vec *bvec)
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -195,12 +195,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
 	struct page *page = bvec->bv_page;
 	unsigned char *user_mem, *cmem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1);
+	user_mem = kmap_atomic(page);
+	cmem = kmap_atomic(zram->table[index].page);
 
 	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-	kunmap_atomic(cmem, KM_USER1);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -249,12 +249,12 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		}
 	}
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	if (!is_partial_io(bvec))
 		uncmem = user_mem;
 	clen = PAGE_SIZE;
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+	cmem = kmap_atomic(zram->table[index].page) +
 		zram->table[index].offset;
 
 	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
@@ -267,8 +267,8 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		kfree(uncmem);
 	}
 
-	kunmap_atomic(cmem, KM_USER1);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem);
+	kunmap_atomic(user_mem);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -295,20 +295,20 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 		return 0;
 	}
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER0) +
+	cmem = kmap_atomic(zram->table[index].page) +
 		zram->table[index].offset;
 
 	/* Page is stored uncompressed since it's incompressible */
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 		memcpy(mem, cmem, PAGE_SIZE);
-		kunmap_atomic(cmem, KM_USER0);
+		kunmap_atomic(cmem);
 		return 0;
 	}
 
 	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
 			xv_get_object_size(cmem) - sizeof(*zheader),
 			mem, &clen);
-	kunmap_atomic(cmem, KM_USER0);
+	kunmap_atomic(cmem);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -359,7 +359,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	    zram_test_flag(zram, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 
 	if (is_partial_io(bvec))
 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -368,7 +368,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		uncmem = user_mem;
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem, KM_USER0);
+		kunmap_atomic(user_mem);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
@@ -380,7 +380,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       zram->compress_workmem);
 
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
 
@@ -408,7 +408,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_inc(&zram->stats.pages_expand);
 		zram->table[index].page = page_store;
-		src = kmap_atomic(page, KM_USER0);
+		src = kmap_atomic(page);
 		goto memstore;
 	}
 
@@ -424,7 +424,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 memstore:
 	zram->table[index].offset = store_offset;
 
-	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
+	cmem = kmap_atomic(zram->table[index].page) +
 		zram->table[index].offset;
 
 #if 0
@@ -438,9 +438,9 @@ memstore:
 
 	memcpy(cmem, src, clen);
 
-	kunmap_atomic(cmem, KM_USER1);
+	kunmap_atomic(cmem);
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED)))
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(src);
 
 	/* Update stats */
 	zram_stat64_add(zram, &zram->stats.compr_size, clen);