author		Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 12:40:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 12:40:26 -0400
commit		9f3938346a5c1fa504647670edb5fea5756cfb00 (patch)
tree		7cf6d24d6b076c8db8571494984924cac03703a2 /drivers/staging
parent		69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff)
parent		317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff)
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic()
  [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...
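The bulk of the series is mechanical: kmap_atomic() and kunmap_atomic() keep their behaviour, only the no-longer-used km_type slot argument goes away. A minimal sketch of the before/after calling convention; the helper below is hypothetical and not part of this merge:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/*
	 * Illustrative only: copy a buffer into a (possibly highmem) page
	 * using the new single-argument kmap_atomic()/kunmap_atomic().
	 */
	static void fill_page(struct page *page, const void *src, size_t len)
	{
		void *dst;

		dst = kmap_atomic(page);	/* was: kmap_atomic(page, KM_USER0) */
		memcpy(dst, src, len);
		kunmap_atomic(dst);		/* was: kunmap_atomic(dst, KM_USER0) */
	}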
Diffstat (limited to 'drivers/staging')
-rw-r--r--	drivers/staging/ramster/xvmalloc.c			39
-rw-r--r--	drivers/staging/ramster/zcache-main.c			20
-rw-r--r--	drivers/staging/rtl8192u/ieee80211/cipher.c		8
-rw-r--r--	drivers/staging/rtl8192u/ieee80211/digest.c		8
-rw-r--r--	drivers/staging/rtl8192u/ieee80211/internal.h		17
-rw-r--r--	drivers/staging/rtl8192u/ieee80211/kmap_types.h		20
-rw-r--r--	drivers/staging/rtl8192u/ieee80211/scatterwalk.c	19
-rw-r--r--	drivers/staging/zcache/zcache-main.c			12
-rw-r--r--	drivers/staging/zram/zram_drv.c				32
9 files changed, 64 insertions, 111 deletions
diff --git a/drivers/staging/ramster/xvmalloc.c b/drivers/staging/ramster/xvmalloc.c
index 1f9c5082b6d5..93ba8e9407aa 100644
--- a/drivers/staging/ramster/xvmalloc.c
+++ b/drivers/staging/ramster/xvmalloc.c
@@ -56,17 +56,17 @@ static void clear_flag(struct block_header *block, enum blockflags flag)
  * This is called from xv_malloc/xv_free path, so it
  * needs to be fast.
  */
-static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type)
+static void *get_ptr_atomic(struct page *page, u16 offset)
 {
 	unsigned char *base;
 
-	base = kmap_atomic(page, type);
+	base = kmap_atomic(page);
 	return base + offset;
 }
 
-static void put_ptr_atomic(void *ptr, enum km_type type)
+static void put_ptr_atomic(void *ptr)
 {
-	kunmap_atomic(ptr, type);
+	kunmap_atomic(ptr);
 }
 
 static u32 get_blockprev(struct block_header *block)
@@ -202,10 +202,10 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
 
 	if (block->link.next_page) {
 		nextblock = get_ptr_atomic(block->link.next_page,
-					block->link.next_offset, KM_USER1);
+					block->link.next_offset);
 		nextblock->link.prev_page = page;
 		nextblock->link.prev_offset = offset;
-		put_ptr_atomic(nextblock, KM_USER1);
+		put_ptr_atomic(nextblock);
 		/* If there was a next page then the free bits are set. */
 		return;
 	}
@@ -225,18 +225,18 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
 
 	if (block->link.prev_page) {
 		tmpblock = get_ptr_atomic(block->link.prev_page,
-					block->link.prev_offset, KM_USER1);
+					block->link.prev_offset);
 		tmpblock->link.next_page = block->link.next_page;
 		tmpblock->link.next_offset = block->link.next_offset;
-		put_ptr_atomic(tmpblock, KM_USER1);
+		put_ptr_atomic(tmpblock);
 	}
 
 	if (block->link.next_page) {
 		tmpblock = get_ptr_atomic(block->link.next_page,
-					block->link.next_offset, KM_USER1);
+					block->link.next_offset);
 		tmpblock->link.prev_page = block->link.prev_page;
 		tmpblock->link.prev_offset = block->link.prev_offset;
-		put_ptr_atomic(tmpblock, KM_USER1);
+		put_ptr_atomic(tmpblock);
 	}
 
 	/* Is this block is at the head of the freelist? */
@@ -249,11 +249,10 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
 	if (pool->freelist[slindex].page) {
 		struct block_header *tmpblock;
 		tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
-				pool->freelist[slindex].offset,
-				KM_USER1);
+				pool->freelist[slindex].offset);
 		tmpblock->link.prev_page = NULL;
 		tmpblock->link.prev_offset = 0;
-		put_ptr_atomic(tmpblock, KM_USER1);
+		put_ptr_atomic(tmpblock);
 	} else {
 		/* This freelist bucket is empty */
 		__clear_bit(slindex % BITS_PER_LONG,
@@ -284,7 +283,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
 	stat_inc(&pool->total_pages);
 
 	spin_lock(&pool->lock);
-	block = get_ptr_atomic(page, 0, KM_USER0);
+	block = get_ptr_atomic(page, 0);
 
 	block->size = PAGE_SIZE - XV_ALIGN;
 	set_flag(block, BLOCK_FREE);
@@ -293,7 +292,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
 
 	insert_block(pool, page, 0, block);
 
-	put_ptr_atomic(block, KM_USER0);
+	put_ptr_atomic(block);
 	spin_unlock(&pool->lock);
 
 	return 0;
@@ -375,7 +374,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 		return -ENOMEM;
 	}
 
-	block = get_ptr_atomic(*page, *offset, KM_USER0);
+	block = get_ptr_atomic(*page, *offset);
 
 	remove_block(pool, *page, *offset, block, index);
 
@@ -405,7 +404,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
 	block->size = origsize;
 	clear_flag(block, BLOCK_FREE);
 
-	put_ptr_atomic(block, KM_USER0);
+	put_ptr_atomic(block);
 	spin_unlock(&pool->lock);
 
 	*offset += XV_ALIGN;
@@ -426,7 +425,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 
 	spin_lock(&pool->lock);
 
-	page_start = get_ptr_atomic(page, 0, KM_USER0);
+	page_start = get_ptr_atomic(page, 0);
 	block = (struct block_header *)((char *)page_start + offset);
 
 	/* Catch double free bugs */
@@ -468,7 +467,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 
 	/* No used objects in this page. Free it. */
 	if (block->size == PAGE_SIZE - XV_ALIGN) {
-		put_ptr_atomic(page_start, KM_USER0);
+		put_ptr_atomic(page_start);
 		spin_unlock(&pool->lock);
 
 		__free_page(page);
@@ -486,7 +485,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
 		set_blockprev(tmpblock, offset);
 	}
 
-	put_ptr_atomic(page_start, KM_USER0);
+	put_ptr_atomic(page_start);
 	spin_unlock(&pool->lock);
 }
 EXPORT_SYMBOL_GPL(xv_free);
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index 36d53ed9d71a..68b2e053a0e6 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -496,13 +496,13 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
 	}
 	ASSERT_SENTINEL(zh, ZBH);
 	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
-	to_va = kmap_atomic(page, KM_USER0);
+	to_va = kmap_atomic(page);
 	size = zh->size;
 	from_va = zbud_data(zh, size);
 	ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
 	BUG_ON(ret != LZO_E_OK);
 	BUG_ON(out_len != PAGE_SIZE);
-	kunmap_atomic(to_va, KM_USER0);
+	kunmap_atomic(to_va);
 out:
 	spin_unlock(&zbpg->lock);
 	return ret;
@@ -1109,7 +1109,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
 		goto out;
 	atomic_inc(&zv_curr_dist_counts[chunks]);
 	atomic_inc(&zv_cumul_dist_counts[chunks]);
-	zv = kmap_atomic(page, KM_USER0) + offset;
+	zv = kmap_atomic(page) + offset;
 	zv->index = index;
 	zv->oid = *oid;
 	zv->pool_id = pool_id;
@@ -1123,7 +1123,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
 		spin_unlock(&zcache_rem_op_list_lock);
 	}
 	memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
-	kunmap_atomic(zv, KM_USER0);
+	kunmap_atomic(zv);
 out:
 	return zv;
 }
@@ -1145,7 +1145,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
 			&page, &offset, ZCACHE_GFP_MASK);
 	if (unlikely(ret))
 		goto out;
-	zv = kmap_atomic(page, KM_USER0) + offset;
+	zv = kmap_atomic(page) + offset;
 	SET_SENTINEL(zv, ZVH);
 	INIT_LIST_HEAD(&zv->rem_op.list);
 	zv->client_id = LOCAL_CLIENT;
@@ -1153,7 +1153,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
 	zv->index = index;
 	zv->oid = *oid;
 	zv->pool_id = pool->pool_id;
-	kunmap_atomic(zv, KM_USER0);
+	kunmap_atomic(zv);
out:
 	return zv;
 }
@@ -1194,10 +1194,10 @@ static void zv_decompress(struct page *page, struct zv_hdr *zv)
 	ASSERT_SENTINEL(zv, ZVH);
 	size = xv_get_object_size(zv) - sizeof(*zv);
 	BUG_ON(size == 0);
-	to_va = kmap_atomic(page, KM_USER0);
+	to_va = kmap_atomic(page);
 	ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
 			size, to_va, &clen);
-	kunmap_atomic(to_va, KM_USER0);
+	kunmap_atomic(to_va);
 	BUG_ON(ret != LZO_E_OK);
 	BUG_ON(clen != PAGE_SIZE);
 }
@@ -2203,12 +2203,12 @@ static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
 	BUG_ON(!irqs_disabled());
 	if (unlikely(dmem == NULL || wmem == NULL))
 		goto out;	/* no buffer, so can't compress */
-	from_va = kmap_atomic(from, KM_USER0);
+	from_va = kmap_atomic(from);
 	mb();
 	ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
 	BUG_ON(ret != LZO_E_OK);
 	*out_va = dmem;
-	kunmap_atomic(from_va, KM_USER0);
+	kunmap_atomic(from_va);
 	ret = 1;
out:
 	return ret;
diff --git a/drivers/staging/rtl8192u/ieee80211/cipher.c b/drivers/staging/rtl8192u/ieee80211/cipher.c
index 69dcc3176ebc..d47345c4adcf 100644
--- a/drivers/staging/rtl8192u/ieee80211/cipher.c
+++ b/drivers/staging/rtl8192u/ieee80211/cipher.c
@@ -71,8 +71,8 @@ static int crypt(struct crypto_tfm *tfm,
 	u8 *src_p, *dst_p;
 	int in_place;
 
-	scatterwalk_map(&walk_in, 0);
-	scatterwalk_map(&walk_out, 1);
+	scatterwalk_map(&walk_in);
+	scatterwalk_map(&walk_out);
 	src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
 	dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
 	in_place = scatterwalk_samebuf(&walk_in, &walk_out,
@@ -84,10 +84,10 @@ static int crypt(struct crypto_tfm *tfm,
 
 	prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
 
-	scatterwalk_done(&walk_in, 0, nbytes);
+	scatterwalk_done(&walk_in, nbytes);
 
 	scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
-	scatterwalk_done(&walk_out, 1, nbytes);
+	scatterwalk_done(&walk_out, nbytes);
 
 	if (!nbytes)
 		return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/digest.c b/drivers/staging/rtl8192u/ieee80211/digest.c
index 301ed514ac9e..05e7497fd106 100644
--- a/drivers/staging/rtl8192u/ieee80211/digest.c
+++ b/drivers/staging/rtl8192u/ieee80211/digest.c
@@ -39,12 +39,12 @@ static void update(struct crypto_tfm *tfm,
 			unsigned int bytes_from_page = min(l, ((unsigned int)
 						(PAGE_SIZE)) -
 						offset);
-			char *p = crypto_kmap(pg, 0) + offset;
+			char *p = kmap_atomic(pg) + offset;
 
 			tfm->__crt_alg->cra_digest.dia_update
 					(crypto_tfm_ctx(tfm), p,
 					bytes_from_page);
-			crypto_kunmap(p, 0);
+			kunmap_atomic(p);
 			crypto_yield(tfm);
 			offset = 0;
 			pg++;
@@ -75,10 +75,10 @@ static void digest(struct crypto_tfm *tfm,
 	tfm->crt_digest.dit_init(tfm);
 
 	for (i = 0; i < nsg; i++) {
-		char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset;
+		char *p = kmap_atomic(sg[i].page) + sg[i].offset;
 		tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
 						p, sg[i].length);
-		crypto_kunmap(p, 0);
+		kunmap_atomic(p);
 		crypto_yield(tfm);
 	}
 	crypto_digest_final(tfm, out);
diff --git a/drivers/staging/rtl8192u/ieee80211/internal.h b/drivers/staging/rtl8192u/ieee80211/internal.h
index a7c096eb269f..bebe13ac53b7 100644
--- a/drivers/staging/rtl8192u/ieee80211/internal.h
+++ b/drivers/staging/rtl8192u/ieee80211/internal.h
@@ -23,23 +23,6 @@
 #include <asm/kmap_types.h>
 
 
-extern enum km_type crypto_km_types[];
-
-static inline enum km_type crypto_kmap_type(int out)
-{
-	return crypto_km_types[(in_softirq() ? 2 : 0) + out];
-}
-
-static inline void *crypto_kmap(struct page *page, int out)
-{
-	return kmap_atomic(page, crypto_kmap_type(out));
-}
-
-static inline void crypto_kunmap(void *vaddr, int out)
-{
-	kunmap_atomic(vaddr, crypto_kmap_type(out));
-}
-
 static inline void crypto_yield(struct crypto_tfm *tfm)
 {
 	if (!in_softirq())
diff --git a/drivers/staging/rtl8192u/ieee80211/kmap_types.h b/drivers/staging/rtl8192u/ieee80211/kmap_types.h
deleted file mode 100644
index de67bb01b5f5..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/kmap_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#ifndef __KMAP_TYPES_H
-
-#define __KMAP_TYPES_H
-
-
-enum km_type {
-	KM_BOUNCE_READ,
-	KM_SKB_SUNRPC_DATA,
-	KM_SKB_DATA_SOFTIRQ,
-	KM_USER0,
-	KM_USER1,
-	KM_BH_IRQ,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,
-	KM_TYPE_NR
-};
-
-#define _ASM_KMAP_TYPES_H
-
-#endif
diff --git a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
index 3543a6145046..8b73f6cefcf9 100644
--- a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
+++ b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
@@ -13,8 +13,6 @@
  * any later version.
  *
  */
-#include "kmap_types.h"
-
 #include <linux/kernel.h>
 #include <linux/mm.h>
 #include <linux/pagemap.h>
@@ -23,13 +21,6 @@
 #include "internal.h"
 #include "scatterwalk.h"
 
-enum km_type crypto_km_types[] = {
-	KM_USER0,
-	KM_USER1,
-	KM_SOFTIRQ0,
-	KM_SOFTIRQ1,
-};
-
 void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
 {
 	if (nbytes <= walk->len_this_page &&
@@ -62,9 +53,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
 	walk->offset = sg->offset;
 }
 
-void scatterwalk_map(struct scatter_walk *walk, int out)
+void scatterwalk_map(struct scatter_walk *walk)
 {
-	walk->data = crypto_kmap(walk->page, out) + walk->offset;
+	walk->data = kmap_atomic(walk->page) + walk->offset;
 }
 
 static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
@@ -103,7 +94,7 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
  * has been verified as multiple of the block size.
  */
 int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
-			size_t nbytes, int out)
+			size_t nbytes)
 {
 	if (buf != walk->data) {
 		while (nbytes > walk->len_this_page) {
@@ -111,9 +102,9 @@ int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 			buf += walk->len_this_page;
 			nbytes -= walk->len_this_page;
 
-			crypto_kunmap(walk->data, out);
+			kunmap_atomic(walk->data);
 			scatterwalk_pagedone(walk, out, 1);
-			scatterwalk_map(walk, out);
+			scatterwalk_map(walk);
 		}
 
 		memcpy_dir(buf, walk->data, nbytes, out);
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 70734652f724..ed2c800b3a7e 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -455,14 +455,14 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
 	}
 	ASSERT_SENTINEL(zh, ZBH);
 	BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
-	to_va = kmap_atomic(page, KM_USER0);
+	to_va = kmap_atomic(page);
 	size = zh->size;
 	from_va = zbud_data(zh, size);
 	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
 				to_va, &out_len);
 	BUG_ON(ret);
 	BUG_ON(out_len != PAGE_SIZE);
-	kunmap_atomic(to_va, KM_USER0);
+	kunmap_atomic(to_va);
out:
 	spin_unlock(&zbpg->lock);
 	return ret;
@@ -753,10 +753,10 @@ static void zv_decompress(struct page *page, void *handle)
 	zv = zs_map_object(zcache_host.zspool, handle);
 	BUG_ON(zv->size == 0);
 	ASSERT_SENTINEL(zv, ZVH);
-	to_va = kmap_atomic(page, KM_USER0);
+	to_va = kmap_atomic(page);
 	ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
 				zv->size, to_va, &clen);
-	kunmap_atomic(to_va, KM_USER0);
+	kunmap_atomic(to_va);
 	zs_unmap_object(zcache_host.zspool, handle);
 	BUG_ON(ret);
 	BUG_ON(clen != PAGE_SIZE);
@@ -1334,13 +1334,13 @@ static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
 	if (unlikely(dmem == NULL))
 		goto out;	/* no buffer or no compressor so can't compress */
 	*out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
-	from_va = kmap_atomic(from, KM_USER0);
+	from_va = kmap_atomic(from);
 	mb();
 	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
 				out_len);
 	BUG_ON(ret);
 	*out_va = dmem;
-	kunmap_atomic(from_va, KM_USER0);
+	kunmap_atomic(from_va);
 	ret = 1;
out:
 	return ret;
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 7f138196b3c9..685d612a627b 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -175,9 +175,9 @@ static void handle_zero_page(struct bio_vec *bvec)
 	struct page *page = bvec->bv_page;
 	void *user_mem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -188,12 +188,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
 	struct page *page = bvec->bv_page;
 	unsigned char *user_mem, *cmem;
 
-	user_mem = kmap_atomic(page, KM_USER0);
-	cmem = kmap_atomic(zram->table[index].handle, KM_USER1);
+	user_mem = kmap_atomic(page);
+	cmem = kmap_atomic(zram->table[index].handle);
 
 	memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
-	kunmap_atomic(cmem, KM_USER1);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(cmem);
+	kunmap_atomic(user_mem);
 
 	flush_dcache_page(page);
 }
@@ -242,7 +242,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 		}
 	}
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 	if (!is_partial_io(bvec))
 		uncmem = user_mem;
 	clen = PAGE_SIZE;
@@ -260,7 +260,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
 	}
 
 	zs_unmap_object(zram->mem_pool, zram->table[index].handle);
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 
 	/* Should NEVER happen. Return bio error if it does. */
 	if (unlikely(ret != LZO_E_OK)) {
@@ -292,7 +292,7 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 	/* Page is stored uncompressed since it's incompressible */
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 		memcpy(mem, cmem, PAGE_SIZE);
-		kunmap_atomic(cmem, KM_USER0);
+		kunmap_atomic(cmem);
 		return 0;
 	}
 
@@ -351,7 +351,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	    zram_test_flag(zram, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 
 	if (is_partial_io(bvec))
 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -360,7 +360,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		uncmem = user_mem;
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem, KM_USER0);
+		kunmap_atomic(user_mem);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
@@ -372,7 +372,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       zram->compress_workmem);
 
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
 
@@ -400,8 +400,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_inc(&zram->stats.pages_expand);
 		handle = page_store;
-		src = kmap_atomic(page, KM_USER0);
-		cmem = kmap_atomic(page_store, KM_USER1);
+		src = kmap_atomic(page);
+		cmem = kmap_atomic(page_store);
 		goto memstore;
 	}
 
@@ -427,8 +427,8 @@ memstore:
 	memcpy(cmem, src, clen);
 
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-		kunmap_atomic(cmem, KM_USER1);
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(cmem);
+		kunmap_atomic(src);
 	} else {
 		zs_unmap_object(zram->mem_pool, handle);
 	}