author    Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 12:40:26 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-03-21 12:40:26 -0400
commit    9f3938346a5c1fa504647670edb5fea5756cfb00 (patch)
tree      7cf6d24d6b076c8db8571494984924cac03703a2 /drivers
parent    69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff)
parent    317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff)
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic() [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...
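For reference, every change in this merge is the same mechanical conversion: the enum km_type slot argument (KM_USER0, KM_IRQ0, KM_SOFTIRQ0, ...) is dropped from kmap_atomic()/kunmap_atomic(), since the stack-based implementation now picks slots itself. A minimal before/after sketch of a caller (copy_highpage_sketch is an illustrative helper, not a function from this tree):

#include <linux/highmem.h>
#include <linux/string.h>

/* Illustrative only: copy one (possibly highmem) page. */
static void copy_highpage_sketch(struct page *dst, struct page *src)
{
	/*
	 * Old form, removed by this series:
	 *   void *d = kmap_atomic(dst, KM_USER0);
	 *   void *s = kmap_atomic(src, KM_USER1);
	 *   memcpy(d, s, PAGE_SIZE);
	 *   kunmap_atomic(s, KM_USER1);
	 *   kunmap_atomic(d, KM_USER0);
	 */
	void *d = kmap_atomic(dst);	/* new form: no slot argument */
	void *s = kmap_atomic(src);

	memcpy(d, s, PAGE_SIZE);

	kunmap_atomic(s);	/* mappings still nest: unmap in reverse order */
	kunmap_atomic(d);
}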
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/ata/libata-sff.c                        |  8
-rw-r--r--  drivers/block/brd.c                             | 20
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c                | 50
-rw-r--r--  drivers/block/drbd/drbd_nl.c                    |  4
-rw-r--r--  drivers/block/loop.c                            | 16
-rw-r--r--  drivers/block/pktcdvd.c                         |  8
-rw-r--r--  drivers/crypto/hifn_795x.c                      | 10
-rw-r--r--  drivers/edac/edac_mc.c                          |  4
-rw-r--r--  drivers/gpu/drm/drm_cache.c                     |  8
-rw-r--r--  drivers/gpu/drm/gma500/mmu.c                    | 30
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c                    | 16
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c             |  6
-rw-r--r--  drivers/ide/ide-taskfile.c                      |  4
-rw-r--r--  drivers/infiniband/ulp/iser/iser_memory.c       |  8
-rw-r--r--  drivers/md/bitmap.c                             | 42
-rw-r--r--  drivers/md/dm-crypt.c                           |  8
-rw-r--r--  drivers/media/video/ivtv/ivtv-udma.c            |  4
-rw-r--r--  drivers/memstick/host/jmb38x_ms.c               |  4
-rw-r--r--  drivers/memstick/host/tifm_ms.c                 |  4
-rw-r--r--  drivers/net/ethernet/intel/e1000/e1000_main.c   |  6
-rw-r--r--  drivers/net/ethernet/intel/e1000e/netdev.c      | 11
-rw-r--r--  drivers/net/ethernet/sun/cassini.c              |  4
-rw-r--r--  drivers/scsi/arcmsr/arcmsr_hba.c                |  8
-rw-r--r--  drivers/scsi/bnx2fc/bnx2fc_fcoe.c               |  5
-rw-r--r--  drivers/scsi/cxgbi/libcxgbi.c                   |  5
-rw-r--r--  drivers/scsi/fcoe/fcoe.c                        |  4
-rw-r--r--  drivers/scsi/fcoe/fcoe_transport.c              |  5
-rw-r--r--  drivers/scsi/gdth.c                             |  4
-rw-r--r--  drivers/scsi/ips.c                              |  6
-rw-r--r--  drivers/scsi/isci/request.c                     | 16
-rw-r--r--  drivers/scsi/libfc/fc_fcp.c                     |  8
-rw-r--r--  drivers/scsi/libfc/fc_libfc.c                   |  8
-rw-r--r--  drivers/scsi/libfc/fc_libfc.h                   |  2
-rw-r--r--  drivers/scsi/libfc/fc_lport.c                   |  2
-rw-r--r--  drivers/scsi/libiscsi_tcp.c                     |  4
-rw-r--r--  drivers/scsi/libsas/sas_host_smp.c              |  8
-rw-r--r--  drivers/scsi/megaraid.c                         |  4
-rw-r--r--  drivers/scsi/mvsas/mv_sas.c                     |  4
-rw-r--r--  drivers/scsi/scsi_debug.c                       | 24
-rw-r--r--  drivers/scsi/scsi_lib.c                         |  4
-rw-r--r--  drivers/scsi/sd_dif.c                           | 12
-rw-r--r--  drivers/scsi/storvsc_drv.c                      | 52
-rw-r--r--  drivers/staging/ramster/xvmalloc.c              | 39
-rw-r--r--  drivers/staging/ramster/zcache-main.c           | 20
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/cipher.c     |  8
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/digest.c     |  8
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/internal.h   | 17
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/kmap_types.h | 20
-rw-r--r--  drivers/staging/rtl8192u/ieee80211/scatterwalk.c| 19
-rw-r--r--  drivers/staging/zcache/zcache-main.c            | 12
-rw-r--r--  drivers/staging/zram/zram_drv.c                 | 32
-rw-r--r--  drivers/target/target_core_transport.c         |  4
-rw-r--r--  drivers/target/tcm_fc/tfc_io.c                  | 10
-rw-r--r--  drivers/vhost/vhost.c                           |  4
54 files changed, 297 insertions(+), 356 deletions(-)
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 9691dd0966d7..d8af325a6bda 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -720,13 +720,13 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 
 	/* FIXME: use a bounce buffer */
 	local_irq_save(flags);
-	buf = kmap_atomic(page, KM_IRQ0);
+	buf = kmap_atomic(page);
 
 	/* do the actual data transfer */
 	ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
 			       do_write);
 
-	kunmap_atomic(buf, KM_IRQ0);
+	kunmap_atomic(buf);
 	local_irq_restore(flags);
 } else {
 	buf = page_address(page);
@@ -865,13 +865,13 @@ next_sg:
 
 	/* FIXME: use bounce buffer */
 	local_irq_save(flags);
-	buf = kmap_atomic(page, KM_IRQ0);
+	buf = kmap_atomic(page);
 
 	/* do the actual data transfer */
 	consumed = ap->ops->sff_data_xfer(dev, buf + offset,
 					  count, rw);
 
-	kunmap_atomic(buf, KM_IRQ0);
+	kunmap_atomic(buf);
 	local_irq_restore(flags);
 } else {
 	buf = page_address(page);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index ec246437f5a4..531ceb31d0ff 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -242,9 +242,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
 	page = brd_lookup_page(brd, sector);
 	BUG_ON(!page);
 
-	dst = kmap_atomic(page, KM_USER1);
+	dst = kmap_atomic(page);
 	memcpy(dst + offset, src, copy);
-	kunmap_atomic(dst, KM_USER1);
+	kunmap_atomic(dst);
 
 	if (copy < n) {
 		src += copy;
@@ -253,9 +253,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
 		page = brd_lookup_page(brd, sector);
 		BUG_ON(!page);
 
-		dst = kmap_atomic(page, KM_USER1);
+		dst = kmap_atomic(page);
 		memcpy(dst, src, copy);
-		kunmap_atomic(dst, KM_USER1);
+		kunmap_atomic(dst);
 	}
 }
 
@@ -273,9 +273,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
 	copy = min_t(size_t, n, PAGE_SIZE - offset);
 	page = brd_lookup_page(brd, sector);
 	if (page) {
-		src = kmap_atomic(page, KM_USER1);
+		src = kmap_atomic(page);
 		memcpy(dst, src + offset, copy);
-		kunmap_atomic(src, KM_USER1);
+		kunmap_atomic(src);
 	} else
 		memset(dst, 0, copy);
 
@@ -285,9 +285,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
 		copy = n - copy;
 		page = brd_lookup_page(brd, sector);
 		if (page) {
-			src = kmap_atomic(page, KM_USER1);
+			src = kmap_atomic(page);
 			memcpy(dst, src, copy);
-			kunmap_atomic(src, KM_USER1);
+			kunmap_atomic(src);
 		} else
 			memset(dst, 0, copy);
 	}
@@ -309,7 +309,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 		goto out;
 	}
 
-	mem = kmap_atomic(page, KM_USER0);
+	mem = kmap_atomic(page);
 	if (rw == READ) {
 		copy_from_brd(mem + off, brd, sector, len);
 		flush_dcache_page(page);
@@ -317,7 +317,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 		flush_dcache_page(page);
 		copy_to_brd(brd, mem + off, sector, len);
 	}
-	kunmap_atomic(mem, KM_USER0);
+	kunmap_atomic(mem);
 
 out:
 	return err;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 912f585a760f..3030201c69d8 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -289,25 +289,25 @@ static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
 	return page_nr;
 }
 
-static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
+static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
 {
 	struct page *page = b->bm_pages[idx];
-	return (unsigned long *) kmap_atomic(page, km);
+	return (unsigned long *) kmap_atomic(page);
 }
 
 static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
 {
-	return __bm_map_pidx(b, idx, KM_IRQ1);
+	return __bm_map_pidx(b, idx);
 }
 
-static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
+static void __bm_unmap(unsigned long *p_addr)
 {
-	kunmap_atomic(p_addr, km);
+	kunmap_atomic(p_addr);
 };
 
 static void bm_unmap(unsigned long *p_addr)
 {
-	return __bm_unmap(p_addr, KM_IRQ1);
+	return __bm_unmap(p_addr);
 }
 
 /* long word offset of _bitmap_ sector */
@@ -543,15 +543,15 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
 
 	/* all but last page */
 	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
-		p_addr = __bm_map_pidx(b, idx, KM_USER0);
+		p_addr = __bm_map_pidx(b, idx);
 		for (i = 0; i < LWPP; i++)
 			bits += hweight_long(p_addr[i]);
-		__bm_unmap(p_addr, KM_USER0);
+		__bm_unmap(p_addr);
 		cond_resched();
 	}
 	/* last (or only) page */
 	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
-	p_addr = __bm_map_pidx(b, idx, KM_USER0);
+	p_addr = __bm_map_pidx(b, idx);
 	for (i = 0; i < last_word; i++)
 		bits += hweight_long(p_addr[i]);
 	p_addr[last_word] &= cpu_to_lel(mask);
@@ -559,7 +559,7 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
 	/* 32bit arch, may have an unused padding long */
 	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
 		p_addr[last_word+1] = 0;
-	__bm_unmap(p_addr, KM_USER0);
+	__bm_unmap(p_addr);
 	return bits;
 }
 
@@ -970,11 +970,11 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 		 * to use pre-allocated page pool */
 		void *src, *dest;
 		page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
-		dest = kmap_atomic(page, KM_USER0);
-		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
+		dest = kmap_atomic(page);
+		src = kmap_atomic(b->bm_pages[page_nr]);
 		memcpy(dest, src, PAGE_SIZE);
-		kunmap_atomic(src, KM_USER1);
-		kunmap_atomic(dest, KM_USER0);
+		kunmap_atomic(src);
+		kunmap_atomic(dest);
 		bm_store_page_idx(page, page_nr);
 	} else
 		page = b->bm_pages[page_nr];
@@ -1163,7 +1163,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
  * this returns a bit number, NOT a sector!
  */
 static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
-	const int find_zero_bit, const enum km_type km)
+	const int find_zero_bit)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long *p_addr;
@@ -1178,7 +1178,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
 	while (bm_fo < b->bm_bits) {
 		/* bit offset of the first bit in the page */
 		bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
-		p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);
+		p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));
 
 		if (find_zero_bit)
 			i = find_next_zero_bit_le(p_addr,
@@ -1187,7 +1187,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
 			i = find_next_bit_le(p_addr,
 				PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
 
-		__bm_unmap(p_addr, km);
+		__bm_unmap(p_addr);
 		if (i < PAGE_SIZE*8) {
 			bm_fo = bit_offset + i;
 			if (bm_fo >= b->bm_bits)
@@ -1215,7 +1215,7 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
 	if (BM_DONT_TEST & b->bm_flags)
 		bm_print_lock_info(mdev);
 
-	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
+	i = __bm_find_next(mdev, bm_fo, find_zero_bit);
 
 	spin_unlock_irq(&b->bm_lock);
 	return i;
@@ -1239,13 +1239,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo
 unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
 {
 	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
-	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
+	return __bm_find_next(mdev, bm_fo, 0);
 }
 
 unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
 {
 	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
-	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
+	return __bm_find_next(mdev, bm_fo, 1);
 }
 
 /* returns number of bits actually changed.
@@ -1273,14 +1273,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
 		if (page_nr != last_page_nr) {
 			if (p_addr)
-				__bm_unmap(p_addr, KM_IRQ1);
+				__bm_unmap(p_addr);
 			if (c < 0)
 				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 			else if (c > 0)
 				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
 			changed_total += c;
 			c = 0;
-			p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
+			p_addr = __bm_map_pidx(b, page_nr);
 			last_page_nr = page_nr;
 		}
 		if (val)
@@ -1289,7 +1289,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
 	}
 	if (p_addr)
-		__bm_unmap(p_addr, KM_IRQ1);
+		__bm_unmap(p_addr);
 	if (c < 0)
 		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 	else if (c > 0)
@@ -1342,13 +1342,13 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
 {
 	int i;
 	int bits;
-	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
+	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
 	for (i = first_word; i < last_word; i++) {
 		bits = hweight_long(paddr[i]);
 		paddr[i] = ~0UL;
 		b->bm_set += BITS_PER_LONG - bits;
 	}
-	kunmap_atomic(paddr, KM_IRQ1);
+	kunmap_atomic(paddr);
 }
 
 /* Same thing as drbd_bm_set_bits,
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index af2a25049bce..e09f9cebbb20 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2526,10 +2526,10 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
 
 	page = e->pages;
 	page_chain_for_each(page) {
-		void *d = kmap_atomic(page, KM_USER0);
+		void *d = kmap_atomic(page);
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
 		memcpy(tl, d, l);
-		kunmap_atomic(d, KM_USER0);
+		kunmap_atomic(d);
 		tl = (unsigned short*)((char*)tl + l);
 		len -= l;
 		if (len == 0)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cd504353b278..bbca966f8f66 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -93,16 +93,16 @@ static int transfer_none(struct loop_device *lo, int cmd,
 			 struct page *loop_page, unsigned loop_off,
 			 int size, sector_t real_block)
 {
-	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
-	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+	char *raw_buf = kmap_atomic(raw_page) + raw_off;
+	char *loop_buf = kmap_atomic(loop_page) + loop_off;
 
 	if (cmd == READ)
 		memcpy(loop_buf, raw_buf, size);
 	else
 		memcpy(raw_buf, loop_buf, size);
 
-	kunmap_atomic(loop_buf, KM_USER1);
-	kunmap_atomic(raw_buf, KM_USER0);
+	kunmap_atomic(loop_buf);
+	kunmap_atomic(raw_buf);
 	cond_resched();
 	return 0;
 }
@@ -112,8 +112,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
 			struct page *loop_page, unsigned loop_off,
 			int size, sector_t real_block)
 {
-	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
-	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+	char *raw_buf = kmap_atomic(raw_page) + raw_off;
+	char *loop_buf = kmap_atomic(loop_page) + loop_off;
 	char *in, *out, *key;
 	int i, keysize;
 
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
 	for (i = 0; i < size; i++)
 		*out++ = *in++ ^ key[(i & 511) % keysize];
 
-	kunmap_atomic(loop_buf, KM_USER1);
-	kunmap_atomic(raw_buf, KM_USER0);
+	kunmap_atomic(loop_buf);
+	kunmap_atomic(raw_buf);
 	cond_resched();
 	return 0;
 }
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d59edeabd93f..ba66e4445f41 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -987,14 +987,14 @@ static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct pag
 
 	while (copy_size > 0) {
 		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
-		void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
+		void *vfrom = kmap_atomic(src_bvl->bv_page) +
 			src_bvl->bv_offset + offs;
 		void *vto = page_address(dst_page) + dst_offs;
 		int len = min_t(int, copy_size, src_bvl->bv_len - offs);
 
 		BUG_ON(len < 0);
 		memcpy(vto, vfrom, len);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 
 		seg++;
 		offs = 0;
@@ -1019,10 +1019,10 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
 	offs = 0;
 	for (f = 0; f < pkt->frames; f++) {
 		if (bvec[f].bv_page != pkt->pages[p]) {
-			void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset;
+			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
 			void *vto = page_address(pkt->pages[p]) + offs;
 			memcpy(vto, vfrom, CD_FRAMESIZE);
-			kunmap_atomic(vfrom, KM_USER0);
+			kunmap_atomic(vfrom);
 			bvec[f].bv_page = pkt->pages[p];
 			bvec[f].bv_offset = offs;
 		} else {
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index fe765f49de58..76368f984023 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1731,9 +1731,9 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
 	while (size) {
 		copy = min3(srest, dst->length, size);
 
-		daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
+		daddr = kmap_atomic(sg_page(dst));
 		memcpy(daddr + dst->offset + offset, saddr, copy);
-		kunmap_atomic(daddr, KM_IRQ0);
+		kunmap_atomic(daddr);
 
 		nbytes -= copy;
 		size -= copy;
@@ -1793,17 +1793,17 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
 			continue;
 		}
 
-		saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
+		saddr = kmap_atomic(sg_page(t));
 
 		err = ablkcipher_get(saddr, &t->length, t->offset,
 				dst, nbytes, &nbytes);
 		if (err < 0) {
-			kunmap_atomic(saddr, KM_SOFTIRQ0);
+			kunmap_atomic(saddr);
 			break;
 		}
 
 		idx += err;
-		kunmap_atomic(saddr, KM_SOFTIRQ0);
+		kunmap_atomic(saddr);
 	}
 
 	hifn_cipher_walk_exit(&rctx->walk);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index ca6c04d350ee..da09cd74bc5b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -620,13 +620,13 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
 	if (PageHighMem(pg))
 		local_irq_save(flags);
 
-	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
+	virt_addr = kmap_atomic(pg);
 
 	/* Perform architecture specific atomic scrub operation */
 	atomic_scrub(virt_addr + offset, size);
 
 	/* Unmap and complete */
-	kunmap_atomic(virt_addr, KM_BOUNCE_READ);
+	kunmap_atomic(virt_addr);
 
 	if (PageHighMem(pg))
 		local_irq_restore(flags);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 592865381c6e..4b8653b932f9 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -41,10 +41,10 @@ drm_clflush_page(struct page *page)
 	if (unlikely(page == NULL))
 		return;
 
-	page_virtual = kmap_atomic(page, KM_USER0);
+	page_virtual = kmap_atomic(page);
 	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
 		clflush(page_virtual + i);
-	kunmap_atomic(page_virtual, KM_USER0);
+	kunmap_atomic(page_virtual);
 }
 
 static void drm_cache_flush_clflush(struct page *pages[],
@@ -87,10 +87,10 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 		if (unlikely(page == NULL))
 			continue;
 
-		page_virtual = kmap_atomic(page, KM_USER0);
+		page_virtual = kmap_atomic(page);
 		flush_dcache_range((unsigned long)page_virtual,
 				   (unsigned long)page_virtual + PAGE_SIZE);
-		kunmap_atomic(page_virtual, KM_USER0);
+		kunmap_atomic(page_virtual);
 	}
 #else
 	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index c904d73b1de3..e80ee82f6caf 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -125,14 +125,14 @@ static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
 	int i;
 	uint8_t *clf;
 
-	clf = kmap_atomic(page, KM_USER0);
+	clf = kmap_atomic(page);
 	mb();
 	for (i = 0; i < clflush_count; ++i) {
 		psb_clflush(clf);
 		clf += clflush_add;
 	}
 	mb();
-	kunmap_atomic(clf, KM_USER0);
+	kunmap_atomic(clf);
 }
 
 static void psb_pages_clflush(struct psb_mmu_driver *driver,
@@ -325,7 +325,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 
 	spin_lock(lock);
 
-	v = kmap_atomic(pt->p, KM_USER0);
+	v = kmap_atomic(pt->p);
 	clf = (uint8_t *) v;
 	ptes = (uint32_t *) v;
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@@ -341,7 +341,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 		mb();
 	}
 
-	kunmap_atomic(v, KM_USER0);
+	kunmap_atomic(v);
 	spin_unlock(lock);
 
 	pt->count = 0;
@@ -376,18 +376,18 @@ struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
 			continue;
 		}
 
-		v = kmap_atomic(pd->p, KM_USER0);
+		v = kmap_atomic(pd->p);
 		pd->tables[index] = pt;
 		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
 		pt->index = index;
-		kunmap_atomic((void *) v, KM_USER0);
+		kunmap_atomic((void *) v);
 
 		if (pd->hw_context != -1) {
 			psb_mmu_clflush(pd->driver, (void *) &v[index]);
 			atomic_set(&pd->driver->needs_tlbflush, 1);
 		}
 	}
-	pt->v = kmap_atomic(pt->p, KM_USER0);
+	pt->v = kmap_atomic(pt->p);
 	return pt;
 }
 
@@ -404,7 +404,7 @@ static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
 		spin_unlock(lock);
 		return NULL;
 	}
-	pt->v = kmap_atomic(pt->p, KM_USER0);
+	pt->v = kmap_atomic(pt->p);
 	return pt;
 }
 
@@ -413,9 +413,9 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
 	struct psb_mmu_pd *pd = pt->pd;
 	uint32_t *v;
 
-	kunmap_atomic(pt->v, KM_USER0);
+	kunmap_atomic(pt->v);
 	if (pt->count == 0) {
-		v = kmap_atomic(pd->p, KM_USER0);
+		v = kmap_atomic(pd->p);
 		v[pt->index] = pd->invalid_pde;
 		pd->tables[pt->index] = NULL;
 
@@ -424,7 +424,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
 					  (void *) &v[pt->index]);
 			atomic_set(&pd->driver->needs_tlbflush, 1);
 		}
-		kunmap_atomic(pt->v, KM_USER0);
+		kunmap_atomic(pt->v);
 		spin_unlock(&pd->driver->lock);
 		psb_mmu_free_pt(pt);
 		return;
@@ -457,7 +457,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
 	down_read(&driver->sem);
 	spin_lock(&driver->lock);
 
-	v = kmap_atomic(pd->p, KM_USER0);
+	v = kmap_atomic(pd->p);
 	v += start;
 
 	while (gtt_pages--) {
@@ -467,7 +467,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
 
 	/*ttm_tt_cache_flush(&pd->p, num_pages);*/
 	psb_pages_clflush(pd->driver, &pd->p, num_pages);
-	kunmap_atomic(v, KM_USER0);
+	kunmap_atomic(v);
 	spin_unlock(&driver->lock);
 
 	if (pd->hw_context != -1)
@@ -830,9 +830,9 @@ int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
 	uint32_t *v;
 
 	spin_lock(lock);
-	v = kmap_atomic(pd->p, KM_USER0);
+	v = kmap_atomic(pd->p);
 	tmp = v[psb_mmu_pd_index(virtual)];
-	kunmap_atomic(v, KM_USER0);
+	kunmap_atomic(v);
 	spin_unlock(lock);
 
 	if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 2f75d203a2bf..c10cf5e2443a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -309,11 +309,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 			goto out_err;
 
 		preempt_disable();
-		from_virtual = kmap_atomic(from_page, KM_USER0);
-		to_virtual = kmap_atomic(to_page, KM_USER1);
+		from_virtual = kmap_atomic(from_page);
+		to_virtual = kmap_atomic(to_page);
 		memcpy(to_virtual, from_virtual, PAGE_SIZE);
-		kunmap_atomic(to_virtual, KM_USER1);
-		kunmap_atomic(from_virtual, KM_USER0);
+		kunmap_atomic(to_virtual);
+		kunmap_atomic(from_virtual);
 		preempt_enable();
 		page_cache_release(from_page);
 	}
@@ -365,11 +365,11 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 			goto out_err;
 		}
 		preempt_disable();
-		from_virtual = kmap_atomic(from_page, KM_USER0);
-		to_virtual = kmap_atomic(to_page, KM_USER1);
+		from_virtual = kmap_atomic(from_page);
+		to_virtual = kmap_atomic(to_page);
 		memcpy(to_virtual, from_virtual, PAGE_SIZE);
-		kunmap_atomic(to_virtual, KM_USER1);
-		kunmap_atomic(from_virtual, KM_USER0);
+		kunmap_atomic(to_virtual);
+		kunmap_atomic(from_virtual);
 		preempt_enable();
 		set_page_dirty(to_page);
 		mark_page_accessed(to_page);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index f4e7763a7694..51c9ba5cd2fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -136,10 +136,10 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
 
 		if (likely(page_virtual != NULL)) {
 			desc_virtual->ppn = page_to_pfn(page);
-			kunmap_atomic(page_virtual, KM_USER0);
+			kunmap_atomic(page_virtual);
 		}
 
-		page_virtual = kmap_atomic(page, KM_USER0);
+		page_virtual = kmap_atomic(page);
 		desc_virtual = page_virtual - 1;
 		prev_pfn = ~(0UL);
 
@@ -169,7 +169,7 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
 	}
 
 	if (likely(page_virtual != NULL))
-		kunmap_atomic(page_virtual, KM_USER0);
+		kunmap_atomic(page_virtual);
 
 	return 0;
 out_err:
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 5bc2839ebcfd..729428edeba2 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -253,7 +253,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
 		if (page_is_high)
 			local_irq_save(flags);
 
-		buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
+		buf = kmap_atomic(page) + offset;
 
 		cmd->nleft -= nr_bytes;
 		cmd->cursg_ofs += nr_bytes;
@@ -269,7 +269,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
 		else
 			hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);
 
-		kunmap_atomic(buf, KM_BIO_SRC_IRQ);
+		kunmap_atomic(buf);
 
 		if (page_is_high)
 			local_irq_restore(flags);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index fb88d6896b67..2033a928d34d 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -73,11 +73,11 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
 		p = mem;
 		for_each_sg(sgl, sg, data->size, i) {
-			from = kmap_atomic(sg_page(sg), KM_USER0);
+			from = kmap_atomic(sg_page(sg));
 			memcpy(p,
 			       from + sg->offset,
 			       sg->length);
-			kunmap_atomic(from, KM_USER0);
+			kunmap_atomic(from);
 			p += sg->length;
 		}
 	}
@@ -133,11 +133,11 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
 		p = mem;
 		for_each_sg(sgl, sg, sg_size, i) {
-			to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+			to = kmap_atomic(sg_page(sg));
 			memcpy(to + sg->offset,
 			       p,
 			       sg->length);
-			kunmap_atomic(to, KM_SOFTIRQ0);
+			kunmap_atomic(to);
 			p += sg->length;
 		}
 	}
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index cdf36b1e9aa6..045e086144ad 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -457,7 +457,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
 		return;
 	}
 	spin_unlock_irqrestore(&bitmap->lock, flags);
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 	sb->events = cpu_to_le64(bitmap->mddev->events);
 	if (bitmap->mddev->events < bitmap->events_cleared)
 		/* rocking back to read-only */
@@ -467,7 +467,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
 	/* Just in case these have been changed via sysfs: */
 	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
 	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 	write_page(bitmap, bitmap->sb_page, 1);
 }
 
@@ -478,7 +478,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
 
 	if (!bitmap || !bitmap->sb_page)
 		return;
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
 	printk(KERN_DEBUG "         magic: %08x\n", le32_to_cpu(sb->magic));
 	printk(KERN_DEBUG "       version: %d\n", le32_to_cpu(sb->version));
@@ -497,7 +497,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
 	printk(KERN_DEBUG "     sync size: %llu KB\n",
 	       (unsigned long long)le64_to_cpu(sb->sync_size)/2);
 	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 }
 
 /*
@@ -525,7 +525,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	}
 	bitmap->sb_page->index = 0;
 
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 
 	sb->magic = cpu_to_le32(BITMAP_MAGIC);
 	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
@@ -533,7 +533,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	chunksize = bitmap->mddev->bitmap_info.chunksize;
 	BUG_ON(!chunksize);
 	if (!is_power_of_2(chunksize)) {
-		kunmap_atomic(sb, KM_USER0);
+		kunmap_atomic(sb);
 		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
 		return -EINVAL;
 	}
@@ -571,7 +571,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 		bitmap->flags |= BITMAP_HOSTENDIAN;
 	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
 
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 
 	return 0;
 }
@@ -603,7 +603,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 		return err;
 	}
 
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 
 	chunksize = le32_to_cpu(sb->chunksize);
 	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -664,7 +664,7 @@ success:
 	bitmap->events_cleared = bitmap->mddev->events;
 	err = 0;
 out:
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 	if (err)
 		bitmap_print_sb(bitmap);
 	return err;
@@ -689,7 +689,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
 		return 0;
 	}
 	spin_unlock_irqrestore(&bitmap->lock, flags);
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 	old = le32_to_cpu(sb->state) & bits;
 	switch (op) {
 	case MASK_SET:
@@ -703,7 +703,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
 	default:
 		BUG();
 	}
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 	return old;
 }
 
@@ -881,12 +881,12 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
 	bit = file_page_offset(bitmap, chunk);
 
 	/* set the bit */
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (bitmap->flags & BITMAP_HOSTENDIAN)
 		set_bit(bit, kaddr);
 	else
 		__set_bit_le(bit, kaddr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	pr_debug("set file bit %lu page %lu\n", bit, page->index);
 	/* record page number so it gets flushed to disk when unplug occurs */
 	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
@@ -1050,10 +1050,10 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 				 * if bitmap is out of date, dirty the
 				 * whole page and write it out
 				 */
-				paddr = kmap_atomic(page, KM_USER0);
+				paddr = kmap_atomic(page);
 				memset(paddr + offset, 0xff,
 				       PAGE_SIZE - offset);
-				kunmap_atomic(paddr, KM_USER0);
+				kunmap_atomic(paddr);
 				write_page(bitmap, page, 1);
 
 				ret = -EIO;
@@ -1061,12 +1061,12 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 					goto err;
 			}
 		}
-		paddr = kmap_atomic(page, KM_USER0);
+		paddr = kmap_atomic(page);
 		if (bitmap->flags & BITMAP_HOSTENDIAN)
 			b = test_bit(bit, paddr);
 		else
 			b = test_bit_le(bit, paddr);
-		kunmap_atomic(paddr, KM_USER0);
+		kunmap_atomic(paddr);
 		if (b) {
 			/* if the disk bit is set, set the memory bit */
 			int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap))
@@ -1209,10 +1209,10 @@ void bitmap_daemon_work(struct mddev *mddev)
 		    mddev->bitmap_info.external == 0) {
 			bitmap_super_t *sb;
 			bitmap->need_sync = 0;
-			sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+			sb = kmap_atomic(bitmap->sb_page);
 			sb->events_cleared =
 				cpu_to_le64(bitmap->events_cleared);
-			kunmap_atomic(sb, KM_USER0);
+			kunmap_atomic(sb);
 			write_page(bitmap, bitmap->sb_page, 1);
 		}
 		spin_lock_irqsave(&bitmap->lock, flags);
@@ -1235,7 +1235,7 @@ void bitmap_daemon_work(struct mddev *mddev)
 							  -1);
 
 				/* clear the bit */
-				paddr = kmap_atomic(page, KM_USER0);
+				paddr = kmap_atomic(page);
 				if (bitmap->flags & BITMAP_HOSTENDIAN)
 					clear_bit(file_page_offset(bitmap, j),
 						  paddr);
@@ -1244,7 +1244,7 @@ void bitmap_daemon_work(struct mddev *mddev)
 						file_page_offset(bitmap,
 								 j),
 						paddr);
-				kunmap_atomic(paddr, KM_USER0);
+				kunmap_atomic(paddr);
 			} else if (*bmc <= 2) {
 				*bmc = 1; /* maybe clear the bit next time */
 				set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8c2a000cf3f5..db6b51639cee 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -590,9 +590,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
 	int r = 0;
 
 	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
-		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
+		src = kmap_atomic(sg_page(&dmreq->sg_in));
 		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(src);
 	} else
 		memset(iv, 0, cc->iv_size);
 
@@ -608,14 +608,14 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
 	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
 		return 0;
 
-	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
+	dst = kmap_atomic(sg_page(&dmreq->sg_out));
 	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
 
 	/* Tweak the first block of plaintext sector */
 	if (!r)
 		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
 
-	kunmap_atomic(dst, KM_USER0);
+	kunmap_atomic(dst);
 	return r;
 }
 
diff --git a/drivers/media/video/ivtv/ivtv-udma.c b/drivers/media/video/ivtv/ivtv-udma.c
index 69cc8166b20b..7338cb2d0a38 100644
--- a/drivers/media/video/ivtv/ivtv-udma.c
+++ b/drivers/media/video/ivtv/ivtv-udma.c
@@ -57,9 +57,9 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info
 			if (dma->bouncemap[map_offset] == NULL)
 				return -1;
 			local_irq_save(flags);
-			src = kmap_atomic(dma->map[map_offset], KM_BOUNCE_READ) + offset;
+			src = kmap_atomic(dma->map[map_offset]) + offset;
 			memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
-			kunmap_atomic(src, KM_BOUNCE_READ);
+			kunmap_atomic(src);
 			local_irq_restore(flags);
 			sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
 		}
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 5319e9b65847..c37d3756d8d2 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -325,7 +325,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
 		p_cnt = min(p_cnt, length);
 
 		local_irq_save(flags);
-		buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off;
+		buf = kmap_atomic(pg) + p_off;
 	} else {
 		buf = host->req->data + host->block_pos;
 		p_cnt = host->req->data_len - host->block_pos;
@@ -341,7 +341,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
 		: jmb38x_ms_read_reg_data(host, buf, p_cnt);
 
 	if (host->req->long_data) {
-		kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ);
+		kunmap_atomic(buf - p_off);
 		local_irq_restore(flags);
 	}
 
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 6902b83eb1b4..7bafa72f8f57 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -210,7 +210,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
 		p_cnt = min(p_cnt, length);
 
 		local_irq_save(flags);
-		buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off;
+		buf = kmap_atomic(pg) + p_off;
 	} else {
 		buf = host->req->data + host->block_pos;
 		p_cnt = host->req->data_len - host->block_pos;
@@ -221,7 +221,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
 		: tifm_ms_read_data(host, buf, p_cnt);
 
 	if (host->req->long_data) {
-		kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ);
+		kunmap_atomic(buf - p_off);
 		local_irq_restore(flags);
 	}
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 6419a88a69e6..0e9aec8f6917 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4101,11 +4101,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 			if (length <= copybreak &&
 			    skb_tailroom(skb) >= length) {
 				u8 *vaddr;
-				vaddr = kmap_atomic(buffer_info->page,
-						    KM_SKB_DATA_SOFTIRQ);
+				vaddr = kmap_atomic(buffer_info->page);
 				memcpy(skb_tail_pointer(skb), vaddr, length);
-				kunmap_atomic(vaddr,
-					      KM_SKB_DATA_SOFTIRQ);
+				kunmap_atomic(vaddr);
 				/* re-use the page, so don't erase
 				 * buffer_info->page */
 				skb_put(skb, length);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a9a4ea2c616e..7152eb11b7b9 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1301,10 +1301,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
 						  ps_page->dma,
 						  PAGE_SIZE,
 						  DMA_FROM_DEVICE);
-			vaddr = kmap_atomic(ps_page->page,
-					    KM_SKB_DATA_SOFTIRQ);
+			vaddr = kmap_atomic(ps_page->page);
 			memcpy(skb_tail_pointer(skb), vaddr, l1);
-			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+			kunmap_atomic(vaddr);
 			dma_sync_single_for_device(&pdev->dev,
 						   ps_page->dma,
 						   PAGE_SIZE,
@@ -1503,12 +1502,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 			if (length <= copybreak &&
 			    skb_tailroom(skb) >= length) {
 				u8 *vaddr;
-				vaddr = kmap_atomic(buffer_info->page,
-						    KM_SKB_DATA_SOFTIRQ);
+				vaddr = kmap_atomic(buffer_info->page);
 				memcpy(skb_tail_pointer(skb), vaddr,
 				       length);
-				kunmap_atomic(vaddr,
-					      KM_SKB_DATA_SOFTIRQ);
+				kunmap_atomic(vaddr);
 				/* re-use the page, so don't erase
 				 * buffer_info->page */
 				skb_put(skb, length);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b36edbd625dd..3c2295560732 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -104,8 +104,8 @@
 #include <asm/byteorder.h>
 #include <asm/uaccess.h>
 
-#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
-#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define cas_page_map(x)      kmap_atomic((x))
+#define cas_page_unmap(x)    kunmap_atomic((x))
 #define CAS_NCPUS            num_online_cpus()
 
 #define cas_skb_release(x)  netif_rx(x)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f980600f78a8..2fe9e90e53d9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1736,7 +1736,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 			(uint32_t ) cmd->cmnd[8];
 			/* 4 bytes: Areca io control code */
 	sg = scsi_sglist(cmd);
-	buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
 	if (scsi_sg_count(cmd) > 1) {
 		retvalue = ARCMSR_MESSAGE_FAIL;
 		goto message_out;
@@ -1985,7 +1985,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 	}
 	message_out:
 	sg = scsi_sglist(cmd);
-	kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+	kunmap_atomic(buffer - sg->offset);
 	return retvalue;
 }
 
@@ -2035,11 +2035,11 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2035 strncpy(&inqdata[32], "R001", 4); /* Product Revision */ 2035 strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2036 2036
2037 sg = scsi_sglist(cmd); 2037 sg = scsi_sglist(cmd);
2038 buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; 2038 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
2039 2039
2040 memcpy(buffer, inqdata, sizeof(inqdata)); 2040 memcpy(buffer, inqdata, sizeof(inqdata));
2041 sg = scsi_sglist(cmd); 2041 sg = scsi_sglist(cmd);
2042 kunmap_atomic(buffer - sg->offset, KM_IRQ0); 2042 kunmap_atomic(buffer - sg->offset);
2043 2043
2044 cmd->scsi_done(cmd); 2044 cmd->scsi_done(cmd);
2045 } 2045 }
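
Both arcmsr hunks keep the same idiom: kmap_atomic() returns the base of the mapped page, the driver adds sg->offset to reach its data, and the offset is subtracted back out so kunmap_atomic() sees an address inside the page that was mapped. A hedged sketch of that pairing (sg_peek()/sg_unpeek() are hypothetical helpers, not kernel API):

  #include <linux/highmem.h>
  #include <linux/scatterlist.h>

  static void *sg_peek(struct scatterlist *sg)
  {
          /* base of the mapped page plus the element's offset */
          return kmap_atomic(sg_page(sg)) + sg->offset;
  }

  static void sg_unpeek(struct scatterlist *sg, void *buffer)
  {
          /* undo the offset so the unmap sees the mapped page */
          kunmap_atomic(buffer - sg->offset);
  }
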
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 8c6156a10d90..a9af42e83632 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -322,8 +322,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
322 return -ENOMEM; 322 return -ENOMEM;
323 } 323 }
324 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; 324 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
325 cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ) 325 cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
326 + frag->page_offset;
327 } else { 326 } else {
328 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); 327 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
329 } 328 }
@@ -332,7 +331,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
332 cp->fcoe_eof = eof; 331 cp->fcoe_eof = eof;
333 cp->fcoe_crc32 = cpu_to_le32(~crc); 332 cp->fcoe_crc32 = cpu_to_le32(~crc);
334 if (skb_is_nonlinear(skb)) { 333 if (skb_is_nonlinear(skb)) {
335 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); 334 kunmap_atomic(cp);
336 cp = NULL; 335 cp = NULL;
337 } 336 }
338 337
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d3ff9cd40234..89afd6d21d89 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1956,12 +1956,11 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
1956 1956
1957 /* data fits in the skb's headroom */ 1957 /* data fits in the skb's headroom */
1958 for (i = 0; i < tdata->nr_frags; i++, frag++) { 1958 for (i = 0; i < tdata->nr_frags; i++, frag++) {
1959 char *src = kmap_atomic(frag->page, 1959 char *src = kmap_atomic(frag->page);
1960 KM_SOFTIRQ0);
1961 1960
1962 memcpy(dst, src+frag->offset, frag->size); 1961 memcpy(dst, src+frag->offset, frag->size);
1963 dst += frag->size; 1962 dst += frag->size;
1964 kunmap_atomic(src, KM_SOFTIRQ0); 1963 kunmap_atomic(src);
1965 } 1964 }
1966 if (padlen) { 1965 if (padlen) {
1967 memset(dst, 0, padlen); 1966 memset(dst, 0, padlen);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index c164890224d2..cc75cbea936b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1515,7 +1515,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1515 return -ENOMEM; 1515 return -ENOMEM;
1516 } 1516 }
1517 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; 1517 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1518 cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ) 1518 cp = kmap_atomic(skb_frag_page(frag))
1519 + frag->page_offset; 1519 + frag->page_offset;
1520 } else { 1520 } else {
1521 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); 1521 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
@@ -1526,7 +1526,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1526 cp->fcoe_crc32 = cpu_to_le32(~crc); 1526 cp->fcoe_crc32 = cpu_to_le32(~crc);
1527 1527
1528 if (skb_is_nonlinear(skb)) { 1528 if (skb_is_nonlinear(skb)) {
1529 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); 1529 kunmap_atomic(cp);
1530 cp = NULL; 1530 cp = NULL;
1531 } 1531 }
1532 1532
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index bd97b2273f20..4d119a326d3b 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -210,10 +210,9 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
210 while (len > 0) { 210 while (len > 0) {
211 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); 211 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
212 data = kmap_atomic( 212 data = kmap_atomic(
213 skb_frag_page(frag) + (off >> PAGE_SHIFT), 213 skb_frag_page(frag) + (off >> PAGE_SHIFT));
214 KM_SKB_DATA_SOFTIRQ);
215 crc = crc32(crc, data + (off & ~PAGE_MASK), clen); 214 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
216 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); 215 kunmap_atomic(data);
217 off += clen; 216 off += clen;
218 len -= clen; 217 len -= clen;
219 } 218 }
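
fcoe_fc_crc() shows the usual way to checksum a buffer that spans pages: clamp each chunk so a single atomic mapping is never crossed, and index into the (possibly compound) frag page with off >> PAGE_SHIFT. The same loop reduced to a self-contained sketch (crc_pages() is hypothetical; 'pages' is assumed to cover the off + len range):

  #include <linux/crc32.h>
  #include <linux/highmem.h>

  static u32 crc_pages(struct page **pages, size_t off, size_t len)
  {
          u32 crc = ~0;   /* illustrative seed; raw accumulator returned */

          while (len > 0) {
                  size_t clen = min_t(size_t, len,
                                      PAGE_SIZE - (off & ~PAGE_MASK));
                  void *va = kmap_atomic(pages[off >> PAGE_SHIFT]);

                  crc = crc32(crc, va + (off & ~PAGE_MASK), clen);
                  kunmap_atomic(va);
                  off += clen;
                  len -= clen;
          }
          return crc;
  }
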
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 3242bcabad97..d42ec921de46 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2310,10 +2310,10 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
2310 return; 2310 return;
2311 } 2311 }
2312 local_irq_save(flags); 2312 local_irq_save(flags);
2313 address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; 2313 address = kmap_atomic(sg_page(sl)) + sl->offset;
2314 memcpy(address, buffer, cpnow); 2314 memcpy(address, buffer, cpnow);
2315 flush_dcache_page(sg_page(sl)); 2315 flush_dcache_page(sg_page(sl));
2316 kunmap_atomic(address, KM_BIO_SRC_IRQ); 2316 kunmap_atomic(address);
2317 local_irq_restore(flags); 2317 local_irq_restore(flags);
2318 if (cpsum == cpcount) 2318 if (cpsum == cpcount)
2319 break; 2319 break;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index d77891e5683b..b6d7a5c2fc94 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1511,14 +1511,14 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
1511 /* kmap_atomic() ensures addressability of the user buffer.*/ 1511 /* kmap_atomic() ensures addressability of the user buffer.*/
1512 /* local_irq_save() protects the KM_IRQ0 address slot. */ 1512 /* local_irq_save() protects the KM_IRQ0 address slot. */
1513 local_irq_save(flags); 1513 local_irq_save(flags);
1514 buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; 1514 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
1515 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' && 1515 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
1516 buffer[2] == 'P' && buffer[3] == 'P') { 1516 buffer[2] == 'P' && buffer[3] == 'P') {
1517 kunmap_atomic(buffer - sg->offset, KM_IRQ0); 1517 kunmap_atomic(buffer - sg->offset);
1518 local_irq_restore(flags); 1518 local_irq_restore(flags);
1519 return 1; 1519 return 1;
1520 } 1520 }
1521 kunmap_atomic(buffer - sg->offset, KM_IRQ0); 1521 kunmap_atomic(buffer - sg->offset);
1522 local_irq_restore(flags); 1522 local_irq_restore(flags);
1523 } 1523 }
1524 return 0; 1524 return 0;
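
The comment block that survives in ips.c dates from the fixed-slot scheme: KM_IRQ0 named one fixed per-CPU slot, so mapping it with interrupts enabled risked an IRQ handler reusing and clobbering the same slot. With the stacked implementation each kmap_atomic() pushes a fresh slot, so as far as I can tell the local_irq_save() remains for the driver's own reasons rather than for slot protection. The pattern itself, as a hypothetical helper:

  #include <linux/highmem.h>
  #include <linux/scatterlist.h>
  #include <linux/string.h>

  static bool sg_starts_with(struct scatterlist *sg, const void *magic,
                             size_t n)
  {
          unsigned long flags;
          char *buffer;
          bool hit;

          local_irq_save(flags);
          buffer = kmap_atomic(sg_page(sg)) + sg->offset;
          hit = memcmp(buffer, magic, n) == 0;
          kunmap_atomic(buffer - sg->offset);
          local_irq_restore(flags);
          return hit;
  }
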
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 192cb48d849a..ee0dc05c6269 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -1304,9 +1304,9 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1304 struct page *page = sg_page(sg); 1304 struct page *page = sg_page(sg);
1305 1305
1306 copy_len = min_t(int, total_len, sg_dma_len(sg)); 1306 copy_len = min_t(int, total_len, sg_dma_len(sg));
1307 kaddr = kmap_atomic(page, KM_IRQ0); 1307 kaddr = kmap_atomic(page);
1308 memcpy(kaddr + sg->offset, src_addr, copy_len); 1308 memcpy(kaddr + sg->offset, src_addr, copy_len);
1309 kunmap_atomic(kaddr, KM_IRQ0); 1309 kunmap_atomic(kaddr);
1310 total_len -= copy_len; 1310 total_len -= copy_len;
1311 src_addr += copy_len; 1311 src_addr += copy_len;
1312 sg = sg_next(sg); 1312 sg = sg_next(sg);
@@ -1654,7 +1654,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1654 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1654 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1655 frame_index, 1655 frame_index,
1656 &frame_header); 1656 &frame_header);
1657 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 1657 kaddr = kmap_atomic(sg_page(sg));
1658 rsp = kaddr + sg->offset; 1658 rsp = kaddr + sg->offset;
1659 sci_swab32_cpy(rsp, frame_header, 1); 1659 sci_swab32_cpy(rsp, frame_header, 1);
1660 1660
@@ -1691,7 +1691,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1691 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1691 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1692 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1692 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1693 } 1693 }
1694 kunmap_atomic(kaddr, KM_IRQ0); 1694 kunmap_atomic(kaddr);
1695 1695
1696 sci_controller_release_frame(ihost, frame_index); 1696 sci_controller_release_frame(ihost, frame_index);
1697 1697
@@ -3023,10 +3023,10 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
3023 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); 3023 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
3024 3024
3025 /* need to swab it back in case the command buffer is re-used */ 3025 /* need to swab it back in case the command buffer is re-used */
3026 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 3026 kaddr = kmap_atomic(sg_page(sg));
3027 smp_req = kaddr + sg->offset; 3027 smp_req = kaddr + sg->offset;
3028 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3028 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3029 kunmap_atomic(kaddr, KM_IRQ0); 3029 kunmap_atomic(kaddr);
3030 break; 3030 break;
3031 } 3031 }
3032 default: 3032 default:
@@ -3311,7 +3311,7 @@ sci_io_request_construct_smp(struct device *dev,
3311 u8 req_len; 3311 u8 req_len;
3312 u32 cmd; 3312 u32 cmd;
3313 3313
3314 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 3314 kaddr = kmap_atomic(sg_page(sg));
3315 smp_req = kaddr + sg->offset; 3315 smp_req = kaddr + sg->offset;
3316 /* 3316 /*
3317 * Look at the SMP requests' header fields; for certain SAS 1.x SMP 3317 * Look at the SMP requests' header fields; for certain SAS 1.x SMP
@@ -3337,7 +3337,7 @@ sci_io_request_construct_smp(struct device *dev,
3337 req_len = smp_req->req_len; 3337 req_len = smp_req->req_len;
3338 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3338 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3339 cmd = *(u32 *) smp_req; 3339 cmd = *(u32 *) smp_req;
3340 kunmap_atomic(kaddr, KM_IRQ0); 3340 kunmap_atomic(kaddr);
3341 3341
3342 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) 3342 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3343 return SCI_FAILURE; 3343 return SCI_FAILURE;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index f607314810ac..b577c907b318 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -485,11 +485,11 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
485 485
486 if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { 486 if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
487 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, 487 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
488 &offset, KM_SOFTIRQ0, NULL); 488 &offset, NULL);
489 } else { 489 } else {
490 crc = crc32(~0, (u8 *) fh, sizeof(*fh)); 490 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
491 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, 491 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
492 &offset, KM_SOFTIRQ0, &crc); 492 &offset, &crc);
493 buf = fc_frame_payload_get(fp, 0); 493 buf = fc_frame_payload_get(fp, 0);
494 if (len % 4) 494 if (len % 4)
495 crc = crc32(crc, buf + len, 4 - (len % 4)); 495 crc = crc32(crc, buf + len, 4 - (len % 4));
@@ -650,10 +650,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
650 * The scatterlist item may be bigger than PAGE_SIZE, 650 * The scatterlist item may be bigger than PAGE_SIZE,
651 * but we must not cross pages inside the kmap. 651 * but we must not cross pages inside the kmap.
652 */ 652 */
653 page_addr = kmap_atomic(page, KM_SOFTIRQ0); 653 page_addr = kmap_atomic(page);
654 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), 654 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
655 sg_bytes); 655 sg_bytes);
656 kunmap_atomic(page_addr, KM_SOFTIRQ0); 656 kunmap_atomic(page_addr);
657 data += sg_bytes; 657 data += sg_bytes;
658 } 658 }
659 offset += sg_bytes; 659 offset += sg_bytes;
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 1bf9841ef154..8d65a51a7598 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -105,14 +105,13 @@ module_exit(libfc_exit);
105 * @sg: pointer to the pointer of the SG list. 105 * @sg: pointer to the pointer of the SG list.
106 * @nents: pointer to the remaining number of entries in the SG list. 106 * @nents: pointer to the remaining number of entries in the SG list.
107 * @offset: pointer to the current offset in the SG list. 107 * @offset: pointer to the current offset in the SG list.
108 * @km_type: dedicated page table slot type for kmap_atomic.
109 * @crc: pointer to the 32-bit crc value. 108 * @crc: pointer to the 32-bit crc value.
110 * If crc is NULL, CRC is not calculated. 109 * If crc is NULL, CRC is not calculated.
111 */ 110 */
112u32 fc_copy_buffer_to_sglist(void *buf, size_t len, 111u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
113 struct scatterlist *sg, 112 struct scatterlist *sg,
114 u32 *nents, size_t *offset, 113 u32 *nents, size_t *offset,
115 enum km_type km_type, u32 *crc) 114 u32 *crc)
116{ 115{
117 size_t remaining = len; 116 size_t remaining = len;
118 u32 copy_len = 0; 117 u32 copy_len = 0;
@@ -142,12 +141,11 @@ u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
142 off = *offset + sg->offset; 141 off = *offset + sg->offset;
143 sg_bytes = min(sg_bytes, 142 sg_bytes = min(sg_bytes,
144 (size_t)(PAGE_SIZE - (off & ~PAGE_MASK))); 143 (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
145 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), 144 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT));
146 km_type);
147 if (crc) 145 if (crc)
148 *crc = crc32(*crc, buf, sg_bytes); 146 *crc = crc32(*crc, buf, sg_bytes);
149 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes); 147 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
150 kunmap_atomic(page_addr, km_type); 148 kunmap_atomic(page_addr);
151 buf += sg_bytes; 149 buf += sg_bytes;
152 *offset += sg_bytes; 150 *offset += sg_bytes;
153 remaining -= sg_bytes; 151 remaining -= sg_bytes;
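
With km_type gone, fc_copy_buffer_to_sglist() shrinks to the buffer, its length, the SG cursor state, and an optional CRC accumulator. A hypothetical caller under the new signature (the function is libfc-internal, so this assumes fc_libfc.h is in scope):

  static u32 copy_payload(void *payload, size_t len,
                          struct scatterlist *sg, u32 nents)
  {
          size_t offset = 0;

          /* pass a u32 * instead of NULL to have the CRC updated */
          return fc_copy_buffer_to_sglist(payload, len, sg, &nents,
                                          &offset, NULL);
  }
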
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index c7d071289af5..c2830cc66d6a 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -134,6 +134,6 @@ extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
134u32 fc_copy_buffer_to_sglist(void *buf, size_t len, 134u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
135 struct scatterlist *sg, 135 struct scatterlist *sg,
136 u32 *nents, size_t *offset, 136 u32 *nents, size_t *offset,
137 enum km_type km_type, u32 *crc); 137 u32 *crc);
138 138
139#endif /* _FC_LIBFC_H_ */ 139#endif /* _FC_LIBFC_H_ */
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 83750ebb527f..c1a808cc5920 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1698,7 +1698,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1698 1698
1699 job->reply->reply_payload_rcv_len += 1699 job->reply->reply_payload_rcv_len +=
1700 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, 1700 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1701 &info->offset, KM_BIO_SRC_IRQ, NULL); 1701 &info->offset, NULL);
1702 1702
1703 if (fr_eof(fp) == FC_EOF_T && 1703 if (fr_eof(fp) == FC_EOF_T &&
1704 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == 1704 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 5715a3d0a3d3..7f0465b9623e 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -135,7 +135,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
135 135
136 if (recv) { 136 if (recv) {
137 segment->atomic_mapped = true; 137 segment->atomic_mapped = true;
138 segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); 138 segment->sg_mapped = kmap_atomic(sg_page(sg));
139 } else { 139 } else {
140 segment->atomic_mapped = false; 140 segment->atomic_mapped = false;
141 /* the xmit path can sleep with the page mapped so use kmap */ 141 /* the xmit path can sleep with the page mapped so use kmap */
@@ -149,7 +149,7 @@ void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
149{ 149{
150 if (segment->sg_mapped) { 150 if (segment->sg_mapped) {
151 if (segment->atomic_mapped) 151 if (segment->atomic_mapped)
152 kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); 152 kunmap_atomic(segment->sg_mapped);
153 else 153 else
154 kunmap(sg_page(segment->sg)); 154 kunmap(sg_page(segment->sg));
155 segment->sg_mapped = NULL; 155 segment->sg_mapped = NULL;
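
libiscsi_tcp is one of the few places where both mapping flavours meet: receive runs in softirq context and must use the atomic map, while the transmit side can sleep with the page mapped and therefore uses kmap(). The atomic_mapped flag exists purely so the unmap matches the map; condensed from the hunks above (field names mirror struct iscsi_segment, not quoted verbatim):

  if (recv) {
          seg->atomic_mapped = true;      /* softirq: no sleeping */
          seg->sg_mapped = kmap_atomic(sg_page(sg));
  } else {
          seg->atomic_mapped = false;     /* xmit may sleep while mapped */
          seg->sg_mapped = kmap(sg_page(sg));
  }

  /* ... later, on unmap: */
  if (seg->atomic_mapped)
          kunmap_atomic(seg->sg_mapped);
  else
          kunmap(sg_page(seg->sg));
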
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index bb8f49269a68..3814d3eed401 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -246,9 +246,9 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
246 } 246 }
247 247
248 local_irq_disable(); 248 local_irq_disable();
249 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 249 buf = kmap_atomic(bio_page(req->bio));
250 memcpy(req_data, buf, blk_rq_bytes(req)); 250 memcpy(req_data, buf, blk_rq_bytes(req));
251 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 251 kunmap_atomic(buf - bio_offset(req->bio));
252 local_irq_enable(); 252 local_irq_enable();
253 253
254 if (req_data[0] != SMP_REQUEST) 254 if (req_data[0] != SMP_REQUEST)
@@ -361,10 +361,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
361 } 361 }
362 362
363 local_irq_disable(); 363 local_irq_disable();
364 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 364 buf = kmap_atomic(bio_page(rsp->bio));
365 memcpy(buf, resp_data, blk_rq_bytes(rsp)); 365 memcpy(buf, resp_data, blk_rq_bytes(rsp));
366 flush_kernel_dcache_page(bio_page(rsp->bio)); 366 flush_kernel_dcache_page(bio_page(rsp->bio));
367 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 367 kunmap_atomic(buf - bio_offset(rsp->bio));
368 local_irq_enable(); 368 local_irq_enable();
369 369
370 out: 370 out:
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 15eefa1d61fd..4d39a9ffc081 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -670,10 +670,10 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
670 struct scatterlist *sg; 670 struct scatterlist *sg;
671 671
672 sg = scsi_sglist(cmd); 672 sg = scsi_sglist(cmd);
673 buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; 673 buf = kmap_atomic(sg_page(sg)) + sg->offset;
674 674
675 memset(buf, 0, cmd->cmnd[4]); 675 memset(buf, 0, cmd->cmnd[4]);
676 kunmap_atomic(buf - sg->offset, KM_IRQ0); 676 kunmap_atomic(buf - sg->offset);
677 677
678 cmd->result = (DID_OK << 16); 678 cmd->result = (DID_OK << 16);
679 cmd->scsi_done(cmd); 679 cmd->scsi_done(cmd);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index a4884a57cf79..01ab9c4d3464 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1885,11 +1885,11 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1885 case SAS_PROTOCOL_SMP: { 1885 case SAS_PROTOCOL_SMP: {
1886 struct scatterlist *sg_resp = &task->smp_task.smp_resp; 1886 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1887 tstat->stat = SAM_STAT_GOOD; 1887 tstat->stat = SAM_STAT_GOOD;
1888 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); 1888 to = kmap_atomic(sg_page(sg_resp));
1889 memcpy(to + sg_resp->offset, 1889 memcpy(to + sg_resp->offset,
1890 slot->response + sizeof(struct mvs_err_info), 1890 slot->response + sizeof(struct mvs_err_info),
1891 sg_dma_len(sg_resp)); 1891 sg_dma_len(sg_resp));
1892 kunmap_atomic(to, KM_IRQ0); 1892 kunmap_atomic(to);
1893 break; 1893 break;
1894 } 1894 }
1895 1895
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 6888b2ca5bfc..68da6c092f65 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1778,7 +1778,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1778 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { 1778 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1779 int len = min(psgl->length, resid); 1779 int len = min(psgl->length, resid);
1780 1780
1781 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset; 1781 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1782 memcpy(paddr, dif_storep + dif_offset(sector), len); 1782 memcpy(paddr, dif_storep + dif_offset(sector), len);
1783 1783
1784 sector += len >> 3; 1784 sector += len >> 3;
@@ -1788,7 +1788,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1788 sector = do_div(tmp_sec, sdebug_store_sectors); 1788 sector = do_div(tmp_sec, sdebug_store_sectors);
1789 } 1789 }
1790 resid -= len; 1790 resid -= len;
1791 kunmap_atomic(paddr, KM_IRQ0); 1791 kunmap_atomic(paddr);
1792 } 1792 }
1793 1793
1794 dix_reads++; 1794 dix_reads++;
@@ -1881,12 +1881,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1881 BUG_ON(scsi_sg_count(SCpnt) == 0); 1881 BUG_ON(scsi_sg_count(SCpnt) == 0);
1882 BUG_ON(scsi_prot_sg_count(SCpnt) == 0); 1882 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1883 1883
1884 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset; 1884 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1885 ppage_offset = 0; 1885 ppage_offset = 0;
1886 1886
1887 /* For each data page */ 1887 /* For each data page */
1888 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) { 1888 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1889 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset; 1889 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1890 1890
1891 /* For each sector-sized chunk in data page */ 1891 /* For each sector-sized chunk in data page */
1892 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) { 1892 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
@@ -1895,10 +1895,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1895 * protection page advance to the next one 1895 * protection page advance to the next one
1896 */ 1896 */
1897 if (ppage_offset >= psgl->length) { 1897 if (ppage_offset >= psgl->length) {
1898 kunmap_atomic(paddr, KM_IRQ1); 1898 kunmap_atomic(paddr);
1899 psgl = sg_next(psgl); 1899 psgl = sg_next(psgl);
1900 BUG_ON(psgl == NULL); 1900 BUG_ON(psgl == NULL);
1901 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) 1901 paddr = kmap_atomic(sg_page(psgl))
1902 + psgl->offset; 1902 + psgl->offset;
1903 ppage_offset = 0; 1903 ppage_offset = 0;
1904 } 1904 }
@@ -1971,10 +1971,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1971 ppage_offset += sizeof(struct sd_dif_tuple); 1971 ppage_offset += sizeof(struct sd_dif_tuple);
1972 } 1972 }
1973 1973
1974 kunmap_atomic(daddr, KM_IRQ0); 1974 kunmap_atomic(daddr);
1975 } 1975 }
1976 1976
1977 kunmap_atomic(paddr, KM_IRQ1); 1977 kunmap_atomic(paddr);
1978 1978
1979 dix_writes++; 1979 dix_writes++;
1980 1980
@@ -1982,8 +1982,8 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1982 1982
1983out: 1983out:
1984 dif_errors++; 1984 dif_errors++;
1985 kunmap_atomic(daddr, KM_IRQ0); 1985 kunmap_atomic(daddr);
1986 kunmap_atomic(paddr, KM_IRQ1); 1986 kunmap_atomic(paddr);
1987 return ret; 1987 return ret;
1988} 1988}
1989 1989
@@ -2303,7 +2303,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2303 2303
2304 offset = 0; 2304 offset = 0;
2305 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { 2305 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2306 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0); 2306 kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2307 if (!kaddr) 2307 if (!kaddr)
2308 goto out; 2308 goto out;
2309 2309
@@ -2311,7 +2311,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2311 *(kaddr + sg->offset + j) ^= *(buf + offset + j); 2311 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2312 2312
2313 offset += sg->length; 2313 offset += sg->length;
2314 kunmap_atomic(kaddr, KM_USER0); 2314 kunmap_atomic(kaddr);
2315 } 2315 }
2316 ret = 0; 2316 ret = 0;
2317out: 2317out:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b2c95dbe9d65..a33b2b66da67 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2567,7 +2567,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2567 if (*len > sg_len) 2567 if (*len > sg_len)
2568 *len = sg_len; 2568 *len = sg_len;
2569 2569
2570 return kmap_atomic(page, KM_BIO_SRC_IRQ); 2570 return kmap_atomic(page);
2571} 2571}
2572EXPORT_SYMBOL(scsi_kmap_atomic_sg); 2572EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2573 2573
@@ -2577,6 +2577,6 @@ EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2577 */ 2577 */
2578void scsi_kunmap_atomic_sg(void *virt) 2578void scsi_kunmap_atomic_sg(void *virt)
2579{ 2579{
2580 kunmap_atomic(virt, KM_BIO_SRC_IRQ); 2580 kunmap_atomic(virt);
2581} 2581}
2582EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 2582EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
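
scsi_kmap_atomic_sg() is the exported wrapper other code builds on: it walks the scatterlist to the element containing *offset, maps the right page, rewrites *offset to be page-relative, and clamps *len so the caller cannot run past the mapping. A hypothetical use (zero_head() is not a kernel function):

  #include <scsi/scsi_cmnd.h>

  static void zero_head(struct scsi_cmnd *sc, size_t count)
  {
          size_t offset = 0, len = count;
          void *base;

          base = scsi_kmap_atomic_sg(scsi_sglist(sc), scsi_sg_count(sc),
                                     &offset, &len);
          /* offset is now relative to the mapped page, len clamped to it */
          memset(base + offset, 0, len);
          scsi_kunmap_atomic_sg(base);
  }
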
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index f8fb2d691c0a..e52d5bc42bc4 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -392,7 +392,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
392 virt = bio->bi_integrity->bip_sector & 0xffffffff; 392 virt = bio->bi_integrity->bip_sector & 0xffffffff;
393 393
394 bip_for_each_vec(iv, bio->bi_integrity, i) { 394 bip_for_each_vec(iv, bio->bi_integrity, i) {
395 sdt = kmap_atomic(iv->bv_page, KM_USER0) 395 sdt = kmap_atomic(iv->bv_page)
396 + iv->bv_offset; 396 + iv->bv_offset;
397 397
398 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { 398 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
@@ -405,7 +405,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
405 phys++; 405 phys++;
406 } 406 }
407 407
408 kunmap_atomic(sdt, KM_USER0); 408 kunmap_atomic(sdt);
409 } 409 }
410 410
411 bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY); 411 bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
@@ -414,7 +414,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
414 return 0; 414 return 0;
415 415
416error: 416error:
417 kunmap_atomic(sdt, KM_USER0); 417 kunmap_atomic(sdt);
418 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n", 418 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
419 __func__, virt, phys, be32_to_cpu(sdt->ref_tag), 419 __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
420 be16_to_cpu(sdt->app_tag)); 420 be16_to_cpu(sdt->app_tag));
@@ -453,13 +453,13 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
453 virt = bio->bi_integrity->bip_sector & 0xffffffff; 453 virt = bio->bi_integrity->bip_sector & 0xffffffff;
454 454
455 bip_for_each_vec(iv, bio->bi_integrity, i) { 455 bip_for_each_vec(iv, bio->bi_integrity, i) {
456 sdt = kmap_atomic(iv->bv_page, KM_USER0) 456 sdt = kmap_atomic(iv->bv_page)
457 + iv->bv_offset; 457 + iv->bv_offset;
458 458
459 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { 459 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
460 460
461 if (sectors == 0) { 461 if (sectors == 0) {
462 kunmap_atomic(sdt, KM_USER0); 462 kunmap_atomic(sdt);
463 return; 463 return;
464 } 464 }
465 465
@@ -474,7 +474,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
474 sectors--; 474 sectors--;
475 } 475 }
476 476
477 kunmap_atomic(sdt, KM_USER0); 477 kunmap_atomic(sdt);
478 } 478 }
479 } 479 }
480} 480}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 695ffc36e02d..83a1972a1999 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -481,6 +481,19 @@ cleanup:
481 return NULL; 481 return NULL;
482} 482}
483 483
484/* Disgusting wrapper functions */
485static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
486{
487 void *addr = kmap_atomic(sg_page(sgl + idx));
488 return (unsigned long)addr;
489}
490
491static inline void sg_kunmap_atomic(unsigned long addr)
492{
493 kunmap_atomic((void *)addr);
494}
495
496
484/* Assume the original sgl has enough room */ 497/* Assume the original sgl has enough room */
485static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, 498static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
486 struct scatterlist *bounce_sgl, 499 struct scatterlist *bounce_sgl,
@@ -499,15 +512,12 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
499 local_irq_save(flags); 512 local_irq_save(flags);
500 513
501 for (i = 0; i < orig_sgl_count; i++) { 514 for (i = 0; i < orig_sgl_count; i++) {
502 dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), 515 dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
503 KM_IRQ0) + orig_sgl[i].offset;
504 dest = dest_addr; 516 dest = dest_addr;
505 destlen = orig_sgl[i].length; 517 destlen = orig_sgl[i].length;
506 518
507 if (bounce_addr == 0) 519 if (bounce_addr == 0)
508 bounce_addr = 520 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
509 (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
510 KM_IRQ0);
511 521
512 while (destlen) { 522 while (destlen) {
513 src = bounce_addr + bounce_sgl[j].offset; 523 src = bounce_addr + bounce_sgl[j].offset;
@@ -523,7 +533,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
523 533
524 if (bounce_sgl[j].offset == bounce_sgl[j].length) { 534 if (bounce_sgl[j].offset == bounce_sgl[j].length) {
525 /* full */ 535 /* full */
526 kunmap_atomic((void *)bounce_addr, KM_IRQ0); 536 sg_kunmap_atomic(bounce_addr);
527 j++; 537 j++;
528 538
529 /* 539 /*
@@ -537,26 +547,21 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
537 /* 547 /*
538 * We are done; cleanup and return. 548 * We are done; cleanup and return.
539 */ 549 */
540 kunmap_atomic((void *)(dest_addr - 550 sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
541 orig_sgl[i].offset),
542 KM_IRQ0);
543 local_irq_restore(flags); 551 local_irq_restore(flags);
544 return total_copied; 552 return total_copied;
545 } 553 }
546 554
547 /* if we need to use another bounce buffer */ 555 /* if we need to use another bounce buffer */
548 if (destlen || i != orig_sgl_count - 1) 556 if (destlen || i != orig_sgl_count - 1)
549 bounce_addr = 557 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
550 (unsigned long)kmap_atomic(
551 sg_page((&bounce_sgl[j])), KM_IRQ0);
552 } else if (destlen == 0 && i == orig_sgl_count - 1) { 558 } else if (destlen == 0 && i == orig_sgl_count - 1) {
553 /* unmap the last bounce that is < PAGE_SIZE */ 559 /* unmap the last bounce that is < PAGE_SIZE */
554 kunmap_atomic((void *)bounce_addr, KM_IRQ0); 560 sg_kunmap_atomic(bounce_addr);
555 } 561 }
556 } 562 }
557 563
558 kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset), 564 sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
559 KM_IRQ0);
560 } 565 }
561 566
562 local_irq_restore(flags); 567 local_irq_restore(flags);
@@ -581,15 +586,12 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
581 local_irq_save(flags); 586 local_irq_save(flags);
582 587
583 for (i = 0; i < orig_sgl_count; i++) { 588 for (i = 0; i < orig_sgl_count; i++) {
584 src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), 589 src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
585 KM_IRQ0) + orig_sgl[i].offset;
586 src = src_addr; 590 src = src_addr;
587 srclen = orig_sgl[i].length; 591 srclen = orig_sgl[i].length;
588 592
589 if (bounce_addr == 0) 593 if (bounce_addr == 0)
590 bounce_addr = 594 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
591 (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
592 KM_IRQ0);
593 595
594 while (srclen) { 596 while (srclen) {
595 /* assume bounce offset always == 0 */ 597 /* assume bounce offset always == 0 */
@@ -606,22 +608,20 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
606 608
607 if (bounce_sgl[j].length == PAGE_SIZE) { 609 if (bounce_sgl[j].length == PAGE_SIZE) {
608 /* full..move to next entry */ 610 /* full..move to next entry */
609 kunmap_atomic((void *)bounce_addr, KM_IRQ0); 611 sg_kunmap_atomic(bounce_addr);
610 j++; 612 j++;
611 613
612 /* if we need to use another bounce buffer */ 614 /* if we need to use another bounce buffer */
613 if (srclen || i != orig_sgl_count - 1) 615 if (srclen || i != orig_sgl_count - 1)
614 bounce_addr = 616 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
615 (unsigned long)kmap_atomic(
616 sg_page((&bounce_sgl[j])), KM_IRQ0);
617 617
618 } else if (srclen == 0 && i == orig_sgl_count - 1) { 618 } else if (srclen == 0 && i == orig_sgl_count - 1) {
619 /* unmap the last bounce that is < PAGE_SIZE */ 619 /* unmap the last bounce that is < PAGE_SIZE */
620 kunmap_atomic((void *)bounce_addr, KM_IRQ0); 620 sg_kunmap_atomic(bounce_addr);
621 } 621 }
622 } 622 }
623 623
624 kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0); 624 sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
625 } 625 }
626 626
627 local_irq_restore(flags); 627 local_irq_restore(flags);
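
The new storvsc wrappers trade type safety for brevity: returning unsigned long lets the copy loops above do their offset arithmetic without casting at every use, at the cost of one cast inside each wrapper. Schematically, with illustrative names (n and src stand in for the loop's bookkeeping):

  unsigned long dest = sg_kmap_atomic(orig_sgl, i) + orig_sgl[i].offset;

  memcpy((void *)dest, (void *)src, n);
  sg_kunmap_atomic(dest - orig_sgl[i].offset);
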
diff --git a/drivers/staging/ramster/xvmalloc.c b/drivers/staging/ramster/xvmalloc.c
index 1f9c5082b6d5..93ba8e9407aa 100644
--- a/drivers/staging/ramster/xvmalloc.c
+++ b/drivers/staging/ramster/xvmalloc.c
@@ -56,17 +56,17 @@ static void clear_flag(struct block_header *block, enum blockflags flag)
56 * This is called from xv_malloc/xv_free path, so it 56 * This is called from xv_malloc/xv_free path, so it
57 * needs to be fast. 57 * needs to be fast.
58 */ 58 */
59static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type) 59static void *get_ptr_atomic(struct page *page, u16 offset)
60{ 60{
61 unsigned char *base; 61 unsigned char *base;
62 62
63 base = kmap_atomic(page, type); 63 base = kmap_atomic(page);
64 return base + offset; 64 return base + offset;
65} 65}
66 66
67static void put_ptr_atomic(void *ptr, enum km_type type) 67static void put_ptr_atomic(void *ptr)
68{ 68{
69 kunmap_atomic(ptr, type); 69 kunmap_atomic(ptr);
70} 70}
71 71
72static u32 get_blockprev(struct block_header *block) 72static u32 get_blockprev(struct block_header *block)
@@ -202,10 +202,10 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
202 202
203 if (block->link.next_page) { 203 if (block->link.next_page) {
204 nextblock = get_ptr_atomic(block->link.next_page, 204 nextblock = get_ptr_atomic(block->link.next_page,
205 block->link.next_offset, KM_USER1); 205 block->link.next_offset);
206 nextblock->link.prev_page = page; 206 nextblock->link.prev_page = page;
207 nextblock->link.prev_offset = offset; 207 nextblock->link.prev_offset = offset;
208 put_ptr_atomic(nextblock, KM_USER1); 208 put_ptr_atomic(nextblock);
209 /* If there was a next page then the free bits are set. */ 209 /* If there was a next page then the free bits are set. */
210 return; 210 return;
211 } 211 }
@@ -225,18 +225,18 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
225 225
226 if (block->link.prev_page) { 226 if (block->link.prev_page) {
227 tmpblock = get_ptr_atomic(block->link.prev_page, 227 tmpblock = get_ptr_atomic(block->link.prev_page,
228 block->link.prev_offset, KM_USER1); 228 block->link.prev_offset);
229 tmpblock->link.next_page = block->link.next_page; 229 tmpblock->link.next_page = block->link.next_page;
230 tmpblock->link.next_offset = block->link.next_offset; 230 tmpblock->link.next_offset = block->link.next_offset;
231 put_ptr_atomic(tmpblock, KM_USER1); 231 put_ptr_atomic(tmpblock);
232 } 232 }
233 233
234 if (block->link.next_page) { 234 if (block->link.next_page) {
235 tmpblock = get_ptr_atomic(block->link.next_page, 235 tmpblock = get_ptr_atomic(block->link.next_page,
236 block->link.next_offset, KM_USER1); 236 block->link.next_offset);
237 tmpblock->link.prev_page = block->link.prev_page; 237 tmpblock->link.prev_page = block->link.prev_page;
238 tmpblock->link.prev_offset = block->link.prev_offset; 238 tmpblock->link.prev_offset = block->link.prev_offset;
239 put_ptr_atomic(tmpblock, KM_USER1); 239 put_ptr_atomic(tmpblock);
240 } 240 }
241 241
242 /* Is this block is at the head of the freelist? */ 242 /* Is this block is at the head of the freelist? */
@@ -249,11 +249,10 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
249 if (pool->freelist[slindex].page) { 249 if (pool->freelist[slindex].page) {
250 struct block_header *tmpblock; 250 struct block_header *tmpblock;
251 tmpblock = get_ptr_atomic(pool->freelist[slindex].page, 251 tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
252 pool->freelist[slindex].offset, 252 pool->freelist[slindex].offset);
253 KM_USER1);
254 tmpblock->link.prev_page = NULL; 253 tmpblock->link.prev_page = NULL;
255 tmpblock->link.prev_offset = 0; 254 tmpblock->link.prev_offset = 0;
256 put_ptr_atomic(tmpblock, KM_USER1); 255 put_ptr_atomic(tmpblock);
257 } else { 256 } else {
258 /* This freelist bucket is empty */ 257 /* This freelist bucket is empty */
259 __clear_bit(slindex % BITS_PER_LONG, 258 __clear_bit(slindex % BITS_PER_LONG,
@@ -284,7 +283,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
284 stat_inc(&pool->total_pages); 283 stat_inc(&pool->total_pages);
285 284
286 spin_lock(&pool->lock); 285 spin_lock(&pool->lock);
287 block = get_ptr_atomic(page, 0, KM_USER0); 286 block = get_ptr_atomic(page, 0);
288 287
289 block->size = PAGE_SIZE - XV_ALIGN; 288 block->size = PAGE_SIZE - XV_ALIGN;
290 set_flag(block, BLOCK_FREE); 289 set_flag(block, BLOCK_FREE);
@@ -293,7 +292,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
293 292
294 insert_block(pool, page, 0, block); 293 insert_block(pool, page, 0, block);
295 294
296 put_ptr_atomic(block, KM_USER0); 295 put_ptr_atomic(block);
297 spin_unlock(&pool->lock); 296 spin_unlock(&pool->lock);
298 297
299 return 0; 298 return 0;
@@ -375,7 +374,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
375 return -ENOMEM; 374 return -ENOMEM;
376 } 375 }
377 376
378 block = get_ptr_atomic(*page, *offset, KM_USER0); 377 block = get_ptr_atomic(*page, *offset);
379 378
380 remove_block(pool, *page, *offset, block, index); 379 remove_block(pool, *page, *offset, block, index);
381 380
@@ -405,7 +404,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
405 block->size = origsize; 404 block->size = origsize;
406 clear_flag(block, BLOCK_FREE); 405 clear_flag(block, BLOCK_FREE);
407 406
408 put_ptr_atomic(block, KM_USER0); 407 put_ptr_atomic(block);
409 spin_unlock(&pool->lock); 408 spin_unlock(&pool->lock);
410 409
411 *offset += XV_ALIGN; 410 *offset += XV_ALIGN;
@@ -426,7 +425,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
426 425
427 spin_lock(&pool->lock); 426 spin_lock(&pool->lock);
428 427
429 page_start = get_ptr_atomic(page, 0, KM_USER0); 428 page_start = get_ptr_atomic(page, 0);
430 block = (struct block_header *)((char *)page_start + offset); 429 block = (struct block_header *)((char *)page_start + offset);
431 430
432 /* Catch double free bugs */ 431 /* Catch double free bugs */
@@ -468,7 +467,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
468 467
469 /* No used objects in this page. Free it. */ 468 /* No used objects in this page. Free it. */
470 if (block->size == PAGE_SIZE - XV_ALIGN) { 469 if (block->size == PAGE_SIZE - XV_ALIGN) {
471 put_ptr_atomic(page_start, KM_USER0); 470 put_ptr_atomic(page_start);
472 spin_unlock(&pool->lock); 471 spin_unlock(&pool->lock);
473 472
474 __free_page(page); 473 __free_page(page);
@@ -486,7 +485,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
486 set_blockprev(tmpblock, offset); 485 set_blockprev(tmpblock, offset);
487 } 486 }
488 487
489 put_ptr_atomic(page_start, KM_USER0); 488 put_ptr_atomic(page_start);
490 spin_unlock(&pool->lock); 489 spin_unlock(&pool->lock);
491} 490}
492EXPORT_SYMBOL_GPL(xv_free); 491EXPORT_SYMBOL_GPL(xv_free);
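
get_ptr_atomic()/put_ptr_atomic() keep xvmalloc's own naming while reducing to plain one-argument kmap calls underneath. A sketch of a caller after the change (probe_block_size() is hypothetical):

  static u32 probe_block_size(struct page *page, u16 offset)
  {
          struct block_header *block;
          u32 size;

          block = get_ptr_atomic(page, offset);   /* kmap_atomic + offset */
          size = block->size;
          put_ptr_atomic(block);                  /* plain kunmap_atomic */
          return size;
  }
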
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index 36d53ed9d71a..68b2e053a0e6 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -496,13 +496,13 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
496 } 496 }
497 ASSERT_SENTINEL(zh, ZBH); 497 ASSERT_SENTINEL(zh, ZBH);
498 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); 498 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
499 to_va = kmap_atomic(page, KM_USER0); 499 to_va = kmap_atomic(page);
500 size = zh->size; 500 size = zh->size;
501 from_va = zbud_data(zh, size); 501 from_va = zbud_data(zh, size);
502 ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len); 502 ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
503 BUG_ON(ret != LZO_E_OK); 503 BUG_ON(ret != LZO_E_OK);
504 BUG_ON(out_len != PAGE_SIZE); 504 BUG_ON(out_len != PAGE_SIZE);
505 kunmap_atomic(to_va, KM_USER0); 505 kunmap_atomic(to_va);
506out: 506out:
507 spin_unlock(&zbpg->lock); 507 spin_unlock(&zbpg->lock);
508 return ret; 508 return ret;
@@ -1109,7 +1109,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
1109 goto out; 1109 goto out;
1110 atomic_inc(&zv_curr_dist_counts[chunks]); 1110 atomic_inc(&zv_curr_dist_counts[chunks]);
1111 atomic_inc(&zv_cumul_dist_counts[chunks]); 1111 atomic_inc(&zv_cumul_dist_counts[chunks]);
1112 zv = kmap_atomic(page, KM_USER0) + offset; 1112 zv = kmap_atomic(page) + offset;
1113 zv->index = index; 1113 zv->index = index;
1114 zv->oid = *oid; 1114 zv->oid = *oid;
1115 zv->pool_id = pool_id; 1115 zv->pool_id = pool_id;
@@ -1123,7 +1123,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
1123 spin_unlock(&zcache_rem_op_list_lock); 1123 spin_unlock(&zcache_rem_op_list_lock);
1124 } 1124 }
1125 memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen); 1125 memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
1126 kunmap_atomic(zv, KM_USER0); 1126 kunmap_atomic(zv);
1127out: 1127out:
1128 return zv; 1128 return zv;
1129} 1129}
@@ -1145,7 +1145,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
1145 &page, &offset, ZCACHE_GFP_MASK); 1145 &page, &offset, ZCACHE_GFP_MASK);
1146 if (unlikely(ret)) 1146 if (unlikely(ret))
1147 goto out; 1147 goto out;
1148 zv = kmap_atomic(page, KM_USER0) + offset; 1148 zv = kmap_atomic(page) + offset;
1149 SET_SENTINEL(zv, ZVH); 1149 SET_SENTINEL(zv, ZVH);
1150 INIT_LIST_HEAD(&zv->rem_op.list); 1150 INIT_LIST_HEAD(&zv->rem_op.list);
1151 zv->client_id = LOCAL_CLIENT; 1151 zv->client_id = LOCAL_CLIENT;
@@ -1153,7 +1153,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
1153 zv->index = index; 1153 zv->index = index;
1154 zv->oid = *oid; 1154 zv->oid = *oid;
1155 zv->pool_id = pool->pool_id; 1155 zv->pool_id = pool->pool_id;
1156 kunmap_atomic(zv, KM_USER0); 1156 kunmap_atomic(zv);
1157out: 1157out:
1158 return zv; 1158 return zv;
1159} 1159}
@@ -1194,10 +1194,10 @@ static void zv_decompress(struct page *page, struct zv_hdr *zv)
1194 ASSERT_SENTINEL(zv, ZVH); 1194 ASSERT_SENTINEL(zv, ZVH);
1195 size = xv_get_object_size(zv) - sizeof(*zv); 1195 size = xv_get_object_size(zv) - sizeof(*zv);
1196 BUG_ON(size == 0); 1196 BUG_ON(size == 0);
1197 to_va = kmap_atomic(page, KM_USER0); 1197 to_va = kmap_atomic(page);
1198 ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv), 1198 ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
1199 size, to_va, &clen); 1199 size, to_va, &clen);
1200 kunmap_atomic(to_va, KM_USER0); 1200 kunmap_atomic(to_va);
1201 BUG_ON(ret != LZO_E_OK); 1201 BUG_ON(ret != LZO_E_OK);
1202 BUG_ON(clen != PAGE_SIZE); 1202 BUG_ON(clen != PAGE_SIZE);
1203} 1203}
@@ -2203,12 +2203,12 @@ static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
2203 BUG_ON(!irqs_disabled()); 2203 BUG_ON(!irqs_disabled());
2204 if (unlikely(dmem == NULL || wmem == NULL)) 2204 if (unlikely(dmem == NULL || wmem == NULL))
2205 goto out; /* no buffer, so can't compress */ 2205 goto out; /* no buffer, so can't compress */
2206 from_va = kmap_atomic(from, KM_USER0); 2206 from_va = kmap_atomic(from);
2207 mb(); 2207 mb();
2208 ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem); 2208 ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
2209 BUG_ON(ret != LZO_E_OK); 2209 BUG_ON(ret != LZO_E_OK);
2210 *out_va = dmem; 2210 *out_va = dmem;
2211 kunmap_atomic(from_va, KM_USER0); 2211 kunmap_atomic(from_va);
2212 ret = 1; 2212 ret = 1;
2213out: 2213out:
2214 return ret; 2214 return ret;
diff --git a/drivers/staging/rtl8192u/ieee80211/cipher.c b/drivers/staging/rtl8192u/ieee80211/cipher.c
index 69dcc3176ebc..d47345c4adcf 100644
--- a/drivers/staging/rtl8192u/ieee80211/cipher.c
+++ b/drivers/staging/rtl8192u/ieee80211/cipher.c
@@ -71,8 +71,8 @@ static int crypt(struct crypto_tfm *tfm,
71 u8 *src_p, *dst_p; 71 u8 *src_p, *dst_p;
72 int in_place; 72 int in_place;
73 73
74 scatterwalk_map(&walk_in, 0); 74 scatterwalk_map(&walk_in);
75 scatterwalk_map(&walk_out, 1); 75 scatterwalk_map(&walk_out);
76 src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src); 76 src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
77 dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst); 77 dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
78 in_place = scatterwalk_samebuf(&walk_in, &walk_out, 78 in_place = scatterwalk_samebuf(&walk_in, &walk_out,
@@ -84,10 +84,10 @@ static int crypt(struct crypto_tfm *tfm,
84 84
85 prfn(tfm, dst_p, src_p, crfn, enc, info, in_place); 85 prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
86 86
87 scatterwalk_done(&walk_in, 0, nbytes); 87 scatterwalk_done(&walk_in, nbytes);
88 88
89 scatterwalk_copychunks(dst_p, &walk_out, bsize, 1); 89 scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
90 scatterwalk_done(&walk_out, 1, nbytes); 90 scatterwalk_done(&walk_out, nbytes);
91 91
92 if (!nbytes) 92 if (!nbytes)
93 return 0; 93 return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/digest.c b/drivers/staging/rtl8192u/ieee80211/digest.c
index 301ed514ac9e..05e7497fd106 100644
--- a/drivers/staging/rtl8192u/ieee80211/digest.c
+++ b/drivers/staging/rtl8192u/ieee80211/digest.c
@@ -39,12 +39,12 @@ static void update(struct crypto_tfm *tfm,
39 unsigned int bytes_from_page = min(l, ((unsigned int) 39 unsigned int bytes_from_page = min(l, ((unsigned int)
40 (PAGE_SIZE)) - 40 (PAGE_SIZE)) -
41 offset); 41 offset);
42 char *p = crypto_kmap(pg, 0) + offset; 42 char *p = kmap_atomic(pg) + offset;
43 43
44 tfm->__crt_alg->cra_digest.dia_update 44 tfm->__crt_alg->cra_digest.dia_update
45 (crypto_tfm_ctx(tfm), p, 45 (crypto_tfm_ctx(tfm), p,
46 bytes_from_page); 46 bytes_from_page);
47 crypto_kunmap(p, 0); 47 kunmap_atomic(p);
48 crypto_yield(tfm); 48 crypto_yield(tfm);
49 offset = 0; 49 offset = 0;
50 pg++; 50 pg++;
@@ -75,10 +75,10 @@ static void digest(struct crypto_tfm *tfm,
75 tfm->crt_digest.dit_init(tfm); 75 tfm->crt_digest.dit_init(tfm);
76 76
77 for (i = 0; i < nsg; i++) { 77 for (i = 0; i < nsg; i++) {
78 char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset; 78 char *p = kmap_atomic(sg[i].page) + sg[i].offset;
79 tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm), 79 tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
80 p, sg[i].length); 80 p, sg[i].length);
81 crypto_kunmap(p, 0); 81 kunmap_atomic(p);
82 crypto_yield(tfm); 82 crypto_yield(tfm);
83 } 83 }
84 crypto_digest_final(tfm, out); 84 crypto_digest_final(tfm, out);
diff --git a/drivers/staging/rtl8192u/ieee80211/internal.h b/drivers/staging/rtl8192u/ieee80211/internal.h
index a7c096eb269f..bebe13ac53b7 100644
--- a/drivers/staging/rtl8192u/ieee80211/internal.h
+++ b/drivers/staging/rtl8192u/ieee80211/internal.h
@@ -23,23 +23,6 @@
23#include <asm/kmap_types.h> 23#include <asm/kmap_types.h>
24 24
25 25
26extern enum km_type crypto_km_types[];
27
28static inline enum km_type crypto_kmap_type(int out)
29{
30 return crypto_km_types[(in_softirq() ? 2 : 0) + out];
31}
32
33static inline void *crypto_kmap(struct page *page, int out)
34{
35 return kmap_atomic(page, crypto_kmap_type(out));
36}
37
38static inline void crypto_kunmap(void *vaddr, int out)
39{
40 kunmap_atomic(vaddr, crypto_kmap_type(out));
41}
42
43static inline void crypto_yield(struct crypto_tfm *tfm) 26static inline void crypto_yield(struct crypto_tfm *tfm)
44{ 27{
45 if (!in_softirq()) 28 if (!in_softirq())
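
The deleted helpers were the whole reason this driver carried a private km_type enum: crypto_kmap_type() picked one of four slots from the direction flag and in_softirq() state, and crypto_kmap()/crypto_kunmap() threaded that choice through every call. With slots managed implicitly, the indirection flattens to direct calls, which is what digest.c above now does:

  /* before: p = crypto_kmap(page, out);  ...  crypto_kunmap(p, out); */
  /* after:  p = kmap_atomic(page);       ...  kunmap_atomic(p);      */
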
diff --git a/drivers/staging/rtl8192u/ieee80211/kmap_types.h b/drivers/staging/rtl8192u/ieee80211/kmap_types.h
deleted file mode 100644
index de67bb01b5f5..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/kmap_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef __KMAP_TYPES_H
2
3#define __KMAP_TYPES_H
4
5
6enum km_type {
7 KM_BOUNCE_READ,
8 KM_SKB_SUNRPC_DATA,
9 KM_SKB_DATA_SOFTIRQ,
10 KM_USER0,
11 KM_USER1,
12 KM_BH_IRQ,
13 KM_SOFTIRQ0,
14 KM_SOFTIRQ1,
15 KM_TYPE_NR
16};
17
18#define _ASM_KMAP_TYPES_H
19
20#endif
diff --git a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
index 3543a6145046..8b73f6cefcf9 100644
--- a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
+++ b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
@@ -13,8 +13,6 @@
13 * any later version. 13 * any later version.
14 * 14 *
15 */ 15 */
16#include "kmap_types.h"
17
18#include <linux/kernel.h> 16#include <linux/kernel.h>
19#include <linux/mm.h> 17#include <linux/mm.h>
20#include <linux/pagemap.h> 18#include <linux/pagemap.h>
@@ -23,13 +21,6 @@
23#include "internal.h" 21#include "internal.h"
24#include "scatterwalk.h" 22#include "scatterwalk.h"
25 23
26enum km_type crypto_km_types[] = {
27 KM_USER0,
28 KM_USER1,
29 KM_SOFTIRQ0,
30 KM_SOFTIRQ1,
31};
32
33void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch) 24void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
34{ 25{
35 if (nbytes <= walk->len_this_page && 26 if (nbytes <= walk->len_this_page &&
@@ -62,9 +53,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
62 walk->offset = sg->offset; 53 walk->offset = sg->offset;
63} 54}
64 55
65void scatterwalk_map(struct scatter_walk *walk, int out) 56void scatterwalk_map(struct scatter_walk *walk)
66{ 57{
67 walk->data = crypto_kmap(walk->page, out) + walk->offset; 58 walk->data = kmap_atomic(walk->page) + walk->offset;
68} 59}
69 60
70static void scatterwalk_pagedone(struct scatter_walk *walk, int out, 61static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
@@ -103,7 +94,7 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
103 * has been verified as multiple of the block size. 94 * has been verified as multiple of the block size.
104 */ 95 */
105int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 96int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
106 size_t nbytes, int out) 97 size_t nbytes)
107{ 98{
108 if (buf != walk->data) { 99 if (buf != walk->data) {
109 while (nbytes > walk->len_this_page) { 100 while (nbytes > walk->len_this_page) {
@@ -111,9 +102,9 @@ int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
111 buf += walk->len_this_page; 102 buf += walk->len_this_page;
112 nbytes -= walk->len_this_page; 103 nbytes -= walk->len_this_page;
113 104
114 crypto_kunmap(walk->data, out); 105 kunmap_atomic(walk->data);
115 scatterwalk_pagedone(walk, out, 1); 106 scatterwalk_pagedone(walk, out, 1);
116 scatterwalk_map(walk, out); 107 scatterwalk_map(walk);
117 } 108 }
118 109
119 memcpy_dir(buf, walk->data, nbytes, out); 110 memcpy_dir(buf, walk->data, nbytes, out);
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 70734652f724..ed2c800b3a7e 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -455,14 +455,14 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
455 } 455 }
456 ASSERT_SENTINEL(zh, ZBH); 456 ASSERT_SENTINEL(zh, ZBH);
457 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); 457 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
458 to_va = kmap_atomic(page, KM_USER0); 458 to_va = kmap_atomic(page);
459 size = zh->size; 459 size = zh->size;
460 from_va = zbud_data(zh, size); 460 from_va = zbud_data(zh, size);
461 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size, 461 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
462 to_va, &out_len); 462 to_va, &out_len);
463 BUG_ON(ret); 463 BUG_ON(ret);
464 BUG_ON(out_len != PAGE_SIZE); 464 BUG_ON(out_len != PAGE_SIZE);
465 kunmap_atomic(to_va, KM_USER0); 465 kunmap_atomic(to_va);
466out: 466out:
467 spin_unlock(&zbpg->lock); 467 spin_unlock(&zbpg->lock);
468 return ret; 468 return ret;
@@ -753,10 +753,10 @@ static void zv_decompress(struct page *page, void *handle)
753 zv = zs_map_object(zcache_host.zspool, handle); 753 zv = zs_map_object(zcache_host.zspool, handle);
754 BUG_ON(zv->size == 0); 754 BUG_ON(zv->size == 0);
755 ASSERT_SENTINEL(zv, ZVH); 755 ASSERT_SENTINEL(zv, ZVH);
756 to_va = kmap_atomic(page, KM_USER0); 756 to_va = kmap_atomic(page);
757 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv), 757 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
758 zv->size, to_va, &clen); 758 zv->size, to_va, &clen);
759 kunmap_atomic(to_va, KM_USER0); 759 kunmap_atomic(to_va);
760 zs_unmap_object(zcache_host.zspool, handle); 760 zs_unmap_object(zcache_host.zspool, handle);
761 BUG_ON(ret); 761 BUG_ON(ret);
762 BUG_ON(clen != PAGE_SIZE); 762 BUG_ON(clen != PAGE_SIZE);
@@ -1334,13 +1334,13 @@ static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
1334 if (unlikely(dmem == NULL)) 1334 if (unlikely(dmem == NULL))
1335 goto out; /* no buffer or no compressor so can't compress */ 1335 goto out; /* no buffer or no compressor so can't compress */
1336 *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER; 1336 *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
1337 from_va = kmap_atomic(from, KM_USER0); 1337 from_va = kmap_atomic(from);
1338 mb(); 1338 mb();
1339 ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem, 1339 ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
1340 out_len); 1340 out_len);
1341 BUG_ON(ret); 1341 BUG_ON(ret);
1342 *out_va = dmem; 1342 *out_va = dmem;
1343 kunmap_atomic(from_va, KM_USER0); 1343 kunmap_atomic(from_va);
1344 ret = 1; 1344 ret = 1;
1345out: 1345out:
1346 return ret; 1346 return ret;
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 7f138196b3c9..685d612a627b 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -175,9 +175,9 @@ static void handle_zero_page(struct bio_vec *bvec)
175 struct page *page = bvec->bv_page; 175 struct page *page = bvec->bv_page;
176 void *user_mem; 176 void *user_mem;
177 177
178 user_mem = kmap_atomic(page, KM_USER0); 178 user_mem = kmap_atomic(page);
179 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len); 179 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
180 kunmap_atomic(user_mem, KM_USER0); 180 kunmap_atomic(user_mem);
181 181
182 flush_dcache_page(page); 182 flush_dcache_page(page);
183} 183}
@@ -188,12 +188,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
188 struct page *page = bvec->bv_page; 188 struct page *page = bvec->bv_page;
189 unsigned char *user_mem, *cmem; 189 unsigned char *user_mem, *cmem;
190 190
191 user_mem = kmap_atomic(page, KM_USER0); 191 user_mem = kmap_atomic(page);
192 cmem = kmap_atomic(zram->table[index].handle, KM_USER1); 192 cmem = kmap_atomic(zram->table[index].handle);
193 193
194 memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len); 194 memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
195 kunmap_atomic(cmem, KM_USER1); 195 kunmap_atomic(cmem);
196 kunmap_atomic(user_mem, KM_USER0); 196 kunmap_atomic(user_mem);
197 197
198 flush_dcache_page(page); 198 flush_dcache_page(page);
199} 199}
@@ -242,7 +242,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
242 } 242 }
243 } 243 }
244 244
245 user_mem = kmap_atomic(page, KM_USER0); 245 user_mem = kmap_atomic(page);
246 if (!is_partial_io(bvec)) 246 if (!is_partial_io(bvec))
247 uncmem = user_mem; 247 uncmem = user_mem;
248 clen = PAGE_SIZE; 248 clen = PAGE_SIZE;
@@ -260,7 +260,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
260 } 260 }
261 261
262 zs_unmap_object(zram->mem_pool, zram->table[index].handle); 262 zs_unmap_object(zram->mem_pool, zram->table[index].handle);
263 kunmap_atomic(user_mem, KM_USER0); 263 kunmap_atomic(user_mem);
264 264
265 /* Should NEVER happen. Return bio error if it does. */ 265 /* Should NEVER happen. Return bio error if it does. */
266 if (unlikely(ret != LZO_E_OK)) { 266 if (unlikely(ret != LZO_E_OK)) {
@@ -292,7 +292,7 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
 	/* Page is stored uncompressed since it's incompressible */
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
 		memcpy(mem, cmem, PAGE_SIZE);
-		kunmap_atomic(cmem, KM_USER0);
+		kunmap_atomic(cmem);
 		return 0;
 	}
 
@@ -351,7 +351,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	    zram_test_flag(zram, index, ZRAM_ZERO))
 		zram_free_page(zram, index);
 
-	user_mem = kmap_atomic(page, KM_USER0);
+	user_mem = kmap_atomic(page);
 
 	if (is_partial_io(bvec))
 		memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -360,7 +360,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		uncmem = user_mem;
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem, KM_USER0);
+		kunmap_atomic(user_mem);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
@@ -372,7 +372,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       zram->compress_workmem);
 
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
 
@@ -400,8 +400,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_inc(&zram->stats.pages_expand);
 		handle = page_store;
-		src = kmap_atomic(page, KM_USER0);
-		cmem = kmap_atomic(page_store, KM_USER1);
+		src = kmap_atomic(page);
+		cmem = kmap_atomic(page_store);
 		goto memstore;
 	}
 
@@ -427,8 +427,8 @@ memstore:
 	memcpy(cmem, src, clen);
 
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-		kunmap_atomic(cmem, KM_USER1);
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(cmem);
+		kunmap_atomic(src);
 	} else {
 		zs_unmap_object(zram->mem_pool, handle);
 	}
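
Worth noting in the zram hunks: the new kmap_atomic() hands out stacked per-CPU slots, so nested mappings such as user_mem/cmem above must be released in the reverse order they were taken; the old explicit KM_USER0/KM_USER1 slots encoded that by hand. A minimal sketch of the nested pattern, assuming two arbitrary pages:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustration only: copy between two (possibly highmem) pages.
	 * The mapping taken last must be released first.
	 */
	static void copy_page_to_page(struct page *dst, struct page *src)
	{
		void *s = kmap_atomic(src);	/* was slot KM_USER0 */
		void *d = kmap_atomic(dst);	/* was slot KM_USER1 */

		memcpy(d, s, PAGE_SIZE);
		kunmap_atomic(d);		/* innermost mapping first */
		kunmap_atomic(s);
	}
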
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index cd5cd95812bb..929cc9364c8a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2344,7 +2344,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
 
 	offset = 0;
 	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
-		addr = kmap_atomic(sg_page(sg), KM_USER0);
+		addr = kmap_atomic(sg_page(sg));
 		if (!addr)
 			goto out;
 
@@ -2352,7 +2352,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
 			*(addr + sg->offset + i) ^= *(buf + offset + i);
 
 		offset += sg->length;
-		kunmap_atomic(addr, KM_USER0);
+		kunmap_atomic(addr);
 	}
 
 out:
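
The transport_xor_callback() hunks show the common scatterlist idiom: map each element's page only for as long as its bytes are touched. A rough sketch of that loop shape with placeholder names (not the driver's own):

	#include <linux/highmem.h>
	#include <linux/scatterlist.h>

	/* Illustration only: XOR a flat buffer into a scatterlist,
	 * mapping one sg page at a time.
	 */
	static void xor_buf_into_sg(struct scatterlist *sgl, int nents,
				    const unsigned char *buf)
	{
		struct scatterlist *sg;
		unsigned int i, off = 0;
		int count;

		for_each_sg(sgl, sg, nents, count) {
			unsigned char *addr = kmap_atomic(sg_page(sg));

			for (i = 0; i < sg->length; i++)
				addr[sg->offset + i] ^= buf[off + i];
			kunmap_atomic(addr);
			off += sg->length;
		}
	}
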
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index d8cabc21036d..2b693eefac55 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -146,14 +146,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 				PAGE_SIZE << compound_order(page);
 		} else {
 			BUG_ON(!page);
-			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
-					   KM_SOFTIRQ0);
+			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
 			page_addr = from;
 			from += mem_off & ~PAGE_MASK;
 			tlen = min(tlen, (size_t)(PAGE_SIZE -
 						(mem_off & ~PAGE_MASK)));
 			memcpy(to, from, tlen);
-			kunmap_atomic(page_addr, KM_SOFTIRQ0);
+			kunmap_atomic(page_addr);
 			to += tlen;
 		}
 
@@ -291,14 +290,13 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 
 		tlen = min(mem_len, frame_len);
 
-		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
-				 KM_SOFTIRQ0);
+		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
 		page_addr = to;
 		to += mem_off & ~PAGE_MASK;
 		tlen = min(tlen, (size_t)(PAGE_SIZE -
 					  (mem_off & ~PAGE_MASK)));
 		memcpy(to, from, tlen);
-		kunmap_atomic(page_addr, KM_SOFTIRQ0);
+		kunmap_atomic(page_addr);
 
 		from += tlen;
 		frame_len -= tlen;
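
Both tcm_fc hunks also drop KM_SOFTIRQ0, since the one-argument API no longer cares which context it is called from, and they keep the page-array form: mem_off >> PAGE_SHIFT picks the page, and the copy length is clipped so it never crosses the mapped page. A condensed sketch of that bounds logic, with hypothetical names:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Illustration only: copy len bytes out of a contiguous page
	 * array, one single-page kmap_atomic() window at a time.
	 */
	static void copy_from_pages(char *to, struct page *pages,
				    size_t off, size_t len)
	{
		while (len) {
			char *base = kmap_atomic(pages + (off >> PAGE_SHIFT));
			size_t in_page = off & ~PAGE_MASK;
			size_t tlen = min(len, (size_t)(PAGE_SIZE - in_page));

			memcpy(to, base + in_page, tlen);
			kunmap_atomic(base);
			to += tlen;
			off += tlen;
			len -= tlen;
		}
	}
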
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c14c42b95ab8..bdb2d6436b2b 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -937,9 +937,9 @@ static int set_bit_to_user(int nr, void __user *addr)
 	if (r < 0)
 		return r;
 	BUG_ON(r != 1);
-	base = kmap_atomic(page, KM_USER0);
+	base = kmap_atomic(page);
 	set_bit(bit, base);
-	kunmap_atomic(base, KM_USER0);
+	kunmap_atomic(base);
 	set_page_dirty_lock(page);
 	put_page(page);
 	return 0;
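
The vhost hunk is representative of the end state everywhere: pin the page, map, operate, unmap, with no slot bookkeeping left in any context. A minimal sketch of that set_bit_to_user()-style shape, assuming the caller has already reduced the user address to one page and one bit:

	#include <linux/highmem.h>
	#include <linux/bitops.h>
	#include <linux/mm.h>

	/* Illustration only: set a bit in a pinned user page. */
	static int set_user_bit(unsigned long uaddr, int bit)
	{
		struct page *page;
		void *base;
		int r = get_user_pages_fast(uaddr, 1, 1, &page);

		if (r != 1)
			return r < 0 ? r : -EFAULT;
		base = kmap_atomic(page);
		set_bit(bit, base);
		kunmap_atomic(base);
		set_page_dirty_lock(page);
		put_page(page);
		return 0;
	}
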