author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 12:40:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 12:40:26 -0400
commit	9f3938346a5c1fa504647670edb5fea5756cfb00 (patch)
tree	7cf6d24d6b076c8db8571494984924cac03703a2
parent	69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff)
parent	317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff)
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic()
    [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...
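All 59 conversions follow the same mechanical pattern at every call site:
the km_type slot argument simply disappears. A minimal illustrative sketch
(kmap_atomic()/kunmap_atomic() are the real API after this merge;
process_page() and do_work() are hypothetical names, not from this commit):

	/*
	 * Sketch of one converted call site. The old API made the caller
	 * pick a fixed mapping slot (KM_USER0 etc.); the new API handles
	 * slot bookkeeping internally, so the second argument goes away.
	 */
	static void process_page(struct page *page)
	{
		void *vaddr;

		vaddr = kmap_atomic(page);	/* was: kmap_atomic(page, KM_USER0) */
		do_work(vaddr);			/* hypothetical work on the mapping */
		kunmap_atomic(vaddr);		/* was: kunmap_atomic(vaddr, KM_USER0) */
	}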
-rw-r--r--Documentation/feature-removal-schedule.txt8
-rw-r--r--arch/arm/include/asm/highmem.h2
-rw-r--r--arch/arm/mm/copypage-fa.c12
-rw-r--r--arch/arm/mm/copypage-feroceon.c12
-rw-r--r--arch/arm/mm/copypage-v3.c12
-rw-r--r--arch/arm/mm/copypage-v4mc.c8
-rw-r--r--arch/arm/mm/copypage-v4wb.c12
-rw-r--r--arch/arm/mm/copypage-v4wt.c12
-rw-r--r--arch/arm/mm/copypage-v6.c12
-rw-r--r--arch/arm/mm/copypage-xsc3.c12
-rw-r--r--arch/arm/mm/copypage-xscale.c8
-rw-r--r--arch/arm/mm/highmem.c4
-rw-r--r--arch/frv/include/asm/highmem.h2
-rw-r--r--arch/frv/mm/highmem.c4
-rw-r--r--arch/mips/include/asm/highmem.h2
-rw-r--r--arch/mips/mm/c-r4k.c4
-rw-r--r--arch/mips/mm/highmem.c4
-rw-r--r--arch/mips/mm/init.c8
-rw-r--r--arch/mn10300/include/asm/highmem.h2
-rw-r--r--arch/parisc/include/asm/cacheflush.h2
-rw-r--r--arch/powerpc/include/asm/highmem.h2
-rw-r--r--arch/powerpc/kvm/book3s_pr.c4
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c5
-rw-r--r--arch/powerpc/mm/hugetlbpage.c4
-rw-r--r--arch/powerpc/mm/mem.c4
-rw-r--r--arch/sh/mm/cache-sh4.c4
-rw-r--r--arch/sh/mm/cache.c12
-rw-r--r--arch/sparc/include/asm/highmem.h2
-rw-r--r--arch/sparc/mm/highmem.c4
-rw-r--r--arch/tile/include/asm/highmem.h2
-rw-r--r--arch/tile/mm/highmem.c4
-rw-r--r--arch/um/kernel/skas/uaccess.c4
-rw-r--r--arch/x86/crypto/aesni-intel_glue.c24
-rw-r--r--arch/x86/include/asm/highmem.h2
-rw-r--r--arch/x86/kernel/crash_dump_32.c6
-rw-r--r--arch/x86/kvm/lapic.c8
-rw-r--r--arch/x86/kvm/paging_tmpl.h4
-rw-r--r--arch/x86/kvm/x86.c8
-rw-r--r--arch/x86/lib/usercopy_32.c4
-rw-r--r--arch/x86/mm/highmem_32.c4
-rw-r--r--crypto/ahash.c4
-rw-r--r--crypto/async_tx/async_memcpy.c8
-rw-r--r--crypto/blkcipher.c8
-rw-r--r--crypto/ccm.c4
-rw-r--r--crypto/scatterwalk.c8
-rw-r--r--crypto/shash.c8
-rw-r--r--drivers/ata/libata-sff.c8
-rw-r--r--drivers/block/brd.c20
-rw-r--r--drivers/block/drbd/drbd_bitmap.c50
-rw-r--r--drivers/block/drbd/drbd_nl.c4
-rw-r--r--drivers/block/loop.c16
-rw-r--r--drivers/block/pktcdvd.c8
-rw-r--r--drivers/crypto/hifn_795x.c10
-rw-r--r--drivers/edac/edac_mc.c4
-rw-r--r--drivers/gpu/drm/drm_cache.c8
-rw-r--r--drivers/gpu/drm/gma500/mmu.c30
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c16
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c6
-rw-r--r--drivers/ide/ide-taskfile.c4
-rw-r--r--drivers/infiniband/ulp/iser/iser_memory.c8
-rw-r--r--drivers/md/bitmap.c42
-rw-r--r--drivers/md/dm-crypt.c8
-rw-r--r--drivers/media/video/ivtv/ivtv-udma.c4
-rw-r--r--drivers/memstick/host/jmb38x_ms.c4
-rw-r--r--drivers/memstick/host/tifm_ms.c4
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c11
-rw-r--r--drivers/net/ethernet/sun/cassini.c4
-rw-r--r--drivers/scsi/arcmsr/arcmsr_hba.c8
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c5
-rw-r--r--drivers/scsi/cxgbi/libcxgbi.c5
-rw-r--r--drivers/scsi/fcoe/fcoe.c4
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c5
-rw-r--r--drivers/scsi/gdth.c4
-rw-r--r--drivers/scsi/ips.c6
-rw-r--r--drivers/scsi/isci/request.c16
-rw-r--r--drivers/scsi/libfc/fc_fcp.c8
-rw-r--r--drivers/scsi/libfc/fc_libfc.c8
-rw-r--r--drivers/scsi/libfc/fc_libfc.h2
-rw-r--r--drivers/scsi/libfc/fc_lport.c2
-rw-r--r--drivers/scsi/libiscsi_tcp.c4
-rw-r--r--drivers/scsi/libsas/sas_host_smp.c8
-rw-r--r--drivers/scsi/megaraid.c4
-rw-r--r--drivers/scsi/mvsas/mv_sas.c4
-rw-r--r--drivers/scsi/scsi_debug.c24
-rw-r--r--drivers/scsi/scsi_lib.c4
-rw-r--r--drivers/scsi/sd_dif.c12
-rw-r--r--drivers/scsi/storvsc_drv.c52
-rw-r--r--drivers/staging/ramster/xvmalloc.c39
-rw-r--r--drivers/staging/ramster/zcache-main.c20
-rw-r--r--drivers/staging/rtl8192u/ieee80211/cipher.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/digest.c8
-rw-r--r--drivers/staging/rtl8192u/ieee80211/internal.h17
-rw-r--r--drivers/staging/rtl8192u/ieee80211/kmap_types.h20
-rw-r--r--drivers/staging/rtl8192u/ieee80211/scatterwalk.c19
-rw-r--r--drivers/staging/zcache/zcache-main.c12
-rw-r--r--drivers/staging/zram/zram_drv.c32
-rw-r--r--drivers/target/target_core_transport.c4
-rw-r--r--drivers/target/tcm_fc/tfc_io.c10
-rw-r--r--drivers/vhost/vhost.c4
-rw-r--r--fs/afs/fsclient.c8
-rw-r--r--fs/afs/mntpt.c4
-rw-r--r--fs/aio.c30
-rw-r--r--fs/bio-integrity.c10
-rw-r--r--fs/btrfs/compression.c12
-rw-r--r--fs/btrfs/extent_io.c16
-rw-r--r--fs/btrfs/file-item.c4
-rw-r--r--fs/btrfs/inode.c26
-rw-r--r--fs/btrfs/lzo.c4
-rw-r--r--fs/btrfs/scrub.c8
-rw-r--r--fs/btrfs/zlib.c4
-rw-r--r--fs/exec.c4
-rw-r--r--fs/exofs/dir.c4
-rw-r--r--fs/ext2/dir.c4
-rw-r--r--fs/fuse/dev.c4
-rw-r--r--fs/fuse/file.c4
-rw-r--r--fs/gfs2/aops.c12
-rw-r--r--fs/gfs2/lops.c8
-rw-r--r--fs/gfs2/quota.c4
-rw-r--r--fs/jbd/journal.c12
-rw-r--r--fs/jbd/transaction.c4
-rw-r--r--fs/jbd2/commit.c4
-rw-r--r--fs/jbd2/journal.c12
-rw-r--r--fs/jbd2/transaction.c4
-rw-r--r--fs/logfs/dir.c18
-rw-r--r--fs/logfs/readwrite.c38
-rw-r--r--fs/logfs/segment.c4
-rw-r--r--fs/minix/dir.c4
-rw-r--r--fs/namei.c4
-rw-r--r--fs/nfs/dir.c8
-rw-r--r--fs/nfs/nfs4proc.c4
-rw-r--r--fs/nilfs2/cpfile.c94
-rw-r--r--fs/nilfs2/dat.c38
-rw-r--r--fs/nilfs2/dir.c4
-rw-r--r--fs/nilfs2/ifile.c4
-rw-r--r--fs/nilfs2/mdt.c4
-rw-r--r--fs/nilfs2/page.c8
-rw-r--r--fs/nilfs2/recovery.c4
-rw-r--r--fs/nilfs2/segbuf.c4
-rw-r--r--fs/nilfs2/sufile.c68
-rw-r--r--fs/ntfs/aops.c20
-rw-r--r--fs/ntfs/attrib.c20
-rw-r--r--fs/ntfs/file.c16
-rw-r--r--fs/ntfs/super.c8
-rw-r--r--fs/ocfs2/aops.c16
-rw-r--r--fs/pipe.c8
-rw-r--r--fs/reiserfs/stree.c4
-rw-r--r--fs/reiserfs/tail_conversion.c4
-rw-r--r--fs/splice.c7
-rw-r--r--fs/squashfs/file.c8
-rw-r--r--fs/squashfs/symlink.c4
-rw-r--r--fs/ubifs/file.c4
-rw-r--r--fs/udf/file.c4
-rw-r--r--include/crypto/scatterwalk.h28
-rw-r--r--include/linux/bio.h8
-rw-r--r--include/linux/highmem.h79
-rw-r--r--kernel/debug/kdb/kdb_support.c4
-rw-r--r--kernel/power/snapshot.c28
-rw-r--r--lib/scatterlist.c4
-rw-r--r--lib/swiotlb.c5
-rw-r--r--mm/bounce.c4
-rw-r--r--mm/filemap.c8
-rw-r--r--mm/ksm.c12
-rw-r--r--mm/memory.c4
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/swapfile.c30
-rw-r--r--mm/vmalloc.c8
-rw-r--r--net/core/kmap_skb.h4
-rw-r--r--net/rds/ib_recv.c7
-rw-r--r--net/rds/info.c6
-rw-r--r--net/rds/iw_recv.c7
-rw-r--r--net/rds/loop.c2
-rw-r--r--net/rds/rds.h2
-rw-r--r--net/rds/recv.c2
-rw-r--r--net/rds/tcp_recv.c11
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c4
-rw-r--r--net/sunrpc/socklib.c4
-rw-r--r--net/sunrpc/xdr.c20
-rw-r--r--net/sunrpc/xprtrdma/rpc_rdma.c8
-rw-r--r--security/tomoyo/domain.c4
180 files changed, 921 insertions, 969 deletions
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index d5dc80f30352..4bfd982f8080 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -535,3 +535,11 @@ Why: This driver provides support for USB storage devices like "USB
 	  (CONFIG_USB_STORAGE) which only drawback is the additional SCSI
 	  stack.
 Who:	Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
+
+----------------------------
+
+What:	kmap_atomic(page, km_type)
+When:	3.5
+Why:	The old kmap_atomic() with two arguments is deprecated, we only
+	keep it for backward compatibility for few cycles and then drop it.
+Who:	Cong Wang <amwang@redhat.com>
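For the deprecation window promised above, unconverted two-argument callers
have to keep building while the extra argument is ignored. One plausible
shim — a sketch assuming GNU variadic macros, with hypothetical names, not
necessarily the mechanism the kernel actually carries until 3.5:

	/*
	 * Hypothetical compatibility shim (illustration only): the real
	 * function takes one argument, and a variadic macro silently
	 * drops any legacy km_type argument old callers still pass.
	 */
	void *__map_atomic(struct page *page);
	#define map_atomic(page, args...)	__map_atomic(page)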
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h
index a4edd19dd3d6..8c5e828f484d 100644
--- a/arch/arm/include/asm/highmem.h
+++ b/arch/arm/include/asm/highmem.h
@@ -57,7 +57,7 @@ static inline void *kmap_high_get(struct page *page)
 #ifdef CONFIG_HIGHMEM
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(const void *ptr);
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c
index d2852e1635b1..d130a5ece5d5 100644
--- a/arch/arm/mm/copypage-fa.c
+++ b/arch/arm/mm/copypage-fa.c
@@ -44,11 +44,11 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	fa_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from,
  */
 void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns fa_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c
index ac163de7dc01..49ee0c1a7209 100644
--- a/arch/arm/mm/copypage-feroceon.c
+++ b/arch/arm/mm/copypage-feroceon.c
@@ -72,17 +72,17 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	feroceon_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns feroceon_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c
index f72303e1d804..3935bddd4769 100644
--- a/arch/arm/mm/copypage-v3.c
+++ b/arch/arm/mm/copypage-v3.c
@@ -42,11 +42,11 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v3_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from,
  */
 void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\n\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v3_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c
index 7d0a8c230342..ec8c3befb9c8 100644
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -71,7 +71,7 @@ mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
@@ -85,7 +85,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -93,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -111,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c
index cb589cbb2b6c..067d0fdd630c 100644
--- a/arch/arm/mm/copypage-v4wb.c
+++ b/arch/arm/mm/copypage-v4wb.c
@@ -52,12 +52,12 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	v4wb_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wb_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c
index 30c7d048a324..b85c5da2e510 100644
--- a/arch/arm/mm/copypage-v4wt.c
+++ b/arch/arm/mm/copypage-v4wt.c
@@ -48,11 +48,11 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	v4wt_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from,
  */
 void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile("\
 	mov	r1, %2				@ 1\n\
 	mov	r2, #0				@ 1\n\
@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 64)
 	: "r1", "r2", "r3", "ip", "lr");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns v4wt_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index 3d9a1552cef6..8b03a5814d00 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -38,11 +38,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
 {
 	void *kto, *kfrom;
 
-	kfrom = kmap_atomic(from, KM_USER0);
-	kto = kmap_atomic(to, KM_USER1);
+	kfrom = kmap_atomic(from);
+	kto = kmap_atomic(to);
 	copy_page(kto, kfrom);
-	kunmap_atomic(kto, KM_USER1);
-	kunmap_atomic(kfrom, KM_USER0);
+	kunmap_atomic(kto);
+	kunmap_atomic(kfrom);
 }
 
 /*
@@ -51,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
  */
 static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *kaddr = kmap_atomic(page);
 	clear_page(kaddr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 /*
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c
index f9cde0702f1e..03a2042aced5 100644
--- a/arch/arm/mm/copypage-xsc3.c
+++ b/arch/arm/mm/copypage-xsc3.c
@@ -75,12 +75,12 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
 {
 	void *kto, *kfrom;
 
-	kto = kmap_atomic(to, KM_USER0);
-	kfrom = kmap_atomic(from, KM_USER1);
+	kto = kmap_atomic(to);
+	kfrom = kmap_atomic(from);
 	flush_cache_page(vma, vaddr, page_to_pfn(from));
 	xsc3_mc_copy_user_page(kto, kfrom);
-	kunmap_atomic(kfrom, KM_USER1);
-	kunmap_atomic(kto, KM_USER0);
+	kunmap_atomic(kfrom);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from,
  */
 void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile ("\
 	mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xsc3_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c
index 610c24ced310..439d106ae638 100644
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -93,7 +93,7 @@ mc_copy_user_page(void *from, void *to)
 void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 	unsigned long vaddr, struct vm_area_struct *vma)
 {
-	void *kto = kmap_atomic(to, KM_USER1);
+	void *kto = kmap_atomic(to);
 
 	if (!test_and_set_bit(PG_dcache_clean, &from->flags))
 		__flush_dcache_page(page_mapping(from), from);
@@ -107,7 +107,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 
 	raw_spin_unlock(&minicache_lock);
 
-	kunmap_atomic(kto, KM_USER1);
+	kunmap_atomic(kto);
 }
 
 /*
@@ -116,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from,
 void
 xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *ptr, *kaddr = kmap_atomic(page, KM_USER0);
+	void *ptr, *kaddr = kmap_atomic(page);
 	asm volatile(
 	"mov	r1, %2				\n\
 	mov	r2, #0				\n\
@@ -133,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr)
 	: "=r" (ptr)
 	: "0" (kaddr), "I" (PAGE_SIZE / 32)
 	: "r1", "r2", "r3", "ip");
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 struct cpu_user_fns xscale_mc_user_fns __initdata = {
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 807c0573abbe..5a21505d7550 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -36,7 +36,7 @@ void kunmap(struct page *page)
 }
 EXPORT_SYMBOL(kunmap);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned int idx;
 	unsigned long vaddr;
@@ -81,7 +81,7 @@ void *__kmap_atomic(struct page *page)
 
 	return (void *)vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index a8d6565d415d..716956a5317b 100644
--- a/arch/frv/include/asm/highmem.h
+++ b/arch/frv/include/asm/highmem.h
@@ -157,7 +157,7 @@ static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type)
 	pagefault_enable();
 }
 
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 
 #endif /* !__ASSEMBLY__ */
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c
index fd7fcd4c2e33..31902c9d5be5 100644
--- a/arch/frv/mm/highmem.c
+++ b/arch/frv/mm/highmem.c
@@ -37,7 +37,7 @@ struct page *kmap_atomic_to_page(void *ptr)
 	return virt_to_page(ptr);
 }
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned long paddr;
 	int type;
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page)
 		return NULL;
 	}
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 77e644082a3b..2d91888c9b74 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -47,7 +47,7 @@ extern void kunmap_high(struct page *page);
 
 extern void *kmap(struct page *page);
 extern void kunmap(struct page *page);
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern void *kmap_atomic_pfn(unsigned long pfn);
 extern struct page *kmap_atomic_to_page(void *ptr);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index 4f9eb0b23036..c97087d12d07 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -498,7 +498,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 		if (map_coherent)
 			vaddr = kmap_coherent(page, addr);
 		else
-			vaddr = kmap_atomic(page, KM_USER0);
+			vaddr = kmap_atomic(page);
 		addr = (unsigned long)vaddr;
 	}
 
@@ -521,7 +521,7 @@ static inline void local_r4k_flush_cache_page(void *args)
 		if (map_coherent)
 			kunmap_coherent();
 		else
-			kunmap_atomic(vaddr, KM_USER0);
+			kunmap_atomic(vaddr);
 	}
 }
 
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c
index 3634c7ea06ac..aff57057a949 100644
--- a/arch/mips/mm/highmem.c
+++ b/arch/mips/mm/highmem.c
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(kunmap);
  * kmaps are appropriate for short, tight code paths only.
  */
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned long vaddr;
 	int idx, type;
@@ -62,7 +62,7 @@ void *__kmap_atomic(struct page *page)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 3b3ffd439cd7..1a85ba92eb5c 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -207,21 +207,21 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
 	void *vfrom, *vto;
 
-	vto = kmap_atomic(to, KM_USER1);
+	vto = kmap_atomic(to);
 	if (cpu_has_dc_aliases &&
 	    page_mapped(from) && !Page_dcache_dirty(from)) {
 		vfrom = kmap_coherent(from, vaddr);
 		copy_page(vto, vfrom);
 		kunmap_coherent();
 	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
+		vfrom = kmap_atomic(from);
 		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 	}
 	if ((!cpu_has_ic_fills_f_dc) ||
 	    pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK))
 		flush_data_cache_page((unsigned long)vto);
-	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vto);
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h
index bfe2d88604d9..7c137cd8aa37 100644
--- a/arch/mn10300/include/asm/highmem.h
+++ b/arch/mn10300/include/asm/highmem.h
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
  * be used in IRQ contexts, so in some (very limited) cases we need
  * it.
  */
-static inline unsigned long __kmap_atomic(struct page *page)
+static inline unsigned long kmap_atomic(struct page *page)
 {
 	unsigned long vaddr;
 	int idx, type;
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index da601dd34c05..9f21ab0c02e3 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -140,7 +140,7 @@ static inline void *kmap(struct page *page)
 
 #define kunmap(page)			kunmap_parisc(page_address(page))
 
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
 {
 	pagefault_disable();
 	return page_address(page);
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index dbc264010d0b..caaf6e00630d 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -79,7 +79,7 @@ static inline void kunmap(struct page *page)
 		kunmap_high(page);
 }
 
-static inline void *__kmap_atomic(struct page *page)
+static inline void *kmap_atomic(struct page *page)
 {
 	return kmap_atomic_prot(page, kmap_prot);
 }
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e2cfb9e1e20e..220fcdf26978 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -227,14 +227,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
 	hpage_offset /= 4;
 
 	get_page(hpage);
-	page = kmap_atomic(hpage, KM_USER0);
+	page = kmap_atomic(hpage);
 
 	/* patch dcbz into reserved instruction, so we trap */
 	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
 		if ((page[i] & 0xff0007ff) == INS_DCBZ)
 			page[i] &= 0xfffffff7;
 
-	kunmap_atomic(page, KM_USER0);
+	kunmap_atomic(page);
 	put_page(hpage);
 }
 
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 329be36c0a8d..6747eece84af 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -365,12 +365,11 @@ static inline void __dma_sync_page_highmem(struct page *page,
 	local_irq_save(flags);
 
 	do {
-		start = (unsigned long)kmap_atomic(page + seg_nr,
-				KM_PPC_SYNC_PAGE) + seg_offset;
+		start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
 
 		/* Sync this buffer segment */
 		__dma_sync((void *)start, seg_size, direction);
-		kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE);
+		kunmap_atomic((void *)start);
 		seg_nr++;
 
 		/* Calculate next buffer segment size */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a8b3cc7d90fe..57c7465e656e 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -910,9 +910,9 @@ void flush_dcache_icache_hugepage(struct page *page)
 		if (!PageHighMem(page)) {
 			__flush_dcache_icache(page_address(page+i));
 		} else {
-			start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE);
+			start = kmap_atomic(page+i);
 			__flush_dcache_icache(start);
-			kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+			kunmap_atomic(start);
 		}
 	}
 }
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d974b79a3068..baaafde7d135 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -458,9 +458,9 @@ void flush_dcache_icache_page(struct page *page)
 #endif
 #ifdef CONFIG_BOOKE
 	{
-		void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE);
+		void *start = kmap_atomic(page);
 		__flush_dcache_icache(start);
-		kunmap_atomic(start, KM_PPC_SYNC_ICACHE);
+		kunmap_atomic(start);
 	}
 #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
 	/* On 8xx there is no need to kmap since highmem is not supported */
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c
index 92eb98633ab0..112fea12522a 100644
--- a/arch/sh/mm/cache-sh4.c
+++ b/arch/sh/mm/cache-sh4.c
@@ -244,7 +244,7 @@ static void sh4_flush_cache_page(void *args)
 		if (map_coherent)
 			vaddr = kmap_coherent(page, address);
 		else
-			vaddr = kmap_atomic(page, KM_USER0);
+			vaddr = kmap_atomic(page);
 
 		address = (unsigned long)vaddr;
 	}
@@ -259,7 +259,7 @@ static void sh4_flush_cache_page(void *args)
 		if (map_coherent)
 			kunmap_coherent(vaddr);
 		else
-			kunmap_atomic(vaddr, KM_USER0);
+			kunmap_atomic(vaddr);
 	}
 }
 
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c
index 5a580ea04429..616966a96cba 100644
--- a/arch/sh/mm/cache.c
+++ b/arch/sh/mm/cache.c
@@ -95,7 +95,7 @@ void copy_user_highpage(struct page *to, struct page *from,
 {
 	void *vfrom, *vto;
 
-	vto = kmap_atomic(to, KM_USER1);
+	vto = kmap_atomic(to);
 
 	if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
 	    test_bit(PG_dcache_clean, &from->flags)) {
@@ -103,16 +103,16 @@ void copy_user_highpage(struct page *to, struct page *from,
 		copy_page(vto, vfrom);
 		kunmap_coherent(vfrom);
 	} else {
-		vfrom = kmap_atomic(from, KM_USER0);
+		vfrom = kmap_atomic(from);
 		copy_page(vto, vfrom);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 	}
 
 	if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) ||
 	    (vma->vm_flags & VM_EXEC))
 		__flush_purge_region(vto, PAGE_SIZE);
 
-	kunmap_atomic(vto, KM_USER1);
+	kunmap_atomic(vto);
 	/* Make sure this page is cleared on other CPU's too before using it */
 	smp_wmb();
 }
@@ -120,14 +120,14 @@ EXPORT_SYMBOL(copy_user_highpage);
 
 void clear_user_highpage(struct page *page, unsigned long vaddr)
 {
-	void *kaddr = kmap_atomic(page, KM_USER0);
+	void *kaddr = kmap_atomic(page);
 
 	clear_page(kaddr);
 
 	if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK))
 		__flush_purge_region(kaddr, PAGE_SIZE);
 
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 EXPORT_SYMBOL(clear_user_highpage);
 
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h
index 3d7afbb7f4bb..3b6e00dd96e5 100644
--- a/arch/sparc/include/asm/highmem.h
+++ b/arch/sparc/include/asm/highmem.h
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page)
 		kunmap_high(page);
 }
 
-extern void *__kmap_atomic(struct page *page);
+extern void *kmap_atomic(struct page *page);
 extern void __kunmap_atomic(void *kvaddr);
 extern struct page *kmap_atomic_to_page(void *vaddr);
 
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c
index 77140a02c86a..055c66cf1bf4 100644
--- a/arch/sparc/mm/highmem.c
+++ b/arch/sparc/mm/highmem.c
@@ -30,7 +30,7 @@
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	unsigned long vaddr;
 	long idx, type;
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page)
 
 	return (void*) vaddr;
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index b2a6c5de79ab..fc8429a31c85 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -59,7 +59,7 @@ void *kmap_fix_kpte(struct page *page, int finished);
 /* This macro is used only in map_new_virtual() to map "page". */
 #define kmap_prot page_to_kpgprot(page)
 
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index 31dbbd9afe47..ef8e5a62b6e3 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -224,12 +224,12 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
 	return kmap_atomic_prot(page, PAGE_NONE);
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 void __kunmap_atomic(void *kvaddr)
 {
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c
index 9fefd924fb49..cd7df79c6a56 100644
--- a/arch/um/kernel/skas/uaccess.c
+++ b/arch/um/kernel/skas/uaccess.c
@@ -69,7 +69,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
 		return -1;
 
 	page = pte_page(*pte);
-	addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) +
+	addr = (unsigned long) kmap_atomic(page) +
 		(addr & ~PAGE_MASK);
 
 	current->thread.fault_catcher = &buf;
@@ -82,7 +82,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write,
 
 	current->thread.fault_catcher = NULL;
 
-	kunmap_atomic((void *)addr, KM_UML_USERCOPY);
+	kunmap_atomic((void *)addr);
 
 	return n;
 }
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c
index b3350bd32c60..c799352e24fc 100644
--- a/arch/x86/crypto/aesni-intel_glue.c
+++ b/arch/x86/crypto/aesni-intel_glue.c
@@ -1108,12 +1108,12 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
 		one_entry_in_sg = 1;
 		scatterwalk_start(&src_sg_walk, req->src);
 		scatterwalk_start(&assoc_sg_walk, req->assoc);
-		src = scatterwalk_map(&src_sg_walk, 0);
-		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		src = scatterwalk_map(&src_sg_walk);
+		assoc = scatterwalk_map(&assoc_sg_walk);
 		dst = src;
 		if (unlikely(req->src != req->dst)) {
 			scatterwalk_start(&dst_sg_walk, req->dst);
-			dst = scatterwalk_map(&dst_sg_walk, 0);
+			dst = scatterwalk_map(&dst_sg_walk);
 		}
 
 	} else {
@@ -1137,11 +1137,11 @@ static int __driver_rfc4106_encrypt(struct aead_request *req)
 	 * back to the packet. */
 	if (one_entry_in_sg) {
 		if (unlikely(req->src != req->dst)) {
-			scatterwalk_unmap(dst, 0);
+			scatterwalk_unmap(dst);
 			scatterwalk_done(&dst_sg_walk, 0, 0);
 		}
-		scatterwalk_unmap(src, 0);
-		scatterwalk_unmap(assoc, 0);
+		scatterwalk_unmap(src);
+		scatterwalk_unmap(assoc);
 		scatterwalk_done(&src_sg_walk, 0, 0);
 		scatterwalk_done(&assoc_sg_walk, 0, 0);
 	} else {
@@ -1190,12 +1190,12 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 		one_entry_in_sg = 1;
 		scatterwalk_start(&src_sg_walk, req->src);
 		scatterwalk_start(&assoc_sg_walk, req->assoc);
-		src = scatterwalk_map(&src_sg_walk, 0);
-		assoc = scatterwalk_map(&assoc_sg_walk, 0);
+		src = scatterwalk_map(&src_sg_walk);
+		assoc = scatterwalk_map(&assoc_sg_walk);
 		dst = src;
 		if (unlikely(req->src != req->dst)) {
 			scatterwalk_start(&dst_sg_walk, req->dst);
-			dst = scatterwalk_map(&dst_sg_walk, 0);
+			dst = scatterwalk_map(&dst_sg_walk);
 		}
 
 	} else {
@@ -1220,11 +1220,11 @@ static int __driver_rfc4106_decrypt(struct aead_request *req)
 
 	if (one_entry_in_sg) {
 		if (unlikely(req->src != req->dst)) {
-			scatterwalk_unmap(dst, 0);
+			scatterwalk_unmap(dst);
 			scatterwalk_done(&dst_sg_walk, 0, 0);
 		}
-		scatterwalk_unmap(src, 0);
-		scatterwalk_unmap(assoc, 0);
+		scatterwalk_unmap(src);
+		scatterwalk_unmap(assoc);
 		scatterwalk_done(&src_sg_walk, 0, 0);
 		scatterwalk_done(&assoc_sg_walk, 0, 0);
 	} else {
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h
index 3bd04022fd0c..302a323b3f67 100644
--- a/arch/x86/include/asm/highmem.h
+++ b/arch/x86/include/asm/highmem.h
@@ -61,7 +61,7 @@ void *kmap(struct page *page);
 void kunmap(struct page *page);
 
 void *kmap_atomic_prot(struct page *page, pgprot_t prot);
-void *__kmap_atomic(struct page *page);
+void *kmap_atomic(struct page *page);
 void __kunmap_atomic(void *kvaddr);
 void *kmap_atomic_pfn(unsigned long pfn);
 void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot);
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c
index 642f75a68cd5..11891ca7b716 100644
--- a/arch/x86/kernel/crash_dump_32.c
+++ b/arch/x86/kernel/crash_dump_32.c
@@ -62,16 +62,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 
 	if (!userbuf) {
 		memcpy(buf, (vaddr + offset), csize);
-		kunmap_atomic(vaddr, KM_PTE0);
+		kunmap_atomic(vaddr);
 	} else {
 		if (!kdump_buf_page) {
 			printk(KERN_WARNING "Kdump: Kdump buffer page not"
 				" allocated\n");
-			kunmap_atomic(vaddr, KM_PTE0);
+			kunmap_atomic(vaddr);
 			return -EFAULT;
 		}
 		copy_page(kdump_buf_page, vaddr);
-		kunmap_atomic(vaddr, KM_PTE0);
+		kunmap_atomic(vaddr);
 		if (copy_to_user(buf, (kdump_buf_page + offset), csize))
 			return -EFAULT;
 	}
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index cfdc6e0ef002..31bfc6927bc0 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1283,9 +1283,9 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
 	if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr)
 		return;
 
-	vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+	vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
 	data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr));
-	kunmap_atomic(vapic, KM_USER0);
+	kunmap_atomic(vapic);
 
 	apic_set_tpr(vcpu->arch.apic, data & 0xff);
 }
@@ -1310,9 +1310,9 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
 	max_isr = 0;
 	data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24);
 
-	vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0);
+	vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
 	*(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data;
-	kunmap_atomic(vapic, KM_USER0);
+	kunmap_atomic(vapic);
 }
 
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 15610285ebb6..df5a70311be8 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -92,9 +92,9 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
 	if (unlikely(npages != 1))
 		return -EFAULT;
 
-	table = kmap_atomic(page, KM_USER0);
+	table = kmap_atomic(page);
 	ret = CMPXCHG(&table[index], orig_pte, new_pte);
-	kunmap_atomic(table, KM_USER0);
+	kunmap_atomic(table);
 
 	kvm_release_page_dirty(page);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9cbfc0698118..bb4fd2636bc2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1162,12 +1162,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	 */
 	vcpu->hv_clock.version += 2;
 
-	shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0);
+	shared_kaddr = kmap_atomic(vcpu->time_page);
 
 	memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
 	       sizeof(vcpu->hv_clock));
 
-	kunmap_atomic(shared_kaddr, KM_USER0);
+	kunmap_atomic(shared_kaddr);
 
 	mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
 	return 0;
@@ -3848,7 +3848,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 		goto emul_write;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	kaddr += offset_in_page(gpa);
 	switch (bytes) {
 	case 1:
@@ -3866,7 +3866,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt,
 	default:
 		BUG();
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	kvm_release_page_dirty(page);
 
 	if (!exchanged)
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index e218d5df85ff..d9b094ca7aaa 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -760,9 +760,9 @@ survive:
 			break;
 		}
 
-		maddr = kmap_atomic(pg, KM_USER0);
+		maddr = kmap_atomic(pg);
 		memcpy(maddr + offset, from, len);
-		kunmap_atomic(maddr, KM_USER0);
+		kunmap_atomic(maddr);
 		set_page_dirty_lock(pg);
 		put_page(pg);
 		up_read(&current->mm->mmap_sem);
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
index f4f29b19fac5..6f31ee56c008 100644
--- a/arch/x86/mm/highmem_32.c
+++ b/arch/x86/mm/highmem_32.c
@@ -51,11 +51,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
 }
 EXPORT_SYMBOL(kmap_atomic_prot);
 
-void *__kmap_atomic(struct page *page)
+void *kmap_atomic(struct page *page)
 {
 	return kmap_atomic_prot(page, kmap_prot);
 }
-EXPORT_SYMBOL(__kmap_atomic);
+EXPORT_SYMBOL(kmap_atomic);
 
 /*
  * This is the same as kmap_atomic() but can map memory that doesn't
diff --git a/crypto/ahash.c b/crypto/ahash.c
index ac93c99cfae8..33bc9b62e9ae 100644
--- a/crypto/ahash.c
+++ b/crypto/ahash.c
@@ -46,7 +46,7 @@ static int hash_walk_next(struct crypto_hash_walk *walk)
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);
 
-	walk->data = crypto_kmap(walk->pg, 0);
+	walk->data = kmap_atomic(walk->pg);
 	walk->data += offset;
 
 	if (offset & alignmask) {
@@ -93,7 +93,7 @@ int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 		return nbytes;
 	}
 
-	crypto_kunmap(walk->data, 0);
+	kunmap_atomic(walk->data);
 	crypto_yield(walk->flags);
 
 	if (err)
diff --git a/crypto/async_tx/async_memcpy.c b/crypto/async_tx/async_memcpy.c
index 0d5a90ca6501..361b5e8239bc 100644
--- a/crypto/async_tx/async_memcpy.c
+++ b/crypto/async_tx/async_memcpy.c
@@ -79,13 +79,13 @@ async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
 		/* wait for any prerequisite operations */
 		async_tx_quiesce(&submit->depend_tx);
 
-		dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
-		src_buf = kmap_atomic(src, KM_USER1) + src_offset;
+		dest_buf = kmap_atomic(dest) + dest_offset;
+		src_buf = kmap_atomic(src) + src_offset;
 
 		memcpy(dest_buf, src_buf, len);
 
-		kunmap_atomic(src_buf, KM_USER1);
-		kunmap_atomic(dest_buf, KM_USER0);
+		kunmap_atomic(src_buf);
+		kunmap_atomic(dest_buf);
 
 		async_tx_sync_epilog(submit);
 	}
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 1e61d1a888b2..4dd80c725498 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -43,22 +43,22 @@ static int blkcipher_walk_first(struct blkcipher_desc *desc,
 
 static inline void blkcipher_map_src(struct blkcipher_walk *walk)
 {
-	walk->src.virt.addr = scatterwalk_map(&walk->in, 0);
+	walk->src.virt.addr = scatterwalk_map(&walk->in);
 }
 
 static inline void blkcipher_map_dst(struct blkcipher_walk *walk)
 {
-	walk->dst.virt.addr = scatterwalk_map(&walk->out, 1);
+	walk->dst.virt.addr = scatterwalk_map(&walk->out);
 }
 
 static inline void blkcipher_unmap_src(struct blkcipher_walk *walk)
 {
-	scatterwalk_unmap(walk->src.virt.addr, 0);
+	scatterwalk_unmap(walk->src.virt.addr);
 }
 
 static inline void blkcipher_unmap_dst(struct blkcipher_walk *walk)
 {
-	scatterwalk_unmap(walk->dst.virt.addr, 1);
+	scatterwalk_unmap(walk->dst.virt.addr);
 }
 
 /* Get a spot of the specified length that does not straddle a page.
diff --git a/crypto/ccm.c b/crypto/ccm.c
index c36d654cf56a..32fe1bb5decb 100644
--- a/crypto/ccm.c
+++ b/crypto/ccm.c
@@ -216,12 +216,12 @@ static void get_data_to_compute(struct crypto_cipher *tfm,
 			scatterwalk_start(&walk, sg_next(walk.sg));
 			n = scatterwalk_clamp(&walk, len);
 		}
-		data_src = scatterwalk_map(&walk, 0);
+		data_src = scatterwalk_map(&walk);
 
 		compute_mac(tfm, data_src, n, pctx);
 		len -= n;
 
-		scatterwalk_unmap(data_src, 0);
+		scatterwalk_unmap(data_src);
 		scatterwalk_advance(&walk, n);
 		scatterwalk_done(&walk, 0, len);
 		if (len)
diff --git a/crypto/scatterwalk.c b/crypto/scatterwalk.c
index 41e529af0773..7281b8a93ad3 100644
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -40,9 +40,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
 }
 EXPORT_SYMBOL_GPL(scatterwalk_start);
 
-void *scatterwalk_map(struct scatter_walk *walk, int out)
+void *scatterwalk_map(struct scatter_walk *walk)
 {
-	return crypto_kmap(scatterwalk_page(walk), out) +
+	return kmap_atomic(scatterwalk_page(walk)) +
 	       offset_in_page(walk->offset);
 }
 EXPORT_SYMBOL_GPL(scatterwalk_map);
@@ -83,9 +83,9 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
 		if (len_this_page > nbytes)
 			len_this_page = nbytes;
 
-		vaddr = scatterwalk_map(walk, out);
+		vaddr = scatterwalk_map(walk);
 		memcpy_dir(buf, vaddr, len_this_page, out);
-		scatterwalk_unmap(vaddr, out);
+		scatterwalk_unmap(vaddr);
 
 		scatterwalk_advance(walk, len_this_page);
 
diff --git a/crypto/shash.c b/crypto/shash.c
index 9100912716ae..21fc12e2378f 100644
--- a/crypto/shash.c
+++ b/crypto/shash.c
@@ -281,10 +281,10 @@ int shash_ahash_digest(struct ahash_request *req, struct shash_desc *desc)
 	if (nbytes < min(sg->length, ((unsigned int)(PAGE_SIZE)) - offset)) {
 		void *data;
 
-		data = crypto_kmap(sg_page(sg), 0);
+		data = kmap_atomic(sg_page(sg));
 		err = crypto_shash_digest(desc, data + offset, nbytes,
 					  req->result);
-		crypto_kunmap(data, 0);
+		kunmap_atomic(data);
 		crypto_yield(desc->flags);
 	} else
 		err = crypto_shash_init(desc) ?:
@@ -420,9 +420,9 @@ static int shash_compat_digest(struct hash_desc *hdesc, struct scatterlist *sg,
 
 		desc->flags = hdesc->flags;
 
-		data = crypto_kmap(sg_page(sg), 0);
+		data = kmap_atomic(sg_page(sg));
 		err = crypto_shash_digest(desc, data + offset, nbytes, out);
-		crypto_kunmap(data, 0);
+		kunmap_atomic(data);
 		crypto_yield(desc->flags);
 		goto out;
 	}
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index 9691dd0966d7..d8af325a6bda 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -720,13 +720,13 @@ static void ata_pio_sector(struct ata_queued_cmd *qc)
 
 		/* FIXME: use a bounce buffer */
 		local_irq_save(flags);
-		buf = kmap_atomic(page, KM_IRQ0);
+		buf = kmap_atomic(page);
 
 		/* do the actual data transfer */
 		ap->ops->sff_data_xfer(qc->dev, buf + offset, qc->sect_size,
 				       do_write);
 
-		kunmap_atomic(buf, KM_IRQ0);
+		kunmap_atomic(buf);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);
@@ -865,13 +865,13 @@ next_sg:
 
 		/* FIXME: use bounce buffer */
 		local_irq_save(flags);
-		buf = kmap_atomic(page, KM_IRQ0);
+		buf = kmap_atomic(page);
 
 		/* do the actual data transfer */
 		consumed = ap->ops->sff_data_xfer(dev, buf + offset,
 						  count, rw);
 
-		kunmap_atomic(buf, KM_IRQ0);
+		kunmap_atomic(buf);
 		local_irq_restore(flags);
 	} else {
 		buf = page_address(page);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index ec246437f5a4..531ceb31d0ff 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -242,9 +242,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
 	page = brd_lookup_page(brd, sector);
 	BUG_ON(!page);
 
-	dst = kmap_atomic(page, KM_USER1);
+	dst = kmap_atomic(page);
 	memcpy(dst + offset, src, copy);
-	kunmap_atomic(dst, KM_USER1);
+	kunmap_atomic(dst);
 
 	if (copy < n) {
 		src += copy;
@@ -253,9 +253,9 @@ static void copy_to_brd(struct brd_device *brd, const void *src,
 		page = brd_lookup_page(brd, sector);
 		BUG_ON(!page);
 
-		dst = kmap_atomic(page, KM_USER1);
+		dst = kmap_atomic(page);
 		memcpy(dst, src, copy);
-		kunmap_atomic(dst, KM_USER1);
+		kunmap_atomic(dst);
 	}
 }
 
@@ -273,9 +273,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
 	copy = min_t(size_t, n, PAGE_SIZE - offset);
 	page = brd_lookup_page(brd, sector);
 	if (page) {
-		src = kmap_atomic(page, KM_USER1);
+		src = kmap_atomic(page);
 		memcpy(dst, src + offset, copy);
-		kunmap_atomic(src, KM_USER1);
+		kunmap_atomic(src);
 	} else
 		memset(dst, 0, copy);
 
@@ -285,9 +285,9 @@ static void copy_from_brd(void *dst, struct brd_device *brd,
 		copy = n - copy;
 		page = brd_lookup_page(brd, sector);
 		if (page) {
-			src = kmap_atomic(page, KM_USER1);
+			src = kmap_atomic(page);
 			memcpy(dst, src, copy);
-			kunmap_atomic(src, KM_USER1);
+			kunmap_atomic(src);
 		} else
 			memset(dst, 0, copy);
 	}
@@ -309,7 +309,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 		goto out;
 	}
 
-	mem = kmap_atomic(page, KM_USER0);
+	mem = kmap_atomic(page);
 	if (rw == READ) {
 		copy_from_brd(mem + off, brd, sector, len);
 		flush_dcache_page(page);
@@ -317,7 +317,7 @@ static int brd_do_bvec(struct brd_device *brd, struct page *page,
 		flush_dcache_page(page);
 		copy_to_brd(brd, mem + off, sector, len);
 	}
-	kunmap_atomic(mem, KM_USER0);
+	kunmap_atomic(mem);
 
 out:
 	return err;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index 912f585a760f..3030201c69d8 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -289,25 +289,25 @@ static unsigned int bm_bit_to_page_idx(struct drbd_bitmap *b, u64 bitnr)
 	return page_nr;
 }
 
-static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx, const enum km_type km)
+static unsigned long *__bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
 {
 	struct page *page = b->bm_pages[idx];
-	return (unsigned long *) kmap_atomic(page, km);
+	return (unsigned long *) kmap_atomic(page);
 }
 
 static unsigned long *bm_map_pidx(struct drbd_bitmap *b, unsigned int idx)
 {
-	return __bm_map_pidx(b, idx, KM_IRQ1);
+	return __bm_map_pidx(b, idx);
 }
 
-static void __bm_unmap(unsigned long *p_addr, const enum km_type km)
+static void __bm_unmap(unsigned long *p_addr)
 {
-	kunmap_atomic(p_addr, km);
+	kunmap_atomic(p_addr);
 };
 
 static void bm_unmap(unsigned long *p_addr)
 {
-	return __bm_unmap(p_addr, KM_IRQ1);
+	return __bm_unmap(p_addr);
 }
 
 /* long word offset of _bitmap_ sector */
@@ -543,15 +543,15 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
 
 	/* all but last page */
 	for (idx = 0; idx < b->bm_number_of_pages - 1; idx++) {
-		p_addr = __bm_map_pidx(b, idx, KM_USER0);
+		p_addr = __bm_map_pidx(b, idx);
 		for (i = 0; i < LWPP; i++)
 			bits += hweight_long(p_addr[i]);
-		__bm_unmap(p_addr, KM_USER0);
+		__bm_unmap(p_addr);
 		cond_resched();
 	}
 	/* last (or only) page */
 	last_word = ((b->bm_bits - 1) & BITS_PER_PAGE_MASK) >> LN2_BPL;
-	p_addr = __bm_map_pidx(b, idx, KM_USER0);
+	p_addr = __bm_map_pidx(b, idx);
 	for (i = 0; i < last_word; i++)
 		bits += hweight_long(p_addr[i]);
 	p_addr[last_word] &= cpu_to_lel(mask);
@@ -559,7 +559,7 @@ static unsigned long bm_count_bits(struct drbd_bitmap *b)
 	/* 32bit arch, may have an unused padding long */
 	if (BITS_PER_LONG == 32 && (last_word & 1) == 0)
 		p_addr[last_word+1] = 0;
-	__bm_unmap(p_addr, KM_USER0);
+	__bm_unmap(p_addr);
 	return bits;
 }
 
@@ -970,11 +970,11 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 		 * to use pre-allocated page pool */
 		void *src, *dest;
 		page = alloc_page(__GFP_HIGHMEM|__GFP_WAIT);
-		dest = kmap_atomic(page, KM_USER0);
-		src = kmap_atomic(b->bm_pages[page_nr], KM_USER1);
+		dest = kmap_atomic(page);
+		src = kmap_atomic(b->bm_pages[page_nr]);
 		memcpy(dest, src, PAGE_SIZE);
-		kunmap_atomic(src, KM_USER1);
-		kunmap_atomic(dest, KM_USER0);
+		kunmap_atomic(src);
+		kunmap_atomic(dest);
 		bm_store_page_idx(page, page_nr);
 	} else
 		page = b->bm_pages[page_nr];
@@ -1163,7 +1163,7 @@ int drbd_bm_write_page(struct drbd_conf *mdev, unsigned int idx) __must_hold(loc
  * this returns a bit number, NOT a sector!
  */
 static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
-	const int find_zero_bit, const enum km_type km)
+	const int find_zero_bit)
 {
 	struct drbd_bitmap *b = mdev->bitmap;
 	unsigned long *p_addr;
@@ -1178,7 +1178,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
 	while (bm_fo < b->bm_bits) {
 		/* bit offset of the first bit in the page */
 		bit_offset = bm_fo & ~BITS_PER_PAGE_MASK;
-		p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo), km);
+		p_addr = __bm_map_pidx(b, bm_bit_to_page_idx(b, bm_fo));
 
 		if (find_zero_bit)
 			i = find_next_zero_bit_le(p_addr,
@@ -1187,7 +1187,7 @@ static unsigned long __bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo,
 			i = find_next_bit_le(p_addr,
 				PAGE_SIZE*8, bm_fo & BITS_PER_PAGE_MASK);
 
-		__bm_unmap(p_addr, km);
+		__bm_unmap(p_addr);
 		if (i < PAGE_SIZE*8) {
 			bm_fo = bit_offset + i;
 			if (bm_fo >= b->bm_bits)
@@ -1215,7 +1215,7 @@ static unsigned long bm_find_next(struct drbd_conf *mdev,
 	if (BM_DONT_TEST & b->bm_flags)
 		bm_print_lock_info(mdev);
 
-	i = __bm_find_next(mdev, bm_fo, find_zero_bit, KM_IRQ1);
+	i = __bm_find_next(mdev, bm_fo, find_zero_bit);
 
 	spin_unlock_irq(&b->bm_lock);
 	return i;
@@ -1239,13 +1239,13 @@ unsigned long drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo
 unsigned long _drbd_bm_find_next(struct drbd_conf *mdev, unsigned long bm_fo)
 {
 	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
-	return __bm_find_next(mdev, bm_fo, 0, KM_USER1);
+	return __bm_find_next(mdev, bm_fo, 0);
 }
 
 unsigned long _drbd_bm_find_next_zero(struct drbd_conf *mdev, unsigned long bm_fo)
 {
 	/* WARN_ON(!(BM_DONT_SET & mdev->b->bm_flags)); */
-	return __bm_find_next(mdev, bm_fo, 1, KM_USER1);
+	return __bm_find_next(mdev, bm_fo, 1);
 }
 
 /* returns number of bits actually changed.
@@ -1273,14 +1273,14 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 		unsigned int page_nr = bm_bit_to_page_idx(b, bitnr);
 		if (page_nr != last_page_nr) {
 			if (p_addr)
-				__bm_unmap(p_addr, KM_IRQ1);
+				__bm_unmap(p_addr);
 			if (c < 0)
 				bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 			else if (c > 0)
 				bm_set_page_need_writeout(b->bm_pages[last_page_nr]);
 			changed_total += c;
 			c = 0;
-			p_addr = __bm_map_pidx(b, page_nr, KM_IRQ1);
+			p_addr = __bm_map_pidx(b, page_nr);
 			last_page_nr = page_nr;
 		}
 		if (val)
@@ -1289,7 +1289,7 @@ static int __bm_change_bits_to(struct drbd_conf *mdev, const unsigned long s,
 			c -= (0 != __test_and_clear_bit_le(bitnr & BITS_PER_PAGE_MASK, p_addr));
 	}
 	if (p_addr)
-		__bm_unmap(p_addr, KM_IRQ1);
+		__bm_unmap(p_addr);
 	if (c < 0)
 		bm_set_page_lazy_writeout(b->bm_pages[last_page_nr]);
 	else if (c > 0)
@@ -1342,13 +1342,13 @@ static inline void bm_set_full_words_within_one_page(struct drbd_bitmap *b,
 {
 	int i;
 	int bits;
-	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr], KM_IRQ1);
+	unsigned long *paddr = kmap_atomic(b->bm_pages[page_nr]);
 	for (i = first_word; i < last_word; i++) {
 		bits = hweight_long(paddr[i]);
 		paddr[i] = ~0UL;
 		b->bm_set += BITS_PER_LONG - bits;
 	}
-	kunmap_atomic(paddr, KM_IRQ1);
+	kunmap_atomic(paddr);
 }
 
 /* Same thing as drbd_bm_set_bits,
diff --git a/drivers/block/drbd/drbd_nl.c b/drivers/block/drbd/drbd_nl.c
index af2a25049bce..e09f9cebbb20 100644
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -2526,10 +2526,10 @@ void drbd_bcast_ee(struct drbd_conf *mdev,
 
 	page = e->pages;
 	page_chain_for_each(page) {
-		void *d = kmap_atomic(page, KM_USER0);
+		void *d = kmap_atomic(page);
 		unsigned l = min_t(unsigned, len, PAGE_SIZE);
 		memcpy(tl, d, l);
-		kunmap_atomic(d, KM_USER0);
+		kunmap_atomic(d);
 		tl = (unsigned short*)((char*)tl + l);
 		len -= l;
 		if (len == 0)
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index cd504353b278..bbca966f8f66 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -93,16 +93,16 @@ static int transfer_none(struct loop_device *lo, int cmd,
 			 struct page *loop_page, unsigned loop_off,
 			 int size, sector_t real_block)
 {
-	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
-	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+	char *raw_buf = kmap_atomic(raw_page) + raw_off;
+	char *loop_buf = kmap_atomic(loop_page) + loop_off;
 
 	if (cmd == READ)
 		memcpy(loop_buf, raw_buf, size);
 	else
 		memcpy(raw_buf, loop_buf, size);
 
-	kunmap_atomic(loop_buf, KM_USER1);
-	kunmap_atomic(raw_buf, KM_USER0);
+	kunmap_atomic(loop_buf);
+	kunmap_atomic(raw_buf);
 	cond_resched();
 	return 0;
 }
@@ -112,8 +112,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
 			struct page *loop_page, unsigned loop_off,
 			int size, sector_t real_block)
 {
-	char *raw_buf = kmap_atomic(raw_page, KM_USER0) + raw_off;
-	char *loop_buf = kmap_atomic(loop_page, KM_USER1) + loop_off;
+	char *raw_buf = kmap_atomic(raw_page) + raw_off;
+	char *loop_buf = kmap_atomic(loop_page) + loop_off;
 	char *in, *out, *key;
 	int i, keysize;
 
@@ -130,8 +130,8 @@ static int transfer_xor(struct loop_device *lo, int cmd,
 	for (i = 0; i < size; i++)
 		*out++ = *in++ ^ key[(i & 511) % keysize];
 
-	kunmap_atomic(loop_buf, KM_USER1);
-	kunmap_atomic(raw_buf, KM_USER0);
+	kunmap_atomic(loop_buf);
+	kunmap_atomic(raw_buf);
 	cond_resched();
 	return 0;
 }
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index d59edeabd93f..ba66e4445f41 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -987,14 +987,14 @@ static void pkt_copy_bio_data(struct bio *src_bio, int seg, int offs, struct pag
 
 	while (copy_size > 0) {
 		struct bio_vec *src_bvl = bio_iovec_idx(src_bio, seg);
-		void *vfrom = kmap_atomic(src_bvl->bv_page, KM_USER0) +
+		void *vfrom = kmap_atomic(src_bvl->bv_page) +
 			src_bvl->bv_offset + offs;
 		void *vto = page_address(dst_page) + dst_offs;
 		int len = min_t(int, copy_size, src_bvl->bv_len - offs);
 
 		BUG_ON(len < 0);
 		memcpy(vto, vfrom, len);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 
 		seg++;
 		offs = 0;
@@ -1019,10 +1019,10 @@ static void pkt_make_local_copy(struct packet_data *pkt, struct bio_vec *bvec)
 	offs = 0;
 	for (f = 0; f < pkt->frames; f++) {
 		if (bvec[f].bv_page != pkt->pages[p]) {
-			void *vfrom = kmap_atomic(bvec[f].bv_page, KM_USER0) + bvec[f].bv_offset;
+			void *vfrom = kmap_atomic(bvec[f].bv_page) + bvec[f].bv_offset;
 			void *vto = page_address(pkt->pages[p]) + offs;
 			memcpy(vto, vfrom, CD_FRAMESIZE);
-			kunmap_atomic(vfrom, KM_USER0);
+			kunmap_atomic(vfrom);
 			bvec[f].bv_page = pkt->pages[p];
 			bvec[f].bv_offset = offs;
 		} else {
diff --git a/drivers/crypto/hifn_795x.c b/drivers/crypto/hifn_795x.c
index fe765f49de58..76368f984023 100644
--- a/drivers/crypto/hifn_795x.c
+++ b/drivers/crypto/hifn_795x.c
@@ -1731,9 +1731,9 @@ static int ablkcipher_get(void *saddr, unsigned int *srestp, unsigned int offset
 	while (size) {
 		copy = min3(srest, dst->length, size);
 
-		daddr = kmap_atomic(sg_page(dst), KM_IRQ0);
+		daddr = kmap_atomic(sg_page(dst));
 		memcpy(daddr + dst->offset + offset, saddr, copy);
-		kunmap_atomic(daddr, KM_IRQ0);
+		kunmap_atomic(daddr);
 
 		nbytes -= copy;
 		size -= copy;
@@ -1793,17 +1793,17 @@ static void hifn_process_ready(struct ablkcipher_request *req, int error)
 				continue;
 			}
 
-			saddr = kmap_atomic(sg_page(t), KM_SOFTIRQ0);
+			saddr = kmap_atomic(sg_page(t));
 
 			err = ablkcipher_get(saddr, &t->length, t->offset,
 					dst, nbytes, &nbytes);
 			if (err < 0) {
-				kunmap_atomic(saddr, KM_SOFTIRQ0);
+				kunmap_atomic(saddr);
 				break;
 			}
 
 			idx += err;
-			kunmap_atomic(saddr, KM_SOFTIRQ0);
+			kunmap_atomic(saddr);
 		}
 
 		hifn_cipher_walk_exit(&rctx->walk);
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index ca6c04d350ee..da09cd74bc5b 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -620,13 +620,13 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
 	if (PageHighMem(pg))
 		local_irq_save(flags);
 
-	virt_addr = kmap_atomic(pg, KM_BOUNCE_READ);
+	virt_addr = kmap_atomic(pg);
 
 	/* Perform architecture specific atomic scrub operation */
 	atomic_scrub(virt_addr + offset, size);
 
 	/* Unmap and complete */
-	kunmap_atomic(virt_addr, KM_BOUNCE_READ);
+	kunmap_atomic(virt_addr);
 
 	if (PageHighMem(pg))
 		local_irq_restore(flags);
diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c
index 592865381c6e..4b8653b932f9 100644
--- a/drivers/gpu/drm/drm_cache.c
+++ b/drivers/gpu/drm/drm_cache.c
@@ -41,10 +41,10 @@ drm_clflush_page(struct page *page)
 	if (unlikely(page == NULL))
 		return;
 
-	page_virtual = kmap_atomic(page, KM_USER0);
+	page_virtual = kmap_atomic(page);
 	for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
 		clflush(page_virtual + i);
-	kunmap_atomic(page_virtual, KM_USER0);
+	kunmap_atomic(page_virtual);
 }
 
 static void drm_cache_flush_clflush(struct page *pages[],
@@ -87,10 +87,10 @@ drm_clflush_pages(struct page *pages[], unsigned long num_pages)
 		if (unlikely(page == NULL))
 			continue;
 
-		page_virtual = kmap_atomic(page, KM_USER0);
+		page_virtual = kmap_atomic(page);
 		flush_dcache_range((unsigned long)page_virtual,
 				   (unsigned long)page_virtual + PAGE_SIZE);
-		kunmap_atomic(page_virtual, KM_USER0);
+		kunmap_atomic(page_virtual);
 	}
 #else
 	printk(KERN_ERR "Architecture has no drm_cache.c support\n");
diff --git a/drivers/gpu/drm/gma500/mmu.c b/drivers/gpu/drm/gma500/mmu.c
index c904d73b1de3..e80ee82f6caf 100644
--- a/drivers/gpu/drm/gma500/mmu.c
+++ b/drivers/gpu/drm/gma500/mmu.c
@@ -125,14 +125,14 @@ static void psb_page_clflush(struct psb_mmu_driver *driver, struct page* page)
 	int i;
 	uint8_t *clf;
 
-	clf = kmap_atomic(page, KM_USER0);
+	clf = kmap_atomic(page);
 	mb();
 	for (i = 0; i < clflush_count; ++i) {
 		psb_clflush(clf);
 		clf += clflush_add;
 	}
 	mb();
-	kunmap_atomic(clf, KM_USER0);
+	kunmap_atomic(clf);
 }
 
 static void psb_pages_clflush(struct psb_mmu_driver *driver,
@@ -325,7 +325,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 
 	spin_lock(lock);
 
-	v = kmap_atomic(pt->p, KM_USER0);
+	v = kmap_atomic(pt->p);
 	clf = (uint8_t *) v;
 	ptes = (uint32_t *) v;
 	for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
@@ -341,7 +341,7 @@ static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
 		mb();
 	}
 
-	kunmap_atomic(v, KM_USER0);
+	kunmap_atomic(v);
 	spin_unlock(lock);
 
 	pt->count = 0;
@@ -376,18 +376,18 @@ struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
 			continue;
 		}
 
-		v = kmap_atomic(pd->p, KM_USER0);
+		v = kmap_atomic(pd->p);
 		pd->tables[index] = pt;
 		v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
 		pt->index = index;
-		kunmap_atomic((void *) v, KM_USER0);
+		kunmap_atomic((void *) v);
 
 		if (pd->hw_context != -1) {
 			psb_mmu_clflush(pd->driver, (void *) &v[index]);
 			atomic_set(&pd->driver->needs_tlbflush, 1);
 		}
 	}
-	pt->v = kmap_atomic(pt->p, KM_USER0);
+	pt->v = kmap_atomic(pt->p);
 	return pt;
 }
 
@@ -404,7 +404,7 @@ static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
 		spin_unlock(lock);
 		return NULL;
 	}
-	pt->v = kmap_atomic(pt->p, KM_USER0);
+	pt->v = kmap_atomic(pt->p);
 	return pt;
 }
 
@@ -413,9 +413,9 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
 	struct psb_mmu_pd *pd = pt->pd;
 	uint32_t *v;
 
-	kunmap_atomic(pt->v, KM_USER0);
+	kunmap_atomic(pt->v);
 	if (pt->count == 0) {
-		v = kmap_atomic(pd->p, KM_USER0);
+		v = kmap_atomic(pd->p);
 		v[pt->index] = pd->invalid_pde;
 		pd->tables[pt->index] = NULL;
 
@@ -424,7 +424,7 @@ static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
 				      (void *) &v[pt->index]);
 			atomic_set(&pd->driver->needs_tlbflush, 1);
 		}
-		kunmap_atomic(pt->v, KM_USER0);
+		kunmap_atomic(pt->v);
 		spin_unlock(&pd->driver->lock);
 		psb_mmu_free_pt(pt);
 		return;
@@ -457,7 +457,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
 	down_read(&driver->sem);
 	spin_lock(&driver->lock);
 
-	v = kmap_atomic(pd->p, KM_USER0);
+	v = kmap_atomic(pd->p);
 	v += start;
 
 	while (gtt_pages--) {
@@ -467,7 +467,7 @@ void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
 
 	/*ttm_tt_cache_flush(&pd->p, num_pages);*/
 	psb_pages_clflush(pd->driver, &pd->p, num_pages);
-	kunmap_atomic(v, KM_USER0);
+	kunmap_atomic(v);
 	spin_unlock(&driver->lock);
 
 	if (pd->hw_context != -1)
@@ -830,9 +830,9 @@ int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
 	uint32_t *v;
 
 	spin_lock(lock);
-	v = kmap_atomic(pd->p, KM_USER0);
+	v = kmap_atomic(pd->p);
 	tmp = v[psb_mmu_pd_index(virtual)];
-	kunmap_atomic(v, KM_USER0);
+	kunmap_atomic(v);
 	spin_unlock(lock);
 
 	if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index 2f75d203a2bf..c10cf5e2443a 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -309,11 +309,11 @@ int ttm_tt_swapin(struct ttm_tt *ttm)
 			goto out_err;
 
 		preempt_disable();
-		from_virtual = kmap_atomic(from_page, KM_USER0);
-		to_virtual = kmap_atomic(to_page, KM_USER1);
+		from_virtual = kmap_atomic(from_page);
+		to_virtual = kmap_atomic(to_page);
 		memcpy(to_virtual, from_virtual, PAGE_SIZE);
-		kunmap_atomic(to_virtual, KM_USER1);
-		kunmap_atomic(from_virtual, KM_USER0);
+		kunmap_atomic(to_virtual);
+		kunmap_atomic(from_virtual);
 		preempt_enable();
 		page_cache_release(from_page);
 	}
@@ -365,11 +365,11 @@ int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistent_swap_storage)
 			goto out_err;
 		}
 		preempt_disable();
-		from_virtual = kmap_atomic(from_page, KM_USER0);
-		to_virtual = kmap_atomic(to_page, KM_USER1);
+		from_virtual = kmap_atomic(from_page);
+		to_virtual = kmap_atomic(to_page);
 		memcpy(to_virtual, from_virtual, PAGE_SIZE);
-		kunmap_atomic(to_virtual, KM_USER1);
-		kunmap_atomic(from_virtual, KM_USER0);
+		kunmap_atomic(to_virtual);
+		kunmap_atomic(from_virtual);
 		preempt_enable();
 		set_page_dirty(to_page);
 		mark_page_accessed(to_page);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index f4e7763a7694..51c9ba5cd2fb 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -136,10 +136,10 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
 
 		if (likely(page_virtual != NULL)) {
 			desc_virtual->ppn = page_to_pfn(page);
-			kunmap_atomic(page_virtual, KM_USER0);
+			kunmap_atomic(page_virtual);
 		}
 
-		page_virtual = kmap_atomic(page, KM_USER0);
+		page_virtual = kmap_atomic(page);
 		desc_virtual = page_virtual - 1;
 		prev_pfn = ~(0UL);
 
@@ -169,7 +169,7 @@ static int vmw_gmr_build_descriptors(struct list_head *desc_pages,
 	}
 
 	if (likely(page_virtual != NULL))
-		kunmap_atomic(page_virtual, KM_USER0);
+		kunmap_atomic(page_virtual);
 
 	return 0;
 out_err:
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 5bc2839ebcfd..729428edeba2 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -253,7 +253,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
 	if (page_is_high)
 		local_irq_save(flags);
 
-	buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;
+	buf = kmap_atomic(page) + offset;
 
 	cmd->nleft -= nr_bytes;
 	cmd->cursg_ofs += nr_bytes;
@@ -269,7 +269,7 @@ void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
 	else
 		hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);
 
-	kunmap_atomic(buf, KM_BIO_SRC_IRQ);
+	kunmap_atomic(buf);
 
 	if (page_is_high)
 		local_irq_restore(flags);
diff --git a/drivers/infiniband/ulp/iser/iser_memory.c b/drivers/infiniband/ulp/iser/iser_memory.c
index fb88d6896b67..2033a928d34d 100644
--- a/drivers/infiniband/ulp/iser/iser_memory.c
+++ b/drivers/infiniband/ulp/iser/iser_memory.c
@@ -73,11 +73,11 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
 		p = mem;
 		for_each_sg(sgl, sg, data->size, i) {
-			from = kmap_atomic(sg_page(sg), KM_USER0);
+			from = kmap_atomic(sg_page(sg));
 			memcpy(p,
 			       from + sg->offset,
 			       sg->length);
-			kunmap_atomic(from, KM_USER0);
+			kunmap_atomic(from);
 			p += sg->length;
 		}
 	}
@@ -133,11 +133,11 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
 
 		p = mem;
 		for_each_sg(sgl, sg, sg_size, i) {
-			to = kmap_atomic(sg_page(sg), KM_SOFTIRQ0);
+			to = kmap_atomic(sg_page(sg));
 			memcpy(to + sg->offset,
 			       p,
 			       sg->length);
-			kunmap_atomic(to, KM_SOFTIRQ0);
+			kunmap_atomic(to);
 			p += sg->length;
 		}
 	}
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index cdf36b1e9aa6..045e086144ad 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -457,7 +457,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
 		return;
 	}
 	spin_unlock_irqrestore(&bitmap->lock, flags);
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 	sb->events = cpu_to_le64(bitmap->mddev->events);
 	if (bitmap->mddev->events < bitmap->events_cleared)
 		/* rocking back to read-only */
@@ -467,7 +467,7 @@ void bitmap_update_sb(struct bitmap *bitmap)
 	/* Just in case these have been changed via sysfs: */
 	sb->daemon_sleep = cpu_to_le32(bitmap->mddev->bitmap_info.daemon_sleep/HZ);
 	sb->write_behind = cpu_to_le32(bitmap->mddev->bitmap_info.max_write_behind);
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 	write_page(bitmap, bitmap->sb_page, 1);
 }
 
@@ -478,7 +478,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
 
 	if (!bitmap || !bitmap->sb_page)
 		return;
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 	printk(KERN_DEBUG "%s: bitmap file superblock:\n", bmname(bitmap));
 	printk(KERN_DEBUG " magic: %08x\n", le32_to_cpu(sb->magic));
 	printk(KERN_DEBUG " version: %d\n", le32_to_cpu(sb->version));
@@ -497,7 +497,7 @@ void bitmap_print_sb(struct bitmap *bitmap)
 	printk(KERN_DEBUG " sync size: %llu KB\n",
 	       (unsigned long long)le64_to_cpu(sb->sync_size)/2);
 	printk(KERN_DEBUG "max write behind: %d\n", le32_to_cpu(sb->write_behind));
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 }
 
 /*
@@ -525,7 +525,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	}
 	bitmap->sb_page->index = 0;
 
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 
 	sb->magic = cpu_to_le32(BITMAP_MAGIC);
 	sb->version = cpu_to_le32(BITMAP_MAJOR_HI);
@@ -533,7 +533,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	chunksize = bitmap->mddev->bitmap_info.chunksize;
 	BUG_ON(!chunksize);
 	if (!is_power_of_2(chunksize)) {
-		kunmap_atomic(sb, KM_USER0);
+		kunmap_atomic(sb);
 		printk(KERN_ERR "bitmap chunksize not a power of 2\n");
 		return -EINVAL;
 	}
@@ -571,7 +571,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 		bitmap->flags |= BITMAP_HOSTENDIAN;
 	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
 
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 
 	return 0;
 }
@@ -603,7 +603,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
 		return err;
 	}
 
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 
 	chunksize = le32_to_cpu(sb->chunksize);
 	daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
@@ -664,7 +664,7 @@ success:
 	bitmap->events_cleared = bitmap->mddev->events;
 	err = 0;
 out:
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 	if (err)
 		bitmap_print_sb(bitmap);
 	return err;
@@ -689,7 +689,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
 		return 0;
 	}
 	spin_unlock_irqrestore(&bitmap->lock, flags);
-	sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+	sb = kmap_atomic(bitmap->sb_page);
 	old = le32_to_cpu(sb->state) & bits;
 	switch (op) {
 	case MASK_SET:
@@ -703,7 +703,7 @@ static int bitmap_mask_state(struct bitmap *bitmap, enum bitmap_state bits,
 	default:
 		BUG();
 	}
-	kunmap_atomic(sb, KM_USER0);
+	kunmap_atomic(sb);
 	return old;
 }
 
@@ -881,12 +881,12 @@ static void bitmap_file_set_bit(struct bitmap *bitmap, sector_t block)
 	bit = file_page_offset(bitmap, chunk);
 
 	/* set the bit */
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (bitmap->flags & BITMAP_HOSTENDIAN)
 		set_bit(bit, kaddr);
 	else
 		__set_bit_le(bit, kaddr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	pr_debug("set file bit %lu page %lu\n", bit, page->index);
 	/* record page number so it gets flushed to disk when unplug occurs */
 	set_page_attr(bitmap, page, BITMAP_PAGE_DIRTY);
@@ -1050,10 +1050,10 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 				 * if bitmap is out of date, dirty the
 				 * whole page and write it out
 				 */
-				paddr = kmap_atomic(page, KM_USER0);
+				paddr = kmap_atomic(page);
 				memset(paddr + offset, 0xff,
 				       PAGE_SIZE - offset);
-				kunmap_atomic(paddr, KM_USER0);
+				kunmap_atomic(paddr);
 				write_page(bitmap, page, 1);
 
 				ret = -EIO;
@@ -1061,12 +1061,12 @@ static int bitmap_init_from_disk(struct bitmap *bitmap, sector_t start)
 					goto err;
 			}
 		}
-		paddr = kmap_atomic(page, KM_USER0);
+		paddr = kmap_atomic(page);
 		if (bitmap->flags & BITMAP_HOSTENDIAN)
 			b = test_bit(bit, paddr);
 		else
 			b = test_bit_le(bit, paddr);
-		kunmap_atomic(paddr, KM_USER0);
+		kunmap_atomic(paddr);
 		if (b) {
 			/* if the disk bit is set, set the memory bit */
 			int needed = ((sector_t)(i+1) << (CHUNK_BLOCK_SHIFT(bitmap))
@@ -1209,10 +1209,10 @@ void bitmap_daemon_work(struct mddev *mddev)
 		    mddev->bitmap_info.external == 0) {
 			bitmap_super_t *sb;
 			bitmap->need_sync = 0;
-			sb = kmap_atomic(bitmap->sb_page, KM_USER0);
+			sb = kmap_atomic(bitmap->sb_page);
 			sb->events_cleared =
 				cpu_to_le64(bitmap->events_cleared);
-			kunmap_atomic(sb, KM_USER0);
+			kunmap_atomic(sb);
 			write_page(bitmap, bitmap->sb_page, 1);
 		}
 		spin_lock_irqsave(&bitmap->lock, flags);
@@ -1235,7 +1235,7 @@ void bitmap_daemon_work(struct mddev *mddev)
 					     -1);
 
 				/* clear the bit */
-				paddr = kmap_atomic(page, KM_USER0);
+				paddr = kmap_atomic(page);
 				if (bitmap->flags & BITMAP_HOSTENDIAN)
 					clear_bit(file_page_offset(bitmap, j),
 						  paddr);
@@ -1244,7 +1244,7 @@ void bitmap_daemon_work(struct mddev *mddev)
 						file_page_offset(bitmap,
 								 j),
 						paddr);
-				kunmap_atomic(paddr, KM_USER0);
+				kunmap_atomic(paddr);
 			} else if (*bmc <= 2) {
 				*bmc = 1; /* maybe clear the bit next time */
 				set_page_attr(bitmap, page, BITMAP_PAGE_PENDING);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 8c2a000cf3f5..db6b51639cee 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -590,9 +590,9 @@ static int crypt_iv_lmk_gen(struct crypt_config *cc, u8 *iv,
 	int r = 0;
 
 	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE) {
-		src = kmap_atomic(sg_page(&dmreq->sg_in), KM_USER0);
+		src = kmap_atomic(sg_page(&dmreq->sg_in));
 		r = crypt_iv_lmk_one(cc, iv, dmreq, src + dmreq->sg_in.offset);
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(src);
 	} else
 		memset(iv, 0, cc->iv_size);
 
@@ -608,14 +608,14 @@ static int crypt_iv_lmk_post(struct crypt_config *cc, u8 *iv,
 	if (bio_data_dir(dmreq->ctx->bio_in) == WRITE)
 		return 0;
 
-	dst = kmap_atomic(sg_page(&dmreq->sg_out), KM_USER0);
+	dst = kmap_atomic(sg_page(&dmreq->sg_out));
 	r = crypt_iv_lmk_one(cc, iv, dmreq, dst + dmreq->sg_out.offset);
 
 	/* Tweak the first block of plaintext sector */
 	if (!r)
 		crypto_xor(dst + dmreq->sg_out.offset, iv, cc->iv_size);
 
-	kunmap_atomic(dst, KM_USER0);
+	kunmap_atomic(dst);
 	return r;
 }
 
diff --git a/drivers/media/video/ivtv/ivtv-udma.c b/drivers/media/video/ivtv/ivtv-udma.c
index 69cc8166b20b..7338cb2d0a38 100644
--- a/drivers/media/video/ivtv/ivtv-udma.c
+++ b/drivers/media/video/ivtv/ivtv-udma.c
@@ -57,9 +57,9 @@ int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info
 		if (dma->bouncemap[map_offset] == NULL)
 			return -1;
 		local_irq_save(flags);
-		src = kmap_atomic(dma->map[map_offset], KM_BOUNCE_READ) + offset;
+		src = kmap_atomic(dma->map[map_offset]) + offset;
 		memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
-		kunmap_atomic(src, KM_BOUNCE_READ);
+		kunmap_atomic(src);
 		local_irq_restore(flags);
 		sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
 	}
diff --git a/drivers/memstick/host/jmb38x_ms.c b/drivers/memstick/host/jmb38x_ms.c
index 5319e9b65847..c37d3756d8d2 100644
--- a/drivers/memstick/host/jmb38x_ms.c
+++ b/drivers/memstick/host/jmb38x_ms.c
@@ -325,7 +325,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
 		p_cnt = min(p_cnt, length);
 
 		local_irq_save(flags);
-		buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off;
+		buf = kmap_atomic(pg) + p_off;
 	} else {
 		buf = host->req->data + host->block_pos;
 		p_cnt = host->req->data_len - host->block_pos;
@@ -341,7 +341,7 @@ static int jmb38x_ms_transfer_data(struct jmb38x_ms_host *host)
 			: jmb38x_ms_read_reg_data(host, buf, p_cnt);
 
 	if (host->req->long_data) {
-		kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ);
+		kunmap_atomic(buf - p_off);
 		local_irq_restore(flags);
 	}
 
diff --git a/drivers/memstick/host/tifm_ms.c b/drivers/memstick/host/tifm_ms.c
index 6902b83eb1b4..7bafa72f8f57 100644
--- a/drivers/memstick/host/tifm_ms.c
+++ b/drivers/memstick/host/tifm_ms.c
@@ -210,7 +210,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
 		p_cnt = min(p_cnt, length);
 
 		local_irq_save(flags);
-		buf = kmap_atomic(pg, KM_BIO_SRC_IRQ) + p_off;
+		buf = kmap_atomic(pg) + p_off;
 	} else {
 		buf = host->req->data + host->block_pos;
 		p_cnt = host->req->data_len - host->block_pos;
@@ -221,7 +221,7 @@ static unsigned int tifm_ms_transfer_data(struct tifm_ms *host)
 		: tifm_ms_read_data(host, buf, p_cnt);
 
 	if (host->req->long_data) {
-		kunmap_atomic(buf - p_off, KM_BIO_SRC_IRQ);
+		kunmap_atomic(buf - p_off);
 		local_irq_restore(flags);
 	}
 
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 6419a88a69e6..0e9aec8f6917 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4101,11 +4101,9 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
 			if (length <= copybreak &&
 			    skb_tailroom(skb) >= length) {
 				u8 *vaddr;
-				vaddr = kmap_atomic(buffer_info->page,
-						    KM_SKB_DATA_SOFTIRQ);
+				vaddr = kmap_atomic(buffer_info->page);
 				memcpy(skb_tail_pointer(skb), vaddr, length);
-				kunmap_atomic(vaddr,
-					      KM_SKB_DATA_SOFTIRQ);
+				kunmap_atomic(vaddr);
 				/* re-use the page, so don't erase
 				 * buffer_info->page */
 				skb_put(skb, length);
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a9a4ea2c616e..7152eb11b7b9 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -1301,10 +1301,9 @@ static bool e1000_clean_rx_irq_ps(struct e1000_ring *rx_ring, int *work_done,
 						      ps_page->dma,
 						      PAGE_SIZE,
 						      DMA_FROM_DEVICE);
-			vaddr = kmap_atomic(ps_page->page,
-					    KM_SKB_DATA_SOFTIRQ);
+			vaddr = kmap_atomic(ps_page->page);
 			memcpy(skb_tail_pointer(skb), vaddr, l1);
-			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+			kunmap_atomic(vaddr);
 			dma_sync_single_for_device(&pdev->dev,
 						   ps_page->dma,
 						   PAGE_SIZE,
@@ -1503,12 +1502,10 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_ring *rx_ring, int *work_done,
 			if (length <= copybreak &&
 			    skb_tailroom(skb) >= length) {
 				u8 *vaddr;
-				vaddr = kmap_atomic(buffer_info->page,
-						    KM_SKB_DATA_SOFTIRQ);
+				vaddr = kmap_atomic(buffer_info->page);
 				memcpy(skb_tail_pointer(skb), vaddr,
 				       length);
-				kunmap_atomic(vaddr,
-					      KM_SKB_DATA_SOFTIRQ);
+				kunmap_atomic(vaddr);
 				/* re-use the page, so don't erase
 				 * buffer_info->page */
 				skb_put(skb, length);
diff --git a/drivers/net/ethernet/sun/cassini.c b/drivers/net/ethernet/sun/cassini.c
index b36edbd625dd..3c2295560732 100644
--- a/drivers/net/ethernet/sun/cassini.c
+++ b/drivers/net/ethernet/sun/cassini.c
@@ -104,8 +104,8 @@
 #include <asm/byteorder.h>
 #include <asm/uaccess.h>
 
-#define cas_page_map(x)      kmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
-#define cas_page_unmap(x)    kunmap_atomic((x), KM_SKB_DATA_SOFTIRQ)
+#define cas_page_map(x)      kmap_atomic((x))
+#define cas_page_unmap(x)    kunmap_atomic((x))
 #define CAS_NCPUS            num_online_cpus()
 
 #define cas_skb_release(x)  netif_rx(x)
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index f980600f78a8..2fe9e90e53d9 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -1736,7 +1736,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 			(uint32_t ) cmd->cmnd[8];
 			/* 4 bytes: Areca io control code */
 	sg = scsi_sglist(cmd);
-	buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
 	if (scsi_sg_count(cmd) > 1) {
 		retvalue = ARCMSR_MESSAGE_FAIL;
 		goto message_out;
@@ -1985,7 +1985,7 @@ static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 	}
 	message_out:
 	sg = scsi_sglist(cmd);
-	kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+	kunmap_atomic(buffer - sg->offset);
 	return retvalue;
 }
 
@@ -2035,11 +2035,11 @@ static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
 
 		sg = scsi_sglist(cmd);
-		buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
+		buffer = kmap_atomic(sg_page(sg)) + sg->offset;
 
 		memcpy(buffer, inqdata, sizeof(inqdata));
 		sg = scsi_sglist(cmd);
-		kunmap_atomic(buffer - sg->offset, KM_IRQ0);
+		kunmap_atomic(buffer - sg->offset);
 
 		cmd->scsi_done(cmd);
 	}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 8c6156a10d90..a9af42e83632 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -322,8 +322,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 			return -ENOMEM;
 		}
 		frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
-		cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ)
-			+ frag->page_offset;
+		cp = kmap_atomic(skb_frag_page(frag)) + frag->page_offset;
 	} else {
 		cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
 	}
@@ -332,7 +331,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
 	cp->fcoe_eof = eof;
 	cp->fcoe_crc32 = cpu_to_le32(~crc);
 	if (skb_is_nonlinear(skb)) {
-		kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ);
+		kunmap_atomic(cp);
 		cp = NULL;
 	}
 
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index d3ff9cd40234..89afd6d21d89 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1956,12 +1956,11 @@ int cxgbi_conn_init_pdu(struct iscsi_task *task, unsigned int offset,
1956 1956
1957 /* data fits in the skb's headroom */ 1957 /* data fits in the skb's headroom */
1958 for (i = 0; i < tdata->nr_frags; i++, frag++) { 1958 for (i = 0; i < tdata->nr_frags; i++, frag++) {
1959 char *src = kmap_atomic(frag->page, 1959 char *src = kmap_atomic(frag->page);
1960 KM_SOFTIRQ0);
1961 1960
1962 memcpy(dst, src+frag->offset, frag->size); 1961 memcpy(dst, src+frag->offset, frag->size);
1963 dst += frag->size; 1962 dst += frag->size;
1964 kunmap_atomic(src, KM_SOFTIRQ0); 1963 kunmap_atomic(src);
1965 } 1964 }
1966 if (padlen) { 1965 if (padlen) {
1967 memset(dst, 0, padlen); 1966 memset(dst, 0, padlen);
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index c164890224d2..cc75cbea936b 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1515,7 +1515,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1515 return -ENOMEM; 1515 return -ENOMEM;
1516 } 1516 }
1517 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1]; 1517 frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
1518 cp = kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ) 1518 cp = kmap_atomic(skb_frag_page(frag))
1519 + frag->page_offset; 1519 + frag->page_offset;
1520 } else { 1520 } else {
1521 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen); 1521 cp = (struct fcoe_crc_eof *)skb_put(skb, tlen);
@@ -1526,7 +1526,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1526 cp->fcoe_crc32 = cpu_to_le32(~crc); 1526 cp->fcoe_crc32 = cpu_to_le32(~crc);
1527 1527
1528 if (skb_is_nonlinear(skb)) { 1528 if (skb_is_nonlinear(skb)) {
1529 kunmap_atomic(cp, KM_SKB_DATA_SOFTIRQ); 1529 kunmap_atomic(cp);
1530 cp = NULL; 1530 cp = NULL;
1531 } 1531 }
1532 1532
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index bd97b2273f20..4d119a326d3b 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -210,10 +210,9 @@ u32 fcoe_fc_crc(struct fc_frame *fp)
210 while (len > 0) { 210 while (len > 0) {
211 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK)); 211 clen = min(len, PAGE_SIZE - (off & ~PAGE_MASK));
212 data = kmap_atomic( 212 data = kmap_atomic(
213 skb_frag_page(frag) + (off >> PAGE_SHIFT), 213 skb_frag_page(frag) + (off >> PAGE_SHIFT));
214 KM_SKB_DATA_SOFTIRQ);
215 crc = crc32(crc, data + (off & ~PAGE_MASK), clen); 214 crc = crc32(crc, data + (off & ~PAGE_MASK), clen);
216 kunmap_atomic(data, KM_SKB_DATA_SOFTIRQ); 215 kunmap_atomic(data);
217 off += clen; 216 off += clen;
218 len -= clen; 217 len -= clen;
219 } 218 }
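fcoe_fc_crc() demonstrates how a fragment larger than one page is walked: the high bits of the running offset select which struct page to map, the low bits index within it, and each atomic mapping stays inside a single page. A sketch of that arithmetic under the new API (the byte sum stands in for the driver's crc32() call):

    #include <linux/kernel.h>
    #include <linux/highmem.h>

    /* Sum len bytes starting off bytes into a contiguous page run,
     * never letting one atomic mapping cross a page boundary. */
    static u32 sum_pages(struct page *pages, size_t off, size_t len)
    {
            u32 sum = 0;

            while (len > 0) {
                    size_t clen = min_t(size_t, len,
                                        PAGE_SIZE - (off & ~PAGE_MASK));
                    u8 *data = kmap_atomic(pages + (off >> PAGE_SHIFT));
                    size_t i;

                    for (i = 0; i < clen; i++)
                            sum += data[(off & ~PAGE_MASK) + i];
                    kunmap_atomic(data);
                    off += clen;
                    len -= clen;
            }
            return sum;
    }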
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 3242bcabad97..d42ec921de46 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -2310,10 +2310,10 @@ static void gdth_copy_internal_data(gdth_ha_str *ha, Scsi_Cmnd *scp,
2310 return; 2310 return;
2311 } 2311 }
2312 local_irq_save(flags); 2312 local_irq_save(flags);
2313 address = kmap_atomic(sg_page(sl), KM_BIO_SRC_IRQ) + sl->offset; 2313 address = kmap_atomic(sg_page(sl)) + sl->offset;
2314 memcpy(address, buffer, cpnow); 2314 memcpy(address, buffer, cpnow);
2315 flush_dcache_page(sg_page(sl)); 2315 flush_dcache_page(sg_page(sl));
2316 kunmap_atomic(address, KM_BIO_SRC_IRQ); 2316 kunmap_atomic(address);
2317 local_irq_restore(flags); 2317 local_irq_restore(flags);
2318 if (cpsum == cpcount) 2318 if (cpsum == cpcount)
2319 break; 2319 break;
diff --git a/drivers/scsi/ips.c b/drivers/scsi/ips.c
index d77891e5683b..b6d7a5c2fc94 100644
--- a/drivers/scsi/ips.c
+++ b/drivers/scsi/ips.c
@@ -1511,14 +1511,14 @@ static int ips_is_passthru(struct scsi_cmnd *SC)
1511 /* kmap_atomic() ensures addressability of the user buffer.*/ 1511 /* kmap_atomic() ensures addressability of the user buffer.*/
1512 /* local_irq_save() protects the KM_IRQ0 address slot. */ 1512 /* local_irq_save() protects the KM_IRQ0 address slot. */
1513 local_irq_save(flags); 1513 local_irq_save(flags);
1514 buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; 1514 buffer = kmap_atomic(sg_page(sg)) + sg->offset;
1515 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' && 1515 if (buffer && buffer[0] == 'C' && buffer[1] == 'O' &&
1516 buffer[2] == 'P' && buffer[3] == 'P') { 1516 buffer[2] == 'P' && buffer[3] == 'P') {
1517 kunmap_atomic(buffer - sg->offset, KM_IRQ0); 1517 kunmap_atomic(buffer - sg->offset);
1518 local_irq_restore(flags); 1518 local_irq_restore(flags);
1519 return 1; 1519 return 1;
1520 } 1520 }
1521 kunmap_atomic(buffer - sg->offset, KM_IRQ0); 1521 kunmap_atomic(buffer - sg->offset);
1522 local_irq_restore(flags); 1522 local_irq_restore(flags);
1523 } 1523 }
1524 return 0; 1524 return 0;
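Note the comment the ips hunk leaves behind: line 1512 still says local_irq_save() protects the KM_IRQ0 slot, but that slot no longer exists. With stacked mappings the irq disable is not required by kmap_atomic() itself; whether the driver can now drop it is a separate question this series does not answer. The surviving shape, sketched with a hypothetical helper:

    #include <linux/highmem.h>
    #include <linux/scatterlist.h>
    #include <linux/string.h>

    static bool sg_starts_with(struct scatterlist *sg, const void *magic,
                               size_t n)
    {
            unsigned long flags;
            char *buffer;
            bool hit;

            local_irq_save(flags);  /* historically: guard the KM_IRQ0 slot */
            buffer = kmap_atomic(sg_page(sg)) + sg->offset;
            hit = !memcmp(buffer, magic, n);
            kunmap_atomic(buffer - sg->offset);
            local_irq_restore(flags);
            return hit;
    }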
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 192cb48d849a..ee0dc05c6269 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -1304,9 +1304,9 @@ sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req,
1304 struct page *page = sg_page(sg); 1304 struct page *page = sg_page(sg);
1305 1305
1306 copy_len = min_t(int, total_len, sg_dma_len(sg)); 1306 copy_len = min_t(int, total_len, sg_dma_len(sg));
1307 kaddr = kmap_atomic(page, KM_IRQ0); 1307 kaddr = kmap_atomic(page);
1308 memcpy(kaddr + sg->offset, src_addr, copy_len); 1308 memcpy(kaddr + sg->offset, src_addr, copy_len);
1309 kunmap_atomic(kaddr, KM_IRQ0); 1309 kunmap_atomic(kaddr);
1310 total_len -= copy_len; 1310 total_len -= copy_len;
1311 src_addr += copy_len; 1311 src_addr += copy_len;
1312 sg = sg_next(sg); 1312 sg = sg_next(sg);
@@ -1654,7 +1654,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1654 sci_unsolicited_frame_control_get_header(&ihost->uf_control, 1654 sci_unsolicited_frame_control_get_header(&ihost->uf_control,
1655 frame_index, 1655 frame_index,
1656 &frame_header); 1656 &frame_header);
1657 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 1657 kaddr = kmap_atomic(sg_page(sg));
1658 rsp = kaddr + sg->offset; 1658 rsp = kaddr + sg->offset;
1659 sci_swab32_cpy(rsp, frame_header, 1); 1659 sci_swab32_cpy(rsp, frame_header, 1);
1660 1660
@@ -1691,7 +1691,7 @@ sci_io_request_frame_handler(struct isci_request *ireq,
1691 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; 1691 ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
1692 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED); 1692 sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1693 } 1693 }
1694 kunmap_atomic(kaddr, KM_IRQ0); 1694 kunmap_atomic(kaddr);
1695 1695
1696 sci_controller_release_frame(ihost, frame_index); 1696 sci_controller_release_frame(ihost, frame_index);
1697 1697
@@ -3023,10 +3023,10 @@ static void isci_request_io_request_complete(struct isci_host *ihost,
3023 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); 3023 dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE);
3024 3024
3025 /* need to swab it back in case the command buffer is re-used */ 3025 /* need to swab it back in case the command buffer is re-used */
3026 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 3026 kaddr = kmap_atomic(sg_page(sg));
3027 smp_req = kaddr + sg->offset; 3027 smp_req = kaddr + sg->offset;
3028 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3028 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3029 kunmap_atomic(kaddr, KM_IRQ0); 3029 kunmap_atomic(kaddr);
3030 break; 3030 break;
3031 } 3031 }
3032 default: 3032 default:
@@ -3311,7 +3311,7 @@ sci_io_request_construct_smp(struct device *dev,
3311 u8 req_len; 3311 u8 req_len;
3312 u32 cmd; 3312 u32 cmd;
3313 3313
3314 kaddr = kmap_atomic(sg_page(sg), KM_IRQ0); 3314 kaddr = kmap_atomic(sg_page(sg));
3315 smp_req = kaddr + sg->offset; 3315 smp_req = kaddr + sg->offset;
3316 /* 3316 /*
3317 * Look at the SMP requests' header fields; for certain SAS 1.x SMP 3317 * Look at the SMP requests' header fields; for certain SAS 1.x SMP
@@ -3337,7 +3337,7 @@ sci_io_request_construct_smp(struct device *dev,
3337 req_len = smp_req->req_len; 3337 req_len = smp_req->req_len;
3338 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32)); 3338 sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
3339 cmd = *(u32 *) smp_req; 3339 cmd = *(u32 *) smp_req;
3340 kunmap_atomic(kaddr, KM_IRQ0); 3340 kunmap_atomic(kaddr);
3341 3341
3342 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE)) 3342 if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
3343 return SCI_FAILURE; 3343 return SCI_FAILURE;
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index f607314810ac..b577c907b318 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -485,11 +485,11 @@ static void fc_fcp_recv_data(struct fc_fcp_pkt *fsp, struct fc_frame *fp)
485 485
486 if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) { 486 if (!(fr_flags(fp) & FCPHF_CRC_UNCHECKED)) {
487 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, 487 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
488 &offset, KM_SOFTIRQ0, NULL); 488 &offset, NULL);
489 } else { 489 } else {
490 crc = crc32(~0, (u8 *) fh, sizeof(*fh)); 490 crc = crc32(~0, (u8 *) fh, sizeof(*fh));
491 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents, 491 copy_len = fc_copy_buffer_to_sglist(buf, len, sg, &nents,
492 &offset, KM_SOFTIRQ0, &crc); 492 &offset, &crc);
493 buf = fc_frame_payload_get(fp, 0); 493 buf = fc_frame_payload_get(fp, 0);
494 if (len % 4) 494 if (len % 4)
495 crc = crc32(crc, buf + len, 4 - (len % 4)); 495 crc = crc32(crc, buf + len, 4 - (len % 4));
@@ -650,10 +650,10 @@ static int fc_fcp_send_data(struct fc_fcp_pkt *fsp, struct fc_seq *seq,
650 * The scatterlist item may be bigger than PAGE_SIZE, 650 * The scatterlist item may be bigger than PAGE_SIZE,
651 * but we must not cross pages inside the kmap. 651 * but we must not cross pages inside the kmap.
652 */ 652 */
653 page_addr = kmap_atomic(page, KM_SOFTIRQ0); 653 page_addr = kmap_atomic(page);
654 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK), 654 memcpy(data, (char *)page_addr + (off & ~PAGE_MASK),
655 sg_bytes); 655 sg_bytes);
656 kunmap_atomic(page_addr, KM_SOFTIRQ0); 656 kunmap_atomic(page_addr);
657 data += sg_bytes; 657 data += sg_bytes;
658 } 658 }
659 offset += sg_bytes; 659 offset += sg_bytes;
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 1bf9841ef154..8d65a51a7598 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -105,14 +105,13 @@ module_exit(libfc_exit);
105 * @sg: pointer to the pointer of the SG list. 105 * @sg: pointer to the pointer of the SG list.
106 * @nents: pointer to the remaining number of entries in the SG list. 106 * @nents: pointer to the remaining number of entries in the SG list.
107 * @offset: pointer to the current offset in the SG list. 107 * @offset: pointer to the current offset in the SG list.
108 * @km_type: dedicated page table slot type for kmap_atomic.
109 * @crc: pointer to the 32-bit crc value. 108 * @crc: pointer to the 32-bit crc value.
110 * If crc is NULL, CRC is not calculated. 109 * If crc is NULL, CRC is not calculated.
111 */ 110 */
112u32 fc_copy_buffer_to_sglist(void *buf, size_t len, 111u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
113 struct scatterlist *sg, 112 struct scatterlist *sg,
114 u32 *nents, size_t *offset, 113 u32 *nents, size_t *offset,
115 enum km_type km_type, u32 *crc) 114 u32 *crc)
116{ 115{
117 size_t remaining = len; 116 size_t remaining = len;
118 u32 copy_len = 0; 117 u32 copy_len = 0;
@@ -142,12 +141,11 @@ u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
142 off = *offset + sg->offset; 141 off = *offset + sg->offset;
143 sg_bytes = min(sg_bytes, 142 sg_bytes = min(sg_bytes,
144 (size_t)(PAGE_SIZE - (off & ~PAGE_MASK))); 143 (size_t)(PAGE_SIZE - (off & ~PAGE_MASK)));
145 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT), 144 page_addr = kmap_atomic(sg_page(sg) + (off >> PAGE_SHIFT));
146 km_type);
147 if (crc) 145 if (crc)
148 *crc = crc32(*crc, buf, sg_bytes); 146 *crc = crc32(*crc, buf, sg_bytes);
149 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes); 147 memcpy((char *)page_addr + (off & ~PAGE_MASK), buf, sg_bytes);
150 kunmap_atomic(page_addr, km_type); 148 kunmap_atomic(page_addr);
151 buf += sg_bytes; 149 buf += sg_bytes;
152 *offset += sg_bytes; 150 *offset += sg_bytes;
153 remaining -= sg_bytes; 151 remaining -= sg_bytes;
diff --git a/drivers/scsi/libfc/fc_libfc.h b/drivers/scsi/libfc/fc_libfc.h
index c7d071289af5..c2830cc66d6a 100644
--- a/drivers/scsi/libfc/fc_libfc.h
+++ b/drivers/scsi/libfc/fc_libfc.h
@@ -134,6 +134,6 @@ extern void fc_fc4_conf_lport_params(struct fc_lport *, enum fc_fh_type);
134u32 fc_copy_buffer_to_sglist(void *buf, size_t len, 134u32 fc_copy_buffer_to_sglist(void *buf, size_t len,
135 struct scatterlist *sg, 135 struct scatterlist *sg,
136 u32 *nents, size_t *offset, 136 u32 *nents, size_t *offset,
137 enum km_type km_type, u32 *crc); 137 u32 *crc);
138 138
139#endif /* _FC_LIBFC_H_ */ 139#endif /* _FC_LIBFC_H_ */
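With km_type removed from the prototype, every fc_copy_buffer_to_sglist() caller shrinks by one argument, as the fc_fcp hunk above and the fc_lport hunk below show. A hypothetical call under the new signature (the wrapper is illustrative; the helper itself is libfc-internal):

    #include <linux/types.h>

    /* Copy len bytes of payload into sg, accumulating a CRC as
     * fc_fcp_recv_data() does; returns the number of bytes copied. */
    static u32 copy_payload(void *buf, size_t len, struct scatterlist *sg,
                            u32 nents)
    {
            size_t offset = 0;
            u32 crc = ~0;

            return fc_copy_buffer_to_sglist(buf, len, sg, &nents,
                                            &offset, &crc);
    }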
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 83750ebb527f..c1a808cc5920 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1698,7 +1698,7 @@ static void fc_lport_bsg_resp(struct fc_seq *sp, struct fc_frame *fp,
1698 1698
1699 job->reply->reply_payload_rcv_len += 1699 job->reply->reply_payload_rcv_len +=
1700 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents, 1700 fc_copy_buffer_to_sglist(buf, len, info->sg, &info->nents,
1701 &info->offset, KM_BIO_SRC_IRQ, NULL); 1701 &info->offset, NULL);
1702 1702
1703 if (fr_eof(fp) == FC_EOF_T && 1703 if (fr_eof(fp) == FC_EOF_T &&
1704 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) == 1704 (ntoh24(fh->fh_f_ctl) & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
diff --git a/drivers/scsi/libiscsi_tcp.c b/drivers/scsi/libiscsi_tcp.c
index 5715a3d0a3d3..7f0465b9623e 100644
--- a/drivers/scsi/libiscsi_tcp.c
+++ b/drivers/scsi/libiscsi_tcp.c
@@ -135,7 +135,7 @@ static void iscsi_tcp_segment_map(struct iscsi_segment *segment, int recv)
135 135
136 if (recv) { 136 if (recv) {
137 segment->atomic_mapped = true; 137 segment->atomic_mapped = true;
138 segment->sg_mapped = kmap_atomic(sg_page(sg), KM_SOFTIRQ0); 138 segment->sg_mapped = kmap_atomic(sg_page(sg));
139 } else { 139 } else {
140 segment->atomic_mapped = false; 140 segment->atomic_mapped = false;
141 /* the xmit path can sleep with the page mapped so use kmap */ 141 /* the xmit path can sleep with the page mapped so use kmap */
@@ -149,7 +149,7 @@ void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
149{ 149{
150 if (segment->sg_mapped) { 150 if (segment->sg_mapped) {
151 if (segment->atomic_mapped) 151 if (segment->atomic_mapped)
152 kunmap_atomic(segment->sg_mapped, KM_SOFTIRQ0); 152 kunmap_atomic(segment->sg_mapped);
153 else 153 else
154 kunmap(sg_page(segment->sg)); 154 kunmap(sg_page(segment->sg));
155 segment->sg_mapped = NULL; 155 segment->sg_mapped = NULL;
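libiscsi_tcp keeps a flag recording which flavour of mapping it took, so the unmap side can pair correctly: the receive path runs in softirq context and must use the atomic map, while the transmit path may sleep with the page mapped and therefore uses kmap(). A condensed sketch of the pairing (struct and helper names are illustrative):

    #include <linux/highmem.h>
    #include <linux/scatterlist.h>

    struct seg {
            struct scatterlist *sg;
            void *sg_mapped;
            bool atomic_mapped;
    };

    static void seg_map(struct seg *s, bool recv)
    {
            s->atomic_mapped = recv;        /* softirq rx must not sleep */
            s->sg_mapped = recv ? kmap_atomic(sg_page(s->sg))
                                : kmap(sg_page(s->sg));
    }

    static void seg_unmap(struct seg *s)
    {
            if (s->atomic_mapped)
                    kunmap_atomic(s->sg_mapped);
            else
                    kunmap(sg_page(s->sg));
            s->sg_mapped = NULL;
    }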
diff --git a/drivers/scsi/libsas/sas_host_smp.c b/drivers/scsi/libsas/sas_host_smp.c
index bb8f49269a68..3814d3eed401 100644
--- a/drivers/scsi/libsas/sas_host_smp.c
+++ b/drivers/scsi/libsas/sas_host_smp.c
@@ -246,9 +246,9 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
246 } 246 }
247 247
248 local_irq_disable(); 248 local_irq_disable();
249 buf = kmap_atomic(bio_page(req->bio), KM_USER0) + bio_offset(req->bio); 249 buf = kmap_atomic(bio_page(req->bio));
250 memcpy(req_data, buf, blk_rq_bytes(req)); 250 memcpy(req_data, buf, blk_rq_bytes(req));
251 kunmap_atomic(buf - bio_offset(req->bio), KM_USER0); 251 kunmap_atomic(buf - bio_offset(req->bio));
252 local_irq_enable(); 252 local_irq_enable();
253 253
254 if (req_data[0] != SMP_REQUEST) 254 if (req_data[0] != SMP_REQUEST)
@@ -361,10 +361,10 @@ int sas_smp_host_handler(struct Scsi_Host *shost, struct request *req,
361 } 361 }
362 362
363 local_irq_disable(); 363 local_irq_disable();
364 buf = kmap_atomic(bio_page(rsp->bio), KM_USER0) + bio_offset(rsp->bio); 364 buf = kmap_atomic(bio_page(rsp->bio));
365 memcpy(buf, resp_data, blk_rq_bytes(rsp)); 365 memcpy(buf, resp_data, blk_rq_bytes(rsp));
366 flush_kernel_dcache_page(bio_page(rsp->bio)); 366 flush_kernel_dcache_page(bio_page(rsp->bio));
367 kunmap_atomic(buf - bio_offset(rsp->bio), KM_USER0); 367 kunmap_atomic(buf - bio_offset(rsp->bio));
368 local_irq_enable(); 368 local_irq_enable();
369 369
370 out: 370 out:
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index 15eefa1d61fd..4d39a9ffc081 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -670,10 +670,10 @@ mega_build_cmd(adapter_t *adapter, Scsi_Cmnd *cmd, int *busy)
670 struct scatterlist *sg; 670 struct scatterlist *sg;
671 671
672 sg = scsi_sglist(cmd); 672 sg = scsi_sglist(cmd);
673 buf = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset; 673 buf = kmap_atomic(sg_page(sg)) + sg->offset;
674 674
675 memset(buf, 0, cmd->cmnd[4]); 675 memset(buf, 0, cmd->cmnd[4]);
676 kunmap_atomic(buf - sg->offset, KM_IRQ0); 676 kunmap_atomic(buf - sg->offset);
677 677
678 cmd->result = (DID_OK << 16); 678 cmd->result = (DID_OK << 16);
679 cmd->scsi_done(cmd); 679 cmd->scsi_done(cmd);
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index a4884a57cf79..01ab9c4d3464 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -1885,11 +1885,11 @@ int mvs_slot_complete(struct mvs_info *mvi, u32 rx_desc, u32 flags)
1885 case SAS_PROTOCOL_SMP: { 1885 case SAS_PROTOCOL_SMP: {
1886 struct scatterlist *sg_resp = &task->smp_task.smp_resp; 1886 struct scatterlist *sg_resp = &task->smp_task.smp_resp;
1887 tstat->stat = SAM_STAT_GOOD; 1887 tstat->stat = SAM_STAT_GOOD;
1888 to = kmap_atomic(sg_page(sg_resp), KM_IRQ0); 1888 to = kmap_atomic(sg_page(sg_resp));
1889 memcpy(to + sg_resp->offset, 1889 memcpy(to + sg_resp->offset,
1890 slot->response + sizeof(struct mvs_err_info), 1890 slot->response + sizeof(struct mvs_err_info),
1891 sg_dma_len(sg_resp)); 1891 sg_dma_len(sg_resp));
1892 kunmap_atomic(to, KM_IRQ0); 1892 kunmap_atomic(to);
1893 break; 1893 break;
1894 } 1894 }
1895 1895
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 6888b2ca5bfc..68da6c092f65 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -1778,7 +1778,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1778 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) { 1778 scsi_for_each_prot_sg(SCpnt, psgl, scsi_prot_sg_count(SCpnt), i) {
1779 int len = min(psgl->length, resid); 1779 int len = min(psgl->length, resid);
1780 1780
1781 paddr = kmap_atomic(sg_page(psgl), KM_IRQ0) + psgl->offset; 1781 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1782 memcpy(paddr, dif_storep + dif_offset(sector), len); 1782 memcpy(paddr, dif_storep + dif_offset(sector), len);
1783 1783
1784 sector += len >> 3; 1784 sector += len >> 3;
@@ -1788,7 +1788,7 @@ static int prot_verify_read(struct scsi_cmnd *SCpnt, sector_t start_sec,
1788 sector = do_div(tmp_sec, sdebug_store_sectors); 1788 sector = do_div(tmp_sec, sdebug_store_sectors);
1789 } 1789 }
1790 resid -= len; 1790 resid -= len;
1791 kunmap_atomic(paddr, KM_IRQ0); 1791 kunmap_atomic(paddr);
1792 } 1792 }
1793 1793
1794 dix_reads++; 1794 dix_reads++;
@@ -1881,12 +1881,12 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1881 BUG_ON(scsi_sg_count(SCpnt) == 0); 1881 BUG_ON(scsi_sg_count(SCpnt) == 0);
1882 BUG_ON(scsi_prot_sg_count(SCpnt) == 0); 1882 BUG_ON(scsi_prot_sg_count(SCpnt) == 0);
1883 1883
1884 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) + psgl->offset; 1884 paddr = kmap_atomic(sg_page(psgl)) + psgl->offset;
1885 ppage_offset = 0; 1885 ppage_offset = 0;
1886 1886
1887 /* For each data page */ 1887 /* For each data page */
1888 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) { 1888 scsi_for_each_sg(SCpnt, dsgl, scsi_sg_count(SCpnt), i) {
1889 daddr = kmap_atomic(sg_page(dsgl), KM_IRQ0) + dsgl->offset; 1889 daddr = kmap_atomic(sg_page(dsgl)) + dsgl->offset;
1890 1890
1891 /* For each sector-sized chunk in data page */ 1891 /* For each sector-sized chunk in data page */
1892 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) { 1892 for (j = 0 ; j < dsgl->length ; j += scsi_debug_sector_size) {
@@ -1895,10 +1895,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1895 * protection page advance to the next one 1895 * protection page advance to the next one
1896 */ 1896 */
1897 if (ppage_offset >= psgl->length) { 1897 if (ppage_offset >= psgl->length) {
1898 kunmap_atomic(paddr, KM_IRQ1); 1898 kunmap_atomic(paddr);
1899 psgl = sg_next(psgl); 1899 psgl = sg_next(psgl);
1900 BUG_ON(psgl == NULL); 1900 BUG_ON(psgl == NULL);
1901 paddr = kmap_atomic(sg_page(psgl), KM_IRQ1) 1901 paddr = kmap_atomic(sg_page(psgl))
1902 + psgl->offset; 1902 + psgl->offset;
1903 ppage_offset = 0; 1903 ppage_offset = 0;
1904 } 1904 }
@@ -1971,10 +1971,10 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1971 ppage_offset += sizeof(struct sd_dif_tuple); 1971 ppage_offset += sizeof(struct sd_dif_tuple);
1972 } 1972 }
1973 1973
1974 kunmap_atomic(daddr, KM_IRQ0); 1974 kunmap_atomic(daddr);
1975 } 1975 }
1976 1976
1977 kunmap_atomic(paddr, KM_IRQ1); 1977 kunmap_atomic(paddr);
1978 1978
1979 dix_writes++; 1979 dix_writes++;
1980 1980
@@ -1982,8 +1982,8 @@ static int prot_verify_write(struct scsi_cmnd *SCpnt, sector_t start_sec,
1982 1982
1983out: 1983out:
1984 dif_errors++; 1984 dif_errors++;
1985 kunmap_atomic(daddr, KM_IRQ0); 1985 kunmap_atomic(daddr);
1986 kunmap_atomic(paddr, KM_IRQ1); 1986 kunmap_atomic(paddr);
1987 return ret; 1987 return ret;
1988} 1988}
1989 1989
@@ -2303,7 +2303,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2303 2303
2304 offset = 0; 2304 offset = 0;
2305 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) { 2305 for_each_sg(sdb->table.sgl, sg, sdb->table.nents, i) {
2306 kaddr = (unsigned char *)kmap_atomic(sg_page(sg), KM_USER0); 2306 kaddr = (unsigned char *)kmap_atomic(sg_page(sg));
2307 if (!kaddr) 2307 if (!kaddr)
2308 goto out; 2308 goto out;
2309 2309
@@ -2311,7 +2311,7 @@ static int resp_xdwriteread(struct scsi_cmnd *scp, unsigned long long lba,
2311 *(kaddr + sg->offset + j) ^= *(buf + offset + j); 2311 *(kaddr + sg->offset + j) ^= *(buf + offset + j);
2312 2312
2313 offset += sg->length; 2313 offset += sg->length;
2314 kunmap_atomic(kaddr, KM_USER0); 2314 kunmap_atomic(kaddr);
2315 } 2315 }
2316 ret = 0; 2316 ret = 0;
2317out: 2317out:
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b2c95dbe9d65..a33b2b66da67 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2567,7 +2567,7 @@ void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
2567 if (*len > sg_len) 2567 if (*len > sg_len)
2568 *len = sg_len; 2568 *len = sg_len;
2569 2569
2570 return kmap_atomic(page, KM_BIO_SRC_IRQ); 2570 return kmap_atomic(page);
2571} 2571}
2572EXPORT_SYMBOL(scsi_kmap_atomic_sg); 2572EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2573 2573
@@ -2577,6 +2577,6 @@ EXPORT_SYMBOL(scsi_kmap_atomic_sg);
2577 */ 2577 */
2578void scsi_kunmap_atomic_sg(void *virt) 2578void scsi_kunmap_atomic_sg(void *virt)
2579{ 2579{
2580 kunmap_atomic(virt, KM_BIO_SRC_IRQ); 2580 kunmap_atomic(virt);
2581} 2581}
2582EXPORT_SYMBOL(scsi_kunmap_atomic_sg); 2582EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
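scsi_kmap_atomic_sg() and scsi_kunmap_atomic_sg() are now thin veneers over the slot-less API, but their contract is unchanged: *len is clamped to what fits in the single mapping returned. A hedged usage sketch (the zeroing helper is hypothetical):

    #include <linux/string.h>
    #include <scsi/scsi_cmnd.h>

    /* Zero up to want bytes at the start of a command's data buffer. */
    static void zero_first_chunk(struct scsi_cmnd *cmd, size_t want)
    {
            size_t offset = 0, len = want;
            void *va = scsi_kmap_atomic_sg(scsi_sglist(cmd),
                                           scsi_sg_count(cmd),
                                           &offset, &len);

            memset(va, 0, len);     /* len may have been clamped */
            scsi_kunmap_atomic_sg(va);
    }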
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index f8fb2d691c0a..e52d5bc42bc4 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -392,7 +392,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
392 virt = bio->bi_integrity->bip_sector & 0xffffffff; 392 virt = bio->bi_integrity->bip_sector & 0xffffffff;
393 393
394 bip_for_each_vec(iv, bio->bi_integrity, i) { 394 bip_for_each_vec(iv, bio->bi_integrity, i) {
395 sdt = kmap_atomic(iv->bv_page, KM_USER0) 395 sdt = kmap_atomic(iv->bv_page)
396 + iv->bv_offset; 396 + iv->bv_offset;
397 397
398 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { 398 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
@@ -405,7 +405,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
405 phys++; 405 phys++;
406 } 406 }
407 407
408 kunmap_atomic(sdt, KM_USER0); 408 kunmap_atomic(sdt);
409 } 409 }
410 410
411 bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY); 411 bio->bi_flags |= (1 << BIO_MAPPED_INTEGRITY);
@@ -414,7 +414,7 @@ int sd_dif_prepare(struct request *rq, sector_t hw_sector, unsigned int sector_s
414 return 0; 414 return 0;
415 415
416error: 416error:
417 kunmap_atomic(sdt, KM_USER0); 417 kunmap_atomic(sdt);
418 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n", 418 sd_printk(KERN_ERR, sdkp, "%s: virt %u, phys %u, ref %u, app %4x\n",
419 __func__, virt, phys, be32_to_cpu(sdt->ref_tag), 419 __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
420 be16_to_cpu(sdt->app_tag)); 420 be16_to_cpu(sdt->app_tag));
@@ -453,13 +453,13 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
453 virt = bio->bi_integrity->bip_sector & 0xffffffff; 453 virt = bio->bi_integrity->bip_sector & 0xffffffff;
454 454
455 bip_for_each_vec(iv, bio->bi_integrity, i) { 455 bip_for_each_vec(iv, bio->bi_integrity, i) {
456 sdt = kmap_atomic(iv->bv_page, KM_USER0) 456 sdt = kmap_atomic(iv->bv_page)
457 + iv->bv_offset; 457 + iv->bv_offset;
458 458
459 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { 459 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
460 460
461 if (sectors == 0) { 461 if (sectors == 0) {
462 kunmap_atomic(sdt, KM_USER0); 462 kunmap_atomic(sdt);
463 return; 463 return;
464 } 464 }
465 465
@@ -474,7 +474,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
474 sectors--; 474 sectors--;
475 } 475 }
476 476
477 kunmap_atomic(sdt, KM_USER0); 477 kunmap_atomic(sdt);
478 } 478 }
479 } 479 }
480} 480}
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 695ffc36e02d..83a1972a1999 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -481,6 +481,19 @@ cleanup:
481 return NULL; 481 return NULL;
482} 482}
483 483
484/* Disgusting wrapper functions */
485static inline unsigned long sg_kmap_atomic(struct scatterlist *sgl, int idx)
486{
487 void *addr = kmap_atomic(sg_page(sgl + idx));
488 return (unsigned long)addr;
489}
490
491static inline void sg_kunmap_atomic(unsigned long addr)
492{
493 kunmap_atomic((void *)addr);
494}
495
496
484/* Assume the original sgl has enough room */ 497/* Assume the original sgl has enough room */
485static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl, 498static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
486 struct scatterlist *bounce_sgl, 499 struct scatterlist *bounce_sgl,
@@ -499,15 +512,12 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
499 local_irq_save(flags); 512 local_irq_save(flags);
500 513
501 for (i = 0; i < orig_sgl_count; i++) { 514 for (i = 0; i < orig_sgl_count; i++) {
502 dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), 515 dest_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
503 KM_IRQ0) + orig_sgl[i].offset;
504 dest = dest_addr; 516 dest = dest_addr;
505 destlen = orig_sgl[i].length; 517 destlen = orig_sgl[i].length;
506 518
507 if (bounce_addr == 0) 519 if (bounce_addr == 0)
508 bounce_addr = 520 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
509 (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
510 KM_IRQ0);
511 521
512 while (destlen) { 522 while (destlen) {
513 src = bounce_addr + bounce_sgl[j].offset; 523 src = bounce_addr + bounce_sgl[j].offset;
@@ -523,7 +533,7 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
523 533
524 if (bounce_sgl[j].offset == bounce_sgl[j].length) { 534 if (bounce_sgl[j].offset == bounce_sgl[j].length) {
525 /* full */ 535 /* full */
526 kunmap_atomic((void *)bounce_addr, KM_IRQ0); 536 sg_kunmap_atomic(bounce_addr);
527 j++; 537 j++;
528 538
529 /* 539 /*
@@ -537,26 +547,21 @@ static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
537 /* 547 /*
538 * We are done; cleanup and return. 548 * We are done; cleanup and return.
539 */ 549 */
540 kunmap_atomic((void *)(dest_addr - 550 sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
541 orig_sgl[i].offset),
542 KM_IRQ0);
543 local_irq_restore(flags); 551 local_irq_restore(flags);
544 return total_copied; 552 return total_copied;
545 } 553 }
546 554
547 /* if we need to use another bounce buffer */ 555 /* if we need to use another bounce buffer */
548 if (destlen || i != orig_sgl_count - 1) 556 if (destlen || i != orig_sgl_count - 1)
549 bounce_addr = 557 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
550 (unsigned long)kmap_atomic(
551 sg_page((&bounce_sgl[j])), KM_IRQ0);
552 } else if (destlen == 0 && i == orig_sgl_count - 1) { 558 } else if (destlen == 0 && i == orig_sgl_count - 1) {
553 /* unmap the last bounce that is < PAGE_SIZE */ 559 /* unmap the last bounce that is < PAGE_SIZE */
554 kunmap_atomic((void *)bounce_addr, KM_IRQ0); 560 sg_kunmap_atomic(bounce_addr);
555 } 561 }
556 } 562 }
557 563
558 kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset), 564 sg_kunmap_atomic(dest_addr - orig_sgl[i].offset);
559 KM_IRQ0);
560 } 565 }
561 566
562 local_irq_restore(flags); 567 local_irq_restore(flags);
@@ -581,15 +586,12 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
581 local_irq_save(flags); 586 local_irq_save(flags);
582 587
583 for (i = 0; i < orig_sgl_count; i++) { 588 for (i = 0; i < orig_sgl_count; i++) {
584 src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])), 589 src_addr = sg_kmap_atomic(orig_sgl,i) + orig_sgl[i].offset;
585 KM_IRQ0) + orig_sgl[i].offset;
586 src = src_addr; 590 src = src_addr;
587 srclen = orig_sgl[i].length; 591 srclen = orig_sgl[i].length;
588 592
589 if (bounce_addr == 0) 593 if (bounce_addr == 0)
590 bounce_addr = 594 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
591 (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
592 KM_IRQ0);
593 595
594 while (srclen) { 596 while (srclen) {
595 /* assume bounce offset always == 0 */ 597 /* assume bounce offset always == 0 */
@@ -606,22 +608,20 @@ static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
606 608
607 if (bounce_sgl[j].length == PAGE_SIZE) { 609 if (bounce_sgl[j].length == PAGE_SIZE) {
608 /* full..move to next entry */ 610 /* full..move to next entry */
609 kunmap_atomic((void *)bounce_addr, KM_IRQ0); 611 sg_kunmap_atomic(bounce_addr);
610 j++; 612 j++;
611 613
612 /* if we need to use another bounce buffer */ 614 /* if we need to use another bounce buffer */
613 if (srclen || i != orig_sgl_count - 1) 615 if (srclen || i != orig_sgl_count - 1)
614 bounce_addr = 616 bounce_addr = sg_kmap_atomic(bounce_sgl,j);
615 (unsigned long)kmap_atomic(
616 sg_page((&bounce_sgl[j])), KM_IRQ0);
617 617
618 } else if (srclen == 0 && i == orig_sgl_count - 1) { 618 } else if (srclen == 0 && i == orig_sgl_count - 1) {
619 /* unmap the last bounce that is < PAGE_SIZE */ 619 /* unmap the last bounce that is < PAGE_SIZE */
620 kunmap_atomic((void *)bounce_addr, KM_IRQ0); 620 sg_kunmap_atomic(bounce_addr);
621 } 621 }
622 } 622 }
623 623
624 kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0); 624 sg_kunmap_atomic(src_addr - orig_sgl[i].offset);
625 } 625 }
626 626
627 local_irq_restore(flags); 627 local_irq_restore(flags);
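storvsc's self-described disgusting wrappers exist because the driver carries mapped addresses around as unsigned long; sg_kmap_atomic() and sg_kunmap_atomic() centralize the casts instead of repeating them at every call site. The shape they enable, sketched as if inside the driver (the copy helper is hypothetical, and the entry lengths are assumed compatible):

    /* Copy one bounce entry into the matching original entry. */
    static void copy_one(struct scatterlist *orig_sgl, int i,
                         struct scatterlist *bounce_sgl, int j)
    {
            unsigned long dest = sg_kmap_atomic(orig_sgl, i)
                                 + orig_sgl[i].offset;
            unsigned long src = sg_kmap_atomic(bounce_sgl, j);

            memcpy((void *)dest, (void *)src, orig_sgl[i].length);

            sg_kunmap_atomic(src);  /* stacked mappings unmap LIFO */
            sg_kunmap_atomic(dest - orig_sgl[i].offset);
    }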
diff --git a/drivers/staging/ramster/xvmalloc.c b/drivers/staging/ramster/xvmalloc.c
index 1f9c5082b6d5..93ba8e9407aa 100644
--- a/drivers/staging/ramster/xvmalloc.c
+++ b/drivers/staging/ramster/xvmalloc.c
@@ -56,17 +56,17 @@ static void clear_flag(struct block_header *block, enum blockflags flag)
56 * This is called from xv_malloc/xv_free path, so it 56 * This is called from xv_malloc/xv_free path, so it
57 * needs to be fast. 57 * needs to be fast.
58 */ 58 */
59static void *get_ptr_atomic(struct page *page, u16 offset, enum km_type type) 59static void *get_ptr_atomic(struct page *page, u16 offset)
60{ 60{
61 unsigned char *base; 61 unsigned char *base;
62 62
63 base = kmap_atomic(page, type); 63 base = kmap_atomic(page);
64 return base + offset; 64 return base + offset;
65} 65}
66 66
67static void put_ptr_atomic(void *ptr, enum km_type type) 67static void put_ptr_atomic(void *ptr)
68{ 68{
69 kunmap_atomic(ptr, type); 69 kunmap_atomic(ptr);
70} 70}
71 71
72static u32 get_blockprev(struct block_header *block) 72static u32 get_blockprev(struct block_header *block)
@@ -202,10 +202,10 @@ static void insert_block(struct xv_pool *pool, struct page *page, u32 offset,
202 202
203 if (block->link.next_page) { 203 if (block->link.next_page) {
204 nextblock = get_ptr_atomic(block->link.next_page, 204 nextblock = get_ptr_atomic(block->link.next_page,
205 block->link.next_offset, KM_USER1); 205 block->link.next_offset);
206 nextblock->link.prev_page = page; 206 nextblock->link.prev_page = page;
207 nextblock->link.prev_offset = offset; 207 nextblock->link.prev_offset = offset;
208 put_ptr_atomic(nextblock, KM_USER1); 208 put_ptr_atomic(nextblock);
209 /* If there was a next page then the free bits are set. */ 209 /* If there was a next page then the free bits are set. */
210 return; 210 return;
211 } 211 }
@@ -225,18 +225,18 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
225 225
226 if (block->link.prev_page) { 226 if (block->link.prev_page) {
227 tmpblock = get_ptr_atomic(block->link.prev_page, 227 tmpblock = get_ptr_atomic(block->link.prev_page,
228 block->link.prev_offset, KM_USER1); 228 block->link.prev_offset);
229 tmpblock->link.next_page = block->link.next_page; 229 tmpblock->link.next_page = block->link.next_page;
230 tmpblock->link.next_offset = block->link.next_offset; 230 tmpblock->link.next_offset = block->link.next_offset;
231 put_ptr_atomic(tmpblock, KM_USER1); 231 put_ptr_atomic(tmpblock);
232 } 232 }
233 233
234 if (block->link.next_page) { 234 if (block->link.next_page) {
235 tmpblock = get_ptr_atomic(block->link.next_page, 235 tmpblock = get_ptr_atomic(block->link.next_page,
236 block->link.next_offset, KM_USER1); 236 block->link.next_offset);
237 tmpblock->link.prev_page = block->link.prev_page; 237 tmpblock->link.prev_page = block->link.prev_page;
238 tmpblock->link.prev_offset = block->link.prev_offset; 238 tmpblock->link.prev_offset = block->link.prev_offset;
239 put_ptr_atomic(tmpblock, KM_USER1); 239 put_ptr_atomic(tmpblock);
240 } 240 }
241 241
242 /* Is this block is at the head of the freelist? */ 242 /* Is this block is at the head of the freelist? */
@@ -249,11 +249,10 @@ static void remove_block(struct xv_pool *pool, struct page *page, u32 offset,
249 if (pool->freelist[slindex].page) { 249 if (pool->freelist[slindex].page) {
250 struct block_header *tmpblock; 250 struct block_header *tmpblock;
251 tmpblock = get_ptr_atomic(pool->freelist[slindex].page, 251 tmpblock = get_ptr_atomic(pool->freelist[slindex].page,
252 pool->freelist[slindex].offset, 252 pool->freelist[slindex].offset);
253 KM_USER1);
254 tmpblock->link.prev_page = NULL; 253 tmpblock->link.prev_page = NULL;
255 tmpblock->link.prev_offset = 0; 254 tmpblock->link.prev_offset = 0;
256 put_ptr_atomic(tmpblock, KM_USER1); 255 put_ptr_atomic(tmpblock);
257 } else { 256 } else {
258 /* This freelist bucket is empty */ 257 /* This freelist bucket is empty */
259 __clear_bit(slindex % BITS_PER_LONG, 258 __clear_bit(slindex % BITS_PER_LONG,
@@ -284,7 +283,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
284 stat_inc(&pool->total_pages); 283 stat_inc(&pool->total_pages);
285 284
286 spin_lock(&pool->lock); 285 spin_lock(&pool->lock);
287 block = get_ptr_atomic(page, 0, KM_USER0); 286 block = get_ptr_atomic(page, 0);
288 287
289 block->size = PAGE_SIZE - XV_ALIGN; 288 block->size = PAGE_SIZE - XV_ALIGN;
290 set_flag(block, BLOCK_FREE); 289 set_flag(block, BLOCK_FREE);
@@ -293,7 +292,7 @@ static int grow_pool(struct xv_pool *pool, gfp_t flags)
293 292
294 insert_block(pool, page, 0, block); 293 insert_block(pool, page, 0, block);
295 294
296 put_ptr_atomic(block, KM_USER0); 295 put_ptr_atomic(block);
297 spin_unlock(&pool->lock); 296 spin_unlock(&pool->lock);
298 297
299 return 0; 298 return 0;
@@ -375,7 +374,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
375 return -ENOMEM; 374 return -ENOMEM;
376 } 375 }
377 376
378 block = get_ptr_atomic(*page, *offset, KM_USER0); 377 block = get_ptr_atomic(*page, *offset);
379 378
380 remove_block(pool, *page, *offset, block, index); 379 remove_block(pool, *page, *offset, block, index);
381 380
@@ -405,7 +404,7 @@ int xv_malloc(struct xv_pool *pool, u32 size, struct page **page,
405 block->size = origsize; 404 block->size = origsize;
406 clear_flag(block, BLOCK_FREE); 405 clear_flag(block, BLOCK_FREE);
407 406
408 put_ptr_atomic(block, KM_USER0); 407 put_ptr_atomic(block);
409 spin_unlock(&pool->lock); 408 spin_unlock(&pool->lock);
410 409
411 *offset += XV_ALIGN; 410 *offset += XV_ALIGN;
@@ -426,7 +425,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
426 425
427 spin_lock(&pool->lock); 426 spin_lock(&pool->lock);
428 427
429 page_start = get_ptr_atomic(page, 0, KM_USER0); 428 page_start = get_ptr_atomic(page, 0);
430 block = (struct block_header *)((char *)page_start + offset); 429 block = (struct block_header *)((char *)page_start + offset);
431 430
432 /* Catch double free bugs */ 431 /* Catch double free bugs */
@@ -468,7 +467,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
468 467
469 /* No used objects in this page. Free it. */ 468 /* No used objects in this page. Free it. */
470 if (block->size == PAGE_SIZE - XV_ALIGN) { 469 if (block->size == PAGE_SIZE - XV_ALIGN) {
471 put_ptr_atomic(page_start, KM_USER0); 470 put_ptr_atomic(page_start);
472 spin_unlock(&pool->lock); 471 spin_unlock(&pool->lock);
473 472
474 __free_page(page); 473 __free_page(page);
@@ -486,7 +485,7 @@ void xv_free(struct xv_pool *pool, struct page *page, u32 offset)
486 set_blockprev(tmpblock, offset); 485 set_blockprev(tmpblock, offset);
487 } 486 }
488 487
489 put_ptr_atomic(page_start, KM_USER0); 488 put_ptr_atomic(page_start);
490 spin_unlock(&pool->lock); 489 spin_unlock(&pool->lock);
491} 490}
492EXPORT_SYMBOL_GPL(xv_free); 491EXPORT_SYMBOL_GPL(xv_free);
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index 36d53ed9d71a..68b2e053a0e6 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -496,13 +496,13 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
496 } 496 }
497 ASSERT_SENTINEL(zh, ZBH); 497 ASSERT_SENTINEL(zh, ZBH);
498 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); 498 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
499 to_va = kmap_atomic(page, KM_USER0); 499 to_va = kmap_atomic(page);
500 size = zh->size; 500 size = zh->size;
501 from_va = zbud_data(zh, size); 501 from_va = zbud_data(zh, size);
502 ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len); 502 ret = lzo1x_decompress_safe(from_va, size, to_va, &out_len);
503 BUG_ON(ret != LZO_E_OK); 503 BUG_ON(ret != LZO_E_OK);
504 BUG_ON(out_len != PAGE_SIZE); 504 BUG_ON(out_len != PAGE_SIZE);
505 kunmap_atomic(to_va, KM_USER0); 505 kunmap_atomic(to_va);
506out: 506out:
507 spin_unlock(&zbpg->lock); 507 spin_unlock(&zbpg->lock);
508 return ret; 508 return ret;
@@ -1109,7 +1109,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
1109 goto out; 1109 goto out;
1110 atomic_inc(&zv_curr_dist_counts[chunks]); 1110 atomic_inc(&zv_curr_dist_counts[chunks]);
1111 atomic_inc(&zv_cumul_dist_counts[chunks]); 1111 atomic_inc(&zv_cumul_dist_counts[chunks]);
1112 zv = kmap_atomic(page, KM_USER0) + offset; 1112 zv = kmap_atomic(page) + offset;
1113 zv->index = index; 1113 zv->index = index;
1114 zv->oid = *oid; 1114 zv->oid = *oid;
1115 zv->pool_id = pool_id; 1115 zv->pool_id = pool_id;
@@ -1123,7 +1123,7 @@ static struct zv_hdr *zv_create(struct zcache_client *cli, uint32_t pool_id,
1123 spin_unlock(&zcache_rem_op_list_lock); 1123 spin_unlock(&zcache_rem_op_list_lock);
1124 } 1124 }
1125 memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen); 1125 memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
1126 kunmap_atomic(zv, KM_USER0); 1126 kunmap_atomic(zv);
1127out: 1127out:
1128 return zv; 1128 return zv;
1129} 1129}
@@ -1145,7 +1145,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
1145 &page, &offset, ZCACHE_GFP_MASK); 1145 &page, &offset, ZCACHE_GFP_MASK);
1146 if (unlikely(ret)) 1146 if (unlikely(ret))
1147 goto out; 1147 goto out;
1148 zv = kmap_atomic(page, KM_USER0) + offset; 1148 zv = kmap_atomic(page) + offset;
1149 SET_SENTINEL(zv, ZVH); 1149 SET_SENTINEL(zv, ZVH);
1150 INIT_LIST_HEAD(&zv->rem_op.list); 1150 INIT_LIST_HEAD(&zv->rem_op.list);
1151 zv->client_id = LOCAL_CLIENT; 1151 zv->client_id = LOCAL_CLIENT;
@@ -1153,7 +1153,7 @@ static struct zv_hdr *zv_alloc(struct tmem_pool *pool,
1153 zv->index = index; 1153 zv->index = index;
1154 zv->oid = *oid; 1154 zv->oid = *oid;
1155 zv->pool_id = pool->pool_id; 1155 zv->pool_id = pool->pool_id;
1156 kunmap_atomic(zv, KM_USER0); 1156 kunmap_atomic(zv);
1157out: 1157out:
1158 return zv; 1158 return zv;
1159} 1159}
@@ -1194,10 +1194,10 @@ static void zv_decompress(struct page *page, struct zv_hdr *zv)
1194 ASSERT_SENTINEL(zv, ZVH); 1194 ASSERT_SENTINEL(zv, ZVH);
1195 size = xv_get_object_size(zv) - sizeof(*zv); 1195 size = xv_get_object_size(zv) - sizeof(*zv);
1196 BUG_ON(size == 0); 1196 BUG_ON(size == 0);
1197 to_va = kmap_atomic(page, KM_USER0); 1197 to_va = kmap_atomic(page);
1198 ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv), 1198 ret = lzo1x_decompress_safe((char *)zv + sizeof(*zv),
1199 size, to_va, &clen); 1199 size, to_va, &clen);
1200 kunmap_atomic(to_va, KM_USER0); 1200 kunmap_atomic(to_va);
1201 BUG_ON(ret != LZO_E_OK); 1201 BUG_ON(ret != LZO_E_OK);
1202 BUG_ON(clen != PAGE_SIZE); 1202 BUG_ON(clen != PAGE_SIZE);
1203} 1203}
@@ -2203,12 +2203,12 @@ static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
2203 BUG_ON(!irqs_disabled()); 2203 BUG_ON(!irqs_disabled());
2204 if (unlikely(dmem == NULL || wmem == NULL)) 2204 if (unlikely(dmem == NULL || wmem == NULL))
2205 goto out; /* no buffer, so can't compress */ 2205 goto out; /* no buffer, so can't compress */
2206 from_va = kmap_atomic(from, KM_USER0); 2206 from_va = kmap_atomic(from);
2207 mb(); 2207 mb();
2208 ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem); 2208 ret = lzo1x_1_compress(from_va, PAGE_SIZE, dmem, out_len, wmem);
2209 BUG_ON(ret != LZO_E_OK); 2209 BUG_ON(ret != LZO_E_OK);
2210 *out_va = dmem; 2210 *out_va = dmem;
2211 kunmap_atomic(from_va, KM_USER0); 2211 kunmap_atomic(from_va);
2212 ret = 1; 2212 ret = 1;
2213out: 2213out:
2214 return ret; 2214 return ret;
diff --git a/drivers/staging/rtl8192u/ieee80211/cipher.c b/drivers/staging/rtl8192u/ieee80211/cipher.c
index 69dcc3176ebc..d47345c4adcf 100644
--- a/drivers/staging/rtl8192u/ieee80211/cipher.c
+++ b/drivers/staging/rtl8192u/ieee80211/cipher.c
@@ -71,8 +71,8 @@ static int crypt(struct crypto_tfm *tfm,
71 u8 *src_p, *dst_p; 71 u8 *src_p, *dst_p;
72 int in_place; 72 int in_place;
73 73
74 scatterwalk_map(&walk_in, 0); 74 scatterwalk_map(&walk_in);
75 scatterwalk_map(&walk_out, 1); 75 scatterwalk_map(&walk_out);
76 src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src); 76 src_p = scatterwalk_whichbuf(&walk_in, bsize, tmp_src);
77 dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst); 77 dst_p = scatterwalk_whichbuf(&walk_out, bsize, tmp_dst);
78 in_place = scatterwalk_samebuf(&walk_in, &walk_out, 78 in_place = scatterwalk_samebuf(&walk_in, &walk_out,
@@ -84,10 +84,10 @@ static int crypt(struct crypto_tfm *tfm,
84 84
85 prfn(tfm, dst_p, src_p, crfn, enc, info, in_place); 85 prfn(tfm, dst_p, src_p, crfn, enc, info, in_place);
86 86
87 scatterwalk_done(&walk_in, 0, nbytes); 87 scatterwalk_done(&walk_in, nbytes);
88 88
89 scatterwalk_copychunks(dst_p, &walk_out, bsize, 1); 89 scatterwalk_copychunks(dst_p, &walk_out, bsize, 1);
90 scatterwalk_done(&walk_out, 1, nbytes); 90 scatterwalk_done(&walk_out, nbytes);
91 91
92 if (!nbytes) 92 if (!nbytes)
93 return 0; 93 return 0;
diff --git a/drivers/staging/rtl8192u/ieee80211/digest.c b/drivers/staging/rtl8192u/ieee80211/digest.c
index 301ed514ac9e..05e7497fd106 100644
--- a/drivers/staging/rtl8192u/ieee80211/digest.c
+++ b/drivers/staging/rtl8192u/ieee80211/digest.c
@@ -39,12 +39,12 @@ static void update(struct crypto_tfm *tfm,
39 unsigned int bytes_from_page = min(l, ((unsigned int) 39 unsigned int bytes_from_page = min(l, ((unsigned int)
40 (PAGE_SIZE)) - 40 (PAGE_SIZE)) -
41 offset); 41 offset);
42 char *p = crypto_kmap(pg, 0) + offset; 42 char *p = kmap_atomic(pg) + offset;
43 43
44 tfm->__crt_alg->cra_digest.dia_update 44 tfm->__crt_alg->cra_digest.dia_update
45 (crypto_tfm_ctx(tfm), p, 45 (crypto_tfm_ctx(tfm), p,
46 bytes_from_page); 46 bytes_from_page);
47 crypto_kunmap(p, 0); 47 kunmap_atomic(p);
48 crypto_yield(tfm); 48 crypto_yield(tfm);
49 offset = 0; 49 offset = 0;
50 pg++; 50 pg++;
@@ -75,10 +75,10 @@ static void digest(struct crypto_tfm *tfm,
75 tfm->crt_digest.dit_init(tfm); 75 tfm->crt_digest.dit_init(tfm);
76 76
77 for (i = 0; i < nsg; i++) { 77 for (i = 0; i < nsg; i++) {
78 char *p = crypto_kmap(sg[i].page, 0) + sg[i].offset; 78 char *p = kmap_atomic(sg[i].page) + sg[i].offset;
79 tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm), 79 tfm->__crt_alg->cra_digest.dia_update(crypto_tfm_ctx(tfm),
80 p, sg[i].length); 80 p, sg[i].length);
81 crypto_kunmap(p, 0); 81 kunmap_atomic(p);
82 crypto_yield(tfm); 82 crypto_yield(tfm);
83 } 83 }
84 crypto_digest_final(tfm, out); 84 crypto_digest_final(tfm, out);
diff --git a/drivers/staging/rtl8192u/ieee80211/internal.h b/drivers/staging/rtl8192u/ieee80211/internal.h
index a7c096eb269f..bebe13ac53b7 100644
--- a/drivers/staging/rtl8192u/ieee80211/internal.h
+++ b/drivers/staging/rtl8192u/ieee80211/internal.h
@@ -23,23 +23,6 @@
23#include <asm/kmap_types.h> 23#include <asm/kmap_types.h>
24 24
25 25
26extern enum km_type crypto_km_types[];
27
28static inline enum km_type crypto_kmap_type(int out)
29{
30 return crypto_km_types[(in_softirq() ? 2 : 0) + out];
31}
32
33static inline void *crypto_kmap(struct page *page, int out)
34{
35 return kmap_atomic(page, crypto_kmap_type(out));
36}
37
38static inline void crypto_kunmap(void *vaddr, int out)
39{
40 kunmap_atomic(vaddr, crypto_kmap_type(out));
41}
42
43static inline void crypto_yield(struct crypto_tfm *tfm) 26static inline void crypto_yield(struct crypto_tfm *tfm)
44{ 27{
45 if (!in_softirq()) 28 if (!in_softirq())
diff --git a/drivers/staging/rtl8192u/ieee80211/kmap_types.h b/drivers/staging/rtl8192u/ieee80211/kmap_types.h
deleted file mode 100644
index de67bb01b5f5..000000000000
--- a/drivers/staging/rtl8192u/ieee80211/kmap_types.h
+++ /dev/null
@@ -1,20 +0,0 @@
1#ifndef __KMAP_TYPES_H
2
3#define __KMAP_TYPES_H
4
5
6enum km_type {
7 KM_BOUNCE_READ,
8 KM_SKB_SUNRPC_DATA,
9 KM_SKB_DATA_SOFTIRQ,
10 KM_USER0,
11 KM_USER1,
12 KM_BH_IRQ,
13 KM_SOFTIRQ0,
14 KM_SOFTIRQ1,
15 KM_TYPE_NR
16};
17
18#define _ASM_KMAP_TYPES_H
19
20#endif
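This driver-private km_type enum can be deleted because the core no longer indexes fixmap slots by type: kmap_atomic() now pushes onto a small per-CPU stack of slots, so mappings nest without the caller naming distinct slots, provided they are unmapped in reverse order. A sketch of legal nesting under the new scheme, mirroring the kernel's own copy_highpage():

    #include <linux/highmem.h>
    #include <linux/string.h>

    /* Before the series this needed two distinct slots, e.g.
     * KM_USER0 and KM_USER1; now the mappings simply stack. */
    static void copy_page_sketch(struct page *dst, struct page *src)
    {
            void *d = kmap_atomic(dst);
            void *s = kmap_atomic(src);

            memcpy(d, s, PAGE_SIZE);

            kunmap_atomic(s);       /* reverse order of mapping */
            kunmap_atomic(d);
    }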
diff --git a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
index 3543a6145046..8b73f6cefcf9 100644
--- a/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
+++ b/drivers/staging/rtl8192u/ieee80211/scatterwalk.c
@@ -13,8 +13,6 @@
13 * any later version. 13 * any later version.
14 * 14 *
15 */ 15 */
16#include "kmap_types.h"
17
18#include <linux/kernel.h> 16#include <linux/kernel.h>
19#include <linux/mm.h> 17#include <linux/mm.h>
20#include <linux/pagemap.h> 18#include <linux/pagemap.h>
@@ -23,13 +21,6 @@
23#include "internal.h" 21#include "internal.h"
24#include "scatterwalk.h" 22#include "scatterwalk.h"
25 23
26enum km_type crypto_km_types[] = {
27 KM_USER0,
28 KM_USER1,
29 KM_SOFTIRQ0,
30 KM_SOFTIRQ1,
31};
32
33void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch) 24void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
34{ 25{
35 if (nbytes <= walk->len_this_page && 26 if (nbytes <= walk->len_this_page &&
@@ -62,9 +53,9 @@ void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
62 walk->offset = sg->offset; 53 walk->offset = sg->offset;
63} 54}
64 55
65void scatterwalk_map(struct scatter_walk *walk, int out) 56void scatterwalk_map(struct scatter_walk *walk)
66{ 57{
67 walk->data = crypto_kmap(walk->page, out) + walk->offset; 58 walk->data = kmap_atomic(walk->page) + walk->offset;
68} 59}
69 60
70static void scatterwalk_pagedone(struct scatter_walk *walk, int out, 61static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
@@ -103,7 +94,7 @@ void scatterwalk_done(struct scatter_walk *walk, int out, int more)
103 * has been verified as multiple of the block size. 94 * has been verified as multiple of the block size.
104 */ 95 */
105int scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 96int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
106 size_t nbytes, int out) 97 size_t nbytes)
107{ 98{
108 if (buf != walk->data) { 99 if (buf != walk->data) {
109 while (nbytes > walk->len_this_page) { 100 while (nbytes > walk->len_this_page) {
@@ -111,9 +102,9 @@ int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
111 buf += walk->len_this_page; 102 buf += walk->len_this_page;
112 nbytes -= walk->len_this_page; 103 nbytes -= walk->len_this_page;
113 104
114 crypto_kunmap(walk->data, out); 105 kunmap_atomic(walk->data);
115 scatterwalk_pagedone(walk, out, 1); 106 scatterwalk_pagedone(walk, out, 1);
116 scatterwalk_map(walk, out); 107 scatterwalk_map(walk);
117 } 108 }
118 109
119 memcpy_dir(buf, walk->data, nbytes, out); 110 memcpy_dir(buf, walk->data, nbytes, out);
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 70734652f724..ed2c800b3a7e 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -455,14 +455,14 @@ static int zbud_decompress(struct page *page, struct zbud_hdr *zh)
455 } 455 }
456 ASSERT_SENTINEL(zh, ZBH); 456 ASSERT_SENTINEL(zh, ZBH);
457 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size()); 457 BUG_ON(zh->size == 0 || zh->size > zbud_max_buddy_size());
458 to_va = kmap_atomic(page, KM_USER0); 458 to_va = kmap_atomic(page);
459 size = zh->size; 459 size = zh->size;
460 from_va = zbud_data(zh, size); 460 from_va = zbud_data(zh, size);
461 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size, 461 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, from_va, size,
462 to_va, &out_len); 462 to_va, &out_len);
463 BUG_ON(ret); 463 BUG_ON(ret);
464 BUG_ON(out_len != PAGE_SIZE); 464 BUG_ON(out_len != PAGE_SIZE);
465 kunmap_atomic(to_va, KM_USER0); 465 kunmap_atomic(to_va);
466out: 466out:
467 spin_unlock(&zbpg->lock); 467 spin_unlock(&zbpg->lock);
468 return ret; 468 return ret;
@@ -753,10 +753,10 @@ static void zv_decompress(struct page *page, void *handle)
753 zv = zs_map_object(zcache_host.zspool, handle); 753 zv = zs_map_object(zcache_host.zspool, handle);
754 BUG_ON(zv->size == 0); 754 BUG_ON(zv->size == 0);
755 ASSERT_SENTINEL(zv, ZVH); 755 ASSERT_SENTINEL(zv, ZVH);
756 to_va = kmap_atomic(page, KM_USER0); 756 to_va = kmap_atomic(page);
757 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv), 757 ret = zcache_comp_op(ZCACHE_COMPOP_DECOMPRESS, (char *)zv + sizeof(*zv),
758 zv->size, to_va, &clen); 758 zv->size, to_va, &clen);
759 kunmap_atomic(to_va, KM_USER0); 759 kunmap_atomic(to_va);
760 zs_unmap_object(zcache_host.zspool, handle); 760 zs_unmap_object(zcache_host.zspool, handle);
761 BUG_ON(ret); 761 BUG_ON(ret);
762 BUG_ON(clen != PAGE_SIZE); 762 BUG_ON(clen != PAGE_SIZE);
@@ -1334,13 +1334,13 @@ static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
1334 if (unlikely(dmem == NULL)) 1334 if (unlikely(dmem == NULL))
1335 goto out; /* no buffer or no compressor so can't compress */ 1335 goto out; /* no buffer or no compressor so can't compress */
1336 *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER; 1336 *out_len = PAGE_SIZE << ZCACHE_DSTMEM_ORDER;
1337 from_va = kmap_atomic(from, KM_USER0); 1337 from_va = kmap_atomic(from);
1338 mb(); 1338 mb();
1339 ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem, 1339 ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
1340 out_len); 1340 out_len);
1341 BUG_ON(ret); 1341 BUG_ON(ret);
1342 *out_va = dmem; 1342 *out_va = dmem;
1343 kunmap_atomic(from_va, KM_USER0); 1343 kunmap_atomic(from_va);
1344 ret = 1; 1344 ret = 1;
1345out: 1345out:
1346 return ret; 1346 return ret;
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 7f138196b3c9..685d612a627b 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -175,9 +175,9 @@ static void handle_zero_page(struct bio_vec *bvec)
175 struct page *page = bvec->bv_page; 175 struct page *page = bvec->bv_page;
176 void *user_mem; 176 void *user_mem;
177 177
178 user_mem = kmap_atomic(page, KM_USER0); 178 user_mem = kmap_atomic(page);
179 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len); 179 memset(user_mem + bvec->bv_offset, 0, bvec->bv_len);
180 kunmap_atomic(user_mem, KM_USER0); 180 kunmap_atomic(user_mem);
181 181
182 flush_dcache_page(page); 182 flush_dcache_page(page);
183} 183}
@@ -188,12 +188,12 @@ static void handle_uncompressed_page(struct zram *zram, struct bio_vec *bvec,
188 struct page *page = bvec->bv_page; 188 struct page *page = bvec->bv_page;
189 unsigned char *user_mem, *cmem; 189 unsigned char *user_mem, *cmem;
190 190
191 user_mem = kmap_atomic(page, KM_USER0); 191 user_mem = kmap_atomic(page);
192 cmem = kmap_atomic(zram->table[index].handle, KM_USER1); 192 cmem = kmap_atomic(zram->table[index].handle);
193 193
194 memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len); 194 memcpy(user_mem + bvec->bv_offset, cmem + offset, bvec->bv_len);
195 kunmap_atomic(cmem, KM_USER1); 195 kunmap_atomic(cmem);
196 kunmap_atomic(user_mem, KM_USER0); 196 kunmap_atomic(user_mem);
197 197
198 flush_dcache_page(page); 198 flush_dcache_page(page);
199} 199}
@@ -242,7 +242,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
242 } 242 }
243 } 243 }
244 244
245 user_mem = kmap_atomic(page, KM_USER0); 245 user_mem = kmap_atomic(page);
246 if (!is_partial_io(bvec)) 246 if (!is_partial_io(bvec))
247 uncmem = user_mem; 247 uncmem = user_mem;
248 clen = PAGE_SIZE; 248 clen = PAGE_SIZE;
@@ -260,7 +260,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
260 } 260 }
261 261
262 zs_unmap_object(zram->mem_pool, zram->table[index].handle); 262 zs_unmap_object(zram->mem_pool, zram->table[index].handle);
263 kunmap_atomic(user_mem, KM_USER0); 263 kunmap_atomic(user_mem);
264 264
265 /* Should NEVER happen. Return bio error if it does. */ 265 /* Should NEVER happen. Return bio error if it does. */
266 if (unlikely(ret != LZO_E_OK)) { 266 if (unlikely(ret != LZO_E_OK)) {
@@ -292,7 +292,7 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
292 /* Page is stored uncompressed since it's incompressible */ 292 /* Page is stored uncompressed since it's incompressible */
293 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) { 293 if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
294 memcpy(mem, cmem, PAGE_SIZE); 294 memcpy(mem, cmem, PAGE_SIZE);
295 kunmap_atomic(cmem, KM_USER0); 295 kunmap_atomic(cmem);
296 return 0; 296 return 0;
297 } 297 }
298 298
@@ -351,7 +351,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
351 zram_test_flag(zram, index, ZRAM_ZERO)) 351 zram_test_flag(zram, index, ZRAM_ZERO))
352 zram_free_page(zram, index); 352 zram_free_page(zram, index);
353 353
354 user_mem = kmap_atomic(page, KM_USER0); 354 user_mem = kmap_atomic(page);
355 355
356 if (is_partial_io(bvec)) 356 if (is_partial_io(bvec))
357 memcpy(uncmem + offset, user_mem + bvec->bv_offset, 357 memcpy(uncmem + offset, user_mem + bvec->bv_offset,
@@ -360,7 +360,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		uncmem = user_mem;
 
 	if (page_zero_filled(uncmem)) {
-		kunmap_atomic(user_mem, KM_USER0);
+		kunmap_atomic(user_mem);
 		if (is_partial_io(bvec))
 			kfree(uncmem);
 		zram_stat_inc(&zram->stats.pages_zero);
@@ -372,7 +372,7 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
 			       zram->compress_workmem);
 
-	kunmap_atomic(user_mem, KM_USER0);
+	kunmap_atomic(user_mem);
 	if (is_partial_io(bvec))
 		kfree(uncmem);
 
@@ -400,8 +400,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
 		zram_set_flag(zram, index, ZRAM_UNCOMPRESSED);
 		zram_stat_inc(&zram->stats.pages_expand);
 		handle = page_store;
-		src = kmap_atomic(page, KM_USER0);
-		cmem = kmap_atomic(page_store, KM_USER1);
+		src = kmap_atomic(page);
+		cmem = kmap_atomic(page_store);
 		goto memstore;
 	}
 
@@ -427,8 +427,8 @@ memstore:
 	memcpy(cmem, src, clen);
 
 	if (unlikely(zram_test_flag(zram, index, ZRAM_UNCOMPRESSED))) {
-		kunmap_atomic(cmem, KM_USER1);
-		kunmap_atomic(src, KM_USER0);
+		kunmap_atomic(cmem);
+		kunmap_atomic(src);
 	} else {
 		zs_unmap_object(zram->mem_pool, handle);
 	}
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index cd5cd95812bb..929cc9364c8a 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -2344,7 +2344,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
 
 	offset = 0;
 	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
-		addr = kmap_atomic(sg_page(sg), KM_USER0);
+		addr = kmap_atomic(sg_page(sg));
 		if (!addr)
 			goto out;
 
@@ -2352,7 +2352,7 @@ static void transport_xor_callback(struct se_cmd *cmd)
 			*(addr + sg->offset + i) ^= *(buf + offset + i);
 
 		offset += sg->length;
-		kunmap_atomic(addr, KM_USER0);
+		kunmap_atomic(addr);
 	}
 
 out:
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index d8cabc21036d..2b693eefac55 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -146,14 +146,13 @@ int ft_queue_data_in(struct se_cmd *se_cmd)
 				PAGE_SIZE << compound_order(page);
 		} else {
 			BUG_ON(!page);
-			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
-					   KM_SOFTIRQ0);
+			from = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
 			page_addr = from;
 			from += mem_off & ~PAGE_MASK;
 			tlen = min(tlen, (size_t)(PAGE_SIZE -
 						(mem_off & ~PAGE_MASK)));
 			memcpy(to, from, tlen);
-			kunmap_atomic(page_addr, KM_SOFTIRQ0);
+			kunmap_atomic(page_addr);
 			to += tlen;
 		}
 
@@ -291,14 +290,13 @@ void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
 
 		tlen = min(mem_len, frame_len);
 
-		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
-				 KM_SOFTIRQ0);
+		to = kmap_atomic(page + (mem_off >> PAGE_SHIFT));
 		page_addr = to;
 		to += mem_off & ~PAGE_MASK;
 		tlen = min(tlen, (size_t)(PAGE_SIZE -
 					  (mem_off & ~PAGE_MASK)));
 		memcpy(to, from, tlen);
-		kunmap_atomic(page_addr, KM_SOFTIRQ0);
+		kunmap_atomic(page_addr);
 
 		from += tlen;
 		frame_len -= tlen;
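Both tcm_fc hunks also collapse a two-line call into one, which makes the underlying idiom easier to see: index into a run of pages to find the page that actually contains the byte offset, map only that page, then add the in-page remainder. A reduced sketch of the idiom under the new API (peek_byte() is a hypothetical helper, not in this diff):

	#include <linux/highmem.h>
	#include <linux/types.h>

	/* Hypothetical: fetch the byte at offset 'off' into the run of
	 * pages starting at 'pages'. Only the page holding it is mapped. */
	static u8 peek_byte(struct page *pages, size_t off)
	{
		void *base = kmap_atomic(pages + (off >> PAGE_SHIFT));
		u8 val = *(u8 *)(base + (off & ~PAGE_MASK));

		kunmap_atomic(base);	/* pass back the mapping base */
		return val;
	}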
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index c14c42b95ab8..bdb2d6436b2b 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -937,9 +937,9 @@ static int set_bit_to_user(int nr, void __user *addr)
 	if (r < 0)
 		return r;
 	BUG_ON(r != 1);
-	base = kmap_atomic(page, KM_USER0);
+	base = kmap_atomic(page);
 	set_bit(bit, base);
-	kunmap_atomic(base, KM_USER0);
+	kunmap_atomic(base);
 	set_page_dirty_lock(page);
 	put_page(page);
 	return 0;
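The vhost hunk is a clean example of the transient-map lifecycle around a page pinned with get_user_pages(): map, modify, unmap, then mark the page dirty and drop the pin. Sketched below with a hypothetical helper; error handling is trimmed and the name is illustrative:

	#include <linux/highmem.h>
	#include <linux/bitops.h>
	#include <linux/mm.h>

	/* Hypothetical: set one bit in a pinned user page, then tell the
	 * VM its contents changed and release the get_user_pages() ref. */
	static void poke_pinned_page(struct page *page, int bit)
	{
		void *base = kmap_atomic(page);

		set_bit(bit, base);
		kunmap_atomic(base);

		set_page_dirty_lock(page);	/* contents changed */
		put_page(page);			/* drop the pin */
	}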
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 2f213d109c21..b960ff05ea0b 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -365,10 +365,10 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
 		_debug("extract data");
 		if (call->count > 0) {
 			page = call->reply3;
-			buffer = kmap_atomic(page, KM_USER0);
+			buffer = kmap_atomic(page);
 			ret = afs_extract_data(call, skb, last, buffer,
 					       call->count);
-			kunmap_atomic(buffer, KM_USER0);
+			kunmap_atomic(buffer);
 			switch (ret) {
 			case 0:		break;
 			case -EAGAIN:	return 0;
@@ -411,9 +411,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
 	if (call->count < PAGE_SIZE) {
 		_debug("clear");
 		page = call->reply3;
-		buffer = kmap_atomic(page, KM_USER0);
+		buffer = kmap_atomic(page);
 		memset(buffer + call->count, 0, PAGE_SIZE - call->count);
-		kunmap_atomic(buffer, KM_USER0);
+		kunmap_atomic(buffer);
 	}
 
 	_leave(" = 0 [done]");
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 8f4ce2658b7d..298cf8919ec7 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -200,9 +200,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
 		if (PageError(page))
 			goto error;
 
-		buf = kmap_atomic(page, KM_USER0);
+		buf = kmap_atomic(page);
 		memcpy(devname, buf, size);
-		kunmap_atomic(buf, KM_USER0);
+		kunmap_atomic(buf);
 		page_cache_release(page);
 		page = NULL;
 	}
diff --git a/fs/aio.c b/fs/aio.c
index b9d64d89a043..5b600cb8779e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
 
 	info->nr = nr_events;		/* trusted copy */
 
-	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+	ring = kmap_atomic(info->ring_pages[0]);
 	ring->nr = nr_events;	/* user copy */
 	ring->id = ctx->user_id;
 	ring->head = ring->tail = 0;
@@ -168,32 +168,32 @@ static int aio_setup_ring(struct kioctx *ctx)
 	ring->compat_features = AIO_RING_COMPAT_FEATURES;
 	ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
 	ring->header_length = sizeof(struct aio_ring);
-	kunmap_atomic(ring, KM_USER0);
+	kunmap_atomic(ring);
 
 	return 0;
 }
 
 
 /* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic(, km). Release the pointer with put_aio_ring_event();
+ * kmap_atomic(). Release the pointer with put_aio_ring_event();
  */
 #define AIO_EVENTS_PER_PAGE	(PAGE_SIZE / sizeof(struct io_event))
 #define AIO_EVENTS_FIRST_PAGE	((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET	(AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)
 
-#define aio_ring_event(info, nr, km) ({					\
+#define aio_ring_event(info, nr) ({					\
 	unsigned pos = (nr) + AIO_EVENTS_OFFSET;			\
 	struct io_event *__event;					\
 	__event = kmap_atomic(						\
-			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
+			(info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
 	__event += pos % AIO_EVENTS_PER_PAGE;				\
 	__event;							\
 })
 
-#define put_aio_ring_event(event, km) do {	\
+#define put_aio_ring_event(event) do {		\
 	struct io_event *__event = (event);	\
 	(void)__event;				\
-	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
+	kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
 } while(0)
 
 static void ctx_rcu_free(struct rcu_head *head)
@@ -1019,10 +1019,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	if (kiocbIsCancelled(iocb))
 		goto put_rq;
 
-	ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
+	ring = kmap_atomic(info->ring_pages[0]);
 
 	tail = info->tail;
-	event = aio_ring_event(info, tail, KM_IRQ0);
+	event = aio_ring_event(info, tail);
 	if (++tail >= info->nr)
 		tail = 0;
 
@@ -1043,8 +1043,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
 	info->tail = tail;
 	ring->tail = tail;
 
-	put_aio_ring_event(event, KM_IRQ0);
-	kunmap_atomic(ring, KM_IRQ1);
+	put_aio_ring_event(event);
+	kunmap_atomic(ring);
 
 	pr_debug("added to ring %p at [%lu]\n", iocb, tail);
 
@@ -1089,7 +1089,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
 	unsigned long head;
 	int ret = 0;
 
-	ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+	ring = kmap_atomic(info->ring_pages[0]);
 	dprintk("in aio_read_evt h%lu t%lu m%lu\n",
 		 (unsigned long)ring->head, (unsigned long)ring->tail,
 		 (unsigned long)ring->nr);
@@ -1101,18 +1101,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
 
 	head = ring->head % info->nr;
 	if (head != ring->tail) {
-		struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+		struct io_event *evp = aio_ring_event(info, head);
 		*ent = *evp;
 		head = (head + 1) % info->nr;
 		smp_mb(); /* finish reading the event before updatng the head */
 		ring->head = head;
 		ret = 1;
-		put_aio_ring_event(evp, KM_USER1);
+		put_aio_ring_event(evp);
 	}
 	spin_unlock(&info->ring_lock);
 
 out:
-	kunmap_atomic(ring, KM_USER0);
+	kunmap_atomic(ring);
 	dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
 		 (unsigned long)ring->head, (unsigned long)ring->tail);
 	return ret;
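The aio conversion shows why the km argument could be dropped everywhere: kunmap_atomic() only needs some address inside the mapped page, so put_aio_ring_event() recovers the mapping from the interior event pointer by masking with PAGE_MASK. A reduced sketch of that trick, simplified from the macros above (helper names are illustrative):

	#include <linux/highmem.h>
	#include <linux/aio_abi.h>	/* struct io_event */

	/* Map the ring page holding event slot 'nr'; events_per_page is
	 * PAGE_SIZE / sizeof(struct io_event) in the real code. */
	static struct io_event *map_event(struct page **ring_pages, unsigned nr,
					  unsigned events_per_page)
	{
		struct io_event *ev = kmap_atomic(ring_pages[nr / events_per_page]);

		return ev + nr % events_per_page;
	}

	/* Undo map_event(): round the interior pointer down to the base. */
	static void unmap_event(struct io_event *ev)
	{
		kunmap_atomic((void *)((unsigned long)ev & PAGE_MASK));
	}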
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index c2183f3917cd..e85c04b9f61c 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -357,7 +357,7 @@ static void bio_integrity_generate(struct bio *bio)
 	bix.sector_size = bi->sector_size;
 
 	bio_for_each_segment(bv, bio, i) {
-		void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
+		void *kaddr = kmap_atomic(bv->bv_page);
 		bix.data_buf = kaddr + bv->bv_offset;
 		bix.data_size = bv->bv_len;
 		bix.prot_buf = prot_buf;
@@ -371,7 +371,7 @@ static void bio_integrity_generate(struct bio *bio)
 		total += sectors * bi->tuple_size;
 		BUG_ON(total > bio->bi_integrity->bip_size);
 
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 }
 
@@ -498,7 +498,7 @@ static int bio_integrity_verify(struct bio *bio)
 	bix.sector_size = bi->sector_size;
 
 	bio_for_each_segment(bv, bio, i) {
-		void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
+		void *kaddr = kmap_atomic(bv->bv_page);
 		bix.data_buf = kaddr + bv->bv_offset;
 		bix.data_size = bv->bv_len;
 		bix.prot_buf = prot_buf;
@@ -507,7 +507,7 @@ static int bio_integrity_verify(struct bio *bio)
 		ret = bi->verify_fn(&bix);
 
 		if (ret) {
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			return ret;
 		}
 
@@ -517,7 +517,7 @@ static int bio_integrity_verify(struct bio *bio)
 		total += sectors * bi->tuple_size;
 		BUG_ON(total > bio->bi_integrity->bip_size);
 
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 
 	return ret;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d02c27cd14c7..b805afb37fa8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -120,10 +120,10 @@ static int check_compressed_csum(struct inode *inode,
 		page = cb->compressed_pages[i];
 		csum = ~(u32)0;
 
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
 		btrfs_csum_final(csum, (char *)&csum);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 
 		if (csum != *cb_sum) {
 			printk(KERN_INFO "btrfs csum failed ino %llu "
@@ -521,10 +521,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
 		if (zero_offset) {
 			int zeros;
 			zeros = PAGE_CACHE_SIZE - zero_offset;
-			userpage = kmap_atomic(page, KM_USER0);
+			userpage = kmap_atomic(page);
 			memset(userpage + zero_offset, 0, zeros);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage, KM_USER0);
+			kunmap_atomic(userpage);
 		}
 	}
 
@@ -993,9 +993,9 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
 		bytes = min(PAGE_CACHE_SIZE - *pg_offset,
 			    PAGE_CACHE_SIZE - buf_offset);
 		bytes = min(bytes, working_bytes);
-		kaddr = kmap_atomic(page_out, KM_USER0);
+		kaddr = kmap_atomic(page_out);
 		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		flush_dcache_page(page_out);
 
 		*pg_offset += bytes;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a55fbe6252de..2862454bcdb3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2546,10 +2546,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 
 		if (zero_offset) {
 			iosize = PAGE_CACHE_SIZE - zero_offset;
-			userpage = kmap_atomic(page, KM_USER0);
+			userpage = kmap_atomic(page);
 			memset(userpage + zero_offset, 0, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage, KM_USER0);
+			kunmap_atomic(userpage);
 		}
 	}
 	while (cur <= end) {
@@ -2558,10 +2558,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 		struct extent_state *cached = NULL;
 
 		iosize = PAGE_CACHE_SIZE - pg_offset;
-		userpage = kmap_atomic(page, KM_USER0);
+		userpage = kmap_atomic(page);
 		memset(userpage + pg_offset, 0, iosize);
 		flush_dcache_page(page);
-		kunmap_atomic(userpage, KM_USER0);
+		kunmap_atomic(userpage);
 		set_extent_uptodate(tree, cur, cur + iosize - 1,
 				    &cached, GFP_NOFS);
 		unlock_extent_cached(tree, cur, cur + iosize - 1,
@@ -2607,10 +2607,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
 			char *userpage;
 			struct extent_state *cached = NULL;
 
-			userpage = kmap_atomic(page, KM_USER0);
+			userpage = kmap_atomic(page);
 			memset(userpage + pg_offset, 0, iosize);
 			flush_dcache_page(page);
-			kunmap_atomic(userpage, KM_USER0);
+			kunmap_atomic(userpage);
 
 			set_extent_uptodate(tree, cur, cur + iosize - 1,
 					    &cached, GFP_NOFS);
@@ -2756,10 +2756,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
 	if (page->index == end_index) {
 		char *userpage;
 
-		userpage = kmap_atomic(page, KM_USER0);
+		userpage = kmap_atomic(page);
 		memset(userpage + pg_offset, 0,
 		       PAGE_CACHE_SIZE - pg_offset);
-		kunmap_atomic(userpage, KM_USER0);
+		kunmap_atomic(userpage);
 		flush_dcache_page(page);
 	}
 	pg_offset = 0;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index c7fb3a4247d3..078b4fd54500 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -447,13 +447,13 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
 			sums->bytenr = ordered->start;
 		}
 
-		data = kmap_atomic(bvec->bv_page, KM_USER0);
+		data = kmap_atomic(bvec->bv_page);
 		sector_sum->sum = ~(u32)0;
 		sector_sum->sum = btrfs_csum_data(root,
 						  data + bvec->bv_offset,
 						  sector_sum->sum,
 						  bvec->bv_len);
-		kunmap_atomic(data, KM_USER0);
+		kunmap_atomic(data);
 		btrfs_csum_final(sector_sum->sum,
 				 (char *)&sector_sum->sum);
 		sector_sum->bytenr = disk_bytenr;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 892b34785ccc..3a0b5c1f9d31 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -173,9 +173,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 		cur_size = min_t(unsigned long, compressed_size,
 			       PAGE_CACHE_SIZE);
 
-		kaddr = kmap_atomic(cpage, KM_USER0);
+		kaddr = kmap_atomic(cpage);
 		write_extent_buffer(leaf, kaddr, ptr, cur_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 
 		i++;
 		ptr += cur_size;
@@ -187,10 +187,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
 		page = find_get_page(inode->i_mapping,
 				     start >> PAGE_CACHE_SHIFT);
 		btrfs_set_file_extent_compression(leaf, ei, 0);
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		offset = start & (PAGE_CACHE_SIZE - 1);
 		write_extent_buffer(leaf, kaddr + offset, ptr, size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		page_cache_release(page);
 	}
 	btrfs_mark_buffer_dirty(leaf);
@@ -422,10 +422,10 @@ again:
 		 * sending it down to disk
 		 */
 		if (offset) {
-			kaddr = kmap_atomic(page, KM_USER0);
+			kaddr = kmap_atomic(page);
 			memset(kaddr + offset, 0,
 			       PAGE_CACHE_SIZE - offset);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 		}
 		will_compress = 1;
 	}
@@ -1873,7 +1873,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	} else {
 		ret = get_state_private(io_tree, start, &private);
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (ret)
 		goto zeroit;
 
@@ -1882,7 +1882,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
 	if (csum != private)
 		goto zeroit;
 
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 good:
 	return 0;
 
@@ -1894,7 +1894,7 @@ zeroit:
 		       (unsigned long long)private);
 	memset(kaddr + offset, 1, end - start + 1);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	if (private == 0)
 		return 0;
 	return -EIO;
@@ -4937,12 +4937,12 @@ static noinline int uncompress_inline(struct btrfs_path *path,
 	ret = btrfs_decompress(compress_type, tmp, page,
 			       extent_offset, inline_size, max_size);
 	if (ret) {
-		char *kaddr = kmap_atomic(page, KM_USER0);
+		char *kaddr = kmap_atomic(page);
 		unsigned long copy_size = min_t(u64,
 				  PAGE_CACHE_SIZE - pg_offset,
 				  max_size - extent_offset);
 		memset(kaddr + pg_offset, 0, copy_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 	kfree(tmp);
 	return 0;
@@ -5719,11 +5719,11 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
 			unsigned long flags;
 
 			local_irq_save(flags);
-			kaddr = kmap_atomic(page, KM_IRQ0);
+			kaddr = kmap_atomic(page);
 			csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
 					       csum, bvec->bv_len);
 			btrfs_csum_final(csum, (char *)&csum);
-			kunmap_atomic(kaddr, KM_IRQ0);
+			kunmap_atomic(kaddr);
 			local_irq_restore(flags);
 
 			flush_dcache_page(bvec->bv_page);
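The btrfs_endio_direct_read hunk above is the case worth pausing on: it runs in I/O completion context, which is why it previously had to pick KM_IRQ0 instead of KM_USER0. With slots managed on a per-CPU stack, interrupt and process context now make the identical call; only the pre-existing local_irq_save() around the checksum remains. A hedged sketch of the context-independent pattern (csum_bvec() and the callback signature are illustrative, not btrfs API):

	#include <linux/highmem.h>
	#include <linux/bio.h>

	/* Hypothetical endio-style helper: checksum one bio_vec segment.
	 * Usable in any context; no KM_* slot choice is needed anymore. */
	static u32 csum_bvec(struct bio_vec *bvec,
			     u32 (*csum_fn)(const void *data, size_t len))
	{
		void *kaddr = kmap_atomic(bvec->bv_page);
		u32 csum = csum_fn(kaddr + bvec->bv_offset, bvec->bv_len);

		kunmap_atomic(kaddr);
		return csum;
	}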
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index a178f5ebea78..743b86fa4fcb 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -411,9 +411,9 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
 
 	bytes = min_t(unsigned long, destlen, out_len - start_byte);
 
-	kaddr = kmap_atomic(dest_page, KM_USER0);
+	kaddr = kmap_atomic(dest_page);
 	memcpy(kaddr, workspace->buf + start_byte, bytes);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 out:
 	return ret;
 }
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index abc0fbffa510..390e7102b0ff 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -591,7 +591,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
 	u64 flags = sbio->spag[ix].flags;
 
 	page = sbio->bio->bi_io_vec[ix].bv_page;
-	buffer = kmap_atomic(page, KM_USER0);
+	buffer = kmap_atomic(page);
 	if (flags & BTRFS_EXTENT_FLAG_DATA) {
 		ret = scrub_checksum_data(sbio->sdev,
 					  sbio->spag + ix, buffer);
@@ -603,7 +603,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
 	} else {
 		WARN_ON(1);
 	}
-	kunmap_atomic(buffer, KM_USER0);
+	kunmap_atomic(buffer);
 
 	return ret;
 }
@@ -792,7 +792,7 @@ static void scrub_checksum(struct btrfs_work *work)
 	}
 	for (i = 0; i < sbio->count; ++i) {
 		page = sbio->bio->bi_io_vec[i].bv_page;
-		buffer = kmap_atomic(page, KM_USER0);
+		buffer = kmap_atomic(page);
 		flags = sbio->spag[i].flags;
 		logical = sbio->logical + i * PAGE_SIZE;
 		ret = 0;
@@ -807,7 +807,7 @@ static void scrub_checksum(struct btrfs_work *work)
 		} else {
 			WARN_ON(1);
 		}
-		kunmap_atomic(buffer, KM_USER0);
+		kunmap_atomic(buffer);
 		if (ret) {
 			ret = scrub_recheck_error(sbio, i);
 			if (!ret) {
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index faccd47c6c46..92c20654cc55 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -370,9 +370,9 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
 			    PAGE_CACHE_SIZE - buf_offset);
 		bytes = min(bytes, bytes_left);
 
-		kaddr = kmap_atomic(dest_page, KM_USER0);
+		kaddr = kmap_atomic(dest_page);
 		memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 
 		pg_offset += bytes;
 		bytes_left -= bytes;
diff --git a/fs/exec.c b/fs/exec.c
index 95551c6da090..3908544f5d18 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1342,13 +1342,13 @@ int remove_arg_zero(struct linux_binprm *bprm)
 			ret = -EFAULT;
 			goto out;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 
 		for (; offset < PAGE_SIZE && kaddr[offset];
 				offset++, bprm->p++)
 			;
 
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		put_arg_page(page);
 
 		if (offset == PAGE_SIZE)
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 80405836ba6e..c61e62ac231c 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -597,7 +597,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
 		goto fail;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	de = (struct exofs_dir_entry *)kaddr;
 	de->name_len = 1;
 	de->rec_len = cpu_to_le16(EXOFS_DIR_REC_LEN(1));
@@ -611,7 +611,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
 	de->inode_no = cpu_to_le64(parent->i_ino);
 	memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
 	exofs_set_de_type(de, inode);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	err = exofs_commit_chunk(page, 0, chunk_size);
 fail:
 	page_cache_release(page);
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index d37df352d324..0f4f5c929257 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -645,7 +645,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
 		unlock_page(page);
 		goto fail;
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memset(kaddr, 0, chunk_size);
 	de = (struct ext2_dir_entry_2 *)kaddr;
 	de->name_len = 1;
@@ -660,7 +660,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
 	de->inode = cpu_to_le32(parent->i_ino);
 	memcpy (de->name, "..\0", 4);
 	ext2_set_de_type (de, inode);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	err = ext2_commit_chunk(page, 0, chunk_size);
 fail:
 	page_cache_release(page);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5f3368ab0fa9..7df2b5e8fbe1 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -838,10 +838,10 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
 			}
 		}
 		if (page) {
-			void *mapaddr = kmap_atomic(page, KM_USER0);
+			void *mapaddr = kmap_atomic(page);
 			void *buf = mapaddr + offset;
 			offset += fuse_copy_do(cs, &buf, &count);
-			kunmap_atomic(mapaddr, KM_USER0);
+			kunmap_atomic(mapaddr);
 		} else
 			offset += fuse_copy_do(cs, NULL, &count);
 	}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4a199fd93fbd..a841868bf9ce 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1887,11 +1887,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 		    in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
 			goto out;
 
-		vaddr = kmap_atomic(pages[0], KM_USER0);
+		vaddr = kmap_atomic(pages[0]);
 		err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
 					    transferred, in_iovs + out_iovs,
 					    (flags & FUSE_IOCTL_COMPAT) != 0);
-		kunmap_atomic(vaddr, KM_USER0);
+		kunmap_atomic(vaddr);
 		if (err)
 			goto out;
 
diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 501e5cba09b3..38b7a74a0f91 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -434,12 +434,12 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
 	if (error)
 		return error;
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
 		dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
 	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
 	memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	flush_dcache_page(page);
 	brelse(dibh);
 	SetPageUptodate(page);
@@ -542,9 +542,9 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
 		page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
 		if (IS_ERR(page))
 			return PTR_ERR(page);
-		p = kmap_atomic(page, KM_USER0);
+		p = kmap_atomic(page);
 		memcpy(buf + copied, p + offset, amt);
-		kunmap_atomic(p, KM_USER0);
+		kunmap_atomic(p);
 		mark_page_accessed(page);
 		page_cache_release(page);
 		copied += amt;
@@ -788,11 +788,11 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
 	unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);
 
 	BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memcpy(buf + pos, kaddr + pos, copied);
 	memset(kaddr + pos + copied, 0, len - copied);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (!PageUptodate(page))
 		SetPageUptodate(page);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 0301be655b12..df7c6e8d0764 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -553,11 +553,11 @@ static void gfs2_check_magic(struct buffer_head *bh)
 	__be32 *ptr;
 
 	clear_buffer_escaped(bh);
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	ptr = kaddr + bh_offset(bh);
 	if (*ptr == cpu_to_be32(GFS2_MAGIC))
 		set_buffer_escaped(bh);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
@@ -594,10 +594,10 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
 		if (buffer_escaped(bd->bd_bh)) {
 			void *kaddr;
 			bh1 = gfs2_log_get_buf(sdp);
-			kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
+			kaddr = kmap_atomic(bd->bd_bh->b_page);
 			memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
 			       bh1->b_size);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			*(__be32 *)bh1->b_data = 0;
 			clear_buffer_escaped(bd->bd_bh);
 			unlock_buffer(bd->bd_bh);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a45b21b03915..c0f8904f0860 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -720,12 +720,12 @@ get_a_page:
 
 	gfs2_trans_add_bh(ip->i_gl, bh, 0);
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
 		nbytes = PAGE_CACHE_SIZE - offset;
 	memcpy(kaddr + offset, ptr, nbytes);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	unlock_page(page);
 	page_cache_release(page);
 
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 59c09f9541b5..e49e81bb80ef 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -328,7 +328,7 @@ repeat:
 		new_offset = offset_in_page(jh2bh(jh_in)->b_data);
 	}
 
-	mapped_data = kmap_atomic(new_page, KM_USER0);
+	mapped_data = kmap_atomic(new_page);
 	/*
 	 * Check for escaping
 	 */
@@ -337,7 +337,7 @@ repeat:
 		need_copy_out = 1;
 		do_escape = 1;
 	}
-	kunmap_atomic(mapped_data, KM_USER0);
+	kunmap_atomic(mapped_data);
 
 	/*
 	 * Do we need to do a data copy?
@@ -354,9 +354,9 @@ repeat:
 	}
 
 	jh_in->b_frozen_data = tmp;
-	mapped_data = kmap_atomic(new_page, KM_USER0);
+	mapped_data = kmap_atomic(new_page);
 	memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
-	kunmap_atomic(mapped_data, KM_USER0);
+	kunmap_atomic(mapped_data);
 
 	new_page = virt_to_page(tmp);
 	new_offset = offset_in_page(tmp);
@@ -368,9 +368,9 @@ repeat:
 	 * copying, we can finally do so.
 	 */
 	if (do_escape) {
-		mapped_data = kmap_atomic(new_page, KM_USER0);
+		mapped_data = kmap_atomic(new_page);
 		*((unsigned int *)(mapped_data + new_offset)) = 0;
-		kunmap_atomic(mapped_data, KM_USER0);
+		kunmap_atomic(mapped_data);
 	}
 
 	set_bh_page(new_bh, new_page, new_offset);
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 7fce94b04bc3..b2a7e5244e39 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -718,9 +718,9 @@ done:
718 "Possible IO failure.\n"); 718 "Possible IO failure.\n");
719 page = jh2bh(jh)->b_page; 719 page = jh2bh(jh)->b_page;
720 offset = offset_in_page(jh2bh(jh)->b_data); 720 offset = offset_in_page(jh2bh(jh)->b_data);
721 source = kmap_atomic(page, KM_USER0); 721 source = kmap_atomic(page);
722 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); 722 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
723 kunmap_atomic(source, KM_USER0); 723 kunmap_atomic(source);
724 } 724 }
725 jbd_unlock_bh_state(bh); 725 jbd_unlock_bh_state(bh);
726 726
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 5069b8475150..c067a8cae63b 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -286,10 +286,10 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
 	char *addr;
 	__u32 checksum;
 
-	addr = kmap_atomic(page, KM_USER0);
+	addr = kmap_atomic(page);
 	checksum = crc32_be(crc32_sum,
 		(void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 
 	return checksum;
 }
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c0a5f9f1b127..5ff8940b8f02 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -345,7 +345,7 @@ repeat:
 		new_offset = offset_in_page(jh2bh(jh_in)->b_data);
 	}
 
-	mapped_data = kmap_atomic(new_page, KM_USER0);
+	mapped_data = kmap_atomic(new_page);
 	/*
 	 * Fire data frozen trigger if data already wasn't frozen. Do this
 	 * before checking for escaping, as the trigger may modify the magic
@@ -364,7 +364,7 @@ repeat:
 		need_copy_out = 1;
 		do_escape = 1;
 	}
-	kunmap_atomic(mapped_data, KM_USER0);
+	kunmap_atomic(mapped_data);
 
 	/*
 	 * Do we need to do a data copy?
@@ -385,9 +385,9 @@ repeat:
 	}
 
 	jh_in->b_frozen_data = tmp;
-	mapped_data = kmap_atomic(new_page, KM_USER0);
+	mapped_data = kmap_atomic(new_page);
 	memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
-	kunmap_atomic(mapped_data, KM_USER0);
+	kunmap_atomic(mapped_data);
 
 	new_page = virt_to_page(tmp);
 	new_offset = offset_in_page(tmp);
@@ -406,9 +406,9 @@ repeat:
 	 * copying, we can finally do so.
 	 */
 	if (do_escape) {
-		mapped_data = kmap_atomic(new_page, KM_USER0);
+		mapped_data = kmap_atomic(new_page);
 		*((unsigned int *)(mapped_data + new_offset)) = 0;
-		kunmap_atomic(mapped_data, KM_USER0);
+		kunmap_atomic(mapped_data);
 	}
 
 	set_bh_page(new_bh, new_page, new_offset);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 35ae096bed5d..e5aba56e1fd5 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -783,12 +783,12 @@ done:
783 "Possible IO failure.\n"); 783 "Possible IO failure.\n");
784 page = jh2bh(jh)->b_page; 784 page = jh2bh(jh)->b_page;
785 offset = offset_in_page(jh2bh(jh)->b_data); 785 offset = offset_in_page(jh2bh(jh)->b_data);
786 source = kmap_atomic(page, KM_USER0); 786 source = kmap_atomic(page);
787 /* Fire data frozen trigger just before we copy the data */ 787 /* Fire data frozen trigger just before we copy the data */
788 jbd2_buffer_frozen_trigger(jh, source + offset, 788 jbd2_buffer_frozen_trigger(jh, source + offset,
789 jh->b_triggers); 789 jh->b_triggers);
790 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); 790 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
791 kunmap_atomic(source, KM_USER0); 791 kunmap_atomic(source);
792 792
793 /* 793 /*
794 * Now that the frozen data is saved off, we need to store 794 * Now that the frozen data is saved off, we need to store
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 3de7a32cadbe..1b6e21dda286 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -177,17 +177,17 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
 				(filler_t *)logfs_readpage, NULL);
 		if (IS_ERR(page))
 			return page;
-		dd = kmap_atomic(page, KM_USER0);
+		dd = kmap_atomic(page);
 		BUG_ON(dd->namelen == 0);
 
 		if (name->len != be16_to_cpu(dd->namelen) ||
 		    memcmp(name->name, dd->name, name->len)) {
-			kunmap_atomic(dd, KM_USER0);
+			kunmap_atomic(dd);
 			page_cache_release(page);
 			continue;
 		}
 
-		kunmap_atomic(dd, KM_USER0);
+		kunmap_atomic(dd);
 		return page;
 	}
 	return NULL;
@@ -365,9 +365,9 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
 		return NULL;
 	}
 	index = page->index;
-	dd = kmap_atomic(page, KM_USER0);
+	dd = kmap_atomic(page);
 	ino = be64_to_cpu(dd->ino);
-	kunmap_atomic(dd, KM_USER0);
+	kunmap_atomic(dd);
 	page_cache_release(page);
 
 	inode = logfs_iget(dir->i_sb, ino);
@@ -402,12 +402,12 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
 	if (!page)
 		return -ENOMEM;
 
-	dd = kmap_atomic(page, KM_USER0);
+	dd = kmap_atomic(page);
 	memset(dd, 0, sizeof(*dd));
 	dd->ino = cpu_to_be64(inode->i_ino);
 	dd->type = logfs_type(inode);
 	logfs_set_name(dd, &dentry->d_name);
-	kunmap_atomic(dd, KM_USER0);
+	kunmap_atomic(dd);
 
 	err = logfs_write_buf(dir, page, WF_LOCK);
 	unlock_page(page);
@@ -579,9 +579,9 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 	*pos = page->index;
-	map = kmap_atomic(page, KM_USER0);
+	map = kmap_atomic(page);
 	memcpy(dd, map, sizeof(*dd));
-	kunmap_atomic(map, KM_USER0);
+	kunmap_atomic(map);
 	page_cache_release(page);
 	return 0;
 }
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 4153e65b0148..e3ab5e5a904c 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -517,9 +517,9 @@ static int indirect_write_alias(struct super_block *sb,
 
 		ino = page->mapping->host->i_ino;
 		logfs_unpack_index(page->index, &bix, &level);
-		child = kmap_atomic(page, KM_USER0);
+		child = kmap_atomic(page);
 		val = child[pos];
-		kunmap_atomic(child, KM_USER0);
+		kunmap_atomic(child);
 		err = write_one_alias(sb, ino, bix, level, pos, val);
 		if (err)
 			return err;
@@ -673,9 +673,9 @@ static void alloc_indirect_block(struct inode *inode, struct page *page,
 		alloc_data_block(inode, page);
 
 	block = logfs_block(page);
-	array = kmap_atomic(page, KM_USER0);
+	array = kmap_atomic(page);
 	initialize_block_counters(page, block, array, page_is_empty);
-	kunmap_atomic(array, KM_USER0);
+	kunmap_atomic(array);
 }
 
 static void block_set_pointer(struct page *page, int index, u64 ptr)
@@ -685,10 +685,10 @@ static void block_set_pointer(struct page *page, int index, u64 ptr)
 	u64 oldptr;
 
 	BUG_ON(!block);
-	array = kmap_atomic(page, KM_USER0);
+	array = kmap_atomic(page);
 	oldptr = be64_to_cpu(array[index]);
 	array[index] = cpu_to_be64(ptr);
-	kunmap_atomic(array, KM_USER0);
+	kunmap_atomic(array);
 	SetPageUptodate(page);
 
 	block->full += !!(ptr & LOGFS_FULLY_POPULATED)
@@ -701,9 +701,9 @@ static u64 block_get_pointer(struct page *page, int index)
 	__be64 *block;
 	u64 ptr;
 
-	block = kmap_atomic(page, KM_USER0);
+	block = kmap_atomic(page);
 	ptr = be64_to_cpu(block[index]);
-	kunmap_atomic(block, KM_USER0);
+	kunmap_atomic(block);
 	return ptr;
 }
 
@@ -850,7 +850,7 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
 		}
 
 		slot = get_bits(bix, SUBLEVEL(level));
-		rblock = kmap_atomic(page, KM_USER0);
+		rblock = kmap_atomic(page);
 		while (slot < LOGFS_BLOCK_FACTOR) {
 			if (data && (rblock[slot] != 0))
 				break;
@@ -861,12 +861,12 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
 			bix &= ~(increment - 1);
 		}
 		if (slot >= LOGFS_BLOCK_FACTOR) {
-			kunmap_atomic(rblock, KM_USER0);
+			kunmap_atomic(rblock);
 			logfs_put_read_page(page);
 			return bix;
 		}
 		bofs = be64_to_cpu(rblock[slot]);
-		kunmap_atomic(rblock, KM_USER0);
+		kunmap_atomic(rblock);
 		logfs_put_read_page(page);
 		if (!bofs) {
 			BUG_ON(data);
@@ -1961,9 +1961,9 @@ int logfs_read_inode(struct inode *inode)
 	if (IS_ERR(page))
 		return PTR_ERR(page);
 
-	di = kmap_atomic(page, KM_USER0);
+	di = kmap_atomic(page);
 	logfs_disk_to_inode(di, inode);
-	kunmap_atomic(di, KM_USER0);
+	kunmap_atomic(di);
 	move_page_to_inode(inode, page);
 	page_cache_release(page);
 	return 0;
@@ -1982,9 +1982,9 @@ static struct page *inode_to_page(struct inode *inode)
 	if (!page)
 		return NULL;
 
-	di = kmap_atomic(page, KM_USER0);
+	di = kmap_atomic(page);
 	logfs_inode_to_disk(inode, di);
-	kunmap_atomic(di, KM_USER0);
+	kunmap_atomic(di);
 	move_inode_to_page(page, inode);
 	return page;
 }
@@ -2041,13 +2041,13 @@ static void logfs_mod_segment_entry(struct super_block *sb, u32 segno,
 
 	if (write)
 		alloc_indirect_block(inode, page, 0);
-	se = kmap_atomic(page, KM_USER0);
+	se = kmap_atomic(page);
 	change_se(se + child_no, arg);
 	if (write) {
 		logfs_set_alias(sb, logfs_block(page), child_no);
 		BUG_ON((int)be32_to_cpu(se[child_no].valid) > super->s_segsize);
 	}
-	kunmap_atomic(se, KM_USER0);
+	kunmap_atomic(se);
 
 	logfs_put_write_page(page);
 }
@@ -2245,10 +2245,10 @@ int logfs_inode_write(struct inode *inode, const void *buf, size_t count,
 	if (!page)
 		return -ENOMEM;
 
-	pagebuf = kmap_atomic(page, KM_USER0);
+	pagebuf = kmap_atomic(page);
 	memcpy(pagebuf, buf, count);
 	flush_dcache_page(page);
-	kunmap_atomic(pagebuf, KM_USER0);
+	kunmap_atomic(pagebuf);
 
 	if (i_size_read(inode) < pos + LOGFS_BLOCKSIZE)
 		i_size_write(inode, pos + LOGFS_BLOCKSIZE);
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index ab798ed1cc88..e28d090c98d6 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -543,9 +543,9 @@ void move_page_to_btree(struct page *page)
 		BUG_ON(!item); /* mempool empty */
 		memset(item, 0, sizeof(*item));
 
-		child = kmap_atomic(page, KM_USER0);
+		child = kmap_atomic(page);
 		item->val = child[pos];
-		kunmap_atomic(child, KM_USER0);
+		kunmap_atomic(child);
 		item->child_no = pos;
 		list_add(&item->list, &block->item_list);
 	}
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 085a9262c692..685b2d981b87 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -335,7 +335,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 		goto fail;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memset(kaddr, 0, PAGE_CACHE_SIZE);
 
 	if (sbi->s_version == MINIX_V3) {
@@ -355,7 +355,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
 		de->inode = dir->i_ino;
 		strcpy(de->name, "..");
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
 fail:
diff --git a/fs/namei.c b/fs/namei.c
index fa96a26d3291..20a4fcf001ec 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3493,9 +3493,9 @@ retry:
 	if (err)
 		goto fail;
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memcpy(kaddr, symname, len-1);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
 							page, fsdata);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index fd9a872fada0..32aa6917265a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -260,10 +260,10 @@ void nfs_readdir_clear_array(struct page *page)
260 struct nfs_cache_array *array; 260 struct nfs_cache_array *array;
261 int i; 261 int i;
262 262
263 array = kmap_atomic(page, KM_USER0); 263 array = kmap_atomic(page);
264 for (i = 0; i < array->size; i++) 264 for (i = 0; i < array->size; i++)
265 kfree(array->array[i].string.name); 265 kfree(array->array[i].string.name);
266 kunmap_atomic(array, KM_USER0); 266 kunmap_atomic(array);
267 } 267 }
268 268
269 /* 269 /*
@@ -1870,11 +1870,11 @@ static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *sym
1870 if (!page) 1870 if (!page)
1871 return -ENOMEM; 1871 return -ENOMEM;
1872 1872
1873 kaddr = kmap_atomic(page, KM_USER0); 1873 kaddr = kmap_atomic(page);
1874 memcpy(kaddr, symname, pathlen); 1874 memcpy(kaddr, symname, pathlen);
1875 if (pathlen < PAGE_SIZE) 1875 if (pathlen < PAGE_SIZE)
1876 memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen); 1876 memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
1877 kunmap_atomic(kaddr, KM_USER0); 1877 kunmap_atomic(kaddr);
1878 1878
1879 error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr); 1879 error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
1880 if (error != 0) { 1880 if (error != 0) {
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ec9f6ef6c5dd..caf92d05c3a9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -193,7 +193,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
193 * when talking to the server, we always send cookie 0 193 * when talking to the server, we always send cookie 0
194 * instead of 1 or 2. 194 * instead of 1 or 2.
195 */ 195 */
196 start = p = kmap_atomic(*readdir->pages, KM_USER0); 196 start = p = kmap_atomic(*readdir->pages);
197 197
198 if (cookie == 0) { 198 if (cookie == 0) {
199 *p++ = xdr_one; /* next */ 199 *p++ = xdr_one; /* next */
@@ -221,7 +221,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
221 221
222 readdir->pgbase = (char *)p - (char *)start; 222 readdir->pgbase = (char *)p - (char *)start;
223 readdir->count -= readdir->pgbase; 223 readdir->count -= readdir->pgbase;
224 kunmap_atomic(start, KM_USER0); 224 kunmap_atomic(start);
225 } 225 }
226 226
227 static int nfs4_wait_clnt_recover(struct nfs_client *clp) 227 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index c9b342c8b503..dab5c4c6dfaf 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -218,11 +218,11 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
218 kaddr, 1); 218 kaddr, 1);
219 mark_buffer_dirty(cp_bh); 219 mark_buffer_dirty(cp_bh);
220 220
221 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 221 kaddr = kmap_atomic(header_bh->b_page);
222 header = nilfs_cpfile_block_get_header(cpfile, header_bh, 222 header = nilfs_cpfile_block_get_header(cpfile, header_bh,
223 kaddr); 223 kaddr);
224 le64_add_cpu(&header->ch_ncheckpoints, 1); 224 le64_add_cpu(&header->ch_ncheckpoints, 1);
225 kunmap_atomic(kaddr, KM_USER0); 225 kunmap_atomic(kaddr);
226 mark_buffer_dirty(header_bh); 226 mark_buffer_dirty(header_bh);
227 nilfs_mdt_mark_dirty(cpfile); 227 nilfs_mdt_mark_dirty(cpfile);
228 } 228 }
@@ -313,7 +313,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
313 continue; 313 continue;
314 } 314 }
315 315
316 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); 316 kaddr = kmap_atomic(cp_bh->b_page);
317 cp = nilfs_cpfile_block_get_checkpoint( 317 cp = nilfs_cpfile_block_get_checkpoint(
318 cpfile, cno, cp_bh, kaddr); 318 cpfile, cno, cp_bh, kaddr);
319 nicps = 0; 319 nicps = 0;
@@ -334,7 +334,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
334 cpfile, cp_bh, kaddr, nicps); 334 cpfile, cp_bh, kaddr, nicps);
335 if (count == 0) { 335 if (count == 0) {
336 /* make hole */ 336 /* make hole */
337 kunmap_atomic(kaddr, KM_USER0); 337 kunmap_atomic(kaddr);
338 brelse(cp_bh); 338 brelse(cp_bh);
339 ret = 339 ret =
340 nilfs_cpfile_delete_checkpoint_block( 340 nilfs_cpfile_delete_checkpoint_block(
@@ -349,18 +349,18 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
349 } 349 }
350 } 350 }
351 351
352 kunmap_atomic(kaddr, KM_USER0); 352 kunmap_atomic(kaddr);
353 brelse(cp_bh); 353 brelse(cp_bh);
354 } 354 }
355 355
356 if (tnicps > 0) { 356 if (tnicps > 0) {
357 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 357 kaddr = kmap_atomic(header_bh->b_page);
358 header = nilfs_cpfile_block_get_header(cpfile, header_bh, 358 header = nilfs_cpfile_block_get_header(cpfile, header_bh,
359 kaddr); 359 kaddr);
360 le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps); 360 le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
361 mark_buffer_dirty(header_bh); 361 mark_buffer_dirty(header_bh);
362 nilfs_mdt_mark_dirty(cpfile); 362 nilfs_mdt_mark_dirty(cpfile);
363 kunmap_atomic(kaddr, KM_USER0); 363 kunmap_atomic(kaddr);
364 } 364 }
365 365
366 brelse(header_bh); 366 brelse(header_bh);
@@ -408,7 +408,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
408 continue; /* skip hole */ 408 continue; /* skip hole */
409 } 409 }
410 410
411 kaddr = kmap_atomic(bh->b_page, KM_USER0); 411 kaddr = kmap_atomic(bh->b_page);
412 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); 412 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
413 for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) { 413 for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
414 if (!nilfs_checkpoint_invalid(cp)) { 414 if (!nilfs_checkpoint_invalid(cp)) {
@@ -418,7 +418,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
418 n++; 418 n++;
419 } 419 }
420 } 420 }
421 kunmap_atomic(kaddr, KM_USER0); 421 kunmap_atomic(kaddr);
422 brelse(bh); 422 brelse(bh);
423 } 423 }
424 424
@@ -451,10 +451,10 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
451 ret = nilfs_cpfile_get_header_block(cpfile, &bh); 451 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
452 if (ret < 0) 452 if (ret < 0)
453 goto out; 453 goto out;
454 kaddr = kmap_atomic(bh->b_page, KM_USER0); 454 kaddr = kmap_atomic(bh->b_page);
455 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); 455 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
456 curr = le64_to_cpu(header->ch_snapshot_list.ssl_next); 456 curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
457 kunmap_atomic(kaddr, KM_USER0); 457 kunmap_atomic(kaddr);
458 brelse(bh); 458 brelse(bh);
459 if (curr == 0) { 459 if (curr == 0) {
460 ret = 0; 460 ret = 0;
@@ -472,7 +472,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
472 ret = 0; /* No snapshots (started from a hole block) */ 472 ret = 0; /* No snapshots (started from a hole block) */
473 goto out; 473 goto out;
474 } 474 }
475 kaddr = kmap_atomic(bh->b_page, KM_USER0); 475 kaddr = kmap_atomic(bh->b_page);
476 while (n < nci) { 476 while (n < nci) {
477 cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr); 477 cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
478 curr = ~(__u64)0; /* Terminator */ 478 curr = ~(__u64)0; /* Terminator */
@@ -488,7 +488,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
488 488
489 next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next); 489 next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
490 if (curr_blkoff != next_blkoff) { 490 if (curr_blkoff != next_blkoff) {
491 kunmap_atomic(kaddr, KM_USER0); 491 kunmap_atomic(kaddr);
492 brelse(bh); 492 brelse(bh);
493 ret = nilfs_cpfile_get_checkpoint_block(cpfile, next, 493 ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
494 0, &bh); 494 0, &bh);
@@ -496,12 +496,12 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
496 WARN_ON(ret == -ENOENT); 496 WARN_ON(ret == -ENOENT);
497 goto out; 497 goto out;
498 } 498 }
499 kaddr = kmap_atomic(bh->b_page, KM_USER0); 499 kaddr = kmap_atomic(bh->b_page);
500 } 500 }
501 curr = next; 501 curr = next;
502 curr_blkoff = next_blkoff; 502 curr_blkoff = next_blkoff;
503 } 503 }
504 kunmap_atomic(kaddr, KM_USER0); 504 kunmap_atomic(kaddr);
505 brelse(bh); 505 brelse(bh);
506 *cnop = curr; 506 *cnop = curr;
507 ret = n; 507 ret = n;
@@ -592,24 +592,24 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
592 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); 592 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
593 if (ret < 0) 593 if (ret < 0)
594 goto out_sem; 594 goto out_sem;
595 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); 595 kaddr = kmap_atomic(cp_bh->b_page);
596 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); 596 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
597 if (nilfs_checkpoint_invalid(cp)) { 597 if (nilfs_checkpoint_invalid(cp)) {
598 ret = -ENOENT; 598 ret = -ENOENT;
599 kunmap_atomic(kaddr, KM_USER0); 599 kunmap_atomic(kaddr);
600 goto out_cp; 600 goto out_cp;
601 } 601 }
602 if (nilfs_checkpoint_snapshot(cp)) { 602 if (nilfs_checkpoint_snapshot(cp)) {
603 ret = 0; 603 ret = 0;
604 kunmap_atomic(kaddr, KM_USER0); 604 kunmap_atomic(kaddr);
605 goto out_cp; 605 goto out_cp;
606 } 606 }
607 kunmap_atomic(kaddr, KM_USER0); 607 kunmap_atomic(kaddr);
608 608
609 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); 609 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
610 if (ret < 0) 610 if (ret < 0)
611 goto out_cp; 611 goto out_cp;
612 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 612 kaddr = kmap_atomic(header_bh->b_page);
613 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); 613 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
614 list = &header->ch_snapshot_list; 614 list = &header->ch_snapshot_list;
615 curr_bh = header_bh; 615 curr_bh = header_bh;
@@ -621,13 +621,13 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
621 prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev); 621 prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
622 curr = prev; 622 curr = prev;
623 if (curr_blkoff != prev_blkoff) { 623 if (curr_blkoff != prev_blkoff) {
624 kunmap_atomic(kaddr, KM_USER0); 624 kunmap_atomic(kaddr);
625 brelse(curr_bh); 625 brelse(curr_bh);
626 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 626 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
627 0, &curr_bh); 627 0, &curr_bh);
628 if (ret < 0) 628 if (ret < 0)
629 goto out_header; 629 goto out_header;
630 kaddr = kmap_atomic(curr_bh->b_page, KM_USER0); 630 kaddr = kmap_atomic(curr_bh->b_page);
631 } 631 }
632 curr_blkoff = prev_blkoff; 632 curr_blkoff = prev_blkoff;
633 cp = nilfs_cpfile_block_get_checkpoint( 633 cp = nilfs_cpfile_block_get_checkpoint(
@@ -635,7 +635,7 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
635 list = &cp->cp_snapshot_list; 635 list = &cp->cp_snapshot_list;
636 prev = le64_to_cpu(list->ssl_prev); 636 prev = le64_to_cpu(list->ssl_prev);
637 } 637 }
638 kunmap_atomic(kaddr, KM_USER0); 638 kunmap_atomic(kaddr);
639 639
640 if (prev != 0) { 640 if (prev != 0) {
641 ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0, 641 ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
@@ -647,29 +647,29 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
647 get_bh(prev_bh); 647 get_bh(prev_bh);
648 } 648 }
649 649
650 kaddr = kmap_atomic(curr_bh->b_page, KM_USER0); 650 kaddr = kmap_atomic(curr_bh->b_page);
651 list = nilfs_cpfile_block_get_snapshot_list( 651 list = nilfs_cpfile_block_get_snapshot_list(
652 cpfile, curr, curr_bh, kaddr); 652 cpfile, curr, curr_bh, kaddr);
653 list->ssl_prev = cpu_to_le64(cno); 653 list->ssl_prev = cpu_to_le64(cno);
654 kunmap_atomic(kaddr, KM_USER0); 654 kunmap_atomic(kaddr);
655 655
656 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); 656 kaddr = kmap_atomic(cp_bh->b_page);
657 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); 657 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
658 cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr); 658 cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
659 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev); 659 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
660 nilfs_checkpoint_set_snapshot(cp); 660 nilfs_checkpoint_set_snapshot(cp);
661 kunmap_atomic(kaddr, KM_USER0); 661 kunmap_atomic(kaddr);
662 662
663 kaddr = kmap_atomic(prev_bh->b_page, KM_USER0); 663 kaddr = kmap_atomic(prev_bh->b_page);
664 list = nilfs_cpfile_block_get_snapshot_list( 664 list = nilfs_cpfile_block_get_snapshot_list(
665 cpfile, prev, prev_bh, kaddr); 665 cpfile, prev, prev_bh, kaddr);
666 list->ssl_next = cpu_to_le64(cno); 666 list->ssl_next = cpu_to_le64(cno);
667 kunmap_atomic(kaddr, KM_USER0); 667 kunmap_atomic(kaddr);
668 668
669 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 669 kaddr = kmap_atomic(header_bh->b_page);
670 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); 670 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
671 le64_add_cpu(&header->ch_nsnapshots, 1); 671 le64_add_cpu(&header->ch_nsnapshots, 1);
672 kunmap_atomic(kaddr, KM_USER0); 672 kunmap_atomic(kaddr);
673 673
674 mark_buffer_dirty(prev_bh); 674 mark_buffer_dirty(prev_bh);
675 mark_buffer_dirty(curr_bh); 675 mark_buffer_dirty(curr_bh);
@@ -710,23 +710,23 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
710 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh); 710 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
711 if (ret < 0) 711 if (ret < 0)
712 goto out_sem; 712 goto out_sem;
713 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); 713 kaddr = kmap_atomic(cp_bh->b_page);
714 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); 714 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
715 if (nilfs_checkpoint_invalid(cp)) { 715 if (nilfs_checkpoint_invalid(cp)) {
716 ret = -ENOENT; 716 ret = -ENOENT;
717 kunmap_atomic(kaddr, KM_USER0); 717 kunmap_atomic(kaddr);
718 goto out_cp; 718 goto out_cp;
719 } 719 }
720 if (!nilfs_checkpoint_snapshot(cp)) { 720 if (!nilfs_checkpoint_snapshot(cp)) {
721 ret = 0; 721 ret = 0;
722 kunmap_atomic(kaddr, KM_USER0); 722 kunmap_atomic(kaddr);
723 goto out_cp; 723 goto out_cp;
724 } 724 }
725 725
726 list = &cp->cp_snapshot_list; 726 list = &cp->cp_snapshot_list;
727 next = le64_to_cpu(list->ssl_next); 727 next = le64_to_cpu(list->ssl_next);
728 prev = le64_to_cpu(list->ssl_prev); 728 prev = le64_to_cpu(list->ssl_prev);
729 kunmap_atomic(kaddr, KM_USER0); 729 kunmap_atomic(kaddr);
730 730
731 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh); 731 ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
732 if (ret < 0) 732 if (ret < 0)
@@ -750,29 +750,29 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
750 get_bh(prev_bh); 750 get_bh(prev_bh);
751 } 751 }
752 752
753 kaddr = kmap_atomic(next_bh->b_page, KM_USER0); 753 kaddr = kmap_atomic(next_bh->b_page);
754 list = nilfs_cpfile_block_get_snapshot_list( 754 list = nilfs_cpfile_block_get_snapshot_list(
755 cpfile, next, next_bh, kaddr); 755 cpfile, next, next_bh, kaddr);
756 list->ssl_prev = cpu_to_le64(prev); 756 list->ssl_prev = cpu_to_le64(prev);
757 kunmap_atomic(kaddr, KM_USER0); 757 kunmap_atomic(kaddr);
758 758
759 kaddr = kmap_atomic(prev_bh->b_page, KM_USER0); 759 kaddr = kmap_atomic(prev_bh->b_page);
760 list = nilfs_cpfile_block_get_snapshot_list( 760 list = nilfs_cpfile_block_get_snapshot_list(
761 cpfile, prev, prev_bh, kaddr); 761 cpfile, prev, prev_bh, kaddr);
762 list->ssl_next = cpu_to_le64(next); 762 list->ssl_next = cpu_to_le64(next);
763 kunmap_atomic(kaddr, KM_USER0); 763 kunmap_atomic(kaddr);
764 764
765 kaddr = kmap_atomic(cp_bh->b_page, KM_USER0); 765 kaddr = kmap_atomic(cp_bh->b_page);
766 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr); 766 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
767 cp->cp_snapshot_list.ssl_next = cpu_to_le64(0); 767 cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
768 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0); 768 cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
769 nilfs_checkpoint_clear_snapshot(cp); 769 nilfs_checkpoint_clear_snapshot(cp);
770 kunmap_atomic(kaddr, KM_USER0); 770 kunmap_atomic(kaddr);
771 771
772 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 772 kaddr = kmap_atomic(header_bh->b_page);
773 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr); 773 header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
774 le64_add_cpu(&header->ch_nsnapshots, -1); 774 le64_add_cpu(&header->ch_nsnapshots, -1);
775 kunmap_atomic(kaddr, KM_USER0); 775 kunmap_atomic(kaddr);
776 776
777 mark_buffer_dirty(next_bh); 777 mark_buffer_dirty(next_bh);
778 mark_buffer_dirty(prev_bh); 778 mark_buffer_dirty(prev_bh);
@@ -829,13 +829,13 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
829 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh); 829 ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
830 if (ret < 0) 830 if (ret < 0)
831 goto out; 831 goto out;
832 kaddr = kmap_atomic(bh->b_page, KM_USER0); 832 kaddr = kmap_atomic(bh->b_page);
833 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr); 833 cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
834 if (nilfs_checkpoint_invalid(cp)) 834 if (nilfs_checkpoint_invalid(cp))
835 ret = -ENOENT; 835 ret = -ENOENT;
836 else 836 else
837 ret = nilfs_checkpoint_snapshot(cp); 837 ret = nilfs_checkpoint_snapshot(cp);
838 kunmap_atomic(kaddr, KM_USER0); 838 kunmap_atomic(kaddr);
839 brelse(bh); 839 brelse(bh);
840 840
841 out: 841 out:
@@ -912,12 +912,12 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
912 ret = nilfs_cpfile_get_header_block(cpfile, &bh); 912 ret = nilfs_cpfile_get_header_block(cpfile, &bh);
913 if (ret < 0) 913 if (ret < 0)
914 goto out_sem; 914 goto out_sem;
915 kaddr = kmap_atomic(bh->b_page, KM_USER0); 915 kaddr = kmap_atomic(bh->b_page);
916 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr); 916 header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
917 cpstat->cs_cno = nilfs_mdt_cno(cpfile); 917 cpstat->cs_cno = nilfs_mdt_cno(cpfile);
918 cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints); 918 cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
919 cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots); 919 cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
920 kunmap_atomic(kaddr, KM_USER0); 920 kunmap_atomic(kaddr);
921 brelse(bh); 921 brelse(bh);
922 922
923 out_sem: 923 out_sem:
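The cpfile conversions above are dominated by error paths: an atomic mapping disables preemption, so every early goto must drop the mapping before leaving the function. The balancing pattern, as it appears in nilfs_cpfile_set_snapshot() above (a sketch of one exit path, not a complete function):

        kaddr = kmap_atomic(cp_bh->b_page);
        cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
        if (nilfs_checkpoint_invalid(cp)) {
                ret = -ENOENT;
                kunmap_atomic(kaddr);   /* balance the mapping on every exit */
                goto out_cp;
        }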
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index fcc2f869af16..b5c13f3576b9 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -85,13 +85,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
85 struct nilfs_dat_entry *entry; 85 struct nilfs_dat_entry *entry;
86 void *kaddr; 86 void *kaddr;
87 87
88 kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); 88 kaddr = kmap_atomic(req->pr_entry_bh->b_page);
89 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, 89 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
90 req->pr_entry_bh, kaddr); 90 req->pr_entry_bh, kaddr);
91 entry->de_start = cpu_to_le64(NILFS_CNO_MIN); 91 entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
92 entry->de_end = cpu_to_le64(NILFS_CNO_MAX); 92 entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
93 entry->de_blocknr = cpu_to_le64(0); 93 entry->de_blocknr = cpu_to_le64(0);
94 kunmap_atomic(kaddr, KM_USER0); 94 kunmap_atomic(kaddr);
95 95
96 nilfs_palloc_commit_alloc_entry(dat, req); 96 nilfs_palloc_commit_alloc_entry(dat, req);
97 nilfs_dat_commit_entry(dat, req); 97 nilfs_dat_commit_entry(dat, req);
@@ -109,13 +109,13 @@ static void nilfs_dat_commit_free(struct inode *dat,
109 struct nilfs_dat_entry *entry; 109 struct nilfs_dat_entry *entry;
110 void *kaddr; 110 void *kaddr;
111 111
112 kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); 112 kaddr = kmap_atomic(req->pr_entry_bh->b_page);
113 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, 113 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
114 req->pr_entry_bh, kaddr); 114 req->pr_entry_bh, kaddr);
115 entry->de_start = cpu_to_le64(NILFS_CNO_MIN); 115 entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
116 entry->de_end = cpu_to_le64(NILFS_CNO_MIN); 116 entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
117 entry->de_blocknr = cpu_to_le64(0); 117 entry->de_blocknr = cpu_to_le64(0);
118 kunmap_atomic(kaddr, KM_USER0); 118 kunmap_atomic(kaddr);
119 119
120 nilfs_dat_commit_entry(dat, req); 120 nilfs_dat_commit_entry(dat, req);
121 nilfs_palloc_commit_free_entry(dat, req); 121 nilfs_palloc_commit_free_entry(dat, req);
@@ -136,12 +136,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
136 struct nilfs_dat_entry *entry; 136 struct nilfs_dat_entry *entry;
137 void *kaddr; 137 void *kaddr;
138 138
139 kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); 139 kaddr = kmap_atomic(req->pr_entry_bh->b_page);
140 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, 140 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
141 req->pr_entry_bh, kaddr); 141 req->pr_entry_bh, kaddr);
142 entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat)); 142 entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
143 entry->de_blocknr = cpu_to_le64(blocknr); 143 entry->de_blocknr = cpu_to_le64(blocknr);
144 kunmap_atomic(kaddr, KM_USER0); 144 kunmap_atomic(kaddr);
145 145
146 nilfs_dat_commit_entry(dat, req); 146 nilfs_dat_commit_entry(dat, req);
147 } 147 }
@@ -160,12 +160,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
160 return ret; 160 return ret;
161 } 161 }
162 162
163 kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); 163 kaddr = kmap_atomic(req->pr_entry_bh->b_page);
164 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, 164 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
165 req->pr_entry_bh, kaddr); 165 req->pr_entry_bh, kaddr);
166 start = le64_to_cpu(entry->de_start); 166 start = le64_to_cpu(entry->de_start);
167 blocknr = le64_to_cpu(entry->de_blocknr); 167 blocknr = le64_to_cpu(entry->de_blocknr);
168 kunmap_atomic(kaddr, KM_USER0); 168 kunmap_atomic(kaddr);
169 169
170 if (blocknr == 0) { 170 if (blocknr == 0) {
171 ret = nilfs_palloc_prepare_free_entry(dat, req); 171 ret = nilfs_palloc_prepare_free_entry(dat, req);
@@ -186,7 +186,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
186 sector_t blocknr; 186 sector_t blocknr;
187 void *kaddr; 187 void *kaddr;
188 188
189 kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); 189 kaddr = kmap_atomic(req->pr_entry_bh->b_page);
190 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, 190 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
191 req->pr_entry_bh, kaddr); 191 req->pr_entry_bh, kaddr);
192 end = start = le64_to_cpu(entry->de_start); 192 end = start = le64_to_cpu(entry->de_start);
@@ -196,7 +196,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
196 } 196 }
197 entry->de_end = cpu_to_le64(end); 197 entry->de_end = cpu_to_le64(end);
198 blocknr = le64_to_cpu(entry->de_blocknr); 198 blocknr = le64_to_cpu(entry->de_blocknr);
199 kunmap_atomic(kaddr, KM_USER0); 199 kunmap_atomic(kaddr);
200 200
201 if (blocknr == 0) 201 if (blocknr == 0)
202 nilfs_dat_commit_free(dat, req); 202 nilfs_dat_commit_free(dat, req);
@@ -211,12 +211,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
211 sector_t blocknr; 211 sector_t blocknr;
212 void *kaddr; 212 void *kaddr;
213 213
214 kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0); 214 kaddr = kmap_atomic(req->pr_entry_bh->b_page);
215 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr, 215 entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
216 req->pr_entry_bh, kaddr); 216 req->pr_entry_bh, kaddr);
217 start = le64_to_cpu(entry->de_start); 217 start = le64_to_cpu(entry->de_start);
218 blocknr = le64_to_cpu(entry->de_blocknr); 218 blocknr = le64_to_cpu(entry->de_blocknr);
219 kunmap_atomic(kaddr, KM_USER0); 219 kunmap_atomic(kaddr);
220 220
221 if (start == nilfs_mdt_cno(dat) && blocknr == 0) 221 if (start == nilfs_mdt_cno(dat) && blocknr == 0)
222 nilfs_palloc_abort_free_entry(dat, req); 222 nilfs_palloc_abort_free_entry(dat, req);
@@ -346,20 +346,20 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
346 } 346 }
347 } 347 }
348 348
349 kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); 349 kaddr = kmap_atomic(entry_bh->b_page);
350 entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); 350 entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
351 if (unlikely(entry->de_blocknr == cpu_to_le64(0))) { 351 if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
352 printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__, 352 printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
353 (unsigned long long)vblocknr, 353 (unsigned long long)vblocknr,
354 (unsigned long long)le64_to_cpu(entry->de_start), 354 (unsigned long long)le64_to_cpu(entry->de_start),
355 (unsigned long long)le64_to_cpu(entry->de_end)); 355 (unsigned long long)le64_to_cpu(entry->de_end));
356 kunmap_atomic(kaddr, KM_USER0); 356 kunmap_atomic(kaddr);
357 brelse(entry_bh); 357 brelse(entry_bh);
358 return -EINVAL; 358 return -EINVAL;
359 } 359 }
360 WARN_ON(blocknr == 0); 360 WARN_ON(blocknr == 0);
361 entry->de_blocknr = cpu_to_le64(blocknr); 361 entry->de_blocknr = cpu_to_le64(blocknr);
362 kunmap_atomic(kaddr, KM_USER0); 362 kunmap_atomic(kaddr);
363 363
364 mark_buffer_dirty(entry_bh); 364 mark_buffer_dirty(entry_bh);
365 nilfs_mdt_mark_dirty(dat); 365 nilfs_mdt_mark_dirty(dat);
@@ -409,7 +409,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
409 } 409 }
410 } 410 }
411 411
412 kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); 412 kaddr = kmap_atomic(entry_bh->b_page);
413 entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr); 413 entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
414 blocknr = le64_to_cpu(entry->de_blocknr); 414 blocknr = le64_to_cpu(entry->de_blocknr);
415 if (blocknr == 0) { 415 if (blocknr == 0) {
@@ -419,7 +419,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
419 *blocknrp = blocknr; 419 *blocknrp = blocknr;
420 420
421 out: 421 out:
422 kunmap_atomic(kaddr, KM_USER0); 422 kunmap_atomic(kaddr);
423 brelse(entry_bh); 423 brelse(entry_bh);
424 return ret; 424 return ret;
425 } 425 }
@@ -440,7 +440,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
440 0, &entry_bh); 440 0, &entry_bh);
441 if (ret < 0) 441 if (ret < 0)
442 return ret; 442 return ret;
443 kaddr = kmap_atomic(entry_bh->b_page, KM_USER0); 443 kaddr = kmap_atomic(entry_bh->b_page);
444 /* last virtual block number in this block */ 444 /* last virtual block number in this block */
445 first = vinfo->vi_vblocknr; 445 first = vinfo->vi_vblocknr;
446 do_div(first, entries_per_block); 446 do_div(first, entries_per_block);
@@ -456,7 +456,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
456 vinfo->vi_end = le64_to_cpu(entry->de_end); 456 vinfo->vi_end = le64_to_cpu(entry->de_end);
457 vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr); 457 vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
458 } 458 }
459 kunmap_atomic(kaddr, KM_USER0); 459 kunmap_atomic(kaddr);
460 brelse(entry_bh); 460 brelse(entry_bh);
461 } 461 }
462 462
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index ca35b3a46d17..df1a7fb238d1 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -602,7 +602,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
602 unlock_page(page); 602 unlock_page(page);
603 goto fail; 603 goto fail;
604 } 604 }
605 kaddr = kmap_atomic(page, KM_USER0); 605 kaddr = kmap_atomic(page);
606 memset(kaddr, 0, chunk_size); 606 memset(kaddr, 0, chunk_size);
607 de = (struct nilfs_dir_entry *)kaddr; 607 de = (struct nilfs_dir_entry *)kaddr;
608 de->name_len = 1; 608 de->name_len = 1;
@@ -617,7 +617,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
617 de->inode = cpu_to_le64(parent->i_ino); 617 de->inode = cpu_to_le64(parent->i_ino);
618 memcpy(de->name, "..\0", 4); 618 memcpy(de->name, "..\0", 4);
619 nilfs_set_de_type(de, inode); 619 nilfs_set_de_type(de, inode);
620 kunmap_atomic(kaddr, KM_USER0); 620 kunmap_atomic(kaddr);
621 nilfs_commit_chunk(page, mapping, 0, chunk_size); 621 nilfs_commit_chunk(page, mapping, 0, chunk_size);
622 fail: 622 fail:
623 page_cache_release(page); 623 page_cache_release(page);
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 684d76300a80..5a48df79d674 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -122,11 +122,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
122 return ret; 122 return ret;
123 } 123 }
124 124
125 kaddr = kmap_atomic(req.pr_entry_bh->b_page, KM_USER0); 125 kaddr = kmap_atomic(req.pr_entry_bh->b_page);
126 raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr, 126 raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr,
127 req.pr_entry_bh, kaddr); 127 req.pr_entry_bh, kaddr);
128 raw_inode->i_flags = 0; 128 raw_inode->i_flags = 0;
129 kunmap_atomic(kaddr, KM_USER0); 129 kunmap_atomic(kaddr);
130 130
131 mark_buffer_dirty(req.pr_entry_bh); 131 mark_buffer_dirty(req.pr_entry_bh);
132 brelse(req.pr_entry_bh); 132 brelse(req.pr_entry_bh);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 800e8d78a83b..f9897d09c693 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -58,12 +58,12 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
58 58
59 set_buffer_mapped(bh); 59 set_buffer_mapped(bh);
60 60
61 kaddr = kmap_atomic(bh->b_page, KM_USER0); 61 kaddr = kmap_atomic(bh->b_page);
62 memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits); 62 memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
63 if (init_block) 63 if (init_block)
64 init_block(inode, bh, kaddr); 64 init_block(inode, bh, kaddr);
65 flush_dcache_page(bh->b_page); 65 flush_dcache_page(bh->b_page);
66 kunmap_atomic(kaddr, KM_USER0); 66 kunmap_atomic(kaddr);
67 67
68 set_buffer_uptodate(bh); 68 set_buffer_uptodate(bh);
69 mark_buffer_dirty(bh); 69 mark_buffer_dirty(bh);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 65221a04c6f0..3e7b2a0dc0c8 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -119,11 +119,11 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
119 struct page *spage = sbh->b_page, *dpage = dbh->b_page; 119 struct page *spage = sbh->b_page, *dpage = dbh->b_page;
120 struct buffer_head *bh; 120 struct buffer_head *bh;
121 121
122 kaddr0 = kmap_atomic(spage, KM_USER0); 122 kaddr0 = kmap_atomic(spage);
123 kaddr1 = kmap_atomic(dpage, KM_USER1); 123 kaddr1 = kmap_atomic(dpage);
124 memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size); 124 memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
125 kunmap_atomic(kaddr1, KM_USER1); 125 kunmap_atomic(kaddr1);
126 kunmap_atomic(kaddr0, KM_USER0); 126 kunmap_atomic(kaddr0);
127 127
128 dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS; 128 dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
129 dbh->b_blocknr = sbh->b_blocknr; 129 dbh->b_blocknr = sbh->b_blocknr;
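nilfs_copy_buffer() above is the interesting case: it holds two atomic mappings at once, which under the old API required two distinct slot types (KM_USER0 and KM_USER1) so the mappings would not clobber each other. With the stacked API that distinction disappears; the only remaining rule is that nested mappings are released in reverse (LIFO) order, which the converted code already observes. A sketch of the nesting rule (spage/dpage as in the hunk, copy size hypothetical):

        void *src = kmap_atomic(spage);   /* was KM_USER0 */
        void *dst = kmap_atomic(dpage);   /* was KM_USER1 */
        memcpy(dst, src, PAGE_SIZE);
        kunmap_atomic(dst);               /* innermost mapping first */
        kunmap_atomic(src);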
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index a604ac0331b2..f1626f5011c5 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -493,9 +493,9 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
493 if (unlikely(!bh_org)) 493 if (unlikely(!bh_org))
494 return -EIO; 494 return -EIO;
495 495
496 kaddr = kmap_atomic(page, KM_USER0); 496 kaddr = kmap_atomic(page);
497 memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size); 497 memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
498 kunmap_atomic(kaddr, KM_USER0); 498 kunmap_atomic(kaddr);
499 brelse(bh_org); 499 brelse(bh_org);
500 return 0; 500 return 0;
501 } 501 }
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 850a7c0228fb..dc9a913784ab 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -227,9 +227,9 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
227 crc = crc32_le(crc, bh->b_data, bh->b_size); 227 crc = crc32_le(crc, bh->b_data, bh->b_size);
228 } 228 }
229 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) { 229 list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
230 kaddr = kmap_atomic(bh->b_page, KM_USER0); 230 kaddr = kmap_atomic(bh->b_page);
231 crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size); 231 crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
232 kunmap_atomic(kaddr, KM_USER0); 232 kunmap_atomic(kaddr);
233 } 233 }
234 raw_sum->ss_datasum = cpu_to_le32(crc); 234 raw_sum->ss_datasum = cpu_to_le32(crc);
235 } 235 }
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 0a0aba617d8a..c5b7653a4391 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -111,11 +111,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
111 struct nilfs_sufile_header *header; 111 struct nilfs_sufile_header *header;
112 void *kaddr; 112 void *kaddr;
113 113
114 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 114 kaddr = kmap_atomic(header_bh->b_page);
115 header = kaddr + bh_offset(header_bh); 115 header = kaddr + bh_offset(header_bh);
116 le64_add_cpu(&header->sh_ncleansegs, ncleanadd); 116 le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
117 le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd); 117 le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
118 kunmap_atomic(kaddr, KM_USER0); 118 kunmap_atomic(kaddr);
119 119
120 mark_buffer_dirty(header_bh); 120 mark_buffer_dirty(header_bh);
121 } 121 }
@@ -319,11 +319,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
319 ret = nilfs_sufile_get_header_block(sufile, &header_bh); 319 ret = nilfs_sufile_get_header_block(sufile, &header_bh);
320 if (ret < 0) 320 if (ret < 0)
321 goto out_sem; 321 goto out_sem;
322 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 322 kaddr = kmap_atomic(header_bh->b_page);
323 header = kaddr + bh_offset(header_bh); 323 header = kaddr + bh_offset(header_bh);
324 ncleansegs = le64_to_cpu(header->sh_ncleansegs); 324 ncleansegs = le64_to_cpu(header->sh_ncleansegs);
325 last_alloc = le64_to_cpu(header->sh_last_alloc); 325 last_alloc = le64_to_cpu(header->sh_last_alloc);
326 kunmap_atomic(kaddr, KM_USER0); 326 kunmap_atomic(kaddr);
327 327
328 nsegments = nilfs_sufile_get_nsegments(sufile); 328 nsegments = nilfs_sufile_get_nsegments(sufile);
329 maxsegnum = sui->allocmax; 329 maxsegnum = sui->allocmax;
@@ -356,7 +356,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
356 &su_bh); 356 &su_bh);
357 if (ret < 0) 357 if (ret < 0)
358 goto out_header; 358 goto out_header;
359 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 359 kaddr = kmap_atomic(su_bh->b_page);
360 su = nilfs_sufile_block_get_segment_usage( 360 su = nilfs_sufile_block_get_segment_usage(
361 sufile, segnum, su_bh, kaddr); 361 sufile, segnum, su_bh, kaddr);
362 362
@@ -367,14 +367,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
367 continue; 367 continue;
368 /* found a clean segment */ 368 /* found a clean segment */
369 nilfs_segment_usage_set_dirty(su); 369 nilfs_segment_usage_set_dirty(su);
370 kunmap_atomic(kaddr, KM_USER0); 370 kunmap_atomic(kaddr);
371 371
372 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 372 kaddr = kmap_atomic(header_bh->b_page);
373 header = kaddr + bh_offset(header_bh); 373 header = kaddr + bh_offset(header_bh);
374 le64_add_cpu(&header->sh_ncleansegs, -1); 374 le64_add_cpu(&header->sh_ncleansegs, -1);
375 le64_add_cpu(&header->sh_ndirtysegs, 1); 375 le64_add_cpu(&header->sh_ndirtysegs, 1);
376 header->sh_last_alloc = cpu_to_le64(segnum); 376 header->sh_last_alloc = cpu_to_le64(segnum);
377 kunmap_atomic(kaddr, KM_USER0); 377 kunmap_atomic(kaddr);
378 378
379 sui->ncleansegs--; 379 sui->ncleansegs--;
380 mark_buffer_dirty(header_bh); 380 mark_buffer_dirty(header_bh);
@@ -385,7 +385,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
385 goto out_header; 385 goto out_header;
386 } 386 }
387 387
388 kunmap_atomic(kaddr, KM_USER0); 388 kunmap_atomic(kaddr);
389 brelse(su_bh); 389 brelse(su_bh);
390 } 390 }
391 391
@@ -407,16 +407,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
407 struct nilfs_segment_usage *su; 407 struct nilfs_segment_usage *su;
408 void *kaddr; 408 void *kaddr;
409 409
410 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 410 kaddr = kmap_atomic(su_bh->b_page);
411 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); 411 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
412 if (unlikely(!nilfs_segment_usage_clean(su))) { 412 if (unlikely(!nilfs_segment_usage_clean(su))) {
413 printk(KERN_WARNING "%s: segment %llu must be clean\n", 413 printk(KERN_WARNING "%s: segment %llu must be clean\n",
414 __func__, (unsigned long long)segnum); 414 __func__, (unsigned long long)segnum);
415 kunmap_atomic(kaddr, KM_USER0); 415 kunmap_atomic(kaddr);
416 return; 416 return;
417 } 417 }
418 nilfs_segment_usage_set_dirty(su); 418 nilfs_segment_usage_set_dirty(su);
419 kunmap_atomic(kaddr, KM_USER0); 419 kunmap_atomic(kaddr);
420 420
421 nilfs_sufile_mod_counter(header_bh, -1, 1); 421 nilfs_sufile_mod_counter(header_bh, -1, 1);
422 NILFS_SUI(sufile)->ncleansegs--; 422 NILFS_SUI(sufile)->ncleansegs--;
@@ -433,11 +433,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
433 void *kaddr; 433 void *kaddr;
434 int clean, dirty; 434 int clean, dirty;
435 435
436 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 436 kaddr = kmap_atomic(su_bh->b_page);
437 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); 437 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
438 if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) && 438 if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
439 su->su_nblocks == cpu_to_le32(0)) { 439 su->su_nblocks == cpu_to_le32(0)) {
440 kunmap_atomic(kaddr, KM_USER0); 440 kunmap_atomic(kaddr);
441 return; 441 return;
442 } 442 }
443 clean = nilfs_segment_usage_clean(su); 443 clean = nilfs_segment_usage_clean(su);
@@ -447,7 +447,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
447 su->su_lastmod = cpu_to_le64(0); 447 su->su_lastmod = cpu_to_le64(0);
448 su->su_nblocks = cpu_to_le32(0); 448 su->su_nblocks = cpu_to_le32(0);
449 su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY); 449 su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
450 kunmap_atomic(kaddr, KM_USER0); 450 kunmap_atomic(kaddr);
451 451
452 nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1); 452 nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
453 NILFS_SUI(sufile)->ncleansegs -= clean; 453 NILFS_SUI(sufile)->ncleansegs -= clean;
@@ -464,12 +464,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
464 void *kaddr; 464 void *kaddr;
465 int sudirty; 465 int sudirty;
466 466
467 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 467 kaddr = kmap_atomic(su_bh->b_page);
468 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); 468 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
469 if (nilfs_segment_usage_clean(su)) { 469 if (nilfs_segment_usage_clean(su)) {
470 printk(KERN_WARNING "%s: segment %llu is already clean\n", 470 printk(KERN_WARNING "%s: segment %llu is already clean\n",
471 __func__, (unsigned long long)segnum); 471 __func__, (unsigned long long)segnum);
472 kunmap_atomic(kaddr, KM_USER0); 472 kunmap_atomic(kaddr);
473 return; 473 return;
474 } 474 }
475 WARN_ON(nilfs_segment_usage_error(su)); 475 WARN_ON(nilfs_segment_usage_error(su));
@@ -477,7 +477,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
477 477
478 sudirty = nilfs_segment_usage_dirty(su); 478 sudirty = nilfs_segment_usage_dirty(su);
479 nilfs_segment_usage_set_clean(su); 479 nilfs_segment_usage_set_clean(su);
480 kunmap_atomic(kaddr, KM_USER0); 480 kunmap_atomic(kaddr);
481 mark_buffer_dirty(su_bh); 481 mark_buffer_dirty(su_bh);
482 482
483 nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0); 483 nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
@@ -525,13 +525,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
525 if (ret < 0) 525 if (ret < 0)
526 goto out_sem; 526 goto out_sem;
527 527
528 kaddr = kmap_atomic(bh->b_page, KM_USER0); 528 kaddr = kmap_atomic(bh->b_page);
529 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr); 529 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
530 WARN_ON(nilfs_segment_usage_error(su)); 530 WARN_ON(nilfs_segment_usage_error(su));
531 if (modtime) 531 if (modtime)
532 su->su_lastmod = cpu_to_le64(modtime); 532 su->su_lastmod = cpu_to_le64(modtime);
533 su->su_nblocks = cpu_to_le32(nblocks); 533 su->su_nblocks = cpu_to_le32(nblocks);
534 kunmap_atomic(kaddr, KM_USER0); 534 kunmap_atomic(kaddr);
535 535
536 mark_buffer_dirty(bh); 536 mark_buffer_dirty(bh);
537 nilfs_mdt_mark_dirty(sufile); 537 nilfs_mdt_mark_dirty(sufile);
@@ -572,7 +572,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
572 if (ret < 0) 572 if (ret < 0)
573 goto out_sem; 573 goto out_sem;
574 574
575 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 575 kaddr = kmap_atomic(header_bh->b_page);
576 header = kaddr + bh_offset(header_bh); 576 header = kaddr + bh_offset(header_bh);
577 sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile); 577 sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
578 sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs); 578 sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
@@ -582,7 +582,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
582 spin_lock(&nilfs->ns_last_segment_lock); 582 spin_lock(&nilfs->ns_last_segment_lock);
583 sustat->ss_prot_seq = nilfs->ns_prot_seq; 583 sustat->ss_prot_seq = nilfs->ns_prot_seq;
584 spin_unlock(&nilfs->ns_last_segment_lock); 584 spin_unlock(&nilfs->ns_last_segment_lock);
585 kunmap_atomic(kaddr, KM_USER0); 585 kunmap_atomic(kaddr);
586 brelse(header_bh); 586 brelse(header_bh);
587 587
588 out_sem: 588 out_sem:
@@ -598,15 +598,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
598 void *kaddr; 598 void *kaddr;
599 int suclean; 599 int suclean;
600 600
601 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 601 kaddr = kmap_atomic(su_bh->b_page);
602 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr); 602 su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
603 if (nilfs_segment_usage_error(su)) { 603 if (nilfs_segment_usage_error(su)) {
604 kunmap_atomic(kaddr, KM_USER0); 604 kunmap_atomic(kaddr);
605 return; 605 return;
606 } 606 }
607 suclean = nilfs_segment_usage_clean(su); 607 suclean = nilfs_segment_usage_clean(su);
608 nilfs_segment_usage_set_error(su); 608 nilfs_segment_usage_set_error(su);
609 kunmap_atomic(kaddr, KM_USER0); 609 kunmap_atomic(kaddr);
610 610
611 if (suclean) { 611 if (suclean) {
612 nilfs_sufile_mod_counter(header_bh, -1, 0); 612 nilfs_sufile_mod_counter(header_bh, -1, 0);
@@ -675,7 +675,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
675 /* hole */ 675 /* hole */
676 continue; 676 continue;
677 } 677 }
678 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 678 kaddr = kmap_atomic(su_bh->b_page);
679 su = nilfs_sufile_block_get_segment_usage( 679 su = nilfs_sufile_block_get_segment_usage(
680 sufile, segnum, su_bh, kaddr); 680 sufile, segnum, su_bh, kaddr);
681 su2 = su; 681 su2 = su;
@@ -684,7 +684,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
684 ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) || 684 ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
685 nilfs_segment_is_active(nilfs, segnum + j)) { 685 nilfs_segment_is_active(nilfs, segnum + j)) {
686 ret = -EBUSY; 686 ret = -EBUSY;
687 kunmap_atomic(kaddr, KM_USER0); 687 kunmap_atomic(kaddr);
688 brelse(su_bh); 688 brelse(su_bh);
689 goto out_header; 689 goto out_header;
690 } 690 }
@@ -696,7 +696,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
696 nc++; 696 nc++;
697 } 697 }
698 } 698 }
699 kunmap_atomic(kaddr, KM_USER0); 699 kunmap_atomic(kaddr);
700 if (nc > 0) { 700 if (nc > 0) {
701 mark_buffer_dirty(su_bh); 701 mark_buffer_dirty(su_bh);
702 ncleaned += nc; 702 ncleaned += nc;
@@ -772,10 +772,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
772 sui->ncleansegs -= nsegs - newnsegs; 772 sui->ncleansegs -= nsegs - newnsegs;
773 } 773 }
774 774
775 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 775 kaddr = kmap_atomic(header_bh->b_page);
776 header = kaddr + bh_offset(header_bh); 776 header = kaddr + bh_offset(header_bh);
777 header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs); 777 header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
778 kunmap_atomic(kaddr, KM_USER0); 778 kunmap_atomic(kaddr);
779 779
780 mark_buffer_dirty(header_bh); 780 mark_buffer_dirty(header_bh);
781 nilfs_mdt_mark_dirty(sufile); 781 nilfs_mdt_mark_dirty(sufile);
@@ -840,7 +840,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
840 continue; 840 continue;
841 } 841 }
842 842
843 kaddr = kmap_atomic(su_bh->b_page, KM_USER0); 843 kaddr = kmap_atomic(su_bh->b_page);
844 su = nilfs_sufile_block_get_segment_usage( 844 su = nilfs_sufile_block_get_segment_usage(
845 sufile, segnum, su_bh, kaddr); 845 sufile, segnum, su_bh, kaddr);
846 for (j = 0; j < n; 846 for (j = 0; j < n;
@@ -853,7 +853,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
853 si->sui_flags |= 853 si->sui_flags |=
854 (1UL << NILFS_SEGMENT_USAGE_ACTIVE); 854 (1UL << NILFS_SEGMENT_USAGE_ACTIVE);
855 } 855 }
856 kunmap_atomic(kaddr, KM_USER0); 856 kunmap_atomic(kaddr);
857 brelse(su_bh); 857 brelse(su_bh);
858 } 858 }
859 ret = nsegs; 859 ret = nsegs;
@@ -902,10 +902,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
902 goto failed; 902 goto failed;
903 903
904 sui = NILFS_SUI(sufile); 904 sui = NILFS_SUI(sufile);
905 kaddr = kmap_atomic(header_bh->b_page, KM_USER0); 905 kaddr = kmap_atomic(header_bh->b_page);
906 header = kaddr + bh_offset(header_bh); 906 header = kaddr + bh_offset(header_bh);
907 sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs); 907 sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
908 kunmap_atomic(kaddr, KM_USER0); 908 kunmap_atomic(kaddr);
909 brelse(header_bh); 909 brelse(header_bh);
910 910
911 sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1; 911 sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
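Several sufile hunks map a buffer_head's page and then address the object through bh_offset(), since the buffer may sit anywhere inside the page; the mapping call itself is unchanged by the API switch apart from the dropped slot. A sketch mirroring nilfs_sufile_mod_counter() above:

        kaddr = kmap_atomic(header_bh->b_page);
        header = kaddr + bh_offset(header_bh);
        le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
        kunmap_atomic(kaddr);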
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 0b1e885b8cf8..fa9c05f97af4 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -94,11 +94,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
94 if (file_ofs < init_size) 94 if (file_ofs < init_size)
95 ofs = init_size - file_ofs; 95 ofs = init_size - file_ofs;
96 local_irq_save(flags); 96 local_irq_save(flags);
97 kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); 97 kaddr = kmap_atomic(page);
98 memset(kaddr + bh_offset(bh) + ofs, 0, 98 memset(kaddr + bh_offset(bh) + ofs, 0,
99 bh->b_size - ofs); 99 bh->b_size - ofs);
100 flush_dcache_page(page); 100 flush_dcache_page(page);
101 kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); 101 kunmap_atomic(kaddr);
102 local_irq_restore(flags); 102 local_irq_restore(flags);
103 } 103 }
104 } else { 104 } else {
@@ -147,11 +147,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
147 /* Should have been verified before we got here... */ 147 /* Should have been verified before we got here... */
148 BUG_ON(!recs); 148 BUG_ON(!recs);
149 local_irq_save(flags); 149 local_irq_save(flags);
150 kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ); 150 kaddr = kmap_atomic(page);
151 for (i = 0; i < recs; i++) 151 for (i = 0; i < recs; i++)
152 post_read_mst_fixup((NTFS_RECORD*)(kaddr + 152 post_read_mst_fixup((NTFS_RECORD*)(kaddr +
153 i * rec_size), rec_size); 153 i * rec_size), rec_size);
154 kunmap_atomic(kaddr, KM_BIO_SRC_IRQ); 154 kunmap_atomic(kaddr);
155 local_irq_restore(flags); 155 local_irq_restore(flags);
156 flush_dcache_page(page); 156 flush_dcache_page(page);
157 if (likely(page_uptodate && !PageError(page))) 157 if (likely(page_uptodate && !PageError(page)))
@@ -504,7 +504,7 @@ retry_readpage:
504 /* Race with shrinking truncate. */ 504 /* Race with shrinking truncate. */
505 attr_len = i_size; 505 attr_len = i_size;
506 } 506 }
507 addr = kmap_atomic(page, KM_USER0); 507 addr = kmap_atomic(page);
508 /* Copy the data to the page. */ 508 /* Copy the data to the page. */
509 memcpy(addr, (u8*)ctx->attr + 509 memcpy(addr, (u8*)ctx->attr +
510 le16_to_cpu(ctx->attr->data.resident.value_offset), 510 le16_to_cpu(ctx->attr->data.resident.value_offset),
@@ -512,7 +512,7 @@ retry_readpage:
512 /* Zero the remainder of the page. */ 512 /* Zero the remainder of the page. */
513 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 513 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
514 flush_dcache_page(page); 514 flush_dcache_page(page);
515 kunmap_atomic(addr, KM_USER0); 515 kunmap_atomic(addr);
516 put_unm_err_out: 516 put_unm_err_out:
517 ntfs_attr_put_search_ctx(ctx); 517 ntfs_attr_put_search_ctx(ctx);
518 unm_err_out: 518 unm_err_out:
@@ -746,14 +746,14 @@ lock_retry_remap:
746 unsigned long *bpos, *bend; 746 unsigned long *bpos, *bend;
747 747
748 /* Check if the buffer is zero. */ 748 /* Check if the buffer is zero. */
749 kaddr = kmap_atomic(page, KM_USER0); 749 kaddr = kmap_atomic(page);
750 bpos = (unsigned long *)(kaddr + bh_offset(bh)); 750 bpos = (unsigned long *)(kaddr + bh_offset(bh));
751 bend = (unsigned long *)((u8*)bpos + blocksize); 751 bend = (unsigned long *)((u8*)bpos + blocksize);
752 do { 752 do {
753 if (unlikely(*bpos)) 753 if (unlikely(*bpos))
754 break; 754 break;
755 } while (likely(++bpos < bend)); 755 } while (likely(++bpos < bend));
756 kunmap_atomic(kaddr, KM_USER0); 756 kunmap_atomic(kaddr);
757 if (bpos == bend) { 757 if (bpos == bend) {
758 /* 758 /*
759 * Buffer is zero and sparse, no need to write 759 * Buffer is zero and sparse, no need to write
@@ -1495,14 +1495,14 @@ retry_writepage:
1495 /* Shrinking cannot fail. */ 1495 /* Shrinking cannot fail. */
1496 BUG_ON(err); 1496 BUG_ON(err);
1497 } 1497 }
1498 addr = kmap_atomic(page, KM_USER0); 1498 addr = kmap_atomic(page);
1499 /* Copy the data from the page to the mft record. */ 1499 /* Copy the data from the page to the mft record. */
1500 memcpy((u8*)ctx->attr + 1500 memcpy((u8*)ctx->attr +
1501 le16_to_cpu(ctx->attr->data.resident.value_offset), 1501 le16_to_cpu(ctx->attr->data.resident.value_offset),
1502 addr, attr_len); 1502 addr, attr_len);
1503 /* Zero out of bounds area in the page cache page. */ 1503 /* Zero out of bounds area in the page cache page. */
1504 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len); 1504 memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
1505 kunmap_atomic(addr, KM_USER0); 1505 kunmap_atomic(addr);
1506 flush_dcache_page(page); 1506 flush_dcache_page(page);
1507 flush_dcache_mft_record_page(ctx->ntfs_ino); 1507 flush_dcache_mft_record_page(ctx->ntfs_ino);
1508 /* We are done with the page. */ 1508 /* We are done with the page. */
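ntfs_end_buffer_async_read() above shows the interrupt-context variant: the old API reserved dedicated slot types such as KM_BIO_SRC_IRQ for mappings taken with interrupts disabled, while the stacked API uses the same kmap_atomic() call everywhere. The conversion deliberately leaves the surrounding local_irq_save()/local_irq_restore() pairs in place; whether they are still needed once slots are no longer shared is a separate cleanup. A sketch (flags/off/len hypothetical):

        local_irq_save(flags);
        kaddr = kmap_atomic(page);   /* was kmap_atomic(page, KM_BIO_SRC_IRQ) */
        memset(kaddr + off, 0, len);
        flush_dcache_page(page);
        kunmap_atomic(kaddr);
        local_irq_restore(flags);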
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index e0281992ddc3..a27e3fecefaf 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1656,12 +1656,12 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
1656 attr_size = le32_to_cpu(a->data.resident.value_length); 1656 attr_size = le32_to_cpu(a->data.resident.value_length);
1657 BUG_ON(attr_size != data_size); 1657 BUG_ON(attr_size != data_size);
1658 if (page && !PageUptodate(page)) { 1658 if (page && !PageUptodate(page)) {
1659 kaddr = kmap_atomic(page, KM_USER0); 1659 kaddr = kmap_atomic(page);
1660 memcpy(kaddr, (u8*)a + 1660 memcpy(kaddr, (u8*)a +
1661 le16_to_cpu(a->data.resident.value_offset), 1661 le16_to_cpu(a->data.resident.value_offset),
1662 attr_size); 1662 attr_size);
1663 memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size); 1663 memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
1664 kunmap_atomic(kaddr, KM_USER0); 1664 kunmap_atomic(kaddr);
1665 flush_dcache_page(page); 1665 flush_dcache_page(page);
1666 SetPageUptodate(page); 1666 SetPageUptodate(page);
1667 } 1667 }
@@ -1806,9 +1806,9 @@ undo_err_out:
1806 sizeof(a->data.resident.reserved)); 1806 sizeof(a->data.resident.reserved));
1807 /* Copy the data from the page back to the attribute value. */ 1807 /* Copy the data from the page back to the attribute value. */
1808 if (page) { 1808 if (page) {
1809 kaddr = kmap_atomic(page, KM_USER0); 1809 kaddr = kmap_atomic(page);
1810 memcpy((u8*)a + mp_ofs, kaddr, attr_size); 1810 memcpy((u8*)a + mp_ofs, kaddr, attr_size);
1811 kunmap_atomic(kaddr, KM_USER0); 1811 kunmap_atomic(kaddr);
1812 } 1812 }
1813 /* Setup the allocated size in the ntfs inode in case it changed. */ 1813 /* Setup the allocated size in the ntfs inode in case it changed. */
1814 write_lock_irqsave(&ni->size_lock, flags); 1814 write_lock_irqsave(&ni->size_lock, flags);
@@ -2540,10 +2540,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2540 size = PAGE_CACHE_SIZE; 2540 size = PAGE_CACHE_SIZE;
2541 if (idx == end) 2541 if (idx == end)
2542 size = end_ofs; 2542 size = end_ofs;
2543 kaddr = kmap_atomic(page, KM_USER0); 2543 kaddr = kmap_atomic(page);
2544 memset(kaddr + start_ofs, val, size - start_ofs); 2544 memset(kaddr + start_ofs, val, size - start_ofs);
2545 flush_dcache_page(page); 2545 flush_dcache_page(page);
2546 kunmap_atomic(kaddr, KM_USER0); 2546 kunmap_atomic(kaddr);
2547 set_page_dirty(page); 2547 set_page_dirty(page);
2548 page_cache_release(page); 2548 page_cache_release(page);
2549 balance_dirty_pages_ratelimited(mapping); 2549 balance_dirty_pages_ratelimited(mapping);
@@ -2561,10 +2561,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2561 "page (index 0x%lx).", idx); 2561 "page (index 0x%lx).", idx);
2562 return -ENOMEM; 2562 return -ENOMEM;
2563 } 2563 }
2564 kaddr = kmap_atomic(page, KM_USER0); 2564 kaddr = kmap_atomic(page);
2565 memset(kaddr, val, PAGE_CACHE_SIZE); 2565 memset(kaddr, val, PAGE_CACHE_SIZE);
2566 flush_dcache_page(page); 2566 flush_dcache_page(page);
2567 kunmap_atomic(kaddr, KM_USER0); 2567 kunmap_atomic(kaddr);
2568 /* 2568 /*
2569 * If the page has buffers, mark them uptodate since buffer 2569 * If the page has buffers, mark them uptodate since buffer
2570 * state and not page state is definitive in 2.6 kernels. 2570 * state and not page state is definitive in 2.6 kernels.
@@ -2598,10 +2598,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
2598 "(error, index 0x%lx).", idx); 2598 "(error, index 0x%lx).", idx);
2599 return PTR_ERR(page); 2599 return PTR_ERR(page);
2600 } 2600 }
2601 kaddr = kmap_atomic(page, KM_USER0); 2601 kaddr = kmap_atomic(page);
2602 memset(kaddr, val, end_ofs); 2602 memset(kaddr, val, end_ofs);
2603 flush_dcache_page(page); 2603 flush_dcache_page(page);
2604 kunmap_atomic(kaddr, KM_USER0); 2604 kunmap_atomic(kaddr);
2605 set_page_dirty(page); 2605 set_page_dirty(page);
2606 page_cache_release(page); 2606 page_cache_release(page);
2607 balance_dirty_pages_ratelimited(mapping); 2607 balance_dirty_pages_ratelimited(mapping);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index c587e2d27183..8639169221c7 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -704,7 +704,7 @@ map_buffer_cached:
704 u8 *kaddr; 704 u8 *kaddr;
705 unsigned pofs; 705 unsigned pofs;
706 706
707 kaddr = kmap_atomic(page, KM_USER0); 707 kaddr = kmap_atomic(page);
708 if (bh_pos < pos) { 708 if (bh_pos < pos) {
709 pofs = bh_pos & ~PAGE_CACHE_MASK; 709 pofs = bh_pos & ~PAGE_CACHE_MASK;
710 memset(kaddr + pofs, 0, pos - bh_pos); 710 memset(kaddr + pofs, 0, pos - bh_pos);
@@ -713,7 +713,7 @@ map_buffer_cached:
713 pofs = end & ~PAGE_CACHE_MASK; 713 pofs = end & ~PAGE_CACHE_MASK;
714 memset(kaddr + pofs, 0, bh_end - end); 714 memset(kaddr + pofs, 0, bh_end - end);
715 } 715 }
716 kunmap_atomic(kaddr, KM_USER0); 716 kunmap_atomic(kaddr);
717 flush_dcache_page(page); 717 flush_dcache_page(page);
718 } 718 }
719 continue; 719 continue;
@@ -1287,9 +1287,9 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
1287 len = PAGE_CACHE_SIZE - ofs; 1287 len = PAGE_CACHE_SIZE - ofs;
1288 if (len > bytes) 1288 if (len > bytes)
1289 len = bytes; 1289 len = bytes;
1290 addr = kmap_atomic(*pages, KM_USER0); 1290 addr = kmap_atomic(*pages);
1291 left = __copy_from_user_inatomic(addr + ofs, buf, len); 1291 left = __copy_from_user_inatomic(addr + ofs, buf, len);
1292 kunmap_atomic(addr, KM_USER0); 1292 kunmap_atomic(addr);
1293 if (unlikely(left)) { 1293 if (unlikely(left)) {
1294 /* Do it the slow way. */ 1294 /* Do it the slow way. */
1295 addr = kmap(*pages); 1295 addr = kmap(*pages);
@@ -1401,10 +1401,10 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
1401 len = PAGE_CACHE_SIZE - ofs; 1401 len = PAGE_CACHE_SIZE - ofs;
1402 if (len > bytes) 1402 if (len > bytes)
1403 len = bytes; 1403 len = bytes;
1404 addr = kmap_atomic(*pages, KM_USER0); 1404 addr = kmap_atomic(*pages);
1405 copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs, 1405 copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
1406 *iov, *iov_ofs, len); 1406 *iov, *iov_ofs, len);
1407 kunmap_atomic(addr, KM_USER0); 1407 kunmap_atomic(addr);
1408 if (unlikely(copied != len)) { 1408 if (unlikely(copied != len)) {
1409 /* Do it the slow way. */ 1409 /* Do it the slow way. */
1410 addr = kmap(*pages); 1410 addr = kmap(*pages);
@@ -1691,7 +1691,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
1691 BUG_ON(end > le32_to_cpu(a->length) - 1691 BUG_ON(end > le32_to_cpu(a->length) -
1692 le16_to_cpu(a->data.resident.value_offset)); 1692 le16_to_cpu(a->data.resident.value_offset));
1693 kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset); 1693 kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
1694 kaddr = kmap_atomic(page, KM_USER0); 1694 kaddr = kmap_atomic(page);
1695 /* Copy the received data from the page to the mft record. */ 1695 /* Copy the received data from the page to the mft record. */
1696 memcpy(kattr + pos, kaddr + pos, bytes); 1696 memcpy(kattr + pos, kaddr + pos, bytes);
1697 /* Update the attribute length if necessary. */ 1697 /* Update the attribute length if necessary. */
@@ -1713,7 +1713,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
1713 flush_dcache_page(page); 1713 flush_dcache_page(page);
1714 SetPageUptodate(page); 1714 SetPageUptodate(page);
1715 } 1715 }
1716 kunmap_atomic(kaddr, KM_USER0); 1716 kunmap_atomic(kaddr);
1717 /* Update initialized_size/i_size if necessary. */ 1717 /* Update initialized_size/i_size if necessary. */
1718 read_lock_irqsave(&ni->size_lock, flags); 1718 read_lock_irqsave(&ni->size_lock, flags);
1719 initialized_size = ni->initialized_size; 1719 initialized_size = ni->initialized_size;
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index f907611cca73..28d4e6ab6634 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2473,7 +2473,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2473 nr_free -= PAGE_CACHE_SIZE * 8; 2473 nr_free -= PAGE_CACHE_SIZE * 8;
2474 continue; 2474 continue;
2475 } 2475 }
2476 kaddr = kmap_atomic(page, KM_USER0); 2476 kaddr = kmap_atomic(page);
2477 /* 2477 /*
2478 * Subtract the number of set bits. If this 2478 * Subtract the number of set bits. If this
2479 * is the last page and it is partial we don't really care as 2479 * is the last page and it is partial we don't really care as
@@ -2483,7 +2483,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
2483 */ 2483 */
2484 nr_free -= bitmap_weight(kaddr, 2484 nr_free -= bitmap_weight(kaddr,
2485 PAGE_CACHE_SIZE * BITS_PER_BYTE); 2485 PAGE_CACHE_SIZE * BITS_PER_BYTE);
2486 kunmap_atomic(kaddr, KM_USER0); 2486 kunmap_atomic(kaddr);
2487 page_cache_release(page); 2487 page_cache_release(page);
2488 } 2488 }
2489 ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1); 2489 ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
@@ -2544,7 +2544,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2544 nr_free -= PAGE_CACHE_SIZE * 8; 2544 nr_free -= PAGE_CACHE_SIZE * 8;
2545 continue; 2545 continue;
2546 } 2546 }
2547 kaddr = kmap_atomic(page, KM_USER0); 2547 kaddr = kmap_atomic(page);
2548 /* 2548 /*
2549 * Subtract the number of set bits. If this 2549 * Subtract the number of set bits. If this
2550 * is the last page and it is partial we don't really care as 2550 * is the last page and it is partial we don't really care as
@@ -2554,7 +2554,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
2554 */ 2554 */
2555 nr_free -= bitmap_weight(kaddr, 2555 nr_free -= bitmap_weight(kaddr,
2556 PAGE_CACHE_SIZE * BITS_PER_BYTE); 2556 PAGE_CACHE_SIZE * BITS_PER_BYTE);
2557 kunmap_atomic(kaddr, KM_USER0); 2557 kunmap_atomic(kaddr);
2558 page_cache_release(page); 2558 page_cache_release(page);
2559 } 2559 }
2560 ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.", 2560 ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 78b68af3b0e3..657743254eb9 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -102,7 +102,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
102 * copy, the data is still good. */ 102 * copy, the data is still good. */
103 if (buffer_jbd(buffer_cache_bh) 103 if (buffer_jbd(buffer_cache_bh)
104 && ocfs2_inode_is_new(inode)) { 104 && ocfs2_inode_is_new(inode)) {
105 kaddr = kmap_atomic(bh_result->b_page, KM_USER0); 105 kaddr = kmap_atomic(bh_result->b_page);
106 if (!kaddr) { 106 if (!kaddr) {
107 mlog(ML_ERROR, "couldn't kmap!\n"); 107 mlog(ML_ERROR, "couldn't kmap!\n");
108 goto bail; 108 goto bail;
@@ -110,7 +110,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
110 memcpy(kaddr + (bh_result->b_size * iblock), 110 memcpy(kaddr + (bh_result->b_size * iblock),
111 buffer_cache_bh->b_data, 111 buffer_cache_bh->b_data,
112 bh_result->b_size); 112 bh_result->b_size);
113 kunmap_atomic(kaddr, KM_USER0); 113 kunmap_atomic(kaddr);
114 set_buffer_uptodate(bh_result); 114 set_buffer_uptodate(bh_result);
115 } 115 }
116 brelse(buffer_cache_bh); 116 brelse(buffer_cache_bh);
@@ -236,13 +236,13 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
236 return -EROFS; 236 return -EROFS;
237 } 237 }
238 238
239 kaddr = kmap_atomic(page, KM_USER0); 239 kaddr = kmap_atomic(page);
240 if (size) 240 if (size)
241 memcpy(kaddr, di->id2.i_data.id_data, size); 241 memcpy(kaddr, di->id2.i_data.id_data, size);
242 /* Clear the remaining part of the page */ 242 /* Clear the remaining part of the page */
243 memset(kaddr + size, 0, PAGE_CACHE_SIZE - size); 243 memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
244 flush_dcache_page(page); 244 flush_dcache_page(page);
245 kunmap_atomic(kaddr, KM_USER0); 245 kunmap_atomic(kaddr);
246 246
247 SetPageUptodate(page); 247 SetPageUptodate(page);
248 248
@@ -689,7 +689,7 @@ static void ocfs2_clear_page_regions(struct page *page,
689 689
690 ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end); 690 ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
691 691
692 kaddr = kmap_atomic(page, KM_USER0); 692 kaddr = kmap_atomic(page);
693 693
694 if (from || to) { 694 if (from || to) {
695 if (from > cluster_start) 695 if (from > cluster_start)
@@ -700,7 +700,7 @@ static void ocfs2_clear_page_regions(struct page *page,
700 memset(kaddr + cluster_start, 0, cluster_end - cluster_start); 700 memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
701 } 701 }
702 702
703 kunmap_atomic(kaddr, KM_USER0); 703 kunmap_atomic(kaddr);
704} 704}
705 705
706/* 706/*
@@ -1981,9 +1981,9 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
1981 } 1981 }
1982 } 1982 }
1983 1983
1984 kaddr = kmap_atomic(wc->w_target_page, KM_USER0); 1984 kaddr = kmap_atomic(wc->w_target_page);
1985 memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied); 1985 memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
1986 kunmap_atomic(kaddr, KM_USER0); 1986 kunmap_atomic(kaddr);
1987 1987
1988 trace_ocfs2_write_end_inline( 1988 trace_ocfs2_write_end_inline(
1989 (unsigned long long)OCFS2_I(inode)->ip_blkno, 1989 (unsigned long long)OCFS2_I(inode)->ip_blkno,
diff --git a/fs/pipe.c b/fs/pipe.c
index a932ced92a16..fe0502f9beb2 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -230,7 +230,7 @@ void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
230{ 230{
231 if (atomic) { 231 if (atomic) {
232 buf->flags |= PIPE_BUF_FLAG_ATOMIC; 232 buf->flags |= PIPE_BUF_FLAG_ATOMIC;
233 return kmap_atomic(buf->page, KM_USER0); 233 return kmap_atomic(buf->page);
234 } 234 }
235 235
236 return kmap(buf->page); 236 return kmap(buf->page);
@@ -251,7 +251,7 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
251{ 251{
252 if (buf->flags & PIPE_BUF_FLAG_ATOMIC) { 252 if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
253 buf->flags &= ~PIPE_BUF_FLAG_ATOMIC; 253 buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
254 kunmap_atomic(map_data, KM_USER0); 254 kunmap_atomic(map_data);
255 } else 255 } else
256 kunmap(buf->page); 256 kunmap(buf->page);
257} 257}
@@ -565,14 +565,14 @@ redo1:
565 iov_fault_in_pages_read(iov, chars); 565 iov_fault_in_pages_read(iov, chars);
566redo2: 566redo2:
567 if (atomic) 567 if (atomic)
568 src = kmap_atomic(page, KM_USER0); 568 src = kmap_atomic(page);
569 else 569 else
570 src = kmap(page); 570 src = kmap(page);
571 571
572 error = pipe_iov_copy_from_user(src, iov, chars, 572 error = pipe_iov_copy_from_user(src, iov, chars,
573 atomic); 573 atomic);
574 if (atomic) 574 if (atomic)
575 kunmap_atomic(src, KM_USER0); 575 kunmap_atomic(src);
576 else 576 else
577 kunmap(page); 577 kunmap(page);
578 578
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 313d39d639eb..77df82f9e70a 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1284,12 +1284,12 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
1284 ** -clm 1284 ** -clm
1285 */ 1285 */
1286 1286
1287 data = kmap_atomic(un_bh->b_page, KM_USER0); 1287 data = kmap_atomic(un_bh->b_page);
1288 off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1)); 1288 off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
1289 memcpy(data + off, 1289 memcpy(data + off,
1290 B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih), 1290 B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
1291 ret_value); 1291 ret_value);
1292 kunmap_atomic(data, KM_USER0); 1292 kunmap_atomic(data);
1293 } 1293 }
1294 /* Perform balancing after all resources have been collected at once. */ 1294 /* Perform balancing after all resources have been collected at once. */
1295 do_balance(&s_del_balance, NULL, NULL, M_DELETE); 1295 do_balance(&s_del_balance, NULL, NULL, M_DELETE);
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index d7f6e51bef2a..8f546bd473b8 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -128,9 +128,9 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
128 if (up_to_date_bh) { 128 if (up_to_date_bh) {
129 unsigned pgoff = 129 unsigned pgoff =
130 (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1); 130 (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
131 char *kaddr = kmap_atomic(up_to_date_bh->b_page, KM_USER0); 131 char *kaddr = kmap_atomic(up_to_date_bh->b_page);
132 memset(kaddr + pgoff, 0, blk_size - total_tail); 132 memset(kaddr + pgoff, 0, blk_size - total_tail);
133 kunmap_atomic(kaddr, KM_USER0); 133 kunmap_atomic(kaddr);
134 } 134 }
135 135
136 REISERFS_I(inode)->i_first_direct_byte = U32_MAX; 136 REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
diff --git a/fs/splice.c b/fs/splice.c
index 1ec0493266b3..f16402ed915c 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -737,15 +737,12 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
737 goto out; 737 goto out;
738 738
739 if (buf->page != page) { 739 if (buf->page != page) {
740 /*
741 * Careful, ->map() uses KM_USER0!
742 */
743 char *src = buf->ops->map(pipe, buf, 1); 740 char *src = buf->ops->map(pipe, buf, 1);
744 char *dst = kmap_atomic(page, KM_USER1); 741 char *dst = kmap_atomic(page);
745 742
746 memcpy(dst + offset, src + buf->offset, this_len); 743 memcpy(dst + offset, src + buf->offset, this_len);
747 flush_dcache_page(page); 744 flush_dcache_page(page);
748 kunmap_atomic(dst, KM_USER1); 745 kunmap_atomic(dst);
749 buf->ops->unmap(pipe, buf, src); 746 buf->ops->unmap(pipe, buf, src);
750 } 747 }
751 ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len, 748 ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
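The comment deleted from pipe_to_file() ("Careful, ->map() uses KM_USER0!") existed because two simultaneous atomic mappings previously had to claim distinct fixed slots, KM_USER0 and KM_USER1. With stack-based kmap_atomic() the slots are managed implicitly; the remaining rule is that nested mappings must be released in reverse order. A hedged sketch of that LIFO discipline (the page variables are hypothetical):

    src = kmap_atomic(src_page);    /* pushed first */
    dst = kmap_atomic(dst_page);    /* pushed second */
    memcpy(dst, src, PAGE_SIZE);
    kunmap_atomic(dst);             /* pop in reverse (LIFO) order */
    kunmap_atomic(src);
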
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 38bb1c640559..8ca62c28fe12 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -464,10 +464,10 @@ static int squashfs_readpage(struct file *file, struct page *page)
464 if (PageUptodate(push_page)) 464 if (PageUptodate(push_page))
465 goto skip_page; 465 goto skip_page;
466 466
467 pageaddr = kmap_atomic(push_page, KM_USER0); 467 pageaddr = kmap_atomic(push_page);
468 squashfs_copy_data(pageaddr, buffer, offset, avail); 468 squashfs_copy_data(pageaddr, buffer, offset, avail);
469 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail); 469 memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
470 kunmap_atomic(pageaddr, KM_USER0); 470 kunmap_atomic(pageaddr);
471 flush_dcache_page(push_page); 471 flush_dcache_page(push_page);
472 SetPageUptodate(push_page); 472 SetPageUptodate(push_page);
473skip_page: 473skip_page:
@@ -484,9 +484,9 @@ skip_page:
484error_out: 484error_out:
485 SetPageError(page); 485 SetPageError(page);
486out: 486out:
487 pageaddr = kmap_atomic(page, KM_USER0); 487 pageaddr = kmap_atomic(page);
488 memset(pageaddr, 0, PAGE_CACHE_SIZE); 488 memset(pageaddr, 0, PAGE_CACHE_SIZE);
489 kunmap_atomic(pageaddr, KM_USER0); 489 kunmap_atomic(pageaddr);
490 flush_dcache_page(page); 490 flush_dcache_page(page);
491 if (!PageError(page)) 491 if (!PageError(page))
492 SetPageUptodate(page); 492 SetPageUptodate(page);
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 1191817264cc..12806dffb345 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -90,14 +90,14 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
90 goto error_out; 90 goto error_out;
91 } 91 }
92 92
93 pageaddr = kmap_atomic(page, KM_USER0); 93 pageaddr = kmap_atomic(page);
94 copied = squashfs_copy_data(pageaddr + bytes, entry, offset, 94 copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
95 length - bytes); 95 length - bytes);
96 if (copied == length - bytes) 96 if (copied == length - bytes)
97 memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length); 97 memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
98 else 98 else
99 block = entry->next_index; 99 block = entry->next_index;
100 kunmap_atomic(pageaddr, KM_USER0); 100 kunmap_atomic(pageaddr);
101 squashfs_cache_put(entry); 101 squashfs_cache_put(entry);
102 } 102 }
103 103
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index f9c234bf33d3..5c8f6dc1d28b 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1042,10 +1042,10 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
1042 * the page size, the remaining memory is zeroed when mapped, and 1042 * the page size, the remaining memory is zeroed when mapped, and
1043 * writes to that region are not written out to the file." 1043 * writes to that region are not written out to the file."
1044 */ 1044 */
1045 kaddr = kmap_atomic(page, KM_USER0); 1045 kaddr = kmap_atomic(page);
1046 memset(kaddr + len, 0, PAGE_CACHE_SIZE - len); 1046 memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
1047 flush_dcache_page(page); 1047 flush_dcache_page(page);
1048 kunmap_atomic(kaddr, KM_USER0); 1048 kunmap_atomic(kaddr);
1049 1049
1050 if (i_size > synced_i_size) { 1050 if (i_size > synced_i_size) {
1051 err = inode->i_sb->s_op->write_inode(inode, NULL); 1051 err = inode->i_sb->s_op->write_inode(inode, NULL);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index d567b8448dfc..7f3f7ba3df6e 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -87,10 +87,10 @@ static int udf_adinicb_write_end(struct file *file,
87 char *kaddr; 87 char *kaddr;
88 struct udf_inode_info *iinfo = UDF_I(inode); 88 struct udf_inode_info *iinfo = UDF_I(inode);
89 89
90 kaddr = kmap_atomic(page, KM_USER0); 90 kaddr = kmap_atomic(page);
91 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset, 91 memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
92 kaddr + offset, copied); 92 kaddr + offset, copied);
93 kunmap_atomic(kaddr, KM_USER0); 93 kunmap_atomic(kaddr);
94 94
95 return simple_write_end(file, mapping, pos, len, copied, page, fsdata); 95 return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
96} 96}
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 4fd95a323beb..3744d2a642df 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -25,28 +25,6 @@
25#include <linux/scatterlist.h> 25#include <linux/scatterlist.h>
26#include <linux/sched.h> 26#include <linux/sched.h>
27 27
28static inline enum km_type crypto_kmap_type(int out)
29{
30 enum km_type type;
31
32 if (in_softirq())
33 type = out * (KM_SOFTIRQ1 - KM_SOFTIRQ0) + KM_SOFTIRQ0;
34 else
35 type = out * (KM_USER1 - KM_USER0) + KM_USER0;
36
37 return type;
38}
39
40static inline void *crypto_kmap(struct page *page, int out)
41{
42 return kmap_atomic(page, crypto_kmap_type(out));
43}
44
45static inline void crypto_kunmap(void *vaddr, int out)
46{
47 kunmap_atomic(vaddr, crypto_kmap_type(out));
48}
49
50static inline void crypto_yield(u32 flags) 28static inline void crypto_yield(u32 flags)
51{ 29{
52 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP) 30 if (flags & CRYPTO_TFM_REQ_MAY_SLEEP)
@@ -121,15 +99,15 @@ static inline struct page *scatterwalk_page(struct scatter_walk *walk)
121 return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT); 99 return sg_page(walk->sg) + (walk->offset >> PAGE_SHIFT);
122} 100}
123 101
124static inline void scatterwalk_unmap(void *vaddr, int out) 102static inline void scatterwalk_unmap(void *vaddr)
125{ 103{
126 crypto_kunmap(vaddr, out); 104 kunmap_atomic(vaddr);
127} 105}
128 106
129void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg); 107void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg);
130void scatterwalk_copychunks(void *buf, struct scatter_walk *walk, 108void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
131 size_t nbytes, int out); 109 size_t nbytes, int out);
132void *scatterwalk_map(struct scatter_walk *walk, int out); 110void *scatterwalk_map(struct scatter_walk *walk);
133void scatterwalk_done(struct scatter_walk *walk, int out, int more); 111void scatterwalk_done(struct scatter_walk *walk, int out, int more);
134 112
135void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg, 113void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
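crypto_kmap_type(), crypto_kmap() and crypto_kunmap() existed only to pick a KM_SOFTIRQ or KM_USER slot based on in_softirq(), so once slot selection is automatic they can be deleted outright; scatterwalk_map() and scatterwalk_unmap() correspondingly lose their "out" parameter. A hedged sketch of an updated caller (walk setup elided; process_chunk() is a hypothetical consumer, not a kernel function):

    void *vaddr = scatterwalk_map(&walk);  /* safe in any context now */
    process_chunk(vaddr, nbytes);          /* hypothetical */
    scatterwalk_unmap(vaddr);              /* thin wrapper over kunmap_atomic() */
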
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 129a9c097958..de5422a57511 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -101,10 +101,10 @@ static inline int bio_has_allocated_vec(struct bio *bio)
101 * I/O completely on that queue (see ide-dma for example) 101 * I/O completely on that queue (see ide-dma for example)
102 */ 102 */
103#define __bio_kmap_atomic(bio, idx, kmtype) \ 103#define __bio_kmap_atomic(bio, idx, kmtype) \
104 (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page, kmtype) + \ 104 (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) + \
105 bio_iovec_idx((bio), (idx))->bv_offset) 105 bio_iovec_idx((bio), (idx))->bv_offset)
106 106
107#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr, kmtype) 107#define __bio_kunmap_atomic(addr, kmtype) kunmap_atomic(addr)
108 108
109/* 109/*
110 * merge helpers etc 110 * merge helpers etc
@@ -317,7 +317,7 @@ static inline char *bvec_kmap_irq(struct bio_vec *bvec, unsigned long *flags)
317 * balancing is a lot nicer this way 317 * balancing is a lot nicer this way
318 */ 318 */
319 local_irq_save(*flags); 319 local_irq_save(*flags);
320 addr = (unsigned long) kmap_atomic(bvec->bv_page, KM_BIO_SRC_IRQ); 320 addr = (unsigned long) kmap_atomic(bvec->bv_page);
321 321
322 BUG_ON(addr & ~PAGE_MASK); 322 BUG_ON(addr & ~PAGE_MASK);
323 323
@@ -328,7 +328,7 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
328{ 328{
329 unsigned long ptr = (unsigned long) buffer & PAGE_MASK; 329 unsigned long ptr = (unsigned long) buffer & PAGE_MASK;
330 330
331 kunmap_atomic((void *) ptr, KM_BIO_SRC_IRQ); 331 kunmap_atomic((void *) ptr);
332 local_irq_restore(*flags); 332 local_irq_restore(*flags);
333} 333}
334 334
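Note that __bio_kmap_atomic() and __bio_kunmap_atomic() keep their kmtype parameter so existing callers remain source-compatible; the macros now simply discard it. A caller written against the old API should therefore still compile unchanged (hypothetical usage):

    char *data = __bio_kmap_atomic(bio, 0, KM_USER0); /* kmtype ignored */
    /* ... inspect the first bio segment ... */
    __bio_kunmap_atomic(data, KM_USER0);              /* kmtype ignored */
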
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 3a93f73a8acc..6549ed75e0a7 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -55,12 +55,12 @@ static inline void kunmap(struct page *page)
55{ 55{
56} 56}
57 57
58static inline void *__kmap_atomic(struct page *page) 58static inline void *kmap_atomic(struct page *page)
59{ 59{
60 pagefault_disable(); 60 pagefault_disable();
61 return page_address(page); 61 return page_address(page);
62} 62}
63#define kmap_atomic_prot(page, prot) __kmap_atomic(page) 63#define kmap_atomic_prot(page, prot) kmap_atomic(page)
64 64
65static inline void __kunmap_atomic(void *addr) 65static inline void __kunmap_atomic(void *addr)
66{ 66{
@@ -109,27 +109,62 @@ static inline void kmap_atomic_idx_pop(void)
109#endif 109#endif
110 110
111/* 111/*
112 * Make both: kmap_atomic(page, idx) and kmap_atomic(page) work. 112 * NOTE:
113 * kmap_atomic() and kunmap_atomic() with two arguments are deprecated.
114 * We only keep them for backward compatibility, any usage of them
115 * are now warned.
113 */ 116 */
114#define kmap_atomic(page, args...) __kmap_atomic(page) 117
118#define PASTE(a, b) a ## b
119#define PASTE2(a, b) PASTE(a, b)
120
121#define NARG_(_2, _1, n, ...) n
122#define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)
123
124static inline void __deprecated *kmap_atomic_deprecated(struct page *page,
125 enum km_type km)
126{
127 return kmap_atomic(page);
128}
129
130#define kmap_atomic1(...) kmap_atomic(__VA_ARGS__)
131#define kmap_atomic2(...) kmap_atomic_deprecated(__VA_ARGS__)
132#define kmap_atomic(...) PASTE2(kmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
133
134static inline void __deprecated __kunmap_atomic_deprecated(void *addr,
135 enum km_type km)
136{
137 __kunmap_atomic(addr);
138}
115 139
116/* 140/*
117 * Prevent people trying to call kunmap_atomic() as if it were kunmap() 141 * Prevent people trying to call kunmap_atomic() as if it were kunmap()
118 * kunmap_atomic() should get the return value of kmap_atomic, not the page. 142 * kunmap_atomic() should get the return value of kmap_atomic, not the page.
119 */ 143 */
120#define kunmap_atomic(addr, args...) \ 144#define kunmap_atomic_deprecated(addr, km) \
121do { \ 145do { \
122 BUILD_BUG_ON(__same_type((addr), struct page *)); \ 146 BUILD_BUG_ON(__same_type((addr), struct page *)); \
123 __kunmap_atomic(addr); \ 147 __kunmap_atomic_deprecated(addr, km); \
124} while (0) 148} while (0)
125 149
150#define kunmap_atomic_withcheck(addr) \
151do { \
152 BUILD_BUG_ON(__same_type((addr), struct page *)); \
153 __kunmap_atomic(addr); \
154} while (0)
155
156#define kunmap_atomic1(...) kunmap_atomic_withcheck(__VA_ARGS__)
157#define kunmap_atomic2(...) kunmap_atomic_deprecated(__VA_ARGS__)
158#define kunmap_atomic(...) PASTE2(kunmap_atomic, NARG(__VA_ARGS__)(__VA_ARGS__))
159/**** End of C pre-processor tricks for deprecated macros ****/
160
126/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */ 161/* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
127#ifndef clear_user_highpage 162#ifndef clear_user_highpage
128static inline void clear_user_highpage(struct page *page, unsigned long vaddr) 163static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
129{ 164{
130 void *addr = kmap_atomic(page, KM_USER0); 165 void *addr = kmap_atomic(page);
131 clear_user_page(addr, vaddr, page); 166 clear_user_page(addr, vaddr, page);
132 kunmap_atomic(addr, KM_USER0); 167 kunmap_atomic(addr);
133} 168}
134#endif 169#endif
135 170
@@ -180,16 +215,16 @@ alloc_zeroed_user_highpage_movable(struct vm_area_struct *vma,
180 215
181static inline void clear_highpage(struct page *page) 216static inline void clear_highpage(struct page *page)
182{ 217{
183 void *kaddr = kmap_atomic(page, KM_USER0); 218 void *kaddr = kmap_atomic(page);
184 clear_page(kaddr); 219 clear_page(kaddr);
185 kunmap_atomic(kaddr, KM_USER0); 220 kunmap_atomic(kaddr);
186} 221}
187 222
188static inline void zero_user_segments(struct page *page, 223static inline void zero_user_segments(struct page *page,
189 unsigned start1, unsigned end1, 224 unsigned start1, unsigned end1,
190 unsigned start2, unsigned end2) 225 unsigned start2, unsigned end2)
191{ 226{
192 void *kaddr = kmap_atomic(page, KM_USER0); 227 void *kaddr = kmap_atomic(page);
193 228
194 BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE); 229 BUG_ON(end1 > PAGE_SIZE || end2 > PAGE_SIZE);
195 230
@@ -199,7 +234,7 @@ static inline void zero_user_segments(struct page *page,
199 if (end2 > start2) 234 if (end2 > start2)
200 memset(kaddr + start2, 0, end2 - start2); 235 memset(kaddr + start2, 0, end2 - start2);
201 236
202 kunmap_atomic(kaddr, KM_USER0); 237 kunmap_atomic(kaddr);
203 flush_dcache_page(page); 238 flush_dcache_page(page);
204} 239}
205 240
@@ -228,11 +263,11 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
228{ 263{
229 char *vfrom, *vto; 264 char *vfrom, *vto;
230 265
231 vfrom = kmap_atomic(from, KM_USER0); 266 vfrom = kmap_atomic(from);
232 vto = kmap_atomic(to, KM_USER1); 267 vto = kmap_atomic(to);
233 copy_user_page(vto, vfrom, vaddr, to); 268 copy_user_page(vto, vfrom, vaddr, to);
234 kunmap_atomic(vto, KM_USER1); 269 kunmap_atomic(vto);
235 kunmap_atomic(vfrom, KM_USER0); 270 kunmap_atomic(vfrom);
236} 271}
237 272
238#endif 273#endif
@@ -241,11 +276,11 @@ static inline void copy_highpage(struct page *to, struct page *from)
241{ 276{
242 char *vfrom, *vto; 277 char *vfrom, *vto;
243 278
244 vfrom = kmap_atomic(from, KM_USER0); 279 vfrom = kmap_atomic(from);
245 vto = kmap_atomic(to, KM_USER1); 280 vto = kmap_atomic(to);
246 copy_page(vto, vfrom); 281 copy_page(vto, vfrom);
247 kunmap_atomic(vto, KM_USER1); 282 kunmap_atomic(vto);
248 kunmap_atomic(vfrom, KM_USER0); 283 kunmap_atomic(vfrom);
249} 284}
250 285
251#endif /* _LINUX_HIGHMEM_H */ 286#endif /* _LINUX_HIGHMEM_H */
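The preprocessor shim above is an argument-counting dispatch: NARG() expands to 1 or 2 depending on how many arguments the caller passed, and PASTE2() splices that count onto the macro name, so kmap_atomic(page) resolves to kmap_atomic1() while the deprecated kmap_atomic(page, km) resolves to kmap_atomic2() and lands in the __deprecated helper that raises a build warning. A minimal standalone illustration of the same trick (the demo names are invented for this example, not taken from the patch):

    #include <stdio.h>

    #define PASTE(a, b)  a ## b
    #define PASTE2(a, b) PASTE(a, b)

    /* NARG_ selects its third argument; the extra trailing
     * arguments shift the correct count into that position. */
    #define NARG_(_2, _1, n, ...) n
    #define NARG(...) NARG_(__VA_ARGS__, 2, 1, :)

    #define demo1(x)    printf("one arg: %d\n", (x))
    #define demo2(x, y) printf("two args: %d, %d\n", (x), (y))
    #define demo(...)   PASTE2(demo, NARG(__VA_ARGS__))(__VA_ARGS__)

    int main(void)
    {
        demo(1);     /* expands to demo1(1) */
        demo(1, 2);  /* expands to demo2(1, 2) */
        return 0;
    }
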
diff --git a/kernel/debug/kdb/kdb_support.c b/kernel/debug/kdb/kdb_support.c
index 7d6fb40d2188..d35cc2d3a4cc 100644
--- a/kernel/debug/kdb/kdb_support.c
+++ b/kernel/debug/kdb/kdb_support.c
@@ -384,9 +384,9 @@ static int kdb_getphys(void *res, unsigned long addr, size_t size)
384 if (!pfn_valid(pfn)) 384 if (!pfn_valid(pfn))
385 return 1; 385 return 1;
386 page = pfn_to_page(pfn); 386 page = pfn_to_page(pfn);
387 vaddr = kmap_atomic(page, KM_KDB); 387 vaddr = kmap_atomic(page);
388 memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size); 388 memcpy(res, vaddr + (addr & (PAGE_SIZE - 1)), size);
389 kunmap_atomic(vaddr, KM_KDB); 389 kunmap_atomic(vaddr);
390 390
391 return 0; 391 return 0;
392} 392}
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index 6a768e537001..3a564ac85f36 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1000,20 +1000,20 @@ static void copy_data_page(unsigned long dst_pfn, unsigned long src_pfn)
1000 s_page = pfn_to_page(src_pfn); 1000 s_page = pfn_to_page(src_pfn);
1001 d_page = pfn_to_page(dst_pfn); 1001 d_page = pfn_to_page(dst_pfn);
1002 if (PageHighMem(s_page)) { 1002 if (PageHighMem(s_page)) {
1003 src = kmap_atomic(s_page, KM_USER0); 1003 src = kmap_atomic(s_page);
1004 dst = kmap_atomic(d_page, KM_USER1); 1004 dst = kmap_atomic(d_page);
1005 do_copy_page(dst, src); 1005 do_copy_page(dst, src);
1006 kunmap_atomic(dst, KM_USER1); 1006 kunmap_atomic(dst);
1007 kunmap_atomic(src, KM_USER0); 1007 kunmap_atomic(src);
1008 } else { 1008 } else {
1009 if (PageHighMem(d_page)) { 1009 if (PageHighMem(d_page)) {
1010 /* Page pointed to by src may contain some kernel 1010 /* Page pointed to by src may contain some kernel
1011 * data modified by kmap_atomic() 1011 * data modified by kmap_atomic()
1012 */ 1012 */
1013 safe_copy_page(buffer, s_page); 1013 safe_copy_page(buffer, s_page);
1014 dst = kmap_atomic(d_page, KM_USER0); 1014 dst = kmap_atomic(d_page);
1015 copy_page(dst, buffer); 1015 copy_page(dst, buffer);
1016 kunmap_atomic(dst, KM_USER0); 1016 kunmap_atomic(dst);
1017 } else { 1017 } else {
1018 safe_copy_page(page_address(d_page), s_page); 1018 safe_copy_page(page_address(d_page), s_page);
1019 } 1019 }
@@ -1728,9 +1728,9 @@ int snapshot_read_next(struct snapshot_handle *handle)
1728 */ 1728 */
1729 void *kaddr; 1729 void *kaddr;
1730 1730
1731 kaddr = kmap_atomic(page, KM_USER0); 1731 kaddr = kmap_atomic(page);
1732 copy_page(buffer, kaddr); 1732 copy_page(buffer, kaddr);
1733 kunmap_atomic(kaddr, KM_USER0); 1733 kunmap_atomic(kaddr);
1734 handle->buffer = buffer; 1734 handle->buffer = buffer;
1735 } else { 1735 } else {
1736 handle->buffer = page_address(page); 1736 handle->buffer = page_address(page);
@@ -2014,9 +2014,9 @@ static void copy_last_highmem_page(void)
2014 if (last_highmem_page) { 2014 if (last_highmem_page) {
2015 void *dst; 2015 void *dst;
2016 2016
2017 dst = kmap_atomic(last_highmem_page, KM_USER0); 2017 dst = kmap_atomic(last_highmem_page);
2018 copy_page(dst, buffer); 2018 copy_page(dst, buffer);
2019 kunmap_atomic(dst, KM_USER0); 2019 kunmap_atomic(dst);
2020 last_highmem_page = NULL; 2020 last_highmem_page = NULL;
2021 } 2021 }
2022} 2022}
@@ -2309,13 +2309,13 @@ swap_two_pages_data(struct page *p1, struct page *p2, void *buf)
2309{ 2309{
2310 void *kaddr1, *kaddr2; 2310 void *kaddr1, *kaddr2;
2311 2311
2312 kaddr1 = kmap_atomic(p1, KM_USER0); 2312 kaddr1 = kmap_atomic(p1);
2313 kaddr2 = kmap_atomic(p2, KM_USER1); 2313 kaddr2 = kmap_atomic(p2);
2314 copy_page(buf, kaddr1); 2314 copy_page(buf, kaddr1);
2315 copy_page(kaddr1, kaddr2); 2315 copy_page(kaddr1, kaddr2);
2316 copy_page(kaddr2, buf); 2316 copy_page(kaddr2, buf);
2317 kunmap_atomic(kaddr2, KM_USER1); 2317 kunmap_atomic(kaddr2);
2318 kunmap_atomic(kaddr1, KM_USER0); 2318 kunmap_atomic(kaddr1);
2319} 2319}
2320 2320
2321/** 2321/**
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 4ceb05d772ae..33b2cbb97380 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -390,7 +390,7 @@ bool sg_miter_next(struct sg_mapping_iter *miter)
390 miter->consumed = miter->length; 390 miter->consumed = miter->length;
391 391
392 if (miter->__flags & SG_MITER_ATOMIC) 392 if (miter->__flags & SG_MITER_ATOMIC)
393 miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off; 393 miter->addr = kmap_atomic(miter->page) + off;
394 else 394 else
395 miter->addr = kmap(miter->page) + off; 395 miter->addr = kmap(miter->page) + off;
396 396
@@ -424,7 +424,7 @@ void sg_miter_stop(struct sg_mapping_iter *miter)
424 424
425 if (miter->__flags & SG_MITER_ATOMIC) { 425 if (miter->__flags & SG_MITER_ATOMIC) {
426 WARN_ON(!irqs_disabled()); 426 WARN_ON(!irqs_disabled());
427 kunmap_atomic(miter->addr, KM_BIO_SRC_IRQ); 427 kunmap_atomic(miter->addr);
428 } else 428 } else
429 kunmap(miter->page); 429 kunmap(miter->page);
430 430
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 058935ef3975..d0f6315f4a24 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -349,13 +349,12 @@ void swiotlb_bounce(phys_addr_t phys, char *dma_addr, size_t size,
349 sz = min_t(size_t, PAGE_SIZE - offset, size); 349 sz = min_t(size_t, PAGE_SIZE - offset, size);
350 350
351 local_irq_save(flags); 351 local_irq_save(flags);
352 buffer = kmap_atomic(pfn_to_page(pfn), 352 buffer = kmap_atomic(pfn_to_page(pfn));
353 KM_BOUNCE_READ);
354 if (dir == DMA_TO_DEVICE) 353 if (dir == DMA_TO_DEVICE)
355 memcpy(dma_addr, buffer + offset, sz); 354 memcpy(dma_addr, buffer + offset, sz);
356 else 355 else
357 memcpy(buffer + offset, dma_addr, sz); 356 memcpy(buffer + offset, dma_addr, sz);
358 kunmap_atomic(buffer, KM_BOUNCE_READ); 357 kunmap_atomic(buffer);
359 local_irq_restore(flags); 358 local_irq_restore(flags);
360 359
361 size -= sz; 360 size -= sz;
diff --git a/mm/bounce.c b/mm/bounce.c
index 4e9ae722af83..d1be02ca1889 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -50,9 +50,9 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
50 unsigned char *vto; 50 unsigned char *vto;
51 51
52 local_irq_save(flags); 52 local_irq_save(flags);
53 vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ); 53 vto = kmap_atomic(to->bv_page);
54 memcpy(vto + to->bv_offset, vfrom, to->bv_len); 54 memcpy(vto + to->bv_offset, vfrom, to->bv_len);
55 kunmap_atomic(vto, KM_BOUNCE_READ); 55 kunmap_atomic(vto);
56 local_irq_restore(flags); 56 local_irq_restore(flags);
57} 57}
58 58
diff --git a/mm/filemap.c b/mm/filemap.c
index b66275757c28..2f8165075a5a 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1318,10 +1318,10 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
1318 * taking the kmap. 1318 * taking the kmap.
1319 */ 1319 */
1320 if (!fault_in_pages_writeable(desc->arg.buf, size)) { 1320 if (!fault_in_pages_writeable(desc->arg.buf, size)) {
1321 kaddr = kmap_atomic(page, KM_USER0); 1321 kaddr = kmap_atomic(page);
1322 left = __copy_to_user_inatomic(desc->arg.buf, 1322 left = __copy_to_user_inatomic(desc->arg.buf,
1323 kaddr + offset, size); 1323 kaddr + offset, size);
1324 kunmap_atomic(kaddr, KM_USER0); 1324 kunmap_atomic(kaddr);
1325 if (left == 0) 1325 if (left == 0)
1326 goto success; 1326 goto success;
1327 } 1327 }
@@ -2045,7 +2045,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
2045 size_t copied; 2045 size_t copied;
2046 2046
2047 BUG_ON(!in_atomic()); 2047 BUG_ON(!in_atomic());
2048 kaddr = kmap_atomic(page, KM_USER0); 2048 kaddr = kmap_atomic(page);
2049 if (likely(i->nr_segs == 1)) { 2049 if (likely(i->nr_segs == 1)) {
2050 int left; 2050 int left;
2051 char __user *buf = i->iov->iov_base + i->iov_offset; 2051 char __user *buf = i->iov->iov_base + i->iov_offset;
@@ -2055,7 +2055,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
2055 copied = __iovec_copy_from_user_inatomic(kaddr + offset, 2055 copied = __iovec_copy_from_user_inatomic(kaddr + offset,
2056 i->iov, i->iov_offset, bytes); 2056 i->iov, i->iov_offset, bytes);
2057 } 2057 }
2058 kunmap_atomic(kaddr, KM_USER0); 2058 kunmap_atomic(kaddr);
2059 2059
2060 return copied; 2060 return copied;
2061} 2061}
diff --git a/mm/ksm.c b/mm/ksm.c
index 310544a379ae..a6d3fb7e6c10 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -672,9 +672,9 @@ error:
672static u32 calc_checksum(struct page *page) 672static u32 calc_checksum(struct page *page)
673{ 673{
674 u32 checksum; 674 u32 checksum;
675 void *addr = kmap_atomic(page, KM_USER0); 675 void *addr = kmap_atomic(page);
676 checksum = jhash2(addr, PAGE_SIZE / 4, 17); 676 checksum = jhash2(addr, PAGE_SIZE / 4, 17);
677 kunmap_atomic(addr, KM_USER0); 677 kunmap_atomic(addr);
678 return checksum; 678 return checksum;
679} 679}
680 680
@@ -683,11 +683,11 @@ static int memcmp_pages(struct page *page1, struct page *page2)
683 char *addr1, *addr2; 683 char *addr1, *addr2;
684 int ret; 684 int ret;
685 685
686 addr1 = kmap_atomic(page1, KM_USER0); 686 addr1 = kmap_atomic(page1);
687 addr2 = kmap_atomic(page2, KM_USER1); 687 addr2 = kmap_atomic(page2);
688 ret = memcmp(addr1, addr2, PAGE_SIZE); 688 ret = memcmp(addr1, addr2, PAGE_SIZE);
689 kunmap_atomic(addr2, KM_USER1); 689 kunmap_atomic(addr2);
690 kunmap_atomic(addr1, KM_USER0); 690 kunmap_atomic(addr1);
691 return ret; 691 return ret;
692} 692}
693 693
diff --git a/mm/memory.c b/mm/memory.c
index fa2f04e0337c..347e5fad1cfa 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2447,7 +2447,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
2447 * fails, we just zero-fill it. Live with it. 2447 * fails, we just zero-fill it. Live with it.
2448 */ 2448 */
2449 if (unlikely(!src)) { 2449 if (unlikely(!src)) {
2450 void *kaddr = kmap_atomic(dst, KM_USER0); 2450 void *kaddr = kmap_atomic(dst);
2451 void __user *uaddr = (void __user *)(va & PAGE_MASK); 2451 void __user *uaddr = (void __user *)(va & PAGE_MASK);
2452 2452
2453 /* 2453 /*
@@ -2458,7 +2458,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
2458 */ 2458 */
2459 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) 2459 if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
2460 clear_page(kaddr); 2460 clear_page(kaddr);
2461 kunmap_atomic(kaddr, KM_USER0); 2461 kunmap_atomic(kaddr);
2462 flush_dcache_page(dst); 2462 flush_dcache_page(dst);
2463 } else 2463 } else
2464 copy_user_highpage(dst, src, va, vma); 2464 copy_user_highpage(dst, src, va, vma);
diff --git a/mm/shmem.c b/mm/shmem.c
index 269d049294ab..b7e195571862 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1656,9 +1656,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
1656 } 1656 }
1657 inode->i_mapping->a_ops = &shmem_aops; 1657 inode->i_mapping->a_ops = &shmem_aops;
1658 inode->i_op = &shmem_symlink_inode_operations; 1658 inode->i_op = &shmem_symlink_inode_operations;
1659 kaddr = kmap_atomic(page, KM_USER0); 1659 kaddr = kmap_atomic(page);
1660 memcpy(kaddr, symname, len); 1660 memcpy(kaddr, symname, len);
1661 kunmap_atomic(kaddr, KM_USER0); 1661 kunmap_atomic(kaddr);
1662 set_page_dirty(page); 1662 set_page_dirty(page);
1663 unlock_page(page); 1663 unlock_page(page);
1664 page_cache_release(page); 1664 page_cache_release(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d999f090dfda..00a962caab1a 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2427,9 +2427,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
2427 if (!(count & COUNT_CONTINUED)) 2427 if (!(count & COUNT_CONTINUED))
2428 goto out; 2428 goto out;
2429 2429
2430 map = kmap_atomic(list_page, KM_USER0) + offset; 2430 map = kmap_atomic(list_page) + offset;
2431 count = *map; 2431 count = *map;
2432 kunmap_atomic(map, KM_USER0); 2432 kunmap_atomic(map);
2433 2433
2434 /* 2434 /*
2435 * If this continuation count now has some space in it, 2435 * If this continuation count now has some space in it,
@@ -2472,7 +2472,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
2472 2472
2473 offset &= ~PAGE_MASK; 2473 offset &= ~PAGE_MASK;
2474 page = list_entry(head->lru.next, struct page, lru); 2474 page = list_entry(head->lru.next, struct page, lru);
2475 map = kmap_atomic(page, KM_USER0) + offset; 2475 map = kmap_atomic(page) + offset;
2476 2476
2477 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ 2477 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */
2478 goto init_map; /* jump over SWAP_CONT_MAX checks */ 2478 goto init_map; /* jump over SWAP_CONT_MAX checks */
@@ -2482,26 +2482,26 @@ static bool swap_count_continued(struct swap_info_struct *si,
2482 * Think of how you add 1 to 999 2482 * Think of how you add 1 to 999
2483 */ 2483 */
2484 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { 2484 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
2485 kunmap_atomic(map, KM_USER0); 2485 kunmap_atomic(map);
2486 page = list_entry(page->lru.next, struct page, lru); 2486 page = list_entry(page->lru.next, struct page, lru);
2487 BUG_ON(page == head); 2487 BUG_ON(page == head);
2488 map = kmap_atomic(page, KM_USER0) + offset; 2488 map = kmap_atomic(page) + offset;
2489 } 2489 }
2490 if (*map == SWAP_CONT_MAX) { 2490 if (*map == SWAP_CONT_MAX) {
2491 kunmap_atomic(map, KM_USER0); 2491 kunmap_atomic(map);
2492 page = list_entry(page->lru.next, struct page, lru); 2492 page = list_entry(page->lru.next, struct page, lru);
2493 if (page == head) 2493 if (page == head)
2494 return false; /* add count continuation */ 2494 return false; /* add count continuation */
2495 map = kmap_atomic(page, KM_USER0) + offset; 2495 map = kmap_atomic(page) + offset;
2496init_map: *map = 0; /* we didn't zero the page */ 2496init_map: *map = 0; /* we didn't zero the page */
2497 } 2497 }
2498 *map += 1; 2498 *map += 1;
2499 kunmap_atomic(map, KM_USER0); 2499 kunmap_atomic(map);
2500 page = list_entry(page->lru.prev, struct page, lru); 2500 page = list_entry(page->lru.prev, struct page, lru);
2501 while (page != head) { 2501 while (page != head) {
2502 map = kmap_atomic(page, KM_USER0) + offset; 2502 map = kmap_atomic(page) + offset;
2503 *map = COUNT_CONTINUED; 2503 *map = COUNT_CONTINUED;
2504 kunmap_atomic(map, KM_USER0); 2504 kunmap_atomic(map);
2505 page = list_entry(page->lru.prev, struct page, lru); 2505 page = list_entry(page->lru.prev, struct page, lru);
2506 } 2506 }
2507 return true; /* incremented */ 2507 return true; /* incremented */
@@ -2512,22 +2512,22 @@ init_map: *map = 0; /* we didn't zero the page */
2512 */ 2512 */
2513 BUG_ON(count != COUNT_CONTINUED); 2513 BUG_ON(count != COUNT_CONTINUED);
2514 while (*map == COUNT_CONTINUED) { 2514 while (*map == COUNT_CONTINUED) {
2515 kunmap_atomic(map, KM_USER0); 2515 kunmap_atomic(map);
2516 page = list_entry(page->lru.next, struct page, lru); 2516 page = list_entry(page->lru.next, struct page, lru);
2517 BUG_ON(page == head); 2517 BUG_ON(page == head);
2518 map = kmap_atomic(page, KM_USER0) + offset; 2518 map = kmap_atomic(page) + offset;
2519 } 2519 }
2520 BUG_ON(*map == 0); 2520 BUG_ON(*map == 0);
2521 *map -= 1; 2521 *map -= 1;
2522 if (*map == 0) 2522 if (*map == 0)
2523 count = 0; 2523 count = 0;
2524 kunmap_atomic(map, KM_USER0); 2524 kunmap_atomic(map);
2525 page = list_entry(page->lru.prev, struct page, lru); 2525 page = list_entry(page->lru.prev, struct page, lru);
2526 while (page != head) { 2526 while (page != head) {
2527 map = kmap_atomic(page, KM_USER0) + offset; 2527 map = kmap_atomic(page) + offset;
2528 *map = SWAP_CONT_MAX | count; 2528 *map = SWAP_CONT_MAX | count;
2529 count = COUNT_CONTINUED; 2529 count = COUNT_CONTINUED;
2530 kunmap_atomic(map, KM_USER0); 2530 kunmap_atomic(map);
2531 page = list_entry(page->lru.prev, struct page, lru); 2531 page = list_entry(page->lru.prev, struct page, lru);
2532 } 2532 }
2533 return count == COUNT_CONTINUED; 2533 return count == COUNT_CONTINUED;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 86ce9a526c17..94dff883b449 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1906,9 +1906,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
1906 * we can expect USER0 is not used (see vread/vwrite's 1906 * we can expect USER0 is not used (see vread/vwrite's
1907 * function description) 1907 * function description)
1908 */ 1908 */
1909 void *map = kmap_atomic(p, KM_USER0); 1909 void *map = kmap_atomic(p);
1910 memcpy(buf, map + offset, length); 1910 memcpy(buf, map + offset, length);
1911 kunmap_atomic(map, KM_USER0); 1911 kunmap_atomic(map);
1912 } else 1912 } else
1913 memset(buf, 0, length); 1913 memset(buf, 0, length);
1914 1914
@@ -1945,9 +1945,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
1945 * we can expect USER0 is not used (see vread/vwrite's 1945 * we can expect USER0 is not used (see vread/vwrite's
1946 * function description) 1946 * function description)
1947 */ 1947 */
1948 void *map = kmap_atomic(p, KM_USER0); 1948 void *map = kmap_atomic(p);
1949 memcpy(map + offset, buf, length); 1949 memcpy(map + offset, buf, length);
1950 kunmap_atomic(map, KM_USER0); 1950 kunmap_atomic(map);
1951 } 1951 }
1952 addr += length; 1952 addr += length;
1953 buf += length; 1953 buf += length;
diff --git a/net/core/kmap_skb.h b/net/core/kmap_skb.h
index 81e1ed7c8383..52d0a4459041 100644
--- a/net/core/kmap_skb.h
+++ b/net/core/kmap_skb.h
@@ -7,12 +7,12 @@ static inline void *kmap_skb_frag(const skb_frag_t *frag)
7 7
8 local_bh_disable(); 8 local_bh_disable();
9#endif 9#endif
10 return kmap_atomic(skb_frag_page(frag), KM_SKB_DATA_SOFTIRQ); 10 return kmap_atomic(skb_frag_page(frag));
11} 11}
12 12
13static inline void kunmap_skb_frag(void *vaddr) 13static inline void kunmap_skb_frag(void *vaddr)
14{ 14{
15 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ); 15 kunmap_atomic(vaddr);
16#ifdef CONFIG_HIGHMEM 16#ifdef CONFIG_HIGHMEM
17 local_bh_enable(); 17 local_bh_enable();
18#endif 18#endif
diff --git a/net/rds/ib_recv.c b/net/rds/ib_recv.c
index 8d6d9702d1ec..8d194912c695 100644
--- a/net/rds/ib_recv.c
+++ b/net/rds/ib_recv.c
@@ -763,7 +763,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
763 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); 763 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
764 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */ 764 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
765 765
766 addr = kmap_atomic(sg_page(&frag->f_sg), KM_SOFTIRQ0); 766 addr = kmap_atomic(sg_page(&frag->f_sg));
767 767
768 src = addr + frag_off; 768 src = addr + frag_off;
769 dst = (void *)map->m_page_addrs[map_page] + map_off; 769 dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -773,7 +773,7 @@ static void rds_ib_cong_recv(struct rds_connection *conn,
773 uncongested |= ~(*src) & *dst; 773 uncongested |= ~(*src) & *dst;
774 *dst++ = *src++; 774 *dst++ = *src++;
775 } 775 }
776 kunmap_atomic(addr, KM_SOFTIRQ0); 776 kunmap_atomic(addr);
777 777
778 copied += to_copy; 778 copied += to_copy;
779 779
@@ -919,8 +919,7 @@ static void rds_ib_process_recv(struct rds_connection *conn,
919 rds_ib_cong_recv(conn, ibinc); 919 rds_ib_cong_recv(conn, ibinc);
920 else { 920 else {
921 rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr, 921 rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
922 &ibinc->ii_inc, GFP_ATOMIC, 922 &ibinc->ii_inc, GFP_ATOMIC);
923 KM_SOFTIRQ0);
924 state->ack_next = be64_to_cpu(hdr->h_sequence); 923 state->ack_next = be64_to_cpu(hdr->h_sequence);
925 state->ack_next_valid = 1; 924 state->ack_next_valid = 1;
926 } 925 }
diff --git a/net/rds/info.c b/net/rds/info.c
index f1c016c4146e..9a6b4f66187c 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -104,7 +104,7 @@ EXPORT_SYMBOL_GPL(rds_info_deregister_func);
104void rds_info_iter_unmap(struct rds_info_iterator *iter) 104void rds_info_iter_unmap(struct rds_info_iterator *iter)
105{ 105{
106 if (iter->addr) { 106 if (iter->addr) {
107 kunmap_atomic(iter->addr, KM_USER0); 107 kunmap_atomic(iter->addr);
108 iter->addr = NULL; 108 iter->addr = NULL;
109 } 109 }
110} 110}
@@ -119,7 +119,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
119 119
120 while (bytes) { 120 while (bytes) {
121 if (!iter->addr) 121 if (!iter->addr)
122 iter->addr = kmap_atomic(*iter->pages, KM_USER0); 122 iter->addr = kmap_atomic(*iter->pages);
123 123
124 this = min(bytes, PAGE_SIZE - iter->offset); 124 this = min(bytes, PAGE_SIZE - iter->offset);
125 125
@@ -134,7 +134,7 @@ void rds_info_copy(struct rds_info_iterator *iter, void *data,
134 iter->offset += this; 134 iter->offset += this;
135 135
136 if (iter->offset == PAGE_SIZE) { 136 if (iter->offset == PAGE_SIZE) {
137 kunmap_atomic(iter->addr, KM_USER0); 137 kunmap_atomic(iter->addr);
138 iter->addr = NULL; 138 iter->addr = NULL;
139 iter->offset = 0; 139 iter->offset = 0;
140 iter->pages++; 140 iter->pages++;
diff --git a/net/rds/iw_recv.c b/net/rds/iw_recv.c
index 3c8717005018..45033358358e 100644
--- a/net/rds/iw_recv.c
+++ b/net/rds/iw_recv.c
@@ -598,7 +598,7 @@ static void rds_iw_cong_recv(struct rds_connection *conn,
598 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off); 598 to_copy = min(RDS_FRAG_SIZE - frag_off, PAGE_SIZE - map_off);
599 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */ 599 BUG_ON(to_copy & 7); /* Must be 64bit aligned. */
600 600
601 addr = kmap_atomic(frag->f_page, KM_SOFTIRQ0); 601 addr = kmap_atomic(frag->f_page);
602 602
603 src = addr + frag_off; 603 src = addr + frag_off;
604 dst = (void *)map->m_page_addrs[map_page] + map_off; 604 dst = (void *)map->m_page_addrs[map_page] + map_off;
@@ -608,7 +608,7 @@ static void rds_iw_cong_recv(struct rds_connection *conn,
608 uncongested |= ~(*src) & *dst; 608 uncongested |= ~(*src) & *dst;
609 *dst++ = *src++; 609 *dst++ = *src++;
610 } 610 }
611 kunmap_atomic(addr, KM_SOFTIRQ0); 611 kunmap_atomic(addr);
612 612
613 copied += to_copy; 613 copied += to_copy;
614 614
@@ -754,8 +754,7 @@ static void rds_iw_process_recv(struct rds_connection *conn,
754 rds_iw_cong_recv(conn, iwinc); 754 rds_iw_cong_recv(conn, iwinc);
755 else { 755 else {
756 rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr, 756 rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
757 &iwinc->ii_inc, GFP_ATOMIC, 757 &iwinc->ii_inc, GFP_ATOMIC);
758 KM_SOFTIRQ0);
759 state->ack_next = be64_to_cpu(hdr->h_sequence); 758 state->ack_next = be64_to_cpu(hdr->h_sequence);
760 state->ack_next_valid = 1; 759 state->ack_next_valid = 1;
761 } 760 }
diff --git a/net/rds/loop.c b/net/rds/loop.c
index bca6761a3ca2..87ff2a8a454b 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -79,7 +79,7 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm,
79 rds_message_addref(rm); 79 rds_message_addref(rm);
80 80
81 rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc, 81 rds_recv_incoming(conn, conn->c_laddr, conn->c_faddr, &rm->m_inc,
82 GFP_KERNEL, KM_USER0); 82 GFP_KERNEL);
83 83
84 rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence), 84 rds_send_drop_acked(conn, be64_to_cpu(rm->m_inc.i_hdr.h_sequence),
85 NULL); 85 NULL);
diff --git a/net/rds/rds.h b/net/rds/rds.h
index 7eaba1831f0d..ec1d731ecff0 100644
--- a/net/rds/rds.h
+++ b/net/rds/rds.h
@@ -704,7 +704,7 @@ void rds_inc_init(struct rds_incoming *inc, struct rds_connection *conn,
704 __be32 saddr); 704 __be32 saddr);
705void rds_inc_put(struct rds_incoming *inc); 705void rds_inc_put(struct rds_incoming *inc);
706void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, 706void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
707 struct rds_incoming *inc, gfp_t gfp, enum km_type km); 707 struct rds_incoming *inc, gfp_t gfp);
708int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, 708int rds_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg,
709 size_t size, int msg_flags); 709 size_t size, int msg_flags);
710void rds_clear_recv_queue(struct rds_sock *rs); 710void rds_clear_recv_queue(struct rds_sock *rs);
diff --git a/net/rds/recv.c b/net/rds/recv.c
index bc3f8cd6d070..5c6e9f132026 100644
--- a/net/rds/recv.c
+++ b/net/rds/recv.c
@@ -155,7 +155,7 @@ static void rds_recv_incoming_exthdrs(struct rds_incoming *inc, struct rds_sock
155 * tell us which roles the addrs in the conn are playing for this message. 155 * tell us which roles the addrs in the conn are playing for this message.
156 */ 156 */
157void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr, 157void rds_recv_incoming(struct rds_connection *conn, __be32 saddr, __be32 daddr,
158 struct rds_incoming *inc, gfp_t gfp, enum km_type km) 158 struct rds_incoming *inc, gfp_t gfp)
159{ 159{
160 struct rds_sock *rs = NULL; 160 struct rds_sock *rs = NULL;
161 struct sock *sk; 161 struct sock *sk;
diff --git a/net/rds/tcp_recv.c b/net/rds/tcp_recv.c
index 78205e25500a..6243258f840f 100644
--- a/net/rds/tcp_recv.c
+++ b/net/rds/tcp_recv.c
@@ -169,7 +169,6 @@ static void rds_tcp_cong_recv(struct rds_connection *conn,
169struct rds_tcp_desc_arg { 169struct rds_tcp_desc_arg {
170 struct rds_connection *conn; 170 struct rds_connection *conn;
171 gfp_t gfp; 171 gfp_t gfp;
172 enum km_type km;
173}; 172};
174 173
175static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb, 174static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
@@ -255,7 +254,7 @@ static int rds_tcp_data_recv(read_descriptor_t *desc, struct sk_buff *skb,
255 else 254 else
256 rds_recv_incoming(conn, conn->c_faddr, 255 rds_recv_incoming(conn, conn->c_faddr,
257 conn->c_laddr, &tinc->ti_inc, 256 conn->c_laddr, &tinc->ti_inc,
258 arg->gfp, arg->km); 257 arg->gfp);
259 258
260 tc->t_tinc_hdr_rem = sizeof(struct rds_header); 259 tc->t_tinc_hdr_rem = sizeof(struct rds_header);
261 tc->t_tinc_data_rem = 0; 260 tc->t_tinc_data_rem = 0;
@@ -272,8 +271,7 @@ out:
272} 271}
273 272
274/* the caller has to hold the sock lock */ 273/* the caller has to hold the sock lock */
275static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp, 274static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp)
276 enum km_type km)
277{ 275{
278 struct rds_tcp_connection *tc = conn->c_transport_data; 276 struct rds_tcp_connection *tc = conn->c_transport_data;
279 struct socket *sock = tc->t_sock; 277 struct socket *sock = tc->t_sock;
@@ -283,7 +281,6 @@ static int rds_tcp_read_sock(struct rds_connection *conn, gfp_t gfp,
283 /* It's like glib in the kernel! */ 281 /* It's like glib in the kernel! */
284 arg.conn = conn; 282 arg.conn = conn;
285 arg.gfp = gfp; 283 arg.gfp = gfp;
286 arg.km = km;
287 desc.arg.data = &arg; 284 desc.arg.data = &arg;
288 desc.error = 0; 285 desc.error = 0;
289 desc.count = 1; /* give more than one skb per call */ 286 desc.count = 1; /* give more than one skb per call */
@@ -311,7 +308,7 @@ int rds_tcp_recv(struct rds_connection *conn)
311 rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock); 308 rdsdebug("recv worker conn %p tc %p sock %p\n", conn, tc, sock);
312 309
313 lock_sock(sock->sk); 310 lock_sock(sock->sk);
314 ret = rds_tcp_read_sock(conn, GFP_KERNEL, KM_USER0); 311 ret = rds_tcp_read_sock(conn, GFP_KERNEL);
315 release_sock(sock->sk); 312 release_sock(sock->sk);
316 313
317 return ret; 314 return ret;
@@ -336,7 +333,7 @@ void rds_tcp_data_ready(struct sock *sk, int bytes)
336 ready = tc->t_orig_data_ready; 333 ready = tc->t_orig_data_ready;
337 rds_tcp_stats_inc(s_tcp_data_ready_calls); 334 rds_tcp_stats_inc(s_tcp_data_ready_calls);
338 335
339 if (rds_tcp_read_sock(conn, GFP_ATOMIC, KM_SOFTIRQ0) == -ENOMEM) 336 if (rds_tcp_read_sock(conn, GFP_ATOMIC) == -ENOMEM)
340 queue_delayed_work(rds_wq, &conn->c_recv_w, 0); 337 queue_delayed_work(rds_wq, &conn->c_recv_w, 0);
341out: 338out:
342 read_unlock_bh(&sk->sk_callback_lock); 339 read_unlock_bh(&sk->sk_callback_lock);
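In RDS the km_type was threaded through the entire receive path -- rds_recv_incoming(), rds_tcp_read_sock(), and the rds_tcp_desc_arg carrier struct -- solely so that softirq-context receivers could request a KM_SOFTIRQ slot. With slot management implicit, the parameter disappears from every layer and callers pass just the gfp flags, as in this sketch of the updated call (shape as in the patch, surrounding code abbreviated):

    rds_recv_incoming(conn, conn->c_faddr, conn->c_laddr,
                      &tinc->ti_inc, arg->gfp);  /* km_type argument gone */
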
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 2763e3e48db4..38f388c39dce 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -82,9 +82,9 @@ gss_krb5_remove_padding(struct xdr_buf *buf, int blocksize)
82 >>PAGE_CACHE_SHIFT; 82 >>PAGE_CACHE_SHIFT;
83 unsigned int offset = (buf->page_base + len - 1) 83 unsigned int offset = (buf->page_base + len - 1)
84 & (PAGE_CACHE_SIZE - 1); 84 & (PAGE_CACHE_SIZE - 1);
85 ptr = kmap_atomic(buf->pages[last], KM_USER0); 85 ptr = kmap_atomic(buf->pages[last]);
86 pad = *(ptr + offset); 86 pad = *(ptr + offset);
87 kunmap_atomic(ptr, KM_USER0); 87 kunmap_atomic(ptr);
88 goto out; 88 goto out;
89 } else 89 } else
90 len -= buf->page_len; 90 len -= buf->page_len;
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 145e6784f508..0a648c502fc3 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -114,7 +114,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
114 } 114 }
115 115
116 len = PAGE_CACHE_SIZE; 116 len = PAGE_CACHE_SIZE;
117 kaddr = kmap_atomic(*ppage, KM_SKB_SUNRPC_DATA); 117 kaddr = kmap_atomic(*ppage);
118 if (base) { 118 if (base) {
119 len -= base; 119 len -= base;
120 if (pglen < len) 120 if (pglen < len)
@@ -127,7 +127,7 @@ ssize_t xdr_partial_copy_from_skb(struct xdr_buf *xdr, unsigned int base, struct
127 ret = copy_actor(desc, kaddr, len); 127 ret = copy_actor(desc, kaddr, len);
128 } 128 }
129 flush_dcache_page(*ppage); 129 flush_dcache_page(*ppage);
130 kunmap_atomic(kaddr, KM_SKB_SUNRPC_DATA); 130 kunmap_atomic(kaddr);
131 copied += ret; 131 copied += ret;
132 if (ret != len || !desc->count) 132 if (ret != len || !desc->count)
133 goto out; 133 goto out;
diff --git a/net/sunrpc/xdr.c b/net/sunrpc/xdr.c
index 593f4c605305..b97a3dd9a60a 100644
--- a/net/sunrpc/xdr.c
+++ b/net/sunrpc/xdr.c
@@ -122,9 +122,9 @@ xdr_terminate_string(struct xdr_buf *buf, const u32 len)
122{ 122{
123 char *kaddr; 123 char *kaddr;
124 124
125 kaddr = kmap_atomic(buf->pages[0], KM_USER0); 125 kaddr = kmap_atomic(buf->pages[0]);
126 kaddr[buf->page_base + len] = '\0'; 126 kaddr[buf->page_base + len] = '\0';
127 kunmap_atomic(kaddr, KM_USER0); 127 kunmap_atomic(kaddr);
128} 128}
129EXPORT_SYMBOL_GPL(xdr_terminate_string); 129EXPORT_SYMBOL_GPL(xdr_terminate_string);
130 130
@@ -232,12 +232,12 @@ _shift_data_right_pages(struct page **pages, size_t pgto_base,
 		pgto_base -= copy;
 		pgfrom_base -= copy;
 
-		vto = kmap_atomic(*pgto, KM_USER0);
-		vfrom = kmap_atomic(*pgfrom, KM_USER1);
+		vto = kmap_atomic(*pgto);
+		vfrom = kmap_atomic(*pgfrom);
 		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
 		flush_dcache_page(*pgto);
-		kunmap_atomic(vfrom, KM_USER1);
-		kunmap_atomic(vto, KM_USER0);
+		kunmap_atomic(vfrom);
+		kunmap_atomic(vto);
 
 	} while ((len -= copy) != 0);
 }
@@ -267,9 +267,9 @@ _copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
 		if (copy > len)
 			copy = len;
 
-		vto = kmap_atomic(*pgto, KM_USER0);
+		vto = kmap_atomic(*pgto);
 		memcpy(vto + pgbase, p, copy);
-		kunmap_atomic(vto, KM_USER0);
+		kunmap_atomic(vto);
 
 		len -= copy;
 		if (len == 0)
@@ -311,9 +311,9 @@ _copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 		if (copy > len)
 			copy = len;
 
-		vfrom = kmap_atomic(*pgfrom, KM_USER0);
+		vfrom = kmap_atomic(*pgfrom);
 		memcpy(p, vfrom + pgbase, copy);
-		kunmap_atomic(vfrom, KM_USER0);
+		kunmap_atomic(vfrom);
 
 		pgbase += copy;
 		if (pgbase == PAGE_CACHE_SIZE) {
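The _shift_data_right_pages() hunk above is the one place in xdr.c where two atomic mappings are live at once. With the KM_USER0/KM_USER1 slot names gone, nesting relies entirely on the per-CPU mapping stack, so the kunmap_atomic() calls must mirror the kmap_atomic() calls in reverse order. A minimal sketch of that discipline under the new one-argument API (copy_between_pages is an illustrative helper, not part of this patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy len bytes (len <= PAGE_SIZE) from the start of src to the start of dst. */
static void copy_between_pages(struct page *dst, struct page *src, size_t len)
{
	void *vto = kmap_atomic(dst);	/* pushes a slot on the per-CPU stack */
	void *vfrom = kmap_atomic(src);	/* nests on top of the first mapping */

	memcpy(vto, vfrom, len);
	kunmap_atomic(vfrom);		/* last mapped, first unmapped */
	kunmap_atomic(vto);
}

Releasing vfrom before vto matches the reversed kunmap_atomic() order that _shift_data_right_pages() keeps after the conversion.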
diff --git a/net/sunrpc/xprtrdma/rpc_rdma.c b/net/sunrpc/xprtrdma/rpc_rdma.c
index 554d0814c875..1776e5731dcf 100644
--- a/net/sunrpc/xprtrdma/rpc_rdma.c
+++ b/net/sunrpc/xprtrdma/rpc_rdma.c
@@ -338,9 +338,9 @@ rpcrdma_inline_pullup(struct rpc_rqst *rqst, int pad)
 			curlen = copy_len;
 		dprintk("RPC: %s: page %d destp 0x%p len %d curlen %d\n",
 			__func__, i, destp, copy_len, curlen);
-		srcp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
+		srcp = kmap_atomic(ppages[i]);
 		memcpy(destp, srcp+page_base, curlen);
-		kunmap_atomic(srcp, KM_SKB_SUNRPC_DATA);
+		kunmap_atomic(srcp);
 		rqst->rq_svec[0].iov_len += curlen;
 		destp += curlen;
 		copy_len -= curlen;
@@ -639,10 +639,10 @@ rpcrdma_inline_fixup(struct rpc_rqst *rqst, char *srcp, int copy_len, int pad)
 			dprintk("RPC: %s: page %d"
 				" srcp 0x%p len %d curlen %d\n",
 				__func__, i, srcp, copy_len, curlen);
-			destp = kmap_atomic(ppages[i], KM_SKB_SUNRPC_DATA);
+			destp = kmap_atomic(ppages[i]);
 			memcpy(destp + page_base, srcp, curlen);
 			flush_dcache_page(ppages[i]);
-			kunmap_atomic(destp, KM_SKB_SUNRPC_DATA);
+			kunmap_atomic(destp);
 			srcp += curlen;
 			copy_len -= curlen;
 			if (copy_len == 0)
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 9027ac1534af..38651454ed08 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -886,12 +886,12 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
 		 * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic().
 		 * So do I.
 		 */
-		char *kaddr = kmap_atomic(page, KM_USER0);
+		char *kaddr = kmap_atomic(page);
 
 		dump->page = page;
 		memcpy(dump->data + offset, kaddr + offset,
 		       PAGE_SIZE - offset);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 	/* Same with put_arg_page(page) in fs/exec.c */
 #ifdef CONFIG_MMU