author	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 12:40:26 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-03-21 12:40:26 -0400
commit	9f3938346a5c1fa504647670edb5fea5756cfb00 (patch)
tree	7cf6d24d6b076c8db8571494984924cac03703a2 /fs
parent	69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff)
parent	317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff)
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.

It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().

Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.

* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
  feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
  highmem: kill all __kmap_atomic()
  [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
  drbd: remove the second argument of k[un]map_atomic()
  zcache: remove the second argument of k[un]map_atomic()
  gma500: remove the second argument of k[un]map_atomic()
  dm: remove the second argument of k[un]map_atomic()
  tomoyo: remove the second argument of k[un]map_atomic()
  sunrpc: remove the second argument of k[un]map_atomic()
  rds: remove the second argument of k[un]map_atomic()
  net: remove the second argument of k[un]map_atomic()
  mm: remove the second argument of k[un]map_atomic()
  lib: remove the second argument of k[un]map_atomic()
  power: remove the second argument of k[un]map_atomic()
  kdb: remove the second argument of k[un]map_atomic()
  udf: remove the second argument of k[un]map_atomic()
  ubifs: remove the second argument of k[un]map_atomic()
  squashfs: remove the second argument of k[un]map_atomic()
  reiserfs: remove the second argument of k[un]map_atomic()
  ocfs2: remove the second argument of k[un]map_atomic()
  ntfs: remove the second argument of k[un]map_atomic()
  ...
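The change is mechanical at every call site: the KM_* slot argument disappears, because the reworked atomic-kmap code manages slots internally as a per-CPU stack instead of asking callers to pick one. A minimal before/after sketch of the calling convention follows; zero_page_tail() is a hypothetical helper for illustration, not a function from this tree:

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper: zero a page from 'offset' to the end while it
 * is atomically mapped.
 *
 * Old form (before this series) - the caller picked a KM_* slot:
 *
 *	kaddr = kmap_atomic(page, KM_USER0);
 *	memset(kaddr + offset, 0, PAGE_SIZE - offset);
 *	kunmap_atomic(kaddr, KM_USER0);
 *
 * New form - the slot argument is simply dropped:
 */
static void zero_page_tail(struct page *page, unsigned int offset)
{
	void *kaddr = kmap_atomic(page);

	memset(kaddr + offset, 0, PAGE_SIZE - offset);
	flush_dcache_page(page);
	kunmap_atomic(kaddr);
}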
Diffstat (limited to 'fs')
-rw-r--r--	fs/afs/fsclient.c	8
-rw-r--r--	fs/afs/mntpt.c	4
-rw-r--r--	fs/aio.c	30
-rw-r--r--	fs/bio-integrity.c	10
-rw-r--r--	fs/btrfs/compression.c	12
-rw-r--r--	fs/btrfs/extent_io.c	16
-rw-r--r--	fs/btrfs/file-item.c	4
-rw-r--r--	fs/btrfs/inode.c	26
-rw-r--r--	fs/btrfs/lzo.c	4
-rw-r--r--	fs/btrfs/scrub.c	8
-rw-r--r--	fs/btrfs/zlib.c	4
-rw-r--r--	fs/exec.c	4
-rw-r--r--	fs/exofs/dir.c	4
-rw-r--r--	fs/ext2/dir.c	4
-rw-r--r--	fs/fuse/dev.c	4
-rw-r--r--	fs/fuse/file.c	4
-rw-r--r--	fs/gfs2/aops.c	12
-rw-r--r--	fs/gfs2/lops.c	8
-rw-r--r--	fs/gfs2/quota.c	4
-rw-r--r--	fs/jbd/journal.c	12
-rw-r--r--	fs/jbd/transaction.c	4
-rw-r--r--	fs/jbd2/commit.c	4
-rw-r--r--	fs/jbd2/journal.c	12
-rw-r--r--	fs/jbd2/transaction.c	4
-rw-r--r--	fs/logfs/dir.c	18
-rw-r--r--	fs/logfs/readwrite.c	38
-rw-r--r--	fs/logfs/segment.c	4
-rw-r--r--	fs/minix/dir.c	4
-rw-r--r--	fs/namei.c	4
-rw-r--r--	fs/nfs/dir.c	8
-rw-r--r--	fs/nfs/nfs4proc.c	4
-rw-r--r--	fs/nilfs2/cpfile.c	94
-rw-r--r--	fs/nilfs2/dat.c	38
-rw-r--r--	fs/nilfs2/dir.c	4
-rw-r--r--	fs/nilfs2/ifile.c	4
-rw-r--r--	fs/nilfs2/mdt.c	4
-rw-r--r--	fs/nilfs2/page.c	8
-rw-r--r--	fs/nilfs2/recovery.c	4
-rw-r--r--	fs/nilfs2/segbuf.c	4
-rw-r--r--	fs/nilfs2/sufile.c	68
-rw-r--r--	fs/ntfs/aops.c	20
-rw-r--r--	fs/ntfs/attrib.c	20
-rw-r--r--	fs/ntfs/file.c	16
-rw-r--r--	fs/ntfs/super.c	8
-rw-r--r--	fs/ocfs2/aops.c	16
-rw-r--r--	fs/pipe.c	8
-rw-r--r--	fs/reiserfs/stree.c	4
-rw-r--r--	fs/reiserfs/tail_conversion.c	4
-rw-r--r--	fs/splice.c	7
-rw-r--r--	fs/squashfs/file.c	8
-rw-r--r--	fs/squashfs/symlink.c	4
-rw-r--r--	fs/ubifs/file.c	4
-rw-r--r--	fs/udf/file.c	4
53 files changed, 317 insertions, 320 deletions
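Several of the converted sites (fs/aio.c and fs/btrfs/inode.c below) used to juggle distinct slots such as KM_USER0/KM_USER1 or KM_IRQ0/KM_IRQ1 when two pages were mapped at once. With the stack-based implementation, nesting still works, but unmaps must be issued in reverse (LIFO) order of the maps; the converted aio_complete() below keeps exactly that ordering, mapping the ring page first and unmapping it last. A hedged sketch of the nesting pattern, with copy_page_contents() as a hypothetical helper (the kernel's real copy_highpage() does this job):

#include <linux/highmem.h>
#include <linux/string.h>

/*
 * Hypothetical helper: copy one page's contents to another using two
 * nested atomic kmaps. Pre-3.4 this required two different KM_* slots;
 * now it only requires unmapping in the reverse order of mapping.
 */
static void copy_page_contents(struct page *dst, struct page *src)
{
	void *vdst = kmap_atomic(dst);
	void *vsrc = kmap_atomic(src);	/* nested map */

	memcpy(vdst, vsrc, PAGE_SIZE);

	kunmap_atomic(vsrc);		/* LIFO: last mapped ... */
	kunmap_atomic(vdst);		/* ... first unmapped */
}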
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index 2f213d109c21..b960ff05ea0b 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -365,10 +365,10 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
                 _debug("extract data");
                 if (call->count > 0) {
                         page = call->reply3;
-                        buffer = kmap_atomic(page, KM_USER0);
+                        buffer = kmap_atomic(page);
                         ret = afs_extract_data(call, skb, last, buffer,
                                                call->count);
-                        kunmap_atomic(buffer, KM_USER0);
+                        kunmap_atomic(buffer);
                         switch (ret) {
                         case 0:         break;
                         case -EAGAIN:   return 0;
@@ -411,9 +411,9 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call,
         if (call->count < PAGE_SIZE) {
                 _debug("clear");
                 page = call->reply3;
-                buffer = kmap_atomic(page, KM_USER0);
+                buffer = kmap_atomic(page);
                 memset(buffer + call->count, 0, PAGE_SIZE - call->count);
-                kunmap_atomic(buffer, KM_USER0);
+                kunmap_atomic(buffer);
         }

         _leave(" = 0 [done]");
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 8f4ce2658b7d..298cf8919ec7 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -200,9 +200,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
                 if (PageError(page))
                         goto error;

-                buf = kmap_atomic(page, KM_USER0);
+                buf = kmap_atomic(page);
                 memcpy(devname, buf, size);
-                kunmap_atomic(buf, KM_USER0);
+                kunmap_atomic(buf);
                 page_cache_release(page);
                 page = NULL;
         }
diff --git a/fs/aio.c b/fs/aio.c
index b9d64d89a043..5b600cb8779e 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)

         info->nr = nr_events;           /* trusted copy */

-        ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+        ring = kmap_atomic(info->ring_pages[0]);
         ring->nr = nr_events;   /* user copy */
         ring->id = ctx->user_id;
         ring->head = ring->tail = 0;
@@ -168,32 +168,32 @@ static int aio_setup_ring(struct kioctx *ctx)
         ring->compat_features = AIO_RING_COMPAT_FEATURES;
         ring->incompat_features = AIO_RING_INCOMPAT_FEATURES;
         ring->header_length = sizeof(struct aio_ring);
-        kunmap_atomic(ring, KM_USER0);
+        kunmap_atomic(ring);

         return 0;
 }


 /* aio_ring_event: returns a pointer to the event at the given index from
- * kmap_atomic(, km). Release the pointer with put_aio_ring_event();
+ * kmap_atomic(). Release the pointer with put_aio_ring_event();
  */
 #define AIO_EVENTS_PER_PAGE     (PAGE_SIZE / sizeof(struct io_event))
 #define AIO_EVENTS_FIRST_PAGE   ((PAGE_SIZE - sizeof(struct aio_ring)) / sizeof(struct io_event))
 #define AIO_EVENTS_OFFSET       (AIO_EVENTS_PER_PAGE - AIO_EVENTS_FIRST_PAGE)

-#define aio_ring_event(info, nr, km) ({                                 \
+#define aio_ring_event(info, nr) ({                                     \
         unsigned pos = (nr) + AIO_EVENTS_OFFSET;                        \
         struct io_event *__event;                                       \
         __event = kmap_atomic(                                          \
-                        (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE], km); \
+                        (info)->ring_pages[pos / AIO_EVENTS_PER_PAGE]); \
         __event += pos % AIO_EVENTS_PER_PAGE;                           \
         __event;                                                        \
 })

-#define put_aio_ring_event(event, km) do {      \
+#define put_aio_ring_event(event) do {          \
         struct io_event *__event = (event);     \
         (void)__event;                          \
-        kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK), km); \
+        kunmap_atomic((void *)((unsigned long)__event & PAGE_MASK)); \
 } while(0)

 static void ctx_rcu_free(struct rcu_head *head)
@@ -1019,10 +1019,10 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
         if (kiocbIsCancelled(iocb))
                 goto put_rq;

-        ring = kmap_atomic(info->ring_pages[0], KM_IRQ1);
+        ring = kmap_atomic(info->ring_pages[0]);

         tail = info->tail;
-        event = aio_ring_event(info, tail, KM_IRQ0);
+        event = aio_ring_event(info, tail);
         if (++tail >= info->nr)
                 tail = 0;

@@ -1043,8 +1043,8 @@ int aio_complete(struct kiocb *iocb, long res, long res2)
         info->tail = tail;
         ring->tail = tail;

-        put_aio_ring_event(event, KM_IRQ0);
-        kunmap_atomic(ring, KM_IRQ1);
+        put_aio_ring_event(event);
+        kunmap_atomic(ring);

         pr_debug("added to ring %p at [%lu]\n", iocb, tail);

@@ -1089,7 +1089,7 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)
         unsigned long head;
         int ret = 0;

-        ring = kmap_atomic(info->ring_pages[0], KM_USER0);
+        ring = kmap_atomic(info->ring_pages[0]);
         dprintk("in aio_read_evt h%lu t%lu m%lu\n",
                  (unsigned long)ring->head, (unsigned long)ring->tail,
                  (unsigned long)ring->nr);
@@ -1101,18 +1101,18 @@ static int aio_read_evt(struct kioctx *ioctx, struct io_event *ent)

         head = ring->head % info->nr;
         if (head != ring->tail) {
-                struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+                struct io_event *evp = aio_ring_event(info, head);
                 *ent = *evp;
                 head = (head + 1) % info->nr;
                 smp_mb(); /* finish reading the event before updatng the head */
                 ring->head = head;
                 ret = 1;
-                put_aio_ring_event(evp, KM_USER1);
+                put_aio_ring_event(evp);
         }
         spin_unlock(&info->ring_lock);

 out:
-        kunmap_atomic(ring, KM_USER0);
+        kunmap_atomic(ring);
         dprintk("leaving aio_read_evt: %d h%lu t%lu\n", ret,
                  (unsigned long)ring->head, (unsigned long)ring->tail);
         return ret;
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index c2183f3917cd..e85c04b9f61c 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -357,7 +357,7 @@ static void bio_integrity_generate(struct bio *bio)
         bix.sector_size = bi->sector_size;

         bio_for_each_segment(bv, bio, i) {
-                void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
+                void *kaddr = kmap_atomic(bv->bv_page);
                 bix.data_buf = kaddr + bv->bv_offset;
                 bix.data_size = bv->bv_len;
                 bix.prot_buf = prot_buf;
@@ -371,7 +371,7 @@ static void bio_integrity_generate(struct bio *bio)
                 total += sectors * bi->tuple_size;
                 BUG_ON(total > bio->bi_integrity->bip_size);

-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);
         }
 }

@@ -498,7 +498,7 @@ static int bio_integrity_verify(struct bio *bio)
         bix.sector_size = bi->sector_size;

         bio_for_each_segment(bv, bio, i) {
-                void *kaddr = kmap_atomic(bv->bv_page, KM_USER0);
+                void *kaddr = kmap_atomic(bv->bv_page);
                 bix.data_buf = kaddr + bv->bv_offset;
                 bix.data_size = bv->bv_len;
                 bix.prot_buf = prot_buf;
@@ -507,7 +507,7 @@ static int bio_integrity_verify(struct bio *bio)
                 ret = bi->verify_fn(&bix);

                 if (ret) {
-                        kunmap_atomic(kaddr, KM_USER0);
+                        kunmap_atomic(kaddr);
                         return ret;
                 }

@@ -517,7 +517,7 @@ static int bio_integrity_verify(struct bio *bio)
                 total += sectors * bi->tuple_size;
                 BUG_ON(total > bio->bi_integrity->bip_size);

-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);
         }

         return ret;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d02c27cd14c7..b805afb37fa8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -120,10 +120,10 @@ static int check_compressed_csum(struct inode *inode,
                 page = cb->compressed_pages[i];
                 csum = ~(u32)0;

-                kaddr = kmap_atomic(page, KM_USER0);
+                kaddr = kmap_atomic(page);
                 csum = btrfs_csum_data(root, kaddr, csum, PAGE_CACHE_SIZE);
                 btrfs_csum_final(csum, (char *)&csum);
-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);

                 if (csum != *cb_sum) {
                         printk(KERN_INFO "btrfs csum failed ino %llu "
@@ -521,10 +521,10 @@ static noinline int add_ra_bio_pages(struct inode *inode,
                 if (zero_offset) {
                         int zeros;
                         zeros = PAGE_CACHE_SIZE - zero_offset;
-                        userpage = kmap_atomic(page, KM_USER0);
+                        userpage = kmap_atomic(page);
                         memset(userpage + zero_offset, 0, zeros);
                         flush_dcache_page(page);
-                        kunmap_atomic(userpage, KM_USER0);
+                        kunmap_atomic(userpage);
                 }
         }

@@ -993,9 +993,9 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
                 bytes = min(PAGE_CACHE_SIZE - *pg_offset,
                             PAGE_CACHE_SIZE - buf_offset);
                 bytes = min(bytes, working_bytes);
-                kaddr = kmap_atomic(page_out, KM_USER0);
+                kaddr = kmap_atomic(page_out);
                 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);
                 flush_dcache_page(page_out);

                 *pg_offset += bytes;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index a55fbe6252de..2862454bcdb3 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2546,10 +2546,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,

                 if (zero_offset) {
                         iosize = PAGE_CACHE_SIZE - zero_offset;
-                        userpage = kmap_atomic(page, KM_USER0);
+                        userpage = kmap_atomic(page);
                         memset(userpage + zero_offset, 0, iosize);
                         flush_dcache_page(page);
-                        kunmap_atomic(userpage, KM_USER0);
+                        kunmap_atomic(userpage);
                 }
         }
         while (cur <= end) {
@@ -2558,10 +2558,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                         struct extent_state *cached = NULL;

                         iosize = PAGE_CACHE_SIZE - pg_offset;
-                        userpage = kmap_atomic(page, KM_USER0);
+                        userpage = kmap_atomic(page);
                         memset(userpage + pg_offset, 0, iosize);
                         flush_dcache_page(page);
-                        kunmap_atomic(userpage, KM_USER0);
+                        kunmap_atomic(userpage);
                         set_extent_uptodate(tree, cur, cur + iosize - 1,
                                             &cached, GFP_NOFS);
                         unlock_extent_cached(tree, cur, cur + iosize - 1,
@@ -2607,10 +2607,10 @@ static int __extent_read_full_page(struct extent_io_tree *tree,
                         char *userpage;
                         struct extent_state *cached = NULL;

-                        userpage = kmap_atomic(page, KM_USER0);
+                        userpage = kmap_atomic(page);
                         memset(userpage + pg_offset, 0, iosize);
                         flush_dcache_page(page);
-                        kunmap_atomic(userpage, KM_USER0);
+                        kunmap_atomic(userpage);

                         set_extent_uptodate(tree, cur, cur + iosize - 1,
                                             &cached, GFP_NOFS);
@@ -2756,10 +2756,10 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc,
         if (page->index == end_index) {
                 char *userpage;

-                userpage = kmap_atomic(page, KM_USER0);
+                userpage = kmap_atomic(page);
                 memset(userpage + pg_offset, 0,
                        PAGE_CACHE_SIZE - pg_offset);
-                kunmap_atomic(userpage, KM_USER0);
+                kunmap_atomic(userpage);
                 flush_dcache_page(page);
         }
         pg_offset = 0;
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index c7fb3a4247d3..078b4fd54500 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -447,13 +447,13 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                         sums->bytenr = ordered->start;
                 }

-                data = kmap_atomic(bvec->bv_page, KM_USER0);
+                data = kmap_atomic(bvec->bv_page);
                 sector_sum->sum = ~(u32)0;
                 sector_sum->sum = btrfs_csum_data(root,
                                                   data + bvec->bv_offset,
                                                   sector_sum->sum,
                                                   bvec->bv_len);
-                kunmap_atomic(data, KM_USER0);
+                kunmap_atomic(data);
                 btrfs_csum_final(sector_sum->sum,
                                  (char *)&sector_sum->sum);
                 sector_sum->bytenr = disk_bytenr;
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 892b34785ccc..3a0b5c1f9d31 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -173,9 +173,9 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                         cur_size = min_t(unsigned long, compressed_size,
                                          PAGE_CACHE_SIZE);

-                        kaddr = kmap_atomic(cpage, KM_USER0);
+                        kaddr = kmap_atomic(cpage);
                         write_extent_buffer(leaf, kaddr, ptr, cur_size);
-                        kunmap_atomic(kaddr, KM_USER0);
+                        kunmap_atomic(kaddr);

                         i++;
                         ptr += cur_size;
@@ -187,10 +187,10 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans,
                 page = find_get_page(inode->i_mapping,
                                      start >> PAGE_CACHE_SHIFT);
                 btrfs_set_file_extent_compression(leaf, ei, 0);
-                kaddr = kmap_atomic(page, KM_USER0);
+                kaddr = kmap_atomic(page);
                 offset = start & (PAGE_CACHE_SIZE - 1);
                 write_extent_buffer(leaf, kaddr + offset, ptr, size);
-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);
                 page_cache_release(page);
         }
         btrfs_mark_buffer_dirty(leaf);
@@ -422,10 +422,10 @@ again:
                          * sending it down to disk
                          */
                         if (offset) {
-                                kaddr = kmap_atomic(page, KM_USER0);
+                                kaddr = kmap_atomic(page);
                                 memset(kaddr + offset, 0,
                                        PAGE_CACHE_SIZE - offset);
-                                kunmap_atomic(kaddr, KM_USER0);
+                                kunmap_atomic(kaddr);
                         }
                         will_compress = 1;
                 }
@@ -1873,7 +1873,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
         } else {
                 ret = get_state_private(io_tree, start, &private);
         }
-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         if (ret)
                 goto zeroit;

@@ -1882,7 +1882,7 @@ static int btrfs_readpage_end_io_hook(struct page *page, u64 start, u64 end,
         if (csum != private)
                 goto zeroit;

-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
 good:
         return 0;

@@ -1894,7 +1894,7 @@ zeroit:
                (unsigned long long)private);
         memset(kaddr + offset, 1, end - start + 1);
         flush_dcache_page(page);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
         if (private == 0)
                 return 0;
         return -EIO;
@@ -4937,12 +4937,12 @@ static noinline int uncompress_inline(struct btrfs_path *path,
         ret = btrfs_decompress(compress_type, tmp, page,
                                extent_offset, inline_size, max_size);
         if (ret) {
-                char *kaddr = kmap_atomic(page, KM_USER0);
+                char *kaddr = kmap_atomic(page);
                 unsigned long copy_size = min_t(u64,
                                 PAGE_CACHE_SIZE - pg_offset,
                                 max_size - extent_offset);
                 memset(kaddr + pg_offset, 0, copy_size);
-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);
         }
         kfree(tmp);
         return 0;
@@ -5719,11 +5719,11 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
                         unsigned long flags;

                         local_irq_save(flags);
-                        kaddr = kmap_atomic(page, KM_IRQ0);
+                        kaddr = kmap_atomic(page);
                         csum = btrfs_csum_data(root, kaddr + bvec->bv_offset,
                                                csum, bvec->bv_len);
                         btrfs_csum_final(csum, (char *)&csum);
-                        kunmap_atomic(kaddr, KM_IRQ0);
+                        kunmap_atomic(kaddr);
                         local_irq_restore(flags);

                         flush_dcache_page(bvec->bv_page);
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index a178f5ebea78..743b86fa4fcb 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -411,9 +411,9 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,

         bytes = min_t(unsigned long, destlen, out_len - start_byte);

-        kaddr = kmap_atomic(dest_page, KM_USER0);
+        kaddr = kmap_atomic(dest_page);
         memcpy(kaddr, workspace->buf + start_byte, bytes);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
 out:
         return ret;
 }
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index abc0fbffa510..390e7102b0ff 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -591,7 +591,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
         u64 flags = sbio->spag[ix].flags;

         page = sbio->bio->bi_io_vec[ix].bv_page;
-        buffer = kmap_atomic(page, KM_USER0);
+        buffer = kmap_atomic(page);
         if (flags & BTRFS_EXTENT_FLAG_DATA) {
                 ret = scrub_checksum_data(sbio->sdev,
                                           sbio->spag + ix, buffer);
@@ -603,7 +603,7 @@ static int scrub_fixup_check(struct scrub_bio *sbio, int ix)
         } else {
                 WARN_ON(1);
         }
-        kunmap_atomic(buffer, KM_USER0);
+        kunmap_atomic(buffer);

         return ret;
 }
@@ -792,7 +792,7 @@ static void scrub_checksum(struct btrfs_work *work)
         }
         for (i = 0; i < sbio->count; ++i) {
                 page = sbio->bio->bi_io_vec[i].bv_page;
-                buffer = kmap_atomic(page, KM_USER0);
+                buffer = kmap_atomic(page);
                 flags = sbio->spag[i].flags;
                 logical = sbio->logical + i * PAGE_SIZE;
                 ret = 0;
@@ -807,7 +807,7 @@ static void scrub_checksum(struct btrfs_work *work)
                 } else {
                         WARN_ON(1);
                 }
-                kunmap_atomic(buffer, KM_USER0);
+                kunmap_atomic(buffer);
                 if (ret) {
                         ret = scrub_recheck_error(sbio, i);
                         if (!ret) {
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index faccd47c6c46..92c20654cc55 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -370,9 +370,9 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
                                PAGE_CACHE_SIZE - buf_offset);
                 bytes = min(bytes, bytes_left);

-                kaddr = kmap_atomic(dest_page, KM_USER0);
+                kaddr = kmap_atomic(dest_page);
                 memcpy(kaddr + pg_offset, workspace->buf + buf_offset, bytes);
-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);

                 pg_offset += bytes;
                 bytes_left -= bytes;
diff --git a/fs/exec.c b/fs/exec.c
index 95551c6da090..3908544f5d18 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -1342,13 +1342,13 @@ int remove_arg_zero(struct linux_binprm *bprm)
                         ret = -EFAULT;
                         goto out;
                 }
-                kaddr = kmap_atomic(page, KM_USER0);
+                kaddr = kmap_atomic(page);

                 for (; offset < PAGE_SIZE && kaddr[offset];
                                 offset++, bprm->p++)
                         ;

-                kunmap_atomic(kaddr, KM_USER0);
+                kunmap_atomic(kaddr);
                 put_arg_page(page);

                 if (offset == PAGE_SIZE)
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 80405836ba6e..c61e62ac231c 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -597,7 +597,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
                 goto fail;
         }

-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         de = (struct exofs_dir_entry *)kaddr;
         de->name_len = 1;
         de->rec_len = cpu_to_le16(EXOFS_DIR_REC_LEN(1));
@@ -611,7 +611,7 @@ int exofs_make_empty(struct inode *inode, struct inode *parent)
         de->inode_no = cpu_to_le64(parent->i_ino);
         memcpy(de->name, PARENT_DIR, sizeof(PARENT_DIR));
         exofs_set_de_type(de, inode);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
         err = exofs_commit_chunk(page, 0, chunk_size);
 fail:
         page_cache_release(page);
diff --git a/fs/ext2/dir.c b/fs/ext2/dir.c
index d37df352d324..0f4f5c929257 100644
--- a/fs/ext2/dir.c
+++ b/fs/ext2/dir.c
@@ -645,7 +645,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
                 unlock_page(page);
                 goto fail;
         }
-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         memset(kaddr, 0, chunk_size);
         de = (struct ext2_dir_entry_2 *)kaddr;
         de->name_len = 1;
@@ -660,7 +660,7 @@ int ext2_make_empty(struct inode *inode, struct inode *parent)
         de->inode = cpu_to_le32(parent->i_ino);
         memcpy (de->name, "..\0", 4);
         ext2_set_de_type (de, inode);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
         err = ext2_commit_chunk(page, 0, chunk_size);
 fail:
         page_cache_release(page);
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 5f3368ab0fa9..7df2b5e8fbe1 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -838,10 +838,10 @@ static int fuse_copy_page(struct fuse_copy_state *cs, struct page **pagep,
                 }
         }
         if (page) {
-                void *mapaddr = kmap_atomic(page, KM_USER0);
+                void *mapaddr = kmap_atomic(page);
                 void *buf = mapaddr + offset;
                 offset += fuse_copy_do(cs, &buf, &count);
-                kunmap_atomic(mapaddr, KM_USER0);
+                kunmap_atomic(mapaddr);
         } else
                 offset += fuse_copy_do(cs, NULL, &count);
 }
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 4a199fd93fbd..a841868bf9ce 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1887,11 +1887,11 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
                     in_iovs + out_iovs > FUSE_IOCTL_MAX_IOV)
                         goto out;

-                vaddr = kmap_atomic(pages[0], KM_USER0);
+                vaddr = kmap_atomic(pages[0]);
                 err = fuse_copy_ioctl_iovec(fc, iov_page, vaddr,
                                             transferred, in_iovs + out_iovs,
                                             (flags & FUSE_IOCTL_COMPAT) != 0);
-                kunmap_atomic(vaddr, KM_USER0);
+                kunmap_atomic(vaddr);
                 if (err)
                         goto out;

diff --git a/fs/gfs2/aops.c b/fs/gfs2/aops.c
index 501e5cba09b3..38b7a74a0f91 100644
--- a/fs/gfs2/aops.c
+++ b/fs/gfs2/aops.c
@@ -434,12 +434,12 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
         if (error)
                 return error;

-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         if (dsize > (dibh->b_size - sizeof(struct gfs2_dinode)))
                 dsize = (dibh->b_size - sizeof(struct gfs2_dinode));
         memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
         memset(kaddr + dsize, 0, PAGE_CACHE_SIZE - dsize);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
         flush_dcache_page(page);
         brelse(dibh);
         SetPageUptodate(page);
@@ -542,9 +542,9 @@ int gfs2_internal_read(struct gfs2_inode *ip, struct file_ra_state *ra_state,
                 page = read_cache_page(mapping, index, __gfs2_readpage, NULL);
                 if (IS_ERR(page))
                         return PTR_ERR(page);
-                p = kmap_atomic(page, KM_USER0);
+                p = kmap_atomic(page);
                 memcpy(buf + copied, p + offset, amt);
-                kunmap_atomic(p, KM_USER0);
+                kunmap_atomic(p);
                 mark_page_accessed(page);
                 page_cache_release(page);
                 copied += amt;
@@ -788,11 +788,11 @@ static int gfs2_stuffed_write_end(struct inode *inode, struct buffer_head *dibh,
         unsigned char *buf = dibh->b_data + sizeof(struct gfs2_dinode);

         BUG_ON((pos + len) > (dibh->b_size - sizeof(struct gfs2_dinode)));
-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         memcpy(buf + pos, kaddr + pos, copied);
         memset(kaddr + pos + copied, 0, len - copied);
         flush_dcache_page(page);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);

         if (!PageUptodate(page))
                 SetPageUptodate(page);
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 0301be655b12..df7c6e8d0764 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -553,11 +553,11 @@ static void gfs2_check_magic(struct buffer_head *bh)
         __be32 *ptr;

         clear_buffer_escaped(bh);
-        kaddr = kmap_atomic(bh->b_page, KM_USER0);
+        kaddr = kmap_atomic(bh->b_page);
         ptr = kaddr + bh_offset(bh);
         if (*ptr == cpu_to_be32(GFS2_MAGIC))
                 set_buffer_escaped(bh);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
 }

 static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
@@ -594,10 +594,10 @@ static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
                 if (buffer_escaped(bd->bd_bh)) {
                         void *kaddr;
                         bh1 = gfs2_log_get_buf(sdp);
-                        kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
+                        kaddr = kmap_atomic(bd->bd_bh->b_page);
                         memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
                                bh1->b_size);
-                        kunmap_atomic(kaddr, KM_USER0);
+                        kunmap_atomic(kaddr);
                         *(__be32 *)bh1->b_data = 0;
                         clear_buffer_escaped(bd->bd_bh);
                         unlock_buffer(bd->bd_bh);
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index a45b21b03915..c0f8904f0860 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -720,12 +720,12 @@ get_a_page:

         gfs2_trans_add_bh(ip->i_gl, bh, 0);

-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         if (offset + sizeof(struct gfs2_quota) > PAGE_CACHE_SIZE)
                 nbytes = PAGE_CACHE_SIZE - offset;
         memcpy(kaddr + offset, ptr, nbytes);
         flush_dcache_page(page);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);
         unlock_page(page);
         page_cache_release(page);

diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 59c09f9541b5..e49e81bb80ef 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -328,7 +328,7 @@ repeat:
                 new_offset = offset_in_page(jh2bh(jh_in)->b_data);
         }

-        mapped_data = kmap_atomic(new_page, KM_USER0);
+        mapped_data = kmap_atomic(new_page);
         /*
          * Check for escaping
          */
@@ -337,7 +337,7 @@ repeat:
                 need_copy_out = 1;
                 do_escape = 1;
         }
-        kunmap_atomic(mapped_data, KM_USER0);
+        kunmap_atomic(mapped_data);

         /*
          * Do we need to do a data copy?
@@ -354,9 +354,9 @@ repeat:
                 }

                 jh_in->b_frozen_data = tmp;
-                mapped_data = kmap_atomic(new_page, KM_USER0);
+                mapped_data = kmap_atomic(new_page);
                 memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
-                kunmap_atomic(mapped_data, KM_USER0);
+                kunmap_atomic(mapped_data);

                 new_page = virt_to_page(tmp);
                 new_offset = offset_in_page(tmp);
@@ -368,9 +368,9 @@ repeat:
          * copying, we can finally do so.
          */
         if (do_escape) {
-                mapped_data = kmap_atomic(new_page, KM_USER0);
+                mapped_data = kmap_atomic(new_page);
                 *((unsigned int *)(mapped_data + new_offset)) = 0;
-                kunmap_atomic(mapped_data, KM_USER0);
+                kunmap_atomic(mapped_data);
         }

         set_bh_page(new_bh, new_page, new_offset);
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index 7fce94b04bc3..b2a7e5244e39 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -718,9 +718,9 @@ done:
                             "Possible IO failure.\n");
                 page = jh2bh(jh)->b_page;
                 offset = offset_in_page(jh2bh(jh)->b_data);
-                source = kmap_atomic(page, KM_USER0);
+                source = kmap_atomic(page);
                 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
-                kunmap_atomic(source, KM_USER0);
+                kunmap_atomic(source);
         }
         jbd_unlock_bh_state(bh);

diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 5069b8475150..c067a8cae63b 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -286,10 +286,10 @@ static __u32 jbd2_checksum_data(__u32 crc32_sum, struct buffer_head *bh)
         char *addr;
         __u32 checksum;

-        addr = kmap_atomic(page, KM_USER0);
+        addr = kmap_atomic(page);
         checksum = crc32_be(crc32_sum,
                 (void *)(addr + offset_in_page(bh->b_data)), bh->b_size);
-        kunmap_atomic(addr, KM_USER0);
+        kunmap_atomic(addr);

         return checksum;
 }
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index c0a5f9f1b127..5ff8940b8f02 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -345,7 +345,7 @@ repeat:
                 new_offset = offset_in_page(jh2bh(jh_in)->b_data);
         }

-        mapped_data = kmap_atomic(new_page, KM_USER0);
+        mapped_data = kmap_atomic(new_page);
         /*
          * Fire data frozen trigger if data already wasn't frozen. Do this
          * before checking for escaping, as the trigger may modify the magic
@@ -364,7 +364,7 @@ repeat:
                 need_copy_out = 1;
                 do_escape = 1;
         }
-        kunmap_atomic(mapped_data, KM_USER0);
+        kunmap_atomic(mapped_data);

         /*
          * Do we need to do a data copy?
@@ -385,9 +385,9 @@ repeat:
         }

         jh_in->b_frozen_data = tmp;
-        mapped_data = kmap_atomic(new_page, KM_USER0);
+        mapped_data = kmap_atomic(new_page);
         memcpy(tmp, mapped_data + new_offset, jh2bh(jh_in)->b_size);
-        kunmap_atomic(mapped_data, KM_USER0);
+        kunmap_atomic(mapped_data);

         new_page = virt_to_page(tmp);
         new_offset = offset_in_page(tmp);
@@ -406,9 +406,9 @@ repeat:
          * copying, we can finally do so.
          */
         if (do_escape) {
-                mapped_data = kmap_atomic(new_page, KM_USER0);
+                mapped_data = kmap_atomic(new_page);
                 *((unsigned int *)(mapped_data + new_offset)) = 0;
-                kunmap_atomic(mapped_data, KM_USER0);
+                kunmap_atomic(mapped_data);
         }

         set_bh_page(new_bh, new_page, new_offset);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 35ae096bed5d..e5aba56e1fd5 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -783,12 +783,12 @@ done:
                             "Possible IO failure.\n");
                 page = jh2bh(jh)->b_page;
                 offset = offset_in_page(jh2bh(jh)->b_data);
-                source = kmap_atomic(page, KM_USER0);
+                source = kmap_atomic(page);
                 /* Fire data frozen trigger just before we copy the data */
                 jbd2_buffer_frozen_trigger(jh, source + offset,
                                            jh->b_triggers);
                 memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size);
-                kunmap_atomic(source, KM_USER0);
+                kunmap_atomic(source);

                 /*
                  * Now that the frozen data is saved off, we need to store
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 3de7a32cadbe..1b6e21dda286 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -177,17 +177,17 @@ static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry)
                                 (filler_t *)logfs_readpage, NULL);
                 if (IS_ERR(page))
                         return page;
-                dd = kmap_atomic(page, KM_USER0);
+                dd = kmap_atomic(page);
                 BUG_ON(dd->namelen == 0);

                 if (name->len != be16_to_cpu(dd->namelen) ||
                                 memcmp(name->name, dd->name, name->len)) {
-                        kunmap_atomic(dd, KM_USER0);
+                        kunmap_atomic(dd);
                         page_cache_release(page);
                         continue;
                 }

-                kunmap_atomic(dd, KM_USER0);
+                kunmap_atomic(dd);
                 return page;
         }
         return NULL;
@@ -365,9 +365,9 @@ static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry,
                 return NULL;
         }
         index = page->index;
-        dd = kmap_atomic(page, KM_USER0);
+        dd = kmap_atomic(page);
         ino = be64_to_cpu(dd->ino);
-        kunmap_atomic(dd, KM_USER0);
+        kunmap_atomic(dd);
         page_cache_release(page);

         inode = logfs_iget(dir->i_sb, ino);
@@ -402,12 +402,12 @@ static int logfs_write_dir(struct inode *dir, struct dentry *dentry,
         if (!page)
                 return -ENOMEM;

-        dd = kmap_atomic(page, KM_USER0);
+        dd = kmap_atomic(page);
         memset(dd, 0, sizeof(*dd));
         dd->ino = cpu_to_be64(inode->i_ino);
         dd->type = logfs_type(inode);
         logfs_set_name(dd, &dentry->d_name);
-        kunmap_atomic(dd, KM_USER0);
+        kunmap_atomic(dd);

         err = logfs_write_buf(dir, page, WF_LOCK);
         unlock_page(page);
@@ -579,9 +579,9 @@ static int logfs_get_dd(struct inode *dir, struct dentry *dentry,
         if (IS_ERR(page))
                 return PTR_ERR(page);
         *pos = page->index;
-        map = kmap_atomic(page, KM_USER0);
+        map = kmap_atomic(page);
         memcpy(dd, map, sizeof(*dd));
-        kunmap_atomic(map, KM_USER0);
+        kunmap_atomic(map);
         page_cache_release(page);
         return 0;
 }
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 4153e65b0148..e3ab5e5a904c 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -517,9 +517,9 @@ static int indirect_write_alias(struct super_block *sb,

                 ino = page->mapping->host->i_ino;
                 logfs_unpack_index(page->index, &bix, &level);
-                child = kmap_atomic(page, KM_USER0);
+                child = kmap_atomic(page);
                 val = child[pos];
-                kunmap_atomic(child, KM_USER0);
+                kunmap_atomic(child);
                 err = write_one_alias(sb, ino, bix, level, pos, val);
                 if (err)
                         return err;
@@ -673,9 +673,9 @@ static void alloc_indirect_block(struct inode *inode, struct page *page,
                 alloc_data_block(inode, page);

         block = logfs_block(page);
-        array = kmap_atomic(page, KM_USER0);
+        array = kmap_atomic(page);
         initialize_block_counters(page, block, array, page_is_empty);
-        kunmap_atomic(array, KM_USER0);
+        kunmap_atomic(array);
 }

 static void block_set_pointer(struct page *page, int index, u64 ptr)
@@ -685,10 +685,10 @@ static void block_set_pointer(struct page *page, int index, u64 ptr)
         u64 oldptr;

         BUG_ON(!block);
-        array = kmap_atomic(page, KM_USER0);
+        array = kmap_atomic(page);
         oldptr = be64_to_cpu(array[index]);
         array[index] = cpu_to_be64(ptr);
-        kunmap_atomic(array, KM_USER0);
+        kunmap_atomic(array);
         SetPageUptodate(page);

         block->full += !!(ptr & LOGFS_FULLY_POPULATED)
@@ -701,9 +701,9 @@ static u64 block_get_pointer(struct page *page, int index)
         __be64 *block;
         u64 ptr;

-        block = kmap_atomic(page, KM_USER0);
+        block = kmap_atomic(page);
         ptr = be64_to_cpu(block[index]);
-        kunmap_atomic(block, KM_USER0);
+        kunmap_atomic(block);
         return ptr;
 }

@@ -850,7 +850,7 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
                 }

                 slot = get_bits(bix, SUBLEVEL(level));
-                rblock = kmap_atomic(page, KM_USER0);
+                rblock = kmap_atomic(page);
                 while (slot < LOGFS_BLOCK_FACTOR) {
                         if (data && (rblock[slot] != 0))
                                 break;
@@ -861,12 +861,12 @@ static u64 seek_holedata_loop(struct inode *inode, u64 bix, int data)
                         bix &= ~(increment - 1);
                 }
                 if (slot >= LOGFS_BLOCK_FACTOR) {
-                        kunmap_atomic(rblock, KM_USER0);
+                        kunmap_atomic(rblock);
                         logfs_put_read_page(page);
                         return bix;
                 }
                 bofs = be64_to_cpu(rblock[slot]);
-                kunmap_atomic(rblock, KM_USER0);
+                kunmap_atomic(rblock);
                 logfs_put_read_page(page);
                 if (!bofs) {
                         BUG_ON(data);
@@ -1961,9 +1961,9 @@ int logfs_read_inode(struct inode *inode)
         if (IS_ERR(page))
                 return PTR_ERR(page);

-        di = kmap_atomic(page, KM_USER0);
+        di = kmap_atomic(page);
         logfs_disk_to_inode(di, inode);
-        kunmap_atomic(di, KM_USER0);
+        kunmap_atomic(di);
         move_page_to_inode(inode, page);
         page_cache_release(page);
         return 0;
@@ -1982,9 +1982,9 @@ static struct page *inode_to_page(struct inode *inode)
         if (!page)
                 return NULL;

-        di = kmap_atomic(page, KM_USER0);
+        di = kmap_atomic(page);
         logfs_inode_to_disk(inode, di);
-        kunmap_atomic(di, KM_USER0);
+        kunmap_atomic(di);
         move_inode_to_page(page, inode);
         return page;
 }
@@ -2041,13 +2041,13 @@ static void logfs_mod_segment_entry(struct super_block *sb, u32 segno,

         if (write)
                 alloc_indirect_block(inode, page, 0);
-        se = kmap_atomic(page, KM_USER0);
+        se = kmap_atomic(page);
         change_se(se + child_no, arg);
         if (write) {
                 logfs_set_alias(sb, logfs_block(page), child_no);
                 BUG_ON((int)be32_to_cpu(se[child_no].valid) > super->s_segsize);
         }
-        kunmap_atomic(se, KM_USER0);
+        kunmap_atomic(se);

         logfs_put_write_page(page);
 }
@@ -2245,10 +2245,10 @@ int logfs_inode_write(struct inode *inode, const void *buf, size_t count,
         if (!page)
                 return -ENOMEM;

-        pagebuf = kmap_atomic(page, KM_USER0);
+        pagebuf = kmap_atomic(page);
         memcpy(pagebuf, buf, count);
         flush_dcache_page(page);
-        kunmap_atomic(pagebuf, KM_USER0);
+        kunmap_atomic(pagebuf);

         if (i_size_read(inode) < pos + LOGFS_BLOCKSIZE)
                 i_size_write(inode, pos + LOGFS_BLOCKSIZE);
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index ab798ed1cc88..e28d090c98d6 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -543,9 +543,9 @@ void move_page_to_btree(struct page *page)
                 BUG_ON(!item); /* mempool empty */
                 memset(item, 0, sizeof(*item));

-                child = kmap_atomic(page, KM_USER0);
+                child = kmap_atomic(page);
                 item->val = child[pos];
-                kunmap_atomic(child, KM_USER0);
+                kunmap_atomic(child);
                 item->child_no = pos;
                 list_add(&item->list, &block->item_list);
         }
diff --git a/fs/minix/dir.c b/fs/minix/dir.c
index 085a9262c692..685b2d981b87 100644
--- a/fs/minix/dir.c
+++ b/fs/minix/dir.c
@@ -335,7 +335,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
                 goto fail;
         }

-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         memset(kaddr, 0, PAGE_CACHE_SIZE);

         if (sbi->s_version == MINIX_V3) {
@@ -355,7 +355,7 @@ int minix_make_empty(struct inode *inode, struct inode *dir)
                 de->inode = dir->i_ino;
                 strcpy(de->name, "..");
         }
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);

         err = dir_commit_chunk(page, 0, 2 * sbi->s_dirsize);
 fail:
diff --git a/fs/namei.c b/fs/namei.c
index fa96a26d3291..20a4fcf001ec 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -3493,9 +3493,9 @@ retry:
         if (err)
                 goto fail;

-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         memcpy(kaddr, symname, len-1);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);

         err = pagecache_write_end(NULL, mapping, 0, len-1, len-1,
                                                         page, fsdata);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index fd9a872fada0..32aa6917265a 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -260,10 +260,10 @@ void nfs_readdir_clear_array(struct page *page)
         struct nfs_cache_array *array;
         int i;

-        array = kmap_atomic(page, KM_USER0);
+        array = kmap_atomic(page);
         for (i = 0; i < array->size; i++)
                 kfree(array->array[i].string.name);
-        kunmap_atomic(array, KM_USER0);
+        kunmap_atomic(array);
 }

 /*
@@ -1870,11 +1870,11 @@ static int nfs_symlink(struct inode *dir, struct dentry *dentry, const char *sym
         if (!page)
                 return -ENOMEM;

-        kaddr = kmap_atomic(page, KM_USER0);
+        kaddr = kmap_atomic(page);
         memcpy(kaddr, symname, pathlen);
         if (pathlen < PAGE_SIZE)
                 memset(kaddr + pathlen, 0, PAGE_SIZE - pathlen);
-        kunmap_atomic(kaddr, KM_USER0);
+        kunmap_atomic(kaddr);

         error = NFS_PROTO(dir)->symlink(dir, dentry, page, pathlen, &attr);
         if (error != 0) {
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ec9f6ef6c5dd..caf92d05c3a9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -193,7 +193,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
193 * when talking to the server, we always send cookie 0 193 * when talking to the server, we always send cookie 0
194 * instead of 1 or 2. 194 * instead of 1 or 2.
195 */ 195 */
196 start = p = kmap_atomic(*readdir->pages, KM_USER0); 196 start = p = kmap_atomic(*readdir->pages);
197 197
198 if (cookie == 0) { 198 if (cookie == 0) {
199 *p++ = xdr_one; /* next */ 199 *p++ = xdr_one; /* next */
@@ -221,7 +221,7 @@ static void nfs4_setup_readdir(u64 cookie, __be32 *verifier, struct dentry *dent
 
 	readdir->pgbase = (char *)p - (char *)start;
 	readdir->count -= readdir->pgbase;
-	kunmap_atomic(start, KM_USER0);
+	kunmap_atomic(start);
 }
 
 static int nfs4_wait_clnt_recover(struct nfs_client *clp)
diff --git a/fs/nilfs2/cpfile.c b/fs/nilfs2/cpfile.c
index c9b342c8b503..dab5c4c6dfaf 100644
--- a/fs/nilfs2/cpfile.c
+++ b/fs/nilfs2/cpfile.c
@@ -218,11 +218,11 @@ int nilfs_cpfile_get_checkpoint(struct inode *cpfile,
 						kaddr, 1);
 		mark_buffer_dirty(cp_bh);
 
-		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(header_bh->b_page);
 		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
 						       kaddr);
 		le64_add_cpu(&header->ch_ncheckpoints, 1);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		mark_buffer_dirty(header_bh);
 		nilfs_mdt_mark_dirty(cpfile);
 	}
@@ -313,7 +313,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 			continue;
 		}
 
-		kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(cp_bh->b_page);
 		cp = nilfs_cpfile_block_get_checkpoint(
 			cpfile, cno, cp_bh, kaddr);
 		nicps = 0;
@@ -334,7 +334,7 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 					cpfile, cp_bh, kaddr, nicps);
 			if (count == 0) {
 				/* make hole */
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 				brelse(cp_bh);
 				ret =
 					nilfs_cpfile_delete_checkpoint_block(
@@ -349,18 +349,18 @@ int nilfs_cpfile_delete_checkpoints(struct inode *cpfile,
 			}
 		}
 
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(cp_bh);
 	}
 
 	if (tnicps > 0) {
-		kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(header_bh->b_page);
 		header = nilfs_cpfile_block_get_header(cpfile, header_bh,
 						       kaddr);
 		le64_add_cpu(&header->ch_ncheckpoints, -(u64)tnicps);
 		mark_buffer_dirty(header_bh);
 		nilfs_mdt_mark_dirty(cpfile);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 
 	brelse(header_bh);
@@ -408,7 +408,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
 			continue; /* skip hole */
 		}
 
-		kaddr = kmap_atomic(bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(bh->b_page);
 		cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
 		for (i = 0; i < ncps && n < nci; i++, cp = (void *)cp + cpsz) {
 			if (!nilfs_checkpoint_invalid(cp)) {
@@ -418,7 +418,7 @@ static ssize_t nilfs_cpfile_do_get_cpinfo(struct inode *cpfile, __u64 *cnop,
 				n++;
 			}
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(bh);
 	}
 
@@ -451,10 +451,10 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
 	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
 	if (ret < 0)
 		goto out;
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
 	curr = le64_to_cpu(header->ch_snapshot_list.ssl_next);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh);
 	if (curr == 0) {
 		ret = 0;
@@ -472,7 +472,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
 		ret = 0; /* No snapshots (started from a hole block) */
 		goto out;
 	}
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	while (n < nci) {
 		cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr);
 		curr = ~(__u64)0; /* Terminator */
@@ -488,7 +488,7 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
 
 		next_blkoff = nilfs_cpfile_get_blkoff(cpfile, next);
 		if (curr_blkoff != next_blkoff) {
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			brelse(bh);
 			ret = nilfs_cpfile_get_checkpoint_block(cpfile, next,
 								0, &bh);
@@ -496,12 +496,12 @@ static ssize_t nilfs_cpfile_do_get_ssinfo(struct inode *cpfile, __u64 *cnop,
 				WARN_ON(ret == -ENOENT);
 				goto out;
 			}
-			kaddr = kmap_atomic(bh->b_page, KM_USER0);
+			kaddr = kmap_atomic(bh->b_page);
 		}
 		curr = next;
 		curr_blkoff = next_blkoff;
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh);
 	*cnop = curr;
 	ret = n;
@@ -592,24 +592,24 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(cp_bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
 	if (nilfs_checkpoint_invalid(cp)) {
 		ret = -ENOENT;
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		goto out_cp;
 	}
 	if (nilfs_checkpoint_snapshot(cp)) {
 		ret = 0;
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		goto out_cp;
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
 	if (ret < 0)
 		goto out_cp;
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
 	list = &header->ch_snapshot_list;
 	curr_bh = header_bh;
@@ -621,13 +621,13 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
 		prev_blkoff = nilfs_cpfile_get_blkoff(cpfile, prev);
 		curr = prev;
 		if (curr_blkoff != prev_blkoff) {
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			brelse(curr_bh);
 			ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr,
 								0, &curr_bh);
 			if (ret < 0)
 				goto out_header;
-			kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
+			kaddr = kmap_atomic(curr_bh->b_page);
 		}
 		curr_blkoff = prev_blkoff;
 		cp = nilfs_cpfile_block_get_checkpoint(
@@ -635,7 +635,7 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
 		list = &cp->cp_snapshot_list;
 		prev = le64_to_cpu(list->ssl_prev);
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (prev != 0) {
 		ret = nilfs_cpfile_get_checkpoint_block(cpfile, prev, 0,
@@ -647,29 +647,29 @@ static int nilfs_cpfile_set_snapshot(struct inode *cpfile, __u64 cno)
 		get_bh(prev_bh);
 	}
 
-	kaddr = kmap_atomic(curr_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(curr_bh->b_page);
 	list = nilfs_cpfile_block_get_snapshot_list(
 		cpfile, curr, curr_bh, kaddr);
 	list->ssl_prev = cpu_to_le64(cno);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(cp_bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
 	cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr);
 	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(prev);
 	nilfs_checkpoint_set_snapshot(cp);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(prev_bh->b_page);
 	list = nilfs_cpfile_block_get_snapshot_list(
 		cpfile, prev, prev_bh, kaddr);
 	list->ssl_next = cpu_to_le64(cno);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
 	le64_add_cpu(&header->ch_nsnapshots, 1);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(prev_bh);
 	mark_buffer_dirty(curr_bh);
@@ -710,23 +710,23 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &cp_bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(cp_bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
 	if (nilfs_checkpoint_invalid(cp)) {
 		ret = -ENOENT;
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		goto out_cp;
 	}
 	if (!nilfs_checkpoint_snapshot(cp)) {
 		ret = 0;
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		goto out_cp;
 	}
 
 	list = &cp->cp_snapshot_list;
 	next = le64_to_cpu(list->ssl_next);
 	prev = le64_to_cpu(list->ssl_prev);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	ret = nilfs_cpfile_get_header_block(cpfile, &header_bh);
 	if (ret < 0)
@@ -750,29 +750,29 @@ static int nilfs_cpfile_clear_snapshot(struct inode *cpfile, __u64 cno)
 		get_bh(prev_bh);
 	}
 
-	kaddr = kmap_atomic(next_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(next_bh->b_page);
 	list = nilfs_cpfile_block_get_snapshot_list(
 		cpfile, next, next_bh, kaddr);
 	list->ssl_prev = cpu_to_le64(prev);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(prev_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(prev_bh->b_page);
 	list = nilfs_cpfile_block_get_snapshot_list(
 		cpfile, prev, prev_bh, kaddr);
 	list->ssl_next = cpu_to_le64(next);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(cp_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(cp_bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, cp_bh, kaddr);
 	cp->cp_snapshot_list.ssl_next = cpu_to_le64(0);
 	cp->cp_snapshot_list.ssl_prev = cpu_to_le64(0);
 	nilfs_checkpoint_clear_snapshot(cp);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, header_bh, kaddr);
 	le64_add_cpu(&header->ch_nsnapshots, -1);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(next_bh);
 	mark_buffer_dirty(prev_bh);
@@ -829,13 +829,13 @@ int nilfs_cpfile_is_snapshot(struct inode *cpfile, __u64 cno)
 	ret = nilfs_cpfile_get_checkpoint_block(cpfile, cno, 0, &bh);
 	if (ret < 0)
 		goto out;
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	cp = nilfs_cpfile_block_get_checkpoint(cpfile, cno, bh, kaddr);
 	if (nilfs_checkpoint_invalid(cp))
 		ret = -ENOENT;
 	else
 		ret = nilfs_checkpoint_snapshot(cp);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh);
 
  out:
@@ -912,12 +912,12 @@ int nilfs_cpfile_get_stat(struct inode *cpfile, struct nilfs_cpstat *cpstat)
 	ret = nilfs_cpfile_get_header_block(cpfile, &bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	header = nilfs_cpfile_block_get_header(cpfile, bh, kaddr);
 	cpstat->cs_cno = nilfs_mdt_cno(cpfile);
 	cpstat->cs_ncps = le64_to_cpu(header->ch_ncheckpoints);
 	cpstat->cs_nsss = le64_to_cpu(header->ch_nsnapshots);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh);
 
  out_sem:
diff --git a/fs/nilfs2/dat.c b/fs/nilfs2/dat.c
index fcc2f869af16..b5c13f3576b9 100644
--- a/fs/nilfs2/dat.c
+++ b/fs/nilfs2/dat.c
@@ -85,13 +85,13 @@ void nilfs_dat_commit_alloc(struct inode *dat, struct nilfs_palloc_req *req)
 	struct nilfs_dat_entry *entry;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_end = cpu_to_le64(NILFS_CNO_MAX);
 	entry->de_blocknr = cpu_to_le64(0);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_palloc_commit_alloc_entry(dat, req);
 	nilfs_dat_commit_entry(dat, req);
@@ -109,13 +109,13 @@ static void nilfs_dat_commit_free(struct inode *dat,
 	struct nilfs_dat_entry *entry;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	entry->de_start = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_end = cpu_to_le64(NILFS_CNO_MIN);
 	entry->de_blocknr = cpu_to_le64(0);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_dat_commit_entry(dat, req);
 	nilfs_palloc_commit_free_entry(dat, req);
@@ -136,12 +136,12 @@ void nilfs_dat_commit_start(struct inode *dat, struct nilfs_palloc_req *req,
 	struct nilfs_dat_entry *entry;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	entry->de_start = cpu_to_le64(nilfs_mdt_cno(dat));
 	entry->de_blocknr = cpu_to_le64(blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_dat_commit_entry(dat, req);
 }
@@ -160,12 +160,12 @@ int nilfs_dat_prepare_end(struct inode *dat, struct nilfs_palloc_req *req)
 		return ret;
 	}
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	start = le64_to_cpu(entry->de_start);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (blocknr == 0) {
 		ret = nilfs_palloc_prepare_free_entry(dat, req);
@@ -186,7 +186,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
 	sector_t blocknr;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	end = start = le64_to_cpu(entry->de_start);
@@ -196,7 +196,7 @@ void nilfs_dat_commit_end(struct inode *dat, struct nilfs_palloc_req *req,
 	}
 	entry->de_end = cpu_to_le64(end);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (blocknr == 0)
 		nilfs_dat_commit_free(dat, req);
@@ -211,12 +211,12 @@ void nilfs_dat_abort_end(struct inode *dat, struct nilfs_palloc_req *req)
 	sector_t blocknr;
 	void *kaddr;
 
-	kaddr = kmap_atomic(req->pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req->pr_entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, req->pr_entry_nr,
 					     req->pr_entry_bh, kaddr);
 	start = le64_to_cpu(entry->de_start);
 	blocknr = le64_to_cpu(entry->de_blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (start == nilfs_mdt_cno(dat) && blocknr == 0)
 		nilfs_palloc_abort_free_entry(dat, req);
@@ -346,20 +346,20 @@ int nilfs_dat_move(struct inode *dat, __u64 vblocknr, sector_t blocknr)
 		}
 	}
 
-	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
 	if (unlikely(entry->de_blocknr == cpu_to_le64(0))) {
 		printk(KERN_CRIT "%s: vbn = %llu, [%llu, %llu)\n", __func__,
 		       (unsigned long long)vblocknr,
 		       (unsigned long long)le64_to_cpu(entry->de_start),
 		       (unsigned long long)le64_to_cpu(entry->de_end));
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(entry_bh);
 		return -EINVAL;
 	}
 	WARN_ON(blocknr == 0);
 	entry->de_blocknr = cpu_to_le64(blocknr);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(entry_bh);
 	nilfs_mdt_mark_dirty(dat);
@@ -409,7 +409,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 		}
 	}
 
-	kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(entry_bh->b_page);
 	entry = nilfs_palloc_block_get_entry(dat, vblocknr, entry_bh, kaddr);
 	blocknr = le64_to_cpu(entry->de_blocknr);
 	if (blocknr == 0) {
@@ -419,7 +419,7 @@ int nilfs_dat_translate(struct inode *dat, __u64 vblocknr, sector_t *blocknrp)
 	*blocknrp = blocknr;
 
  out:
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(entry_bh);
 	return ret;
 }
@@ -440,7 +440,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
 						   0, &entry_bh);
 		if (ret < 0)
 			return ret;
-		kaddr = kmap_atomic(entry_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(entry_bh->b_page);
 		/* last virtual block number in this block */
 		first = vinfo->vi_vblocknr;
 		do_div(first, entries_per_block);
@@ -456,7 +456,7 @@ ssize_t nilfs_dat_get_vinfo(struct inode *dat, void *buf, unsigned visz,
 			vinfo->vi_end = le64_to_cpu(entry->de_end);
 			vinfo->vi_blocknr = le64_to_cpu(entry->de_blocknr);
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(entry_bh);
 	}
 
diff --git a/fs/nilfs2/dir.c b/fs/nilfs2/dir.c
index ca35b3a46d17..df1a7fb238d1 100644
--- a/fs/nilfs2/dir.c
+++ b/fs/nilfs2/dir.c
@@ -602,7 +602,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
 		unlock_page(page);
 		goto fail;
 	}
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memset(kaddr, 0, chunk_size);
 	de = (struct nilfs_dir_entry *)kaddr;
 	de->name_len = 1;
@@ -617,7 +617,7 @@ int nilfs_make_empty(struct inode *inode, struct inode *parent)
 	de->inode = cpu_to_le64(parent->i_ino);
 	memcpy(de->name, "..\0", 4);
 	nilfs_set_de_type(de, inode);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	nilfs_commit_chunk(page, mapping, 0, chunk_size);
 fail:
 	page_cache_release(page);
diff --git a/fs/nilfs2/ifile.c b/fs/nilfs2/ifile.c
index 684d76300a80..5a48df79d674 100644
--- a/fs/nilfs2/ifile.c
+++ b/fs/nilfs2/ifile.c
@@ -122,11 +122,11 @@ int nilfs_ifile_delete_inode(struct inode *ifile, ino_t ino)
 		return ret;
 	}
 
-	kaddr = kmap_atomic(req.pr_entry_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(req.pr_entry_bh->b_page);
 	raw_inode = nilfs_palloc_block_get_entry(ifile, req.pr_entry_nr,
 						 req.pr_entry_bh, kaddr);
 	raw_inode->i_flags = 0;
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(req.pr_entry_bh);
 	brelse(req.pr_entry_bh);
diff --git a/fs/nilfs2/mdt.c b/fs/nilfs2/mdt.c
index 800e8d78a83b..f9897d09c693 100644
--- a/fs/nilfs2/mdt.c
+++ b/fs/nilfs2/mdt.c
@@ -58,12 +58,12 @@ nilfs_mdt_insert_new_block(struct inode *inode, unsigned long block,
 
 	set_buffer_mapped(bh);
 
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	memset(kaddr + bh_offset(bh), 0, 1 << inode->i_blkbits);
 	if (init_block)
 		init_block(inode, bh, kaddr);
 	flush_dcache_page(bh->b_page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	set_buffer_uptodate(bh);
 	mark_buffer_dirty(bh);
diff --git a/fs/nilfs2/page.c b/fs/nilfs2/page.c
index 65221a04c6f0..3e7b2a0dc0c8 100644
--- a/fs/nilfs2/page.c
+++ b/fs/nilfs2/page.c
@@ -119,11 +119,11 @@ void nilfs_copy_buffer(struct buffer_head *dbh, struct buffer_head *sbh)
 	struct page *spage = sbh->b_page, *dpage = dbh->b_page;
 	struct buffer_head *bh;
 
-	kaddr0 = kmap_atomic(spage, KM_USER0);
-	kaddr1 = kmap_atomic(dpage, KM_USER1);
+	kaddr0 = kmap_atomic(spage);
+	kaddr1 = kmap_atomic(dpage);
 	memcpy(kaddr1 + bh_offset(dbh), kaddr0 + bh_offset(sbh), sbh->b_size);
-	kunmap_atomic(kaddr1, KM_USER1);
-	kunmap_atomic(kaddr0, KM_USER0);
+	kunmap_atomic(kaddr1);
+	kunmap_atomic(kaddr0);
 
 	dbh->b_state = sbh->b_state & NILFS_BUFFER_INHERENT_BITS;
 	dbh->b_blocknr = sbh->b_blocknr;
diff --git a/fs/nilfs2/recovery.c b/fs/nilfs2/recovery.c
index a604ac0331b2..f1626f5011c5 100644
--- a/fs/nilfs2/recovery.c
+++ b/fs/nilfs2/recovery.c
@@ -493,9 +493,9 @@ static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
 	if (unlikely(!bh_org))
 		return -EIO;
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memcpy(kaddr + bh_offset(bh_org), bh_org->b_data, bh_org->b_size);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(bh_org);
 	return 0;
 }
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 850a7c0228fb..dc9a913784ab 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -227,9 +227,9 @@ static void nilfs_segbuf_fill_in_data_crc(struct nilfs_segment_buffer *segbuf,
 		crc = crc32_le(crc, bh->b_data, bh->b_size);
 	}
 	list_for_each_entry(bh, &segbuf->sb_payload_buffers, b_assoc_buffers) {
-		kaddr = kmap_atomic(bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(bh->b_page);
 		crc = crc32_le(crc, kaddr + bh_offset(bh), bh->b_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 	raw_sum->ss_datasum = cpu_to_le32(crc);
 }
diff --git a/fs/nilfs2/sufile.c b/fs/nilfs2/sufile.c
index 0a0aba617d8a..c5b7653a4391 100644
--- a/fs/nilfs2/sufile.c
+++ b/fs/nilfs2/sufile.c
@@ -111,11 +111,11 @@ static void nilfs_sufile_mod_counter(struct buffer_head *header_bh,
 	struct nilfs_sufile_header *header;
 	void *kaddr;
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	le64_add_cpu(&header->sh_ncleansegs, ncleanadd);
 	le64_add_cpu(&header->sh_ndirtysegs, ndirtyadd);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(header_bh);
 }
@@ -319,11 +319,11 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 	ret = nilfs_sufile_get_header_block(sufile, &header_bh);
 	if (ret < 0)
 		goto out_sem;
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	ncleansegs = le64_to_cpu(header->sh_ncleansegs);
 	last_alloc = le64_to_cpu(header->sh_last_alloc);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nsegments = nilfs_sufile_get_nsegments(sufile);
 	maxsegnum = sui->allocmax;
@@ -356,7 +356,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 							   &su_bh);
 		if (ret < 0)
 			goto out_header;
-		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(su_bh->b_page);
 		su = nilfs_sufile_block_get_segment_usage(
 			sufile, segnum, su_bh, kaddr);
 
@@ -367,14 +367,14 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 				continue;
 			/* found a clean segment */
 			nilfs_segment_usage_set_dirty(su);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 
-			kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+			kaddr = kmap_atomic(header_bh->b_page);
 			header = kaddr + bh_offset(header_bh);
 			le64_add_cpu(&header->sh_ncleansegs, -1);
 			le64_add_cpu(&header->sh_ndirtysegs, 1);
 			header->sh_last_alloc = cpu_to_le64(segnum);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 
 			sui->ncleansegs--;
 			mark_buffer_dirty(header_bh);
@@ -385,7 +385,7 @@ int nilfs_sufile_alloc(struct inode *sufile, __u64 *segnump)
 			goto out_header;
 		}
 
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(su_bh);
 	}
 
@@ -407,16 +407,16 @@ void nilfs_sufile_do_cancel_free(struct inode *sufile, __u64 segnum,
 	struct nilfs_segment_usage *su;
 	void *kaddr;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (unlikely(!nilfs_segment_usage_clean(su))) {
 		printk(KERN_WARNING "%s: segment %llu must be clean\n",
 		       __func__, (unsigned long long)segnum);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	nilfs_segment_usage_set_dirty(su);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_sufile_mod_counter(header_bh, -1, 1);
 	NILFS_SUI(sufile)->ncleansegs--;
@@ -433,11 +433,11 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
 	void *kaddr;
 	int clean, dirty;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (su->su_flags == cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY) &&
 	    su->su_nblocks == cpu_to_le32(0)) {
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	clean = nilfs_segment_usage_clean(su);
@@ -447,7 +447,7 @@ void nilfs_sufile_do_scrap(struct inode *sufile, __u64 segnum,
 	su->su_lastmod = cpu_to_le64(0);
 	su->su_nblocks = cpu_to_le32(0);
 	su->su_flags = cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	nilfs_sufile_mod_counter(header_bh, clean ? (u64)-1 : 0, dirty ? 0 : 1);
 	NILFS_SUI(sufile)->ncleansegs -= clean;
@@ -464,12 +464,12 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 	void *kaddr;
 	int sudirty;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (nilfs_segment_usage_clean(su)) {
 		printk(KERN_WARNING "%s: segment %llu is already clean\n",
 		       __func__, (unsigned long long)segnum);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	WARN_ON(nilfs_segment_usage_error(su));
@@ -477,7 +477,7 @@ void nilfs_sufile_do_free(struct inode *sufile, __u64 segnum,
 
 	sudirty = nilfs_segment_usage_dirty(su);
 	nilfs_segment_usage_set_clean(su);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	mark_buffer_dirty(su_bh);
 
 	nilfs_sufile_mod_counter(header_bh, 1, sudirty ? (u64)-1 : 0);
@@ -525,13 +525,13 @@ int nilfs_sufile_set_segment_usage(struct inode *sufile, __u64 segnum,
 	if (ret < 0)
 		goto out_sem;
 
-	kaddr = kmap_atomic(bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, bh, kaddr);
 	WARN_ON(nilfs_segment_usage_error(su));
 	if (modtime)
 		su->su_lastmod = cpu_to_le64(modtime);
 	su->su_nblocks = cpu_to_le32(nblocks);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(bh);
 	nilfs_mdt_mark_dirty(sufile);
@@ -572,7 +572,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 	if (ret < 0)
 		goto out_sem;
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	sustat->ss_nsegs = nilfs_sufile_get_nsegments(sufile);
 	sustat->ss_ncleansegs = le64_to_cpu(header->sh_ncleansegs);
@@ -582,7 +582,7 @@ int nilfs_sufile_get_stat(struct inode *sufile, struct nilfs_sustat *sustat)
 	spin_lock(&nilfs->ns_last_segment_lock);
 	sustat->ss_prot_seq = nilfs->ns_prot_seq;
 	spin_unlock(&nilfs->ns_last_segment_lock);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(header_bh);
 
  out_sem:
@@ -598,15 +598,15 @@ void nilfs_sufile_do_set_error(struct inode *sufile, __u64 segnum,
 	void *kaddr;
 	int suclean;
 
-	kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(su_bh->b_page);
 	su = nilfs_sufile_block_get_segment_usage(sufile, segnum, su_bh, kaddr);
 	if (nilfs_segment_usage_error(su)) {
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		return;
 	}
 	suclean = nilfs_segment_usage_clean(su);
 	nilfs_segment_usage_set_error(su);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (suclean) {
 		nilfs_sufile_mod_counter(header_bh, -1, 0);
@@ -675,7 +675,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
 			/* hole */
 			continue;
 		}
-		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(su_bh->b_page);
 		su = nilfs_sufile_block_get_segment_usage(
 			sufile, segnum, su_bh, kaddr);
 		su2 = su;
@@ -684,7 +684,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
 			     ~(1UL << NILFS_SEGMENT_USAGE_ERROR)) ||
 			    nilfs_segment_is_active(nilfs, segnum + j)) {
 				ret = -EBUSY;
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 				brelse(su_bh);
 				goto out_header;
 			}
@@ -696,7 +696,7 @@ static int nilfs_sufile_truncate_range(struct inode *sufile,
 				nc++;
 			}
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		if (nc > 0) {
 			mark_buffer_dirty(su_bh);
 			ncleaned += nc;
@@ -772,10 +772,10 @@ int nilfs_sufile_resize(struct inode *sufile, __u64 newnsegs)
 		sui->ncleansegs -= nsegs - newnsegs;
 	}
 
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	header->sh_ncleansegs = cpu_to_le64(sui->ncleansegs);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	mark_buffer_dirty(header_bh);
 	nilfs_mdt_mark_dirty(sufile);
@@ -840,7 +840,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
 			continue;
 		}
 
-		kaddr = kmap_atomic(su_bh->b_page, KM_USER0);
+		kaddr = kmap_atomic(su_bh->b_page);
 		su = nilfs_sufile_block_get_segment_usage(
 			sufile, segnum, su_bh, kaddr);
 		for (j = 0; j < n;
@@ -853,7 +853,7 @@ ssize_t nilfs_sufile_get_suinfo(struct inode *sufile, __u64 segnum, void *buf,
 				si->sui_flags |=
 					(1UL << NILFS_SEGMENT_USAGE_ACTIVE);
 		}
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		brelse(su_bh);
 	}
 	ret = nsegs;
@@ -902,10 +902,10 @@ int nilfs_sufile_read(struct super_block *sb, size_t susize,
 		goto failed;
 
 	sui = NILFS_SUI(sufile);
-	kaddr = kmap_atomic(header_bh->b_page, KM_USER0);
+	kaddr = kmap_atomic(header_bh->b_page);
 	header = kaddr + bh_offset(header_bh);
 	sui->ncleansegs = le64_to_cpu(header->sh_ncleansegs);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	brelse(header_bh);
 
 	sui->allocmax = nilfs_sufile_get_nsegments(sufile) - 1;
diff --git a/fs/ntfs/aops.c b/fs/ntfs/aops.c
index 0b1e885b8cf8..fa9c05f97af4 100644
--- a/fs/ntfs/aops.c
+++ b/fs/ntfs/aops.c
@@ -94,11 +94,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 			if (file_ofs < init_size)
 				ofs = init_size - file_ofs;
 			local_irq_save(flags);
-			kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+			kaddr = kmap_atomic(page);
 			memset(kaddr + bh_offset(bh) + ofs, 0,
 					bh->b_size - ofs);
 			flush_dcache_page(page);
-			kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+			kunmap_atomic(kaddr);
 			local_irq_restore(flags);
 		}
 	} else {
@@ -147,11 +147,11 @@ static void ntfs_end_buffer_async_read(struct buffer_head *bh, int uptodate)
 		/* Should have been verified before we got here... */
 		BUG_ON(!recs);
 		local_irq_save(flags);
-		kaddr = kmap_atomic(page, KM_BIO_SRC_IRQ);
+		kaddr = kmap_atomic(page);
 		for (i = 0; i < recs; i++)
 			post_read_mst_fixup((NTFS_RECORD*)(kaddr +
 					i * rec_size), rec_size);
-		kunmap_atomic(kaddr, KM_BIO_SRC_IRQ);
+		kunmap_atomic(kaddr);
 		local_irq_restore(flags);
 		flush_dcache_page(page);
 		if (likely(page_uptodate && !PageError(page)))
@@ -504,7 +504,7 @@ retry_readpage:
 		/* Race with shrinking truncate. */
 		attr_len = i_size;
 	}
-	addr = kmap_atomic(page, KM_USER0);
+	addr = kmap_atomic(page);
 	/* Copy the data to the page. */
 	memcpy(addr, (u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
@@ -512,7 +512,7 @@ retry_readpage:
 	/* Zero the remainder of the page. */
 	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
 	flush_dcache_page(page);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 put_unm_err_out:
 	ntfs_attr_put_search_ctx(ctx);
 unm_err_out:
@@ -746,14 +746,14 @@ lock_retry_remap:
 			unsigned long *bpos, *bend;
 
 			/* Check if the buffer is zero. */
-			kaddr = kmap_atomic(page, KM_USER0);
+			kaddr = kmap_atomic(page);
 			bpos = (unsigned long *)(kaddr + bh_offset(bh));
 			bend = (unsigned long *)((u8*)bpos + blocksize);
 			do {
 				if (unlikely(*bpos))
 					break;
 			} while (likely(++bpos < bend));
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			if (bpos == bend) {
 				/*
 				 * Buffer is zero and sparse, no need to write
@@ -1495,14 +1495,14 @@ retry_writepage:
 		/* Shrinking cannot fail. */
 		BUG_ON(err);
 	}
-	addr = kmap_atomic(page, KM_USER0);
+	addr = kmap_atomic(page);
 	/* Copy the data from the page to the mft record. */
 	memcpy((u8*)ctx->attr +
 			le16_to_cpu(ctx->attr->data.resident.value_offset),
 			addr, attr_len);
 	/* Zero out of bounds area in the page cache page. */
 	memset(addr + attr_len, 0, PAGE_CACHE_SIZE - attr_len);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 	flush_dcache_page(page);
 	flush_dcache_mft_record_page(ctx->ntfs_ino);
 	/* We are done with the page. */
diff --git a/fs/ntfs/attrib.c b/fs/ntfs/attrib.c
index e0281992ddc3..a27e3fecefaf 100644
--- a/fs/ntfs/attrib.c
+++ b/fs/ntfs/attrib.c
@@ -1656,12 +1656,12 @@ int ntfs_attr_make_non_resident(ntfs_inode *ni, const u32 data_size)
 	attr_size = le32_to_cpu(a->data.resident.value_length);
 	BUG_ON(attr_size != data_size);
 	if (page && !PageUptodate(page)) {
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memcpy(kaddr, (u8*)a +
 				le16_to_cpu(a->data.resident.value_offset),
 				attr_size);
 		memset(kaddr + attr_size, 0, PAGE_CACHE_SIZE - attr_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		flush_dcache_page(page);
 		SetPageUptodate(page);
 	}
@@ -1806,9 +1806,9 @@ undo_err_out:
 			sizeof(a->data.resident.reserved));
 	/* Copy the data from the page back to the attribute value. */
 	if (page) {
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memcpy((u8*)a + mp_ofs, kaddr, attr_size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 	}
 	/* Setup the allocated size in the ntfs inode in case it changed. */
 	write_lock_irqsave(&ni->size_lock, flags);
@@ -2540,10 +2540,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 		size = PAGE_CACHE_SIZE;
 		if (idx == end)
 			size = end_ofs;
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memset(kaddr + start_ofs, val, size - start_ofs);
 		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		set_page_dirty(page);
 		page_cache_release(page);
 		balance_dirty_pages_ratelimited(mapping);
@@ -2561,10 +2561,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 					"page (index 0x%lx).", idx);
 			return -ENOMEM;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memset(kaddr, val, PAGE_CACHE_SIZE);
 		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		/*
 		 * If the page has buffers, mark them uptodate since buffer
 		 * state and not page state is definitive in 2.6 kernels.
@@ -2598,10 +2598,10 @@ int ntfs_attr_set(ntfs_inode *ni, const s64 ofs, const s64 cnt, const u8 val)
 					"(error, index 0x%lx).", idx);
 			return PTR_ERR(page);
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		memset(kaddr, val, end_ofs);
 		flush_dcache_page(page);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		set_page_dirty(page);
 		page_cache_release(page);
 		balance_dirty_pages_ratelimited(mapping);
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index c587e2d27183..8639169221c7 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -704,7 +704,7 @@ map_buffer_cached:
 				u8 *kaddr;
 				unsigned pofs;
 
-				kaddr = kmap_atomic(page, KM_USER0);
+				kaddr = kmap_atomic(page);
 				if (bh_pos < pos) {
 					pofs = bh_pos & ~PAGE_CACHE_MASK;
 					memset(kaddr + pofs, 0, pos - bh_pos);
@@ -713,7 +713,7 @@ map_buffer_cached:
 					pofs = end & ~PAGE_CACHE_MASK;
 					memset(kaddr + pofs, 0, bh_end - end);
 				}
-				kunmap_atomic(kaddr, KM_USER0);
+				kunmap_atomic(kaddr);
 				flush_dcache_page(page);
 			}
 			continue;
@@ -1287,9 +1287,9 @@ static inline size_t ntfs_copy_from_user(struct page **pages,
 		len = PAGE_CACHE_SIZE - ofs;
 		if (len > bytes)
 			len = bytes;
-		addr = kmap_atomic(*pages, KM_USER0);
+		addr = kmap_atomic(*pages);
 		left = __copy_from_user_inatomic(addr + ofs, buf, len);
-		kunmap_atomic(addr, KM_USER0);
+		kunmap_atomic(addr);
 		if (unlikely(left)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
@@ -1401,10 +1401,10 @@ static inline size_t ntfs_copy_from_user_iovec(struct page **pages,
 		len = PAGE_CACHE_SIZE - ofs;
 		if (len > bytes)
 			len = bytes;
-		addr = kmap_atomic(*pages, KM_USER0);
+		addr = kmap_atomic(*pages);
 		copied = __ntfs_copy_from_user_iovec_inatomic(addr + ofs,
 				*iov, *iov_ofs, len);
-		kunmap_atomic(addr, KM_USER0);
+		kunmap_atomic(addr);
 		if (unlikely(copied != len)) {
 			/* Do it the slow way. */
 			addr = kmap(*pages);
@@ -1691,7 +1691,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
 	BUG_ON(end > le32_to_cpu(a->length) -
 			le16_to_cpu(a->data.resident.value_offset));
 	kattr = (u8*)a + le16_to_cpu(a->data.resident.value_offset);
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	/* Copy the received data from the page to the mft record. */
 	memcpy(kattr + pos, kaddr + pos, bytes);
 	/* Update the attribute length if necessary. */
@@ -1713,7 +1713,7 @@ static int ntfs_commit_pages_after_write(struct page **pages,
1713 flush_dcache_page(page); 1713 flush_dcache_page(page);
1714 SetPageUptodate(page); 1714 SetPageUptodate(page);
1715 } 1715 }
1716 kunmap_atomic(kaddr, KM_USER0); 1716 kunmap_atomic(kaddr);
1717 /* Update initialized_size/i_size if necessary. */ 1717 /* Update initialized_size/i_size if necessary. */
1718 read_lock_irqsave(&ni->size_lock, flags); 1718 read_lock_irqsave(&ni->size_lock, flags);
1719 initialized_size = ni->initialized_size; 1719 initialized_size = ni->initialized_size;
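The ntfs_copy_from_user() hunks show the standard pattern around atomic mappings: try the copy with __copy_from_user_inatomic(), which can come up short because page faults are disabled under kmap_atomic(), and fall back to a sleeping kmap() only if bytes were left uncopied. A hedged sketch of that idiom; the helper is hypothetical:

	#include <linux/highmem.h>
	#include <linux/uaccess.h>

	/* Hypothetical helper: returns the number of bytes actually copied. */
	static size_t copy_user_to_page(struct page *page, unsigned int ofs,
					const char __user *buf, size_t len)
	{
		char *addr;
		size_t left;

		addr = kmap_atomic(page);
		left = __copy_from_user_inatomic(addr + ofs, buf, len);
		kunmap_atomic(addr);
		if (left) {
			/* Atomic copy faulted; do it the slow way. */
			addr = kmap(page);
			left = __copy_from_user(addr + ofs, buf, len);
			kunmap(page);
		}
		return len - left;
	}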
diff --git a/fs/ntfs/super.c b/fs/ntfs/super.c
index f907611cca73..28d4e6ab6634 100644
--- a/fs/ntfs/super.c
+++ b/fs/ntfs/super.c
@@ -2473,7 +2473,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		/*
 		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
@@ -2483,7 +2483,7 @@ static s64 get_nr_free_clusters(ntfs_volume *vol)
 		 */
 		nr_free -= bitmap_weight(kaddr,
 					PAGE_CACHE_SIZE * BITS_PER_BYTE);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		page_cache_release(page);
 	}
 	ntfs_debug("Finished reading $Bitmap, last index = 0x%lx.", index - 1);
@@ -2544,7 +2544,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 			nr_free -= PAGE_CACHE_SIZE * 8;
 			continue;
 		}
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		/*
 		 * Subtract the number of set bits. If this
 		 * is the last page and it is partial we don't really care as
@@ -2554,7 +2554,7 @@ static unsigned long __get_nr_free_mft_records(ntfs_volume *vol,
 		 */
 		nr_free -= bitmap_weight(kaddr,
 					PAGE_CACHE_SIZE * BITS_PER_BYTE);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		page_cache_release(page);
 	}
 	ntfs_debug("Finished reading $MFT/$BITMAP, last index = 0x%lx.",
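Both super.c hunks do the same thing: map a bitmap page atomically, count its set bits with bitmap_weight(), and unmap. A sketch under the assumption that PAGE_SIZE == PAGE_CACHE_SIZE (true at this point in the tree); the function name is made up:

	#include <linux/bitmap.h>
	#include <linux/bitops.h>
	#include <linux/highmem.h>
	#include <linux/mm.h>

	/* Hypothetical: count set bits in one page of an on-disk bitmap. */
	static int page_bitmap_weight(struct page *page)
	{
		unsigned long *kaddr;
		int weight;

		kaddr = kmap_atomic(page);
		weight = bitmap_weight(kaddr, PAGE_SIZE * BITS_PER_BYTE);
		kunmap_atomic(kaddr);
		return weight;
	}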
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 78b68af3b0e3..657743254eb9 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -102,7 +102,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
 		 * copy, the data is still good. */
 		if (buffer_jbd(buffer_cache_bh)
 		    && ocfs2_inode_is_new(inode)) {
-			kaddr = kmap_atomic(bh_result->b_page, KM_USER0);
+			kaddr = kmap_atomic(bh_result->b_page);
 			if (!kaddr) {
 				mlog(ML_ERROR, "couldn't kmap!\n");
 				goto bail;
@@ -110,7 +110,7 @@ static int ocfs2_symlink_get_block(struct inode *inode, sector_t iblock,
 			memcpy(kaddr + (bh_result->b_size * iblock),
 			       buffer_cache_bh->b_data,
 			       bh_result->b_size);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 			set_buffer_uptodate(bh_result);
 		}
 		brelse(buffer_cache_bh);
@@ -236,13 +236,13 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
 		return -EROFS;
 	}
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (size)
 		memcpy(kaddr, di->id2.i_data.id_data, size);
 	/* Clear the remaining part of the page */
 	memset(kaddr + size, 0, PAGE_CACHE_SIZE - size);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	SetPageUptodate(page);
 
@@ -689,7 +689,7 @@ static void ocfs2_clear_page_regions(struct page *page,
 
 	ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 
 	if (from || to) {
 		if (from > cluster_start)
@@ -700,7 +700,7 @@ static void ocfs2_clear_page_regions(struct page *page,
 		memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
 	}
 
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 }
 
 /*
@@ -1981,9 +1981,9 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
 		}
 	}
 
-	kaddr = kmap_atomic(wc->w_target_page, KM_USER0);
+	kaddr = kmap_atomic(wc->w_target_page);
 	memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	trace_ocfs2_write_end_inline(
 		(unsigned long long)OCFS2_I(inode)->ip_blkno,
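The ocfs2_read_inline_data() hunk is a typical inline-data read: copy what lives in the inode, zero the rest of the page, flush, and mark the page uptodate. A hedged sketch (the helper name and the generic src pointer are assumptions):

	#include <linux/highmem.h>
	#include <linux/mm.h>
	#include <linux/string.h>

	/* Hypothetical: populate a page cache page from in-inode data. */
	static void fill_page_from_inline(struct page *page, const void *src,
					  size_t size)
	{
		char *kaddr = kmap_atomic(page);

		if (size)
			memcpy(kaddr, src, size);
		memset(kaddr + size, 0, PAGE_SIZE - size);	/* zero the tail */
		flush_dcache_page(page);
		kunmap_atomic(kaddr);
		SetPageUptodate(page);
	}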
diff --git a/fs/pipe.c b/fs/pipe.c
index a932ced92a16..fe0502f9beb2 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -230,7 +230,7 @@ void *generic_pipe_buf_map(struct pipe_inode_info *pipe,
 {
 	if (atomic) {
 		buf->flags |= PIPE_BUF_FLAG_ATOMIC;
-		return kmap_atomic(buf->page, KM_USER0);
+		return kmap_atomic(buf->page);
 	}
 
 	return kmap(buf->page);
@@ -251,7 +251,7 @@ void generic_pipe_buf_unmap(struct pipe_inode_info *pipe,
 {
 	if (buf->flags & PIPE_BUF_FLAG_ATOMIC) {
 		buf->flags &= ~PIPE_BUF_FLAG_ATOMIC;
-		kunmap_atomic(map_data, KM_USER0);
+		kunmap_atomic(map_data);
 	} else
 		kunmap(buf->page);
 }
@@ -565,14 +565,14 @@ redo1:
 			iov_fault_in_pages_read(iov, chars);
 redo2:
 			if (atomic)
-				src = kmap_atomic(page, KM_USER0);
+				src = kmap_atomic(page);
 			else
 				src = kmap(page);
 
 			error = pipe_iov_copy_from_user(src, iov, chars,
 							atomic);
 			if (atomic)
-				kunmap_atomic(src, KM_USER0);
+				kunmap_atomic(src);
 			else
 				kunmap(page);
 
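generic_pipe_buf_map()/unmap() keep both branches because the two mapping APIs are asymmetric: kunmap_atomic() takes the mapped address, while kunmap() takes the page. A sketch of that shape (the wrapper names are hypothetical):

	#include <linux/highmem.h>
	#include <linux/types.h>

	static void *map_page(struct page *page, bool atomic)
	{
		/* Atomic contexts must not sleep, so use kmap_atomic(). */
		return atomic ? kmap_atomic(page) : kmap(page);
	}

	static void unmap_page(struct page *page, void *addr, bool atomic)
	{
		if (atomic)
			kunmap_atomic(addr);	/* takes the address... */
		else
			kunmap(page);		/* ...not the page */
	}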
diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c
index 313d39d639eb..77df82f9e70a 100644
--- a/fs/reiserfs/stree.c
+++ b/fs/reiserfs/stree.c
@@ -1284,12 +1284,12 @@ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
 			 ** -clm
 			 */
 
-			data = kmap_atomic(un_bh->b_page, KM_USER0);
+			data = kmap_atomic(un_bh->b_page);
 			off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
 			memcpy(data + off,
 			       B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
 			       ret_value);
-			kunmap_atomic(data, KM_USER0);
+			kunmap_atomic(data);
 		}
 		/* Perform balancing after all resources have been collected at once. */
 		do_balance(&s_del_balance, NULL, NULL, M_DELETE);
diff --git a/fs/reiserfs/tail_conversion.c b/fs/reiserfs/tail_conversion.c
index d7f6e51bef2a..8f546bd473b8 100644
--- a/fs/reiserfs/tail_conversion.c
+++ b/fs/reiserfs/tail_conversion.c
@@ -128,9 +128,9 @@ int direct2indirect(struct reiserfs_transaction_handle *th, struct inode *inode,
 		if (up_to_date_bh) {
 			unsigned pgoff =
 			    (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
-			char *kaddr = kmap_atomic(up_to_date_bh->b_page, KM_USER0);
+			char *kaddr = kmap_atomic(up_to_date_bh->b_page);
 			memset(kaddr + pgoff, 0, blk_size - total_tail);
-			kunmap_atomic(kaddr, KM_USER0);
+			kunmap_atomic(kaddr);
 		}
 
 		REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
diff --git a/fs/splice.c b/fs/splice.c
index 1ec0493266b3..f16402ed915c 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -737,15 +737,12 @@ int pipe_to_file(struct pipe_inode_info *pipe, struct pipe_buffer *buf,
 		goto out;
 
 	if (buf->page != page) {
-		/*
-		 * Careful, ->map() uses KM_USER0!
-		 */
 		char *src = buf->ops->map(pipe, buf, 1);
-		char *dst = kmap_atomic(page, KM_USER1);
+		char *dst = kmap_atomic(page);
 
 		memcpy(dst + offset, src + buf->offset, this_len);
 		flush_dcache_page(page);
-		kunmap_atomic(dst, KM_USER1);
+		kunmap_atomic(dst);
 		buf->ops->unmap(pipe, buf, src);
 	}
 	ret = pagecache_write_end(file, mapping, sd->pos, this_len, this_len,
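This splice.c hunk shows why the slot argument could go at all: the old code had to keep KM_USER0 (used inside ->map()) and KM_USER1 apart by hand, while stack-based kmap_atomic() lets mappings nest freely as long as they are released in LIFO order. A hedged sketch of two simultaneous mappings; the helper is hypothetical:

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Hypothetical: copy len bytes between two pages mapped at once. */
	static void copy_between_pages(struct page *dst_page,
				       struct page *src_page, size_t len)
	{
		char *src = kmap_atomic(src_page);
		char *dst = kmap_atomic(dst_page);

		memcpy(dst, src, len);
		flush_dcache_page(dst_page);
		kunmap_atomic(dst);	/* last mapped, first unmapped */
		kunmap_atomic(src);
	}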
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c
index 38bb1c640559..8ca62c28fe12 100644
--- a/fs/squashfs/file.c
+++ b/fs/squashfs/file.c
@@ -464,10 +464,10 @@ static int squashfs_readpage(struct file *file, struct page *page)
 			if (PageUptodate(push_page))
 				goto skip_page;
 
-			pageaddr = kmap_atomic(push_page, KM_USER0);
+			pageaddr = kmap_atomic(push_page);
 			squashfs_copy_data(pageaddr, buffer, offset, avail);
 			memset(pageaddr + avail, 0, PAGE_CACHE_SIZE - avail);
-			kunmap_atomic(pageaddr, KM_USER0);
+			kunmap_atomic(pageaddr);
 			flush_dcache_page(push_page);
 			SetPageUptodate(push_page);
 skip_page:
@@ -484,9 +484,9 @@ skip_page:
 error_out:
 	SetPageError(page);
 out:
-	pageaddr = kmap_atomic(page, KM_USER0);
+	pageaddr = kmap_atomic(page);
 	memset(pageaddr, 0, PAGE_CACHE_SIZE);
-	kunmap_atomic(pageaddr, KM_USER0);
+	kunmap_atomic(pageaddr);
 	flush_dcache_page(page);
 	if (!PageError(page))
 		SetPageUptodate(page);
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c
index 1191817264cc..12806dffb345 100644
--- a/fs/squashfs/symlink.c
+++ b/fs/squashfs/symlink.c
@@ -90,14 +90,14 @@ static int squashfs_symlink_readpage(struct file *file, struct page *page)
 			goto error_out;
 		}
 
-		pageaddr = kmap_atomic(page, KM_USER0);
+		pageaddr = kmap_atomic(page);
 		copied = squashfs_copy_data(pageaddr + bytes, entry, offset,
 						length - bytes);
 		if (copied == length - bytes)
 			memset(pageaddr + length, 0, PAGE_CACHE_SIZE - length);
 		else
 			block = entry->next_index;
-		kunmap_atomic(pageaddr, KM_USER0);
+		kunmap_atomic(pageaddr);
 		squashfs_cache_put(entry);
 	}
 
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index f9c234bf33d3..5c8f6dc1d28b 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -1042,10 +1042,10 @@ static int ubifs_writepage(struct page *page, struct writeback_control *wbc)
 	 * the page size, the remaining memory is zeroed when mapped, and
 	 * writes to that region are not written out to the file."
 	 */
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memset(kaddr + len, 0, PAGE_CACHE_SIZE - len);
 	flush_dcache_page(page);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	if (i_size > synced_i_size) {
 		err = inode->i_sb->s_op->write_inode(inode, NULL);
diff --git a/fs/udf/file.c b/fs/udf/file.c
index d567b8448dfc..7f3f7ba3df6e 100644
--- a/fs/udf/file.c
+++ b/fs/udf/file.c
@@ -87,10 +87,10 @@ static int udf_adinicb_write_end(struct file *file,
 	char *kaddr;
 	struct udf_inode_info *iinfo = UDF_I(inode);
 
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memcpy(iinfo->i_ext.i_data + iinfo->i_lenEAttr + offset,
 		kaddr + offset, copied);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	return simple_write_end(file, mapping, pos, len, copied, page, fsdata);
 }