Diffstat (limited to 'mm')
 mm/bounce.c   |  4 ++--
 mm/filemap.c  |  8 ++++----
 mm/ksm.c      | 12 ++++++------
 mm/memory.c   |  4 ++--
 mm/shmem.c    |  4 ++--
 mm/swapfile.c | 30 +++++++++++++++---------------
 mm/vmalloc.c  |  8 ++++----
 7 files changed, 35 insertions(+), 35 deletions(-)
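
Every hunk below makes the same mechanical change: kmap_atomic() and kunmap_atomic() lose their KM_* slot argument (KM_USER0, KM_USER1, KM_BOUNCE_READ), since atomic-kmap slots are now managed implicitly on a per-CPU stack. A minimal sketch of the new calling convention (hypothetical helper, not part of this patch):

#include <linux/highmem.h>
#include <linux/string.h>

/* Copy 'len' bytes out of a possibly-highmem page. The kmap slot is
 * chosen implicitly; the caller no longer names KM_USER0 et al.
 */
static void copy_from_page(struct page *page, void *dst, size_t len)
{
	void *addr = kmap_atomic(page);	/* was: kmap_atomic(page, KM_USER0) */

	memcpy(dst, addr, len);
	kunmap_atomic(addr);		/* was: kunmap_atomic(addr, KM_USER0) */
}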
diff --git a/mm/bounce.c b/mm/bounce.c
index 4e9ae722af8..d1be02ca188 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -50,9 +50,9 @@ static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
 	unsigned char *vto;
 
 	local_irq_save(flags);
-	vto = kmap_atomic(to->bv_page, KM_BOUNCE_READ);
+	vto = kmap_atomic(to->bv_page);
 	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
-	kunmap_atomic(vto, KM_BOUNCE_READ);
+	kunmap_atomic(vto);
 	local_irq_restore(flags);
 }
 
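
Note the asymmetry preserved on every conversion: kmap_atomic() takes the struct page, while kunmap_atomic() is handed back the kernel virtual address that was returned (any address inside the mapped page works, which the swapfile.c hunks below rely on). A sketch with a hypothetical helper:

#include <linux/highmem.h>
#include <linux/string.h>

/* Zero a page through a temporary atomic mapping. */
static void zero_page_atomic(struct page *page)
{
	void *vaddr = kmap_atomic(page);	/* maps the page */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap_atomic(vaddr);			/* takes the address, not the page */
}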
diff --git a/mm/filemap.c b/mm/filemap.c
index b66275757c2..2f8165075a5 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1318,10 +1318,10 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
 	 * taking the kmap.
 	 */
 	if (!fault_in_pages_writeable(desc->arg.buf, size)) {
-		kaddr = kmap_atomic(page, KM_USER0);
+		kaddr = kmap_atomic(page);
 		left = __copy_to_user_inatomic(desc->arg.buf,
 						kaddr + offset, size);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		if (left == 0)
 			goto success;
 	}
@@ -2045,7 +2045,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 	size_t copied;
 
 	BUG_ON(!in_atomic());
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	if (likely(i->nr_segs == 1)) {
 		int left;
 		char __user *buf = i->iov->iov_base + i->iov_offset;
@@ -2055,7 +2055,7 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		copied = __iovec_copy_from_user_inatomic(kaddr + offset,
 						i->iov, i->iov_offset, bytes);
 	}
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 
 	return copied;
 }
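
The first filemap.c hunk shows why the kmap stays so short: the user buffer is pre-faulted while sleeping is still allowed, then the copy runs with page faults disabled under the atomic mapping. A hedged sketch of that pattern (illustrative helper, not part of this patch):

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uaccess.h>

/* Fault the destination in first, then copy without sleeping,
 * mirroring file_read_actor() above.
 */
static int copy_page_to_user(struct page *page, char __user *buf,
			     unsigned long offset, unsigned long size)
{
	void *kaddr;
	unsigned long left;

	if (fault_in_pages_writeable(buf, size))
		return -EFAULT;		/* destination not writable */

	kaddr = kmap_atomic(page);	/* faults are disabled from here */
	left = __copy_to_user_inatomic(buf, kaddr + offset, size);
	kunmap_atomic(kaddr);

	return left ? -EFAULT : 0;
}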
diff --git a/mm/ksm.c b/mm/ksm.c
index 310544a379a..a6d3fb7e6c1 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -672,9 +672,9 @@ error:
 static u32 calc_checksum(struct page *page)
 {
 	u32 checksum;
-	void *addr = kmap_atomic(page, KM_USER0);
+	void *addr = kmap_atomic(page);
 	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
-	kunmap_atomic(addr, KM_USER0);
+	kunmap_atomic(addr);
 	return checksum;
 }
 
@@ -683,11 +683,11 @@ static int memcmp_pages(struct page *page1, struct page *page2)
 	char *addr1, *addr2;
 	int ret;
 
-	addr1 = kmap_atomic(page1, KM_USER0);
-	addr2 = kmap_atomic(page2, KM_USER1);
+	addr1 = kmap_atomic(page1);
+	addr2 = kmap_atomic(page2);
 	ret = memcmp(addr1, addr2, PAGE_SIZE);
-	kunmap_atomic(addr2, KM_USER1);
-	kunmap_atomic(addr1, KM_USER0);
+	kunmap_atomic(addr2);
+	kunmap_atomic(addr1);
 	return ret;
 }
 
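
memcmp_pages() is the interesting case: it needs two pages mapped at once. Before this change that required two distinct slots (KM_USER0 and KM_USER1); with the stacked implementation the second kmap_atomic() simply nests, but the mappings must be released in reverse (LIFO) order, which the diff preserves. A sketch mirroring the hunk above:

#include <linux/highmem.h>
#include <linux/string.h>

/* Compare two pages through two nested atomic mappings. The
 * mappings nest on a per-CPU stack, so unmap in reverse order.
 */
static int pages_identical_sketch(struct page *p1, struct page *p2)
{
	char *a1, *a2;
	int ret;

	a1 = kmap_atomic(p1);		/* pushed first */
	a2 = kmap_atomic(p2);		/* pushed second */
	ret = memcmp(a1, a2, PAGE_SIZE);
	kunmap_atomic(a2);		/* popped first (LIFO) */
	kunmap_atomic(a1);		/* popped last */
	return !ret;
}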
diff --git a/mm/memory.c b/mm/memory.c
index fa2f04e0337..347e5fad1cf 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2447,7 +2447,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 	 * fails, we just zero-fill it. Live with it.
 	 */
 	if (unlikely(!src)) {
-		void *kaddr = kmap_atomic(dst, KM_USER0);
+		void *kaddr = kmap_atomic(dst);
 		void __user *uaddr = (void __user *)(va & PAGE_MASK);
 
 		/*
@@ -2458,7 +2458,7 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
 		 */
 		if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
 			clear_page(kaddr);
-		kunmap_atomic(kaddr, KM_USER0);
+		kunmap_atomic(kaddr);
 		flush_dcache_page(dst);
 	} else
 		copy_user_highpage(dst, src, va, vma);
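
cow_user_page() has to tolerate the copy faulting: the source is a user address and page faults are disabled under kmap_atomic(), so a failed copy degrades to a zeroed destination rather than an error. The shape of that fallback, as a hedged sketch (hypothetical helper; 'va' is the faulting user address):

#include <linux/highmem.h>
#include <linux/uaccess.h>

/* Copy one user page into dst; zero-fill if the user memory is
 * unreadable while faults are disabled.
 */
static void copy_user_page_or_zero(struct page *dst, unsigned long va)
{
	void *kaddr = kmap_atomic(dst);
	void __user *uaddr = (void __user *)(va & PAGE_MASK);

	if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
		clear_page(kaddr);	/* copy faulted: zero the page */
	kunmap_atomic(kaddr);
	flush_dcache_page(dst);		/* kernel wrote dst via a kmap */
}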
diff --git a/mm/shmem.c b/mm/shmem.c
index 269d049294a..b7e19557186 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1656,9 +1656,9 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
 	}
 	inode->i_mapping->a_ops = &shmem_aops;
 	inode->i_op = &shmem_symlink_inode_operations;
-	kaddr = kmap_atomic(page, KM_USER0);
+	kaddr = kmap_atomic(page);
 	memcpy(kaddr, symname, len);
-	kunmap_atomic(kaddr, KM_USER0);
+	kunmap_atomic(kaddr);
 	set_page_dirty(page);
 	unlock_page(page);
 	page_cache_release(page);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index d999f090dfd..00a962caab1 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -2427,9 +2427,9 @@ int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
 	if (!(count & COUNT_CONTINUED))
 		goto out;
 
-	map = kmap_atomic(list_page, KM_USER0) + offset;
+	map = kmap_atomic(list_page) + offset;
 	count = *map;
-	kunmap_atomic(map, KM_USER0);
+	kunmap_atomic(map);
 
 	/*
 	 * If this continuation count now has some space in it,
@@ -2472,7 +2472,7 @@ static bool swap_count_continued(struct swap_info_struct *si,
 
 	offset &= ~PAGE_MASK;
 	page = list_entry(head->lru.next, struct page, lru);
-	map = kmap_atomic(page, KM_USER0) + offset;
+	map = kmap_atomic(page) + offset;
 
 	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
 		goto init_map;	/* jump over SWAP_CONT_MAX checks */
@@ -2482,26 +2482,26 @@ static bool swap_count_continued(struct swap_info_struct *si,
 		 * Think of how you add 1 to 999
 		 */
 		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.next, struct page, lru);
 			BUG_ON(page == head);
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 		}
 		if (*map == SWAP_CONT_MAX) {
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.next, struct page, lru);
 			if (page == head)
 				return false;	/* add count continuation */
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 init_map:		*map = 0;		/* we didn't zero the page */
 		}
 		*map += 1;
-		kunmap_atomic(map, KM_USER0);
+		kunmap_atomic(map);
 		page = list_entry(page->lru.prev, struct page, lru);
 		while (page != head) {
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 			*map = COUNT_CONTINUED;
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.prev, struct page, lru);
 		}
 		return true;			/* incremented */
@@ -2512,22 +2512,22 @@ init_map: *map = 0; /* we didn't zero the page */
 		 */
 		BUG_ON(count != COUNT_CONTINUED);
 		while (*map == COUNT_CONTINUED) {
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.next, struct page, lru);
 			BUG_ON(page == head);
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 		}
 		BUG_ON(*map == 0);
 		*map -= 1;
 		if (*map == 0)
 			count = 0;
-		kunmap_atomic(map, KM_USER0);
+		kunmap_atomic(map);
 		page = list_entry(page->lru.prev, struct page, lru);
 		while (page != head) {
-			map = kmap_atomic(page, KM_USER0) + offset;
+			map = kmap_atomic(page) + offset;
 			*map = SWAP_CONT_MAX | count;
 			count = COUNT_CONTINUED;
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 			page = list_entry(page->lru.prev, struct page, lru);
 		}
 		return count == COUNT_CONTINUED;
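
swap_count_continued() keeps at most one continuation page mapped at a time, unmapping before stepping to the next page in the list; since kmap_atomic() returns the page's base address, the count byte is reached by adding the same offset on every page, and the offset address can be handed straight back to kunmap_atomic(). A sketch of that walk (hypothetical helper; COUNT_CONTINUED and the lru-list layout as in the code above):

#include <linux/highmem.h>
#include <linux/list.h>
#include <linux/swap.h>

/* Visit one byte at 'offset' in each continuation page after 'head',
 * holding only one atomic mapping at a time.
 */
static void mark_continuations(struct page *head, int offset)
{
	struct page *page = list_entry(head->lru.next, struct page, lru);

	while (page != head) {
		unsigned char *map = kmap_atomic(page) + offset;

		*map = COUNT_CONTINUED;	/* modify while mapped */
		kunmap_atomic(map);	/* unmap before moving on */
		page = list_entry(page->lru.next, struct page, lru);
	}
}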
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 86ce9a526c1..94dff883b44 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1906,9 +1906,9 @@ static int aligned_vread(char *buf, char *addr, unsigned long count)
 			 * we can expect USER0 is not used (see vread/vwrite's
 			 * function description)
 			 */
-			void *map = kmap_atomic(p, KM_USER0);
+			void *map = kmap_atomic(p);
 			memcpy(buf, map + offset, length);
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 		} else
 			memset(buf, 0, length);
 
@@ -1945,9 +1945,9 @@ static int aligned_vwrite(char *buf, char *addr, unsigned long count)
 			 * we can expect USER0 is not used (see vread/vwrite's
 			 * function description)
 			 */
-			void *map = kmap_atomic(p, KM_USER0);
+			void *map = kmap_atomic(p);
 			memcpy(map + offset, buf, length);
-			kunmap_atomic(map, KM_USER0);
+			kunmap_atomic(map);
 		}
 		addr += length;
 		buf += length;