diff options
author | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-21 12:40:26 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@linux-foundation.org> | 2012-03-21 12:40:26 -0400 |
commit | 9f3938346a5c1fa504647670edb5fea5756cfb00 (patch) | |
tree | 7cf6d24d6b076c8db8571494984924cac03703a2 /arch | |
parent | 69a7aebcf019ab3ff5764525ad6858fbe23bb86d (diff) | |
parent | 317b6e128247f75976b0fc2b9fd8d2c20ef13b3a (diff) |
Merge branch 'kmap_atomic' of git://github.com/congwang/linux
Pull kmap_atomic cleanup from Cong Wang.
It's been in -next for a long time, and it gets rid of the (no longer
used) second argument to k[un]map_atomic().
Fix up a few trivial conflicts in various drivers, and do an "evil
merge" to catch some new uses that have come in since Cong's tree.
* 'kmap_atomic' of git://github.com/congwang/linux: (59 commits)
feature-removal-schedule.txt: schedule the deprecated form of kmap_atomic() for removal
highmem: kill all __kmap_atomic() [swarren@nvidia.com: highmem: Fix ARM build break due to __kmap_atomic rename]
drbd: remove the second argument of k[un]map_atomic()
zcache: remove the second argument of k[un]map_atomic()
gma500: remove the second argument of k[un]map_atomic()
dm: remove the second argument of k[un]map_atomic()
tomoyo: remove the second argument of k[un]map_atomic()
sunrpc: remove the second argument of k[un]map_atomic()
rds: remove the second argument of k[un]map_atomic()
net: remove the second argument of k[un]map_atomic()
mm: remove the second argument of k[un]map_atomic()
lib: remove the second argument of k[un]map_atomic()
power: remove the second argument of k[un]map_atomic()
kdb: remove the second argument of k[un]map_atomic()
udf: remove the second argument of k[un]map_atomic()
ubifs: remove the second argument of k[un]map_atomic()
squashfs: remove the second argument of k[un]map_atomic()
reiserfs: remove the second argument of k[un]map_atomic()
ocfs2: remove the second argument of k[un]map_atomic()
ntfs: remove the second argument of k[un]map_atomic()
...
Diffstat (limited to 'arch')
39 files changed, 122 insertions, 123 deletions
diff --git a/arch/arm/include/asm/highmem.h b/arch/arm/include/asm/highmem.h index a4edd19dd3d6..8c5e828f484d 100644 --- a/arch/arm/include/asm/highmem.h +++ b/arch/arm/include/asm/highmem.h | |||
@@ -57,7 +57,7 @@ static inline void *kmap_high_get(struct page *page) | |||
57 | #ifdef CONFIG_HIGHMEM | 57 | #ifdef CONFIG_HIGHMEM |
58 | extern void *kmap(struct page *page); | 58 | extern void *kmap(struct page *page); |
59 | extern void kunmap(struct page *page); | 59 | extern void kunmap(struct page *page); |
60 | extern void *__kmap_atomic(struct page *page); | 60 | extern void *kmap_atomic(struct page *page); |
61 | extern void __kunmap_atomic(void *kvaddr); | 61 | extern void __kunmap_atomic(void *kvaddr); |
62 | extern void *kmap_atomic_pfn(unsigned long pfn); | 62 | extern void *kmap_atomic_pfn(unsigned long pfn); |
63 | extern struct page *kmap_atomic_to_page(const void *ptr); | 63 | extern struct page *kmap_atomic_to_page(const void *ptr); |
diff --git a/arch/arm/mm/copypage-fa.c b/arch/arm/mm/copypage-fa.c index d2852e1635b1..d130a5ece5d5 100644 --- a/arch/arm/mm/copypage-fa.c +++ b/arch/arm/mm/copypage-fa.c | |||
@@ -44,11 +44,11 @@ void fa_copy_user_highpage(struct page *to, struct page *from, | |||
44 | { | 44 | { |
45 | void *kto, *kfrom; | 45 | void *kto, *kfrom; |
46 | 46 | ||
47 | kto = kmap_atomic(to, KM_USER0); | 47 | kto = kmap_atomic(to); |
48 | kfrom = kmap_atomic(from, KM_USER1); | 48 | kfrom = kmap_atomic(from); |
49 | fa_copy_user_page(kto, kfrom); | 49 | fa_copy_user_page(kto, kfrom); |
50 | kunmap_atomic(kfrom, KM_USER1); | 50 | kunmap_atomic(kfrom); |
51 | kunmap_atomic(kto, KM_USER0); | 51 | kunmap_atomic(kto); |
52 | } | 52 | } |
53 | 53 | ||
54 | /* | 54 | /* |
@@ -58,7 +58,7 @@ void fa_copy_user_highpage(struct page *to, struct page *from, | |||
58 | */ | 58 | */ |
59 | void fa_clear_user_highpage(struct page *page, unsigned long vaddr) | 59 | void fa_clear_user_highpage(struct page *page, unsigned long vaddr) |
60 | { | 60 | { |
61 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | 61 | void *ptr, *kaddr = kmap_atomic(page); |
62 | asm volatile("\ | 62 | asm volatile("\ |
63 | mov r1, %2 @ 1\n\ | 63 | mov r1, %2 @ 1\n\ |
64 | mov r2, #0 @ 1\n\ | 64 | mov r2, #0 @ 1\n\ |
@@ -77,7 +77,7 @@ void fa_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
77 | : "=r" (ptr) | 77 | : "=r" (ptr) |
78 | : "0" (kaddr), "I" (PAGE_SIZE / 32) | 78 | : "0" (kaddr), "I" (PAGE_SIZE / 32) |
79 | : "r1", "r2", "r3", "ip", "lr"); | 79 | : "r1", "r2", "r3", "ip", "lr"); |
80 | kunmap_atomic(kaddr, KM_USER0); | 80 | kunmap_atomic(kaddr); |
81 | } | 81 | } |
82 | 82 | ||
83 | struct cpu_user_fns fa_user_fns __initdata = { | 83 | struct cpu_user_fns fa_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-feroceon.c b/arch/arm/mm/copypage-feroceon.c index ac163de7dc01..49ee0c1a7209 100644 --- a/arch/arm/mm/copypage-feroceon.c +++ b/arch/arm/mm/copypage-feroceon.c | |||
@@ -72,17 +72,17 @@ void feroceon_copy_user_highpage(struct page *to, struct page *from, | |||
72 | { | 72 | { |
73 | void *kto, *kfrom; | 73 | void *kto, *kfrom; |
74 | 74 | ||
75 | kto = kmap_atomic(to, KM_USER0); | 75 | kto = kmap_atomic(to); |
76 | kfrom = kmap_atomic(from, KM_USER1); | 76 | kfrom = kmap_atomic(from); |
77 | flush_cache_page(vma, vaddr, page_to_pfn(from)); | 77 | flush_cache_page(vma, vaddr, page_to_pfn(from)); |
78 | feroceon_copy_user_page(kto, kfrom); | 78 | feroceon_copy_user_page(kto, kfrom); |
79 | kunmap_atomic(kfrom, KM_USER1); | 79 | kunmap_atomic(kfrom); |
80 | kunmap_atomic(kto, KM_USER0); | 80 | kunmap_atomic(kto); |
81 | } | 81 | } |
82 | 82 | ||
83 | void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) | 83 | void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) |
84 | { | 84 | { |
85 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | 85 | void *ptr, *kaddr = kmap_atomic(page); |
86 | asm volatile ("\ | 86 | asm volatile ("\ |
87 | mov r1, %2 \n\ | 87 | mov r1, %2 \n\ |
88 | mov r2, #0 \n\ | 88 | mov r2, #0 \n\ |
@@ -102,7 +102,7 @@ void feroceon_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
102 | : "=r" (ptr) | 102 | : "=r" (ptr) |
103 | : "0" (kaddr), "I" (PAGE_SIZE / 32) | 103 | : "0" (kaddr), "I" (PAGE_SIZE / 32) |
104 | : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); | 104 | : "r1", "r2", "r3", "r4", "r5", "r6", "r7", "ip", "lr"); |
105 | kunmap_atomic(kaddr, KM_USER0); | 105 | kunmap_atomic(kaddr); |
106 | } | 106 | } |
107 | 107 | ||
108 | struct cpu_user_fns feroceon_user_fns __initdata = { | 108 | struct cpu_user_fns feroceon_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-v3.c b/arch/arm/mm/copypage-v3.c index f72303e1d804..3935bddd4769 100644 --- a/arch/arm/mm/copypage-v3.c +++ b/arch/arm/mm/copypage-v3.c | |||
@@ -42,11 +42,11 @@ void v3_copy_user_highpage(struct page *to, struct page *from, | |||
42 | { | 42 | { |
43 | void *kto, *kfrom; | 43 | void *kto, *kfrom; |
44 | 44 | ||
45 | kto = kmap_atomic(to, KM_USER0); | 45 | kto = kmap_atomic(to); |
46 | kfrom = kmap_atomic(from, KM_USER1); | 46 | kfrom = kmap_atomic(from); |
47 | v3_copy_user_page(kto, kfrom); | 47 | v3_copy_user_page(kto, kfrom); |
48 | kunmap_atomic(kfrom, KM_USER1); | 48 | kunmap_atomic(kfrom); |
49 | kunmap_atomic(kto, KM_USER0); | 49 | kunmap_atomic(kto); |
50 | } | 50 | } |
51 | 51 | ||
52 | /* | 52 | /* |
@@ -56,7 +56,7 @@ void v3_copy_user_highpage(struct page *to, struct page *from, | |||
56 | */ | 56 | */ |
57 | void v3_clear_user_highpage(struct page *page, unsigned long vaddr) | 57 | void v3_clear_user_highpage(struct page *page, unsigned long vaddr) |
58 | { | 58 | { |
59 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | 59 | void *ptr, *kaddr = kmap_atomic(page); |
60 | asm volatile("\n\ | 60 | asm volatile("\n\ |
61 | mov r1, %2 @ 1\n\ | 61 | mov r1, %2 @ 1\n\ |
62 | mov r2, #0 @ 1\n\ | 62 | mov r2, #0 @ 1\n\ |
@@ -72,7 +72,7 @@ void v3_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
72 | : "=r" (ptr) | 72 | : "=r" (ptr) |
73 | : "0" (kaddr), "I" (PAGE_SIZE / 64) | 73 | : "0" (kaddr), "I" (PAGE_SIZE / 64) |
74 | : "r1", "r2", "r3", "ip", "lr"); | 74 | : "r1", "r2", "r3", "ip", "lr"); |
75 | kunmap_atomic(kaddr, KM_USER0); | 75 | kunmap_atomic(kaddr); |
76 | } | 76 | } |
77 | 77 | ||
78 | struct cpu_user_fns v3_user_fns __initdata = { | 78 | struct cpu_user_fns v3_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-v4mc.c b/arch/arm/mm/copypage-v4mc.c index 7d0a8c230342..ec8c3befb9c8 100644 --- a/arch/arm/mm/copypage-v4mc.c +++ b/arch/arm/mm/copypage-v4mc.c | |||
@@ -71,7 +71,7 @@ mc_copy_user_page(void *from, void *to) | |||
71 | void v4_mc_copy_user_highpage(struct page *to, struct page *from, | 71 | void v4_mc_copy_user_highpage(struct page *to, struct page *from, |
72 | unsigned long vaddr, struct vm_area_struct *vma) | 72 | unsigned long vaddr, struct vm_area_struct *vma) |
73 | { | 73 | { |
74 | void *kto = kmap_atomic(to, KM_USER1); | 74 | void *kto = kmap_atomic(to); |
75 | 75 | ||
76 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) | 76 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) |
77 | __flush_dcache_page(page_mapping(from), from); | 77 | __flush_dcache_page(page_mapping(from), from); |
@@ -85,7 +85,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, | |||
85 | 85 | ||
86 | raw_spin_unlock(&minicache_lock); | 86 | raw_spin_unlock(&minicache_lock); |
87 | 87 | ||
88 | kunmap_atomic(kto, KM_USER1); | 88 | kunmap_atomic(kto); |
89 | } | 89 | } |
90 | 90 | ||
91 | /* | 91 | /* |
@@ -93,7 +93,7 @@ void v4_mc_copy_user_highpage(struct page *to, struct page *from, | |||
93 | */ | 93 | */ |
94 | void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | 94 | void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) |
95 | { | 95 | { |
96 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | 96 | void *ptr, *kaddr = kmap_atomic(page); |
97 | asm volatile("\ | 97 | asm volatile("\ |
98 | mov r1, %2 @ 1\n\ | 98 | mov r1, %2 @ 1\n\ |
99 | mov r2, #0 @ 1\n\ | 99 | mov r2, #0 @ 1\n\ |
@@ -111,7 +111,7 @@ void v4_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
111 | : "=r" (ptr) | 111 | : "=r" (ptr) |
112 | : "0" (kaddr), "I" (PAGE_SIZE / 64) | 112 | : "0" (kaddr), "I" (PAGE_SIZE / 64) |
113 | : "r1", "r2", "r3", "ip", "lr"); | 113 | : "r1", "r2", "r3", "ip", "lr"); |
114 | kunmap_atomic(kaddr, KM_USER0); | 114 | kunmap_atomic(kaddr); |
115 | } | 115 | } |
116 | 116 | ||
117 | struct cpu_user_fns v4_mc_user_fns __initdata = { | 117 | struct cpu_user_fns v4_mc_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-v4wb.c b/arch/arm/mm/copypage-v4wb.c index cb589cbb2b6c..067d0fdd630c 100644 --- a/arch/arm/mm/copypage-v4wb.c +++ b/arch/arm/mm/copypage-v4wb.c | |||
@@ -52,12 +52,12 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, | |||
52 | { | 52 | { |
53 | void *kto, *kfrom; | 53 | void *kto, *kfrom; |
54 | 54 | ||
55 | kto = kmap_atomic(to, KM_USER0); | 55 | kto = kmap_atomic(to); |
56 | kfrom = kmap_atomic(from, KM_USER1); | 56 | kfrom = kmap_atomic(from); |
57 | flush_cache_page(vma, vaddr, page_to_pfn(from)); | 57 | flush_cache_page(vma, vaddr, page_to_pfn(from)); |
58 | v4wb_copy_user_page(kto, kfrom); | 58 | v4wb_copy_user_page(kto, kfrom); |
59 | kunmap_atomic(kfrom, KM_USER1); | 59 | kunmap_atomic(kfrom); |
60 | kunmap_atomic(kto, KM_USER0); | 60 | kunmap_atomic(kto); |
61 | } | 61 | } |
62 | 62 | ||
63 | /* | 63 | /* |
@@ -67,7 +67,7 @@ void v4wb_copy_user_highpage(struct page *to, struct page *from, | |||
67 | */ | 67 | */ |
68 | void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) | 68 | void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) |
69 | { | 69 | { |
70 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | 70 | void *ptr, *kaddr = kmap_atomic(page); |
71 | asm volatile("\ | 71 | asm volatile("\ |
72 | mov r1, %2 @ 1\n\ | 72 | mov r1, %2 @ 1\n\ |
73 | mov r2, #0 @ 1\n\ | 73 | mov r2, #0 @ 1\n\ |
@@ -86,7 +86,7 @@ void v4wb_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
86 | : "=r" (ptr) | 86 | : "=r" (ptr) |
87 | : "0" (kaddr), "I" (PAGE_SIZE / 64) | 87 | : "0" (kaddr), "I" (PAGE_SIZE / 64) |
88 | : "r1", "r2", "r3", "ip", "lr"); | 88 | : "r1", "r2", "r3", "ip", "lr"); |
89 | kunmap_atomic(kaddr, KM_USER0); | 89 | kunmap_atomic(kaddr); |
90 | } | 90 | } |
91 | 91 | ||
92 | struct cpu_user_fns v4wb_user_fns __initdata = { | 92 | struct cpu_user_fns v4wb_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-v4wt.c b/arch/arm/mm/copypage-v4wt.c index 30c7d048a324..b85c5da2e510 100644 --- a/arch/arm/mm/copypage-v4wt.c +++ b/arch/arm/mm/copypage-v4wt.c | |||
@@ -48,11 +48,11 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from, | |||
48 | { | 48 | { |
49 | void *kto, *kfrom; | 49 | void *kto, *kfrom; |
50 | 50 | ||
51 | kto = kmap_atomic(to, KM_USER0); | 51 | kto = kmap_atomic(to); |
52 | kfrom = kmap_atomic(from, KM_USER1); | 52 | kfrom = kmap_atomic(from); |
53 | v4wt_copy_user_page(kto, kfrom); | 53 | v4wt_copy_user_page(kto, kfrom); |
54 | kunmap_atomic(kfrom, KM_USER1); | 54 | kunmap_atomic(kfrom); |
55 | kunmap_atomic(kto, KM_USER0); | 55 | kunmap_atomic(kto); |
56 | } | 56 | } |
57 | 57 | ||
58 | /* | 58 | /* |
@@ -62,7 +62,7 @@ void v4wt_copy_user_highpage(struct page *to, struct page *from, | |||
62 | */ | 62 | */ |
63 | void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) | 63 | void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) |
64 | { | 64 | { |
65 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | 65 | void *ptr, *kaddr = kmap_atomic(page); |
66 | asm volatile("\ | 66 | asm volatile("\ |
67 | mov r1, %2 @ 1\n\ | 67 | mov r1, %2 @ 1\n\ |
68 | mov r2, #0 @ 1\n\ | 68 | mov r2, #0 @ 1\n\ |
@@ -79,7 +79,7 @@ void v4wt_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
79 | : "=r" (ptr) | 79 | : "=r" (ptr) |
80 | : "0" (kaddr), "I" (PAGE_SIZE / 64) | 80 | : "0" (kaddr), "I" (PAGE_SIZE / 64) |
81 | : "r1", "r2", "r3", "ip", "lr"); | 81 | : "r1", "r2", "r3", "ip", "lr"); |
82 | kunmap_atomic(kaddr, KM_USER0); | 82 | kunmap_atomic(kaddr); |
83 | } | 83 | } |
84 | 84 | ||
85 | struct cpu_user_fns v4wt_user_fns __initdata = { | 85 | struct cpu_user_fns v4wt_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c index 3d9a1552cef6..8b03a5814d00 100644 --- a/arch/arm/mm/copypage-v6.c +++ b/arch/arm/mm/copypage-v6.c | |||
@@ -38,11 +38,11 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, | |||
38 | { | 38 | { |
39 | void *kto, *kfrom; | 39 | void *kto, *kfrom; |
40 | 40 | ||
41 | kfrom = kmap_atomic(from, KM_USER0); | 41 | kfrom = kmap_atomic(from); |
42 | kto = kmap_atomic(to, KM_USER1); | 42 | kto = kmap_atomic(to); |
43 | copy_page(kto, kfrom); | 43 | copy_page(kto, kfrom); |
44 | kunmap_atomic(kto, KM_USER1); | 44 | kunmap_atomic(kto); |
45 | kunmap_atomic(kfrom, KM_USER0); | 45 | kunmap_atomic(kfrom); |
46 | } | 46 | } |
47 | 47 | ||
48 | /* | 48 | /* |
@@ -51,9 +51,9 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to, | |||
51 | */ | 51 | */ |
52 | static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) | 52 | static void v6_clear_user_highpage_nonaliasing(struct page *page, unsigned long vaddr) |
53 | { | 53 | { |
54 | void *kaddr = kmap_atomic(page, KM_USER0); | 54 | void *kaddr = kmap_atomic(page); |
55 | clear_page(kaddr); | 55 | clear_page(kaddr); |
56 | kunmap_atomic(kaddr, KM_USER0); | 56 | kunmap_atomic(kaddr); |
57 | } | 57 | } |
58 | 58 | ||
59 | /* | 59 | /* |
diff --git a/arch/arm/mm/copypage-xsc3.c b/arch/arm/mm/copypage-xsc3.c index f9cde0702f1e..03a2042aced5 100644 --- a/arch/arm/mm/copypage-xsc3.c +++ b/arch/arm/mm/copypage-xsc3.c | |||
@@ -75,12 +75,12 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, | |||
75 | { | 75 | { |
76 | void *kto, *kfrom; | 76 | void *kto, *kfrom; |
77 | 77 | ||
78 | kto = kmap_atomic(to, KM_USER0); | 78 | kto = kmap_atomic(to); |
79 | kfrom = kmap_atomic(from, KM_USER1); | 79 | kfrom = kmap_atomic(from); |
80 | flush_cache_page(vma, vaddr, page_to_pfn(from)); | 80 | flush_cache_page(vma, vaddr, page_to_pfn(from)); |
81 | xsc3_mc_copy_user_page(kto, kfrom); | 81 | xsc3_mc_copy_user_page(kto, kfrom); |
82 | kunmap_atomic(kfrom, KM_USER1); | 82 | kunmap_atomic(kfrom); |
83 | kunmap_atomic(kto, KM_USER0); | 83 | kunmap_atomic(kto); |
84 | } | 84 | } |
85 | 85 | ||
86 | /* | 86 | /* |
@@ -90,7 +90,7 @@ void xsc3_mc_copy_user_highpage(struct page *to, struct page *from, | |||
90 | */ | 90 | */ |
91 | void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | 91 | void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) |
92 | { | 92 | { |
93 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | 93 | void *ptr, *kaddr = kmap_atomic(page); |
94 | asm volatile ("\ | 94 | asm volatile ("\ |
95 | mov r1, %2 \n\ | 95 | mov r1, %2 \n\ |
96 | mov r2, #0 \n\ | 96 | mov r2, #0 \n\ |
@@ -105,7 +105,7 @@ void xsc3_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
105 | : "=r" (ptr) | 105 | : "=r" (ptr) |
106 | : "0" (kaddr), "I" (PAGE_SIZE / 32) | 106 | : "0" (kaddr), "I" (PAGE_SIZE / 32) |
107 | : "r1", "r2", "r3"); | 107 | : "r1", "r2", "r3"); |
108 | kunmap_atomic(kaddr, KM_USER0); | 108 | kunmap_atomic(kaddr); |
109 | } | 109 | } |
110 | 110 | ||
111 | struct cpu_user_fns xsc3_mc_user_fns __initdata = { | 111 | struct cpu_user_fns xsc3_mc_user_fns __initdata = { |
diff --git a/arch/arm/mm/copypage-xscale.c b/arch/arm/mm/copypage-xscale.c index 610c24ced310..439d106ae638 100644 --- a/arch/arm/mm/copypage-xscale.c +++ b/arch/arm/mm/copypage-xscale.c | |||
@@ -93,7 +93,7 @@ mc_copy_user_page(void *from, void *to) | |||
93 | void xscale_mc_copy_user_highpage(struct page *to, struct page *from, | 93 | void xscale_mc_copy_user_highpage(struct page *to, struct page *from, |
94 | unsigned long vaddr, struct vm_area_struct *vma) | 94 | unsigned long vaddr, struct vm_area_struct *vma) |
95 | { | 95 | { |
96 | void *kto = kmap_atomic(to, KM_USER1); | 96 | void *kto = kmap_atomic(to); |
97 | 97 | ||
98 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) | 98 | if (!test_and_set_bit(PG_dcache_clean, &from->flags)) |
99 | __flush_dcache_page(page_mapping(from), from); | 99 | __flush_dcache_page(page_mapping(from), from); |
@@ -107,7 +107,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, | |||
107 | 107 | ||
108 | raw_spin_unlock(&minicache_lock); | 108 | raw_spin_unlock(&minicache_lock); |
109 | 109 | ||
110 | kunmap_atomic(kto, KM_USER1); | 110 | kunmap_atomic(kto); |
111 | } | 111 | } |
112 | 112 | ||
113 | /* | 113 | /* |
@@ -116,7 +116,7 @@ void xscale_mc_copy_user_highpage(struct page *to, struct page *from, | |||
116 | void | 116 | void |
117 | xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | 117 | xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) |
118 | { | 118 | { |
119 | void *ptr, *kaddr = kmap_atomic(page, KM_USER0); | 119 | void *ptr, *kaddr = kmap_atomic(page); |
120 | asm volatile( | 120 | asm volatile( |
121 | "mov r1, %2 \n\ | 121 | "mov r1, %2 \n\ |
122 | mov r2, #0 \n\ | 122 | mov r2, #0 \n\ |
@@ -133,7 +133,7 @@ xscale_mc_clear_user_highpage(struct page *page, unsigned long vaddr) | |||
133 | : "=r" (ptr) | 133 | : "=r" (ptr) |
134 | : "0" (kaddr), "I" (PAGE_SIZE / 32) | 134 | : "0" (kaddr), "I" (PAGE_SIZE / 32) |
135 | : "r1", "r2", "r3", "ip"); | 135 | : "r1", "r2", "r3", "ip"); |
136 | kunmap_atomic(kaddr, KM_USER0); | 136 | kunmap_atomic(kaddr); |
137 | } | 137 | } |
138 | 138 | ||
139 | struct cpu_user_fns xscale_mc_user_fns __initdata = { | 139 | struct cpu_user_fns xscale_mc_user_fns __initdata = { |
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c index 807c0573abbe..5a21505d7550 100644 --- a/arch/arm/mm/highmem.c +++ b/arch/arm/mm/highmem.c | |||
@@ -36,7 +36,7 @@ void kunmap(struct page *page) | |||
36 | } | 36 | } |
37 | EXPORT_SYMBOL(kunmap); | 37 | EXPORT_SYMBOL(kunmap); |
38 | 38 | ||
39 | void *__kmap_atomic(struct page *page) | 39 | void *kmap_atomic(struct page *page) |
40 | { | 40 | { |
41 | unsigned int idx; | 41 | unsigned int idx; |
42 | unsigned long vaddr; | 42 | unsigned long vaddr; |
@@ -81,7 +81,7 @@ void *__kmap_atomic(struct page *page) | |||
81 | 81 | ||
82 | return (void *)vaddr; | 82 | return (void *)vaddr; |
83 | } | 83 | } |
84 | EXPORT_SYMBOL(__kmap_atomic); | 84 | EXPORT_SYMBOL(kmap_atomic); |
85 | 85 | ||
86 | void __kunmap_atomic(void *kvaddr) | 86 | void __kunmap_atomic(void *kvaddr) |
87 | { | 87 | { |
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h index a8d6565d415d..716956a5317b 100644 --- a/arch/frv/include/asm/highmem.h +++ b/arch/frv/include/asm/highmem.h | |||
@@ -157,7 +157,7 @@ static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type) | |||
157 | pagefault_enable(); | 157 | pagefault_enable(); |
158 | } | 158 | } |
159 | 159 | ||
160 | void *__kmap_atomic(struct page *page); | 160 | void *kmap_atomic(struct page *page); |
161 | void __kunmap_atomic(void *kvaddr); | 161 | void __kunmap_atomic(void *kvaddr); |
162 | 162 | ||
163 | #endif /* !__ASSEMBLY__ */ | 163 | #endif /* !__ASSEMBLY__ */ |
diff --git a/arch/frv/mm/highmem.c b/arch/frv/mm/highmem.c index fd7fcd4c2e33..31902c9d5be5 100644 --- a/arch/frv/mm/highmem.c +++ b/arch/frv/mm/highmem.c | |||
@@ -37,7 +37,7 @@ struct page *kmap_atomic_to_page(void *ptr) | |||
37 | return virt_to_page(ptr); | 37 | return virt_to_page(ptr); |
38 | } | 38 | } |
39 | 39 | ||
40 | void *__kmap_atomic(struct page *page) | 40 | void *kmap_atomic(struct page *page) |
41 | { | 41 | { |
42 | unsigned long paddr; | 42 | unsigned long paddr; |
43 | int type; | 43 | int type; |
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page) | |||
64 | return NULL; | 64 | return NULL; |
65 | } | 65 | } |
66 | } | 66 | } |
67 | EXPORT_SYMBOL(__kmap_atomic); | 67 | EXPORT_SYMBOL(kmap_atomic); |
68 | 68 | ||
69 | void __kunmap_atomic(void *kvaddr) | 69 | void __kunmap_atomic(void *kvaddr) |
70 | { | 70 | { |
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h index 77e644082a3b..2d91888c9b74 100644 --- a/arch/mips/include/asm/highmem.h +++ b/arch/mips/include/asm/highmem.h | |||
@@ -47,7 +47,7 @@ extern void kunmap_high(struct page *page); | |||
47 | 47 | ||
48 | extern void *kmap(struct page *page); | 48 | extern void *kmap(struct page *page); |
49 | extern void kunmap(struct page *page); | 49 | extern void kunmap(struct page *page); |
50 | extern void *__kmap_atomic(struct page *page); | 50 | extern void *kmap_atomic(struct page *page); |
51 | extern void __kunmap_atomic(void *kvaddr); | 51 | extern void __kunmap_atomic(void *kvaddr); |
52 | extern void *kmap_atomic_pfn(unsigned long pfn); | 52 | extern void *kmap_atomic_pfn(unsigned long pfn); |
53 | extern struct page *kmap_atomic_to_page(void *ptr); | 53 | extern struct page *kmap_atomic_to_page(void *ptr); |
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c index 4f9eb0b23036..c97087d12d07 100644 --- a/arch/mips/mm/c-r4k.c +++ b/arch/mips/mm/c-r4k.c | |||
@@ -498,7 +498,7 @@ static inline void local_r4k_flush_cache_page(void *args) | |||
498 | if (map_coherent) | 498 | if (map_coherent) |
499 | vaddr = kmap_coherent(page, addr); | 499 | vaddr = kmap_coherent(page, addr); |
500 | else | 500 | else |
501 | vaddr = kmap_atomic(page, KM_USER0); | 501 | vaddr = kmap_atomic(page); |
502 | addr = (unsigned long)vaddr; | 502 | addr = (unsigned long)vaddr; |
503 | } | 503 | } |
504 | 504 | ||
@@ -521,7 +521,7 @@ static inline void local_r4k_flush_cache_page(void *args) | |||
521 | if (map_coherent) | 521 | if (map_coherent) |
522 | kunmap_coherent(); | 522 | kunmap_coherent(); |
523 | else | 523 | else |
524 | kunmap_atomic(vaddr, KM_USER0); | 524 | kunmap_atomic(vaddr); |
525 | } | 525 | } |
526 | } | 526 | } |
527 | 527 | ||
diff --git a/arch/mips/mm/highmem.c b/arch/mips/mm/highmem.c index 3634c7ea06ac..aff57057a949 100644 --- a/arch/mips/mm/highmem.c +++ b/arch/mips/mm/highmem.c | |||
@@ -41,7 +41,7 @@ EXPORT_SYMBOL(kunmap); | |||
41 | * kmaps are appropriate for short, tight code paths only. | 41 | * kmaps are appropriate for short, tight code paths only. |
42 | */ | 42 | */ |
43 | 43 | ||
44 | void *__kmap_atomic(struct page *page) | 44 | void *kmap_atomic(struct page *page) |
45 | { | 45 | { |
46 | unsigned long vaddr; | 46 | unsigned long vaddr; |
47 | int idx, type; | 47 | int idx, type; |
@@ -62,7 +62,7 @@ void *__kmap_atomic(struct page *page) | |||
62 | 62 | ||
63 | return (void*) vaddr; | 63 | return (void*) vaddr; |
64 | } | 64 | } |
65 | EXPORT_SYMBOL(__kmap_atomic); | 65 | EXPORT_SYMBOL(kmap_atomic); |
66 | 66 | ||
67 | void __kunmap_atomic(void *kvaddr) | 67 | void __kunmap_atomic(void *kvaddr) |
68 | { | 68 | { |
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c index 3b3ffd439cd7..1a85ba92eb5c 100644 --- a/arch/mips/mm/init.c +++ b/arch/mips/mm/init.c | |||
@@ -207,21 +207,21 @@ void copy_user_highpage(struct page *to, struct page *from, | |||
207 | { | 207 | { |
208 | void *vfrom, *vto; | 208 | void *vfrom, *vto; |
209 | 209 | ||
210 | vto = kmap_atomic(to, KM_USER1); | 210 | vto = kmap_atomic(to); |
211 | if (cpu_has_dc_aliases && | 211 | if (cpu_has_dc_aliases && |
212 | page_mapped(from) && !Page_dcache_dirty(from)) { | 212 | page_mapped(from) && !Page_dcache_dirty(from)) { |
213 | vfrom = kmap_coherent(from, vaddr); | 213 | vfrom = kmap_coherent(from, vaddr); |
214 | copy_page(vto, vfrom); | 214 | copy_page(vto, vfrom); |
215 | kunmap_coherent(); | 215 | kunmap_coherent(); |
216 | } else { | 216 | } else { |
217 | vfrom = kmap_atomic(from, KM_USER0); | 217 | vfrom = kmap_atomic(from); |
218 | copy_page(vto, vfrom); | 218 | copy_page(vto, vfrom); |
219 | kunmap_atomic(vfrom, KM_USER0); | 219 | kunmap_atomic(vfrom); |
220 | } | 220 | } |
221 | if ((!cpu_has_ic_fills_f_dc) || | 221 | if ((!cpu_has_ic_fills_f_dc) || |
222 | pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) | 222 | pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK)) |
223 | flush_data_cache_page((unsigned long)vto); | 223 | flush_data_cache_page((unsigned long)vto); |
224 | kunmap_atomic(vto, KM_USER1); | 224 | kunmap_atomic(vto); |
225 | /* Make sure this page is cleared on other CPU's too before using it */ | 225 | /* Make sure this page is cleared on other CPU's too before using it */ |
226 | smp_wmb(); | 226 | smp_wmb(); |
227 | } | 227 | } |
diff --git a/arch/mn10300/include/asm/highmem.h b/arch/mn10300/include/asm/highmem.h index bfe2d88604d9..7c137cd8aa37 100644 --- a/arch/mn10300/include/asm/highmem.h +++ b/arch/mn10300/include/asm/highmem.h | |||
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page) | |||
70 | * be used in IRQ contexts, so in some (very limited) cases we need | 70 | * be used in IRQ contexts, so in some (very limited) cases we need |
71 | * it. | 71 | * it. |
72 | */ | 72 | */ |
73 | static inline unsigned long __kmap_atomic(struct page *page) | 73 | static inline unsigned long kmap_atomic(struct page *page) |
74 | { | 74 | { |
75 | unsigned long vaddr; | 75 | unsigned long vaddr; |
76 | int idx, type; | 76 | int idx, type; |
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h index da601dd34c05..9f21ab0c02e3 100644 --- a/arch/parisc/include/asm/cacheflush.h +++ b/arch/parisc/include/asm/cacheflush.h | |||
@@ -140,7 +140,7 @@ static inline void *kmap(struct page *page) | |||
140 | 140 | ||
141 | #define kunmap(page) kunmap_parisc(page_address(page)) | 141 | #define kunmap(page) kunmap_parisc(page_address(page)) |
142 | 142 | ||
143 | static inline void *__kmap_atomic(struct page *page) | 143 | static inline void *kmap_atomic(struct page *page) |
144 | { | 144 | { |
145 | pagefault_disable(); | 145 | pagefault_disable(); |
146 | return page_address(page); | 146 | return page_address(page); |
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index dbc264010d0b..caaf6e00630d 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h | |||
@@ -79,7 +79,7 @@ static inline void kunmap(struct page *page) | |||
79 | kunmap_high(page); | 79 | kunmap_high(page); |
80 | } | 80 | } |
81 | 81 | ||
82 | static inline void *__kmap_atomic(struct page *page) | 82 | static inline void *kmap_atomic(struct page *page) |
83 | { | 83 | { |
84 | return kmap_atomic_prot(page, kmap_prot); | 84 | return kmap_atomic_prot(page, kmap_prot); |
85 | } | 85 | } |
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c index e2cfb9e1e20e..220fcdf26978 100644 --- a/arch/powerpc/kvm/book3s_pr.c +++ b/arch/powerpc/kvm/book3s_pr.c | |||
@@ -227,14 +227,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) | |||
227 | hpage_offset /= 4; | 227 | hpage_offset /= 4; |
228 | 228 | ||
229 | get_page(hpage); | 229 | get_page(hpage); |
230 | page = kmap_atomic(hpage, KM_USER0); | 230 | page = kmap_atomic(hpage); |
231 | 231 | ||
232 | /* patch dcbz into reserved instruction, so we trap */ | 232 | /* patch dcbz into reserved instruction, so we trap */ |
233 | for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) | 233 | for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) |
234 | if ((page[i] & 0xff0007ff) == INS_DCBZ) | 234 | if ((page[i] & 0xff0007ff) == INS_DCBZ) |
235 | page[i] &= 0xfffffff7; | 235 | page[i] &= 0xfffffff7; |
236 | 236 | ||
237 | kunmap_atomic(page, KM_USER0); | 237 | kunmap_atomic(page); |
238 | put_page(hpage); | 238 | put_page(hpage); |
239 | } | 239 | } |
240 | 240 | ||
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index 329be36c0a8d..6747eece84af 100644 --- a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c | |||
@@ -365,12 +365,11 @@ static inline void __dma_sync_page_highmem(struct page *page, | |||
365 | local_irq_save(flags); | 365 | local_irq_save(flags); |
366 | 366 | ||
367 | do { | 367 | do { |
368 | start = (unsigned long)kmap_atomic(page + seg_nr, | 368 | start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset; |
369 | KM_PPC_SYNC_PAGE) + seg_offset; | ||
370 | 369 | ||
371 | /* Sync this buffer segment */ | 370 | /* Sync this buffer segment */ |
372 | __dma_sync((void *)start, seg_size, direction); | 371 | __dma_sync((void *)start, seg_size, direction); |
373 | kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE); | 372 | kunmap_atomic((void *)start); |
374 | seg_nr++; | 373 | seg_nr++; |
375 | 374 | ||
376 | /* Calculate next buffer segment size */ | 375 | /* Calculate next buffer segment size */ |
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index a8b3cc7d90fe..57c7465e656e 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -910,9 +910,9 @@ void flush_dcache_icache_hugepage(struct page *page) | |||
910 | if (!PageHighMem(page)) { | 910 | if (!PageHighMem(page)) { |
911 | __flush_dcache_icache(page_address(page+i)); | 911 | __flush_dcache_icache(page_address(page+i)); |
912 | } else { | 912 | } else { |
913 | start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE); | 913 | start = kmap_atomic(page+i); |
914 | __flush_dcache_icache(start); | 914 | __flush_dcache_icache(start); |
915 | kunmap_atomic(start, KM_PPC_SYNC_ICACHE); | 915 | kunmap_atomic(start); |
916 | } | 916 | } |
917 | } | 917 | } |
918 | } | 918 | } |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index d974b79a3068..baaafde7d135 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -458,9 +458,9 @@ void flush_dcache_icache_page(struct page *page) | |||
458 | #endif | 458 | #endif |
459 | #ifdef CONFIG_BOOKE | 459 | #ifdef CONFIG_BOOKE |
460 | { | 460 | { |
461 | void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE); | 461 | void *start = kmap_atomic(page); |
462 | __flush_dcache_icache(start); | 462 | __flush_dcache_icache(start); |
463 | kunmap_atomic(start, KM_PPC_SYNC_ICACHE); | 463 | kunmap_atomic(start); |
464 | } | 464 | } |
465 | #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64) | 465 | #elif defined(CONFIG_8xx) || defined(CONFIG_PPC64) |
466 | /* On 8xx there is no need to kmap since highmem is not supported */ | 466 | /* On 8xx there is no need to kmap since highmem is not supported */ |
diff --git a/arch/sh/mm/cache-sh4.c b/arch/sh/mm/cache-sh4.c index 92eb98633ab0..112fea12522a 100644 --- a/arch/sh/mm/cache-sh4.c +++ b/arch/sh/mm/cache-sh4.c | |||
@@ -244,7 +244,7 @@ static void sh4_flush_cache_page(void *args) | |||
244 | if (map_coherent) | 244 | if (map_coherent) |
245 | vaddr = kmap_coherent(page, address); | 245 | vaddr = kmap_coherent(page, address); |
246 | else | 246 | else |
247 | vaddr = kmap_atomic(page, KM_USER0); | 247 | vaddr = kmap_atomic(page); |
248 | 248 | ||
249 | address = (unsigned long)vaddr; | 249 | address = (unsigned long)vaddr; |
250 | } | 250 | } |
@@ -259,7 +259,7 @@ static void sh4_flush_cache_page(void *args) | |||
259 | if (map_coherent) | 259 | if (map_coherent) |
260 | kunmap_coherent(vaddr); | 260 | kunmap_coherent(vaddr); |
261 | else | 261 | else |
262 | kunmap_atomic(vaddr, KM_USER0); | 262 | kunmap_atomic(vaddr); |
263 | } | 263 | } |
264 | } | 264 | } |
265 | 265 | ||
diff --git a/arch/sh/mm/cache.c b/arch/sh/mm/cache.c index 5a580ea04429..616966a96cba 100644 --- a/arch/sh/mm/cache.c +++ b/arch/sh/mm/cache.c | |||
@@ -95,7 +95,7 @@ void copy_user_highpage(struct page *to, struct page *from, | |||
95 | { | 95 | { |
96 | void *vfrom, *vto; | 96 | void *vfrom, *vto; |
97 | 97 | ||
98 | vto = kmap_atomic(to, KM_USER1); | 98 | vto = kmap_atomic(to); |
99 | 99 | ||
100 | if (boot_cpu_data.dcache.n_aliases && page_mapped(from) && | 100 | if (boot_cpu_data.dcache.n_aliases && page_mapped(from) && |
101 | test_bit(PG_dcache_clean, &from->flags)) { | 101 | test_bit(PG_dcache_clean, &from->flags)) { |
@@ -103,16 +103,16 @@ void copy_user_highpage(struct page *to, struct page *from, | |||
103 | copy_page(vto, vfrom); | 103 | copy_page(vto, vfrom); |
104 | kunmap_coherent(vfrom); | 104 | kunmap_coherent(vfrom); |
105 | } else { | 105 | } else { |
106 | vfrom = kmap_atomic(from, KM_USER0); | 106 | vfrom = kmap_atomic(from); |
107 | copy_page(vto, vfrom); | 107 | copy_page(vto, vfrom); |
108 | kunmap_atomic(vfrom, KM_USER0); | 108 | kunmap_atomic(vfrom); |
109 | } | 109 | } |
110 | 110 | ||
111 | if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || | 111 | if (pages_do_alias((unsigned long)vto, vaddr & PAGE_MASK) || |
112 | (vma->vm_flags & VM_EXEC)) | 112 | (vma->vm_flags & VM_EXEC)) |
113 | __flush_purge_region(vto, PAGE_SIZE); | 113 | __flush_purge_region(vto, PAGE_SIZE); |
114 | 114 | ||
115 | kunmap_atomic(vto, KM_USER1); | 115 | kunmap_atomic(vto); |
116 | /* Make sure this page is cleared on other CPU's too before using it */ | 116 | /* Make sure this page is cleared on other CPU's too before using it */ |
117 | smp_wmb(); | 117 | smp_wmb(); |
118 | } | 118 | } |
@@ -120,14 +120,14 @@ EXPORT_SYMBOL(copy_user_highpage); | |||
120 | 120 | ||
121 | void clear_user_highpage(struct page *page, unsigned long vaddr) | 121 | void clear_user_highpage(struct page *page, unsigned long vaddr) |
122 | { | 122 | { |
123 | void *kaddr = kmap_atomic(page, KM_USER0); | 123 | void *kaddr = kmap_atomic(page); |
124 | 124 | ||
125 | clear_page(kaddr); | 125 | clear_page(kaddr); |
126 | 126 | ||
127 | if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) | 127 | if (pages_do_alias((unsigned long)kaddr, vaddr & PAGE_MASK)) |
128 | __flush_purge_region(kaddr, PAGE_SIZE); | 128 | __flush_purge_region(kaddr, PAGE_SIZE); |
129 | 129 | ||
130 | kunmap_atomic(kaddr, KM_USER0); | 130 | kunmap_atomic(kaddr); |
131 | } | 131 | } |
132 | EXPORT_SYMBOL(clear_user_highpage); | 132 | EXPORT_SYMBOL(clear_user_highpage); |
133 | 133 | ||
diff --git a/arch/sparc/include/asm/highmem.h b/arch/sparc/include/asm/highmem.h index 3d7afbb7f4bb..3b6e00dd96e5 100644 --- a/arch/sparc/include/asm/highmem.h +++ b/arch/sparc/include/asm/highmem.h | |||
@@ -70,7 +70,7 @@ static inline void kunmap(struct page *page) | |||
70 | kunmap_high(page); | 70 | kunmap_high(page); |
71 | } | 71 | } |
72 | 72 | ||
73 | extern void *__kmap_atomic(struct page *page); | 73 | extern void *kmap_atomic(struct page *page); |
74 | extern void __kunmap_atomic(void *kvaddr); | 74 | extern void __kunmap_atomic(void *kvaddr); |
75 | extern struct page *kmap_atomic_to_page(void *vaddr); | 75 | extern struct page *kmap_atomic_to_page(void *vaddr); |
76 | 76 | ||
diff --git a/arch/sparc/mm/highmem.c b/arch/sparc/mm/highmem.c index 77140a02c86a..055c66cf1bf4 100644 --- a/arch/sparc/mm/highmem.c +++ b/arch/sparc/mm/highmem.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include <asm/tlbflush.h> | 30 | #include <asm/tlbflush.h> |
31 | #include <asm/fixmap.h> | 31 | #include <asm/fixmap.h> |
32 | 32 | ||
33 | void *__kmap_atomic(struct page *page) | 33 | void *kmap_atomic(struct page *page) |
34 | { | 34 | { |
35 | unsigned long vaddr; | 35 | unsigned long vaddr; |
36 | long idx, type; | 36 | long idx, type; |
@@ -64,7 +64,7 @@ void *__kmap_atomic(struct page *page) | |||
64 | 64 | ||
65 | return (void*) vaddr; | 65 | return (void*) vaddr; |
66 | } | 66 | } |
67 | EXPORT_SYMBOL(__kmap_atomic); | 67 | EXPORT_SYMBOL(kmap_atomic); |
68 | 68 | ||
69 | void __kunmap_atomic(void *kvaddr) | 69 | void __kunmap_atomic(void *kvaddr) |
70 | { | 70 | { |
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h index b2a6c5de79ab..fc8429a31c85 100644 --- a/arch/tile/include/asm/highmem.h +++ b/arch/tile/include/asm/highmem.h | |||
@@ -59,7 +59,7 @@ void *kmap_fix_kpte(struct page *page, int finished); | |||
59 | /* This macro is used only in map_new_virtual() to map "page". */ | 59 | /* This macro is used only in map_new_virtual() to map "page". */ |
60 | #define kmap_prot page_to_kpgprot(page) | 60 | #define kmap_prot page_to_kpgprot(page) |
61 | 61 | ||
62 | void *__kmap_atomic(struct page *page); | 62 | void *kmap_atomic(struct page *page); |
63 | void __kunmap_atomic(void *kvaddr); | 63 | void __kunmap_atomic(void *kvaddr); |
64 | void *kmap_atomic_pfn(unsigned long pfn); | 64 | void *kmap_atomic_pfn(unsigned long pfn); |
65 | void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot); | 65 | void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot); |
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c index 31dbbd9afe47..ef8e5a62b6e3 100644 --- a/arch/tile/mm/highmem.c +++ b/arch/tile/mm/highmem.c | |||
@@ -224,12 +224,12 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) | |||
224 | } | 224 | } |
225 | EXPORT_SYMBOL(kmap_atomic_prot); | 225 | EXPORT_SYMBOL(kmap_atomic_prot); |
226 | 226 | ||
227 | void *__kmap_atomic(struct page *page) | 227 | void *kmap_atomic(struct page *page) |
228 | { | 228 | { |
229 | /* PAGE_NONE is a magic value that tells us to check immutability. */ | 229 | /* PAGE_NONE is a magic value that tells us to check immutability. */ |
230 | return kmap_atomic_prot(page, PAGE_NONE); | 230 | return kmap_atomic_prot(page, PAGE_NONE); |
231 | } | 231 | } |
232 | EXPORT_SYMBOL(__kmap_atomic); | 232 | EXPORT_SYMBOL(kmap_atomic); |
233 | 233 | ||
234 | void __kunmap_atomic(void *kvaddr) | 234 | void __kunmap_atomic(void *kvaddr) |
235 | { | 235 | { |
diff --git a/arch/um/kernel/skas/uaccess.c b/arch/um/kernel/skas/uaccess.c index 9fefd924fb49..cd7df79c6a56 100644 --- a/arch/um/kernel/skas/uaccess.c +++ b/arch/um/kernel/skas/uaccess.c | |||
@@ -69,7 +69,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write, | |||
69 | return -1; | 69 | return -1; |
70 | 70 | ||
71 | page = pte_page(*pte); | 71 | page = pte_page(*pte); |
72 | addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) + | 72 | addr = (unsigned long) kmap_atomic(page) + |
73 | (addr & ~PAGE_MASK); | 73 | (addr & ~PAGE_MASK); |
74 | 74 | ||
75 | current->thread.fault_catcher = &buf; | 75 | current->thread.fault_catcher = &buf; |
@@ -82,7 +82,7 @@ static int do_op_one_page(unsigned long addr, int len, int is_write, | |||
82 | 82 | ||
83 | current->thread.fault_catcher = NULL; | 83 | current->thread.fault_catcher = NULL; |
84 | 84 | ||
85 | kunmap_atomic((void *)addr, KM_UML_USERCOPY); | 85 | kunmap_atomic((void *)addr); |
86 | 86 | ||
87 | return n; | 87 | return n; |
88 | } | 88 | } |
diff --git a/arch/x86/crypto/aesni-intel_glue.c b/arch/x86/crypto/aesni-intel_glue.c index b3350bd32c60..c799352e24fc 100644 --- a/arch/x86/crypto/aesni-intel_glue.c +++ b/arch/x86/crypto/aesni-intel_glue.c | |||
@@ -1108,12 +1108,12 @@ static int __driver_rfc4106_encrypt(struct aead_request *req) | |||
1108 | one_entry_in_sg = 1; | 1108 | one_entry_in_sg = 1; |
1109 | scatterwalk_start(&src_sg_walk, req->src); | 1109 | scatterwalk_start(&src_sg_walk, req->src); |
1110 | scatterwalk_start(&assoc_sg_walk, req->assoc); | 1110 | scatterwalk_start(&assoc_sg_walk, req->assoc); |
1111 | src = scatterwalk_map(&src_sg_walk, 0); | 1111 | src = scatterwalk_map(&src_sg_walk); |
1112 | assoc = scatterwalk_map(&assoc_sg_walk, 0); | 1112 | assoc = scatterwalk_map(&assoc_sg_walk); |
1113 | dst = src; | 1113 | dst = src; |
1114 | if (unlikely(req->src != req->dst)) { | 1114 | if (unlikely(req->src != req->dst)) { |
1115 | scatterwalk_start(&dst_sg_walk, req->dst); | 1115 | scatterwalk_start(&dst_sg_walk, req->dst); |
1116 | dst = scatterwalk_map(&dst_sg_walk, 0); | 1116 | dst = scatterwalk_map(&dst_sg_walk); |
1117 | } | 1117 | } |
1118 | 1118 | ||
1119 | } else { | 1119 | } else { |
@@ -1137,11 +1137,11 @@ static int __driver_rfc4106_encrypt(struct aead_request *req) | |||
1137 | * back to the packet. */ | 1137 | * back to the packet. */ |
1138 | if (one_entry_in_sg) { | 1138 | if (one_entry_in_sg) { |
1139 | if (unlikely(req->src != req->dst)) { | 1139 | if (unlikely(req->src != req->dst)) { |
1140 | scatterwalk_unmap(dst, 0); | 1140 | scatterwalk_unmap(dst); |
1141 | scatterwalk_done(&dst_sg_walk, 0, 0); | 1141 | scatterwalk_done(&dst_sg_walk, 0, 0); |
1142 | } | 1142 | } |
1143 | scatterwalk_unmap(src, 0); | 1143 | scatterwalk_unmap(src); |
1144 | scatterwalk_unmap(assoc, 0); | 1144 | scatterwalk_unmap(assoc); |
1145 | scatterwalk_done(&src_sg_walk, 0, 0); | 1145 | scatterwalk_done(&src_sg_walk, 0, 0); |
1146 | scatterwalk_done(&assoc_sg_walk, 0, 0); | 1146 | scatterwalk_done(&assoc_sg_walk, 0, 0); |
1147 | } else { | 1147 | } else { |
@@ -1190,12 +1190,12 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
1190 | one_entry_in_sg = 1; | 1190 | one_entry_in_sg = 1; |
1191 | scatterwalk_start(&src_sg_walk, req->src); | 1191 | scatterwalk_start(&src_sg_walk, req->src); |
1192 | scatterwalk_start(&assoc_sg_walk, req->assoc); | 1192 | scatterwalk_start(&assoc_sg_walk, req->assoc); |
1193 | src = scatterwalk_map(&src_sg_walk, 0); | 1193 | src = scatterwalk_map(&src_sg_walk); |
1194 | assoc = scatterwalk_map(&assoc_sg_walk, 0); | 1194 | assoc = scatterwalk_map(&assoc_sg_walk); |
1195 | dst = src; | 1195 | dst = src; |
1196 | if (unlikely(req->src != req->dst)) { | 1196 | if (unlikely(req->src != req->dst)) { |
1197 | scatterwalk_start(&dst_sg_walk, req->dst); | 1197 | scatterwalk_start(&dst_sg_walk, req->dst); |
1198 | dst = scatterwalk_map(&dst_sg_walk, 0); | 1198 | dst = scatterwalk_map(&dst_sg_walk); |
1199 | } | 1199 | } |
1200 | 1200 | ||
1201 | } else { | 1201 | } else { |
@@ -1220,11 +1220,11 @@ static int __driver_rfc4106_decrypt(struct aead_request *req) | |||
1220 | 1220 | ||
1221 | if (one_entry_in_sg) { | 1221 | if (one_entry_in_sg) { |
1222 | if (unlikely(req->src != req->dst)) { | 1222 | if (unlikely(req->src != req->dst)) { |
1223 | scatterwalk_unmap(dst, 0); | 1223 | scatterwalk_unmap(dst); |
1224 | scatterwalk_done(&dst_sg_walk, 0, 0); | 1224 | scatterwalk_done(&dst_sg_walk, 0, 0); |
1225 | } | 1225 | } |
1226 | scatterwalk_unmap(src, 0); | 1226 | scatterwalk_unmap(src); |
1227 | scatterwalk_unmap(assoc, 0); | 1227 | scatterwalk_unmap(assoc); |
1228 | scatterwalk_done(&src_sg_walk, 0, 0); | 1228 | scatterwalk_done(&src_sg_walk, 0, 0); |
1229 | scatterwalk_done(&assoc_sg_walk, 0, 0); | 1229 | scatterwalk_done(&assoc_sg_walk, 0, 0); |
1230 | } else { | 1230 | } else { |
diff --git a/arch/x86/include/asm/highmem.h b/arch/x86/include/asm/highmem.h index 3bd04022fd0c..302a323b3f67 100644 --- a/arch/x86/include/asm/highmem.h +++ b/arch/x86/include/asm/highmem.h | |||
@@ -61,7 +61,7 @@ void *kmap(struct page *page); | |||
61 | void kunmap(struct page *page); | 61 | void kunmap(struct page *page); |
62 | 62 | ||
63 | void *kmap_atomic_prot(struct page *page, pgprot_t prot); | 63 | void *kmap_atomic_prot(struct page *page, pgprot_t prot); |
64 | void *__kmap_atomic(struct page *page); | 64 | void *kmap_atomic(struct page *page); |
65 | void __kunmap_atomic(void *kvaddr); | 65 | void __kunmap_atomic(void *kvaddr); |
66 | void *kmap_atomic_pfn(unsigned long pfn); | 66 | void *kmap_atomic_pfn(unsigned long pfn); |
67 | void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot); | 67 | void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot); |
diff --git a/arch/x86/kernel/crash_dump_32.c b/arch/x86/kernel/crash_dump_32.c index 642f75a68cd5..11891ca7b716 100644 --- a/arch/x86/kernel/crash_dump_32.c +++ b/arch/x86/kernel/crash_dump_32.c | |||
@@ -62,16 +62,16 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
62 | 62 | ||
63 | if (!userbuf) { | 63 | if (!userbuf) { |
64 | memcpy(buf, (vaddr + offset), csize); | 64 | memcpy(buf, (vaddr + offset), csize); |
65 | kunmap_atomic(vaddr, KM_PTE0); | 65 | kunmap_atomic(vaddr); |
66 | } else { | 66 | } else { |
67 | if (!kdump_buf_page) { | 67 | if (!kdump_buf_page) { |
68 | printk(KERN_WARNING "Kdump: Kdump buffer page not" | 68 | printk(KERN_WARNING "Kdump: Kdump buffer page not" |
69 | " allocated\n"); | 69 | " allocated\n"); |
70 | kunmap_atomic(vaddr, KM_PTE0); | 70 | kunmap_atomic(vaddr); |
71 | return -EFAULT; | 71 | return -EFAULT; |
72 | } | 72 | } |
73 | copy_page(kdump_buf_page, vaddr); | 73 | copy_page(kdump_buf_page, vaddr); |
74 | kunmap_atomic(vaddr, KM_PTE0); | 74 | kunmap_atomic(vaddr); |
75 | if (copy_to_user(buf, (kdump_buf_page + offset), csize)) | 75 | if (copy_to_user(buf, (kdump_buf_page + offset), csize)) |
76 | return -EFAULT; | 76 | return -EFAULT; |
77 | } | 77 | } |
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index cfdc6e0ef002..31bfc6927bc0 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c | |||
@@ -1283,9 +1283,9 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) | |||
1283 | if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr) | 1283 | if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr) |
1284 | return; | 1284 | return; |
1285 | 1285 | ||
1286 | vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0); | 1286 | vapic = kmap_atomic(vcpu->arch.apic->vapic_page); |
1287 | data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); | 1287 | data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); |
1288 | kunmap_atomic(vapic, KM_USER0); | 1288 | kunmap_atomic(vapic); |
1289 | 1289 | ||
1290 | apic_set_tpr(vcpu->arch.apic, data & 0xff); | 1290 | apic_set_tpr(vcpu->arch.apic, data & 0xff); |
1291 | } | 1291 | } |
@@ -1310,9 +1310,9 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) | |||
1310 | max_isr = 0; | 1310 | max_isr = 0; |
1311 | data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); | 1311 | data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); |
1312 | 1312 | ||
1313 | vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0); | 1313 | vapic = kmap_atomic(vcpu->arch.apic->vapic_page); |
1314 | *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; | 1314 | *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; |
1315 | kunmap_atomic(vapic, KM_USER0); | 1315 | kunmap_atomic(vapic); |
1316 | } | 1316 | } |
1317 | 1317 | ||
1318 | void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) | 1318 | void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) |
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h index 15610285ebb6..df5a70311be8 100644 --- a/arch/x86/kvm/paging_tmpl.h +++ b/arch/x86/kvm/paging_tmpl.h | |||
@@ -92,9 +92,9 @@ static int FNAME(cmpxchg_gpte)(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu, | |||
92 | if (unlikely(npages != 1)) | 92 | if (unlikely(npages != 1)) |
93 | return -EFAULT; | 93 | return -EFAULT; |
94 | 94 | ||
95 | table = kmap_atomic(page, KM_USER0); | 95 | table = kmap_atomic(page); |
96 | ret = CMPXCHG(&table[index], orig_pte, new_pte); | 96 | ret = CMPXCHG(&table[index], orig_pte, new_pte); |
97 | kunmap_atomic(table, KM_USER0); | 97 | kunmap_atomic(table); |
98 | 98 | ||
99 | kvm_release_page_dirty(page); | 99 | kvm_release_page_dirty(page); |
100 | 100 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 9cbfc0698118..bb4fd2636bc2 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -1162,12 +1162,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) | |||
1162 | */ | 1162 | */ |
1163 | vcpu->hv_clock.version += 2; | 1163 | vcpu->hv_clock.version += 2; |
1164 | 1164 | ||
1165 | shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0); | 1165 | shared_kaddr = kmap_atomic(vcpu->time_page); |
1166 | 1166 | ||
1167 | memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, | 1167 | memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, |
1168 | sizeof(vcpu->hv_clock)); | 1168 | sizeof(vcpu->hv_clock)); |
1169 | 1169 | ||
1170 | kunmap_atomic(shared_kaddr, KM_USER0); | 1170 | kunmap_atomic(shared_kaddr); |
1171 | 1171 | ||
1172 | mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); | 1172 | mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); |
1173 | return 0; | 1173 | return 0; |
@@ -3848,7 +3848,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, | |||
3848 | goto emul_write; | 3848 | goto emul_write; |
3849 | } | 3849 | } |
3850 | 3850 | ||
3851 | kaddr = kmap_atomic(page, KM_USER0); | 3851 | kaddr = kmap_atomic(page); |
3852 | kaddr += offset_in_page(gpa); | 3852 | kaddr += offset_in_page(gpa); |
3853 | switch (bytes) { | 3853 | switch (bytes) { |
3854 | case 1: | 3854 | case 1: |
@@ -3866,7 +3866,7 @@ static int emulator_cmpxchg_emulated(struct x86_emulate_ctxt *ctxt, | |||
3866 | default: | 3866 | default: |
3867 | BUG(); | 3867 | BUG(); |
3868 | } | 3868 | } |
3869 | kunmap_atomic(kaddr, KM_USER0); | 3869 | kunmap_atomic(kaddr); |
3870 | kvm_release_page_dirty(page); | 3870 | kvm_release_page_dirty(page); |
3871 | 3871 | ||
3872 | if (!exchanged) | 3872 | if (!exchanged) |
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c index e218d5df85ff..d9b094ca7aaa 100644 --- a/arch/x86/lib/usercopy_32.c +++ b/arch/x86/lib/usercopy_32.c | |||
@@ -760,9 +760,9 @@ survive: | |||
760 | break; | 760 | break; |
761 | } | 761 | } |
762 | 762 | ||
763 | maddr = kmap_atomic(pg, KM_USER0); | 763 | maddr = kmap_atomic(pg); |
764 | memcpy(maddr + offset, from, len); | 764 | memcpy(maddr + offset, from, len); |
765 | kunmap_atomic(maddr, KM_USER0); | 765 | kunmap_atomic(maddr); |
766 | set_page_dirty_lock(pg); | 766 | set_page_dirty_lock(pg); |
767 | put_page(pg); | 767 | put_page(pg); |
768 | up_read(&current->mm->mmap_sem); | 768 | up_read(&current->mm->mmap_sem); |
diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c index f4f29b19fac5..6f31ee56c008 100644 --- a/arch/x86/mm/highmem_32.c +++ b/arch/x86/mm/highmem_32.c | |||
@@ -51,11 +51,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot) | |||
51 | } | 51 | } |
52 | EXPORT_SYMBOL(kmap_atomic_prot); | 52 | EXPORT_SYMBOL(kmap_atomic_prot); |
53 | 53 | ||
54 | void *__kmap_atomic(struct page *page) | 54 | void *kmap_atomic(struct page *page) |
55 | { | 55 | { |
56 | return kmap_atomic_prot(page, kmap_prot); | 56 | return kmap_atomic_prot(page, kmap_prot); |
57 | } | 57 | } |
58 | EXPORT_SYMBOL(__kmap_atomic); | 58 | EXPORT_SYMBOL(kmap_atomic); |
59 | 59 | ||
60 | /* | 60 | /* |
61 | * This is the same as kmap_atomic() but can map memory that doesn't | 61 | * This is the same as kmap_atomic() but can map memory that doesn't |