aboutsummaryrefslogtreecommitdiffstats
path: root/arch/frv/include
diff options
context:
space:
mode:
authorCong Wang <amwang@redhat.com>2012-06-27 01:02:49 -0400
committerCong Wang <amwang@redhat.com>2012-07-23 02:11:22 -0400
commit144cf8647a831c96bce9c947671467d088d89d16 (patch)
tree0c13deab46514cf82d2409ecf5c85684dc917e26 /arch/frv/include
parent906adea1536fe476c1068d86df01f8b130cde105 (diff)
frv: remove the second parameter of kmap_atomic_primary()
All callers of kmap_atomic_primary() use __KM_CACHE, so it can be removed safely, and __kmap_atomic_primary() only checks whether 'type' is __KM_CACHE or not, so 'type' can be changed to a boolean as well. Ditto for kunmap_atomic_primary()/__kunmap_atomic_primary(). Acked-by: Geert Uytterhoeven <geert@linux-m68k.org> Signed-off-by: Cong Wang <amwang@redhat.com>
Diffstat (limited to 'arch/frv/include')
-rw-r--r--arch/frv/include/asm/highmem.h34
1 file changed, 9 insertions, 25 deletions
diff --git a/arch/frv/include/asm/highmem.h b/arch/frv/include/asm/highmem.h
index 716956a5317b..b3adc93611f3 100644
--- a/arch/frv/include/asm/highmem.h
+++ b/arch/frv/include/asm/highmem.h
@@ -76,15 +76,16 @@ extern struct page *kmap_atomic_to_page(void *ptr);
76 76
77#ifndef __ASSEMBLY__ 77#ifndef __ASSEMBLY__
78 78
79#define __kmap_atomic_primary(type, paddr, ampr) \ 79#define __kmap_atomic_primary(cached, paddr, ampr) \
80({ \ 80({ \
81 unsigned long damlr, dampr; \ 81 unsigned long damlr, dampr; \
82 \ 82 \
83 dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \ 83 dampr = paddr | xAMPRx_L | xAMPRx_M | xAMPRx_S | xAMPRx_SS_16Kb | xAMPRx_V; \
84 \ 84 \
85 if (type != __KM_CACHE) \ 85 if (!cached) \
86 asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \ 86 asm volatile("movgs %0,dampr"#ampr :: "r"(dampr) : "memory"); \
87 else \ 87 else \
88 /* cache flush page attachment point */ \
88 asm volatile("movgs %0,iampr"#ampr"\n" \ 89 asm volatile("movgs %0,iampr"#ampr"\n" \
89 "movgs %0,dampr"#ampr"\n" \ 90 "movgs %0,dampr"#ampr"\n" \
90 :: "r"(dampr) : "memory" \ 91 :: "r"(dampr) : "memory" \
@@ -112,29 +113,20 @@ extern struct page *kmap_atomic_to_page(void *ptr);
112 (void *) damlr; \ 113 (void *) damlr; \
113}) 114})
114 115
115static inline void *kmap_atomic_primary(struct page *page, enum km_type type) 116static inline void *kmap_atomic_primary(struct page *page)
116{ 117{
117 unsigned long paddr; 118 unsigned long paddr;
118 119
119 pagefault_disable(); 120 pagefault_disable();
120 paddr = page_to_phys(page); 121 paddr = page_to_phys(page);
121 122
122 switch (type) { 123 return __kmap_atomic_primary(1, paddr, 2);
123 case 0: return __kmap_atomic_primary(0, paddr, 2);
124 case 1: return __kmap_atomic_primary(1, paddr, 3);
125 case 2: return __kmap_atomic_primary(2, paddr, 4);
126 case 3: return __kmap_atomic_primary(3, paddr, 5);
127
128 default:
129 BUG();
130 return NULL;
131 }
132} 124}
133 125
134#define __kunmap_atomic_primary(type, ampr) \ 126#define __kunmap_atomic_primary(cached, ampr) \
135do { \ 127do { \
136 asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory"); \ 128 asm volatile("movgs gr0,dampr"#ampr"\n" ::: "memory"); \
137 if (type == __KM_CACHE) \ 129 if (cached) \
138 asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory"); \ 130 asm volatile("movgs gr0,iampr"#ampr"\n" ::: "memory"); \
139} while(0) 131} while(0)
140 132
@@ -143,17 +135,9 @@ do { \
143 asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \ 135 asm volatile("tlbpr %0,gr0,#4,#1" : : "r"(vaddr) : "memory"); \
144} while(0) 136} while(0)
145 137
146static inline void kunmap_atomic_primary(void *kvaddr, enum km_type type) 138static inline void kunmap_atomic_primary(void *kvaddr)
147{ 139{
148 switch (type) { 140 __kunmap_atomic_primary(1, 2);
149 case 0: __kunmap_atomic_primary(0, 2); break;
150 case 1: __kunmap_atomic_primary(1, 3); break;
151 case 2: __kunmap_atomic_primary(2, 4); break;
152 case 3: __kunmap_atomic_primary(3, 5); break;
153
154 default:
155 BUG();
156 }
157 pagefault_enable(); 141 pagefault_enable();
158} 142}
159 143