author	venkatesh.pallipadi@intel.com <venkatesh.pallipadi@intel.com>	2008-03-18 20:00:16 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-04-17 11:41:19 -0400
commit	3a96ce8cac808fbed5493adc5c605bced28e2ca1 (patch)
tree	a6ab4f019e6cd228823fd28be3b75a0a8bf84690 /arch
parent	55c626820a82b25d7fceca702e9422037ae80626 (diff)
x86: PAT make ioremap_change_attr non-static
Make ioremap_change_attr() non-static and use prot_val in place of
ioremap_mode. This interface is used in subsequent PAT patches.

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/mm/ioremap.c	| 29
1 file changed, 12 insertions(+), 17 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index df95d1d6b4df..2ac09a5822cb 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -20,11 +20,6 @@
 #include <asm/tlbflush.h>
 #include <asm/pgalloc.h>
 
-enum ioremap_mode {
-	IOR_MODE_UNCACHED,
-	IOR_MODE_CACHED,
-};
-
 #ifdef CONFIG_X86_64
 
 unsigned long __phys_addr(unsigned long x)
@@ -90,18 +85,18 @@ int page_is_ram(unsigned long pagenr)
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
-			       enum ioremap_mode mode)
+int ioremap_change_attr(unsigned long vaddr, unsigned long size,
+			       unsigned long prot_val)
 {
 	unsigned long nrpages = size >> PAGE_SHIFT;
 	int err;
 
-	switch (mode) {
-	case IOR_MODE_UNCACHED:
+	switch (prot_val) {
+	case _PAGE_CACHE_UC:
 	default:
 		err = set_memory_uc(vaddr, nrpages);
 		break;
-	case IOR_MODE_CACHED:
+	case _PAGE_CACHE_WB:
 		err = set_memory_wb(vaddr, nrpages);
 		break;
 	}
@@ -119,7 +114,7 @@ static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
  * caller shouldn't need to know that small detail.
  */
 static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
-			       enum ioremap_mode mode)
+			       unsigned long prot_val)
 {
 	unsigned long pfn, offset, last_addr, vaddr;
 	struct vm_struct *area;
@@ -156,12 +151,12 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 		WARN_ON_ONCE(is_ram);
 	}
 
-	switch (mode) {
-	case IOR_MODE_UNCACHED:
+	switch (prot_val) {
+	case _PAGE_CACHE_UC:
 	default:
 		prot = PAGE_KERNEL_NOCACHE;
 		break;
-	case IOR_MODE_CACHED:
+	case _PAGE_CACHE_WB:
 		prot = PAGE_KERNEL;
 		break;
 	}
@@ -186,7 +181,7 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
 		return NULL;
 	}
 
-	if (ioremap_change_attr(vaddr, size, mode) < 0) {
+	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
 		vunmap(area->addr);
 		return NULL;
 	}
@@ -217,13 +212,13 @@ static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
  */
 void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
+	return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
+	return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
 }
 EXPORT_SYMBOL(ioremap_cache);
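For context, a minimal sketch of how a later PAT caller could use the now non-static ioremap_change_attr() to change the cache attribute of an already-mapped linear-mapping range. Only the ioremap_change_attr(vaddr, size, prot_val) signature and the _PAGE_CACHE_UC/_PAGE_CACHE_WB values come from this patch; the caller name, header choices, and error handling below are invented for illustration and do not appear in the kernel.

/* Hypothetical caller, for illustration only (not part of this patch). */
#include <linux/kernel.h>
#include <asm/io.h>		/* assumed location of the ioremap_change_attr() prototype */
#include <asm/pgtable.h>	/* assumed location of _PAGE_CACHE_UC / _PAGE_CACHE_WB */

static int example_set_range_uncached(unsigned long vaddr, unsigned long size)
{
	int err;

	/* Request the uncached memory type for this linear-mapping range. */
	err = ioremap_change_attr(vaddr, size, _PAGE_CACHE_UC);
	if (err)
		printk(KERN_ERR "example: ioremap_change_attr failed: %d\n", err);

	return err;
}

Passing the _PAGE_CACHE_* value directly, instead of the removed enum ioremap_mode, is what lets such external callers and the later PAT code share one attribute convention with __ioremap().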