author     Thomas Gleixner <tglx@linutronix.de>  2008-01-30 07:34:06 -0500
committer  Ingo Molnar <mingo@elte.hu>  2008-01-30 07:34:06 -0500
commit     d806e5ee20f62a892b09aa59559f143d465285db (patch)
tree       da18296bcd680e97e4b20627c90ef0d9ad3d9309 /arch/x86
parent     5f8681529cb243b3a492e55f2da9d632ad0d5e32 (diff)
x86: cpa: convert ioremap to new API
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
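This patch replaces the raw page-protection flag plumbing inside arch/x86/mm/ioremap.c with an explicit ioremap_mode enum: the internal helpers now name the caching intent (IOR_MODE_UNCACHED or IOR_MODE_CACHED), and the direct-mapping fixup is routed through the new set_memory_uc()/set_memory_wb() API instead of change_page_attr_addr(). The exported interface is unchanged; for reference, the prototypes as they appear in this file:

    void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size);
    void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size);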
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/mm/ioremap.c  57
1 file changed, 35 insertions(+), 22 deletions(-)
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index 8777bb7688f4..b86f66fa5185 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -19,6 +19,11 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+enum ioremap_mode {
+	IOR_MODE_UNCACHED,
+	IOR_MODE_CACHED,
+};
+
 #ifdef CONFIG_X86_64
 
 unsigned long __phys_addr(unsigned long x)
@@ -64,19 +69,17 @@ int page_is_ram(unsigned long pagenr)
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
-			       pgprot_t prot)
+static int ioremap_change_attr(unsigned long paddr, unsigned long size,
+			       enum ioremap_mode mode)
 {
-	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
+	unsigned long vaddr = (unsigned long)__va(paddr);
+	unsigned long nrpages = size >> PAGE_SHIFT;
 	int err, level;
 
 	/* No change for pages after the last mapping */
-	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
+	if ((paddr + size - 1) >= (max_pfn_mapped << PAGE_SHIFT))
 		return 0;
 
-	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-	vaddr = (unsigned long) __va(phys_addr);
-
 	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
@@ -84,13 +87,15 @@ static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 	if (!lookup_address(vaddr, &level))
 		return 0;
 
-	/*
-	 * Must use an address here and not struct page because the
-	 * phys addr can be a in hole between nodes and not have a
-	 * memmap entry.
-	 */
-	err = change_page_attr_addr(vaddr, npages, prot);
-
+	switch (mode) {
+	case IOR_MODE_UNCACHED:
+	default:
+		err = set_memory_uc(vaddr, nrpages);
+		break;
+	case IOR_MODE_CACHED:
+		err = set_memory_wb(vaddr, nrpages);
+		break;
+	}
 	if (!err)
 		global_flush_tlb();
 
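Note: set_memory_uc() and set_memory_wb() operate on the kernel's linear (direct) mapping, taking a virtual address and a page count rather than a pgprot_t. Their declarations, a sketch assuming the cacheflush.h of this same patch series:

    int set_memory_uc(unsigned long addr, int numpages);
    int set_memory_wb(unsigned long addr, int numpages);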
@@ -107,12 +112,12 @@ static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
  * caller shouldn't need to know that small detail.
  */
 static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
-			       unsigned long flags)
+			       enum ioremap_mode mode)
 {
 	void __iomem *addr;
 	struct vm_struct *area;
 	unsigned long offset, last_addr;
-	pgprot_t pgprot;
+	pgprot_t prot;
 
 	/* Don't allow wraparound or zero size */
 	last_addr = phys_addr + size - 1;
@@ -134,7 +139,15 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 		return NULL;
 	}
 
-	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);
+	switch (mode) {
+	case IOR_MODE_UNCACHED:
+	default:
+		prot = PAGE_KERNEL_NOCACHE;
+		break;
+	case IOR_MODE_CACHED:
+		prot = PAGE_KERNEL;
+		break;
+	}
 
 	/*
	 * Mappings have to be page-aligned
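Note: the switch maps each mode to a predefined kernel pgprot, replacing the hand-built MAKE_GLOBAL(__PAGE_KERNEL | flags) value. IOR_MODE_UNCACHED selects PAGE_KERNEL_NOCACHE, which matches the _PAGE_PCD | _PAGE_PWT flags that ioremap_nocache() passed in by hand before this patch (removed in a later hunk).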
@@ -152,12 +165,12 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
 	area->phys_addr = phys_addr;
 	addr = (void __iomem *) area->addr;
 	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-			       phys_addr, pgprot)) {
+			       phys_addr, prot)) {
 		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
 		return NULL;
 	}
 
-	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
+	if (ioremap_change_attr(phys_addr, size, mode) < 0) {
 		vunmap(addr);
 		return NULL;
 	}
@@ -188,13 +201,13 @@ static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
  */
 void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
+	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
 void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
 {
-	return __ioremap(phys_addr, size, 0);
+	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
 }
 EXPORT_SYMBOL(ioremap_cache);
 
@@ -242,7 +255,7 @@ void iounmap(volatile void __iomem *addr)
 	}
 
 	/* Reset the direct mapping. Can block */
-	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);
+	ioremap_change_attr(p->phys_addr, p->size, IOR_MODE_CACHED);
 
 	/* Finally remove it */
 	o = remove_vm_area((void *)addr);
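
For illustration, a minimal sketch of a driver using the (unchanged) external API; the device name, base address, and register offset below are hypothetical:

    #include <linux/errno.h>
    #include <linux/io.h>

    #define MYDEV_MMIO_BASE  0xfeb00000UL  /* hypothetical MMIO base */
    #define MYDEV_MMIO_SIZE  0x1000UL
    #define MYDEV_REG_CTRL   0x04          /* hypothetical control register */

    static void __iomem *mydev_regs;

    static int mydev_map(void)
    {
        /* Device registers must not be cached: takes the IOR_MODE_UNCACHED path. */
        mydev_regs = ioremap_nocache(MYDEV_MMIO_BASE, MYDEV_MMIO_SIZE);
        if (!mydev_regs)
            return -ENOMEM;

        writel(0x1, mydev_regs + MYDEV_REG_CTRL);
        return 0;
    }

    static void mydev_unmap(void)
    {
        /* iounmap() resets the direct mapping via the IOR_MODE_CACHED path above. */
        iounmap(mydev_regs);
        mydev_regs = NULL;
    }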