author    Thomas Gleixner <tglx@linutronix.de>  2008-01-30 07:34:05 -0500
committer Ingo Molnar <mingo@elte.hu>  2008-01-30 07:34:05 -0500
commit    91eebf40b3cb5abd76e813e17dbc320ff2ea3295 (patch)
tree      a94d05d8d87a6b03ad26d09a8ae08087d90dfee6 /arch/x86
parent    1aaf74e919be54be0023c3124923fb537c7fb772 (diff)
x86: style cleanup of ioremap code
Fix the coding style before going further.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/mm/ioremap_32.c   97
-rw-r--r--  arch/x86/mm/ioremap_64.c   48
2 files changed, 70 insertions, 75 deletions
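For readers outside arch code: ioremap_nocache() and iounmap(), whose implementations are tidied below, are the interfaces drivers use to obtain an uncached CPU mapping of device registers. A minimal usage sketch follows; the device, base address, and register offset are made up for illustration and are not part of this patch.

#include <linux/io.h>
#include <linux/errno.h>

/* Illustrative only: made-up MMIO window and register offset. */
#define DEMO_MMIO_BASE	0xfeb00000UL
#define DEMO_MMIO_SIZE	0x1000UL
#define DEMO_REG_CTRL	0x10

static void __iomem *demo_regs;

static int demo_map_regs(void)
{
	/* Ask for an uncached mapping of the device's register window. */
	demo_regs = ioremap_nocache(DEMO_MMIO_BASE, DEMO_MMIO_SIZE);
	if (!demo_regs)
		return -ENOMEM;

	/* Always use the mmio accessors, never a plain pointer dereference. */
	writel(0x1, demo_regs + DEMO_REG_CTRL);

	return 0;
}

static void demo_unmap_regs(void)
{
	/* Every successful ioremap*() must be paired with iounmap(). */
	iounmap(demo_regs);
	demo_regs = NULL;
}

The uncached mapping matters because device registers must not be accessed through the CPU cache; the _PAGE_PCD | _PAGE_PWT flags passed to __ioremap() in the diff are the page-table bits that make the mapping uncached on x86.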
diff --git a/arch/x86/mm/ioremap_32.c b/arch/x86/mm/ioremap_32.c
index 18757f058bda..30ff2586db1e 100644
--- a/arch/x86/mm/ioremap_32.c
+++ b/arch/x86/mm/ioremap_32.c
@@ -1,6 +1,4 @@
 /*
- * arch/i386/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -22,10 +20,6 @@
 #define ISA_END_ADDRESS		0x100000
 
 /*
- * Generic mapping function (not visible outside):
- */
-
-/*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
  * directly.
@@ -34,10 +28,11 @@
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
-	void __iomem * addr;
-	struct vm_struct * area;
+	void __iomem *addr;
+	struct vm_struct *area;
 	unsigned long offset, last_addr;
 	pgprot_t prot;
 
@@ -61,9 +56,10 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 
 		t_addr = __va(phys_addr);
 		t_end = t_addr + (size - 1);
 
-		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-			if(!PageReserved(page))
+		for (page = virt_to_page(t_addr);
+		     page <= virt_to_page(t_end); page++)
+			if (!PageReserved(page))
 				return NULL;
 	}
 
@@ -85,7 +81,7 @@ void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned l
 	area->phys_addr = phys_addr;
 	addr = (void __iomem *) area->addr;
 	if (ioremap_page_range((unsigned long) addr,
-		(unsigned long) addr + size, phys_addr, prot)) {
+			       (unsigned long) addr + size, phys_addr, prot)) {
 		vunmap((void __force *) addr);
 		return NULL;
 	}
@@ -102,31 +98,31 @@ EXPORT_SYMBOL(__ioremap);
  * make bus memory CPU accessible via the readb/readw/readl/writeb/
  * writew/writel functions and the other mmio helpers. The returned
  * address is not guaranteed to be usable directly as a virtual
  * address.
  *
  * This version of ioremap ensures that the memory is marked uncachable
  * on the CPU as well as honouring existing caching rules from things like
  * the PCI bus. Note that there are other caches and buffers on many
  * busses. In particular driver authors should read up on PCI writes
  *
  * It's useful if some control registers are in such an area and
  * write combining or read caching is not desirable:
  *
  * Must be freed with iounmap.
  */
-
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
 	unsigned long last_addr;
 	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
-	if (!p)
-		return p;
+
+	if (!p)
+		return p;
 
 	/* Guaranteed to be > phys_addr, as per __ioremap() */
 	last_addr = phys_addr + size - 1;
 
 	if (last_addr < virt_to_phys(high_memory) - 1) {
 		struct page *ppage = virt_to_page(__va(phys_addr));
 		unsigned long npages;
 
 		phys_addr &= PAGE_MASK;
@@ -135,18 +131,18 @@ void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
 		last_addr = PAGE_ALIGN(last_addr);
 
 		/* .. but that's ok, because modulo-2**n arithmetic will make
 		 * the page-aligned "last - first" come out right.
 		 */
 		npages = (last_addr - phys_addr) >> PAGE_SHIFT;
 
 		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
 			iounmap(p);
 			p = NULL;
 		}
 		global_flush_tlb();
 	}
 
 	return p;
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
@@ -169,10 +165,11 @@ void iounmap(volatile void __iomem *addr)
  * of ISA space. So handle that here.
  */
 	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+	addr = (volatile void __iomem *)
+		(PAGE_MASK & (unsigned long __force)addr);
 
 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
@@ -187,7 +184,7 @@ void iounmap(volatile void __iomem *addr)
 	read_unlock(&vmlist_lock);
 
 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}
@@ -198,12 +195,12 @@ void iounmap(volatile void __iomem *addr)
 			 get_vm_area_size(p) >> PAGE_SHIFT,
 			 PAGE_KERNEL);
 		global_flush_tlb();
 	}
 
 	/* Finally remove it */
 	o = remove_vm_area((void *)addr);
 	BUG_ON(p != o || o == NULL);
 	kfree(p);
 }
 EXPORT_SYMBOL(iounmap);
 
@@ -237,7 +234,7 @@ void __init early_ioremap_init(void)
 	unsigned long *pgd;
 
 	if (early_ioremap_debug)
-		printk("early_ioremap_init()\n");
+		printk(KERN_DEBUG "early_ioremap_init()\n");
 
 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = __pa(bm_pte) | _PAGE_TABLE;
@@ -248,15 +245,16 @@ void __init early_ioremap_init(void)
 	 */
 	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
 		WARN_ON(1);
-		printk("pgd %p != %p\n",
+		printk(KERN_WARNING "pgd %p != %p\n",
 		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
-		printk("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
 		       fix_to_virt(FIX_BTMAP_BEGIN));
-		printk("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
 		       fix_to_virt(FIX_BTMAP_END));
 
-		printk("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
-		printk("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
+		       FIX_BTMAP_BEGIN);
 	}
 }
 
@@ -265,7 +263,7 @@ void __init early_ioremap_clear(void)
 	unsigned long *pgd;
 
 	if (early_ioremap_debug)
-		printk("early_ioremap_clear()\n");
+		printk(KERN_DEBUG "early_ioremap_clear()\n");
 
 	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
 	*pgd = 0;
@@ -331,10 +329,10 @@ static int __init check_early_ioremap_leak(void)
 		return 0;
 
 	printk(KERN_WARNING
 	       "Debug warning: early ioremap leak of %d areas detected.\n",
 	       early_ioremap_nested);
 	printk(KERN_WARNING
 	       "please boot with early_ioremap_debug and report the dmesg.\n");
 	WARN_ON(1);
 
 	return 1;
@@ -351,8 +349,8 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 
 	nesting = early_ioremap_nested;
 	if (early_ioremap_debug) {
-		printk("early_ioremap(%08lx, %08lx) [%d] => ",
+		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
 		       phys_addr, size, nesting);
 		dump_stack();
 	}
 
@@ -398,7 +396,7 @@ void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
 	if (early_ioremap_debug)
 		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));
 
-	return (void*) (offset + fix_to_virt(idx0));
+	return (void *) (offset + fix_to_virt(idx0));
 }
 
 void __init early_iounmap(void *addr, unsigned long size)
@@ -413,7 +411,8 @@ void __init early_iounmap(void *addr, unsigned long size)
 	WARN_ON(nesting < 0);
 
 	if (early_ioremap_debug) {
-		printk("early_iounmap(%p, %08lx) [%d]\n", addr, size, nesting);
+		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
+		       size, nesting);
 		dump_stack();
 	}
 
diff --git a/arch/x86/mm/ioremap_64.c b/arch/x86/mm/ioremap_64.c
index a37556124c86..bd962cc636c5 100644
--- a/arch/x86/mm/ioremap_64.c
+++ b/arch/x86/mm/ioremap_64.c
@@ -1,6 +1,4 @@
 /*
- * arch/x86_64/mm/ioremap.c
- *
  * Re-map IO memory to kernel address space so that we can access it.
  * This is needed for high PCI addresses that aren't mapped in the
  * 640k-1MB IO memory area on PC's
@@ -33,9 +31,8 @@ EXPORT_SYMBOL(__phys_addr);
  * Fix up the linear direct mapping of the kernel to avoid cache attribute
  * conflicts.
  */
-static int
-ioremap_change_attr(unsigned long phys_addr, unsigned long size,
-		    unsigned long flags)
+static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
+			       unsigned long flags)
 {
 	int err = 0;
 	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
@@ -50,10 +47,12 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 		if (!lookup_address(vaddr, &level))
 			return err;
 		/*
-		 * Must use a address here and not struct page because the phys addr
-		 * can be a in hole between nodes and not have an memmap entry.
+		 * Must use a address here and not struct page because
+		 * the phys addr can be a in hole between nodes and
+		 * not have an memmap entry.
 		 */
-		err = change_page_attr_addr(vaddr,npages,MAKE_GLOBAL(__PAGE_KERNEL|flags));
+		err = change_page_attr_addr(vaddr,npages,
+					MAKE_GLOBAL(__PAGE_KERNEL|flags));
 		if (!err)
 			global_flush_tlb();
 	}
@@ -61,10 +60,6 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
 }
 
 /*
- * Generic mapping function
- */
-
-/*
  * Remap an arbitrary physical address space into the kernel virtual
  * address space. Needed when the kernel wants to access high addresses
  * directly.
@@ -73,10 +68,11 @@ ioremap_change_attr(unsigned long phys_addr, unsigned long size,
  * have to convert them into an offset in a page-aligned mapping, but the
  * caller shouldn't need to know that small detail.
  */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
+void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
+			unsigned long flags)
 {
-	void * addr;
-	struct vm_struct * area;
+	void *addr;
+	struct vm_struct *area;
 	unsigned long offset, last_addr;
 	pgprot_t pgprot;
 
@@ -130,20 +126,19 @@ EXPORT_SYMBOL(__ioremap);
  * make bus memory CPU accessible via the readb/readw/readl/writeb/
  * writew/writel functions and the other mmio helpers. The returned
  * address is not guaranteed to be usable directly as a virtual
  * address.
  *
  * This version of ioremap ensures that the memory is marked uncachable
  * on the CPU as well as honouring existing caching rules from things like
  * the PCI bus. Note that there are other caches and buffers on many
  * busses. In particular driver authors should read up on PCI writes
  *
  * It's useful if some control registers are in such an area and
  * write combining or read caching is not desirable:
  *
  * Must be freed with iounmap.
  */
-
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
+void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
 {
 	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
 }
@@ -159,13 +154,14 @@ void iounmap(volatile void __iomem *addr)
 {
 	struct vm_struct *p, *o;
 
 	if (addr <= high_memory)
 		return;
 	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
 	    addr < phys_to_virt(ISA_END_ADDRESS))
 		return;
 
-	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
+	addr = (volatile void __iomem *)
+		(PAGE_MASK & (unsigned long __force)addr);
 	/* Use the vm area unlocked, assuming the caller
 	   ensures there isn't another iounmap for the same address
 	   in parallel. Reuse of the virtual address is prevented by
@@ -179,7 +175,7 @@ void iounmap(volatile void __iomem *addr)
 	read_unlock(&vmlist_lock);
 
 	if (!p) {
-		printk("iounmap: bad address %p\n", addr);
+		printk(KERN_ERR "iounmap: bad address %p\n", addr);
 		dump_stack();
 		return;
 	}
@@ -191,7 +187,7 @@ void iounmap(volatile void __iomem *addr)
 	/* Finally remove it */
 	o = remove_vm_area((void *)addr);
 	BUG_ON(p != o || o == NULL);
 	kfree(p);
 }
 EXPORT_SYMBOL(iounmap);
 
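The early_ioremap()/early_iounmap() pair touched in the ioremap_32.c hunks above is the boot-time counterpart to ioremap(): it hands out fixmap-backed mappings before the normal ioremap()/vmalloc machinery is available, and only nests a few levels deep. A rough, hypothetical sketch of a caller follows; the table address, size, and function names are made up for illustration.

#include <linux/init.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <asm/io.h>

/* Illustrative only: pretend a firmware table lives at this physical address. */
#define DEMO_TABLE_PHYS	0x000f0000UL
#define DEMO_TABLE_SIZE	0x100UL

static char demo_signature[4] __initdata;

static int __init demo_peek_firmware_table(void)
{
	void *va;

	/* Temporary fixmap mapping; usable long before ioremap() works. */
	va = early_ioremap(DEMO_TABLE_PHYS, DEMO_TABLE_SIZE);
	if (!va)
		return -ENOMEM;

	memcpy(demo_signature, va, sizeof(demo_signature));

	/* Must be unmapped with the same size, and before early boot ends. */
	early_iounmap(va, DEMO_TABLE_SIZE);

	return 0;
}

The nesting counter and the check_early_ioremap_leak() warning seen in the diff exist precisely to catch callers that forget the matching early_iounmap().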