author      Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
committer   Linus Torvalds <torvalds@ppc970.osdl.org>   2005-04-16 18:20:36 -0400
commit      1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree        0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/i386/mm/ioremap.c
tag         v2.6.12-rc2 (Linux-2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/i386/mm/ioremap.c')
-rw-r--r--   arch/i386/mm/ioremap.c   320
1 files changed, 320 insertions, 0 deletions
diff --git a/arch/i386/mm/ioremap.c b/arch/i386/mm/ioremap.c
new file mode 100644
index 000000000000..db06f7399913
--- /dev/null
+++ b/arch/i386/mm/ioremap.c
@@ -0,0 +1,320 @@
/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

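/*
 * The ioremap_*_range() helpers below walk the four-level generic
 * page-table hierarchy (pgd -> pud -> pmd -> pte) and install one PTE
 * per page of the remapped range; on two- and three-level i386
 * configurations the folded levels compile away.  Note the
 * "phys_addr -= addr" step at each level: the physical address is
 * carried as a delta against the virtual address, so "phys_addr + addr"
 * yields the correct physical address for any virtual subrange.
 */
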
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pte_t *pte;
	unsigned long pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(&init_mm, pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

static int ioremap_page_range(unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pgd_t *pgd;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);
	flush_cache_all();
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	spin_lock(&init_mm.page_table_lock);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return err;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			(unsigned long) addr + size, phys_addr, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
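
/*
 * Illustrative sketch, assuming a hypothetical 4k device register
 * window at bus address 0xfc000000 (the address and register offset
 * are made up for the example): callers get an uncached mapping by
 * passing _PAGE_PCD, exactly as ioremap_nocache() below does.
 *
 *	void __iomem *regs = __ioremap(0xfc000000, 0x1000, _PAGE_PCD);
 *
 *	if (regs) {
 *		unsigned int v = readl(regs + 0x10);
 *		writel(v | 1, regs + 0x10);
 *		iounmap(regs);
 *	}
 *
 * Most code should use the ioremap()/ioremap_nocache() wrappers rather
 * than calling __ioremap() directly.
 */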

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
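
/*
 * Illustrative sketch (hypothetical bus address, length and device
 * name): typical usage brackets the mapping with request_mem_region()
 * and tears both down again when done.
 *
 *	if (request_mem_region(0xe8000000, 0x1000, "mydev")) {
 *		void __iomem *mmio = ioremap_nocache(0xe8000000, 0x1000);
 *
 *		if (mmio) {
 *			writel(0, mmio + 0x04);
 *			iounmap(mmio);
 *		}
 *		release_mem_region(0xe8000000, 0x1000);
 *	}
 */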

void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p;
	if ((void __force *) addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
			addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p) {
		printk("__iounmap: bad address %p\n", addr);
		return;
	}

	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		/* p->size includes the guard page, but cpa doesn't like that */
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}
	kfree(p);
}

void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return phys_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void *) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}
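
/*
 * The fixmap area grows downward from FIXADDR_TOP, so a larger fixmap
 * index means a lower virtual address.  Decrementing idx in the loops
 * above and below therefore walks the boot-time mapping window in
 * increasing virtual-address order, with FIX_BTMAP_BEGIN as its
 * lowest-addressed page.
 */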

void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}