author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 18:20:36 -0400
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/x86_64/mm/ioremap.c
tags       Linux-2.6.12-rc2, v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'arch/x86_64/mm/ioremap.c')
-rw-r--r--  arch/x86_64/mm/ioremap.c | 283 ++++++++++++++++++++++++++++++++++++
1 file changed, 283 insertions(+), 0 deletions(-)
diff --git a/arch/x86_64/mm/ioremap.c b/arch/x86_64/mm/ioremap.c
new file mode 100644
index 000000000000..74ec8554b195
--- /dev/null
+++ b/arch/x86_64/mm/ioremap.c
@@ -0,0 +1,283 @@
/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

#define ISA_START_ADDRESS      0xa0000
#define ISA_END_ADDRESS        0x100000
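
/*
 * The legacy ISA/VGA window at 640K-1MB (0xa0000-0xfffff) is always kept
 * mapped by the kernel, which is why __ioremap() below can short-circuit
 * any request that falls entirely inside these bounds.
 */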

static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;
        unsigned long pfn;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        if (address >= end)
                BUG();
        pfn = phys_addr >> PAGE_SHIFT;
        do {
                if (!pte_none(*pte)) {
                        printk("remap_area_pte: page already exists\n");
                        BUG();
                }
                set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
                                        _PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
                address += PAGE_SIZE;
                pfn++;
                pte++;
        } while (address && (address < end));
}
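
/*
 * Example of the clamping above: with 2MB pmds (PMD_SIZE == 1UL << 21),
 * a call starting one page below a pmd boundary with size == 2*PAGE_SIZE
 * gets its end clamped to PMD_SIZE, so only the single pte left in this
 * pmd is filled; the caller's loop then moves on to the next pmd with
 * the remaining range.
 */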

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PUD_MASK;
        end = address + size;
        if (end > PUD_SIZE)
                end = PUD_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pte_t * pte = pte_alloc_kernel(&init_mm, pmd, address);
                if (!pte)
                        return -ENOMEM;
                remap_area_pte(pte, address, end - address, address + phys_addr, flags);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        } while (address && (address < end));
        return 0;
}

static inline int remap_area_pud(pud_t * pud, unsigned long address, unsigned long size,
        unsigned long phys_addr, unsigned long flags)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        phys_addr -= address;
        if (address >= end)
                BUG();
        do {
                pmd_t * pmd = pmd_alloc(&init_mm, pud, address);
                if (!pmd)
                        return -ENOMEM;
                remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
                address = (address + PUD_SIZE) & PUD_MASK;
                pud++;
        } while (address && (address < end));
        return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
                                unsigned long size, unsigned long flags)
{
        int error;
        pgd_t *pgd;
        unsigned long end = address + size;

        phys_addr -= address;
        pgd = pgd_offset_k(address);
        flush_cache_all();
        if (address >= end)
                BUG();
        spin_lock(&init_mm.page_table_lock);
        do {
                pud_t *pud;
                pud = pud_alloc(&init_mm, pgd, address);
                error = -ENOMEM;
                if (!pud)
                        break;
                if (remap_area_pud(pud, address, end - address,
                                        phys_addr + address, flags))
                        break;
                error = 0;
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                pgd++;
        } while (address && (address < end));
        spin_unlock(&init_mm.page_table_lock);
        flush_tlb_all();
        return error;
}
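
/*
 * Taken together, these helpers walk one paging level each: on x86-64,
 * remap_area_pages() steps through pgd entries (512GB spans),
 * remap_area_pud() through 1GB pud spans, remap_area_pmd() through 2MB
 * pmd spans, and remap_area_pte() fills the 4KB leaf ptes. Note the
 * "phys_addr -= address" idiom at each level: it stores the constant
 * delta between physical and virtual so that "address + phys_addr"
 * recovers the physical address at any point in the walk.
 */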

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
                    unsigned long flags)
{
        int err = 0;
        if (flags && phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
                unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
                unsigned long vaddr = (unsigned long) __va(phys_addr);

                /*
                 * Must use an address here and not a struct page, because
                 * the physical address can be in a hole between nodes and
                 * not have a memmap entry.
                 */
                err = change_page_attr_addr(vaddr, npages, __pgprot(__PAGE_KERNEL|flags));
                if (!err)
                        global_flush_tlb();
        }
        return err;
}
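
/*
 * The fixup above matters because mapping the same physical page with
 * conflicting cache attributes is undefined on x86: if __ioremap() hands
 * out an uncached (_PAGE_PCD) alias of a page that the kernel's direct
 * mapping still maps cacheable, the attribute change has to be mirrored
 * into the direct mapping as well.
 */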

/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

#ifndef CONFIG_DISCONTIGMEM
        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (last_addr < virt_to_phys(high_memory)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }
#endif

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = area->addr;
        if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
                remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
                return NULL;
        }
        if (ioremap_change_attr(phys_addr, size, flags) < 0) {
                area->flags &= 0xffffff;
                vunmap(addr);
                return NULL;
        }
        return (__force void __iomem *) (offset + (char *)addr);
}
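
/*
 * The alignment arithmetic above, by example (illustrative values): a
 * request for phys_addr 0xfebf0004 with size 8 yields offset == 4, a
 * page-aligned phys_addr of 0xfebf0000 and a size of one page; the
 * caller gets back area->addr + 4, so the misalignment stays invisible
 * to them.
 */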

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_PCD);
}
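
/*
 * Usage sketch (not part of this change; the device base address and
 * register offset are made-up example values): map a register window
 * uncached, poke one register through the mmio helpers, then tear the
 * mapping down again.
 */
static int __init example_init(void)
{
        void __iomem *regs = ioremap_nocache(0xfebf0000, 0x1000);

        if (!regs)
                return -ENOMEM;
        /* read-modify-write a hypothetical control register at offset 4 */
        writel(readl(regs + 0x04) | 0x1, regs + 0x04);
        iounmap(regs);
        return 0;
}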

void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, **pprev;

        if (addr <= high_memory)
                return;
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        write_lock(&vmlist_lock);
        for (p = vmlist, pprev = &vmlist; p != NULL; pprev = &p->next, p = *pprev)
                if (p->addr == (void *)(PAGE_MASK & (unsigned long)addr))
                        break;
        if (!p) {
                printk("iounmap: bad address %p\n", addr);
                goto out_unlock;
        }
        *pprev = p->next;
        unmap_vm_area(p);
        if ((p->flags >> 20) &&
            p->phys_addr + p->size - 1 < virt_to_phys(high_memory)) {
                /* p->size includes the guard page, but cpa doesn't like that */
                change_page_attr(virt_to_page(__va(p->phys_addr)),
                                 p->size >> PAGE_SHIFT,
                                 PAGE_KERNEL);
                global_flush_tlb();
        }
out_unlock:
        write_unlock(&vmlist_lock);
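        /* kfree(NULL) is a no-op, so getting here via the !p path is safe */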
        kfree(p);
}