Diffstat (limited to 'arch/x86/mm/ioremap_64.c')
-rw-r--r--  arch/x86/mm/ioremap_64.c | 210 ----------------------------------
 1 file changed, 0 insertions(+), 210 deletions(-)
diff --git a/arch/x86/mm/ioremap_64.c b/arch/x86/mm/ioremap_64.c
deleted file mode 100644
index 6cac90aa5032..000000000000
--- a/arch/x86/mm/ioremap_64.c
+++ /dev/null
@@ -1,210 +0,0 @@
-/*
- * arch/x86_64/mm/ioremap.c
- *
- * Re-map IO memory to kernel address space so that we can access it.
- * This is needed for high PCI addresses that aren't mapped in the
- * 640k-1MB IO memory area on PC's
- *
- * (C) Copyright 1995 1996 Linus Torvalds
- */
-
-#include <linux/vmalloc.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/io.h>
-
-#include <asm/pgalloc.h>
-#include <asm/fixmap.h>
-#include <asm/tlbflush.h>
-#include <asm/cacheflush.h>
-#include <asm/proto.h>
-
-unsigned long __phys_addr(unsigned long x)
-{
-        if (x >= __START_KERNEL_map)
-                return x - __START_KERNEL_map + phys_base;
-        return x - PAGE_OFFSET;
-}
-EXPORT_SYMBOL(__phys_addr);
-
-#define ISA_START_ADDRESS 0xa0000
-#define ISA_END_ADDRESS 0x100000
-
-/*
- * Fix up the linear direct mapping of the kernel to avoid cache attribute
- * conflicts.
- */
-static int
-ioremap_change_attr(unsigned long phys_addr, unsigned long size,
-                    unsigned long flags)
-{
-        int err = 0;
-        if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
-                unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-                unsigned long vaddr = (unsigned long) __va(phys_addr);
-
-                /*
-                 * Must use a address here and not struct page because the phys addr
-                 * can be a in hole between nodes and not have an memmap entry.
-                 */
-                err = change_page_attr_addr(vaddr,npages,__pgprot(__PAGE_KERNEL|flags));
-                if (!err)
-                        global_flush_tlb();
-        }
-        return err;
-}
-
-/*
- * Generic mapping function
- */
-
-/*
- * Remap an arbitrary physical address space into the kernel virtual
- * address space. Needed when the kernel wants to access high addresses
- * directly.
- *
- * NOTE! We need to allow non-page-aligned mappings too: we will obviously
- * have to convert them into an offset in a page-aligned mapping, but the
- * caller shouldn't need to know that small detail.
- */
-void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
-{
-        void * addr;
-        struct vm_struct * area;
-        unsigned long offset, last_addr;
-        pgprot_t pgprot;
-
-        /* Don't allow wraparound or zero size */
-        last_addr = phys_addr + size - 1;
-        if (!size || last_addr < phys_addr)
-                return NULL;
-
-        /*
-         * Don't remap the low PCI/ISA area, it's always mapped..
-         */
-        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
-                return (__force void __iomem *)phys_to_virt(phys_addr);
-
-#ifdef CONFIG_FLATMEM
-        /*
-         * Don't allow anybody to remap normal RAM that we're using..
-         */
-        if (last_addr < virt_to_phys(high_memory)) {
-                char *t_addr, *t_end;
-                struct page *page;
-
-                t_addr = __va(phys_addr);
-                t_end = t_addr + (size - 1);
-
-                for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
-                        if(!PageReserved(page))
-                                return NULL;
-        }
-#endif
-
-        pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL
-                          | _PAGE_DIRTY | _PAGE_ACCESSED | flags);
-        /*
-         * Mappings have to be page-aligned
-         */
-        offset = phys_addr & ~PAGE_MASK;
-        phys_addr &= PAGE_MASK;
-        size = PAGE_ALIGN(last_addr+1) - phys_addr;
-
-        /*
-         * Ok, go for it..
-         */
-        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
-        if (!area)
-                return NULL;
-        area->phys_addr = phys_addr;
-        addr = area->addr;
-        if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
-                               phys_addr, pgprot)) {
-                remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
-                return NULL;
-        }
-        if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
-                area->flags &= 0xffffff;
-                vunmap(addr);
-                return NULL;
-        }
-        return (__force void __iomem *) (offset + (char *)addr);
-}
-EXPORT_SYMBOL(__ioremap);
-
-/**
- * ioremap_nocache - map bus memory into CPU space
- * @offset: bus address of the memory
- * @size: size of the resource to map
- *
- * ioremap_nocache performs a platform specific sequence of operations to
- * make bus memory CPU accessible via the readb/readw/readl/writeb/
- * writew/writel functions and the other mmio helpers. The returned
- * address is not guaranteed to be usable directly as a virtual
- * address.
- *
- * This version of ioremap ensures that the memory is marked uncachable
- * on the CPU as well as honouring existing caching rules from things like
- * the PCI bus. Note that there are other caches and buffers on many
- * busses. In particular driver authors should read up on PCI writes
- *
- * It's useful if some control registers are in such an area and
- * write combining or read caching is not desirable:
- *
- * Must be freed with iounmap.
- */
-
-void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
-{
-        return __ioremap(phys_addr, size, _PAGE_PCD);
-}
-EXPORT_SYMBOL(ioremap_nocache);
-
-/**
- * iounmap - Free a IO remapping
- * @addr: virtual address from ioremap_*
- *
- * Caller must ensure there is only one unmapping for the same pointer.
- */
-void iounmap(volatile void __iomem *addr)
-{
-        struct vm_struct *p, *o;
-
-        if (addr <= high_memory)
-                return;
-        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
-            addr < phys_to_virt(ISA_END_ADDRESS))
-                return;
-
-        addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
-        /* Use the vm area unlocked, assuming the caller
-           ensures there isn't another iounmap for the same address
-           in parallel. Reuse of the virtual address is prevented by
-           leaving it in the global lists until we're done with it.
-           cpa takes care of the direct mappings. */
-        read_lock(&vmlist_lock);
-        for (p = vmlist; p; p = p->next) {
-                if (p->addr == addr)
-                        break;
-        }
-        read_unlock(&vmlist_lock);
-
-        if (!p) {
-                printk("iounmap: bad address %p\n", addr);
-                dump_stack();
-                return;
-        }
-
-        /* Reset the direct mapping. Can block */
-        if (p->flags >> 20)
-                ioremap_change_attr(p->phys_addr, p->size, 0);
-
-        /* Finally remove it */
-        o = remove_vm_area((void *)addr);
-        BUG_ON(p != o || o == NULL);
-        kfree(p);
-}
-EXPORT_SYMBOL(iounmap);
-
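For context, the kernel-doc above for ioremap_nocache() and iounmap() describes how drivers were expected to consume this (now removed) x86_64 implementation. The fragment below is a minimal, hypothetical sketch of such a consumer and is not part of this commit: MMIO_EXAMPLE_BASE, MMIO_EXAMPLE_SIZE and REG_CTRL are made-up placeholders for a real device's resources, and the code assumes the pre-removal API (ioremap_nocache(), readl()/writel(), iounmap()).

/*
 * Illustrative sketch only, not part of this commit.  The address,
 * size and register offset below are hypothetical placeholders.
 */
#include <linux/module.h>
#include <linux/io.h>

#define MMIO_EXAMPLE_BASE  0xfebf0000UL  /* hypothetical bus address */
#define MMIO_EXAMPLE_SIZE  0x1000UL
#define REG_CTRL           0x04          /* hypothetical register offset */

static void __iomem *regs;

static int __init ioremap_example_init(void)
{
        /* Uncached mapping: ioremap_nocache() passes _PAGE_PCD to __ioremap(). */
        regs = ioremap_nocache(MMIO_EXAMPLE_BASE, MMIO_EXAMPLE_SIZE);
        if (!regs)
                return -ENOMEM;

        /* Always go through the mmio helpers, never a plain dereference. */
        writel(0x1, regs + REG_CTRL);
        (void)readl(regs + REG_CTRL);
        return 0;
}

static void __exit ioremap_example_exit(void)
{
        /* Exactly one iounmap() per mapping, as the kernel-doc requires. */
        iounmap(regs);
}

module_init(ioremap_example_init);
module_exit(ioremap_example_exit);
MODULE_LICENSE("GPL");

readl()/writel() are used instead of pointer dereferences because, as the kernel-doc notes, the returned cookie is not guaranteed to be usable directly as a virtual address.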
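The non-page-aligned handling in __ioremap() (the offset/PAGE_ALIGN step) is easiest to follow with concrete numbers. The standalone userspace snippet below reproduces just that arithmetic, assuming a 4 KiB page size; the physical address and size are invented for illustration.

#include <stdio.h>

/* Mirrors the kernel macros for a 4 KiB page size (assumption). */
#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        /* Hypothetical unaligned request. */
        unsigned long phys_addr = 0xfebf0044UL, size = 8;
        unsigned long last_addr = phys_addr + size - 1;
        unsigned long offset = phys_addr & ~PAGE_MASK;

        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /* Prints: offset=0x44 phys=0xfebf0000 size=0x1000 */
        printf("offset=%#lx phys=%#lx size=%#lx\n", offset, phys_addr, size);
        return 0;
}

One full page ends up mapped and the caller receives the page-aligned mapping plus the original offset, which is why unaligned requests "just work" from the caller's point of view.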