Diffstat (limited to 'lib')
-rw-r--r--  lib/Kconfig     9
-rw-r--r--  lib/Makefile    6
-rw-r--r--  lib/devres.c  300
-rw-r--r--  lib/iomap.c   296
4 files changed, 310 insertions, 301 deletions
diff --git a/lib/Kconfig b/lib/Kconfig
index 9b03581cdecb..384249915047 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -101,9 +101,14 @@ config TEXTSEARCH_FSM
 config PLIST
 	boolean
 
-config IOMAP_COPY
+config HAS_IOMEM
 	boolean
-	depends on !UML
+	depends on !NO_IOMEM
+	default y
+
+config HAS_IOPORT
+	boolean
+	depends on HAS_IOMEM && !NO_IOPORT
 	default y
 
 endmenu
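With this split, "does the platform have MMIO / port IO at all?" is stated once in lib/Kconfig and derived everywhere else: an architecture without memory-mapped IO defines NO_IOMEM (and NO_IOPORT if it also lacks port IO), and drivers depend on HAS_IOMEM/HAS_IOPORT instead of hardcoding !UML. A minimal sketch of the opt-out side, in the same Kconfig idiom as the hunk above; the arch placeholder and the FOO option are illustrative, not part of this diff:

# arch/<arch>/Kconfig (hypothetical): declare the platform has no MMIO
config NO_IOMEM
	boolean
	default y

# A driver that needs MMIO then keys off the derived option:
config FOO
	tristate "Foo driver"
	depends on HAS_IOMEM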
diff --git a/lib/Makefile b/lib/Makefile
index b819e37440db..992a39ef9ffd 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -12,15 +12,15 @@ lib-$(CONFIG_SMP) += cpumask.o
 
 lib-y += kobject.o kref.o kobject_uevent.o klist.o
 
-obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o iomap.o \
-	 bust_spinlocks.o
+obj-y += sort.o parser.o halfmd4.o debug_locks.o random32.o bust_spinlocks.o
 
 ifeq ($(CONFIG_DEBUG_KOBJECT),y)
 CFLAGS_kobject.o += -DDEBUG
 CFLAGS_kobject_uevent.o += -DDEBUG
 endif
 
-obj-$(CONFIG_IOMAP_COPY) += iomap_copy.o
+obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
+obj-$(CONFIG_HAS_IOMEM) += iomap_copy.o devres.o
 obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
diff --git a/lib/devres.c b/lib/devres.c
new file mode 100644
index 000000000000..2a668dd7cac7
--- /dev/null
+++ b/lib/devres.c
@@ -0,0 +1,300 @@
+#include <linux/pci.h>
+#include <linux/io.h>
+#include <linux/module.h>
+
+static void devm_ioremap_release(struct device *dev, void *res)
+{
+	iounmap(*(void __iomem **)res);
+}
+
+static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
+{
+	return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap(). Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
+			   unsigned long size)
+{
+	void __iomem **ptr, *addr;
+
+	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = ioremap(offset, size);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_ioremap);
+
+/**
+ * devm_ioremap_nocache - Managed ioremap_nocache()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap_nocache(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
+				   unsigned long size)
+{
+	void __iomem **ptr, *addr;
+
+	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = ioremap_nocache(offset, size);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_ioremap_nocache);
+
+/**
+ * devm_iounmap - Managed iounmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
+ */
+void devm_iounmap(struct device *dev, void __iomem *addr)
+{
+	iounmap(addr);
+	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+			       (void *)addr));
+}
+EXPORT_SYMBOL(devm_iounmap);
+
+#ifdef CONFIG_HAS_IOPORT
+/*
+ * Generic iomap devres
+ */
+static void devm_ioport_map_release(struct device *dev, void *res)
+{
+	ioport_unmap(*(void __iomem **)res);
+}
+
+static int devm_ioport_map_match(struct device *dev, void *res,
+				 void *match_data)
+{
+	return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioport_map - Managed ioport_map()
+ * @dev: Generic device to map ioport for
+ * @port: Port to map
+ * @nr: Number of ports to map
+ *
+ * Managed ioport_map(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_ioport_map(struct device *dev, unsigned long port,
+			      unsigned int nr)
+{
+	void __iomem **ptr, *addr;
+
+	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = ioport_map(port, nr);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_ioport_map);
+
+/**
+ * devm_ioport_unmap - Managed ioport_unmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed ioport_unmap(). @addr must have been mapped using
+ * devm_ioport_map().
+ */
+void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+	ioport_unmap(addr);
+	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+			       devm_ioport_map_match, (void *)addr));
+}
+EXPORT_SYMBOL(devm_ioport_unmap);
+
+#ifdef CONFIG_PCI
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE
+
+struct pcim_iomap_devres {
+	void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+	struct pcim_iomap_devres *this = res;
+	int i;
+
+	for (i = 0; i < PCIM_IOMAP_MAX; i++)
+		if (this->table[i])
+			pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access iomap allocation table for @pdev. If the iomap table doesn't
+ * exist and @pdev is managed, it will be allocated. All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but is
+ * guaranteed to succeed once the table has been allocated.
+ */
+void __iomem * const *pcim_iomap_table(struct pci_dev *pdev)
+{
+	struct pcim_iomap_devres *dr, *new_dr;
+
+	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+	if (dr)
+		return dr->table;
+
+	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+	if (!new_dr)
+		return NULL;
+	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+	return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pci_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap(). Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+	void __iomem **tbl;
+
+	BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+	tbl = (void __iomem **)pcim_iomap_table(pdev);
+	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
+		return NULL;
+
+	tbl[bar] = pci_iomap(pdev, bar, maxlen);
+	return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+	void __iomem **tbl;
+	int i;
+
+	pci_iounmap(pdev, addr);
+
+	tbl = (void __iomem **)pcim_iomap_table(pdev);
+	BUG_ON(!tbl);
+
+	for (i = 0; i < PCIM_IOMAP_MAX; i++)
+		if (tbl[i] == addr) {
+			tbl[i] = NULL;
+			return;
+		}
+	WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
+
+/**
+ * pcim_iomap_regions - Request and iomap PCI BARs
+ * @pdev: PCI device to map IO resources for
+ * @mask: Mask of BARs to request and iomap
+ * @name: Name used when requesting regions
+ *
+ * Request and iomap regions specified by @mask.
+ */
+int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
+{
+	void __iomem * const *iomap;
+	int i, rc;
+
+	iomap = pcim_iomap_table(pdev);
+	if (!iomap)
+		return -ENOMEM;
+
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
+		unsigned long len;
+
+		if (!(mask & (1 << i)))
+			continue;
+
+		rc = -EINVAL;
+		len = pci_resource_len(pdev, i);
+		if (!len)
+			goto err_inval;
+
+		rc = pci_request_region(pdev, i, name);
+		if (rc)
+			goto err_region;
+
+		rc = -ENOMEM;
+		if (!pcim_iomap(pdev, i, 0))
+			goto err_iomap;
+	}
+
+	return 0;
+
+ err_iomap:
+	pcim_iounmap(pdev, iomap[i]);
+ err_region:
+	pci_release_region(pdev, i);
+ err_inval:
+	while (--i >= 0) {
+		pcim_iounmap(pdev, iomap[i]);
+		pci_release_region(pdev, i);
+	}
+
+	return rc;
+}
+EXPORT_SYMBOL(pcim_iomap_regions);
+#endif /* CONFIG_PCI */
+#endif /* CONFIG_HAS_IOPORT */
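Taken together, the devres helpers above remove the manual unwind ladder from drivers: anything mapped through devm_*() or pcim_*() is torn down automatically when probe fails or the driver detaches. A usage sketch follows; the driver name, BAR number, region name, and register offset are invented for illustration, and pcim_enable_device() comes from the PCI half of the devres series rather than from this lib/ diff:

#include <linux/pci.h>
#include <linux/io.h>

#define DEMO_BAR	0		/* made-up register BAR */

static int demo_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem *mmio;
	int rc;

	rc = pcim_enable_device(pdev);	/* managed enable, from the same series */
	if (rc)
		return rc;

	/* Request and iomap BAR 0 in one call; both are undone on detach. */
	rc = pcim_iomap_regions(pdev, 1 << DEMO_BAR, "demo");
	if (rc)
		return rc;

	/* The table records every BAR mapped through pcim_iomap(). */
	mmio = pcim_iomap_table(pdev)[DEMO_BAR];
	writel(0, mmio + 0x10);		/* made-up reset register */

	return 0;			/* no iounmap()/release_region() unwind needed */
}

A failure after pcim_iomap_regions() can simply return: pcim_iomap_release() unmaps every recorded BAR on detach, and pcim_iomap_regions() itself rolls back partially requested regions on error.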
diff --git a/lib/iomap.c b/lib/iomap.c
index 4990c736bc4b..4d43f37c0154 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -6,7 +6,6 @@
 #include <linux/pci.h>
 #include <linux/io.h>
 
-#ifdef CONFIG_GENERIC_IOMAP
 #include <linux/module.h>
 
 /*
@@ -256,298 +255,3 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 }
 EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
-
-#endif /* CONFIG_GENERIC_IOMAP */
-
-/*
- * Generic iomap devres
- */
-static void devm_ioport_map_release(struct device *dev, void *res)
-{
-	ioport_unmap(*(void __iomem **)res);
-}
-
-static int devm_ioport_map_match(struct device *dev, void *res,
-				 void *match_data)
-{
-	return *(void **)res == match_data;
-}
-
-/**
- * devm_ioport_map - Managed ioport_map()
- * @dev: Generic device to map ioport for
- * @port: Port to map
- * @nr: Number of ports to map
- *
- * Managed ioport_map(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
-			       unsigned int nr)
-{
-	void __iomem **ptr, *addr;
-
-	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return NULL;
-
-	addr = ioport_map(port, nr);
-	if (addr) {
-		*ptr = addr;
-		devres_add(dev, ptr);
-	} else
-		devres_free(ptr);
-
-	return addr;
-}
-EXPORT_SYMBOL(devm_ioport_map);
-
-/**
- * devm_ioport_unmap - Managed ioport_unmap()
- * @dev: Generic device to unmap for
- * @addr: Address to unmap
- *
- * Managed ioport_unmap(). @addr must have been mapped using
- * devm_ioport_map().
- */
-void devm_ioport_unmap(struct device *dev, void __iomem *addr)
-{
-	ioport_unmap(addr);
-	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
-			       devm_ioport_map_match, (void *)addr));
-}
-EXPORT_SYMBOL(devm_ioport_unmap);
-
-static void devm_ioremap_release(struct device *dev, void *res)
-{
-	iounmap(*(void __iomem **)res);
-}
-
-static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
-{
-	return *(void **)res == match_data;
-}
-
-/**
- * devm_ioremap - Managed ioremap()
- * @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
- * @size: Size of map
- *
- * Managed ioremap(). Map is automatically unmapped on driver detach.
- */
-void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
-			   unsigned long size)
-{
-	void __iomem **ptr, *addr;
-
-	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return NULL;
-
-	addr = ioremap(offset, size);
-	if (addr) {
-		*ptr = addr;
-		devres_add(dev, ptr);
-	} else
-		devres_free(ptr);
-
-	return addr;
-}
-EXPORT_SYMBOL(devm_ioremap);
-
-/**
- * devm_ioremap_nocache - Managed ioremap_nocache()
- * @dev: Generic device to remap IO address for
- * @offset: BUS offset to map
- * @size: Size of map
- *
- * Managed ioremap_nocache(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
-				   unsigned long size)
-{
-	void __iomem **ptr, *addr;
-
-	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
-	if (!ptr)
-		return NULL;
-
-	addr = ioremap_nocache(offset, size);
-	if (addr) {
-		*ptr = addr;
-		devres_add(dev, ptr);
-	} else
-		devres_free(ptr);
-
-	return addr;
-}
-EXPORT_SYMBOL(devm_ioremap_nocache);
-
-/**
- * devm_iounmap - Managed iounmap()
- * @dev: Generic device to unmap for
- * @addr: Address to unmap
- *
- * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
- */
-void devm_iounmap(struct device *dev, void __iomem *addr)
-{
-	iounmap(addr);
-	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
-			       (void *)addr));
-}
-EXPORT_SYMBOL(devm_iounmap);
-
-/*
- * PCI iomap devres
- */
-#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE
-
-struct pcim_iomap_devres {
-	void __iomem *table[PCIM_IOMAP_MAX];
-};
-
-static void pcim_iomap_release(struct device *gendev, void *res)
-{
-	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
-	struct pcim_iomap_devres *this = res;
-	int i;
-
-	for (i = 0; i < PCIM_IOMAP_MAX; i++)
-		if (this->table[i])
-			pci_iounmap(dev, this->table[i]);
-}
-
-/**
- * pcim_iomap_table - access iomap allocation table
- * @pdev: PCI device to access iomap table for
- *
- * Access iomap allocation table for @dev.  If iomap table doesn't
- * exist and @pdev is managed, it will be allocated.  All iomaps
- * recorded in the iomap table are automatically unmapped on driver
- * detach.
- *
- * This function might sleep when the table is first allocated but can
- * be safely called without context and guaranteed to succed once
- * allocated.
- */
-void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
-{
-	struct pcim_iomap_devres *dr, *new_dr;
-
-	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
-	if (dr)
-		return dr->table;
-
-	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
-	if (!new_dr)
-		return NULL;
-	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
-	return dr->table;
-}
-EXPORT_SYMBOL(pcim_iomap_table);
-
-/**
- * pcim_iomap - Managed pcim_iomap()
- * @pdev: PCI device to iomap for
- * @bar: BAR to iomap
- * @maxlen: Maximum length of iomap
- *
- * Managed pci_iomap(). Map is automatically unmapped on driver
- * detach.
- */
-void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
-{
-	void __iomem **tbl;
-
-	BUG_ON(bar >= PCIM_IOMAP_MAX);
-
-	tbl = (void __iomem **)pcim_iomap_table(pdev);
-	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
-		return NULL;
-
-	tbl[bar] = pci_iomap(pdev, bar, maxlen);
-	return tbl[bar];
-}
-EXPORT_SYMBOL(pcim_iomap);
-
-/**
- * pcim_iounmap - Managed pci_iounmap()
- * @pdev: PCI device to iounmap for
- * @addr: Address to unmap
- *
- * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
- */
-void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
-{
-	void __iomem **tbl;
-	int i;
-
-	pci_iounmap(pdev, addr);
-
-	tbl = (void __iomem **)pcim_iomap_table(pdev);
-	BUG_ON(!tbl);
-
-	for (i = 0; i < PCIM_IOMAP_MAX; i++)
-		if (tbl[i] == addr) {
-			tbl[i] = NULL;
-			return;
-		}
-	WARN_ON(1);
-}
-EXPORT_SYMBOL(pcim_iounmap);
-
-/**
- * pcim_iomap_regions - Request and iomap PCI BARs
- * @pdev: PCI device to map IO resources for
- * @mask: Mask of BARs to request and iomap
- * @name: Name used when requesting regions
- *
- * Request and iomap regions specified by @mask.
- */
-int pcim_iomap_regions(struct pci_dev *pdev, u16 mask, const char *name)
-{
-	void __iomem * const *iomap;
-	int i, rc;
-
-	iomap = pcim_iomap_table(pdev);
-	if (!iomap)
-		return -ENOMEM;
-
-	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
-		unsigned long len;
-
-		if (!(mask & (1 << i)))
-			continue;
-
-		rc = -EINVAL;
-		len = pci_resource_len(pdev, i);
-		if (!len)
-			goto err_inval;
-
-		rc = pci_request_region(pdev, i, name);
-		if (rc)
-			goto err_region;
-
-		rc = -ENOMEM;
-		if (!pcim_iomap(pdev, i, 0))
-			goto err_iomap;
-	}
-
-	return 0;
-
- err_iomap:
-	pcim_iounmap(pdev, iomap[i]);
- err_region:
-	pci_release_region(pdev, i);
- err_inval:
-	while (--i >= 0) {
-		pcim_iounmap(pdev, iomap[i]);
-		pci_release_region(pdev, i);
-	}
-
-	return rc;
-}
-EXPORT_SYMBOL(pcim_iomap_regions);