author		Tejun Heo <htejun@gmail.com>	2007-01-20 02:00:26 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-09 17:39:36 -0500
commit		9ac7849e35f705830f7b016ff272b0ff1f7ff759 (patch)
tree		7f17cdff87e154937a15cc2ec8da9b4e6018ce8e /lib
parent		77a527eadb425b60db3f5f0aae6a4c51c38e35e5 (diff)
devres: device resource management
Implement device resource management, in short, devres.  A device
driver can allocate an arbitrarily sized block of devres data that is
associated with a release function.  On driver detach, the release
function is invoked on the devres data, then the devres data is freed.

devreses are typed by their associated release functions.  Some
devreses are better represented by a single instance of the type while
others need multiple instances sharing the same release function.  Both
usages are supported.

devreses can be grouped using devres groups such that a device driver
can easily release acquired resources halfway through initialization
or selectively release resources (e.g. resources for port 1 out of 4
ports).

This patch adds the devres core, including documentation, and the
following managed interfaces.

* alloc/free	: devm_kzalloc(), devm_kzfree()
* IO region	: devm_request_region(), devm_release_region()
* IRQ		: devm_request_irq(), devm_free_irq()
* DMA		: dmam_alloc_coherent(), dmam_free_coherent(),
		  dmam_declare_coherent_memory(), dmam_pool_create(),
		  dmam_pool_destroy()
* PCI		: pcim_enable_device(), pcim_pin_device(), pci_is_managed()
* iomap		: devm_ioport_map(), devm_ioport_unmap(), devm_ioremap(),
		  devm_ioremap_nocache(), devm_iounmap(), pcim_iomap_table(),
		  pcim_iomap(), pcim_iounmap()

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
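As a rough illustration of the programming model (not part of this patch),
a driver's probe routine built on these managed interfaces might look like
the sketch below; the foo_* names, register window, and interrupt handler
are hypothetical:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Hypothetical per-device state; layout is illustrative only. */
struct foo_priv {
	void __iomem *regs;
};

static irqreturn_t foo_isr(int irq, void *data)
{
	/* ... handle the interrupt ... */
	return IRQ_HANDLED;
}

static int foo_probe(struct device *dev, unsigned long mmio_base,
		     unsigned long mmio_len, int irq)
{
	struct foo_priv *priv;
	int rc;

	/* Freed automatically when the driver detaches. */
	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* Unmapped automatically on detach; no explicit iounmap(). */
	priv->regs = devm_ioremap(dev, mmio_base, mmio_len);
	if (!priv->regs)
		return -ENOMEM;

	/* Released automatically on detach; note that none of the error
	 * paths above need any manual unwinding. */
	rc = devm_request_irq(dev, irq, foo_isr, 0, "foo", priv);
	if (rc)
		return rc;

	dev_set_drvdata(dev, priv);
	return 0;
}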
Diffstat (limited to 'lib')
-rw-r--r--	lib/Makefile	3
-rw-r--r--	lib/iomap.c	246
2 files changed, 246 insertions, 3 deletions
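To make the grouping behaviour described above concrete, here is a hedged
sketch (again not taken from this diff) of how a multi-port driver might
wrap per-port acquisition in a devres group and roll it back selectively.
The group helpers (devres_open_group() and friends) belong to the devres
core added by this patch; foo_init_one_port() is a hypothetical helper:

#include <linux/device.h>
#include <linux/gfp.h>
#include <linux/errno.h>

/* Hypothetical per-port init that acquires managed resources. */
int foo_init_one_port(struct device *dev, int port);

static int foo_init_port_grouped(struct device *dev, int port)
{
	void *grp;
	int rc;

	/* Open a group; everything acquired from here on is tagged with it. */
	grp = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!grp)
		return -ENOMEM;

	rc = foo_init_one_port(dev, port);
	if (rc) {
		/* Release just this port's resources, keep the rest. */
		devres_release_group(dev, grp);
		return rc;
	}

	/* Success: seal the group; its resources stay until detach. */
	devres_close_group(dev, grp);
	return 0;
}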
diff --git a/lib/Makefile b/lib/Makefile
index 77b4bad7d441..29b2e9912bbb 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,7 +5,7 @@
 lib-y := ctype.o string.o vsprintf.o cmdline.o \
	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
-	 sha1.o irq_regs.o reciprocal_div.o
+	 sha1.o irq_regs.o reciprocal_div.o iomap.o
 
 lib-$(CONFIG_MMU) += ioremap.o
 lib-$(CONFIG_SMP) += cpumask.o
@@ -41,7 +41,6 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
 obj-$(CONFIG_CRC16) += crc16.o
 obj-$(CONFIG_CRC32) += crc32.o
 obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
-obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
 obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
 
 obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
diff --git a/lib/iomap.c b/lib/iomap.c
index d6ccdd85df53..3214028141da 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -4,8 +4,10 @@
  * (C) Copyright 2004 Linus Torvalds
  */
 #include <linux/pci.h>
+#include <linux/io.h>
+
+#ifdef CONFIG_GENERIC_IOMAP
 #include <linux/module.h>
-#include <asm/io.h>
 
 /*
  * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
@@ -254,3 +256,245 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
 }
 EXPORT_SYMBOL(pci_iomap);
 EXPORT_SYMBOL(pci_iounmap);
+
+#endif /* CONFIG_GENERIC_IOMAP */
+
+/*
+ * Generic iomap devres
+ */
+static void devm_ioport_map_release(struct device *dev, void *res)
+{
+	ioport_unmap(*(void __iomem **)res);
+}
+
+static int devm_ioport_map_match(struct device *dev, void *res,
+				 void *match_data)
+{
+	return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioport_map - Managed ioport_map()
+ * @dev: Generic device to map ioport for
+ * @port: Port to map
+ * @nr: Number of ports to map
+ *
+ * Managed ioport_map().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+			       unsigned int nr)
+{
+	void __iomem **ptr, *addr;
+
+	ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = ioport_map(port, nr);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_ioport_map);
+
+/**
+ * devm_ioport_unmap - Managed ioport_unmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed ioport_unmap().  @addr must have been mapped using
+ * devm_ioport_map().
+ */
+void devm_ioport_unmap(struct device *dev, void __iomem *addr)
+{
+	ioport_unmap(addr);
+	WARN_ON(devres_destroy(dev, devm_ioport_map_release,
+			       devm_ioport_map_match, (void *)addr));
+}
+EXPORT_SYMBOL(devm_ioport_unmap);
+
+static void devm_ioremap_release(struct device *dev, void *res)
+{
+	iounmap(*(void __iomem **)res);
+}
+
+static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
+{
+	return *(void **)res == match_data;
+}
+
+/**
+ * devm_ioremap - Managed ioremap()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap().  Map is automatically unmapped on driver detach.
+ */
+void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
+			   unsigned long size)
+{
+	void __iomem **ptr, *addr;
+
+	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = ioremap(offset, size);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_ioremap);
+
+/**
+ * devm_ioremap_nocache - Managed ioremap_nocache()
+ * @dev: Generic device to remap IO address for
+ * @offset: BUS offset to map
+ * @size: Size of map
+ *
+ * Managed ioremap_nocache().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
+				   unsigned long size)
+{
+	void __iomem **ptr, *addr;
+
+	ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	addr = ioremap_nocache(offset, size);
+	if (addr) {
+		*ptr = addr;
+		devres_add(dev, ptr);
+	} else
+		devres_free(ptr);
+
+	return addr;
+}
+EXPORT_SYMBOL(devm_ioremap_nocache);
+
+/**
+ * devm_iounmap - Managed iounmap()
+ * @dev: Generic device to unmap for
+ * @addr: Address to unmap
+ *
+ * Managed iounmap().  @addr must have been mapped using devm_ioremap*().
+ */
+void devm_iounmap(struct device *dev, void __iomem *addr)
+{
+	iounmap(addr);
+	WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
+			       (void *)addr));
+}
+EXPORT_SYMBOL(devm_iounmap);
+
+/*
+ * PCI iomap devres
+ */
+#define PCIM_IOMAP_MAX	PCI_ROM_RESOURCE
+
+struct pcim_iomap_devres {
+	void __iomem *table[PCIM_IOMAP_MAX];
+};
+
+static void pcim_iomap_release(struct device *gendev, void *res)
+{
+	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+	struct pcim_iomap_devres *this = res;
+	int i;
+
+	for (i = 0; i < PCIM_IOMAP_MAX; i++)
+		if (this->table[i])
+			pci_iounmap(dev, this->table[i]);
+}
+
+/**
+ * pcim_iomap_table - access iomap allocation table
+ * @pdev: PCI device to access iomap table for
+ *
+ * Access iomap allocation table for @pdev.  If the iomap table doesn't
+ * exist and @pdev is managed, it will be allocated.  All iomaps
+ * recorded in the iomap table are automatically unmapped on driver
+ * detach.
+ *
+ * This function might sleep when the table is first allocated but can
+ * be safely called without context and is guaranteed to succeed once
+ * allocated.
+ */
+void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
+{
+	struct pcim_iomap_devres *dr, *new_dr;
+
+	dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
+	if (dr)
+		return dr->table;
+
+	new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
+	if (!new_dr)
+		return NULL;
+	dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
+	return dr->table;
+}
+EXPORT_SYMBOL(pcim_iomap_table);
+
+/**
+ * pcim_iomap - Managed pci_iomap()
+ * @pdev: PCI device to iomap for
+ * @bar: BAR to iomap
+ * @maxlen: Maximum length of iomap
+ *
+ * Managed pci_iomap().  Map is automatically unmapped on driver
+ * detach.
+ */
+void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
+{
+	void __iomem **tbl;
+
+	BUG_ON(bar >= PCIM_IOMAP_MAX);
+
+	tbl = (void __iomem **)pcim_iomap_table(pdev);
+	if (!tbl || tbl[bar])	/* duplicate mappings not allowed */
+		return NULL;
+
+	tbl[bar] = pci_iomap(pdev, bar, maxlen);
+	return tbl[bar];
+}
+EXPORT_SYMBOL(pcim_iomap);
+
+/**
+ * pcim_iounmap - Managed pci_iounmap()
+ * @pdev: PCI device to iounmap for
+ * @addr: Address to unmap
+ *
+ * Managed pci_iounmap().  @addr must have been mapped using pcim_iomap().
+ */
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+	void __iomem **tbl;
+	int i;
+
+	pci_iounmap(pdev, addr);
+
+	tbl = (void __iomem **)pcim_iomap_table(pdev);
+	BUG_ON(!tbl);
+
+	for (i = 0; i < PCIM_IOMAP_MAX; i++)
+		if (tbl[i] == addr) {
+			tbl[i] = NULL;
+			return;
+		}
+	WARN_ON(1);
+}
+EXPORT_SYMBOL(pcim_iounmap);
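
For completeness, a hedged sketch (not part of this patch) of how the
managed PCI iomap helpers at the end of this file might be used from a
PCI driver's probe routine; the driver name and BAR number are
hypothetical:

#include <linux/pci.h>
#include <linux/io.h>

/* Hypothetical probe: enable the device and map BAR 0, with both the
 * enable and the mapping undone automatically by devres on detach or
 * on a later probe failure. */
static int bar_example_probe(struct pci_dev *pdev,
			     const struct pci_device_id *id)
{
	void __iomem * const *iomap;
	int rc;

	rc = pcim_enable_device(pdev);	/* managed pci_enable_device() */
	if (rc)
		return rc;

	if (!pcim_iomap(pdev, 0, 0))	/* maxlen 0: map all of BAR 0 */
		return -ENOMEM;

	iomap = pcim_iomap_table(pdev);	/* table holding the BAR cookies */

	/* iomap[0] is now a cookie usable with ioread32()/iowrite32(). */
	(void)ioread32(iomap[0]);

	return 0;
}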