path: root/drivers/base
author	Tejun Heo <htejun@gmail.com>	2007-01-20 02:00:26 -0500
committer	Jeff Garzik <jeff@garzik.org>	2007-02-09 17:39:36 -0500
commit	9ac7849e35f705830f7b016ff272b0ff1f7ff759 (patch)
tree	7f17cdff87e154937a15cc2ec8da9b4e6018ce8e /drivers/base
parent	77a527eadb425b60db3f5f0aae6a4c51c38e35e5 (diff)
devres: device resource management
Implement device resource management, in short, devres.  A device
driver can allocate arbitrary-size devres data which is associated
with a release function.  On driver detach, the release function is
invoked on the devres data, then the devres data is freed.

devreses are typed by their associated release functions.  Some
devreses are better represented by a single instance of the type while
others need multiple instances sharing the same release function.
Both usages are supported.

devreses can be grouped using a devres group such that a device driver
can easily release acquired resources halfway through initialization
or selectively release resources (e.g. resources for port 1 out of 4
ports).

This patch adds the devres core including documentation and the
following managed interfaces.

* alloc/free	: devm_kzalloc(), devm_kzfree()
* IO region	: devm_request_region(), devm_release_region()
* IRQ		: devm_request_irq(), devm_free_irq()
* DMA		: dmam_alloc_coherent(), dmam_free_coherent(),
		  dmam_declare_coherent_memory(), dmam_pool_create(),
		  dmam_pool_destroy()
* PCI		: pcim_enable_device(), pcim_pin_device(), pci_is_managed()
* iomap		: devm_ioport_map(), devm_ioport_unmap(), devm_ioremap(),
		  devm_ioremap_nocache(), devm_iounmap(), pcim_iomap_table(),
		  pcim_iomap(), pcim_iounmap()

Signed-off-by: Tejun Heo <htejun@gmail.com>
Signed-off-by: Jeff Garzik <jeff@garzik.org>
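For illustration only (not part of the patch): a minimal probe sketch of how a driver would consume the managed interfaces, assuming the usual <linux/platform_device.h> and <linux/interrupt.h> includes. The driver name, private structure and interrupt handler are hypothetical; the point is that neither the error path nor remove() needs explicit freeing.

struct foo_priv {			/* made-up private data */
	int irq;
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	return IRQ_HANDLED;		/* stub handler for the sketch */
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *priv;
	int rc;

	/* freed automatically on probe failure or driver detach */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0)
		return priv->irq;	/* devres releases the allocation above */

	/* released automatically as well; no free_irq() in remove() */
	rc = devm_request_irq(&pdev->dev, priv->irq, foo_interrupt, 0,
			      "foo", priv);
	if (rc)
		return rc;

	platform_set_drvdata(pdev, priv);
	return 0;
}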
Diffstat (limited to 'drivers/base')
-rw-r--r--	drivers/base/Kconfig		 12
-rw-r--r--	drivers/base/Makefile		  1
-rw-r--r--	drivers/base/base.h		  1
-rw-r--r--	drivers/base/core.c		  2
-rw-r--r--	drivers/base/dd.c		  3
-rw-r--r--	drivers/base/devres.c		644
-rw-r--r--	drivers/base/dma-mapping.c	218
-rw-r--r--	drivers/base/dmapool.c		 59
8 files changed, 940 insertions(+), 0 deletions(-)
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 1429f3a2629e..5d6312e33490 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -37,6 +37,18 @@ config DEBUG_DRIVER
 
 	  If you are unsure about this, say N here.
 
+config DEBUG_DEVRES
+	bool "Managed device resources verbose debug messages"
+	depends on DEBUG_KERNEL
+	help
+	  This option enables kernel parameter devres.log. If set to
+	  non-zero, devres debug messages are printed. Select this if
+	  you are having a problem with devres or want to debug
+	  resource management for a managed device. devres.log can be
+	  switched on and off from sysfs node.
+
+	  If you are unsure about this, Say N here.
+
 config SYS_HYPERVISOR
 	bool
 	default n
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 7bbb9eeda235..e9eb7382ac3a 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -3,6 +3,7 @@
 obj-y			:= core.o sys.o bus.o dd.o \
 			   driver.o class.o platform.o \
 			   cpu.o firmware.o init.o map.o dmapool.o \
+			   dma-mapping.o devres.o \
 			   attribute_container.o transport_class.o
 obj-y			+= power/
 obj-$(CONFIG_ISA)	+= isa.o
diff --git a/drivers/base/base.h b/drivers/base/base.h
index d26644a59537..de7e1442ce60 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -44,3 +44,4 @@ struct class_device_attribute *to_class_dev_attr(struct attribute *_attr)
 
 extern char *make_class_name(const char *name, struct kobject *kobj);
 
+extern void devres_release_all(struct device *dev);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index e13614241c9e..a8ac34ba6107 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -428,6 +428,8 @@ void device_initialize(struct device *dev)
 	INIT_LIST_HEAD(&dev->dma_pools);
 	INIT_LIST_HEAD(&dev->node);
 	init_MUTEX(&dev->sem);
+	spin_lock_init(&dev->devres_lock);
+	INIT_LIST_HEAD(&dev->devres_head);
 	device_init_wakeup(dev, 0);
 	set_dev_node(dev, -1);
 }
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index b5bf243d9cd6..6a48824e43ff 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -112,6 +112,7 @@ static int really_probe(void *void_data)
 	atomic_inc(&probe_count);
 	pr_debug("%s: Probing driver %s with device %s\n",
 		 drv->bus->name, drv->name, dev->bus_id);
+	WARN_ON(!list_empty(&dev->devres_head));
 
 	dev->driver = drv;
 	if (driver_sysfs_add(dev)) {
@@ -137,6 +138,7 @@ static int really_probe(void *void_data)
 	goto done;
 
 probe_failed:
+	devres_release_all(dev);
 	driver_sysfs_remove(dev);
 	dev->driver = NULL;
 
@@ -327,6 +329,7 @@ static void __device_release_driver(struct device * dev)
 			dev->bus->remove(dev);
 		else if (drv->remove)
 			drv->remove(dev);
+		devres_release_all(dev);
 		dev->driver = NULL;
 		put_driver(drv);
 	}
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
new file mode 100644
index 000000000000..e177c9533b6c
--- /dev/null
+++ b/drivers/base/devres.c
@@ -0,0 +1,644 @@
1/*
2 * drivers/base/devres.c - device resource management
3 *
4 * Copyright (c) 2006 SUSE Linux Products GmbH
5 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
6 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/device.h>
11#include <linux/module.h>
12
13struct devres_node {
14 struct list_head entry;
15 dr_release_t release;
16#ifdef CONFIG_DEBUG_DEVRES
17 const char *name;
18 size_t size;
19#endif
20};
21
22struct devres {
23 struct devres_node node;
24 /* -- 3 pointers */
25 unsigned long long data[]; /* guarantee ull alignment */
26};
27
28struct devres_group {
29 struct devres_node node[2];
30 void *id;
31 int color;
32 /* -- 8 pointers */
33};
34
35#ifdef CONFIG_DEBUG_DEVRES
36static int log_devres = 0;
37module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
38
39static void set_node_dbginfo(struct devres_node *node, const char *name,
40 size_t size)
41{
42 node->name = name;
43 node->size = size;
44}
45
46static void devres_log(struct device *dev, struct devres_node *node,
47 const char *op)
48{
49 if (unlikely(log_devres))
50 dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n",
51 op, node, node->name, (unsigned long)node->size);
52}
53#else /* CONFIG_DEBUG_DEVRES */
54#define set_node_dbginfo(node, n, s) do {} while (0)
55#define devres_log(dev, node, op) do {} while (0)
56#endif /* CONFIG_DEBUG_DEVRES */
57
58/*
59 * Release functions for devres group. These callbacks are used only
60 * for identification.
61 */
62static void group_open_release(struct device *dev, void *res)
63{
64 /* noop */
65}
66
67static void group_close_release(struct device *dev, void *res)
68{
69 /* noop */
70}
71
72static struct devres_group * node_to_group(struct devres_node *node)
73{
74 if (node->release == &group_open_release)
75 return container_of(node, struct devres_group, node[0]);
76 if (node->release == &group_close_release)
77 return container_of(node, struct devres_group, node[1]);
78 return NULL;
79}
80
81static __always_inline struct devres * alloc_dr(dr_release_t release,
82 size_t size, gfp_t gfp)
83{
84 size_t tot_size = sizeof(struct devres) + size;
85 struct devres *dr;
86
87 dr = kmalloc_track_caller(tot_size, gfp);
88 if (unlikely(!dr))
89 return NULL;
90
91 memset(dr, 0, tot_size);
92 INIT_LIST_HEAD(&dr->node.entry);
93 dr->node.release = release;
94 return dr;
95}
96
97static void add_dr(struct device *dev, struct devres_node *node)
98{
99 devres_log(dev, node, "ADD");
100 BUG_ON(!list_empty(&node->entry));
101 list_add_tail(&node->entry, &dev->devres_head);
102}
103
104/**
105 * devres_alloc - Allocate device resource data
106 * @release: Release function devres will be associated with
107 * @size: Allocation size
108 * @gfp: Allocation flags
109 *
110 * allocate devres of @size bytes. The allocated area is zeroed, then
111 * associated with @release. The returned pointer can be passed to
112 * other devres_*() functions.
113 *
114 * RETURNS:
115 * Pointer to allocated devres on success, NULL on failure.
116 */
117#ifdef CONFIG_DEBUG_DEVRES
118void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
119 const char *name)
120{
121 struct devres *dr;
122
123 dr = alloc_dr(release, size, gfp);
124 if (unlikely(!dr))
125 return NULL;
126 set_node_dbginfo(&dr->node, name, size);
127 return dr->data;
128}
129EXPORT_SYMBOL_GPL(__devres_alloc);
130#else
131void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
132{
133 struct devres *dr;
134
135 dr = alloc_dr(release, size, gfp);
136 if (unlikely(!dr))
137 return NULL;
138 return dr->data;
139}
140EXPORT_SYMBOL_GPL(devres_alloc);
141#endif
142
143/**
144 * devres_free - Free device resource data
145 * @res: Pointer to devres data to free
146 *
147 * Free devres created with devres_alloc().
148 */
149void devres_free(void *res)
150{
151 if (res) {
152 struct devres *dr = container_of(res, struct devres, data);
153
154 BUG_ON(!list_empty(&dr->node.entry));
155 kfree(dr);
156 }
157}
158EXPORT_SYMBOL_GPL(devres_free);
159
160/**
161 * devres_add - Register device resource
162 * @dev: Device to add resource to
163 * @res: Resource to register
164 *
165 * Register devres @res to @dev. @res should have been allocated
166 * using devres_alloc(). On driver detach, the associated release
167 * function will be invoked and devres will be freed automatically.
168 */
169void devres_add(struct device *dev, void *res)
170{
171 struct devres *dr = container_of(res, struct devres, data);
172 unsigned long flags;
173
174 spin_lock_irqsave(&dev->devres_lock, flags);
175 add_dr(dev, &dr->node);
176 spin_unlock_irqrestore(&dev->devres_lock, flags);
177}
178EXPORT_SYMBOL_GPL(devres_add);
179
180static struct devres *find_dr(struct device *dev, dr_release_t release,
181 dr_match_t match, void *match_data)
182{
183 struct devres_node *node;
184
185 list_for_each_entry_reverse(node, &dev->devres_head, entry) {
186 struct devres *dr = container_of(node, struct devres, node);
187
188 if (node->release != release)
189 continue;
190 if (match && !match(dev, dr->data, match_data))
191 continue;
192 return dr;
193 }
194
195 return NULL;
196}
197
198/**
199 * devres_find - Find device resource
200 * @dev: Device to lookup resource from
201 * @release: Look for resources associated with this release function
202 * @match: Match function (optional)
203 * @match_data: Data for the match function
204 *
205 * Find the latest devres of @dev which is associated with @release
206 * and for which @match returns 1. If @match is NULL, it's considered
207 * to match all.
208 *
209 * RETURNS:
210 * Pointer to found devres, NULL if not found.
211 */
212void * devres_find(struct device *dev, dr_release_t release,
213 dr_match_t match, void *match_data)
214{
215 struct devres *dr;
216 unsigned long flags;
217
218 spin_lock_irqsave(&dev->devres_lock, flags);
219 dr = find_dr(dev, release, match, match_data);
220 spin_unlock_irqrestore(&dev->devres_lock, flags);
221
222 if (dr)
223 return dr->data;
224 return NULL;
225}
226EXPORT_SYMBOL_GPL(devres_find);
227
228/**
229 * devres_get - Find devres, if non-existent, add one atomically
230 * @dev: Device to lookup or add devres for
231 * @new_res: Pointer to new initialized devres to add if not found
232 * @match: Match function (optional)
233 * @match_data: Data for the match function
234 *
235 * Find the latest devres of @dev which has the same release function
236 * as @new_res and for which @match return 1. If found, @new_res is
237 * freed; otherwise, @new_res is added atomically.
238 *
239 * RETURNS:
240 * Pointer to found or added devres.
241 */
242void * devres_get(struct device *dev, void *new_res,
243 dr_match_t match, void *match_data)
244{
245 struct devres *new_dr = container_of(new_res, struct devres, data);
246 struct devres *dr;
247 unsigned long flags;
248
249 spin_lock_irqsave(&dev->devres_lock, flags);
250 dr = find_dr(dev, new_dr->node.release, match, match_data);
251 if (!dr) {
252 add_dr(dev, &new_dr->node);
253 dr = new_dr;
254 new_dr = NULL;
255 }
256 spin_unlock_irqrestore(&dev->devres_lock, flags);
257 devres_free(new_dr);
258
259 return dr->data;
260}
261EXPORT_SYMBOL_GPL(devres_get);
262
263/**
264 * devres_remove - Find a device resource and remove it
265 * @dev: Device to find resource from
266 * @release: Look for resources associated with this release function
267 * @match: Match function (optional)
268 * @match_data: Data for the match function
269 *
270 * Find the latest devres of @dev associated with @release and for
271 * which @match returns 1. If @match is NULL, it's considered to
272 * match all. If found, the resource is removed atomically and
273 * returned.
274 *
275 * RETURNS:
276 * Pointer to removed devres on success, NULL if not found.
277 */
278void * devres_remove(struct device *dev, dr_release_t release,
279 dr_match_t match, void *match_data)
280{
281 struct devres *dr;
282 unsigned long flags;
283
284 spin_lock_irqsave(&dev->devres_lock, flags);
285 dr = find_dr(dev, release, match, match_data);
286 if (dr) {
287 list_del_init(&dr->node.entry);
288 devres_log(dev, &dr->node, "REM");
289 }
290 spin_unlock_irqrestore(&dev->devres_lock, flags);
291
292 if (dr)
293 return dr->data;
294 return NULL;
295}
296EXPORT_SYMBOL_GPL(devres_remove);
297
298/**
299 * devres_destroy - Find a device resource and destroy it
300 * @dev: Device to find resource from
301 * @release: Look for resources associated with this release function
302 * @match: Match function (optional)
303 * @match_data: Data for the match function
304 *
305 * Find the latest devres of @dev associated with @release and for
306 * which @match returns 1. If @match is NULL, it's considered to
307 * match all. If found, the resource is removed atomically and freed.
308 *
309 * RETURNS:
310 * 0 if devres is found and freed, -ENOENT if not found.
311 */
312int devres_destroy(struct device *dev, dr_release_t release,
313 dr_match_t match, void *match_data)
314{
315 void *res;
316
317 res = devres_remove(dev, release, match, match_data);
318 if (unlikely(!res))
319 return -ENOENT;
320
321 devres_free(res);
322 return 0;
323}
324EXPORT_SYMBOL_GPL(devres_destroy);
325
326static int remove_nodes(struct device *dev,
327 struct list_head *first, struct list_head *end,
328 struct list_head *todo)
329{
330 int cnt = 0, nr_groups = 0;
331 struct list_head *cur;
332
333 /* First pass - move normal devres entries to @todo and clear
334 * devres_group colors.
335 */
336 cur = first;
337 while (cur != end) {
338 struct devres_node *node;
339 struct devres_group *grp;
340
341 node = list_entry(cur, struct devres_node, entry);
342 cur = cur->next;
343
344 grp = node_to_group(node);
345 if (grp) {
346 /* clear color of group markers in the first pass */
347 grp->color = 0;
348 nr_groups++;
349 } else {
350 /* regular devres entry */
351 if (&node->entry == first)
352 first = first->next;
353 list_move_tail(&node->entry, todo);
354 cnt++;
355 }
356 }
357
358 if (!nr_groups)
359 return cnt;
360
361 /* Second pass - Scan groups and color them. A group gets
362 * color value of two iff the group is wholly contained in
363 * [cur, end). That is, for a closed group, both opening and
364 * closing markers should be in the range, while just the
365 * opening marker is enough for an open group.
366 */
367 cur = first;
368 while (cur != end) {
369 struct devres_node *node;
370 struct devres_group *grp;
371
372 node = list_entry(cur, struct devres_node, entry);
373 cur = cur->next;
374
375 grp = node_to_group(node);
376 BUG_ON(!grp || list_empty(&grp->node[0].entry));
377
378 grp->color++;
379 if (list_empty(&grp->node[1].entry))
380 grp->color++;
381
382 BUG_ON(grp->color <= 0 || grp->color > 2);
383 if (grp->color == 2) {
384 /* No need to update cur or end. The removed
385 * nodes are always before both.
386 */
387 list_move_tail(&grp->node[0].entry, todo);
388 list_del_init(&grp->node[1].entry);
389 }
390 }
391
392 return cnt;
393}
394
395static int release_nodes(struct device *dev, struct list_head *first,
396 struct list_head *end, unsigned long flags)
397{
398 LIST_HEAD(todo);
399 int cnt;
400 struct devres *dr, *tmp;
401
402 cnt = remove_nodes(dev, first, end, &todo);
403
404 spin_unlock_irqrestore(&dev->devres_lock, flags);
405
406 /* Release. Note that both devres and devres_group are
407 * handled as devres in the following loop. This is safe.
408 */
409 list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
410 devres_log(dev, &dr->node, "REL");
411 dr->node.release(dev, dr->data);
412 kfree(dr);
413 }
414
415 return cnt;
416}
417
418/**
419 * devres_release_all - Release all resources
420 * @dev: Device to release resources for
421 *
422 * Release all resources associated with @dev. This function is
423 * called on driver detach.
424 */
425int devres_release_all(struct device *dev)
426{
427 unsigned long flags;
428
429 spin_lock_irqsave(&dev->devres_lock, flags);
430 return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
431 flags);
432}
433
434/**
435 * devres_open_group - Open a new devres group
436 * @dev: Device to open devres group for
437 * @id: Separator ID
438 * @gfp: Allocation flags
439 *
440 * Open a new devres group for @dev with @id. For @id, using a
441 * pointer to an object which won't be used for another group is
442 * recommended. If @id is NULL, address-wise unique ID is created.
443 *
444 * RETURNS:
445 * ID of the new group, NULL on failure.
446 */
447void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
448{
449 struct devres_group *grp;
450 unsigned long flags;
451
452 grp = kmalloc(sizeof(*grp), gfp);
453 if (unlikely(!grp))
454 return NULL;
455
456 grp->node[0].release = &group_open_release;
457 grp->node[1].release = &group_close_release;
458 INIT_LIST_HEAD(&grp->node[0].entry);
459 INIT_LIST_HEAD(&grp->node[1].entry);
460 set_node_dbginfo(&grp->node[0], "grp<", 0);
461 set_node_dbginfo(&grp->node[1], "grp>", 0);
462 grp->id = grp;
463 if (id)
464 grp->id = id;
465
466 spin_lock_irqsave(&dev->devres_lock, flags);
467 add_dr(dev, &grp->node[0]);
468 spin_unlock_irqrestore(&dev->devres_lock, flags);
469 return grp->id;
470}
471EXPORT_SYMBOL_GPL(devres_open_group);
472
473/* Find devres group with ID @id. If @id is NULL, look for the latest. */
474static struct devres_group * find_group(struct device *dev, void *id)
475{
476 struct devres_node *node;
477
478 list_for_each_entry_reverse(node, &dev->devres_head, entry) {
479 struct devres_group *grp;
480
481 if (node->release != &group_open_release)
482 continue;
483
484 grp = container_of(node, struct devres_group, node[0]);
485
486 if (id) {
487 if (grp->id == id)
488 return grp;
489 } else if (list_empty(&grp->node[1].entry))
490 return grp;
491 }
492
493 return NULL;
494}
495
496/**
497 * devres_close_group - Close a devres group
498 * @dev: Device to close devres group for
499 * @id: ID of target group, can be NULL
500 *
501 * Close the group identified by @id. If @id is NULL, the latest open
502 * group is selected.
503 */
504void devres_close_group(struct device *dev, void *id)
505{
506 struct devres_group *grp;
507 unsigned long flags;
508
509 spin_lock_irqsave(&dev->devres_lock, flags);
510
511 grp = find_group(dev, id);
512 if (grp)
513 add_dr(dev, &grp->node[1]);
514 else
515 WARN_ON(1);
516
517 spin_unlock_irqrestore(&dev->devres_lock, flags);
518}
519EXPORT_SYMBOL_GPL(devres_close_group);
520
521/**
522 * devres_remove_group - Remove a devres group
523 * @dev: Device to remove group for
524 * @id: ID of target group, can be NULL
525 *
526 * Remove the group identified by @id. If @id is NULL, the latest
527 * open group is selected. Note that removing a group doesn't affect
528 * any other resources.
529 */
530void devres_remove_group(struct device *dev, void *id)
531{
532 struct devres_group *grp;
533 unsigned long flags;
534
535 spin_lock_irqsave(&dev->devres_lock, flags);
536
537 grp = find_group(dev, id);
538 if (grp) {
539 list_del_init(&grp->node[0].entry);
540 list_del_init(&grp->node[1].entry);
541 devres_log(dev, &grp->node[0], "REM");
542 } else
543 WARN_ON(1);
544
545 spin_unlock_irqrestore(&dev->devres_lock, flags);
546
547 kfree(grp);
548}
549EXPORT_SYMBOL_GPL(devres_remove_group);
550
551/**
552 * devres_release_group - Release resources in a devres group
553 * @dev: Device to release group for
554 * @id: ID of target group, can be NULL
555 *
556 * Release all resources in the group identified by @id. If @id is
557 * NULL, the latest open group is selected. The selected group and
558 * groups properly nested inside the selected group are removed.
559 *
560 * RETURNS:
561 * The number of released non-group resources.
562 */
563int devres_release_group(struct device *dev, void *id)
564{
565 struct devres_group *grp;
566 unsigned long flags;
567 int cnt = 0;
568
569 spin_lock_irqsave(&dev->devres_lock, flags);
570
571 grp = find_group(dev, id);
572 if (grp) {
573 struct list_head *first = &grp->node[0].entry;
574 struct list_head *end = &dev->devres_head;
575
576 if (!list_empty(&grp->node[1].entry))
577 end = grp->node[1].entry.next;
578
579 cnt = release_nodes(dev, first, end, flags);
580 } else {
581 WARN_ON(1);
582 spin_unlock_irqrestore(&dev->devres_lock, flags);
583 }
584
585 return cnt;
586}
587EXPORT_SYMBOL_GPL(devres_release_group);
588
589/*
590 * Managed kzalloc/kfree
591 */
592static void devm_kzalloc_release(struct device *dev, void *res)
593{
594 /* noop */
595}
596
597static int devm_kzalloc_match(struct device *dev, void *res, void *data)
598{
599 return res == data;
600}
601
602/**
603 * devm_kzalloc - Managed kzalloc
604 * @dev: Device to allocate memory for
605 * @size: Allocation size
606 * @gfp: Allocation gfp flags
607 *
608 * Managed kzalloc. Memory allocated with this function is
609 * automatically freed on driver detach. Like all other devres
610 * resources, guaranteed alignment is unsigned long long.
611 *
612 * RETURNS:
613 * Pointer to allocated memory on success, NULL on failure.
614 */
615void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
616{
617 struct devres *dr;
618
619 /* use raw alloc_dr for kmalloc caller tracing */
620 dr = alloc_dr(devm_kzalloc_release, size, gfp);
621 if (unlikely(!dr))
622 return NULL;
623
624 set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
625 devres_add(dev, dr->data);
626 return dr->data;
627}
628EXPORT_SYMBOL_GPL(devm_kzalloc);
629
630/**
631 * devm_kfree - Managed kfree
632 * @dev: Device this memory belongs to
633 * @p: Memory to free
634 *
635 * Free memory allocated with devm_kzalloc().
636 */
637void devm_kfree(struct device *dev, void *p)
638{
639 int rc;
640
641 rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
642 WARN_ON(rc);
643}
644EXPORT_SYMBOL_GPL(devm_kfree);
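The group API above is what makes partial rollback cheap. A hedged sketch (foo_port_setup() and the error codes are made up, not part of the patch) of the "release resources for port 1 out of 4 ports" case from the changelog:

static int foo_init_port(struct device *dev, int port)
{
	void *id;

	/* everything acquired after this point belongs to the group */
	id = devres_open_group(dev, NULL, GFP_KERNEL);
	if (!id)
		return -ENOMEM;

	if (foo_port_setup(dev, port) < 0) {
		/* drop only this port's resources, keep the rest */
		devres_release_group(dev, id);
		return -EIO;
	}

	/* success: seal the group; it is released with the device */
	devres_close_group(dev, id);
	return 0;
}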
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
new file mode 100644
index 000000000000..ca9186f70a69
--- /dev/null
+++ b/drivers/base/dma-mapping.c
@@ -0,0 +1,218 @@
1/*
2 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
3 *
4 * Copyright (c) 2006 SUSE Linux Products GmbH
5 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
6 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/dma-mapping.h>
11
12/*
13 * Managed DMA API
14 */
15struct dma_devres {
16 size_t size;
17 void *vaddr;
18 dma_addr_t dma_handle;
19};
20
21static void dmam_coherent_release(struct device *dev, void *res)
22{
23 struct dma_devres *this = res;
24
25 dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
26}
27
28static void dmam_noncoherent_release(struct device *dev, void *res)
29{
30 struct dma_devres *this = res;
31
32 dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
33}
34
35static int dmam_match(struct device *dev, void *res, void *match_data)
36{
37 struct dma_devres *this = res, *match = match_data;
38
39 if (this->vaddr == match->vaddr) {
40 WARN_ON(this->size != match->size ||
41 this->dma_handle != match->dma_handle);
42 return 1;
43 }
44 return 0;
45}
46
47/**
48 * dmam_alloc_coherent - Managed dma_alloc_coherent()
49 * @dev: Device to allocate coherent memory for
50 * @size: Size of allocation
51 * @dma_handle: Out argument for allocated DMA handle
52 * @gfp: Allocation flags
53 *
54 * Managed dma_alloc_coherent(). Memory allocated using this function
55 * will be automatically released on driver detach.
56 *
57 * RETURNS:
58 * Pointer to allocated memory on success, NULL on failure.
59 */
60void * dmam_alloc_coherent(struct device *dev, size_t size,
61 dma_addr_t *dma_handle, gfp_t gfp)
62{
63 struct dma_devres *dr;
64 void *vaddr;
65
66 dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
67 if (!dr)
68 return NULL;
69
70 vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
71 if (!vaddr) {
72 devres_free(dr);
73 return NULL;
74 }
75
76 dr->vaddr = vaddr;
77 dr->dma_handle = *dma_handle;
78 dr->size = size;
79
80 devres_add(dev, dr);
81
82 return vaddr;
83}
84EXPORT_SYMBOL(dmam_alloc_coherent);
85
86/**
87 * dmam_free_coherent - Managed dma_free_coherent()
88 * @dev: Device to free coherent memory for
89 * @size: Size of allocation
90 * @vaddr: Virtual address of the memory to free
91 * @dma_handle: DMA handle of the memory to free
92 *
93 * Managed dma_free_coherent().
94 */
95void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
96 dma_addr_t dma_handle)
97{
98 struct dma_devres match_data = { size, vaddr, dma_handle };
99
100 dma_free_coherent(dev, size, vaddr, dma_handle);
101 WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
102 &match_data));
103}
104EXPORT_SYMBOL(dmam_free_coherent);
105
106/**
107 * dmam_alloc_noncoherent - Managed dma_alloc_noncoherent()
108 * @dev: Device to allocate non-coherent memory for
109 * @size: Size of allocation
110 * @dma_handle: Out argument for allocated DMA handle
111 * @gfp: Allocation flags
112 *
113 * Managed dma_alloc_noncoherent(). Memory allocated using this
114 * function will be automatically released on driver detach.
115 *
116 * RETURNS:
117 * Pointer to allocated memory on success, NULL on failure.
118 */
119void *dmam_alloc_noncoherent(struct device *dev, size_t size,
120 dma_addr_t *dma_handle, gfp_t gfp)
121{
122 struct dma_devres *dr;
123 void *vaddr;
124
125 dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
126 if (!dr)
127 return NULL;
128
129 vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
130 if (!vaddr) {
131 devres_free(dr);
132 return NULL;
133 }
134
135 dr->vaddr = vaddr;
136 dr->dma_handle = *dma_handle;
137 dr->size = size;
138
139 devres_add(dev, dr);
140
141 return vaddr;
142}
143EXPORT_SYMBOL(dmam_alloc_noncoherent);
144
145/**
146 * dmam_free_noncoherent - Managed dma_free_noncoherent()
147 * @dev: Device to free noncoherent memory for
148 * @size: Size of allocation
149 * @vaddr: Virtual address of the memory to free
150 * @dma_handle: DMA handle of the memory to free
151 *
152 * Managed dma_free_noncoherent().
153 */
154void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
155 dma_addr_t dma_handle)
156{
157 struct dma_devres match_data = { size, vaddr, dma_handle };
158
159 dma_free_noncoherent(dev, size, vaddr, dma_handle);
160 WARN_ON(!devres_destroy(dev, dmam_noncoherent_release, dmam_match,
161 &match_data));
162}
163EXPORT_SYMBOL(dmam_free_noncoherent);
164
165#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
166
167static void dmam_coherent_decl_release(struct device *dev, void *res)
168{
169 dma_release_declared_memory(dev);
170}
171
172/**
173 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
174 * @dev: Device to declare coherent memory for
175 * @bus_addr: Bus address of coherent memory to be declared
176 * @device_addr: Device address of coherent memory to be declared
177 * @size: Size of coherent memory to be declared
178 * @flags: Flags
179 *
180 * Managed dma_declare_coherent_memory().
181 *
182 * RETURNS:
183 * 0 on success, -errno on failure.
184 */
185int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
186 dma_addr_t device_addr, size_t size, int flags)
187{
188 void *res;
189 int rc;
190
191 res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
192 if (!res)
193 return -ENOMEM;
194
195 rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
196 flags);
197 if (rc == 0)
198 devres_add(dev, res);
199 else
200 devres_free(res);
201
202 return rc;
203}
204EXPORT_SYMBOL(dmam_declare_coherent_memory);
205
206/**
207 * dmam_release_declared_memory - Managed dma_release_declared_memory().
208 * @dev: Device to release declared coherent memory for
209 *
210 * Managed dma_release_declared_memory().
211 */
212void dmam_release_declared_memory(struct device *dev)
213{
214 WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
215}
216EXPORT_SYMBOL(dmam_release_declared_memory);
217
218#endif
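As a usage sketch (the ring size, structure and field names are assumptions, not from the patch): a driver can allocate a DMA descriptor ring with dmam_alloc_coherent() and never call dma_free_coherent() itself.

#define FOO_RING_BYTES	4096		/* arbitrary size for the sketch */

struct foo_priv {
	void *ring;			/* CPU address of the descriptor ring */
	dma_addr_t ring_dma;		/* bus address handed to the hardware */
};

static int foo_alloc_ring(struct device *dev, struct foo_priv *priv)
{
	priv->ring = dmam_alloc_coherent(dev, FOO_RING_BYTES,
					 &priv->ring_dma, GFP_KERNEL);
	if (!priv->ring)
		return -ENOMEM;

	/* dmam_coherent_release() frees this on driver detach */
	return 0;
}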
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index f95d50277274..cd467c9f33b3 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -415,8 +415,67 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	spin_unlock_irqrestore (&pool->lock, flags);
 }
 
+/*
+ * Managed DMA pool
+ */
+static void dmam_pool_release(struct device *dev, void *res)
+{
+	struct dma_pool *pool = *(struct dma_pool **)res;
+
+	dma_pool_destroy(pool);
+}
+
+static int dmam_pool_match(struct device *dev, void *res, void *match_data)
+{
+	return *(struct dma_pool **)res == match_data;
+}
+
+/**
+ * dmam_pool_create - Managed dma_pool_create()
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @allocation: returned blocks won't cross this boundary (or zero)
+ *
+ * Managed dma_pool_create().  DMA pool created with this function is
+ * automatically destroyed on driver detach.
+ */
+struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
+				  size_t size, size_t align, size_t allocation)
+{
+	struct dma_pool **ptr, *pool;
+
+	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
+	if (pool)
+		devres_add(dev, ptr);
+	else
+		devres_free(ptr);
+
+	return pool;
+}
+
+/**
+ * dmam_pool_destroy - Managed dma_pool_destroy()
+ * @pool: dma pool that will be destroyed
+ *
+ * Managed dma_pool_destroy().
+ */
+void dmam_pool_destroy(struct dma_pool *pool)
+{
+	struct device *dev = pool->dev;
+
+	dma_pool_destroy(pool);
+	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
+}
 
 EXPORT_SYMBOL (dma_pool_create);
 EXPORT_SYMBOL (dma_pool_destroy);
 EXPORT_SYMBOL (dma_pool_alloc);
 EXPORT_SYMBOL (dma_pool_free);
+EXPORT_SYMBOL (dmam_pool_create);
+EXPORT_SYMBOL (dmam_pool_destroy);
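A short sketch of the managed pool in use (the block size, pci_dev context and cmd_pool field are assumptions): create the pool in probe and let devres destroy it, instead of pairing dma_pool_create()/dma_pool_destroy() by hand.

struct foo_priv {
	struct dma_pool *cmd_pool;	/* hypothetical command-block pool */
};

static int foo_create_pool(struct pci_dev *pdev, struct foo_priv *priv)
{
	/* 64-byte blocks, 8-byte aligned, no boundary restriction */
	priv->cmd_pool = dmam_pool_create("foo-cmd", &pdev->dev, 64, 8, 0);
	if (!priv->cmd_pool)
		return -ENOMEM;

	/* dmam_pool_release() calls dma_pool_destroy() on detach */
	return 0;
}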