Diffstat:
-rw-r--r--   Documentation/driver-model/devres.txt   268
-rw-r--r--   drivers/base/Kconfig                      12
-rw-r--r--   drivers/base/Makefile                      1
-rw-r--r--   drivers/base/base.h                        1
-rw-r--r--   drivers/base/core.c                        2
-rw-r--r--   drivers/base/dd.c                          3
-rw-r--r--   drivers/base/devres.c                    644
-rw-r--r--   drivers/base/dma-mapping.c               218
-rw-r--r--   drivers/base/dmapool.c                    59
-rw-r--r--   drivers/pci/pci.c                        127
-rw-r--r--   include/linux/device.h                    38
-rw-r--r--   include/linux/dma-mapping.h               29
-rw-r--r--   include/linux/dmapool.h                    7
-rw-r--r--   include/linux/interrupt.h                  6
-rw-r--r--   include/linux/io.h                        17
-rw-r--r--   include/linux/ioport.h                    20
-rw-r--r--   include/linux/pci.h                        9
-rw-r--r--   kernel/irq/manage.c                       86
-rw-r--r--   kernel/resource.c                         62
-rw-r--r--   lib/Makefile                               3
-rw-r--r--   lib/iomap.c                              246
21 files changed, 1853 insertions(+), 5 deletions(-)
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
new file mode 100644
index 000000000000..5163b85308f5
--- /dev/null
+++ b/Documentation/driver-model/devres.txt
@@ -0,0 +1,268 @@
1Devres - Managed Device Resource
2================================
3
4Tejun Heo <teheo@suse.de>
5
6First draft 10 January 2007
7
8
91. Intro : Huh? Devres?
102. Devres : Devres in a nutshell
113. Devres Group : Group devres entries and release them together
124. Details : Lifetime rules, calling context, ...
135. Overhead : How much do we have to pay for this?
146. List of managed interfaces : Currently implemented managed interfaces
15
16
17 1. Intro
18 --------
19
20devres came up while trying to convert libata to use iomap. Each
21iomapped address should be kept and unmapped on driver detach. For
22example, a plain SFF ATA controller (that is, good old PCI IDE) in
23native mode makes use of 5 PCI BARs and all of them should be
24maintained.
25
26As with many other device drivers, libata low level drivers have
27their share of bugs in ->remove and the ->probe failure path. Well,
28yes, that's probably because libata low level driver developers are a
29lazy bunch, but aren't all low level driver developers? After spending
30a day fiddling with braindamaged hardware with no documentation or
31braindamaged documentation, if it's finally working, well, it's working.
32
33For one reason or another, low level drivers don't receive as much
34attention or testing as core code, and bugs on driver detach or
35initialization failure don't happen often enough to be noticeable.
36The init failure path is worse because it's much less travelled while
37still needing to handle multiple entry points.
38
39So, many low level drivers end up leaking resources on driver detach
40and having a half broken failure path implementation in ->probe() which
41leaks resources or even causes an oops when a failure occurs. iomap
42adds more to this mix. So do MSI and MSI-X.
43
44
45 2. Devres
46 ---------
47
48devres is basically a linked list of arbitrarily sized memory areas
49associated with a struct device. Each devres entry is associated with
50a release function. A devres can be released in several ways. No
51matter what, all devres entries are released on driver detach. On
52release, the associated release function is invoked and then the
53devres entry is freed.
54
55Managed interfaces are created for resources commonly used by device
56drivers using devres. For example, coherent DMA memory is acquired
57using dma_alloc_coherent(). The managed version is called
58dmam_alloc_coherent(). It is identical to dma_alloc_coherent() except
59that the DMA memory allocated using it is managed and will be
60automatically released on driver detach. The implementation looks
61like the following.
62
63 struct dma_devres {
64 size_t size;
65 void *vaddr;
66 dma_addr_t dma_handle;
67 };
68
69 static void dmam_coherent_release(struct device *dev, void *res)
70 {
71 struct dma_devres *this = res;
72
73 dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
74 }
75
76 dmam_alloc_coherent(dev, size, dma_handle, gfp)
77 {
78 struct dma_devres *dr;
79 void *vaddr;
80
81 dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
82 ...
83
84 /* alloc DMA memory as usual */
85 vaddr = dma_alloc_coherent(...);
86 ...
87
88 /* record size, vaddr, dma_handle in dr */
89 dr->vaddr = vaddr;
90 ...
91
92 devres_add(dev, dr);
93
94 return vaddr;
95 }
96
97If a driver uses dmam_alloc_coherent(), the area is guaranteed to be
98freed whether initialization fails half-way or the device gets
99detached. If most resources are acquired using managed interfaces, a
100driver can have much simpler init and exit code. The init path
101basically looks like the following.
102
103 my_init_one()
104 {
105 struct mydev *d;
106
107 d = devm_kzalloc(dev, sizeof(*d), GFP_KERNEL);
108 if (!d)
109 return -ENOMEM;
110
111 d->ring = dmam_alloc_coherent(...);
112 if (!d->ring)
113 return -ENOMEM;
114
115 if (check something)
116 return -EINVAL;
117 ...
118
119 return register_to_upper_layer(d);
120 }
121
122And exit path,
123
124 my_remove_one()
125 {
126 unregister_from_upper_layer(d);
127 shutdown_my_hardware();
128 }
129
130As shown above, low level drivers can be simplified a lot by using
131devres. Complexity is shifted from less maintained low level drivers
132to the better maintained higher layer. Also, as the init failure path
133is shared with the exit path, both can get more testing.
134
135
136 3. Devres group
137 ---------------
138
139Devres entries can be grouped using devres groups. When a group is
140released, all contained normal devres entries and properly nested
141groups are released. One usage is to roll back a series of acquired
142resources on failure. For example,
143
144 if (!devres_open_group(dev, NULL, GFP_KERNEL))
145 return -ENOMEM;
146
147 acquire A;
148 if (failed)
149 goto err;
150
151 acquire B;
152 if (failed)
153 goto err;
154 ...
155
156 devres_remove_group(dev, NULL);
157 return 0;
158
159 err:
160 devres_release_group(dev, NULL);
161 return err_code;
162
163As resource acquisition failure usually means probe failure, constructs
164like the above are usually useful in midlayer drivers (e.g. libata core
165layer) where an interface function shouldn't have side effects on
166failure. For LLDs, just returning an error code suffices in most cases.
167
168Each group is identified by void *id. It can either be explicitly
169specified by @id argument to devres_open_group() or automatically
170created by passing NULL as @id as in the above example. In both
171cases, devres_open_group() returns the group's id. The returned id
172can be passed to other devres functions to select the target group.
173If NULL is given to those functions, the latest open group is
174selected.
175
176For example, you can do something like the following.
177
178 int my_midlayer_create_something()
179 {
180 if (!devres_open_group(dev, my_midlayer_create_something, GFP_KERNEL))
181 return -ENOMEM;
182
183 ...
184
185 devres_close_group(dev, my_midlayer_create_something);
186 return 0;
187 }
188
189 void my_midlayer_destroy_something()
190 {
191 devres_release_group(dev, my_midlayer_create_something);
192 }
193
194
195 4. Details
196 ----------
197
198Lifetime of a devres entry begins on devres allocation and finishes
199when it is released or destroyed (removed and freed) - no reference
200counting.
201
202devres core guarantees atomicity to all basic devres operations and
203has support for single-instance devres types (atomic
204lookup-and-add-if-not-found). Other than that, synchronizing
205concurrent accesses to allocated devres data is the caller's
206responsibility. This is usually a non-issue because bus ops and
207resource allocations already do the job.
208
209For an example of single-instance devres type, read pcim_iomap_table()
210in lib/iomap.c.
211
212All devres interface functions can be called from any context,
213including atomic context, if the right gfp mask is given.
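
For example, a driver may safely allocate managed memory from its
interrupt handler by passing GFP_ATOMIC (illustrative snippet):

	buf = devm_kzalloc(dev, size, GFP_ATOMIC);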
214
215
216 5. Overhead
217 -----------
218
219Each devres bookkeeping info is allocated together with requested data
220area. With the debug option turned off, bookkeeping info occupies 16
221bytes on 32bit machines and 24 bytes on 64bit (three pointers rounded
222up to ull alignment). If a singly linked list were used, it could be
223reduced to two pointers (8 bytes on 32bit, 16 bytes on 64bit).
224
225Each devres group occupies 8 pointers. It could be reduced to 6 if a
226singly linked list were used.
227
228Memory space overhead on an ahci controller with two ports is between
229300 and 400 bytes on a 32bit machine after naive conversion (we can
230certainly invest a bit more effort into the libata core layer).
231
232
233 6. List of managed interfaces
234 -----------------------------
235
236IO region
237 devm_request_region()
238 devm_request_mem_region()
239 devm_release_region()
240 devm_release_mem_region()
241
242IRQ
243 devm_request_irq()
244 devm_free_irq()
245
246DMA
247 dmam_alloc_coherent()
248 dmam_free_coherent()
249 dmam_alloc_noncoherent()
250 dmam_free_noncoherent()
251 dmam_declare_coherent_memory()
252 dmam_pool_create()
253 dmam_pool_destroy()
254
255PCI
256 pcim_enable_device() : after success, all PCI ops become managed
257 pcim_pin_device() : keep PCI device enabled after release
258
259IOMAP
260 devm_ioport_map()
261 devm_ioport_unmap()
262 devm_ioremap()
263 devm_ioremap_nocache()
264 devm_iounmap()
265 pcim_iomap()
266 pcim_iounmap()
267 pcim_iomap_table() : array of mapped addresses indexed by BAR
268 pcim_iomap_regions() : do request_region() and iomap() on multiple BARs
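
As a rough illustration of how the interfaces above combine (a
hypothetical driver sketch; my_probe, my_irq_handler and "mydrv" are
made-up names, not part of this patch):

	static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
	{
		void __iomem * const *iomap;
		int rc;

		rc = pcim_enable_device(pdev);	/* PCI ops become managed */
		if (rc)
			return rc;

		/* request and iomap BAR 0; iomap[0] holds the mapping */
		rc = pcim_iomap_regions(pdev, 1 << 0, "mydrv");
		if (rc)
			return rc;
		iomap = pcim_iomap_table(pdev);

		rc = devm_request_irq(&pdev->dev, pdev->irq, my_irq_handler,
				      IRQF_SHARED, "mydrv", pdev);
		if (rc)
			return rc;

		/* nothing to unroll - everything above is released
		 * automatically on probe failure or driver detach */
		return 0;
	}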
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 1429f3a2629e..5d6312e33490 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -37,6 +37,18 @@ config DEBUG_DRIVER
 
 	  If you are unsure about this, say N here.
 
+config DEBUG_DEVRES
+	bool "Managed device resources verbose debug messages"
+	depends on DEBUG_KERNEL
+	help
+	  This option enables kernel parameter devres.log. If set to
+	  non-zero, devres debug messages are printed. Select this if
+	  you are having a problem with devres or want to debug
+	  resource management for a managed device. devres.log can be
+	  switched on and off from sysfs node.
+
+	  If you are unsure about this, say N here.
+
 config SYS_HYPERVISOR
 	bool
 	default n
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index 7bbb9eeda235..e9eb7382ac3a 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -3,6 +3,7 @@
 obj-y			:= core.o sys.o bus.o dd.o \
 			   driver.o class.o platform.o \
 			   cpu.o firmware.o init.o map.o dmapool.o \
+			   dma-mapping.o devres.o \
 			   attribute_container.o transport_class.o
 obj-y			+= power/
 obj-$(CONFIG_ISA)	+= isa.o
diff --git a/drivers/base/base.h b/drivers/base/base.h
index d26644a59537..de7e1442ce60 100644
--- a/drivers/base/base.h
+++ b/drivers/base/base.h
@@ -44,3 +44,4 @@ struct class_device_attribute *to_class_dev_attr(struct attribute *_attr)
 
 extern char *make_class_name(const char *name, struct kobject *kobj);
 
+extern int devres_release_all(struct device *dev);
diff --git a/drivers/base/core.c b/drivers/base/core.c
index e13614241c9e..a8ac34ba6107 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -428,6 +428,8 @@ void device_initialize(struct device *dev)
 	INIT_LIST_HEAD(&dev->dma_pools);
 	INIT_LIST_HEAD(&dev->node);
 	init_MUTEX(&dev->sem);
+	spin_lock_init(&dev->devres_lock);
+	INIT_LIST_HEAD(&dev->devres_head);
 	device_init_wakeup(dev, 0);
 	set_dev_node(dev, -1);
 }
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index b5bf243d9cd6..6a48824e43ff 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -112,6 +112,7 @@ static int really_probe(void *void_data)
 	atomic_inc(&probe_count);
 	pr_debug("%s: Probing driver %s with device %s\n",
 		 drv->bus->name, drv->name, dev->bus_id);
+	WARN_ON(!list_empty(&dev->devres_head));
 
 	dev->driver = drv;
 	if (driver_sysfs_add(dev)) {
@@ -137,6 +138,7 @@ static int really_probe(void *void_data)
 	goto done;
 
 probe_failed:
+	devres_release_all(dev);
 	driver_sysfs_remove(dev);
 	dev->driver = NULL;
 
@@ -327,6 +329,7 @@ static void __device_release_driver(struct device * dev)
 			dev->bus->remove(dev);
 		else if (drv->remove)
 			drv->remove(dev);
+		devres_release_all(dev);
 		dev->driver = NULL;
 		put_driver(drv);
 	}
diff --git a/drivers/base/devres.c b/drivers/base/devres.c
new file mode 100644
index 000000000000..e177c9533b6c
--- /dev/null
+++ b/drivers/base/devres.c
@@ -0,0 +1,644 @@
1/*
2 * drivers/base/devres.c - device resource management
3 *
4 * Copyright (c) 2006 SUSE Linux Products GmbH
5 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
6 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/device.h>
11#include <linux/module.h>
12
13struct devres_node {
14 struct list_head entry;
15 dr_release_t release;
16#ifdef CONFIG_DEBUG_DEVRES
17 const char *name;
18 size_t size;
19#endif
20};
21
22struct devres {
23 struct devres_node node;
24 /* -- 3 pointers */
25 unsigned long long data[]; /* guarantee ull alignment */
26};
27
28struct devres_group {
29 struct devres_node node[2];
30 void *id;
31 int color;
32 /* -- 8 pointers */
33};
34
35#ifdef CONFIG_DEBUG_DEVRES
36static int log_devres = 0;
37module_param_named(log, log_devres, int, S_IRUGO | S_IWUSR);
38
39static void set_node_dbginfo(struct devres_node *node, const char *name,
40 size_t size)
41{
42 node->name = name;
43 node->size = size;
44}
45
46static void devres_log(struct device *dev, struct devres_node *node,
47 const char *op)
48{
49 if (unlikely(log_devres))
50 dev_printk(KERN_ERR, dev, "DEVRES %3s %p %s (%lu bytes)\n",
51 op, node, node->name, (unsigned long)node->size);
52}
53#else /* CONFIG_DEBUG_DEVRES */
54#define set_node_dbginfo(node, n, s) do {} while (0)
55#define devres_log(dev, node, op) do {} while (0)
56#endif /* CONFIG_DEBUG_DEVRES */
57
58/*
59 * Release functions for devres group. These callbacks are used only
60 * for identification.
61 */
62static void group_open_release(struct device *dev, void *res)
63{
64 /* noop */
65}
66
67static void group_close_release(struct device *dev, void *res)
68{
69 /* noop */
70}
71
72static struct devres_group * node_to_group(struct devres_node *node)
73{
74 if (node->release == &group_open_release)
75 return container_of(node, struct devres_group, node[0]);
76 if (node->release == &group_close_release)
77 return container_of(node, struct devres_group, node[1]);
78 return NULL;
79}
80
81static __always_inline struct devres * alloc_dr(dr_release_t release,
82 size_t size, gfp_t gfp)
83{
84 size_t tot_size = sizeof(struct devres) + size;
85 struct devres *dr;
86
87 dr = kmalloc_track_caller(tot_size, gfp);
88 if (unlikely(!dr))
89 return NULL;
90
91 memset(dr, 0, tot_size);
92 INIT_LIST_HEAD(&dr->node.entry);
93 dr->node.release = release;
94 return dr;
95}
96
97static void add_dr(struct device *dev, struct devres_node *node)
98{
99 devres_log(dev, node, "ADD");
100 BUG_ON(!list_empty(&node->entry));
101 list_add_tail(&node->entry, &dev->devres_head);
102}
103
104/**
105 * devres_alloc - Allocate device resource data
106 * @release: Release function devres will be associated with
107 * @size: Allocation size
108 * @gfp: Allocation flags
109 *
110 * Allocate devres of @size bytes. The allocated area is zeroed, then
111 * associated with @release. The returned pointer can be passed to
112 * other devres_*() functions.
113 *
114 * RETURNS:
115 * Pointer to allocated devres on success, NULL on failure.
116 */
117#ifdef CONFIG_DEBUG_DEVRES
118void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
119 const char *name)
120{
121 struct devres *dr;
122
123 dr = alloc_dr(release, size, gfp);
124 if (unlikely(!dr))
125 return NULL;
126 set_node_dbginfo(&dr->node, name, size);
127 return dr->data;
128}
129EXPORT_SYMBOL_GPL(__devres_alloc);
130#else
131void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp)
132{
133 struct devres *dr;
134
135 dr = alloc_dr(release, size, gfp);
136 if (unlikely(!dr))
137 return NULL;
138 return dr->data;
139}
140EXPORT_SYMBOL_GPL(devres_alloc);
141#endif
142
143/**
144 * devres_free - Free device resource data
145 * @res: Pointer to devres data to free
146 *
147 * Free devres created with devres_alloc().
148 */
149void devres_free(void *res)
150{
151 if (res) {
152 struct devres *dr = container_of(res, struct devres, data);
153
154 BUG_ON(!list_empty(&dr->node.entry));
155 kfree(dr);
156 }
157}
158EXPORT_SYMBOL_GPL(devres_free);
159
160/**
161 * devres_add - Register device resource
162 * @dev: Device to add resource to
163 * @res: Resource to register
164 *
165 * Register devres @res to @dev. @res should have been allocated
166 * using devres_alloc(). On driver detach, the associated release
167 * function will be invoked and devres will be freed automatically.
168 */
169void devres_add(struct device *dev, void *res)
170{
171 struct devres *dr = container_of(res, struct devres, data);
172 unsigned long flags;
173
174 spin_lock_irqsave(&dev->devres_lock, flags);
175 add_dr(dev, &dr->node);
176 spin_unlock_irqrestore(&dev->devres_lock, flags);
177}
178EXPORT_SYMBOL_GPL(devres_add);
179
180static struct devres *find_dr(struct device *dev, dr_release_t release,
181 dr_match_t match, void *match_data)
182{
183 struct devres_node *node;
184
185 list_for_each_entry_reverse(node, &dev->devres_head, entry) {
186 struct devres *dr = container_of(node, struct devres, node);
187
188 if (node->release != release)
189 continue;
190 if (match && !match(dev, dr->data, match_data))
191 continue;
192 return dr;
193 }
194
195 return NULL;
196}
197
198/**
199 * devres_find - Find device resource
200 * @dev: Device to lookup resource from
201 * @release: Look for resources associated with this release function
202 * @match: Match function (optional)
203 * @match_data: Data for the match function
204 *
205 * Find the latest devres of @dev which is associated with @release
206 * and for which @match returns 1. If @match is NULL, it's considered
207 * to match all.
208 *
209 * RETURNS:
210 * Pointer to found devres, NULL if not found.
211 */
212void * devres_find(struct device *dev, dr_release_t release,
213 dr_match_t match, void *match_data)
214{
215 struct devres *dr;
216 unsigned long flags;
217
218 spin_lock_irqsave(&dev->devres_lock, flags);
219 dr = find_dr(dev, release, match, match_data);
220 spin_unlock_irqrestore(&dev->devres_lock, flags);
221
222 if (dr)
223 return dr->data;
224 return NULL;
225}
226EXPORT_SYMBOL_GPL(devres_find);
227
228/**
229 * devres_get - Find devres, if non-existent, add one atomically
230 * @dev: Device to lookup or add devres for
231 * @new_res: Pointer to new initialized devres to add if not found
232 * @match: Match function (optional)
233 * @match_data: Data for the match function
234 *
235 * Find the latest devres of @dev which has the same release function
236 * as @new_res and for which @match returns 1. If found, @new_res is
237 * freed; otherwise, @new_res is added atomically.
238 *
239 * RETURNS:
240 * Pointer to found or added devres.
241 */
242void * devres_get(struct device *dev, void *new_res,
243 dr_match_t match, void *match_data)
244{
245 struct devres *new_dr = container_of(new_res, struct devres, data);
246 struct devres *dr;
247 unsigned long flags;
248
249 spin_lock_irqsave(&dev->devres_lock, flags);
250 dr = find_dr(dev, new_dr->node.release, match, match_data);
251 if (!dr) {
252 add_dr(dev, &new_dr->node);
253 dr = new_dr;
254 new_dr = NULL;
255 }
256 spin_unlock_irqrestore(&dev->devres_lock, flags);
257 devres_free(new_dr);
258
259 return dr->data;
260}
261EXPORT_SYMBOL_GPL(devres_get);
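
Illustrative aside: devres_get() is the building block for
single-instance devres types. A hedged sketch of the usual pattern
(my_state and my_state_release are hypothetical; compare get_pci_dr()
in the drivers/pci/pci.c hunk below):

	static struct my_state *get_my_state(struct device *dev)
	{
		struct my_state *new_st, *st;

		st = devres_find(dev, my_state_release, NULL, NULL);
		if (st)
			return st;

		new_st = devres_alloc(my_state_release, sizeof(*new_st),
				      GFP_KERNEL);
		if (!new_st)
			return NULL;

		/* on a race, ours is freed and the winner is returned */
		return devres_get(dev, new_st, NULL, NULL);
	}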
262
263/**
264 * devres_remove - Find a device resource and remove it
265 * @dev: Device to find resource from
266 * @release: Look for resources associated with this release function
267 * @match: Match function (optional)
268 * @match_data: Data for the match function
269 *
270 * Find the latest devres of @dev associated with @release and for
271 * which @match returns 1. If @match is NULL, it's considered to
272 * match all. If found, the resource is removed atomically and
273 * returned.
274 *
275 * RETURNS:
276 * Pointer to removed devres on success, NULL if not found.
277 */
278void * devres_remove(struct device *dev, dr_release_t release,
279 dr_match_t match, void *match_data)
280{
281 struct devres *dr;
282 unsigned long flags;
283
284 spin_lock_irqsave(&dev->devres_lock, flags);
285 dr = find_dr(dev, release, match, match_data);
286 if (dr) {
287 list_del_init(&dr->node.entry);
288 devres_log(dev, &dr->node, "REM");
289 }
290 spin_unlock_irqrestore(&dev->devres_lock, flags);
291
292 if (dr)
293 return dr->data;
294 return NULL;
295}
296EXPORT_SYMBOL_GPL(devres_remove);
297
298/**
299 * devres_destroy - Find a device resource and destroy it
300 * @dev: Device to find resource from
301 * @release: Look for resources associated with this release function
302 * @match: Match function (optional)
303 * @match_data: Data for the match function
304 *
305 * Find the latest devres of @dev associated with @release and for
306 * which @match returns 1. If @match is NULL, it's considered to
307 * match all. If found, the resource is removed atomically and freed.
308 *
309 * RETURNS:
310 * 0 if devres is found and freed, -ENOENT if not found.
311 */
312int devres_destroy(struct device *dev, dr_release_t release,
313 dr_match_t match, void *match_data)
314{
315 void *res;
316
317 res = devres_remove(dev, release, match, match_data);
318 if (unlikely(!res))
319 return -ENOENT;
320
321 devres_free(res);
322 return 0;
323}
324EXPORT_SYMBOL_GPL(devres_destroy);
325
326static int remove_nodes(struct device *dev,
327 struct list_head *first, struct list_head *end,
328 struct list_head *todo)
329{
330 int cnt = 0, nr_groups = 0;
331 struct list_head *cur;
332
333 /* First pass - move normal devres entries to @todo and clear
334 * devres_group colors.
335 */
336 cur = first;
337 while (cur != end) {
338 struct devres_node *node;
339 struct devres_group *grp;
340
341 node = list_entry(cur, struct devres_node, entry);
342 cur = cur->next;
343
344 grp = node_to_group(node);
345 if (grp) {
346 /* clear color of group markers in the first pass */
347 grp->color = 0;
348 nr_groups++;
349 } else {
350 /* regular devres entry */
351 if (&node->entry == first)
352 first = first->next;
353 list_move_tail(&node->entry, todo);
354 cnt++;
355 }
356 }
357
358 if (!nr_groups)
359 return cnt;
360
361 /* Second pass - Scan groups and color them. A group gets
362 * color value of two iff the group is wholly contained in
363 * [cur, end). That is, for a closed group, both opening and
364 * closing markers should be in the range, while just the
365 * opening marker is enough for an open group.
366 */
367 cur = first;
368 while (cur != end) {
369 struct devres_node *node;
370 struct devres_group *grp;
371
372 node = list_entry(cur, struct devres_node, entry);
373 cur = cur->next;
374
375 grp = node_to_group(node);
376 BUG_ON(!grp || list_empty(&grp->node[0].entry));
377
378 grp->color++;
379 if (list_empty(&grp->node[1].entry))
380 grp->color++;
381
382 BUG_ON(grp->color <= 0 || grp->color > 2);
383 if (grp->color == 2) {
384 /* No need to update cur or end. The removed
385 * nodes are always before both.
386 */
387 list_move_tail(&grp->node[0].entry, todo);
388 list_del_init(&grp->node[1].entry);
389 }
390 }
391
392 return cnt;
393}
394
395static int release_nodes(struct device *dev, struct list_head *first,
396 struct list_head *end, unsigned long flags)
397{
398 LIST_HEAD(todo);
399 int cnt;
400 struct devres *dr, *tmp;
401
402 cnt = remove_nodes(dev, first, end, &todo);
403
404 spin_unlock_irqrestore(&dev->devres_lock, flags);
405
406 /* Release. Note that both devres and devres_group are
407 * handled as devres in the following loop. This is safe.
408 */
409 list_for_each_entry_safe_reverse(dr, tmp, &todo, node.entry) {
410 devres_log(dev, &dr->node, "REL");
411 dr->node.release(dev, dr->data);
412 kfree(dr);
413 }
414
415 return cnt;
416}
417
418/**
419 * devres_release_all - Release all resources
420 * @dev: Device to release resources for
421 *
422 * Release all resources associated with @dev. This function is
423 * called on driver detach.
424 */
425int devres_release_all(struct device *dev)
426{
427 unsigned long flags;
428
429 spin_lock_irqsave(&dev->devres_lock, flags);
430 return release_nodes(dev, dev->devres_head.next, &dev->devres_head,
431 flags);
432}
433
434/**
435 * devres_open_group - Open a new devres group
436 * @dev: Device to open devres group for
437 * @id: Separator ID
438 * @gfp: Allocation flags
439 *
440 * Open a new devres group for @dev with @id. For @id, using a
441 * pointer to an object which won't be used for another group is
442 * recommended. If @id is NULL, an address-wise unique ID is created.
443 *
444 * RETURNS:
445 * ID of the new group, NULL on failure.
446 */
447void * devres_open_group(struct device *dev, void *id, gfp_t gfp)
448{
449 struct devres_group *grp;
450 unsigned long flags;
451
452 grp = kmalloc(sizeof(*grp), gfp);
453 if (unlikely(!grp))
454 return NULL;
455
456 grp->node[0].release = &group_open_release;
457 grp->node[1].release = &group_close_release;
458 INIT_LIST_HEAD(&grp->node[0].entry);
459 INIT_LIST_HEAD(&grp->node[1].entry);
460 set_node_dbginfo(&grp->node[0], "grp<", 0);
461 set_node_dbginfo(&grp->node[1], "grp>", 0);
462 grp->id = grp;
463 if (id)
464 grp->id = id;
465
466 spin_lock_irqsave(&dev->devres_lock, flags);
467 add_dr(dev, &grp->node[0]);
468 spin_unlock_irqrestore(&dev->devres_lock, flags);
469 return grp->id;
470}
471EXPORT_SYMBOL_GPL(devres_open_group);
472
473/* Find devres group with ID @id. If @id is NULL, look for the latest. */
474static struct devres_group * find_group(struct device *dev, void *id)
475{
476 struct devres_node *node;
477
478 list_for_each_entry_reverse(node, &dev->devres_head, entry) {
479 struct devres_group *grp;
480
481 if (node->release != &group_open_release)
482 continue;
483
484 grp = container_of(node, struct devres_group, node[0]);
485
486 if (id) {
487 if (grp->id == id)
488 return grp;
489 } else if (list_empty(&grp->node[1].entry))
490 return grp;
491 }
492
493 return NULL;
494}
495
496/**
497 * devres_close_group - Close a devres group
498 * @dev: Device to close devres group for
499 * @id: ID of target group, can be NULL
500 *
501 * Close the group identified by @id. If @id is NULL, the latest open
502 * group is selected.
503 */
504void devres_close_group(struct device *dev, void *id)
505{
506 struct devres_group *grp;
507 unsigned long flags;
508
509 spin_lock_irqsave(&dev->devres_lock, flags);
510
511 grp = find_group(dev, id);
512 if (grp)
513 add_dr(dev, &grp->node[1]);
514 else
515 WARN_ON(1);
516
517 spin_unlock_irqrestore(&dev->devres_lock, flags);
518}
519EXPORT_SYMBOL_GPL(devres_close_group);
520
521/**
522 * devres_remove_group - Remove a devres group
523 * @dev: Device to remove group for
524 * @id: ID of target group, can be NULL
525 *
526 * Remove the group identified by @id. If @id is NULL, the latest
527 * open group is selected. Note that removing a group doesn't affect
528 * any other resources.
529 */
530void devres_remove_group(struct device *dev, void *id)
531{
532 struct devres_group *grp;
533 unsigned long flags;
534
535 spin_lock_irqsave(&dev->devres_lock, flags);
536
537 grp = find_group(dev, id);
538 if (grp) {
539 list_del_init(&grp->node[0].entry);
540 list_del_init(&grp->node[1].entry);
541 devres_log(dev, &grp->node[0], "REM");
542 } else
543 WARN_ON(1);
544
545 spin_unlock_irqrestore(&dev->devres_lock, flags);
546
547 kfree(grp);
548}
549EXPORT_SYMBOL_GPL(devres_remove_group);
550
551/**
552 * devres_release_group - Release resources in a devres group
553 * @dev: Device to release group for
554 * @id: ID of target group, can be NULL
555 *
556 * Release all resources in the group identified by @id. If @id is
557 * NULL, the latest open group is selected. The selected group and
558 * groups properly nested inside the selected group are removed.
559 *
560 * RETURNS:
561 * The number of released non-group resources.
562 */
563int devres_release_group(struct device *dev, void *id)
564{
565 struct devres_group *grp;
566 unsigned long flags;
567 int cnt = 0;
568
569 spin_lock_irqsave(&dev->devres_lock, flags);
570
571 grp = find_group(dev, id);
572 if (grp) {
573 struct list_head *first = &grp->node[0].entry;
574 struct list_head *end = &dev->devres_head;
575
576 if (!list_empty(&grp->node[1].entry))
577 end = grp->node[1].entry.next;
578
579 cnt = release_nodes(dev, first, end, flags);
580 } else {
581 WARN_ON(1);
582 spin_unlock_irqrestore(&dev->devres_lock, flags);
583 }
584
585 return cnt;
586}
587EXPORT_SYMBOL_GPL(devres_release_group);
588
589/*
590 * Managed kzalloc/kfree
591 */
592static void devm_kzalloc_release(struct device *dev, void *res)
593{
594 /* noop */
595}
596
597static int devm_kzalloc_match(struct device *dev, void *res, void *data)
598{
599 return res == data;
600}
601
602/**
603 * devm_kzalloc - Managed kzalloc
604 * @dev: Device to allocate memory for
605 * @size: Allocation size
606 * @gfp: Allocation gfp flags
607 *
608 * Managed kzalloc. Memory allocated with this function is
609 * automatically freed on driver detach. Like all other devres
610 * resources, guaranteed alignment is unsigned long long.
611 *
612 * RETURNS:
613 * Pointer to allocated memory on success, NULL on failure.
614 */
615void * devm_kzalloc(struct device *dev, size_t size, gfp_t gfp)
616{
617 struct devres *dr;
618
619 /* use raw alloc_dr for kmalloc caller tracing */
620 dr = alloc_dr(devm_kzalloc_release, size, gfp);
621 if (unlikely(!dr))
622 return NULL;
623
624 set_node_dbginfo(&dr->node, "devm_kzalloc_release", size);
625 devres_add(dev, dr->data);
626 return dr->data;
627}
628EXPORT_SYMBOL_GPL(devm_kzalloc);
629
630/**
631 * devm_kfree - Managed kfree
632 * @dev: Device this memory belongs to
633 * @p: Memory to free
634 *
635 * Free memory allocated with devm_kzalloc().
636 */
637void devm_kfree(struct device *dev, void *p)
638{
639 int rc;
640
641 rc = devres_destroy(dev, devm_kzalloc_release, devm_kzalloc_match, p);
642 WARN_ON(rc);
643}
644EXPORT_SYMBOL_GPL(devm_kfree);
diff --git a/drivers/base/dma-mapping.c b/drivers/base/dma-mapping.c
new file mode 100644
index 000000000000..ca9186f70a69
--- /dev/null
+++ b/drivers/base/dma-mapping.c
@@ -0,0 +1,218 @@
1/*
2 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
3 *
4 * Copyright (c) 2006 SUSE Linux Products GmbH
5 * Copyright (c) 2006 Tejun Heo <teheo@suse.de>
6 *
7 * This file is released under the GPLv2.
8 */
9
10#include <linux/dma-mapping.h>
11
12/*
13 * Managed DMA API
14 */
15struct dma_devres {
16 size_t size;
17 void *vaddr;
18 dma_addr_t dma_handle;
19};
20
21static void dmam_coherent_release(struct device *dev, void *res)
22{
23 struct dma_devres *this = res;
24
25 dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
26}
27
28static void dmam_noncoherent_release(struct device *dev, void *res)
29{
30 struct dma_devres *this = res;
31
32 dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
33}
34
35static int dmam_match(struct device *dev, void *res, void *match_data)
36{
37 struct dma_devres *this = res, *match = match_data;
38
39 if (this->vaddr == match->vaddr) {
40 WARN_ON(this->size != match->size ||
41 this->dma_handle != match->dma_handle);
42 return 1;
43 }
44 return 0;
45}
46
47/**
48 * dmam_alloc_coherent - Managed dma_alloc_coherent()
49 * @dev: Device to allocate coherent memory for
50 * @size: Size of allocation
51 * @dma_handle: Out argument for allocated DMA handle
52 * @gfp: Allocation flags
53 *
54 * Managed dma_alloc_coherent(). Memory allocated using this function
55 * will be automatically released on driver detach.
56 *
57 * RETURNS:
58 * Pointer to allocated memory on success, NULL on failure.
59 */
60void * dmam_alloc_coherent(struct device *dev, size_t size,
61 dma_addr_t *dma_handle, gfp_t gfp)
62{
63 struct dma_devres *dr;
64 void *vaddr;
65
66 dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
67 if (!dr)
68 return NULL;
69
70 vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
71 if (!vaddr) {
72 devres_free(dr);
73 return NULL;
74 }
75
76 dr->vaddr = vaddr;
77 dr->dma_handle = *dma_handle;
78 dr->size = size;
79
80 devres_add(dev, dr);
81
82 return vaddr;
83}
84EXPORT_SYMBOL(dmam_alloc_coherent);
85
86/**
87 * dmam_free_coherent - Managed dma_free_coherent()
88 * @dev: Device to free coherent memory for
89 * @size: Size of allocation
90 * @vaddr: Virtual address of the memory to free
91 * @dma_handle: DMA handle of the memory to free
92 *
93 * Managed dma_free_coherent().
94 */
95void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
96 dma_addr_t dma_handle)
97{
98 struct dma_devres match_data = { size, vaddr, dma_handle };
99
100 dma_free_coherent(dev, size, vaddr, dma_handle);
101 WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
102 &match_data));
103}
104EXPORT_SYMBOL(dmam_free_coherent);
105
106/**
107 * dmam_alloc_noncoherent - Managed dma_alloc_noncoherent()
108 * @dev: Device to allocate noncoherent memory for
109 * @size: Size of allocation
110 * @dma_handle: Out argument for allocated DMA handle
111 * @gfp: Allocation flags
112 *
113 * Managed dma_alloc_noncoherent(). Memory allocated using this
114 * function will be automatically released on driver detach.
115 *
116 * RETURNS:
117 * Pointer to allocated memory on success, NULL on failure.
118 */
119void *dmam_alloc_noncoherent(struct device *dev, size_t size,
120 dma_addr_t *dma_handle, gfp_t gfp)
121{
122 struct dma_devres *dr;
123 void *vaddr;
124
125 dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
126 if (!dr)
127 return NULL;
128
129 vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
130 if (!vaddr) {
131 devres_free(dr);
132 return NULL;
133 }
134
135 dr->vaddr = vaddr;
136 dr->dma_handle = *dma_handle;
137 dr->size = size;
138
139 devres_add(dev, dr);
140
141 return vaddr;
142}
143EXPORT_SYMBOL(dmam_alloc_noncoherent);
144
145/**
146 * dmam_free_noncoherent - Managed dma_free_noncoherent()
147 * @dev: Device to free noncoherent memory for
148 * @size: Size of allocation
149 * @vaddr: Virtual address of the memory to free
150 * @dma_handle: DMA handle of the memory to free
151 *
152 * Managed dma_free_noncoherent().
153 */
154void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
155 dma_addr_t dma_handle)
156{
157 struct dma_devres match_data = { size, vaddr, dma_handle };
158
159 dma_free_noncoherent(dev, size, vaddr, dma_handle);
160 WARN_ON(devres_destroy(dev, dmam_noncoherent_release, dmam_match,
161 &match_data));
162}
163EXPORT_SYMBOL(dmam_free_noncoherent);
164
165#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
166
167static void dmam_coherent_decl_release(struct device *dev, void *res)
168{
169 dma_release_declared_memory(dev);
170}
171
172/**
173 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
174 * @dev: Device to declare coherent memory for
175 * @bus_addr: Bus address of coherent memory to be declared
176 * @device_addr: Device address of coherent memory to be declared
177 * @size: Size of coherent memory to be declared
178 * @flags: Flags
179 *
180 * Managed dma_declare_coherent_memory().
181 *
182 * RETURNS:
183 * 0 on success, -errno on failure.
184 */
185int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
186 dma_addr_t device_addr, size_t size, int flags)
187{
188 void *res;
189 int rc;
190
191 res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
192 if (!res)
193 return -ENOMEM;
194
195 rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
196 flags);
197 if (rc == 0)
198 devres_add(dev, res);
199 else
200 devres_free(res);
201
202 return rc;
203}
204EXPORT_SYMBOL(dmam_declare_coherent_memory);
205
206/**
207 * dmam_release_declared_memory - Managed dma_release_declared_memory().
208 * @dev: Device to release declared coherent memory for
209 *
210 * Managed dma_release_declared_memory().
211 */
212void dmam_release_declared_memory(struct device *dev)
213{
214 WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
215}
216EXPORT_SYMBOL(dmam_release_declared_memory);
217
218#endif
diff --git a/drivers/base/dmapool.c b/drivers/base/dmapool.c
index f95d50277274..cd467c9f33b3 100644
--- a/drivers/base/dmapool.c
+++ b/drivers/base/dmapool.c
@@ -415,8 +415,67 @@ dma_pool_free (struct dma_pool *pool, void *vaddr, dma_addr_t dma)
 	spin_unlock_irqrestore (&pool->lock, flags);
 }
 
+/*
+ * Managed DMA pool
+ */
+static void dmam_pool_release(struct device *dev, void *res)
+{
+	struct dma_pool *pool = *(struct dma_pool **)res;
+
+	dma_pool_destroy(pool);
+}
+
+static int dmam_pool_match(struct device *dev, void *res, void *match_data)
+{
+	return *(struct dma_pool **)res == match_data;
+}
+
+/**
+ * dmam_pool_create - Managed dma_pool_create()
+ * @name: name of pool, for diagnostics
+ * @dev: device that will be doing the DMA
+ * @size: size of the blocks in this pool.
+ * @align: alignment requirement for blocks; must be a power of two
+ * @allocation: returned blocks won't cross this boundary (or zero)
+ *
+ * Managed dma_pool_create(). DMA pool created with this function is
+ * automatically destroyed on driver detach.
+ */
+struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
+				  size_t size, size_t align, size_t allocation)
+{
+	struct dma_pool **ptr, *pool;
+
+	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return NULL;
+
+	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
+	if (pool)
+		devres_add(dev, ptr);
+	else
+		devres_free(ptr);
+
+	return pool;
+}
+
+/**
+ * dmam_pool_destroy - Managed dma_pool_destroy()
+ * @pool: dma pool that will be destroyed
+ *
+ * Managed dma_pool_destroy().
+ */
+void dmam_pool_destroy(struct dma_pool *pool)
+{
+	struct device *dev = pool->dev;
+
+	dma_pool_destroy(pool);
+	WARN_ON(devres_destroy(dev, dmam_pool_release, dmam_pool_match, pool));
+}
 
 EXPORT_SYMBOL (dma_pool_create);
 EXPORT_SYMBOL (dma_pool_destroy);
 EXPORT_SYMBOL (dma_pool_alloc);
 EXPORT_SYMBOL (dma_pool_free);
+EXPORT_SYMBOL (dmam_pool_create);
+EXPORT_SYMBOL (dmam_pool_destroy);
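
Illustrative conversion for a hypothetical "mydrv" driver (not part of
this patch); only the function name changes, the cleanup disappears:

	/* before: dma_pool_destroy() must be called in ->remove and in
	 * every ->probe failure path */
	pool = dma_pool_create("mydrv", &pdev->dev, 64, 8, 0);

	/* after: the pool is destroyed automatically on driver detach */
	pool = dmam_pool_create("mydrv", &pdev->dev, 64, 8, 0);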
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 84c757ba0664..8b44cff2c176 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -744,6 +744,104 @@ int pci_enable_device(struct pci_dev *dev)
 	return pci_enable_device_bars(dev, (1 << PCI_NUM_RESOURCES) - 1);
 }
 
+/*
+ * Managed PCI resources. This manages device on/off, intx/msi/msix
+ * on/off and BAR regions. pci_dev itself records msi/msix status, so
+ * there's no need to track it separately. pci_devres is initialized
+ * when a device is enabled using managed PCI device enable interface.
+ */
+struct pci_devres {
+	unsigned int disable:1;
+	unsigned int orig_intx:1;
+	unsigned int restore_intx:1;
+	u32 region_mask;
+};
+
+static void pcim_release(struct device *gendev, void *res)
+{
+	struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
+	struct pci_devres *this = res;
+	int i;
+
+	if (dev->msi_enabled)
+		pci_disable_msi(dev);
+	if (dev->msix_enabled)
+		pci_disable_msix(dev);
+
+	for (i = 0; i < DEVICE_COUNT_RESOURCE; i++)
+		if (this->region_mask & (1 << i))
+			pci_release_region(dev, i);
+
+	if (this->restore_intx)
+		pci_intx(dev, this->orig_intx);
+
+	if (this->disable)
+		pci_disable_device(dev);
+}
+
+static struct pci_devres * get_pci_dr(struct pci_dev *pdev)
+{
+	struct pci_devres *dr, *new_dr;
+
+	dr = devres_find(&pdev->dev, pcim_release, NULL, NULL);
+	if (dr)
+		return dr;
+
+	new_dr = devres_alloc(pcim_release, sizeof(*new_dr), GFP_KERNEL);
+	if (!new_dr)
+		return NULL;
+	return devres_get(&pdev->dev, new_dr, NULL, NULL);
+}
+
+static struct pci_devres * find_pci_dr(struct pci_dev *pdev)
+{
+	if (pci_is_managed(pdev))
+		return devres_find(&pdev->dev, pcim_release, NULL, NULL);
+	return NULL;
+}
+
+/**
+ * pcim_enable_device - Managed pci_enable_device()
+ * @pdev: PCI device to be initialized
+ *
+ * Managed pci_enable_device().
+ */
+int pcim_enable_device(struct pci_dev *pdev)
+{
+	struct pci_devres *dr;
+	int rc;
+
+	dr = get_pci_dr(pdev);
+	if (unlikely(!dr))
+		return -ENOMEM;
+	WARN_ON(!!dr->disable);
+
+	rc = pci_enable_device(pdev);
+	if (!rc) {
+		pdev->is_managed = 1;
+		dr->disable = 1;
+	}
+	return rc;
+}
+
+/**
+ * pcim_pin_device - Pin managed PCI device
+ * @pdev: PCI device to pin
+ *
+ * Pin managed PCI device @pdev. Pinned device won't be disabled on
+ * driver detach. @pdev must have been enabled with
+ * pcim_enable_device().
+ */
+void pcim_pin_device(struct pci_dev *pdev)
+{
+	struct pci_devres *dr;
+
+	dr = find_pci_dr(pdev);
+	WARN_ON(!dr || !dr->disable);
+	if (dr)
+		dr->disable = 0;
+}
+
 /**
  * pcibios_disable_device - disable arch specific PCI resources for device dev
  * @dev: the PCI device to disable
@@ -767,8 +865,13 @@ void __attribute__ ((weak)) pcibios_disable_device (struct pci_dev *dev) {}
 void
 pci_disable_device(struct pci_dev *dev)
 {
+	struct pci_devres *dr;
 	u16 pci_command;
 
+	dr = find_pci_dr(dev);
+	if (dr)
+		dr->disable = 0;
+
 	if (atomic_sub_return(1, &dev->enable_cnt) != 0)
 		return;
 
@@ -867,6 +970,8 @@ pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge)
  */
 void pci_release_region(struct pci_dev *pdev, int bar)
 {
+	struct pci_devres *dr;
+
 	if (pci_resource_len(pdev, bar) == 0)
 		return;
 	if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
@@ -875,6 +980,10 @@ void pci_release_region(struct pci_dev *pdev, int bar)
 	else if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
 		release_mem_region(pci_resource_start(pdev, bar),
 				pci_resource_len(pdev, bar));
+
+	dr = find_pci_dr(pdev);
+	if (dr)
+		dr->region_mask &= ~(1 << bar);
 }
 
 /**
@@ -893,6 +1002,8 @@ void pci_release_region(struct pci_dev *pdev, int bar)
  */
 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
 {
+	struct pci_devres *dr;
+
 	if (pci_resource_len(pdev, bar) == 0)
 		return 0;
 
@@ -906,7 +1017,11 @@ int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
 			pci_resource_len(pdev, bar), res_name))
 		goto err_out;
 	}
 
+	dr = find_pci_dr(pdev);
+	if (dr)
+		dr->region_mask |= 1 << bar;
+
 	return 0;
 
 err_out:
@@ -1144,7 +1259,15 @@ pci_intx(struct pci_dev *pdev, int enable)
 	}
 
 	if (new != pci_command) {
+		struct pci_devres *dr;
+
 		pci_write_config_word(pdev, PCI_COMMAND, new);
+
+		dr = find_pci_dr(pdev);
+		if (dr && !dr->restore_intx) {
+			dr->restore_intx = 1;
+			dr->orig_intx = !enable;
+		}
 	}
 }
 
@@ -1226,6 +1349,8 @@ device_initcall(pci_init);
 EXPORT_SYMBOL_GPL(pci_restore_bars);
 EXPORT_SYMBOL(pci_enable_device_bars);
 EXPORT_SYMBOL(pci_enable_device);
+EXPORT_SYMBOL(pcim_enable_device);
+EXPORT_SYMBOL(pcim_pin_device);
 EXPORT_SYMBOL(pci_disable_device);
 EXPORT_SYMBOL(pci_find_capability);
 EXPORT_SYMBOL(pci_bus_find_capability);
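
Illustrative use of pcim_pin_device() (the condition is hypothetical):
a driver whose device must stay enabled past driver detach, e.g.
because firmware keeps using it, pins it once during probe:

	rc = pcim_enable_device(pdev);
	if (rc)
		return rc;

	if (my_device_must_stay_enabled(pdev))	/* hypothetical check */
		pcim_pin_device(pdev);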
diff --git a/include/linux/device.h b/include/linux/device.h
index 5ca1cdba563a..26e4692f2d1a 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -354,6 +354,41 @@ extern int __must_check device_create_bin_file(struct device *dev,
 				     struct bin_attribute *attr);
 extern void device_remove_bin_file(struct device *dev,
 				   struct bin_attribute *attr);
+
+/* device resource management */
+typedef void (*dr_release_t)(struct device *dev, void *res);
+typedef int (*dr_match_t)(struct device *dev, void *res, void *match_data);
+
+#ifdef CONFIG_DEBUG_DEVRES
+extern void * __devres_alloc(dr_release_t release, size_t size, gfp_t gfp,
+			     const char *name);
+#define devres_alloc(release, size, gfp) \
+	__devres_alloc(release, size, gfp, #release)
+#else
+extern void * devres_alloc(dr_release_t release, size_t size, gfp_t gfp);
+#endif
+extern void devres_free(void *res);
+extern void devres_add(struct device *dev, void *res);
+extern void * devres_find(struct device *dev, dr_release_t release,
+			  dr_match_t match, void *match_data);
+extern void * devres_get(struct device *dev, void *new_res,
+			 dr_match_t match, void *match_data);
+extern void * devres_remove(struct device *dev, dr_release_t release,
+			    dr_match_t match, void *match_data);
+extern int devres_destroy(struct device *dev, dr_release_t release,
+			  dr_match_t match, void *match_data);
+
+/* devres group */
+extern void * __must_check devres_open_group(struct device *dev, void *id,
+					     gfp_t gfp);
+extern void devres_close_group(struct device *dev, void *id);
+extern void devres_remove_group(struct device *dev, void *id);
+extern int devres_release_group(struct device *dev, void *id);
+
+/* managed kzalloc/kfree for device drivers, no kmalloc, always use kzalloc */
+extern void *devm_kzalloc(struct device *dev, size_t size, gfp_t gfp);
+extern void devm_kfree(struct device *dev, void *p);
+
 struct device {
 	struct klist		klist_children;
 	struct klist_node	knode_parent;	/* node in sibling list */
@@ -397,6 +432,9 @@ struct device {
 	/* arch specific additions */
 	struct dev_archdata	archdata;
 
+	spinlock_t		devres_lock;
+	struct list_head	devres_head;
+
 	/* class_device migration path */
 	struct list_head	node;
 	struct class		*class;
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index ff203c465fed..9a663c6db16a 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -66,6 +66,33 @@ dma_mark_declared_memory_occupied(struct device *dev,
 }
 #endif
 
-#endif
+/*
+ * Managed DMA API
+ */
+extern void *dmam_alloc_coherent(struct device *dev, size_t size,
+				 dma_addr_t *dma_handle, gfp_t gfp);
+extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
+			       dma_addr_t dma_handle);
+extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
+				    dma_addr_t *dma_handle, gfp_t gfp);
+extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
+				  dma_addr_t dma_handle);
+#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
+extern int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
+					dma_addr_t device_addr, size_t size,
+					int flags);
+extern void dmam_release_declared_memory(struct device *dev);
+#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
+static inline int dmam_declare_coherent_memory(struct device *dev,
+				dma_addr_t bus_addr, dma_addr_t device_addr,
+				size_t size, int flags)
+{
+	return 0;
+}
 
+static inline void dmam_release_declared_memory(struct device *dev)
+{
+}
+#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
 
 #endif
diff --git a/include/linux/dmapool.h b/include/linux/dmapool.h
index 76f12f46db7f..022e34fcbd1b 100644
--- a/include/linux/dmapool.h
+++ b/include/linux/dmapool.h
@@ -24,5 +24,12 @@ void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
 
 void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t addr);
 
+/*
+ * Managed DMA pool
+ */
+struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
+				  size_t size, size_t align, size_t allocation);
+void dmam_pool_destroy(struct dma_pool *pool);
+
 #endif
 
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index e36e86c869fb..5a8ba0b8ccba 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -12,6 +12,7 @@
 #include <linux/sched.h>
 #include <linux/irqflags.h>
 #include <linux/bottom_half.h>
+#include <linux/device.h>
 #include <asm/atomic.h>
 #include <asm/ptrace.h>
 #include <asm/system.h>
@@ -83,6 +84,11 @@ extern int request_irq(unsigned int, irq_handler_t handler,
 		       unsigned long, const char *, void *);
 extern void free_irq(unsigned int, void *);
 
+extern int devm_request_irq(struct device *dev, unsigned int irq,
+			    irq_handler_t handler, unsigned long irqflags,
+			    const char *devname, void *dev_id);
+extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
+
 /*
  * On lockdep we dont want to enable hardirqs in hardirq
  * context. Use local_irq_enable_in_hardirq() to annotate
diff --git a/include/linux/io.h b/include/linux/io.h
index 81877ea39309..f5edf9c5de0a 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -28,6 +28,23 @@ void __iowrite64_copy(void __iomem *to, const void *from, size_t count);
 int ioremap_page_range(unsigned long addr, unsigned long end,
 		       unsigned long phys_addr, pgprot_t prot);
 
+/*
+ * Managed iomap interface
+ */
+void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
+			       unsigned int nr);
+void devm_ioport_unmap(struct device *dev, void __iomem *addr);
+
+void __iomem * devm_ioremap(struct device *dev, unsigned long offset,
+			    unsigned long size);
+void __iomem * devm_ioremap_nocache(struct device *dev, unsigned long offset,
+				    unsigned long size);
+void devm_iounmap(struct device *dev, void __iomem *addr);
+
+void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
+void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr);
+void __iomem * const * pcim_iomap_table(struct pci_dev *pdev);
+
 /**
  * check_signature - find BIOS signatures
  * @io_addr: mmio address to check
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 15228d79c5bc..6859a3b14088 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -137,4 +137,24 @@ static inline int __deprecated check_region(resource_size_t s,
 {
 	return __check_region(&ioport_resource, s, n);
 }
+
+/* Wrappers for managed devices */
+struct device;
+#define devm_request_region(dev,start,n,name) \
+	__devm_request_region(dev, &ioport_resource, (start), (n), (name))
+#define devm_request_mem_region(dev,start,n,name) \
+	__devm_request_region(dev, &iomem_resource, (start), (n), (name))
+
+extern struct resource * __devm_request_region(struct device *dev,
+				struct resource *parent, resource_size_t start,
+				resource_size_t n, const char *name);
+
+#define devm_release_region(dev,start,n) \
+	__devm_release_region(dev, &ioport_resource, (start), (n))
+#define devm_release_mem_region(dev,start,n) \
+	__devm_release_region(dev, &iomem_resource, (start), (n))
+
+extern void __devm_release_region(struct device *dev, struct resource *parent,
+				  resource_size_t start, resource_size_t n);
+
 #endif	/* _LINUX_IOPORT_H */
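
Illustrative use (hypothetical I/O base and "mydrv" name): the region
is released automatically on driver detach, so no release_region()
call is needed in ->remove or in probe failure paths:

	if (!devm_request_region(&pdev->dev, base, 8, "mydrv"))
		return -EBUSY;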
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 805412cc6875..9e3042e7e1cc 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -167,6 +167,7 @@ struct pci_dev {
 	unsigned int	broken_parity_status:1;	/* Device generates false positive parity */
 	unsigned int 	msi_enabled:1;
 	unsigned int	msix_enabled:1;
+	unsigned int	is_managed:1;
 	atomic_t	enable_cnt;	/* pci_enable_device has been called */
 
 	u32		saved_config_space[16]; /* config space saved at suspend time */
@@ -528,6 +529,14 @@ static inline int pci_write_config_dword(struct pci_dev *dev, int where, u32 val
 
 int __must_check pci_enable_device(struct pci_dev *dev);
 int __must_check pci_enable_device_bars(struct pci_dev *dev, int mask);
+int __must_check pcim_enable_device(struct pci_dev *pdev);
+void pcim_pin_device(struct pci_dev *pdev);
+
+static inline int pci_is_managed(struct pci_dev *pdev)
+{
+	return pdev->is_managed;
+}
+
 void pci_disable_device(struct pci_dev *dev);
 void pci_set_master(struct pci_dev *dev);
 #define HAVE_PCI_SET_MWI
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 8b961adc3bd2..c4b7ed1cebf7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -482,3 +482,89 @@ int request_irq(unsigned int irq, irq_handler_t handler,
482 return retval; 482 return retval;
483} 483}
484EXPORT_SYMBOL(request_irq); 484EXPORT_SYMBOL(request_irq);
485
486/*
487 * Device resource management aware IRQ request/free implementation.
488 */
489struct irq_devres {
490 unsigned int irq;
491 void *dev_id;
492};
493
494static void devm_irq_release(struct device *dev, void *res)
495{
496 struct irq_devres *this = res;
497
498 free_irq(this->irq, this->dev_id);
499}
500
501static int devm_irq_match(struct device *dev, void *res, void *data)
502{
503 struct irq_devres *this = res, *match = data;
504
505 return this->irq == match->irq && this->dev_id == match->dev_id;
506}
507
508/**
509 * devm_request_irq - allocate an interrupt line for a managed device
510 * @dev: device to request interrupt for
511 * @irq: Interrupt line to allocate
512 * @handler: Function to be called when the IRQ occurs
513 * @irqflags: Interrupt type flags
514 * @devname: An ascii name for the claiming device
515 * @dev_id: A cookie passed back to the handler function
516 *
517 * Except for the extra @dev argument, this function takes the
518 * same arguments and performs the same function as
519 * request_irq(). IRQs requested with this function will be
520 * automatically freed on driver detach.
521 *
522 * If an IRQ allocated with this function needs to be freed
523 * separately, devm_free_irq() must be used.
524 */
525int devm_request_irq(struct device *dev, unsigned int irq,
526 irq_handler_t handler, unsigned long irqflags,
527 const char *devname, void *dev_id)
528{
529 struct irq_devres *dr;
530 int rc;
531
532 dr = devres_alloc(devm_irq_release, sizeof(struct irq_devres),
533 GFP_KERNEL);
534 if (!dr)
535 return -ENOMEM;
536
537 rc = request_irq(irq, handler, irqflags, devname, dev_id);
538 if (rc) {
539		devres_free(dr);
540 return rc;
541 }
542
543 dr->irq = irq;
544 dr->dev_id = dev_id;
545 devres_add(dev, dr);
546
547 return 0;
548}
549EXPORT_SYMBOL(devm_request_irq);
550
551/**
552 * devm_free_irq - free an interrupt
553 * @dev: device to free interrupt for
554 * @irq: Interrupt line to free
555 * @dev_id: Device identity to free
556 *
557 * Except for the extra @dev argument, this function takes the
558 * same arguments and performs the same function as free_irq().
559 * This function, rather than free_irq(), must be used to manually
560 * free IRQs allocated with devm_request_irq().
561 */
562void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
563{
564 struct irq_devres match_data = { irq, dev_id };
565
566 free_irq(irq, dev_id);
567 WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
568 &match_data));
569}
570EXPORT_SYMBOL(devm_free_irq);
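
Usage mirrors request_irq() with the owning device prepended. A hypothetical
handler and setup routine (names invented for illustration):

	#include <linux/interrupt.h>

	static irqreturn_t foo_interrupt(int irq, void *dev_id)
	{
		/* ... acknowledge the hardware and handle the event ... */
		return IRQ_HANDLED;
	}

	static int foo_setup_irq(struct device *dev, unsigned int irq,
				 void *priv)
	{
		/* Freed with free_irq() automatically on driver detach. */
		return devm_request_irq(dev, irq, foo_interrupt, IRQF_SHARED,
					"foo", priv);
	}
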
diff --git a/kernel/resource.c b/kernel/resource.c
index 7b9a497419d9..2a3f88636580 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -17,6 +17,7 @@
17#include <linux/fs.h>
18#include <linux/proc_fs.h>
19#include <linux/seq_file.h>
20#include <linux/device.h>
21#include <asm/io.h>
22
23
@@ -618,6 +619,67 @@ void __release_region(struct resource *parent, resource_size_t start,
619EXPORT_SYMBOL(__release_region);
620
621/*
622 * Managed region resource
623 */
624struct region_devres {
625 struct resource *parent;
626 resource_size_t start;
627 resource_size_t n;
628};
629
630static void devm_region_release(struct device *dev, void *res)
631{
632 struct region_devres *this = res;
633
634 __release_region(this->parent, this->start, this->n);
635}
636
637static int devm_region_match(struct device *dev, void *res, void *match_data)
638{
639 struct region_devres *this = res, *match = match_data;
640
641 return this->parent == match->parent &&
642 this->start == match->start && this->n == match->n;
643}
644
645struct resource * __devm_request_region(struct device *dev,
646 struct resource *parent, resource_size_t start,
647 resource_size_t n, const char *name)
648{
649 struct region_devres *dr = NULL;
650 struct resource *res;
651
652 dr = devres_alloc(devm_region_release, sizeof(struct region_devres),
653 GFP_KERNEL);
654 if (!dr)
655 return NULL;
656
657 dr->parent = parent;
658 dr->start = start;
659 dr->n = n;
660
661 res = __request_region(parent, start, n, name);
662 if (res)
663 devres_add(dev, dr);
664 else
665 devres_free(dr);
666
667 return res;
668}
669EXPORT_SYMBOL(__devm_request_region);
670
671void __devm_release_region(struct device *dev, struct resource *parent,
672 resource_size_t start, resource_size_t n)
673{
674 struct region_devres match_data = { parent, start, n };
675
676 __release_region(parent, start, n);
677 WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
678 &match_data));
679}
680EXPORT_SYMBOL(__devm_release_region);
681
682/*
683 * Called from init/main.c to reserve IO ports.
684 */
685#define MAXRESERVE 4
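
__devm_request_region() above shows the canonical devres recipe: allocate the
bookkeeping entry first, attempt the real acquisition, then either register
the entry or free it. Any driver-private resource can be wrapped the same
way; the following sketch assumes hypothetical foo_get()/foo_put()
primitives:

	#include <linux/device.h>

	struct foo;				/* hypothetical resource type */
	extern struct foo *foo_get(void);	/* hypothetical acquire */
	extern void foo_put(struct foo *foo);	/* hypothetical release */

	struct foo_devres {
		struct foo *foo;
	};

	static void devm_foo_release(struct device *dev, void *res)
	{
		foo_put(((struct foo_devres *)res)->foo);
	}

	struct foo *devm_foo_get(struct device *dev)
	{
		struct foo_devres *dr;
		struct foo *foo;

		dr = devres_alloc(devm_foo_release, sizeof(*dr), GFP_KERNEL);
		if (!dr)
			return NULL;

		foo = foo_get();	/* the real, unmanaged acquisition */
		if (foo) {
			dr->foo = foo;
			devres_add(dev, dr);
		} else
			devres_free(dr);

		return foo;
	}
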
diff --git a/lib/Makefile b/lib/Makefile
index 77b4bad7d441..29b2e9912bbb 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -5,7 +5,7 @@
5lib-y := ctype.o string.o vsprintf.o cmdline.o \
6	 bust_spinlocks.o rbtree.o radix-tree.o dump_stack.o \
7	 idr.o div64.o int_sqrt.o bitmap.o extable.o prio_tree.o \
8	 sha1.o irq_regs.o reciprocal_div.o
8	 sha1.o irq_regs.o reciprocal_div.o iomap.o
9
10lib-$(CONFIG_MMU) += ioremap.o
11lib-$(CONFIG_SMP) += cpumask.o
@@ -41,7 +41,6 @@ obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
41obj-$(CONFIG_CRC16) += crc16.o
42obj-$(CONFIG_CRC32) += crc32.o
43obj-$(CONFIG_LIBCRC32C) += libcrc32c.o
44obj-$(CONFIG_GENERIC_IOMAP) += iomap.o
44obj-$(CONFIG_GENERIC_ALLOCATOR) += genalloc.o
45
46obj-$(CONFIG_ZLIB_INFLATE) += zlib_inflate/
diff --git a/lib/iomap.c b/lib/iomap.c
index d6ccdd85df53..3214028141da 100644
--- a/lib/iomap.c
+++ b/lib/iomap.c
@@ -4,8 +4,10 @@
4 * (C) Copyright 2004 Linus Torvalds
5 */
6#include <linux/pci.h>
7#include <linux/io.h>
8
9#ifdef CONFIG_GENERIC_IOMAP
10#include <linux/module.h>
8#include <asm/io.h>
11
12/*
13 * Read/write from/to an (offsettable) iomem cookie. It might be a PIO
@@ -254,3 +256,245 @@ void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
256}
257EXPORT_SYMBOL(pci_iomap);
258EXPORT_SYMBOL(pci_iounmap);
259
260#endif /* CONFIG_GENERIC_IOMAP */
261
262/*
263 * Generic iomap devres
264 */
265static void devm_ioport_map_release(struct device *dev, void *res)
266{
267 ioport_unmap(*(void __iomem **)res);
268}
269
270static int devm_ioport_map_match(struct device *dev, void *res,
271 void *match_data)
272{
273 return *(void **)res == match_data;
274}
275
276/**
277 * devm_ioport_map - Managed ioport_map()
278 * @dev: Generic device to map ioport for
279 * @port: Port to map
280 * @nr: Number of ports to map
281 *
282 * Managed ioport_map(). Map is automatically unmapped on driver
283 * detach.
284 */
285void __iomem * devm_ioport_map(struct device *dev, unsigned long port,
286 unsigned int nr)
287{
288 void __iomem **ptr, *addr;
289
290 ptr = devres_alloc(devm_ioport_map_release, sizeof(*ptr), GFP_KERNEL);
291 if (!ptr)
292 return NULL;
293
294 addr = ioport_map(port, nr);
295 if (addr) {
296 *ptr = addr;
297 devres_add(dev, ptr);
298 } else
299 devres_free(ptr);
300
301 return addr;
302}
303EXPORT_SYMBOL(devm_ioport_map);
304
305/**
306 * devm_ioport_unmap - Managed ioport_unmap()
307 * @dev: Generic device to unmap for
308 * @addr: Address to unmap
309 *
310 * Managed ioport_unmap(). @addr must have been mapped using
311 * devm_ioport_map().
312 */
313void devm_ioport_unmap(struct device *dev, void __iomem *addr)
314{
315 ioport_unmap(addr);
316 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
317 devm_ioport_map_match, (void *)addr));
318}
319EXPORT_SYMBOL(devm_ioport_unmap);
320
321static void devm_ioremap_release(struct device *dev, void *res)
322{
323 iounmap(*(void __iomem **)res);
324}
325
326static int devm_ioremap_match(struct device *dev, void *res, void *match_data)
327{
328 return *(void **)res == match_data;
329}
330
331/**
332 * devm_ioremap - Managed ioremap()
333 * @dev: Generic device to remap IO address for
334 * @offset: BUS offset to map
335 * @size: Size of map
336 *
337 * Managed ioremap(). Map is automatically unmapped on driver detach.
338 */
339void __iomem *devm_ioremap(struct device *dev, unsigned long offset,
340 unsigned long size)
341{
342 void __iomem **ptr, *addr;
343
344 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
345 if (!ptr)
346 return NULL;
347
348 addr = ioremap(offset, size);
349 if (addr) {
350 *ptr = addr;
351 devres_add(dev, ptr);
352 } else
353 devres_free(ptr);
354
355 return addr;
356}
357EXPORT_SYMBOL(devm_ioremap);
358
359/**
360 * devm_ioremap_nocache - Managed ioremap_nocache()
361 * @dev: Generic device to remap IO address for
362 * @offset: BUS offset to map
363 * @size: Size of map
364 *
365 * Managed ioremap_nocache(). Map is automatically unmapped on driver
366 * detach.
367 */
368void __iomem *devm_ioremap_nocache(struct device *dev, unsigned long offset,
369 unsigned long size)
370{
371 void __iomem **ptr, *addr;
372
373 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
374 if (!ptr)
375 return NULL;
376
377 addr = ioremap_nocache(offset, size);
378 if (addr) {
379 *ptr = addr;
380 devres_add(dev, ptr);
381 } else
382 devres_free(ptr);
383
384 return addr;
385}
386EXPORT_SYMBOL(devm_ioremap_nocache);
387
388/**
389 * devm_iounmap - Managed iounmap()
390 * @dev: Generic device to unmap for
391 * @addr: Address to unmap
392 *
393 * Managed iounmap(). @addr must have been mapped using devm_ioremap*().
394 */
395void devm_iounmap(struct device *dev, void __iomem *addr)
396{
397 iounmap(addr);
398 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
399 (void *)addr));
400}
401EXPORT_SYMBOL(devm_iounmap);
402
403/*
404 * PCI iomap devres
405 */
406#define PCIM_IOMAP_MAX PCI_ROM_RESOURCE
407
408struct pcim_iomap_devres {
409 void __iomem *table[PCIM_IOMAP_MAX];
410};
411
412static void pcim_iomap_release(struct device *gendev, void *res)
413{
414 struct pci_dev *dev = container_of(gendev, struct pci_dev, dev);
415 struct pcim_iomap_devres *this = res;
416 int i;
417
418 for (i = 0; i < PCIM_IOMAP_MAX; i++)
419 if (this->table[i])
420 pci_iounmap(dev, this->table[i]);
421}
422
423/**
424 * pcim_iomap_table - access iomap allocation table
425 * @pdev: PCI device to access iomap table for
426 *
427 * Access the iomap allocation table for @pdev.  If the table doesn't
428 * exist and @pdev is managed, it will be allocated. All iomaps
429 * recorded in the iomap table are automatically unmapped on driver
430 * detach.
431 *
432 * This function might sleep when the table is first allocated but can
433 * be safely called from any context thereafter, and it is guaranteed
434 * to succeed once allocated.
435 */
436void __iomem * const * pcim_iomap_table(struct pci_dev *pdev)
437{
438 struct pcim_iomap_devres *dr, *new_dr;
439
440 dr = devres_find(&pdev->dev, pcim_iomap_release, NULL, NULL);
441 if (dr)
442 return dr->table;
443
444 new_dr = devres_alloc(pcim_iomap_release, sizeof(*new_dr), GFP_KERNEL);
445 if (!new_dr)
446 return NULL;
447 dr = devres_get(&pdev->dev, new_dr, NULL, NULL);
448 return dr->table;
449}
450EXPORT_SYMBOL(pcim_iomap_table);
451
452/**
453 * pcim_iomap - Managed pci_iomap()
454 * @pdev: PCI device to iomap for
455 * @bar: BAR to iomap
456 * @maxlen: Maximum length of iomap
457 *
458 * Managed pci_iomap(). Map is automatically unmapped on driver
459 * detach.
460 */
461void __iomem * pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
462{
463 void __iomem **tbl;
464
465 BUG_ON(bar >= PCIM_IOMAP_MAX);
466
467 tbl = (void __iomem **)pcim_iomap_table(pdev);
468 if (!tbl || tbl[bar]) /* duplicate mappings not allowed */
469 return NULL;
470
471 tbl[bar] = pci_iomap(pdev, bar, maxlen);
472 return tbl[bar];
473}
474EXPORT_SYMBOL(pcim_iomap);
475
476/**
477 * pcim_iounmap - Managed pci_iounmap()
478 * @pdev: PCI device to iounmap for
479 * @addr: Address to unmap
480 *
481 * Managed pci_iounmap(). @addr must have been mapped using pcim_iomap().
482 */
483void pcim_iounmap(struct pci_dev *pdev, void __iomem *addr)
484{
485 void __iomem **tbl;
486 int i;
487
488 pci_iounmap(pdev, addr);
489
490 tbl = (void __iomem **)pcim_iomap_table(pdev);
491 BUG_ON(!tbl);
492
493 for (i = 0; i < PCIM_IOMAP_MAX; i++)
494 if (tbl[i] == addr) {
495 tbl[i] = NULL;
496 return;
497 }
498 WARN_ON(1);
499}
500EXPORT_SYMBOL(pcim_iounmap);
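
Putting the PCI pieces together, a managed probe might look like the
following hedged sketch (the driver name and BAR layout are hypothetical):

	#include <linux/pci.h>
	#include <linux/io.h>

	static int foo_pci_probe(struct pci_dev *pdev,
				 const struct pci_device_id *id)
	{
		void __iomem * const *iomap;
		int rc;

		rc = pcim_enable_device(pdev);
		if (rc)
			return rc;

		/* Map all of BAR 0 (maxlen 0 means the whole resource);
		 * unmapped automatically on driver detach. */
		if (!pcim_iomap(pdev, 0, 0))
			return -ENOMEM;

		/* Mappings stay reachable through the per-device table. */
		iomap = pcim_iomap_table(pdev);
		iowrite32(0, iomap[0]);

		return 0;
	}
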