author		Lars-Peter Clausen <lars@metafoo.de>	2013-04-19 05:42:14 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2013-05-02 12:20:38 -0400
commit		de61608acf89779c8831aaa1428b6975d49d98c0 (patch)
tree		482bb0d77306ddf7f83050e2129885c89124e56f /drivers/dma
parent		f22eb1402244885126c4263eb36b857e4182dd6f (diff)
dma: of: Use a mutex to protect the of_dma_list
Currently the OF DMA code uses a spinlock to protect the of_dma_list from concurrent access and a per-controller reference count to protect the controller from being freed while a request operation is in progress. If of_dma_controller_free() is called for a controller whose reference count is not zero, it will return -EBUSY and not remove the controller.

This is fine up until here, but it leaves open the question of what the caller of of_dma_controller_free() is supposed to do if the controller couldn't be freed. The only viable solution for the caller is to spin on of_dma_controller_free() until it returns success, e.g.

	do {
		ret = of_dma_controller_free(dev->of_node);
	} while (ret == -EBUSY);

This is rather ugly and unnecessary, and none of the current users of of_dma_controller_free() check its return value anyway. Instead, protect the list with a mutex. The mutex is held as long as a request operation is in progress, so if of_dma_controller_free() is called while a request operation is in progress it will be put to sleep and only wake up once the request operation has finished.

This means it is no longer possible to register or unregister OF DMA controllers from a context where it is not possible to sleep. But I doubt that we'll ever need this.

Also rename of_dma_get_controller back to of_dma_find_controller.

Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Acked-by: Arnd Bergmann <arnd@arndb.de>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
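As a rough caller-side sketch (not part of the patch; the driver name, private struct, and remove callback below are hypothetical), this is what the new semantics look like for a DMA controller driver: of_dma_controller_free() now returns void and may sleep on of_dma_lock until any in-flight request has finished, so the -EBUSY retry loop goes away.

	#include <linux/dmaengine.h>
	#include <linux/of_dma.h>
	#include <linux/platform_device.h>

	/* Hypothetical private state of an illustrative "foo" DMA driver. */
	struct foo_dma_dev {
		struct dma_device ddev;
	};

	static int foo_dma_remove(struct platform_device *pdev)
	{
		struct foo_dma_dev *fdev = platform_get_drvdata(pdev);

		/*
		 * May sleep: blocks until a concurrent
		 * of_dma_request_slave_channel() has dropped of_dma_lock;
		 * afterwards the controller is guaranteed to be off the list.
		 */
		of_dma_controller_free(pdev->dev.of_node);

		dma_async_device_unregister(&fdev->ddev);

		return 0;
	}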
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/of-dma.c	76
1 file changed, 20 insertions(+), 56 deletions(-)
diff --git a/drivers/dma/of-dma.c b/drivers/dma/of-dma.c
index 2882403a39cf..7aa0864cd487 100644
--- a/drivers/dma/of-dma.c
+++ b/drivers/dma/of-dma.c
@@ -13,38 +13,31 @@
 #include <linux/device.h>
 #include <linux/err.h>
 #include <linux/module.h>
-#include <linux/rculist.h>
+#include <linux/mutex.h>
 #include <linux/slab.h>
 #include <linux/of.h>
 #include <linux/of_dma.h>
 
 static LIST_HEAD(of_dma_list);
-static DEFINE_SPINLOCK(of_dma_lock);
+static DEFINE_MUTEX(of_dma_lock);
 
 /**
- * of_dma_get_controller - Get a DMA controller in DT DMA helpers list
+ * of_dma_find_controller - Get a DMA controller in DT DMA helpers list
  * @dma_spec: pointer to DMA specifier as found in the device tree
  *
  * Finds a DMA controller with matching device node and number for dma cells
- * in a list of registered DMA controllers. If a match is found the use_count
- * variable is increased and a valid pointer to the DMA data stored is retuned.
- * A NULL pointer is returned if no match is found.
+ * in a list of registered DMA controllers. If a match is found a valid pointer
+ * to the DMA data stored is retuned. A NULL pointer is returned if no match is
+ * found.
  */
-static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
+static struct of_dma *of_dma_find_controller(struct of_phandle_args *dma_spec)
 {
 	struct of_dma *ofdma;
 
-	spin_lock(&of_dma_lock);
-
 	list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
 		if ((ofdma->of_node == dma_spec->np) &&
-		    (ofdma->of_dma_nbcells == dma_spec->args_count)) {
-			ofdma->use_count++;
-			spin_unlock(&of_dma_lock);
+		    (ofdma->of_dma_nbcells == dma_spec->args_count))
 			return ofdma;
-		}
-
-	spin_unlock(&of_dma_lock);
 
 	pr_debug("%s: can't find DMA controller %s\n", __func__,
 		 dma_spec->np->full_name);
@@ -53,22 +46,6 @@ static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
 }
 
 /**
- * of_dma_put_controller - Decrement use count for a registered DMA controller
- * @of_dma: pointer to DMA controller data
- *
- * Decrements the use_count variable in the DMA data structure. This function
- * should be called only when a valid pointer is returned from
- * of_dma_get_controller() and no further accesses to data referenced by that
- * pointer are needed.
- */
-static void of_dma_put_controller(struct of_dma *ofdma)
-{
-	spin_lock(&of_dma_lock);
-	ofdma->use_count--;
-	spin_unlock(&of_dma_lock);
-}
-
-/**
  * of_dma_controller_register - Register a DMA controller to DT DMA helpers
  * @np:			device node of DMA controller
  * @of_dma_xlate:	translation function which converts a phandle
@@ -114,12 +91,11 @@ int of_dma_controller_register(struct device_node *np,
 	ofdma->of_dma_nbcells = nbcells;
 	ofdma->of_dma_xlate = of_dma_xlate;
 	ofdma->of_dma_data = data;
-	ofdma->use_count = 0;
 
 	/* Now queue of_dma controller structure in list */
-	spin_lock(&of_dma_lock);
+	mutex_lock(&of_dma_lock);
 	list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
-	spin_unlock(&of_dma_lock);
+	mutex_unlock(&of_dma_lock);
 
 	return 0;
 }
@@ -131,32 +107,20 @@ EXPORT_SYMBOL_GPL(of_dma_controller_register);
  *
  * Memory allocated by of_dma_controller_register() is freed here.
  */
-int of_dma_controller_free(struct device_node *np)
+void of_dma_controller_free(struct device_node *np)
 {
 	struct of_dma *ofdma;
 
-	spin_lock(&of_dma_lock);
-
-	if (list_empty(&of_dma_list)) {
-		spin_unlock(&of_dma_lock);
-		return -ENODEV;
-	}
+	mutex_lock(&of_dma_lock);
 
 	list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
 		if (ofdma->of_node == np) {
-			if (ofdma->use_count) {
-				spin_unlock(&of_dma_lock);
-				return -EBUSY;
-			}
-
 			list_del(&ofdma->of_dma_controllers);
-			spin_unlock(&of_dma_lock);
 			kfree(ofdma);
-			return 0;
+			break;
 		}
 
-	spin_unlock(&of_dma_lock);
-	return -ENODEV;
+	mutex_unlock(&of_dma_lock);
 }
 EXPORT_SYMBOL_GPL(of_dma_controller_free);
 
@@ -219,15 +183,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
 		if (of_dma_match_channel(np, name, i, &dma_spec))
 			continue;
 
-		ofdma = of_dma_get_controller(&dma_spec);
+		mutex_lock(&of_dma_lock);
+		ofdma = of_dma_find_controller(&dma_spec);
 
-		if (ofdma) {
+		if (ofdma)
 			chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
-
-			of_dma_put_controller(ofdma);
-		} else {
+		else
 			chan = NULL;
-		}
+
+		mutex_unlock(&of_dma_lock);
 
 		of_node_put(dma_spec.np);
 