aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/of/dma.c
diff options
context:
space:
mode:
authorJon Hunter <jon-hunter@ti.com>2012-10-11 15:43:01 -0400
committerVinod Koul <vinod.koul@intel.com>2013-01-08 01:05:01 -0500
commit9743a3b62dee8c9d8af1319f8d1c1ff39130267d (patch)
tree48252c1a1ee909d59fe64e719520a8005576184d /drivers/of/dma.c
parent5ca7c109e8eee8d97267d3308548509bb80d8bf0 (diff)
of: dma: fix protection of DMA controller data stored by DMA helpers
In the current implementation of the OF DMA helpers, read-copy-update (RCU) linked lists are being used for storing and accessing the DMA controller data. This part of implementation is based upon V2 of the DMA helpers by Nicolas [1]. During a recent review of RCU, it became apparent that the code is missing the required rcu_read_lock()/unlock() calls as well as synchronisation calls before freeing any memory protected by RCU. Having looked into adding the appropriate RCU calls to protect the DMA data it became apparent that with the current DMA helper implementation, using RCU is not as attractive as it may have been before. The main reasons being that ... 1. We need to protect the DMA data around calls to the xlate function. 2. The of_dma_simple_xlate() function calls the DMA engine function dma_request_channel() which employs a mutex and so could sleep. 3. The RCU read-side critical sections must not sleep and so we cannot hold an RCU read lock around the xlate function. Therefore, instead of using RCU, an alternative for this use-case is to employ a simple spinlock in conjunction with a usage count variable to keep track of how many current users of the DMA data structure there are. With this implementation, the DMA data cannot be freed until all current users of the DMA data are finished. This patch is based upon the DMA helpers fix for potential deadlock [2]. [1] http://article.gmane.org/gmane.linux.ports.arm.omap/73622 [2] http://marc.info/?l=linux-arm-kernel&m=134859982520984&w=2 Signed-off-by: Jon Hunter <jon-hunter@ti.com> Signed-off-by: Vinod Koul <vinod.koul@linux.intel.com>
Diffstat (limited to 'drivers/of/dma.c')
-rw-r--r--drivers/of/dma.c89
1 files changed, 67 insertions, 22 deletions
diff --git a/drivers/of/dma.c b/drivers/of/dma.c
index 4bed490a69e4..59631b2c4666 100644
--- a/drivers/of/dma.c
+++ b/drivers/of/dma.c
@@ -19,28 +19,61 @@
19#include <linux/of_dma.h> 19#include <linux/of_dma.h>
20 20
21static LIST_HEAD(of_dma_list); 21static LIST_HEAD(of_dma_list);
22static DEFINE_SPINLOCK(of_dma_lock);
22 23
23/** 24/**
24 * of_dma_find_controller - Find a DMA controller in DT DMA helpers list 25 * of_dma_get_controller - Get a DMA controller in DT DMA helpers list
25 * @np: device node of DMA controller 26 * @dma_spec: pointer to DMA specifier as found in the device tree
27 *
28 * Finds a DMA controller with matching device node and number for dma cells
29 * in a list of registered DMA controllers. If a match is found the use_count
30 * variable is increased and a valid pointer to the DMA data stored is retuned.
31 * A NULL pointer is returned if no match is found.
26 */ 32 */
27static struct of_dma *of_dma_find_controller(struct device_node *np) 33static struct of_dma *of_dma_get_controller(struct of_phandle_args *dma_spec)
28{ 34{
29 struct of_dma *ofdma; 35 struct of_dma *ofdma;
30 36
37 spin_lock(&of_dma_lock);
38
31 if (list_empty(&of_dma_list)) { 39 if (list_empty(&of_dma_list)) {
32 pr_err("empty DMA controller list\n"); 40 spin_unlock(&of_dma_lock);
33 return NULL; 41 return NULL;
34 } 42 }
35 43
36 list_for_each_entry_rcu(ofdma, &of_dma_list, of_dma_controllers) 44 list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
37 if (ofdma->of_node == np) 45 if ((ofdma->of_node == dma_spec->np) &&
46 (ofdma->of_dma_nbcells == dma_spec->args_count)) {
47 ofdma->use_count++;
48 spin_unlock(&of_dma_lock);
38 return ofdma; 49 return ofdma;
50 }
51
52 spin_unlock(&of_dma_lock);
53
54 pr_debug("%s: can't find DMA controller %s\n", __func__,
55 dma_spec->np->full_name);
39 56
40 return NULL; 57 return NULL;
41} 58}
42 59
43/** 60/**
61 * of_dma_put_controller - Decrement use count for a registered DMA controller
62 * @of_dma: pointer to DMA controller data
63 *
64 * Decrements the use_count variable in the DMA data structure. This function
65 * should be called only when a valid pointer is returned from
66 * of_dma_get_controller() and no further accesses to data referenced by that
67 * pointer are needed.
68 */
69static void of_dma_put_controller(struct of_dma *ofdma)
70{
71 spin_lock(&of_dma_lock);
72 ofdma->use_count--;
73 spin_unlock(&of_dma_lock);
74}
75
76/**
44 * of_dma_controller_register - Register a DMA controller to DT DMA helpers 77 * of_dma_controller_register - Register a DMA controller to DT DMA helpers
45 * @np: device node of DMA controller 78 * @np: device node of DMA controller
46 * @of_dma_xlate: translation function which converts a phandle 79 * @of_dma_xlate: translation function which converts a phandle
@@ -81,9 +114,10 @@ int of_dma_controller_register(struct device_node *np,
81 ofdma->of_dma_nbcells = nbcells; 114 ofdma->of_dma_nbcells = nbcells;
82 ofdma->of_dma_xlate = of_dma_xlate; 115 ofdma->of_dma_xlate = of_dma_xlate;
83 ofdma->of_dma_data = data; 116 ofdma->of_dma_data = data;
117 ofdma->use_count = 0;
84 118
85 /* Now queue of_dma controller structure in list */ 119 /* Now queue of_dma controller structure in list */
86 list_add_tail_rcu(&ofdma->of_dma_controllers, &of_dma_list); 120 list_add_tail(&ofdma->of_dma_controllers, &of_dma_list);
87 121
88 return 0; 122 return 0;
89} 123}
@@ -95,15 +129,32 @@ EXPORT_SYMBOL_GPL(of_dma_controller_register);
95 * 129 *
96 * Memory allocated by of_dma_controller_register() is freed here. 130 * Memory allocated by of_dma_controller_register() is freed here.
97 */ 131 */
98void of_dma_controller_free(struct device_node *np) 132int of_dma_controller_free(struct device_node *np)
99{ 133{
100 struct of_dma *ofdma; 134 struct of_dma *ofdma;
101 135
102 ofdma = of_dma_find_controller(np); 136 spin_lock(&of_dma_lock);
103 if (ofdma) { 137
104 list_del_rcu(&ofdma->of_dma_controllers); 138 if (list_empty(&of_dma_list)) {
105 kfree(ofdma); 139 spin_unlock(&of_dma_lock);
140 return -ENODEV;
106 } 141 }
142
143 list_for_each_entry(ofdma, &of_dma_list, of_dma_controllers)
144 if (ofdma->of_node == np) {
145 if (ofdma->use_count) {
146 spin_unlock(&of_dma_lock);
147 return -EBUSY;
148 }
149
150 list_del(&ofdma->of_dma_controllers);
151 spin_unlock(&of_dma_lock);
152 kfree(ofdma);
153 return 0;
154 }
155
156 spin_unlock(&of_dma_lock);
157 return -ENODEV;
107} 158}
108EXPORT_SYMBOL_GPL(of_dma_controller_free); 159EXPORT_SYMBOL_GPL(of_dma_controller_free);
109 160
@@ -166,21 +217,15 @@ struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
166 if (of_dma_match_channel(np, name, i, &dma_spec)) 217 if (of_dma_match_channel(np, name, i, &dma_spec))
167 continue; 218 continue;
168 219
169 ofdma = of_dma_find_controller(dma_spec.np); 220 ofdma = of_dma_get_controller(&dma_spec);
170 if (!ofdma) {
171 pr_debug("%s: can't find DMA controller %s\n",
172 np->full_name, dma_spec.np->full_name);
173 continue;
174 }
175 221
176 if (dma_spec.args_count != ofdma->of_dma_nbcells) { 222 if (!ofdma)
177 pr_debug("%s: wrong #dma-cells for %s\n", np->full_name,
178 dma_spec.np->full_name);
179 continue; 223 continue;
180 }
181 224
182 chan = ofdma->of_dma_xlate(&dma_spec, ofdma); 225 chan = ofdma->of_dma_xlate(&dma_spec, ofdma);
183 226
227 of_dma_put_controller(ofdma);
228
184 of_node_put(dma_spec.np); 229 of_node_put(dma_spec.np);
185 230
186 if (chan) 231 if (chan)