aboutsummaryrefslogtreecommitdiffstats
path: root/mm/dmapool.c
diff options
context:
space:
mode:
authorSebastian Andrzej Siewior <bigeasy@linutronix.de>2014-10-09 18:28:50 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-10-09 22:25:59 -0400
commit01c2965f0723a25209d5cf4cac630ed0f6d0edf4 (patch)
tree4d3641ba5e946f22d22a0c06093318fa6086b701 /mm/dmapool.c
parent6f817f4cda68b09621312ec5ba84217bc5e37b3d (diff)
mm: dmapool: add/remove sysfs file outside of the pool lock
cat /sys/.../pools followed by removal of the device leads to: |====================================================== |[ INFO: possible circular locking dependency detected ] |3.17.0-rc4+ #1498 Not tainted |------------------------------------------------------- |rmmod/2505 is trying to acquire lock: | (s_active#28){++++.+}, at: [<c017f754>] kernfs_remove_by_name_ns+0x3c/0x88 | |but task is already holding lock: | (pools_lock){+.+.+.}, at: [<c011494c>] dma_pool_destroy+0x18/0x17c | |which lock already depends on the new lock. |the existing dependency chain (in reverse order) is: | |-> #1 (pools_lock){+.+.+.}: | [<c0114ae8>] show_pools+0x30/0xf8 | [<c0313210>] dev_attr_show+0x1c/0x48 | [<c0180e84>] sysfs_kf_seq_show+0x88/0x10c | [<c017f960>] kernfs_seq_show+0x24/0x28 | [<c013efc4>] seq_read+0x1b8/0x480 | [<c011e820>] vfs_read+0x8c/0x148 | [<c011ea10>] SyS_read+0x40/0x8c | [<c000e960>] ret_fast_syscall+0x0/0x48 | |-> #0 (s_active#28){++++.+}: | [<c017e9ac>] __kernfs_remove+0x258/0x2ec | [<c017f754>] kernfs_remove_by_name_ns+0x3c/0x88 | [<c0114a7c>] dma_pool_destroy+0x148/0x17c | [<c03ad288>] hcd_buffer_destroy+0x20/0x34 | [<c03a4780>] usb_remove_hcd+0x110/0x1a4 The problem is the lock order of pools_lock and kernfs_mutex in dma_pool_destroy() vs show_pools() call path. This patch breaks out the creation of the sysfs file outside of the pools_lock mutex. The newly added pools_reg_lock ensures that there is no race of create vs destroy code path in terms of whether or not the sysfs file has to be deleted (and was it deleted before we try to create a new one) and what to do if device_create_file() failed. Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/dmapool.c')
-rw-r--r--mm/dmapool.c43
1 file changed, 35 insertions, 8 deletions
diff --git a/mm/dmapool.c b/mm/dmapool.c
index ba8019b063e1..2372ed5a33d3 100644
--- a/mm/dmapool.c
+++ b/mm/dmapool.c
@@ -62,6 +62,7 @@ struct dma_page { /* cacheable header for 'allocation' bytes */
62}; 62};
63 63
64static DEFINE_MUTEX(pools_lock); 64static DEFINE_MUTEX(pools_lock);
65static DEFINE_MUTEX(pools_reg_lock);
65 66
66static ssize_t 67static ssize_t
67show_pools(struct device *dev, struct device_attribute *attr, char *buf) 68show_pools(struct device *dev, struct device_attribute *attr, char *buf)
@@ -132,6 +133,7 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
132{ 133{
133 struct dma_pool *retval; 134 struct dma_pool *retval;
134 size_t allocation; 135 size_t allocation;
136 bool empty = false;
135 137
136 if (align == 0) { 138 if (align == 0) {
137 align = 1; 139 align = 1;
@@ -172,15 +174,34 @@ struct dma_pool *dma_pool_create(const char *name, struct device *dev,
172 174
173 INIT_LIST_HEAD(&retval->pools); 175 INIT_LIST_HEAD(&retval->pools);
174 176
177 /*
178 * pools_lock ensures that the ->dma_pools list does not get corrupted.
179 * pools_reg_lock ensures that there is not a race between
180 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
181 * when the first invocation of dma_pool_create() failed on
182 * device_create_file() and the second assumes that it has been done (I
183 * know it is a short window).
184 */
185 mutex_lock(&pools_reg_lock);
175 mutex_lock(&pools_lock); 186 mutex_lock(&pools_lock);
176 if (list_empty(&dev->dma_pools) && 187 if (list_empty(&dev->dma_pools))
177 device_create_file(dev, &dev_attr_pools)) { 188 empty = true;
178 kfree(retval); 189 list_add(&retval->pools, &dev->dma_pools);
179 retval = NULL;
180 } else
181 list_add(&retval->pools, &dev->dma_pools);
182 mutex_unlock(&pools_lock); 190 mutex_unlock(&pools_lock);
183 191 if (empty) {
192 int err;
193
194 err = device_create_file(dev, &dev_attr_pools);
195 if (err) {
196 mutex_lock(&pools_lock);
197 list_del(&retval->pools);
198 mutex_unlock(&pools_lock);
199 mutex_unlock(&pools_reg_lock);
200 kfree(retval);
201 return NULL;
202 }
203 }
204 mutex_unlock(&pools_reg_lock);
184 return retval; 205 return retval;
185} 206}
186EXPORT_SYMBOL(dma_pool_create); 207EXPORT_SYMBOL(dma_pool_create);
@@ -251,11 +272,17 @@ static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
251 */ 272 */
252void dma_pool_destroy(struct dma_pool *pool) 273void dma_pool_destroy(struct dma_pool *pool)
253{ 274{
275 bool empty = false;
276
277 mutex_lock(&pools_reg_lock);
254 mutex_lock(&pools_lock); 278 mutex_lock(&pools_lock);
255 list_del(&pool->pools); 279 list_del(&pool->pools);
256 if (pool->dev && list_empty(&pool->dev->dma_pools)) 280 if (pool->dev && list_empty(&pool->dev->dma_pools))
257 device_remove_file(pool->dev, &dev_attr_pools); 281 empty = true;
258 mutex_unlock(&pools_lock); 282 mutex_unlock(&pools_lock);
283 if (empty)
284 device_remove_file(pool->dev, &dev_attr_pools);
285 mutex_unlock(&pools_reg_lock);
259 286
260 while (!list_empty(&pool->page_list)) { 287 while (!list_empty(&pool->page_list)) {
261 struct dma_page *page; 288 struct dma_page *page;