author    Guennadi Liakhovetski <g.liakhovetski@gmx.de>  2012-12-12 09:38:14 -0500
committer Chris Ball <cjb@laptop.org>  2013-02-11 13:28:28 -0500
commit    8047310ee984b3efe932ee5b561f2523396466dd
tree      7cb156d38a9b3b7c3d0b65dd6f43d1d9e5a5fa0b /drivers/mmc/host/sh_mmcif.c
parent    99eb9d8df996fc1695d4de687873875fbd8f6719
mmc: sh_mmcif: fix a race, causing an Oops on SMP
Oopses have been observed on SMP in the sh-mmcif IRQ thread, when the two IRQ
threads run simultaneously on two CPUs. Also take care to guard the timeout
work and the DMA completion callback from possible NULL-pointer dereferences
and races.

Signed-off-by: Guennadi Liakhovetski <g.liakhovetski@gmx.de>
Signed-off-by: Chris Ball <cjb@laptop.org>
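The fix serializes the two threaded IRQ handlers on a per-host mutex and re-reads host->mrq only after the lock is taken, bailing out early if the other thread has already completed the request. Below is a minimal user-space sketch of that pattern, not driver code: pthreads stand in for the kernel's threaded IRQ handlers, and the names fake_host, fake_request and handle_irq_thread are purely illustrative.

/* Illustrative user-space analogue of the race fix: two "IRQ threads"
 * race to complete one request; a mutex plus a NULL re-check after the
 * lock is taken keeps the loser from dereferencing a stale pointer.
 * All names here are hypothetical, not taken from sh_mmcif.c. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_request { int opcode; };

struct fake_host {
	pthread_mutex_t thread_lock;   /* plays the role of host->thread_lock */
	struct fake_request *mrq;      /* plays the role of host->mrq */
};

static void *handle_irq_thread(void *arg)
{
	struct fake_host *host = arg;

	pthread_mutex_lock(&host->thread_lock);

	/* Re-read under the lock: the other thread may have finished it. */
	struct fake_request *mrq = host->mrq;
	if (!mrq) {
		printf("thread %lu: NULL mrq, request already completed\n",
		       (unsigned long)pthread_self());
		pthread_mutex_unlock(&host->thread_lock);
		return NULL;
	}

	printf("thread %lu: completing request, opcode %d\n",
	       (unsigned long)pthread_self(), mrq->opcode);
	host->mrq = NULL;              /* the "mmc_request_done()" step */
	free(mrq);

	pthread_mutex_unlock(&host->thread_lock);
	return NULL;
}

int main(void)
{
	struct fake_host host = { .thread_lock = PTHREAD_MUTEX_INITIALIZER };
	pthread_t a, b;

	host.mrq = malloc(sizeof(*host.mrq));
	host.mrq->opcode = 17;

	/* Without the lock and the NULL re-check, both threads could
	 * dereference and free host->mrq - the kind of Oops this commit
	 * addresses. */
	pthread_create(&a, NULL, handle_irq_thread, &host);
	pthread_create(&b, NULL, handle_irq_thread, &host);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}

Build with something like "cc -pthread race_sketch.c"; exactly one thread prints the completion line, the other sees NULL and bails, mirroring the early return added to sh_mmcif_irqt() below.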
Diffstat (limited to 'drivers/mmc/host/sh_mmcif.c')
 drivers/mmc/host/sh_mmcif.c | 39 ++++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index de4b6d073599..3cfe383dc22b 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -56,6 +56,7 @@
 #include <linux/mmc/sh_mmcif.h>
 #include <linux/mmc/slot-gpio.h>
 #include <linux/mod_devicetable.h>
+#include <linux/mutex.h>
 #include <linux/pagemap.h>
 #include <linux/platform_device.h>
 #include <linux/pm_qos.h>
@@ -196,6 +197,7 @@ enum mmcif_state {
 	STATE_IDLE,
 	STATE_REQUEST,
 	STATE_IOS,
+	STATE_TIMEOUT,
 };
 
 enum mmcif_wait_for {
@@ -232,6 +234,7 @@ struct sh_mmcif_host {
 	int sg_blkidx;
 	bool power;
 	bool card_present;
+	struct mutex thread_lock;
 
 	/* DMA support */
 	struct dma_chan *chan_rx;
@@ -255,11 +258,11 @@ static inline void sh_mmcif_bitclr(struct sh_mmcif_host *host,
 static void mmcif_dma_complete(void *arg)
 {
 	struct sh_mmcif_host *host = arg;
-	struct mmc_data *data = host->mrq->data;
+	struct mmc_request *mrq = host->mrq;
 
 	dev_dbg(&host->pd->dev, "Command completed\n");
 
-	if (WARN(!data, "%s: NULL data in DMA completion!\n",
+	if (WARN(!mrq || !mrq->data, "%s: NULL data in DMA completion!\n",
 		 dev_name(&host->pd->dev)))
 		return;
 
@@ -1113,11 +1116,21 @@ static bool sh_mmcif_end_cmd(struct sh_mmcif_host *host)
 static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 {
 	struct sh_mmcif_host *host = dev_id;
-	struct mmc_request *mrq = host->mrq;
+	struct mmc_request *mrq;
 	bool wait = false;
 
 	cancel_delayed_work_sync(&host->timeout_work);
 
+	mutex_lock(&host->thread_lock);
+
+	mrq = host->mrq;
+	if (!mrq) {
+		dev_dbg(&host->pd->dev, "IRQ thread state %u, wait %u: NULL mrq!\n",
+			host->state, host->wait_for);
+		mutex_unlock(&host->thread_lock);
+		return IRQ_HANDLED;
+	}
+
 	/*
 	 * All handlers return true, if processing continues, and false, if the
 	 * request has to be completed - successfully or not
@@ -1125,6 +1138,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 	switch (host->wait_for) {
 	case MMCIF_WAIT_FOR_REQUEST:
 		/* We're too late, the timeout has already kicked in */
+		mutex_unlock(&host->thread_lock);
 		return IRQ_HANDLED;
 	case MMCIF_WAIT_FOR_CMD:
 		/* Wait for data? */
@@ -1166,6 +1180,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 	if (wait) {
 		schedule_delayed_work(&host->timeout_work, host->timeout);
 		/* Wait for more data */
+		mutex_unlock(&host->thread_lock);
 		return IRQ_HANDLED;
 	}
 
@@ -1179,6 +1194,7 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 		sh_mmcif_stop_cmd(host, mrq);
 		if (!mrq->stop->error) {
 			schedule_delayed_work(&host->timeout_work, host->timeout);
+			mutex_unlock(&host->thread_lock);
 			return IRQ_HANDLED;
 		}
 	}
@@ -1189,6 +1205,8 @@ static irqreturn_t sh_mmcif_irqt(int irq, void *dev_id)
 	host->mrq = NULL;
 	mmc_request_done(host->mmc, mrq);
 
+	mutex_unlock(&host->thread_lock);
+
 	return IRQ_HANDLED;
 }
 
@@ -1262,11 +1280,24 @@ static void mmcif_timeout_work(struct work_struct *work)
 	struct delayed_work *d = container_of(work, struct delayed_work, work);
 	struct sh_mmcif_host *host = container_of(d, struct sh_mmcif_host, timeout_work);
 	struct mmc_request *mrq = host->mrq;
+	unsigned long flags;
 
 	if (host->dying)
 		/* Don't run after mmc_remove_host() */
 		return;
 
+	dev_dbg(&host->pd->dev, "Timeout waiting for %u, opcode %u\n",
+		host->wait_for, mrq->cmd->opcode);
+
+	spin_lock_irqsave(&host->lock, flags);
+	if (host->state == STATE_IDLE) {
+		spin_unlock_irqrestore(&host->lock, flags);
+		return;
+	}
+
+	host->state = STATE_TIMEOUT;
+	spin_unlock_irqrestore(&host->lock, flags);
+
 	/*
 	 * Handle races with cancel_delayed_work(), unless
 	 * cancel_delayed_work_sync() is used
@@ -1410,6 +1441,8 @@ static int sh_mmcif_probe(struct platform_device *pdev)
 		goto erqcd;
 	}
 
+	mutex_init(&host->thread_lock);
+
 	clk_disable(host->hclk);
 	ret = mmc_add_host(mmc);
 	if (ret < 0)