author     Mark Brown <broonie@kernel.org>   2019-01-23 12:29:53 -0500
committer  Mark Brown <broonie@kernel.org>   2019-01-23 12:29:53 -0500
commit     f0125f1a559be1033055f44e511174aaa75b60cc (patch)
tree       83d67533a010578ce2bc018302fc5ec2e291c9a4 /drivers/spi
parent     51eea52d26d4939b788b7244c28cf47e902b4c4c (diff)
spi: Go back to immediate teardown
Commit 412e6037324 ("spi: core: avoid waking pump thread from spi_sync
instead run teardown delayed") introduced regressions on some boards,
apparently connected to spi_mem not triggering shutdown properly any
more. Since we've thus far been unable to figure out exactly where the
breakage is, revert the optimisation for now.
Reported-by: Jon Hunter <jonathanh@nvidia.com>
Signed-off-by: Mark Brown <broonie@kernel.org>
Cc: kernel@martin.sperl.org
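
For context, the net effect of the revert is that __spi_pump_messages() once
again tears the controller down inline, in the message-pump kthread, as soon
as the queue runs dry, instead of scheduling that work a second later. Below
is a condensed sketch of the restored idle path; the comments are added here
for illustration, and the exact code is in the second hunk of the diff:

	/* Only tear down from the kworker: if we got here from a caller's
	 * own context (e.g. the spi_sync() path), punt to the thread. */
	if (!in_kthread) {
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
		return;
	}

	/* Mark the controller as idling and drop the lock for the teardown */
	ctlr->busy = false;
	ctlr->idling = true;
	spin_unlock_irqrestore(&ctlr->queue_lock, flags);

	/* Free the scratch TX/RX buffers and unprepare the hardware */
	kfree(ctlr->dummy_rx);
	ctlr->dummy_rx = NULL;
	kfree(ctlr->dummy_tx);
	ctlr->dummy_tx = NULL;
	if (ctlr->unprepare_transfer_hardware &&
	    ctlr->unprepare_transfer_hardware(ctlr))
		dev_err(&ctlr->dev, "failed to unprepare transfer hardware\n");

	/* Drop the runtime-PM reference so the device can autosuspend */
	if (ctlr->auto_runtime_pm) {
		pm_runtime_mark_last_busy(ctlr->dev.parent);
		pm_runtime_put_autosuspend(ctlr->dev.parent);
	}
	trace_spi_controller_idle(ctlr);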
Diffstat (limited to 'drivers/spi')
-rw-r--r--  drivers/spi/spi.c  122
1 file changed, 33 insertions(+), 89 deletions(-)
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 06b9139664a3..13f447a67d67 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -1225,7 +1225,7 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		return;
 	}
 
-	/* If another context is idling the device then defer to kthread */
+	/* If another context is idling the device then defer */
 	if (ctlr->idling) {
 		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
@@ -1239,10 +1239,34 @@ static void __spi_pump_messages(struct spi_controller *ctlr, bool in_kthread)
 		return;
 	}
 
-	/* schedule idle teardown with a delay of 1 second */
-	kthread_mod_delayed_work(&ctlr->kworker,
-				 &ctlr->pump_idle_teardown,
-				 HZ);
+	/* Only do teardown in the thread */
+	if (!in_kthread) {
+		kthread_queue_work(&ctlr->kworker,
+				   &ctlr->pump_messages);
+		spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+		return;
+	}
+
+	ctlr->busy = false;
+	ctlr->idling = true;
+	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
+
+	kfree(ctlr->dummy_rx);
+	ctlr->dummy_rx = NULL;
+	kfree(ctlr->dummy_tx);
+	ctlr->dummy_tx = NULL;
+	if (ctlr->unprepare_transfer_hardware &&
+	    ctlr->unprepare_transfer_hardware(ctlr))
+		dev_err(&ctlr->dev,
+			"failed to unprepare transfer hardware\n");
+	if (ctlr->auto_runtime_pm) {
+		pm_runtime_mark_last_busy(ctlr->dev.parent);
+		pm_runtime_put_autosuspend(ctlr->dev.parent);
+	}
+	trace_spi_controller_idle(ctlr);
+
+	spin_lock_irqsave(&ctlr->queue_lock, flags);
+	ctlr->idling = false;
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 	return;
 }
@@ -1335,77 +1359,6 @@ static void spi_pump_messages(struct kthread_work *work)
 	__spi_pump_messages(ctlr, true);
 }
 
-/**
- * spi_pump_idle_teardown - kthread delayed work function which tears down
- *                          the controller settings after some delay
- * @work: pointer to kthread work struct contained in the controller struct
- */
-static void spi_pump_idle_teardown(struct kthread_work *work)
-{
-	struct spi_controller *ctlr =
-		container_of(work, struct spi_controller,
-			     pump_idle_teardown.work);
-	unsigned long flags;
-
-	/* Lock queue */
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
-
-	/* Make sure we are not already running a message */
-	if (ctlr->cur_msg)
-		goto out;
-
-	/* if there is anything in the list then exit */
-	if (!list_empty(&ctlr->queue))
-		goto out;
-
-	/* if the controller is running then exit */
-	if (ctlr->running)
-		goto out;
-
-	/* if the controller is busy then exit */
-	if (ctlr->busy)
-		goto out;
-
-	/* if the controller is idling then exit
-	 * this is actually a bit strange and would indicate that
-	 * this function is scheduled twice, which should not happen
-	 */
-	if (ctlr->idling)
-		goto out;
-
-	/* set up the initial states */
-	ctlr->busy = false;
-	ctlr->idling = true;
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-
-	/* free dummy receive buffers */
-	kfree(ctlr->dummy_rx);
-	ctlr->dummy_rx = NULL;
-	kfree(ctlr->dummy_tx);
-	ctlr->dummy_tx = NULL;
-
-	/* unprepare hardware */
-	if (ctlr->unprepare_transfer_hardware &&
-	    ctlr->unprepare_transfer_hardware(ctlr))
-		dev_err(&ctlr->dev,
-			"failed to unprepare transfer hardware\n");
-	/* handle pm */
-	if (ctlr->auto_runtime_pm) {
-		pm_runtime_mark_last_busy(ctlr->dev.parent);
-		pm_runtime_put_autosuspend(ctlr->dev.parent);
-	}
-
-	/* mark controller as idle */
-	trace_spi_controller_idle(ctlr);
-
-	/* finally put us from idling into stopped */
-	spin_lock_irqsave(&ctlr->queue_lock, flags);
-	ctlr->idling = false;
-
-out:
-	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
-}
-
 static int spi_init_queue(struct spi_controller *ctlr)
 {
 	struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
@@ -1421,8 +1374,7 @@ static int spi_init_queue(struct spi_controller *ctlr)
 		return PTR_ERR(ctlr->kworker_task);
 	}
 	kthread_init_work(&ctlr->pump_messages, spi_pump_messages);
-	kthread_init_delayed_work(&ctlr->pump_idle_teardown,
-				  spi_pump_idle_teardown);
+
 	/*
 	 * Controller config will indicate if this controller should run the
 	 * message pump with high (realtime) priority to reduce the transfer
@@ -1494,16 +1446,7 @@ void spi_finalize_current_message(struct spi_controller *ctlr)
 	spin_lock_irqsave(&ctlr->queue_lock, flags);
 	ctlr->cur_msg = NULL;
 	ctlr->cur_msg_prepared = false;
-
-	/* if there is something queued, then wake the queue */
-	if (!list_empty(&ctlr->queue))
-		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
-	else
-		/* otherwise schedule delayed teardown */
-		kthread_mod_delayed_work(&ctlr->kworker,
-					 &ctlr->pump_idle_teardown,
-					 HZ);
-
+	kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
 
 	trace_spi_message_done(mesg);
@@ -1608,7 +1551,7 @@ static int __spi_queued_transfer(struct spi_device *spi,
 	msg->status = -EINPROGRESS;
 
 	list_add_tail(&msg->queue, &ctlr->queue);
-	if (need_pump)
+	if (!ctlr->busy && need_pump)
 		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);
 
 	spin_unlock_irqrestore(&ctlr->queue_lock, flags);
@@ -3783,3 +3726,4 @@ err0:
  * include needing to have boardinfo data structures be much more public.
  */
 postcore_initcall(spi_init);
+
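A design note on the restored behaviour: teardown is confined to the kworker,
presumably because __spi_pump_messages() is also entered from a caller's own
context via the spi_sync() fast path, where freeing buffers and suspending the
hardware could block an unrelated task. The companion hunks follow from this:
spi_finalize_current_message() goes back to unconditionally kicking
pump_messages, letting the worker find the empty queue and idle the hardware
right away, and __spi_queued_transfer() regains its !ctlr->busy test so that
queuing behind an in-flight message does not needlessly wake the worker. A
minimal sketch of that restored enqueue path, assuming the queue lock is held
as in the __spi_queued_transfer() hunk above:

	/* Queue the message; only wake the pump if it is not already
	 * busy working through the queue. */
	msg->status = -EINPROGRESS;
	list_add_tail(&msg->queue, &ctlr->queue);
	if (!ctlr->busy && need_pump)
		kthread_queue_work(&ctlr->kworker, &ctlr->pump_messages);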