about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-11-15 16:39:18 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-15 16:39:18 -0500
commit3c18767a45650009d02537677ffb7997bd402a2c (patch)
treebfe4d355208b0e0156367d9ca4dfa4a2c6d8d17f
parent19b9aaf8a5fa634b2f16c3a2cfa819b74991273e (diff)
parent1f90a2162fb3cdfd9c44380bf16209af00f7acbe (diff)
Merge tag 'mailbox-v4.15' of git://git.linaro.org/landing-teams/working/fujitsu/integration
Pull mailbox updates from Jassi Brar:
 "Change to POLL api and fixes for FlexRM and OMAP driver.

  Summary:

   - Core: Prefer ACK method over POLL, if both supported

   - Test: use flag instead of special character

   - FlexRM: Usual driver internal minor churn

   - Omap: fix error path"

* tag 'mailbox-v4.15' of git://git.linaro.org/landing-teams/working/fujitsu/integration:
  mailbox/omap: unregister mbox class
  mailbox: mailbox-test: don't rely on rx_buffer content to signal data ready
  mailbox: reset txdone_method TXDONE_BY_POLL if client knows_txdone
  mailbox: Build Broadcom FlexRM driver as loadable module for iProc SOCs
  mailbox: bcm-flexrm-mailbox: Use common GPL comment header
  mailbox: bcm-flexrm-mailbox: add depends on ARCH_BCM_IPROC
  mailbox: bcm-flexrm-mailbox: Print ring number in errors and warnings
  mailbox: bcm-flexrm-mailbox: Fix FlexRM ring flush sequence
-rw-r--r--drivers/mailbox/Kconfig3
-rw-r--r--drivers/mailbox/bcm-flexrm-mailbox.c66
-rw-r--r--drivers/mailbox/mailbox-test.c11
-rw-r--r--drivers/mailbox/mailbox.c4
-rw-r--r--drivers/mailbox/omap-mailbox.c6
-rw-r--r--drivers/mailbox/pcc.c4
6 files changed, 66 insertions, 28 deletions
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index c5731e5e3c6c..ba2f1525f4ee 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -163,9 +163,10 @@ config BCM_PDC_MBOX
163config BCM_FLEXRM_MBOX 163config BCM_FLEXRM_MBOX
164 tristate "Broadcom FlexRM Mailbox" 164 tristate "Broadcom FlexRM Mailbox"
165 depends on ARM64 165 depends on ARM64
166 depends on ARCH_BCM_IPROC || COMPILE_TEST
166 depends on HAS_DMA 167 depends on HAS_DMA
167 select GENERIC_MSI_IRQ_DOMAIN 168 select GENERIC_MSI_IRQ_DOMAIN
168 default ARCH_BCM_IPROC 169 default m if ARCH_BCM_IPROC
169 help 170 help
170 Mailbox implementation of the Broadcom FlexRM ring manager, 171 Mailbox implementation of the Broadcom FlexRM ring manager,
171 which provides access to various offload engines on Broadcom 172 which provides access to various offload engines on Broadcom
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
index ae6146311934..a8cf4333a68f 100644
--- a/drivers/mailbox/bcm-flexrm-mailbox.c
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -1,10 +1,18 @@
1/* Broadcom FlexRM Mailbox Driver 1/*
2 *
3 * Copyright (C) 2017 Broadcom 2 * Copyright (C) 2017 Broadcom
4 * 3 *
5 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or
6 * it under the terms of the GNU General Public License version 2 as 5 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation. 6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13
14/*
15 * Broadcom FlexRM Mailbox Driver
8 * 16 *
9 * Each Broadcom FlexSparx4 offload engine is implemented as an 17 * Each Broadcom FlexSparx4 offload engine is implemented as an
10 * extension to Broadcom FlexRM ring manager. The FlexRM ring 18 * extension to Broadcom FlexRM ring manager. The FlexRM ring
@@ -1116,8 +1124,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
1116 err = flexrm_cmpl_desc_to_error(desc); 1124 err = flexrm_cmpl_desc_to_error(desc);
1117 if (err < 0) { 1125 if (err < 0) {
1118 dev_warn(ring->mbox->dev, 1126 dev_warn(ring->mbox->dev,
1119 "got completion desc=0x%lx with error %d", 1127 "ring%d got completion desc=0x%lx with error %d\n",
1120 (unsigned long)desc, err); 1128 ring->num, (unsigned long)desc, err);
1121 } 1129 }
1122 1130
1123 /* Determine request id from completion descriptor */ 1131 /* Determine request id from completion descriptor */
@@ -1127,8 +1135,8 @@ static int flexrm_process_completions(struct flexrm_ring *ring)
1127 msg = ring->requests[reqid]; 1135 msg = ring->requests[reqid];
1128 if (!msg) { 1136 if (!msg) {
1129 dev_warn(ring->mbox->dev, 1137 dev_warn(ring->mbox->dev,
1130 "null msg pointer for completion desc=0x%lx", 1138 "ring%d null msg pointer for completion desc=0x%lx\n",
1131 (unsigned long)desc); 1139 ring->num, (unsigned long)desc);
1132 continue; 1140 continue;
1133 } 1141 }
1134 1142
@@ -1238,7 +1246,9 @@ static int flexrm_startup(struct mbox_chan *chan)
1238 ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool, 1246 ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
1239 GFP_KERNEL, &ring->bd_dma_base); 1247 GFP_KERNEL, &ring->bd_dma_base);
1240 if (!ring->bd_base) { 1248 if (!ring->bd_base) {
1241 dev_err(ring->mbox->dev, "can't allocate BD memory\n"); 1249 dev_err(ring->mbox->dev,
1250 "can't allocate BD memory for ring%d\n",
1251 ring->num);
1242 ret = -ENOMEM; 1252 ret = -ENOMEM;
1243 goto fail; 1253 goto fail;
1244 } 1254 }
@@ -1261,7 +1271,9 @@ static int flexrm_startup(struct mbox_chan *chan)
1261 ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool, 1271 ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
1262 GFP_KERNEL, &ring->cmpl_dma_base); 1272 GFP_KERNEL, &ring->cmpl_dma_base);
1263 if (!ring->cmpl_base) { 1273 if (!ring->cmpl_base) {
1264 dev_err(ring->mbox->dev, "can't allocate completion memory\n"); 1274 dev_err(ring->mbox->dev,
1275 "can't allocate completion memory for ring%d\n",
1276 ring->num);
1265 ret = -ENOMEM; 1277 ret = -ENOMEM;
1266 goto fail_free_bd_memory; 1278 goto fail_free_bd_memory;
1267 } 1279 }
@@ -1269,7 +1281,8 @@ static int flexrm_startup(struct mbox_chan *chan)
1269 1281
1270 /* Request IRQ */ 1282 /* Request IRQ */
1271 if (ring->irq == UINT_MAX) { 1283 if (ring->irq == UINT_MAX) {
1272 dev_err(ring->mbox->dev, "ring IRQ not available\n"); 1284 dev_err(ring->mbox->dev,
1285 "ring%d IRQ not available\n", ring->num);
1273 ret = -ENODEV; 1286 ret = -ENODEV;
1274 goto fail_free_cmpl_memory; 1287 goto fail_free_cmpl_memory;
1275 } 1288 }
@@ -1278,7 +1291,8 @@ static int flexrm_startup(struct mbox_chan *chan)
1278 flexrm_irq_thread, 1291 flexrm_irq_thread,
1279 0, dev_name(ring->mbox->dev), ring); 1292 0, dev_name(ring->mbox->dev), ring);
1280 if (ret) { 1293 if (ret) {
1281 dev_err(ring->mbox->dev, "failed to request ring IRQ\n"); 1294 dev_err(ring->mbox->dev,
1295 "failed to request ring%d IRQ\n", ring->num);
1282 goto fail_free_cmpl_memory; 1296 goto fail_free_cmpl_memory;
1283 } 1297 }
1284 ring->irq_requested = true; 1298 ring->irq_requested = true;
@@ -1291,7 +1305,9 @@ static int flexrm_startup(struct mbox_chan *chan)
1291 &ring->irq_aff_hint); 1305 &ring->irq_aff_hint);
1292 ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint); 1306 ret = irq_set_affinity_hint(ring->irq, &ring->irq_aff_hint);
1293 if (ret) { 1307 if (ret) {
1294 dev_err(ring->mbox->dev, "failed to set IRQ affinity hint\n"); 1308 dev_err(ring->mbox->dev,
1309 "failed to set IRQ affinity hint for ring%d\n",
1310 ring->num);
1295 goto fail_free_irq; 1311 goto fail_free_irq;
1296 } 1312 }
1297 1313
@@ -1365,8 +1381,8 @@ static void flexrm_shutdown(struct mbox_chan *chan)
1365 /* Disable/inactivate ring */ 1381 /* Disable/inactivate ring */
1366 writel_relaxed(0x0, ring->regs + RING_CONTROL); 1382 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1367 1383
1368 /* Flush ring with timeout of 1s */ 1384 /* Set ring flush state */
1369 timeout = 1000; 1385 timeout = 1000; /* timeout of 1s */
1370 writel_relaxed(BIT(CONTROL_FLUSH_SHIFT), 1386 writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
1371 ring->regs + RING_CONTROL); 1387 ring->regs + RING_CONTROL);
1372 do { 1388 do {
@@ -1374,7 +1390,23 @@ static void flexrm_shutdown(struct mbox_chan *chan)
1374 FLUSH_DONE_MASK) 1390 FLUSH_DONE_MASK)
1375 break; 1391 break;
1376 mdelay(1); 1392 mdelay(1);
1377 } while (timeout--); 1393 } while (--timeout);
1394 if (!timeout)
1395 dev_err(ring->mbox->dev,
1396 "setting ring%d flush state timedout\n", ring->num);
1397
1398 /* Clear ring flush state */
1399 timeout = 1000; /* timeout of 1s */
1400 writel_relaxed(0x0, ring + RING_CONTROL);
1401 do {
1402 if (!(readl_relaxed(ring + RING_FLUSH_DONE) &
1403 FLUSH_DONE_MASK))
1404 break;
1405 mdelay(1);
1406 } while (--timeout);
1407 if (!timeout)
1408 dev_err(ring->mbox->dev,
1409 "clearing ring%d flush state timedout\n", ring->num);
1378 1410
1379 /* Abort all in-flight requests */ 1411 /* Abort all in-flight requests */
1380 for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) { 1412 for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c
index 97fb956bb6e0..93f3d4d61fa7 100644
--- a/drivers/mailbox/mailbox-test.c
+++ b/drivers/mailbox/mailbox-test.c
@@ -30,6 +30,7 @@
30#define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \ 30#define MBOX_HEXDUMP_MAX_LEN (MBOX_HEXDUMP_LINE_LEN * \
31 (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE)) 31 (MBOX_MAX_MSG_LEN / MBOX_BYTES_PER_LINE))
32 32
33static bool mbox_data_ready;
33static struct dentry *root_debugfs_dir; 34static struct dentry *root_debugfs_dir;
34 35
35struct mbox_test_device { 36struct mbox_test_device {
@@ -152,16 +153,14 @@ out:
152 153
153static bool mbox_test_message_data_ready(struct mbox_test_device *tdev) 154static bool mbox_test_message_data_ready(struct mbox_test_device *tdev)
154{ 155{
155 unsigned char data; 156 bool data_ready;
156 unsigned long flags; 157 unsigned long flags;
157 158
158 spin_lock_irqsave(&tdev->lock, flags); 159 spin_lock_irqsave(&tdev->lock, flags);
159 data = tdev->rx_buffer[0]; 160 data_ready = mbox_data_ready;
160 spin_unlock_irqrestore(&tdev->lock, flags); 161 spin_unlock_irqrestore(&tdev->lock, flags);
161 162
162 if (data != '\0') 163 return data_ready;
163 return true;
164 return false;
165} 164}
166 165
167static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf, 166static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
@@ -223,6 +222,7 @@ static ssize_t mbox_test_message_read(struct file *filp, char __user *userbuf,
223 *(touser + l) = '\0'; 222 *(touser + l) = '\0';
224 223
225 memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN); 224 memset(tdev->rx_buffer, 0, MBOX_MAX_MSG_LEN);
225 mbox_data_ready = false;
226 226
227 spin_unlock_irqrestore(&tdev->lock, flags); 227 spin_unlock_irqrestore(&tdev->lock, flags);
228 228
@@ -292,6 +292,7 @@ static void mbox_test_receive_message(struct mbox_client *client, void *message)
292 message, MBOX_MAX_MSG_LEN); 292 message, MBOX_MAX_MSG_LEN);
293 memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN); 293 memcpy(tdev->rx_buffer, message, MBOX_MAX_MSG_LEN);
294 } 294 }
295 mbox_data_ready = true;
295 spin_unlock_irqrestore(&tdev->lock, flags); 296 spin_unlock_irqrestore(&tdev->lock, flags);
296 297
297 wake_up_interruptible(&tdev->waitq); 298 wake_up_interruptible(&tdev->waitq);
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 537f4f6d009b..674b35f402f5 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -351,7 +351,7 @@ struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
351 init_completion(&chan->tx_complete); 351 init_completion(&chan->tx_complete);
352 352
353 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) 353 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
354 chan->txdone_method |= TXDONE_BY_ACK; 354 chan->txdone_method = TXDONE_BY_ACK;
355 355
356 spin_unlock_irqrestore(&chan->lock, flags); 356 spin_unlock_irqrestore(&chan->lock, flags);
357 357
@@ -418,7 +418,7 @@ void mbox_free_channel(struct mbox_chan *chan)
418 spin_lock_irqsave(&chan->lock, flags); 418 spin_lock_irqsave(&chan->lock, flags);
419 chan->cl = NULL; 419 chan->cl = NULL;
420 chan->active_req = NULL; 420 chan->active_req = NULL;
421 if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) 421 if (chan->txdone_method == TXDONE_BY_ACK)
422 chan->txdone_method = TXDONE_BY_POLL; 422 chan->txdone_method = TXDONE_BY_POLL;
423 423
424 module_put(chan->mbox->dev->driver->owner); 424 module_put(chan->mbox->dev->driver->owner);
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index c5e8b9cb170d..2517038a8452 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -906,7 +906,11 @@ static int __init omap_mbox_init(void)
906 mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size, 906 mbox_kfifo_size = max_t(unsigned int, mbox_kfifo_size,
907 sizeof(mbox_msg_t)); 907 sizeof(mbox_msg_t));
908 908
909 return platform_driver_register(&omap_mbox_driver); 909 err = platform_driver_register(&omap_mbox_driver);
910 if (err)
911 class_unregister(&omap_mbox_class);
912
913 return err;
910} 914}
911subsys_initcall(omap_mbox_init); 915subsys_initcall(omap_mbox_init);
912 916
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index e5a69679cfa2..3ef7f036ceea 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -265,7 +265,7 @@ struct mbox_chan *pcc_mbox_request_channel(struct mbox_client *cl,
265 init_completion(&chan->tx_complete); 265 init_completion(&chan->tx_complete);
266 266
267 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone) 267 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
268 chan->txdone_method |= TXDONE_BY_ACK; 268 chan->txdone_method = TXDONE_BY_ACK;
269 269
270 spin_unlock_irqrestore(&chan->lock, flags); 270 spin_unlock_irqrestore(&chan->lock, flags);
271 271
@@ -311,7 +311,7 @@ void pcc_mbox_free_channel(struct mbox_chan *chan)
311 spin_lock_irqsave(&chan->lock, flags); 311 spin_lock_irqsave(&chan->lock, flags);
312 chan->cl = NULL; 312 chan->cl = NULL;
313 chan->active_req = NULL; 313 chan->active_req = NULL;
314 if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK)) 314 if (chan->txdone_method == TXDONE_BY_ACK)
315 chan->txdone_method = TXDONE_BY_POLL; 315 chan->txdone_method = TXDONE_BY_POLL;
316 316
317 spin_unlock_irqrestore(&chan->lock, flags); 317 spin_unlock_irqrestore(&chan->lock, flags);