aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVinod Koul <vinod.koul@intel.com>2017-11-14 00:02:28 -0500
committerVinod Koul <vinod.koul@intel.com>2017-11-14 00:02:28 -0500
commit575d34b6de071599317e8cc173ec7057126d1097 (patch)
tree302a7b28822f7fdfbba38ef8131df6f21a9a2703
parent049d0d38499822f9117d04d8fbaecf12814cbafa (diff)
parent7076a1e4a4ea926ba9ae3b5f4a5eb6dced0a902d (diff)
Merge branch 'topic/bcom' into for-linus
-rw-r--r--drivers/dma/Kconfig2
-rw-r--r--drivers/dma/bcm-sba-raid.c117
2 files changed, 38 insertions, 81 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index fadc4d8783bd..48cf8df7255f 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -115,7 +115,7 @@ config BCM_SBA_RAID
115 select DMA_ENGINE_RAID 115 select DMA_ENGINE_RAID
116 select ASYNC_TX_DISABLE_XOR_VAL_DMA 116 select ASYNC_TX_DISABLE_XOR_VAL_DMA
117 select ASYNC_TX_DISABLE_PQ_VAL_DMA 117 select ASYNC_TX_DISABLE_PQ_VAL_DMA
118 default ARCH_BCM_IPROC 118 default m if ARCH_BCM_IPROC
119 help 119 help
120 Enable support for Broadcom SBA RAID Engine. The SBA RAID 120 Enable support for Broadcom SBA RAID Engine. The SBA RAID
121 engine is available on most of the Broadcom iProc SoCs. It 121 engine is available on most of the Broadcom iProc SoCs. It
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 6c2c44724637..3956a018bf5a 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -1,9 +1,14 @@
1/* 1/*
2 * Copyright (C) 2017 Broadcom 2 * Copyright (C) 2017 Broadcom
3 * 3 *
4 * This program is free software; you can redistribute it and/or modify 4 * This program is free software; you can redistribute it and/or
5 * it under the terms of the GNU General Public License version 2 as 5 * modify it under the terms of the GNU General Public License as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation version 2.
7 *
8 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
9 * kind, whether express or implied; without even the implied warranty
10 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
7 */ 12 */
8 13
9/* 14/*
@@ -25,11 +30,8 @@
25 * 30 *
26 * The Broadcom SBA RAID driver does not require any register programming 31 * The Broadcom SBA RAID driver does not require any register programming
27 * except submitting request to SBA hardware device via mailbox channels. 32 * except submitting request to SBA hardware device via mailbox channels.
28 * This driver implements a DMA device with one DMA channel using a set 33 * This driver implements a DMA device with one DMA channel using a single
29 * of mailbox channels provided by Broadcom SoC specific ring manager 34 * mailbox channel provided by Broadcom SoC specific ring manager driver.
30 * driver. To exploit parallelism (as described above), all DMA request
31 * coming to SBA RAID DMA channel are broken down to smaller requests
32 * and submitted to multiple mailbox channels in round-robin fashion.
33 * For having more SBA DMA channels, we can create more SBA device nodes 35 * For having more SBA DMA channels, we can create more SBA device nodes
34 * in Broadcom SoC specific DTS based on number of hardware rings supported 36 * in Broadcom SoC specific DTS based on number of hardware rings supported
35 * by Broadcom SoC ring manager. 37 * by Broadcom SoC ring manager.
@@ -85,6 +87,7 @@
85#define SBA_CMD_GALOIS 0xe 87#define SBA_CMD_GALOIS 0xe
86 88
87#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192 89#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
90#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL 8
88 91
89/* Driver helper macros */ 92/* Driver helper macros */
90#define to_sba_request(tx) \ 93#define to_sba_request(tx) \
@@ -142,9 +145,7 @@ struct sba_device {
142 u32 max_cmds_pool_size; 145 u32 max_cmds_pool_size;
143 /* Mailbox client and Mailbox channels */ 146 struct mbox_client client;
144 struct mbox_client client; 147 struct mbox_client client;
145 int mchans_count; 148 struct mbox_chan *mchan;
146 atomic_t mchans_current;
147 struct mbox_chan **mchans;
148 struct device *mbox_dev; 149 struct device *mbox_dev;
149 /* DMA device and DMA channel */ 150 /* DMA device and DMA channel */
150 struct dma_device dma_dev; 151 struct dma_device dma_dev;
@@ -200,14 +201,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
200 201
201/* ====== General helper routines ===== */ 202/* ====== General helper routines ===== */
202 203
203static void sba_peek_mchans(struct sba_device *sba)
204{
205 int mchan_idx;
206
207 for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
208 mbox_client_peek_data(sba->mchans[mchan_idx]);
209}
210
211static struct sba_request *sba_alloc_request(struct sba_device *sba) 204static struct sba_request *sba_alloc_request(struct sba_device *sba)
212{ 205{
213 bool found = false; 206 bool found = false;
@@ -231,7 +224,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
231 * would have completed which will create more 224 * would have completed which will create more
232 * room for new requests. 225 * room for new requests.
233 */ 226 */
234 sba_peek_mchans(sba); 227 mbox_client_peek_data(sba->mchan);
235 return NULL; 228 return NULL;
236 } 229 }
237 230
@@ -369,15 +362,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
369static int sba_send_mbox_request(struct sba_device *sba, 362static int sba_send_mbox_request(struct sba_device *sba,
370 struct sba_request *req) 363 struct sba_request *req)
371{ 364{
372 int mchans_idx, ret = 0; 365 int ret = 0;
373
374 /* Select mailbox channel in round-robin fashion */
375 mchans_idx = atomic_inc_return(&sba->mchans_current);
376 mchans_idx = mchans_idx % sba->mchans_count;
377 366
378 /* Send message for the request */ 367 /* Send message for the request */
379 req->msg.error = 0; 368 req->msg.error = 0;
380 ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg); 369 ret = mbox_send_message(sba->mchan, &req->msg);
381 if (ret < 0) { 370 if (ret < 0) {
382 dev_err(sba->dev, "send message failed with error %d", ret); 371 dev_err(sba->dev, "send message failed with error %d", ret);
383 return ret; 372 return ret;
@@ -390,7 +379,7 @@ static int sba_send_mbox_request(struct sba_device *sba,
390 } 379 }
391 380
392 /* Signal txdone for mailbox channel */ 381 /* Signal txdone for mailbox channel */
393 mbox_client_txdone(sba->mchans[mchans_idx], ret); 382 mbox_client_txdone(sba->mchan, ret);
394 383
395 return ret; 384 return ret;
396} 385}
@@ -402,13 +391,8 @@ static void _sba_process_pending_requests(struct sba_device *sba)
402 u32 count; 391 u32 count;
403 struct sba_request *req; 392 struct sba_request *req;
404 393
405 /* 394 /* Process few pending requests */
406 * Process few pending requests 395 count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
407 *
408 * For now, we process (<number_of_mailbox_channels> * 8)
409 * number of requests at a time.
410 */
411 count = sba->mchans_count * 8;
412 while (!list_empty(&sba->reqs_pending_list) && count) { 396 while (!list_empty(&sba->reqs_pending_list) && count) {
413 /* Get the first pending request */ 397 /* Get the first pending request */
414 req = list_first_entry(&sba->reqs_pending_list, 398 req = list_first_entry(&sba->reqs_pending_list,
@@ -442,7 +426,9 @@ static void sba_process_received_request(struct sba_device *sba,
442 426
443 WARN_ON(tx->cookie < 0); 427 WARN_ON(tx->cookie < 0);
444 if (tx->cookie > 0) { 428 if (tx->cookie > 0) {
429 spin_lock_irqsave(&sba->reqs_lock, flags);
445 dma_cookie_complete(tx); 430 dma_cookie_complete(tx);
431 spin_unlock_irqrestore(&sba->reqs_lock, flags);
446 dmaengine_desc_get_callback_invoke(tx, NULL); 432 dmaengine_desc_get_callback_invoke(tx, NULL);
447 dma_descriptor_unmap(tx); 433 dma_descriptor_unmap(tx);
448 tx->callback = NULL; 434 tx->callback = NULL;
@@ -570,7 +556,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
570 if (ret == DMA_COMPLETE) 556 if (ret == DMA_COMPLETE)
571 return ret; 557 return ret;
572 558
573 sba_peek_mchans(sba); 559 mbox_client_peek_data(sba->mchan);
574 560
575 return dma_cookie_status(dchan, cookie, txstate); 561 return dma_cookie_status(dchan, cookie, txstate);
576} 562}
@@ -1637,7 +1623,7 @@ static int sba_async_register(struct sba_device *sba)
1637 1623
1638static int sba_probe(struct platform_device *pdev) 1624static int sba_probe(struct platform_device *pdev)
1639{ 1625{
1640 int i, ret = 0, mchans_count; 1626 int ret = 0;
1641 struct sba_device *sba; 1627 struct sba_device *sba;
1642 struct platform_device *mbox_pdev; 1628 struct platform_device *mbox_pdev;
1643 struct of_phandle_args args; 1629 struct of_phandle_args args;
@@ -1650,12 +1636,11 @@ static int sba_probe(struct platform_device *pdev)
1650 sba->dev = &pdev->dev; 1636 sba->dev = &pdev->dev;
1651 platform_set_drvdata(pdev, sba); 1637 platform_set_drvdata(pdev, sba);
1652 1638
1653 /* Number of channels equals number of mailbox channels */ 1639 /* Number of mailbox channels should be at least 1 */
1654 ret = of_count_phandle_with_args(pdev->dev.of_node, 1640 ret = of_count_phandle_with_args(pdev->dev.of_node,
1655 "mboxes", "#mbox-cells"); 1641 "mboxes", "#mbox-cells");
1656 if (ret <= 0) 1642 if (ret <= 0)
1657 return -ENODEV; 1643 return -ENODEV;
1658 mchans_count = ret;
1659 1644
1660 /* Determine SBA version from DT compatible string */ 1645 /* Determine SBA version from DT compatible string */
1661 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba")) 1646 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
@@ -1688,7 +1673,7 @@ static int sba_probe(struct platform_device *pdev)
1688 default: 1673 default:
1689 return -EINVAL; 1674 return -EINVAL;
1690 } 1675 }
1691 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count; 1676 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
1692 sba->max_cmd_per_req = sba->max_pq_srcs + 3; 1677 sba->max_cmd_per_req = sba->max_pq_srcs + 3;
1693 sba->max_xor_srcs = sba->max_cmd_per_req - 1; 1678 sba->max_xor_srcs = sba->max_cmd_per_req - 1;
1694 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size; 1679 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
@@ -1702,55 +1687,30 @@ static int sba_probe(struct platform_device *pdev)
1702 sba->client.knows_txdone = true; 1687 sba->client.knows_txdone = true;
1703 sba->client.tx_tout = 0; 1688 sba->client.tx_tout = 0;
1704 1689
1705 /* Allocate mailbox channel array */ 1690 /* Request mailbox channel */
1706 sba->mchans = devm_kcalloc(&pdev->dev, mchans_count, 1691 sba->mchan = mbox_request_channel(&sba->client, 0);
1707 sizeof(*sba->mchans), GFP_KERNEL); 1692 if (IS_ERR(sba->mchan)) {
1708 if (!sba->mchans) 1693 ret = PTR_ERR(sba->mchan);
1709 return -ENOMEM; 1694 goto fail_free_mchan;
1710
1711 /* Request mailbox channels */
1712 sba->mchans_count = 0;
1713 for (i = 0; i < mchans_count; i++) {
1714 sba->mchans[i] = mbox_request_channel(&sba->client, i);
1715 if (IS_ERR(sba->mchans[i])) {
1716 ret = PTR_ERR(sba->mchans[i]);
1717 goto fail_free_mchans;
1718 }
1719 sba->mchans_count++;
1720 } 1695 }
1721 atomic_set(&sba->mchans_current, 0);
1722 1696
1723 /* Find-out underlying mailbox device */ 1697 /* Find-out underlying mailbox device */
1724 ret = of_parse_phandle_with_args(pdev->dev.of_node, 1698 ret = of_parse_phandle_with_args(pdev->dev.of_node,
1725 "mboxes", "#mbox-cells", 0, &args); 1699 "mboxes", "#mbox-cells", 0, &args);
1726 if (ret) 1700 if (ret)
1727 goto fail_free_mchans; 1701 goto fail_free_mchan;
1728 mbox_pdev = of_find_device_by_node(args.np); 1702 mbox_pdev = of_find_device_by_node(args.np);
1729 of_node_put(args.np); 1703 of_node_put(args.np);
1730 if (!mbox_pdev) { 1704 if (!mbox_pdev) {
1731 ret = -ENODEV; 1705 ret = -ENODEV;
1732 goto fail_free_mchans; 1706 goto fail_free_mchan;
1733 } 1707 }
1734 sba->mbox_dev = &mbox_pdev->dev; 1708 sba->mbox_dev = &mbox_pdev->dev;
1735 1709
1736 /* All mailbox channels should be of same ring manager device */
1737 for (i = 1; i < mchans_count; i++) {
1738 ret = of_parse_phandle_with_args(pdev->dev.of_node,
1739 "mboxes", "#mbox-cells", i, &args);
1740 if (ret)
1741 goto fail_free_mchans;
1742 mbox_pdev = of_find_device_by_node(args.np);
1743 of_node_put(args.np);
1744 if (sba->mbox_dev != &mbox_pdev->dev) {
1745 ret = -EINVAL;
1746 goto fail_free_mchans;
1747 }
1748 }
1749
1750 /* Prealloc channel resource */ 1710 /* Prealloc channel resource */
1751 ret = sba_prealloc_channel_resources(sba); 1711 ret = sba_prealloc_channel_resources(sba);
1752 if (ret) 1712 if (ret)
1753 goto fail_free_mchans; 1713 goto fail_free_mchan;
1754 1714
1755 /* Check availability of debugfs */ 1715 /* Check availability of debugfs */
1756 if (!debugfs_initialized()) 1716 if (!debugfs_initialized())
@@ -1777,24 +1737,22 @@ skip_debugfs:
1777 goto fail_free_resources; 1737 goto fail_free_resources;
1778 1738
1779 /* Print device info */ 1739 /* Print device info */
1780 dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels", 1740 dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
1781 dma_chan_name(&sba->dma_chan), sba->ver+1, 1741 dma_chan_name(&sba->dma_chan), sba->ver+1,
1782 sba->mchans_count); 1742 dev_name(sba->mbox_dev));
1783 1743
1784 return 0; 1744 return 0;
1785 1745
1786fail_free_resources: 1746fail_free_resources:
1787 debugfs_remove_recursive(sba->root); 1747 debugfs_remove_recursive(sba->root);
1788 sba_freeup_channel_resources(sba); 1748 sba_freeup_channel_resources(sba);
1789fail_free_mchans: 1749fail_free_mchan:
1790 for (i = 0; i < sba->mchans_count; i++) 1750 mbox_free_channel(sba->mchan);
1791 mbox_free_channel(sba->mchans[i]);
1792 return ret; 1751 return ret;
1793} 1752}
1794 1753
1795static int sba_remove(struct platform_device *pdev) 1754static int sba_remove(struct platform_device *pdev)
1796{ 1755{
1797 int i;
1798 struct sba_device *sba = platform_get_drvdata(pdev); 1756 struct sba_device *sba = platform_get_drvdata(pdev);
1799 1757
1800 dma_async_device_unregister(&sba->dma_dev); 1758 dma_async_device_unregister(&sba->dma_dev);
@@ -1803,8 +1761,7 @@ static int sba_remove(struct platform_device *pdev)
1803 1761
1804 sba_freeup_channel_resources(sba); 1762 sba_freeup_channel_resources(sba);
1805 1763
1806 for (i = 0; i < sba->mchans_count; i++) 1764 mbox_free_channel(sba->mchan);
1807 mbox_free_channel(sba->mchans[i]);
1808 1765
1809 return 0; 1766 return 0;
1810} 1767}