author		Anup Patel <anup.patel@broadcom.com>	2017-10-03 01:22:58 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2017-10-23 02:05:47 -0400
commit		4e9f8187aecb00d90ec385f5061c91549103a3cf
tree		88486f2e78bdf3419a6d94c1673b60f2196ea52e
parent		5d74aa7f641a8bf778b87941ae6a955121f64f7d
dmaengine: bcm-sba-raid: Use only single mailbox channel
Each mailbox channel used by the Broadcom SBA RAID driver is a
separate HW ring.

Currently, the Broadcom SBA RAID driver creates one DMA channel
backed by one or more mailbox channels. When more than one mailbox
channel is used for a DMA channel, sba_requests are distributed
evenly across the mailbox channels, which results in sba_requests
completing out-of-order.
This out-of-order completion of sba_requests breaks the
dma_async_is_complete() API, because it assumes that DMA cookies
complete in order.
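For reference, the completion check in dma_async_is_complete()
(include/linux/dmaengine.h) is roughly the sketch below; it derives
the status of a cookie purely from its position relative to the last
completed and last issued cookies, which is only valid when cookies
complete in issue order:

static inline enum dma_status dma_async_is_complete(dma_cookie_t cookie,
			dma_cookie_t last_complete, dma_cookie_t last_used)
{
	/* The two branches handle wrap-around of the cookie counter */
	if (last_complete <= last_used) {
		if ((cookie <= last_complete) || (cookie > last_used))
			return DMA_COMPLETE;
	} else {
		if ((cookie <= last_complete) && (cookie > last_used))
			return DMA_COMPLETE;
	}
	return DMA_IN_PROGRESS;
}

If a later cookie gets marked completed before an earlier one, the
earlier cookie is also reported as DMA_COMPLETE even though its
request may still be in flight.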
To preserve the correct behaviour of the dma_async_is_complete()
API, this patch updates the Broadcom SBA RAID driver to use only a
single mailbox channel. If additional mailbox channels are specified
in the DT, they are ignored.
Signed-off-by: Anup Patel <anup.patel@broadcom.com>
Reviewed-by: Ray Jui <ray.jui@broadcom.com>
Reviewed-by: Scott Branden <scott.branden@broadcom.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
 drivers/dma/bcm-sba-raid.c | 104
 1 file changed, 27 insertions(+), 77 deletions(-)
diff --git a/drivers/dma/bcm-sba-raid.c b/drivers/dma/bcm-sba-raid.c
index 15c558508345..409da59d9315 100644
--- a/drivers/dma/bcm-sba-raid.c
+++ b/drivers/dma/bcm-sba-raid.c
@@ -25,11 +25,8 @@
  *
  * The Broadcom SBA RAID driver does not require any register programming
  * except submitting request to SBA hardware device via mailbox channels.
- * This driver implements a DMA device with one DMA channel using a set
- * of mailbox channels provided by Broadcom SoC specific ring manager
- * driver. To exploit parallelism (as described above), all DMA request
- * coming to SBA RAID DMA channel are broken down to smaller requests
- * and submitted to multiple mailbox channels in round-robin fashion.
+ * This driver implements a DMA device with one DMA channel using a single
+ * mailbox channel provided by Broadcom SoC specific ring manager driver.
  * For having more SBA DMA channels, we can create more SBA device nodes
  * in Broadcom SoC specific DTS based on number of hardware rings supported
  * by Broadcom SoC ring manager.
@@ -85,6 +82,7 @@
 #define SBA_CMD_GALOIS			0xe
 
 #define SBA_MAX_REQ_PER_MBOX_CHANNEL	8192
+#define SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL	8
 
 /* Driver helper macros */
 #define to_sba_request(tx)		\
@@ -142,9 +140,7 @@ struct sba_device {
 	u32 max_cmds_pool_size;
 	/* Maibox client and Mailbox channels */
 	struct mbox_client client;
-	int mchans_count;
-	atomic_t mchans_current;
-	struct mbox_chan **mchans;
+	struct mbox_chan *mchan;
 	struct device *mbox_dev;
 	/* DMA device and DMA channel */
 	struct dma_device dma_dev;
@@ -200,14 +196,6 @@ static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
 
 /* ====== General helper routines ===== */
 
-static void sba_peek_mchans(struct sba_device *sba)
-{
-	int mchan_idx;
-
-	for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
-		mbox_client_peek_data(sba->mchans[mchan_idx]);
-}
-
 static struct sba_request *sba_alloc_request(struct sba_device *sba)
 {
 	bool found = false;
@@ -231,7 +219,7 @@ static struct sba_request *sba_alloc_request(struct sba_device *sba)
 		 * would have completed which will create more
 		 * room for new requests.
 		 */
-		sba_peek_mchans(sba);
+		mbox_client_peek_data(sba->mchan);
 		return NULL;
 	}
 
@@ -369,15 +357,11 @@ static void sba_cleanup_pending_requests(struct sba_device *sba)
 static int sba_send_mbox_request(struct sba_device *sba,
				 struct sba_request *req)
 {
-	int mchans_idx, ret = 0;
-
-	/* Select mailbox channel in round-robin fashion */
-	mchans_idx = atomic_inc_return(&sba->mchans_current);
-	mchans_idx = mchans_idx % sba->mchans_count;
+	int ret = 0;
 
 	/* Send message for the request */
 	req->msg.error = 0;
-	ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
+	ret = mbox_send_message(sba->mchan, &req->msg);
 	if (ret < 0) {
 		dev_err(sba->dev, "send message failed with error %d", ret);
 		return ret;
@@ -390,7 +374,7 @@ static int sba_send_mbox_request(struct sba_device *sba,
 	}
 
 	/* Signal txdone for mailbox channel */
-	mbox_client_txdone(sba->mchans[mchans_idx], ret);
+	mbox_client_txdone(sba->mchan, ret);
 
 	return ret;
 }
@@ -402,13 +386,8 @@ static void _sba_process_pending_requests(struct sba_device *sba)
 	u32 count;
 	struct sba_request *req;
 
-	/*
-	 * Process few pending requests
-	 *
-	 * For now, we process (<number_of_mailbox_channels> * 8)
-	 * number of requests at a time.
-	 */
-	count = sba->mchans_count * 8;
+	/* Process few pending requests */
+	count = SBA_MAX_MSG_SEND_PER_MBOX_CHANNEL;
 	while (!list_empty(&sba->reqs_pending_list) && count) {
 		/* Get the first pending request */
 		req = list_first_entry(&sba->reqs_pending_list,
@@ -572,7 +551,7 @@ static enum dma_status sba_tx_status(struct dma_chan *dchan,
 	if (ret == DMA_COMPLETE)
 		return ret;
 
-	sba_peek_mchans(sba);
+	mbox_client_peek_data(sba->mchan);
 
 	return dma_cookie_status(dchan, cookie, txstate);
 }
@@ -1639,7 +1618,7 @@ static int sba_async_register(struct sba_device *sba)
 
 static int sba_probe(struct platform_device *pdev)
 {
-	int i, ret = 0, mchans_count;
+	int ret = 0;
 	struct sba_device *sba;
 	struct platform_device *mbox_pdev;
 	struct of_phandle_args args;
@@ -1652,12 +1631,11 @@ static int sba_probe(struct platform_device *pdev)
 	sba->dev = &pdev->dev;
 	platform_set_drvdata(pdev, sba);
 
-	/* Number of channels equals number of mailbox channels */
+	/* Number of mailbox channels should be atleast 1 */
 	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
 	if (ret <= 0)
 		return -ENODEV;
-	mchans_count = ret;
 
 	/* Determine SBA version from DT compatible string */
 	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
@@ -1690,7 +1668,7 @@ static int sba_probe(struct platform_device *pdev)
 	default:
 		return -EINVAL;
 	}
-	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
+	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL;
 	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
 	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
 	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
@@ -1704,55 +1682,30 @@ static int sba_probe(struct platform_device *pdev)
 	sba->client.knows_txdone = true;
 	sba->client.tx_tout = 0;
 
-	/* Allocate mailbox channel array */
-	sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
-				   sizeof(*sba->mchans), GFP_KERNEL);
-	if (!sba->mchans)
-		return -ENOMEM;
-
-	/* Request mailbox channels */
-	sba->mchans_count = 0;
-	for (i = 0; i < mchans_count; i++) {
-		sba->mchans[i] = mbox_request_channel(&sba->client, i);
-		if (IS_ERR(sba->mchans[i])) {
-			ret = PTR_ERR(sba->mchans[i]);
-			goto fail_free_mchans;
-		}
-		sba->mchans_count++;
+	/* Request mailbox channel */
+	sba->mchan = mbox_request_channel(&sba->client, 0);
+	if (IS_ERR(sba->mchan)) {
+		ret = PTR_ERR(sba->mchan);
+		goto fail_free_mchan;
 	}
-	atomic_set(&sba->mchans_current, 0);
 
 	/* Find-out underlying mailbox device */
 	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
 	if (ret)
-		goto fail_free_mchans;
+		goto fail_free_mchan;
 	mbox_pdev = of_find_device_by_node(args.np);
 	of_node_put(args.np);
 	if (!mbox_pdev) {
 		ret = -ENODEV;
-		goto fail_free_mchans;
+		goto fail_free_mchan;
 	}
 	sba->mbox_dev = &mbox_pdev->dev;
 
-	/* All mailbox channels should be of same ring manager device */
-	for (i = 1; i < mchans_count; i++) {
-		ret = of_parse_phandle_with_args(pdev->dev.of_node,
-						 "mboxes", "#mbox-cells", i, &args);
-		if (ret)
-			goto fail_free_mchans;
-		mbox_pdev = of_find_device_by_node(args.np);
-		of_node_put(args.np);
-		if (sba->mbox_dev != &mbox_pdev->dev) {
-			ret = -EINVAL;
-			goto fail_free_mchans;
-		}
-	}
-
 	/* Prealloc channel resource */
 	ret = sba_prealloc_channel_resources(sba);
 	if (ret)
-		goto fail_free_mchans;
+		goto fail_free_mchan;
 
 	/* Check availability of debugfs */
 	if (!debugfs_initialized())
@@ -1779,24 +1732,22 @@ skip_debugfs:
 		goto fail_free_resources;
 
 	/* Print device info */
-	dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
+	dev_info(sba->dev, "%s using SBAv%d mailbox channel from %s",
		 dma_chan_name(&sba->dma_chan), sba->ver+1,
-		 sba->mchans_count);
+		 dev_name(sba->mbox_dev));
 
 	return 0;
 
 fail_free_resources:
 	debugfs_remove_recursive(sba->root);
 	sba_freeup_channel_resources(sba);
-fail_free_mchans:
-	for (i = 0; i < sba->mchans_count; i++)
-		mbox_free_channel(sba->mchans[i]);
+fail_free_mchan:
+	mbox_free_channel(sba->mchan);
 	return ret;
 }
 
 static int sba_remove(struct platform_device *pdev)
 {
-	int i;
 	struct sba_device *sba = platform_get_drvdata(pdev);
 
 	dma_async_device_unregister(&sba->dma_dev);
@@ -1805,8 +1756,7 @@ static int sba_remove(struct platform_device *pdev)
 
 	sba_freeup_channel_resources(sba);
 
-	for (i = 0; i < sba->mchans_count; i++)
-		mbox_free_channel(sba->mchans[i]);
+	mbox_free_channel(sba->mchan);
 
 	return 0;
 }