-rw-r--r--  Documentation/dmaengine/provider.txt   7
-rw-r--r--  drivers/dma/qcom/bam_dma.c              6
-rw-r--r--  drivers/dma/qcom/hidma.c               37
-rw-r--r--  drivers/dma/qcom/hidma.h                7
-rw-r--r--  drivers/dma/qcom/hidma_ll.c            11
-rw-r--r--  drivers/dma/qcom/hidma_mgmt.c          16
-rw-r--r--  include/linux/dma/qcom_bam_dma.h       79
-rw-r--r--  include/linux/dmaengine.h               4
8 files changed, 151 insertions(+), 16 deletions(-)
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
index e33bc1c8ed2c..bfadbfdf13ed 100644
--- a/Documentation/dmaengine/provider.txt
+++ b/Documentation/dmaengine/provider.txt
@@ -395,6 +395,13 @@ where to put them)
       when DMA_CTRL_REUSE is already set
     - Terminating the channel
 
+  * DMA_PREP_CMD
+    - If set, the client driver tells the DMA controller that the data
+      passed to the DMA API is command data.
+    - Interpretation of command data is DMA controller specific. It can be
+      used for issuing commands to other peripherals, register reads, or
+      register writes for which the descriptor should be in a different
+      format from normal data descriptors.
 
 General Design Notes
 --------------------
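
As an illustration of the new flag from the client side, here is a minimal
sketch of how a peripheral driver might request a command descriptor through
the usual slave API. The names chan, sgl, sg_len, cmd_done and host are
hypothetical and assumed to be set up elsewhere by the client:

	struct dma_async_tx_descriptor *txd;

	/* sgl describes command data rather than payload data */
	txd = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				      DMA_PREP_CMD | DMA_PREP_INTERRUPT);
	if (!txd)
		return -EINVAL;

	txd->callback = cmd_done;	/* hypothetical completion handler */
	txd->callback_param = host;
	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
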
diff --git a/drivers/dma/qcom/bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 03c4eb3fd314..6d89fb6a6a92 100644
--- a/drivers/dma/qcom/bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -65,6 +65,7 @@ struct bam_desc_hw {
 #define DESC_FLAG_EOT		BIT(14)
 #define DESC_FLAG_EOB		BIT(13)
 #define DESC_FLAG_NWD		BIT(12)
+#define DESC_FLAG_CMD		BIT(11)
 
 struct bam_async_desc {
 	struct virt_dma_desc vd;
@@ -645,6 +646,9 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
 	unsigned int curr_offset = 0;
 
 	do {
+		if (flags & DMA_PREP_CMD)
+			desc->flags |= cpu_to_le16(DESC_FLAG_CMD);
+
 		desc->addr = cpu_to_le32(sg_dma_address(sg) +
 					 curr_offset);
 
@@ -960,7 +964,7 @@ static void bam_start_dma(struct bam_chan *bchan)
 
 	/* set any special flags on the last descriptor */
 	if (async_desc->num_desc == async_desc->xfer_len)
-		desc[async_desc->xfer_len - 1].flags =
+		desc[async_desc->xfer_len - 1].flags |=
 			cpu_to_le16(async_desc->flags);
 	else
 		desc[async_desc->xfer_len - 1].flags |=
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
index 34fb6afd229b..e3669850aef4 100644
--- a/drivers/dma/qcom/hidma.c
+++ b/drivers/dma/qcom/hidma.c
@@ -411,7 +411,40 @@ hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
 		return NULL;
 
 	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
-				     src, dest, len, flags);
+				     src, dest, len, flags,
+				     HIDMA_TRE_MEMCPY);
+
+	/* Place descriptor in prepared list */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	list_add_tail(&mdesc->node, &mchan->prepared);
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	return &mdesc->desc;
+}
+
+static struct dma_async_tx_descriptor *
+hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
+		size_t len, unsigned long flags)
+{
+	struct hidma_chan *mchan = to_hidma_chan(dmach);
+	struct hidma_desc *mdesc = NULL;
+	struct hidma_dev *mdma = mchan->dmadev;
+	unsigned long irqflags;
+
+	/* Get free descriptor */
+	spin_lock_irqsave(&mchan->lock, irqflags);
+	if (!list_empty(&mchan->free)) {
+		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
+		list_del(&mdesc->node);
+	}
+	spin_unlock_irqrestore(&mchan->lock, irqflags);
+
+	if (!mdesc)
+		return NULL;
+
+	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
+				     value, dest, len, flags,
+				     HIDMA_TRE_MEMSET);
 
 	/* Place descriptor in prepared list */
 	spin_lock_irqsave(&mchan->lock, irqflags);
@@ -776,6 +809,7 @@ static int hidma_probe(struct platform_device *pdev)
 	pm_runtime_get_sync(dmadev->ddev.dev);
 
 	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
+	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
 	if (WARN_ON(!pdev->dev.dma_mask)) {
 		rc = -ENXIO;
 		goto dmafree;
@@ -786,6 +820,7 @@ static int hidma_probe(struct platform_device *pdev)
 	dmadev->dev_trca = trca;
 	dmadev->trca_resource = trca_resource;
 	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
+	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
 	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
 	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
 	dmadev->ddev.device_tx_status = hidma_tx_status;
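
With DMA_MEMSET now advertised in the capability mask, a client can fill a
DMA-mapped buffer through the generic dmaengine memset helper. A minimal
sketch, assuming chan, dest and len are hypothetical names already set up by
the client via the usual channel-request and DMA-mapping calls:

	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* dest is a dma_addr_t previously obtained from the DMA mapping API */
	txd = dmaengine_prep_dma_memset(chan, dest, 0x0, len,
					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!txd)
		return -ENOMEM;

	cookie = dmaengine_submit(txd);
	dma_async_issue_pending(chan);
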
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
index 41e0aa283828..5f9966e82c0b 100644
--- a/drivers/dma/qcom/hidma.h
+++ b/drivers/dma/qcom/hidma.h
@@ -28,6 +28,11 @@
 #define HIDMA_TRE_DEST_LOW_IDX		4
 #define HIDMA_TRE_DEST_HI_IDX		5
 
+enum tre_type {
+	HIDMA_TRE_MEMCPY = 3,
+	HIDMA_TRE_MEMSET = 4,
+};
+
 struct hidma_tre {
 	atomic_t allocated;		/* if this channel is allocated	*/
 	bool queued;			/* flag whether this is pending	*/
@@ -150,7 +155,7 @@ void hidma_ll_start(struct hidma_lldev *llhndl);
 int hidma_ll_disable(struct hidma_lldev *lldev);
 int hidma_ll_enable(struct hidma_lldev *llhndl);
 void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
-	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
+	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
 void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
 int hidma_ll_setup(struct hidma_lldev *lldev);
 struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
diff --git a/drivers/dma/qcom/hidma_ll.c b/drivers/dma/qcom/hidma_ll.c
index 1530a661518d..4999e266b2de 100644
--- a/drivers/dma/qcom/hidma_ll.c
+++ b/drivers/dma/qcom/hidma_ll.c
@@ -105,10 +105,6 @@ enum ch_state {
 	HIDMA_CH_STOPPED = 4,
 };
 
-enum tre_type {
-	HIDMA_TRE_MEMCPY = 3,
-};
-
 enum err_code {
 	HIDMA_EVRE_STATUS_COMPLETE = 1,
 	HIDMA_EVRE_STATUS_ERROR = 4,
@@ -174,8 +170,7 @@ int hidma_ll_request(struct hidma_lldev *lldev, u32 sig, const char *dev_name,
 	tre->err_info = 0;
 	tre->lldev = lldev;
 	tre_local = &tre->tre_local[0];
-	tre_local[HIDMA_TRE_CFG_IDX] = HIDMA_TRE_MEMCPY;
-	tre_local[HIDMA_TRE_CFG_IDX] |= (lldev->chidx & 0xFF) << 8;
+	tre_local[HIDMA_TRE_CFG_IDX] = (lldev->chidx & 0xFF) << 8;
 	tre_local[HIDMA_TRE_CFG_IDX] |= BIT(16);	/* set IEOB */
 	*tre_ch = i;
 	if (callback)
@@ -607,7 +602,7 @@ int hidma_ll_disable(struct hidma_lldev *lldev)
 
 void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
 				  dma_addr_t src, dma_addr_t dest, u32 len,
-				  u32 flags)
+				  u32 flags, u32 txntype)
 {
 	struct hidma_tre *tre;
 	u32 *tre_local;
@@ -626,6 +621,8 @@ void hidma_ll_set_transfer_params(struct hidma_lldev *lldev, u32 tre_ch,
 	}
 
 	tre_local = &tre->tre_local[0];
+	tre_local[HIDMA_TRE_CFG_IDX] &= ~GENMASK(7, 0);
+	tre_local[HIDMA_TRE_CFG_IDX] |= txntype;
 	tre_local[HIDMA_TRE_LEN_IDX] = len;
 	tre_local[HIDMA_TRE_SRC_LOW_IDX] = lower_32_bits(src);
 	tre_local[HIDMA_TRE_SRC_HI_IDX] = upper_32_bits(src);
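
For reference, the TRE configuration word that the two hunks above compose
ends up looking roughly like this. The sketch is derived only from the code
in this patch, not from hardware documentation:

	u32 cfg = 0;

	/* set once when the TRE is requested (hidma_ll_request) */
	cfg |= (lldev->chidx & 0xFF) << 8;	/* bits [15:8]: channel index */
	cfg |= BIT(16);				/* bit 16: IEOB interrupt     */

	/* refreshed on every transfer (hidma_ll_set_transfer_params) */
	cfg &= ~GENMASK(7, 0);			/* clear the previous type    */
	cfg |= HIDMA_TRE_MEMCPY;		/* or HIDMA_TRE_MEMSET        */
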
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
index 5a0991bc4787..7335e2eb9b72 100644
--- a/drivers/dma/qcom/hidma_mgmt.c
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -28,7 +28,7 @@
 
 #include "hidma_mgmt.h"
 
-#define HIDMA_QOS_N_OFFSET		0x300
+#define HIDMA_QOS_N_OFFSET		0x700
 #define HIDMA_CFG_OFFSET		0x400
 #define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
 #define HIDMA_MAX_XACTIONS_OFFSET	0x420
@@ -227,7 +227,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
 		goto out;
 	}
 
-	if (max_write_request) {
+	if (max_write_request &&
+	    (max_write_request != mgmtdev->max_write_request)) {
 		dev_info(&pdev->dev, "overriding max-write-burst-bytes: %d\n",
 			 max_write_request);
 		mgmtdev->max_write_request = max_write_request;
@@ -240,7 +241,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
 		goto out;
 	}
-	if (max_read_request) {
+	if (max_read_request &&
+	    (max_read_request != mgmtdev->max_read_request)) {
 		dev_info(&pdev->dev, "overriding max-read-burst-bytes: %d\n",
 			 max_read_request);
 		mgmtdev->max_read_request = max_read_request;
@@ -253,7 +255,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "max-write-transactions missing\n");
 		goto out;
 	}
-	if (max_wr_xactions) {
+	if (max_wr_xactions &&
+	    (max_wr_xactions != mgmtdev->max_wr_xactions)) {
 		dev_info(&pdev->dev, "overriding max-write-transactions: %d\n",
 			 max_wr_xactions);
 		mgmtdev->max_wr_xactions = max_wr_xactions;
@@ -266,7 +269,8 @@ static int hidma_mgmt_probe(struct platform_device *pdev)
 		dev_err(&pdev->dev, "max-read-transactions missing\n");
 		goto out;
 	}
-	if (max_rd_xactions) {
+	if (max_rd_xactions &&
+	    (max_rd_xactions != mgmtdev->max_rd_xactions)) {
 		dev_info(&pdev->dev, "overriding max-read-transactions: %d\n",
 			 max_rd_xactions);
 		mgmtdev->max_rd_xactions = max_rd_xactions;
@@ -354,7 +358,7 @@ static int __init hidma_mgmt_of_populate_channels(struct device_node *np)
 	struct platform_device_info pdevinfo;
 	struct of_phandle_args out_irq;
 	struct device_node *child;
-	struct resource *res;
+	struct resource *res = NULL;
 	const __be32 *cell;
 	int ret = 0, size, i, num;
 	u64 addr, addr_size;
diff --git a/include/linux/dma/qcom_bam_dma.h b/include/linux/dma/qcom_bam_dma.h
new file mode 100644
index 000000000000..077d43a358e5
--- /dev/null
+++ b/include/linux/dma/qcom_bam_dma.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QCOM_BAM_DMA_H
+#define _QCOM_BAM_DMA_H
+
+#include <asm/byteorder.h>
+
+/*
+ * This data type corresponds to the native Command Element
+ * supported by BAM DMA Engine.
+ *
+ * @cmd_and_addr - upper 8 bits command and lower 24 bits register address.
+ * @data - for write command: content to be written into peripheral register.
+ *	   for read command: dest addr to write peripheral register value.
+ * @mask - register mask.
+ * @reserved - for future usage.
+ *
+ */
+struct bam_cmd_element {
+	__le32 cmd_and_addr;
+	__le32 data;
+	__le32 mask;
+	__le32 reserved;
+};
+
+/*
+ * This enum indicates the command type in a command element
+ */
+enum bam_command_type {
+	BAM_WRITE_COMMAND = 0,
+	BAM_READ_COMMAND,
+};
+
+/*
+ * bam_prep_ce_le32 - Wrapper function to prepare a single BAM command
+ * element with the data already in le32 format.
+ *
+ * @bam_ce: bam command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read in le32
+ */
+static inline void
+bam_prep_ce_le32(struct bam_cmd_element *bam_ce, u32 addr,
+		 enum bam_command_type cmd, __le32 data)
+{
+	bam_ce->cmd_and_addr =
+		cpu_to_le32((addr & 0xffffff) | ((cmd & 0xff) << 24));
+	bam_ce->data = data;
+	bam_ce->mask = cpu_to_le32(0xffffffff);
+}
+
+/*
+ * bam_prep_ce - Wrapper function to prepare a single BAM command element
+ * with the data.
+ *
+ * @bam_ce: BAM command element
+ * @addr: target address
+ * @cmd: BAM command
+ * @data: actual data for write and dest addr for read
+ */
+static inline void
+bam_prep_ce(struct bam_cmd_element *bam_ce, u32 addr,
+	    enum bam_command_type cmd, u32 data)
+{
+	bam_prep_ce_le32(bam_ce, addr, cmd, cpu_to_le32(data));
+}
+#endif
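
To tie the helpers above to the DMA_PREP_CMD flag, here is a hedged sketch of
how a peripheral driver might build two command elements and hand them to the
BAM engine. PERIPH_REG_CFG, PERIPH_REG_STATUS, cfg_val, result_addr, dev and
tx_chan are hypothetical names used only for this example:

	struct bam_cmd_element *ce;
	struct scatterlist sg;
	struct dma_async_tx_descriptor *txd;

	ce = kcalloc(2, sizeof(*ce), GFP_KERNEL);
	if (!ce)
		return -ENOMEM;

	/* one register write followed by one register read-back */
	bam_prep_ce(&ce[0], PERIPH_REG_CFG, BAM_WRITE_COMMAND, cfg_val);
	bam_prep_ce(&ce[1], PERIPH_REG_STATUS, BAM_READ_COMMAND, result_addr);

	sg_init_one(&sg, ce, 2 * sizeof(*ce));
	if (!dma_map_sg(dev, &sg, 1, DMA_TO_DEVICE)) {
		kfree(ce);
		return -ENOMEM;
	}

	txd = dmaengine_prep_slave_sg(tx_chan, &sg, 1, DMA_MEM_TO_DEV,
				      DMA_PREP_CMD);
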
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 533680860865..dd4de1d40166 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -186,6 +186,9 @@ struct dma_interleaved_template {
  *	on the result of this operation
  * @DMA_CTRL_REUSE: client can reuse the descriptor and submit again till
  *	cleared or freed
+ * @DMA_PREP_CMD: tell the driver that the data passed to the DMA API is
+ *	command data and the descriptor should be in a different format from
+ *	normal data descriptors.
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -195,6 +198,7 @@ enum dma_ctrl_flags {
 	DMA_PREP_CONTINUE = (1 << 4),
 	DMA_PREP_FENCE = (1 << 5),
 	DMA_CTRL_REUSE = (1 << 6),
+	DMA_PREP_CMD = (1 << 7),
 };
 
 /**