aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorVinod Koul <vinod.koul@intel.com>2016-03-14 01:48:22 -0400
committerVinod Koul <vinod.koul@intel.com>2016-03-14 01:48:22 -0400
commit254efeec3154bb0f373f10a069d5ad05cff9d310 (patch)
tree4e3be2dbf6c9438ce96738d991a40abffe724680
parent8bce4c87657af3dc4625e873ec1201205e44375b (diff)
parent67a2003e060739747cfa3ea9b0d88b3d321ebf24 (diff)
Merge branch 'topic/qcom' into for-linus
-rw-r--r--Documentation/ABI/testing/sysfs-platform-hidma-mgmt97
-rw-r--r--Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt89
-rw-r--r--drivers/dma/Kconfig11
-rw-r--r--drivers/dma/Makefile2
-rw-r--r--drivers/dma/qcom/Kconfig29
-rw-r--r--drivers/dma/qcom/Makefile3
-rw-r--r--drivers/dma/qcom/bam_dma.c (renamed from drivers/dma/qcom_bam_dma.c)23
-rw-r--r--drivers/dma/qcom/hidma.c706
-rw-r--r--drivers/dma/qcom/hidma.h160
-rw-r--r--drivers/dma/qcom/hidma_mgmt.c302
-rw-r--r--drivers/dma/qcom/hidma_mgmt.h39
-rw-r--r--drivers/dma/qcom/hidma_mgmt_sys.c295
12 files changed, 1736 insertions, 20 deletions
diff --git a/Documentation/ABI/testing/sysfs-platform-hidma-mgmt b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt
new file mode 100644
index 000000000000..c2fb5d033f0e
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-platform-hidma-mgmt
@@ -0,0 +1,97 @@
1What: /sys/devices/platform/hidma-mgmt*/chanops/chan*/priority
2 /sys/devices/platform/QCOM8060:*/chanops/chan*/priority
3Date: Nov 2015
4KernelVersion: 4.4
5Contact:	"Sinan Kaya <okaya@codeaurora.org>"
6Description:
7 Contains either 0 or 1 and indicates if the DMA channel is a
8 low priority (0) or high priority (1) channel.
9
10What: /sys/devices/platform/hidma-mgmt*/chanops/chan*/weight
11 /sys/devices/platform/QCOM8060:*/chanops/chan*/weight
12Date: Nov 2015
13KernelVersion: 4.4
14Contact:	"Sinan Kaya <okaya@codeaurora.org>"
15Description:
16 Contains 0..15 and indicates the weight of the channel among
17 equal priority channels during round robin scheduling.
18
19What: /sys/devices/platform/hidma-mgmt*/chreset_timeout_cycles
20 /sys/devices/platform/QCOM8060:*/chreset_timeout_cycles
21Date: Nov 2015
22KernelVersion: 4.4
23Contact:	"Sinan Kaya <okaya@codeaurora.org>"
24Description:
25 Contains the platform specific cycle value to wait after a
26 reset command is issued. If the value is chosen too short,
27 then the HW will issue a reset failure interrupt. The value
28 is platform specific and should not be changed without
29		consultation.
30
31What: /sys/devices/platform/hidma-mgmt*/dma_channels
32 /sys/devices/platform/QCOM8060:*/dma_channels
33Date: Nov 2015
34KernelVersion: 4.4
35Contact:	"Sinan Kaya <okaya@codeaurora.org>"
36Description:
37 Contains the number of dma channels supported by one instance
38 of HIDMA hardware. The value may change from chip to chip.
39
40What: /sys/devices/platform/hidma-mgmt*/hw_version_major
41 /sys/devices/platform/QCOM8060:*/hw_version_major
42Date: Nov 2015
43KernelVersion: 4.4
44Contact:	"Sinan Kaya <okaya@codeaurora.org>"
45Description:
46 Version number major for the hardware.
47
48What: /sys/devices/platform/hidma-mgmt*/hw_version_minor
49 /sys/devices/platform/QCOM8060:*/hw_version_minor
50Date: Nov 2015
51KernelVersion: 4.4
52Contact:	"Sinan Kaya <okaya@codeaurora.org>"
53Description:
54 Version number minor for the hardware.
55
56What: /sys/devices/platform/hidma-mgmt*/max_rd_xactions
57 /sys/devices/platform/QCOM8060:*/max_rd_xactions
58Date: Nov 2015
59KernelVersion: 4.4
60Contact:	"Sinan Kaya <okaya@codeaurora.org>"
61Description:
62 Contains a value between 0 and 31. Maximum number of
63 read transactions that can be issued back to back.
64 Choosing a higher number gives better performance but
65 can also cause performance reduction to other peripherals
66 sharing the same bus.
67
68What: /sys/devices/platform/hidma-mgmt*/max_read_request
69 /sys/devices/platform/QCOM8060:*/max_read_request
70Date: Nov 2015
71KernelVersion: 4.4
72Contact:	"Sinan Kaya <okaya@codeaurora.org>"
73Description:
74 Size of each read request. The value needs to be a power
75 of two and can be between 128 and 1024.
76
77What: /sys/devices/platform/hidma-mgmt*/max_wr_xactions
78 /sys/devices/platform/QCOM8060:*/max_wr_xactions
79Date: Nov 2015
80KernelVersion: 4.4
81Contact:	"Sinan Kaya <okaya@codeaurora.org>"
82Description:
83 Contains a value between 0 and 31. Maximum number of
84 write transactions that can be issued back to back.
85 Choosing a higher number gives better performance but
86 can also cause performance reduction to other peripherals
87 sharing the same bus.
88
89
90What: /sys/devices/platform/hidma-mgmt*/max_write_request
91 /sys/devices/platform/QCOM8060:*/max_write_request
92Date: Nov 2015
93KernelVersion: 4.4
94Contact:	"Sinan Kaya <okaya@codeaurora.org>"
95Description:
96 Size of each write request. The value needs to be a power
97 of two and can be between 128 and 1024.
diff --git a/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
new file mode 100644
index 000000000000..fd5618bd8fbc
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/qcom_hidma_mgmt.txt
@@ -0,0 +1,89 @@
1Qualcomm Technologies HIDMA Management interface
2
3Qualcomm Technologies HIDMA is a high speed DMA device. It only supports
4memcpy and memset capabilities. It has been designed for virtualized
5environments.
6
7Each HIDMA HW instance consists of multiple DMA channels. These channels
8share the same bandwidth. The bandwidth utilization can be partitioned
9among channels based on the priority and weight assignments.
10
11There are only two priority levels and 15 weight assignments possible.
12
13Other parameters here determine how much of the system bus this HIDMA
14instance can use, like maximum read/write request and number of bytes to
15read/write in a single burst.
16
17Main node required properties:
18- compatible: "qcom,hidma-mgmt-1.0";
19- reg: Address range for DMA device
20- dma-channels: Number of channels supported by this DMA controller.
21- max-write-burst-bytes: Maximum write burst in bytes that HIDMA can
22  occupy the bus for in a single transaction. A memcpy request is
23 fragmented to multiples of this amount. This parameter is used while
24 writing into destination memory. Setting this value incorrectly can
25 starve other peripherals in the system.
26- max-read-burst-bytes: Maximum read burst in bytes that HIDMA can
27 occupy the bus for in a single transaction. A memcpy request is
28 fragmented to multiples of this amount. This parameter is used while
29 reading the source memory. Setting this value incorrectly can starve
30 other peripherals in the system.
31- max-write-transactions: This value is how many times a write burst is
32 applied back to back while writing to the destination before yielding
33 the bus.
34- max-read-transactions: This value is how many times a read burst is
35 applied back to back while reading the source before yielding the bus.
36- channel-reset-timeout-cycles: Channel reset timeout in cycles for this SOC.
37 Once a reset is applied to the HW, HW starts a timer for reset operation
38 to confirm. If reset is not completed within this time, HW reports reset
39 failure.
40
41Sub-nodes:
42
43HIDMA has one or more DMA channels that are used to move data from one
44memory location to another.
45
46When the OS is not in control of the management interface (i.e. it's a guest),
47the channel nodes appear on their own, not under a management node.
48
49Required properties:
50- compatible: must contain "qcom,hidma-1.0"
51- reg: Addresses for the transfer and event channel
52- interrupts: Should contain the event interrupt
53- desc-count: Number of asynchronous requests this channel can handle
54- iommus: required, an iommu node
55
56Example:
57
58Hypervisor OS configuration:
59
60 hidma-mgmt@f9984000 = {
61 compatible = "qcom,hidma-mgmt-1.0";
62 reg = <0xf9984000 0x15000>;
63 dma-channels = <6>;
64 max-write-burst-bytes = <1024>;
65 max-read-burst-bytes = <1024>;
66 max-write-transactions = <31>;
67 max-read-transactions = <31>;
68 channel-reset-timeout-cycles = <0x500>;
69
70 hidma_24: dma-controller@0x5c050000 {
71 compatible = "qcom,hidma-1.0";
72 reg = <0 0x5c050000 0x0 0x1000>,
73 <0 0x5c0b0000 0x0 0x1000>;
74 interrupts = <0 389 0>;
75 desc-count = <10>;
76 iommus = <&system_mmu>;
77 };
78 };
79
80Guest OS configuration:
81
82 hidma_24: dma-controller@0x5c050000 {
83 compatible = "qcom,hidma-1.0";
84 reg = <0 0x5c050000 0x0 0x1000>,
85 <0 0x5c0b0000 0x0 0x1000>;
86 interrupts = <0 389 0>;
87 desc-count = <10>;
88 iommus = <&system_mmu>;
89 };
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index c77f214c9466..d96d87c56f2e 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -409,15 +409,6 @@ config PXA_DMA
409 16 to 32 channels for peripheral to memory or memory to memory 409 16 to 32 channels for peripheral to memory or memory to memory
410 transfers. 410 transfers.
411 411
412config QCOM_BAM_DMA
413 tristate "QCOM BAM DMA support"
414 depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
415 select DMA_ENGINE
416 select DMA_VIRTUAL_CHANNELS
417 ---help---
418 Enable support for the QCOM BAM DMA controller. This controller
419 provides DMA capabilities for a variety of on-chip devices.
420
421config SIRF_DMA 412config SIRF_DMA
422 tristate "CSR SiRFprimaII/SiRFmarco DMA support" 413 tristate "CSR SiRFprimaII/SiRFmarco DMA support"
423 depends on ARCH_SIRF 414 depends on ARCH_SIRF
@@ -540,6 +531,8 @@ config ZX_DMA
540# driver files 531# driver files
541source "drivers/dma/bestcomm/Kconfig" 532source "drivers/dma/bestcomm/Kconfig"
542 533
534source "drivers/dma/qcom/Kconfig"
535
543source "drivers/dma/dw/Kconfig" 536source "drivers/dma/dw/Kconfig"
544 537
545source "drivers/dma/hsu/Kconfig" 538source "drivers/dma/hsu/Kconfig"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 2dd0a067a0ca..6084127c1486 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -52,7 +52,6 @@ obj-$(CONFIG_PCH_DMA) += pch_dma.o
52obj-$(CONFIG_PL330_DMA) += pl330.o 52obj-$(CONFIG_PL330_DMA) += pl330.o
53obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ 53obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
54obj-$(CONFIG_PXA_DMA) += pxa_dma.o 54obj-$(CONFIG_PXA_DMA) += pxa_dma.o
55obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o
56obj-$(CONFIG_RENESAS_DMA) += sh/ 55obj-$(CONFIG_RENESAS_DMA) += sh/
57obj-$(CONFIG_SIRF_DMA) += sirf-dma.o 56obj-$(CONFIG_SIRF_DMA) += sirf-dma.o
58obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o 57obj-$(CONFIG_STE_DMA40) += ste_dma40.o ste_dma40_ll.o
@@ -67,4 +66,5 @@ obj-$(CONFIG_TI_EDMA) += edma.o
67obj-$(CONFIG_XGENE_DMA) += xgene-dma.o 66obj-$(CONFIG_XGENE_DMA) += xgene-dma.o
68obj-$(CONFIG_ZX_DMA) += zx296702_dma.o 67obj-$(CONFIG_ZX_DMA) += zx296702_dma.o
69 68
69obj-y += qcom/
70obj-y += xilinx/ 70obj-y += xilinx/
diff --git a/drivers/dma/qcom/Kconfig b/drivers/dma/qcom/Kconfig
new file mode 100644
index 000000000000..a7761c4025f4
--- /dev/null
+++ b/drivers/dma/qcom/Kconfig
@@ -0,0 +1,29 @@
1config QCOM_BAM_DMA
2 tristate "QCOM BAM DMA support"
3 depends on ARCH_QCOM || (COMPILE_TEST && OF && ARM)
4 select DMA_ENGINE
5 select DMA_VIRTUAL_CHANNELS
6 ---help---
7 Enable support for the QCOM BAM DMA controller. This controller
8 provides DMA capabilities for a variety of on-chip devices.
9
10config QCOM_HIDMA_MGMT
11 tristate "Qualcomm Technologies HIDMA Management support"
12 select DMA_ENGINE
13 help
14 Enable support for the Qualcomm Technologies HIDMA Management.
15 Each DMA device requires one management interface driver
16 for basic initialization before QCOM_HIDMA channel driver can
17 start managing the channels. In a virtualized environment,
18 the guest OS would run QCOM_HIDMA channel driver and the
19 host would run the QCOM_HIDMA_MGMT management driver.
20
21config QCOM_HIDMA
22 tristate "Qualcomm Technologies HIDMA Channel support"
23 select DMA_ENGINE
24 help
25 Enable support for the Qualcomm Technologies HIDMA controller.
26 The HIDMA controller supports optimized buffer copies
27 (user to kernel, kernel to kernel, etc.). It only supports
28 memcpy interface. The core is not intended for general
29 purpose slave DMA.
diff --git a/drivers/dma/qcom/Makefile b/drivers/dma/qcom/Makefile
new file mode 100644
index 000000000000..bfea6990229f
--- /dev/null
+++ b/drivers/dma/qcom/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_QCOM_BAM_DMA) += bam_dma.o
2obj-$(CONFIG_QCOM_HIDMA_MGMT) += hdma_mgmt.o
3hdma_mgmt-objs := hidma_mgmt.o hidma_mgmt_sys.o
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom/bam_dma.c
index 5a250cdc8376..2d691a34a0ab 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom/bam_dma.c
@@ -49,13 +49,13 @@
49#include <linux/clk.h> 49#include <linux/clk.h>
50#include <linux/dmaengine.h> 50#include <linux/dmaengine.h>
51 51
52#include "dmaengine.h" 52#include "../dmaengine.h"
53#include "virt-dma.h" 53#include "../virt-dma.h"
54 54
55struct bam_desc_hw { 55struct bam_desc_hw {
56 u32 addr; /* Buffer physical address */ 56 __le32 addr; /* Buffer physical address */
57 u16 size; /* Buffer size in bytes */ 57 __le16 size; /* Buffer size in bytes */
58 u16 flags; 58 __le16 flags;
59}; 59};
60 60
61#define DESC_FLAG_INT BIT(15) 61#define DESC_FLAG_INT BIT(15)
@@ -632,14 +632,15 @@ static struct dma_async_tx_descriptor *bam_prep_slave_sg(struct dma_chan *chan,
632 unsigned int curr_offset = 0; 632 unsigned int curr_offset = 0;
633 633
634 do { 634 do {
635 desc->addr = sg_dma_address(sg) + curr_offset; 635 desc->addr = cpu_to_le32(sg_dma_address(sg) +
636 curr_offset);
636 637
637 if (remainder > BAM_MAX_DATA_SIZE) { 638 if (remainder > BAM_MAX_DATA_SIZE) {
638 desc->size = BAM_MAX_DATA_SIZE; 639 desc->size = cpu_to_le16(BAM_MAX_DATA_SIZE);
639 remainder -= BAM_MAX_DATA_SIZE; 640 remainder -= BAM_MAX_DATA_SIZE;
640 curr_offset += BAM_MAX_DATA_SIZE; 641 curr_offset += BAM_MAX_DATA_SIZE;
641 } else { 642 } else {
642 desc->size = remainder; 643 desc->size = cpu_to_le16(remainder);
643 remainder = 0; 644 remainder = 0;
644 } 645 }
645 646
@@ -915,9 +916,11 @@ static void bam_start_dma(struct bam_chan *bchan)
915 916
916 /* set any special flags on the last descriptor */ 917 /* set any special flags on the last descriptor */
917 if (async_desc->num_desc == async_desc->xfer_len) 918 if (async_desc->num_desc == async_desc->xfer_len)
918 desc[async_desc->xfer_len - 1].flags = async_desc->flags; 919 desc[async_desc->xfer_len - 1].flags =
920 cpu_to_le16(async_desc->flags);
919 else 921 else
920 desc[async_desc->xfer_len - 1].flags |= DESC_FLAG_INT; 922 desc[async_desc->xfer_len - 1].flags |=
923 cpu_to_le16(DESC_FLAG_INT);
921 924
922 if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) { 925 if (bchan->tail + async_desc->xfer_len > MAX_DESCRIPTORS) {
923 u32 partial = MAX_DESCRIPTORS - bchan->tail; 926 u32 partial = MAX_DESCRIPTORS - bchan->tail;
diff --git a/drivers/dma/qcom/hidma.c b/drivers/dma/qcom/hidma.c
new file mode 100644
index 000000000000..cccc78efbca9
--- /dev/null
+++ b/drivers/dma/qcom/hidma.c
@@ -0,0 +1,706 @@
1/*
2 * Qualcomm Technologies HIDMA DMA engine interface
3 *
4 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16/*
17 * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
18 * Copyright (C) Semihalf 2009
19 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
20 * Copyright (C) Alexander Popov, Promcontroller 2014
21 *
22 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
23 * (defines, structures and comments) was taken from MPC5121 DMA driver
24 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
25 *
26 * Approved as OSADL project by a majority of OSADL members and funded
27 * by OSADL membership fees in 2009; for details see www.osadl.org.
28 *
29 * This program is free software; you can redistribute it and/or modify it
30 * under the terms of the GNU General Public License as published by the Free
31 * Software Foundation; either version 2 of the License, or (at your option)
32 * any later version.
33 *
34 * This program is distributed in the hope that it will be useful, but WITHOUT
35 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
36 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
37 * more details.
38 *
39 * The full GNU General Public License is included in this distribution in the
40 * file called COPYING.
41 */
42
43/* Linux Foundation elects GPLv2 license only. */
44
45#include <linux/dmaengine.h>
46#include <linux/dma-mapping.h>
47#include <linux/list.h>
48#include <linux/module.h>
49#include <linux/platform_device.h>
50#include <linux/slab.h>
51#include <linux/spinlock.h>
52#include <linux/of_dma.h>
53#include <linux/property.h>
54#include <linux/delay.h>
55#include <linux/acpi.h>
56#include <linux/irq.h>
57#include <linux/atomic.h>
58#include <linux/pm_runtime.h>
59
60#include "../dmaengine.h"
61#include "hidma.h"
62
63/*
64 * Default idle time is 2 seconds. This parameter can
65 * be overridden by changing the following
66 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
67 * during kernel boot.
68 */
69#define HIDMA_AUTOSUSPEND_TIMEOUT 2000
70#define HIDMA_ERR_INFO_SW 0xFF
71#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE 0x0
72#define HIDMA_NR_DEFAULT_DESC 10
73
74static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
75{
76 return container_of(dmadev, struct hidma_dev, ddev);
77}
78
79static inline
80struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
81{
82 return container_of(_lldevp, struct hidma_dev, lldev);
83}
84
85static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
86{
87 return container_of(dmach, struct hidma_chan, chan);
88}
89
90static inline
91struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
92{
93 return container_of(t, struct hidma_desc, desc);
94}
95
96static void hidma_free(struct hidma_dev *dmadev)
97{
98 INIT_LIST_HEAD(&dmadev->ddev.channels);
99}
100
101static unsigned int nr_desc_prm;
102module_param(nr_desc_prm, uint, 0644);
103MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");
104
105
106/* process completed descriptors */
107static void hidma_process_completed(struct hidma_chan *mchan)
108{
109 struct dma_device *ddev = mchan->chan.device;
110 struct hidma_dev *mdma = to_hidma_dev(ddev);
111 struct dma_async_tx_descriptor *desc;
112 dma_cookie_t last_cookie;
113 struct hidma_desc *mdesc;
114 unsigned long irqflags;
115 struct list_head list;
116
117 INIT_LIST_HEAD(&list);
118
119 /* Get all completed descriptors */
120 spin_lock_irqsave(&mchan->lock, irqflags);
121 list_splice_tail_init(&mchan->completed, &list);
122 spin_unlock_irqrestore(&mchan->lock, irqflags);
123
124 /* Execute callbacks and run dependencies */
125 list_for_each_entry(mdesc, &list, node) {
126 enum dma_status llstat;
127
128 desc = &mdesc->desc;
129
130 spin_lock_irqsave(&mchan->lock, irqflags);
131 dma_cookie_complete(desc);
132 spin_unlock_irqrestore(&mchan->lock, irqflags);
133
134 llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);
135 if (desc->callback && (llstat == DMA_COMPLETE))
136 desc->callback(desc->callback_param);
137
138 last_cookie = desc->cookie;
139 dma_run_dependencies(desc);
140 }
141
142 /* Free descriptors */
143 spin_lock_irqsave(&mchan->lock, irqflags);
144 list_splice_tail_init(&list, &mchan->free);
145 spin_unlock_irqrestore(&mchan->lock, irqflags);
146
147}
148
149/*
150 * Called once for each submitted descriptor.
151 * PM is locked once for each descriptor that is currently
152 * in execution.
153 */
154static void hidma_callback(void *data)
155{
156 struct hidma_desc *mdesc = data;
157 struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
158 struct dma_device *ddev = mchan->chan.device;
159 struct hidma_dev *dmadev = to_hidma_dev(ddev);
160 unsigned long irqflags;
161 bool queued = false;
162
163 spin_lock_irqsave(&mchan->lock, irqflags);
164 if (mdesc->node.next) {
165 /* Delete from the active list, add to completed list */
166 list_move_tail(&mdesc->node, &mchan->completed);
167 queued = true;
168
169 /* calculate the next running descriptor */
170 mchan->running = list_first_entry(&mchan->active,
171 struct hidma_desc, node);
172 }
173 spin_unlock_irqrestore(&mchan->lock, irqflags);
174
175 hidma_process_completed(mchan);
176
177 if (queued) {
178 pm_runtime_mark_last_busy(dmadev->ddev.dev);
179 pm_runtime_put_autosuspend(dmadev->ddev.dev);
180 }
181}
182
183static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
184{
185 struct hidma_chan *mchan;
186 struct dma_device *ddev;
187
188 mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
189 if (!mchan)
190 return -ENOMEM;
191
192 ddev = &dmadev->ddev;
193 mchan->dma_sig = dma_sig;
194 mchan->dmadev = dmadev;
195 mchan->chan.device = ddev;
196 dma_cookie_init(&mchan->chan);
197
198 INIT_LIST_HEAD(&mchan->free);
199 INIT_LIST_HEAD(&mchan->prepared);
200 INIT_LIST_HEAD(&mchan->active);
201 INIT_LIST_HEAD(&mchan->completed);
202
203 spin_lock_init(&mchan->lock);
204 list_add_tail(&mchan->chan.device_node, &ddev->channels);
205 dmadev->ddev.chancnt++;
206 return 0;
207}
208
209static void hidma_issue_task(unsigned long arg)
210{
211 struct hidma_dev *dmadev = (struct hidma_dev *)arg;
212
213 pm_runtime_get_sync(dmadev->ddev.dev);
214 hidma_ll_start(dmadev->lldev);
215}
216
217static void hidma_issue_pending(struct dma_chan *dmach)
218{
219 struct hidma_chan *mchan = to_hidma_chan(dmach);
220 struct hidma_dev *dmadev = mchan->dmadev;
221 unsigned long flags;
222 int status;
223
224 spin_lock_irqsave(&mchan->lock, flags);
225 if (!mchan->running) {
226 struct hidma_desc *desc = list_first_entry(&mchan->active,
227 struct hidma_desc,
228 node);
229 mchan->running = desc;
230 }
231 spin_unlock_irqrestore(&mchan->lock, flags);
232
233 /* PM will be released in hidma_callback function. */
234 status = pm_runtime_get(dmadev->ddev.dev);
235 if (status < 0)
236 tasklet_schedule(&dmadev->task);
237 else
238 hidma_ll_start(dmadev->lldev);
239}
240
241static enum dma_status hidma_tx_status(struct dma_chan *dmach,
242 dma_cookie_t cookie,
243 struct dma_tx_state *txstate)
244{
245 struct hidma_chan *mchan = to_hidma_chan(dmach);
246 enum dma_status ret;
247
248 ret = dma_cookie_status(dmach, cookie, txstate);
249 if (ret == DMA_COMPLETE)
250 return ret;
251
252 if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
253 unsigned long flags;
254 dma_cookie_t runcookie;
255
256 spin_lock_irqsave(&mchan->lock, flags);
257 if (mchan->running)
258 runcookie = mchan->running->desc.cookie;
259 else
260 runcookie = -EINVAL;
261
262 if (runcookie == cookie)
263 ret = DMA_PAUSED;
264
265 spin_unlock_irqrestore(&mchan->lock, flags);
266 }
267
268 return ret;
269}
270
271/*
272 * Submit descriptor to hardware.
273 * Lock the PM for each descriptor we are sending.
274 */
275static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
276{
277 struct hidma_chan *mchan = to_hidma_chan(txd->chan);
278 struct hidma_dev *dmadev = mchan->dmadev;
279 struct hidma_desc *mdesc;
280 unsigned long irqflags;
281 dma_cookie_t cookie;
282
283 pm_runtime_get_sync(dmadev->ddev.dev);
284 if (!hidma_ll_isenabled(dmadev->lldev)) {
285 pm_runtime_mark_last_busy(dmadev->ddev.dev);
286 pm_runtime_put_autosuspend(dmadev->ddev.dev);
287 return -ENODEV;
288 }
289
290 mdesc = container_of(txd, struct hidma_desc, desc);
291 spin_lock_irqsave(&mchan->lock, irqflags);
292
293 /* Move descriptor to active */
294 list_move_tail(&mdesc->node, &mchan->active);
295
296 /* Update cookie */
297 cookie = dma_cookie_assign(txd);
298
299 hidma_ll_queue_request(dmadev->lldev, mdesc->tre_ch);
300 spin_unlock_irqrestore(&mchan->lock, irqflags);
301
302 return cookie;
303}
304
305static int hidma_alloc_chan_resources(struct dma_chan *dmach)
306{
307 struct hidma_chan *mchan = to_hidma_chan(dmach);
308 struct hidma_dev *dmadev = mchan->dmadev;
309 struct hidma_desc *mdesc, *tmp;
310 unsigned long irqflags;
311 LIST_HEAD(descs);
312 unsigned int i;
313 int rc = 0;
314
315 if (mchan->allocated)
316 return 0;
317
318 /* Alloc descriptors for this channel */
319 for (i = 0; i < dmadev->nr_descriptors; i++) {
320 mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
321 if (!mdesc) {
322 rc = -ENOMEM;
323 break;
324 }
325 dma_async_tx_descriptor_init(&mdesc->desc, dmach);
326 mdesc->desc.tx_submit = hidma_tx_submit;
327
328 rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
329 "DMA engine", hidma_callback, mdesc,
330 &mdesc->tre_ch);
331 if (rc) {
332 dev_err(dmach->device->dev,
333 "channel alloc failed at %u\n", i);
334 kfree(mdesc);
335 break;
336 }
337 list_add_tail(&mdesc->node, &descs);
338 }
339
340 if (rc) {
341 /* return the allocated descriptors */
342 list_for_each_entry_safe(mdesc, tmp, &descs, node) {
343 hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
344 kfree(mdesc);
345 }
346 return rc;
347 }
348
349 spin_lock_irqsave(&mchan->lock, irqflags);
350 list_splice_tail_init(&descs, &mchan->free);
351 mchan->allocated = true;
352 spin_unlock_irqrestore(&mchan->lock, irqflags);
353 return 1;
354}
355
356static struct dma_async_tx_descriptor *
357hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
358 size_t len, unsigned long flags)
359{
360 struct hidma_chan *mchan = to_hidma_chan(dmach);
361 struct hidma_desc *mdesc = NULL;
362 struct hidma_dev *mdma = mchan->dmadev;
363 unsigned long irqflags;
364
365 /* Get free descriptor */
366 spin_lock_irqsave(&mchan->lock, irqflags);
367 if (!list_empty(&mchan->free)) {
368 mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
369 list_del(&mdesc->node);
370 }
371 spin_unlock_irqrestore(&mchan->lock, irqflags);
372
373 if (!mdesc)
374 return NULL;
375
376 hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
377 src, dest, len, flags);
378
379 /* Place descriptor in prepared list */
380 spin_lock_irqsave(&mchan->lock, irqflags);
381 list_add_tail(&mdesc->node, &mchan->prepared);
382 spin_unlock_irqrestore(&mchan->lock, irqflags);
383
384 return &mdesc->desc;
385}
386
387static int hidma_terminate_channel(struct dma_chan *chan)
388{
389 struct hidma_chan *mchan = to_hidma_chan(chan);
390 struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
391 struct hidma_desc *tmp, *mdesc;
392 unsigned long irqflags;
393 LIST_HEAD(list);
394 int rc;
395
396 pm_runtime_get_sync(dmadev->ddev.dev);
397 /* give completed requests a chance to finish */
398 hidma_process_completed(mchan);
399
400 spin_lock_irqsave(&mchan->lock, irqflags);
401 list_splice_init(&mchan->active, &list);
402 list_splice_init(&mchan->prepared, &list);
403 list_splice_init(&mchan->completed, &list);
404 spin_unlock_irqrestore(&mchan->lock, irqflags);
405
406 /* this suspends the existing transfer */
407 rc = hidma_ll_pause(dmadev->lldev);
408 if (rc) {
409 dev_err(dmadev->ddev.dev, "channel did not pause\n");
410 goto out;
411 }
412
413 /* return all user requests */
414 list_for_each_entry_safe(mdesc, tmp, &list, node) {
415 struct dma_async_tx_descriptor *txd = &mdesc->desc;
416 dma_async_tx_callback callback = mdesc->desc.callback;
417 void *param = mdesc->desc.callback_param;
418
419 dma_descriptor_unmap(txd);
420
421 if (callback)
422 callback(param);
423
424 dma_run_dependencies(txd);
425
426 /* move myself to free_list */
427 list_move(&mdesc->node, &mchan->free);
428 }
429
430 rc = hidma_ll_resume(dmadev->lldev);
431out:
432 pm_runtime_mark_last_busy(dmadev->ddev.dev);
433 pm_runtime_put_autosuspend(dmadev->ddev.dev);
434 return rc;
435}
436
437static int hidma_terminate_all(struct dma_chan *chan)
438{
439 struct hidma_chan *mchan = to_hidma_chan(chan);
440 struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
441 int rc;
442
443 rc = hidma_terminate_channel(chan);
444 if (rc)
445 return rc;
446
447 /* reinitialize the hardware */
448 pm_runtime_get_sync(dmadev->ddev.dev);
449 rc = hidma_ll_setup(dmadev->lldev);
450 pm_runtime_mark_last_busy(dmadev->ddev.dev);
451 pm_runtime_put_autosuspend(dmadev->ddev.dev);
452 return rc;
453}
454
455static void hidma_free_chan_resources(struct dma_chan *dmach)
456{
457 struct hidma_chan *mchan = to_hidma_chan(dmach);
458 struct hidma_dev *mdma = mchan->dmadev;
459 struct hidma_desc *mdesc, *tmp;
460 unsigned long irqflags;
461 LIST_HEAD(descs);
462
463 /* terminate running transactions and free descriptors */
464 hidma_terminate_channel(dmach);
465
466 spin_lock_irqsave(&mchan->lock, irqflags);
467
468 /* Move data */
469 list_splice_tail_init(&mchan->free, &descs);
470
471 /* Free descriptors */
472 list_for_each_entry_safe(mdesc, tmp, &descs, node) {
473 hidma_ll_free(mdma->lldev, mdesc->tre_ch);
474 list_del(&mdesc->node);
475 kfree(mdesc);
476 }
477
478 mchan->allocated = 0;
479 spin_unlock_irqrestore(&mchan->lock, irqflags);
480}
481
482static int hidma_pause(struct dma_chan *chan)
483{
484 struct hidma_chan *mchan;
485 struct hidma_dev *dmadev;
486
487 mchan = to_hidma_chan(chan);
488 dmadev = to_hidma_dev(mchan->chan.device);
489 if (!mchan->paused) {
490 pm_runtime_get_sync(dmadev->ddev.dev);
491 if (hidma_ll_pause(dmadev->lldev))
492 dev_warn(dmadev->ddev.dev, "channel did not stop\n");
493 mchan->paused = true;
494 pm_runtime_mark_last_busy(dmadev->ddev.dev);
495 pm_runtime_put_autosuspend(dmadev->ddev.dev);
496 }
497 return 0;
498}
499
500static int hidma_resume(struct dma_chan *chan)
501{
502 struct hidma_chan *mchan;
503 struct hidma_dev *dmadev;
504 int rc = 0;
505
506 mchan = to_hidma_chan(chan);
507 dmadev = to_hidma_dev(mchan->chan.device);
508 if (mchan->paused) {
509 pm_runtime_get_sync(dmadev->ddev.dev);
510 rc = hidma_ll_resume(dmadev->lldev);
511 if (!rc)
512 mchan->paused = false;
513 else
514 dev_err(dmadev->ddev.dev,
515 "failed to resume the channel");
516 pm_runtime_mark_last_busy(dmadev->ddev.dev);
517 pm_runtime_put_autosuspend(dmadev->ddev.dev);
518 }
519 return rc;
520}
521
522static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
523{
524 struct hidma_lldev *lldev = arg;
525
526 /*
527 * All interrupts are request driven.
528 * HW doesn't send an interrupt by itself.
529 */
530 return hidma_ll_inthandler(chirq, lldev);
531}
532
533static int hidma_probe(struct platform_device *pdev)
534{
535 struct hidma_dev *dmadev;
536 struct resource *trca_resource;
537 struct resource *evca_resource;
538 int chirq;
539 void __iomem *evca;
540 void __iomem *trca;
541 int rc;
542
543 pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
544 pm_runtime_use_autosuspend(&pdev->dev);
545 pm_runtime_set_active(&pdev->dev);
546 pm_runtime_enable(&pdev->dev);
547
548 trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
549 trca = devm_ioremap_resource(&pdev->dev, trca_resource);
550 if (IS_ERR(trca)) {
551 rc = -ENOMEM;
552 goto bailout;
553 }
554
555 evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
556 evca = devm_ioremap_resource(&pdev->dev, evca_resource);
557 if (IS_ERR(evca)) {
558 rc = -ENOMEM;
559 goto bailout;
560 }
561
562 /*
563 * This driver only handles the channel IRQs.
564 * Common IRQ is handled by the management driver.
565 */
566 chirq = platform_get_irq(pdev, 0);
567 if (chirq < 0) {
568 rc = -ENODEV;
569 goto bailout;
570 }
571
572 dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
573 if (!dmadev) {
574 rc = -ENOMEM;
575 goto bailout;
576 }
577
578 INIT_LIST_HEAD(&dmadev->ddev.channels);
579 spin_lock_init(&dmadev->lock);
580 dmadev->ddev.dev = &pdev->dev;
581 pm_runtime_get_sync(dmadev->ddev.dev);
582
583 dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
584 if (WARN_ON(!pdev->dev.dma_mask)) {
585 rc = -ENXIO;
586 goto dmafree;
587 }
588
589 dmadev->dev_evca = evca;
590 dmadev->evca_resource = evca_resource;
591 dmadev->dev_trca = trca;
592 dmadev->trca_resource = trca_resource;
593 dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
594 dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
595 dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
596 dmadev->ddev.device_tx_status = hidma_tx_status;
597 dmadev->ddev.device_issue_pending = hidma_issue_pending;
598 dmadev->ddev.device_pause = hidma_pause;
599 dmadev->ddev.device_resume = hidma_resume;
600 dmadev->ddev.device_terminate_all = hidma_terminate_all;
601 dmadev->ddev.copy_align = 8;
602
603 device_property_read_u32(&pdev->dev, "desc-count",
604 &dmadev->nr_descriptors);
605
606 if (!dmadev->nr_descriptors && nr_desc_prm)
607 dmadev->nr_descriptors = nr_desc_prm;
608
609 if (!dmadev->nr_descriptors)
610 dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;
611
612 dmadev->chidx = readl(dmadev->dev_trca + 0x28);
613
614 /* Set DMA mask to 64 bits. */
615 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
616 if (rc) {
617 dev_warn(&pdev->dev, "unable to set coherent mask to 64");
618 rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
619 if (rc)
620 goto dmafree;
621 }
622
623 dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
624 dmadev->nr_descriptors, dmadev->dev_trca,
625 dmadev->dev_evca, dmadev->chidx);
626 if (!dmadev->lldev) {
627 rc = -EPROBE_DEFER;
628 goto dmafree;
629 }
630
631 rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler, 0,
632 "qcom-hidma", dmadev->lldev);
633 if (rc)
634 goto uninit;
635
636 INIT_LIST_HEAD(&dmadev->ddev.channels);
637 rc = hidma_chan_init(dmadev, 0);
638 if (rc)
639 goto uninit;
640
641 rc = dma_async_device_register(&dmadev->ddev);
642 if (rc)
643 goto uninit;
644
645 dmadev->irq = chirq;
646 tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
647 dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
648 platform_set_drvdata(pdev, dmadev);
649 pm_runtime_mark_last_busy(dmadev->ddev.dev);
650 pm_runtime_put_autosuspend(dmadev->ddev.dev);
651 return 0;
652
653uninit:
654 hidma_ll_uninit(dmadev->lldev);
655dmafree:
656 if (dmadev)
657 hidma_free(dmadev);
658bailout:
659 pm_runtime_put_sync(&pdev->dev);
660 pm_runtime_disable(&pdev->dev);
661 return rc;
662}
663
664static int hidma_remove(struct platform_device *pdev)
665{
666 struct hidma_dev *dmadev = platform_get_drvdata(pdev);
667
668 pm_runtime_get_sync(dmadev->ddev.dev);
669 dma_async_device_unregister(&dmadev->ddev);
670 devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
671 hidma_ll_uninit(dmadev->lldev);
672 hidma_free(dmadev);
673
674 dev_info(&pdev->dev, "HI-DMA engine removed\n");
675 pm_runtime_put_sync_suspend(&pdev->dev);
676 pm_runtime_disable(&pdev->dev);
677
678 return 0;
679}
680
681#if IS_ENABLED(CONFIG_ACPI)
682static const struct acpi_device_id hidma_acpi_ids[] = {
683 {"QCOM8061"},
684 {},
685};
686#endif
687
688static const struct of_device_id hidma_match[] = {
689 {.compatible = "qcom,hidma-1.0",},
690 {},
691};
692
693MODULE_DEVICE_TABLE(of, hidma_match);
694
695static struct platform_driver hidma_driver = {
696 .probe = hidma_probe,
697 .remove = hidma_remove,
698 .driver = {
699 .name = "hidma",
700 .of_match_table = hidma_match,
701 .acpi_match_table = ACPI_PTR(hidma_acpi_ids),
702 },
703};
704
705module_platform_driver(hidma_driver);
706MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma.h b/drivers/dma/qcom/hidma.h
new file mode 100644
index 000000000000..231e306f6d87
--- /dev/null
+++ b/drivers/dma/qcom/hidma.h
@@ -0,0 +1,160 @@
1/*
2 * Qualcomm Technologies HIDMA data structures
3 *
4 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#ifndef QCOM_HIDMA_H
17#define QCOM_HIDMA_H
18
19#include <linux/kfifo.h>
20#include <linux/interrupt.h>
21#include <linux/dmaengine.h>
22
#define TRE_SIZE			32 /* each TRE is 32 bytes */
/*
 * Field positions inside a TRE; presumably u32-word indices into
 * hidma_tre.tre_local[] — TODO confirm against the low-level engine code.
 */
#define TRE_CFG_IDX			0
#define TRE_LEN_IDX			1
#define TRE_SRC_LOW_IDX		2
#define TRE_SRC_HI_IDX			3
#define TRE_DEST_LOW_IDX		4
#define TRE_DEST_HI_IDX		5
30
/* Completion record for one transfer */
struct hidma_tx_status {
	u8 err_info;			/* error record in this transfer */
	u8 err_code;			/* completion code */
};
35
/*
 * Per-request bookkeeping for one Transfer Ring Entry (TRE).
 * A TRE is built in tre_local[] and later copied into the shared ring.
 */
struct hidma_tre {
	atomic_t allocated;		/* if this channel is allocated	    */
	bool queued;			/* flag whether this is pending     */
	u16 status;			/* status                           */
	u32 chidx;			/* index of the tre                 */
	u32 dma_sig;			/* signature of the tre             */
	const char *dev_name;		/* name of the device               */
	void (*callback)(void *data);	/* requester callback               */
	void *data;			/* Data associated with this channel*/
	struct hidma_lldev *lldev;	/* lldma device pointer             */
	u32 tre_local[TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy        */
	u32 tre_index;			/* the offset where this was written*/
	u32 int_flags;			/* interrupt flags                  */
};
50
/*
 * Low-level engine state: maps the transfer-channel (TRCA) and
 * event-channel (EVCA) register windows and owns the TRE/EVRE rings
 * shared with the hardware.
 */
struct hidma_lldev {
	bool initialized;		/* initialized flag               */
	u8 trch_state;			/* trch_state of the device       */
	u8 evch_state;			/* evch_state of the device       */
	u8 chidx;			/* channel index in the core      */
	u32 nr_tres;			/* max number of configs          */
	spinlock_t lock;		/* reentrancy                     */
	struct hidma_tre *trepool;	/* trepool of user configs        */
	struct device *dev;		/* device                         */
	void __iomem *trca;		/* Transfer Channel address       */
	void __iomem *evca;		/* Event Channel address          */
	struct hidma_tre
		**pending_tre_list;	/* Pointers to pending TREs       */
	struct hidma_tx_status
		*tx_status_list;	/* Pointers to pending TREs status*/
	s32 pending_tre_count;		/* Number of TREs pending         */

	void *tre_ring;			/* TRE ring                       */
	dma_addr_t tre_ring_handle;	/* TRE ring to be shared with HW  */
	u32 tre_ring_size;		/* Byte size of the ring          */
	u32 tre_processed_off;		/* last processed TRE             */

	void *evre_ring;		/* EVRE ring                      */
	dma_addr_t evre_ring_handle;	/* EVRE ring to be shared with HW */
	u32 evre_ring_size;		/* Byte size of the ring          */
	u32 evre_processed_off;		/* last processed EVRE            */

	u32 tre_write_offset;		/* TRE write location             */
	struct tasklet_struct task;	/* task delivering notifications  */
	DECLARE_KFIFO_PTR(handoff_fifo,
		struct hidma_tre *);	/* pending TREs FIFO              */
};
83
/* dmaengine descriptor wrapper tying a dma_async_tx_descriptor to a TRE */
struct hidma_desc {
	struct dma_async_tx_descriptor	desc;
	/* link list node for this channel*/
	struct list_head		node;
	u32				tre_ch;	/* TRE index backing this descriptor */
};
90
/*
 * One virtual DMA channel exposed through dmaengine.  Descriptors move
 * between the free/prepared/active/completed lists under 'lock'.
 */
struct hidma_chan {
	bool				paused;
	bool				allocated;
	char				dbg_name[16];
	u32				dma_sig;

	struct dentry			*debugfs;
	struct dentry			*stats;
	struct hidma_dev		*dmadev;
	/*
	 * active descriptor on this channel.
	 * It is used by the DMA complete notification to
	 * locate the descriptor that initiated the transfer.
	 */
	struct hidma_desc		*running;

	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		active;
	struct list_head		completed;

	/* Lock for this structure */
	spinlock_t			lock;
};
116
/* Top-level per-device state tying the dmaengine device to the ll engine */
struct hidma_dev {
	int				irq;
	int				chidx;
	u32				nr_descriptors;	/* descriptors per channel */

	struct hidma_lldev		*lldev;		/* low-level engine context */
	void				__iomem *dev_trca; /* mapped TRCA window */
	struct resource			*trca_resource;
	void				__iomem *dev_evca; /* mapped EVCA window */
	struct resource			*evca_resource;

	/* used to protect the pending channel list*/
	spinlock_t			lock;
	struct dma_device		ddev;

	struct dentry			*debugfs;
	struct dentry			*stats;

	/* Task delivering issue_pending */
	struct tasklet_struct		task;
};
138
/*
 * Low-level (hidma_ll_*) engine interface; 'llhndl'/'lldev' is the
 * opaque context returned by hidma_ll_init().  Semantics below are
 * inferred from the names — confirm against the engine implementation.
 */
/* allocate a TRE and register a completion callback; *tre_ch is the handle */
int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
			const char *dev_name,
			void (*callback)(void *data), void *data, u32 *tre_ch);

/* release a TRE previously obtained from hidma_ll_request() */
void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_pause(struct hidma_lldev *llhndl);
int hidma_ll_resume(struct hidma_lldev *llhndl);
/* fill in the src/dest/len/flags fields of an allocated TRE */
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
			void __iomem *trca, void __iomem *evca,
			u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
/* complete all pending TREs with the given error info/code */
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
				u8 err_code);
#endif
diff --git a/drivers/dma/qcom/hidma_mgmt.c b/drivers/dma/qcom/hidma_mgmt.c
new file mode 100644
index 000000000000..ef491b893f40
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.c
@@ -0,0 +1,302 @@
1/*
2 * Qualcomm Technologies HIDMA DMA engine Management interface
3 *
4 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/dmaengine.h>
17#include <linux/acpi.h>
18#include <linux/of.h>
19#include <linux/property.h>
20#include <linux/interrupt.h>
21#include <linux/platform_device.h>
22#include <linux/module.h>
23#include <linux/uaccess.h>
24#include <linux/slab.h>
25#include <linux/pm_runtime.h>
26#include <linux/bitops.h>
27
28#include "hidma_mgmt.h"
29
/* Management register-block offsets (bytes from the mapped base) */
#define HIDMA_QOS_N_OFFSET		0x300	/* per-channel QoS, 4 bytes apart */
#define HIDMA_CFG_OFFSET		0x400
#define HIDMA_MAX_BUS_REQ_LEN_OFFSET	0x41C
#define HIDMA_MAX_XACTIONS_OFFSET	0x420
#define HIDMA_HW_VERSION_OFFSET	0x424
#define HIDMA_CHRESET_TIMEOUT_OFFSET	0x418

/* Field masks; GENMASK() yields unsigned long values */
#define HIDMA_MAX_WR_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_MAX_RD_XACTIONS_MASK	GENMASK(4, 0)
#define HIDMA_WEIGHT_MASK		GENMASK(6, 0)
#define HIDMA_MAX_BUS_REQ_LEN_MASK	GENMASK(15, 0)
#define HIDMA_CHRESET_TIMEOUT_MASK	GENMASK(19, 0)

/* Bit positions of the write-side fields within their registers */
#define HIDMA_MAX_WR_XACTIONS_BIT_POS	16
#define HIDMA_MAX_BUS_WR_REQ_BIT_POS	16
#define HIDMA_WRR_BIT_POS		8
#define HIDMA_PRIORITY_BIT_POS		15

#define HIDMA_AUTOSUSPEND_TIMEOUT	2000	/* runtime-PM autosuspend, ms */
#define HIDMA_MAX_CHANNEL_WEIGHT	15
50
51int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev)
52{
53 unsigned int i;
54 u32 val;
55
56 if (!is_power_of_2(mgmtdev->max_write_request) ||
57 (mgmtdev->max_write_request < 128) ||
58 (mgmtdev->max_write_request > 1024)) {
59 dev_err(&mgmtdev->pdev->dev, "invalid write request %d\n",
60 mgmtdev->max_write_request);
61 return -EINVAL;
62 }
63
64 if (!is_power_of_2(mgmtdev->max_read_request) ||
65 (mgmtdev->max_read_request < 128) ||
66 (mgmtdev->max_read_request > 1024)) {
67 dev_err(&mgmtdev->pdev->dev, "invalid read request %d\n",
68 mgmtdev->max_read_request);
69 return -EINVAL;
70 }
71
72 if (mgmtdev->max_wr_xactions > HIDMA_MAX_WR_XACTIONS_MASK) {
73 dev_err(&mgmtdev->pdev->dev,
74 "max_wr_xactions cannot be bigger than %ld\n",
75 HIDMA_MAX_WR_XACTIONS_MASK);
76 return -EINVAL;
77 }
78
79 if (mgmtdev->max_rd_xactions > HIDMA_MAX_RD_XACTIONS_MASK) {
80 dev_err(&mgmtdev->pdev->dev,
81 "max_rd_xactions cannot be bigger than %ld\n",
82 HIDMA_MAX_RD_XACTIONS_MASK);
83 return -EINVAL;
84 }
85
86 for (i = 0; i < mgmtdev->dma_channels; i++) {
87 if (mgmtdev->priority[i] > 1) {
88 dev_err(&mgmtdev->pdev->dev,
89 "priority can be 0 or 1\n");
90 return -EINVAL;
91 }
92
93 if (mgmtdev->weight[i] > HIDMA_MAX_CHANNEL_WEIGHT) {
94 dev_err(&mgmtdev->pdev->dev,
95 "max value of weight can be %d.\n",
96 HIDMA_MAX_CHANNEL_WEIGHT);
97 return -EINVAL;
98 }
99
100 /* weight needs to be at least one */
101 if (mgmtdev->weight[i] == 0)
102 mgmtdev->weight[i] = 1;
103 }
104
105 pm_runtime_get_sync(&mgmtdev->pdev->dev);
106 val = readl(mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
107 val &= ~(HIDMA_MAX_BUS_REQ_LEN_MASK << HIDMA_MAX_BUS_WR_REQ_BIT_POS);
108 val |= mgmtdev->max_write_request << HIDMA_MAX_BUS_WR_REQ_BIT_POS;
109 val &= ~HIDMA_MAX_BUS_REQ_LEN_MASK;
110 val |= mgmtdev->max_read_request;
111 writel(val, mgmtdev->virtaddr + HIDMA_MAX_BUS_REQ_LEN_OFFSET);
112
113 val = readl(mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
114 val &= ~(HIDMA_MAX_WR_XACTIONS_MASK << HIDMA_MAX_WR_XACTIONS_BIT_POS);
115 val |= mgmtdev->max_wr_xactions << HIDMA_MAX_WR_XACTIONS_BIT_POS;
116 val &= ~HIDMA_MAX_RD_XACTIONS_MASK;
117 val |= mgmtdev->max_rd_xactions;
118 writel(val, mgmtdev->virtaddr + HIDMA_MAX_XACTIONS_OFFSET);
119
120 mgmtdev->hw_version =
121 readl(mgmtdev->virtaddr + HIDMA_HW_VERSION_OFFSET);
122 mgmtdev->hw_version_major = (mgmtdev->hw_version >> 28) & 0xF;
123 mgmtdev->hw_version_minor = (mgmtdev->hw_version >> 16) & 0xF;
124
125 for (i = 0; i < mgmtdev->dma_channels; i++) {
126 u32 weight = mgmtdev->weight[i];
127 u32 priority = mgmtdev->priority[i];
128
129 val = readl(mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
130 val &= ~(1 << HIDMA_PRIORITY_BIT_POS);
131 val |= (priority & 0x1) << HIDMA_PRIORITY_BIT_POS;
132 val &= ~(HIDMA_WEIGHT_MASK << HIDMA_WRR_BIT_POS);
133 val |= (weight & HIDMA_WEIGHT_MASK) << HIDMA_WRR_BIT_POS;
134 writel(val, mgmtdev->virtaddr + HIDMA_QOS_N_OFFSET + (4 * i));
135 }
136
137 val = readl(mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
138 val &= ~HIDMA_CHRESET_TIMEOUT_MASK;
139 val |= mgmtdev->chreset_timeout_cycles & HIDMA_CHRESET_TIMEOUT_MASK;
140 writel(val, mgmtdev->virtaddr + HIDMA_CHRESET_TIMEOUT_OFFSET);
141
142 pm_runtime_mark_last_busy(&mgmtdev->pdev->dev);
143 pm_runtime_put_autosuspend(&mgmtdev->pdev->dev);
144 return 0;
145}
146EXPORT_SYMBOL_GPL(hidma_mgmt_setup);
147
148static int hidma_mgmt_probe(struct platform_device *pdev)
149{
150 struct hidma_mgmt_dev *mgmtdev;
151 struct resource *res;
152 void __iomem *virtaddr;
153 int irq;
154 int rc;
155 u32 val;
156
157 pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
158 pm_runtime_use_autosuspend(&pdev->dev);
159 pm_runtime_set_active(&pdev->dev);
160 pm_runtime_enable(&pdev->dev);
161 pm_runtime_get_sync(&pdev->dev);
162
163 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
164 virtaddr = devm_ioremap_resource(&pdev->dev, res);
165 if (IS_ERR(virtaddr)) {
166 rc = -ENOMEM;
167 goto out;
168 }
169
170 irq = platform_get_irq(pdev, 0);
171 if (irq < 0) {
172 dev_err(&pdev->dev, "irq resources not found\n");
173 rc = irq;
174 goto out;
175 }
176
177 mgmtdev = devm_kzalloc(&pdev->dev, sizeof(*mgmtdev), GFP_KERNEL);
178 if (!mgmtdev) {
179 rc = -ENOMEM;
180 goto out;
181 }
182
183 mgmtdev->pdev = pdev;
184 mgmtdev->addrsize = resource_size(res);
185 mgmtdev->virtaddr = virtaddr;
186
187 rc = device_property_read_u32(&pdev->dev, "dma-channels",
188 &mgmtdev->dma_channels);
189 if (rc) {
190 dev_err(&pdev->dev, "number of channels missing\n");
191 goto out;
192 }
193
194 rc = device_property_read_u32(&pdev->dev,
195 "channel-reset-timeout-cycles",
196 &mgmtdev->chreset_timeout_cycles);
197 if (rc) {
198 dev_err(&pdev->dev, "channel reset timeout missing\n");
199 goto out;
200 }
201
202 rc = device_property_read_u32(&pdev->dev, "max-write-burst-bytes",
203 &mgmtdev->max_write_request);
204 if (rc) {
205 dev_err(&pdev->dev, "max-write-burst-bytes missing\n");
206 goto out;
207 }
208
209 rc = device_property_read_u32(&pdev->dev, "max-read-burst-bytes",
210 &mgmtdev->max_read_request);
211 if (rc) {
212 dev_err(&pdev->dev, "max-read-burst-bytes missing\n");
213 goto out;
214 }
215
216 rc = device_property_read_u32(&pdev->dev, "max-write-transactions",
217 &mgmtdev->max_wr_xactions);
218 if (rc) {
219 dev_err(&pdev->dev, "max-write-transactions missing\n");
220 goto out;
221 }
222
223 rc = device_property_read_u32(&pdev->dev, "max-read-transactions",
224 &mgmtdev->max_rd_xactions);
225 if (rc) {
226 dev_err(&pdev->dev, "max-read-transactions missing\n");
227 goto out;
228 }
229
230 mgmtdev->priority = devm_kcalloc(&pdev->dev,
231 mgmtdev->dma_channels,
232 sizeof(*mgmtdev->priority),
233 GFP_KERNEL);
234 if (!mgmtdev->priority) {
235 rc = -ENOMEM;
236 goto out;
237 }
238
239 mgmtdev->weight = devm_kcalloc(&pdev->dev,
240 mgmtdev->dma_channels,
241 sizeof(*mgmtdev->weight), GFP_KERNEL);
242 if (!mgmtdev->weight) {
243 rc = -ENOMEM;
244 goto out;
245 }
246
247 rc = hidma_mgmt_setup(mgmtdev);
248 if (rc) {
249 dev_err(&pdev->dev, "setup failed\n");
250 goto out;
251 }
252
253 /* start the HW */
254 val = readl(mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
255 val |= 1;
256 writel(val, mgmtdev->virtaddr + HIDMA_CFG_OFFSET);
257
258 rc = hidma_mgmt_init_sys(mgmtdev);
259 if (rc) {
260 dev_err(&pdev->dev, "sysfs setup failed\n");
261 goto out;
262 }
263
264 dev_info(&pdev->dev,
265 "HW rev: %d.%d @ %pa with %d physical channels\n",
266 mgmtdev->hw_version_major, mgmtdev->hw_version_minor,
267 &res->start, mgmtdev->dma_channels);
268
269 platform_set_drvdata(pdev, mgmtdev);
270 pm_runtime_mark_last_busy(&pdev->dev);
271 pm_runtime_put_autosuspend(&pdev->dev);
272 return 0;
273out:
274 pm_runtime_put_sync_suspend(&pdev->dev);
275 pm_runtime_disable(&pdev->dev);
276 return rc;
277}
278
279#if IS_ENABLED(CONFIG_ACPI)
280static const struct acpi_device_id hidma_mgmt_acpi_ids[] = {
281 {"QCOM8060"},
282 {},
283};
284#endif
285
/* Device-tree match table for the HIDMA management device */
static const struct of_device_id hidma_mgmt_match[] = {
	{.compatible = "qcom,hidma-mgmt-1.0",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_mgmt_match);
291
/* Platform driver for the management device; no .remove is provided */
static struct platform_driver hidma_mgmt_driver = {
	.probe = hidma_mgmt_probe,
	.driver = {
		   .name = "hidma-mgmt",
		   .of_match_table = hidma_mgmt_match,
		   .acpi_match_table = ACPI_PTR(hidma_mgmt_acpi_ids),
	},
};

module_platform_driver(hidma_mgmt_driver);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/dma/qcom/hidma_mgmt.h b/drivers/dma/qcom/hidma_mgmt.h
new file mode 100644
index 000000000000..f7daf33769f4
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt.h
@@ -0,0 +1,39 @@
1/*
2 * Qualcomm Technologies HIDMA Management common header
3 *
4 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16struct hidma_mgmt_dev {
17 u8 hw_version_major;
18 u8 hw_version_minor;
19
20 u32 max_wr_xactions;
21 u32 max_rd_xactions;
22 u32 max_write_request;
23 u32 max_read_request;
24 u32 dma_channels;
25 u32 chreset_timeout_cycles;
26 u32 hw_version;
27 u32 *priority;
28 u32 *weight;
29
30 /* Hardware device constants */
31 void __iomem *virtaddr;
32 resource_size_t addrsize;
33
34 struct kobject **chroots;
35 struct platform_device *pdev;
36};
37
38int hidma_mgmt_init_sys(struct hidma_mgmt_dev *dev);
39int hidma_mgmt_setup(struct hidma_mgmt_dev *mgmtdev);
diff --git a/drivers/dma/qcom/hidma_mgmt_sys.c b/drivers/dma/qcom/hidma_mgmt_sys.c
new file mode 100644
index 000000000000..d61f1068a34b
--- /dev/null
+++ b/drivers/dma/qcom/hidma_mgmt_sys.c
@@ -0,0 +1,295 @@
1/*
2 * Qualcomm Technologies HIDMA Management SYS interface
3 *
4 * Copyright (c) 2015, The Linux Foundation. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 and
8 * only version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 */
15
16#include <linux/sysfs.h>
17#include <linux/platform_device.h>
18
19#include "hidma_mgmt.h"
20
/* Per-channel sysfs attribute: remembers which device and channel it is for */
struct hidma_chan_attr {
	struct hidma_mgmt_dev *mdev;
	int index;			/* channel number within mdev */
	struct kobj_attribute attr;
};
26
/* Descriptor of one device-level sysfs file and its accessors */
struct hidma_mgmt_fileinfo {
	char *name;			/* sysfs attribute name */
	int mode;			/* file permission bits */
	int (*get)(struct hidma_mgmt_dev *mdev);
	int (*set)(struct hidma_mgmt_dev *mdev, u64 val);
};
33
/*
 * Generate get_<name>/set_<name> accessors for a hidma_mgmt_dev field.
 * The setter writes the new value, re-runs hidma_mgmt_setup() so the
 * hardware is reprogrammed, and rolls the field back if setup rejects it.
 */
#define IMPLEMENT_GETSET(name)					\
static int get_##name(struct hidma_mgmt_dev *mdev)		\
{								\
	return mdev->name;					\
}								\
static int set_##name(struct hidma_mgmt_dev *mdev, u64 val)	\
{								\
	u64 tmp;						\
	int rc;							\
								\
	tmp = mdev->name;					\
	mdev->name = val;					\
	rc = hidma_mgmt_setup(mdev);				\
	if (rc)							\
		mdev->name = tmp;				\
	return rc;						\
}

/* Build one hidma_mgmt_fileinfo entry wired to the generated accessors */
#define DECLARE_ATTRIBUTE(name, mode)				\
	{#name, mode, get_##name, set_##name}

IMPLEMENT_GETSET(hw_version_major)
IMPLEMENT_GETSET(hw_version_minor)
IMPLEMENT_GETSET(max_wr_xactions)
IMPLEMENT_GETSET(max_rd_xactions)
IMPLEMENT_GETSET(max_write_request)
IMPLEMENT_GETSET(max_read_request)
IMPLEMENT_GETSET(dma_channels)
IMPLEMENT_GETSET(chreset_timeout_cycles)
63
64static int set_priority(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
65{
66 u64 tmp;
67 int rc;
68
69 if (i >= mdev->dma_channels)
70 return -EINVAL;
71
72 tmp = mdev->priority[i];
73 mdev->priority[i] = val;
74 rc = hidma_mgmt_setup(mdev);
75 if (rc)
76 mdev->priority[i] = tmp;
77 return rc;
78}
79
80static int set_weight(struct hidma_mgmt_dev *mdev, unsigned int i, u64 val)
81{
82 u64 tmp;
83 int rc;
84
85 if (i >= mdev->dma_channels)
86 return -EINVAL;
87
88 tmp = mdev->weight[i];
89 mdev->weight[i] = val;
90 rc = hidma_mgmt_setup(mdev);
91 if (rc)
92 mdev->weight[i] = tmp;
93 return rc;
94}
95
/*
 * Device-level sysfs files.  All are read-only from userspace even
 * though setters exist (the setters back the per-channel files' path
 * through hidma_mgmt_setup()).
 */
static struct hidma_mgmt_fileinfo hidma_mgmt_files[] = {
	DECLARE_ATTRIBUTE(hw_version_major, S_IRUGO),
	DECLARE_ATTRIBUTE(hw_version_minor, S_IRUGO),
	DECLARE_ATTRIBUTE(dma_channels, S_IRUGO),
	DECLARE_ATTRIBUTE(chreset_timeout_cycles, S_IRUGO),
	DECLARE_ATTRIBUTE(max_wr_xactions, S_IRUGO),
	DECLARE_ATTRIBUTE(max_rd_xactions, S_IRUGO),
	DECLARE_ATTRIBUTE(max_write_request, S_IRUGO),
	DECLARE_ATTRIBUTE(max_read_request, S_IRUGO),
};
106
107static ssize_t show_values(struct device *dev, struct device_attribute *attr,
108 char *buf)
109{
110 struct platform_device *pdev = to_platform_device(dev);
111 struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
112 unsigned int i;
113
114 buf[0] = 0;
115
116 for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
117 if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
118 sprintf(buf, "%d\n", hidma_mgmt_files[i].get(mdev));
119 break;
120 }
121 }
122 return strlen(buf);
123}
124
125static ssize_t set_values(struct device *dev, struct device_attribute *attr,
126 const char *buf, size_t count)
127{
128 struct platform_device *pdev = to_platform_device(dev);
129 struct hidma_mgmt_dev *mdev = platform_get_drvdata(pdev);
130 unsigned long tmp;
131 unsigned int i;
132 int rc;
133
134 rc = kstrtoul(buf, 0, &tmp);
135 if (rc)
136 return rc;
137
138 for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
139 if (strcmp(attr->attr.name, hidma_mgmt_files[i].name) == 0) {
140 rc = hidma_mgmt_files[i].set(mdev, tmp);
141 if (rc)
142 return rc;
143
144 break;
145 }
146 }
147 return count;
148}
149
150static ssize_t show_values_channel(struct kobject *kobj,
151 struct kobj_attribute *attr, char *buf)
152{
153 struct hidma_chan_attr *chattr;
154 struct hidma_mgmt_dev *mdev;
155
156 buf[0] = 0;
157 chattr = container_of(attr, struct hidma_chan_attr, attr);
158 mdev = chattr->mdev;
159 if (strcmp(attr->attr.name, "priority") == 0)
160 sprintf(buf, "%d\n", mdev->priority[chattr->index]);
161 else if (strcmp(attr->attr.name, "weight") == 0)
162 sprintf(buf, "%d\n", mdev->weight[chattr->index]);
163
164 return strlen(buf);
165}
166
167static ssize_t set_values_channel(struct kobject *kobj,
168 struct kobj_attribute *attr, const char *buf,
169 size_t count)
170{
171 struct hidma_chan_attr *chattr;
172 struct hidma_mgmt_dev *mdev;
173 unsigned long tmp;
174 int rc;
175
176 chattr = container_of(attr, struct hidma_chan_attr, attr);
177 mdev = chattr->mdev;
178
179 rc = kstrtoul(buf, 0, &tmp);
180 if (rc)
181 return rc;
182
183 if (strcmp(attr->attr.name, "priority") == 0) {
184 rc = set_priority(mdev, chattr->index, tmp);
185 if (rc)
186 return rc;
187 } else if (strcmp(attr->attr.name, "weight") == 0) {
188 rc = set_weight(mdev, chattr->index, tmp);
189 if (rc)
190 return rc;
191 }
192 return count;
193}
194
195static int create_sysfs_entry(struct hidma_mgmt_dev *dev, char *name, int mode)
196{
197 struct device_attribute *attrs;
198 char *name_copy;
199
200 attrs = devm_kmalloc(&dev->pdev->dev,
201 sizeof(struct device_attribute), GFP_KERNEL);
202 if (!attrs)
203 return -ENOMEM;
204
205 name_copy = devm_kstrdup(&dev->pdev->dev, name, GFP_KERNEL);
206 if (!name_copy)
207 return -ENOMEM;
208
209 attrs->attr.name = name_copy;
210 attrs->attr.mode = mode;
211 attrs->show = show_values;
212 attrs->store = set_values;
213 sysfs_attr_init(&attrs->attr);
214
215 return device_create_file(&dev->pdev->dev, attrs);
216}
217
/*
 * Create one per-channel sysfs file under @parent (the chan%d kobject),
 * recording the device and channel index in a devm-allocated
 * hidma_chan_attr so the show/store callbacks can find them.
 */
static int create_sysfs_entry_channel(struct hidma_mgmt_dev *mdev, char *name,
				      int mode, int index,
				      struct kobject *parent)
{
	struct hidma_chan_attr *chattr;
	char *name_copy;

	chattr = devm_kmalloc(&mdev->pdev->dev, sizeof(*chattr), GFP_KERNEL);
	if (!chattr)
		return -ENOMEM;

	name_copy = devm_kstrdup(&mdev->pdev->dev, name, GFP_KERNEL);
	if (!name_copy)
		return -ENOMEM;

	chattr->mdev = mdev;
	chattr->index = index;
	chattr->attr.attr.name = name_copy;
	chattr->attr.attr.mode = mode;
	chattr->attr.show = show_values_channel;
	chattr->attr.store = set_values_channel;
	sysfs_attr_init(&chattr->attr.attr);

	return sysfs_create_file(parent, &chattr->attr.attr);
}
243
244int hidma_mgmt_init_sys(struct hidma_mgmt_dev *mdev)
245{
246 unsigned int i;
247 int rc;
248 int required;
249 struct kobject *chanops;
250
251 required = sizeof(*mdev->chroots) * mdev->dma_channels;
252 mdev->chroots = devm_kmalloc(&mdev->pdev->dev, required, GFP_KERNEL);
253 if (!mdev->chroots)
254 return -ENOMEM;
255
256 chanops = kobject_create_and_add("chanops", &mdev->pdev->dev.kobj);
257 if (!chanops)
258 return -ENOMEM;
259
260 /* create each channel directory here */
261 for (i = 0; i < mdev->dma_channels; i++) {
262 char name[20];
263
264 snprintf(name, sizeof(name), "chan%d", i);
265 mdev->chroots[i] = kobject_create_and_add(name, chanops);
266 if (!mdev->chroots[i])
267 return -ENOMEM;
268 }
269
270 /* populate common parameters */
271 for (i = 0; i < ARRAY_SIZE(hidma_mgmt_files); i++) {
272 rc = create_sysfs_entry(mdev, hidma_mgmt_files[i].name,
273 hidma_mgmt_files[i].mode);
274 if (rc)
275 return rc;
276 }
277
278 /* populate parameters that are per channel */
279 for (i = 0; i < mdev->dma_channels; i++) {
280 rc = create_sysfs_entry_channel(mdev, "priority",
281 (S_IRUGO | S_IWUGO), i,
282 mdev->chroots[i]);
283 if (rc)
284 return rc;
285
286 rc = create_sysfs_entry_channel(mdev, "weight",
287 (S_IRUGO | S_IWUGO), i,
288 mdev->chroots[i]);
289 if (rc)
290 return rc;
291 }
292
293 return 0;
294}
295EXPORT_SYMBOL_GPL(hidma_mgmt_init_sys);