author | Vinod Koul <vinod.koul@intel.com> | 2015-06-24 23:51:58 -0400 |
committer | Vinod Koul <vinod.koul@intel.com> | 2015-06-24 23:51:58 -0400 |
commit | f2704052cb42aabfa19b3f897cbceb70e2e63c45 (patch) | |
tree | 441522897d4b6dcc2d5197d7711280214326f190 | |
parent | 4fb9c15b4f2371b8640c411ceff2c100857aee2c (diff) | |
parent | c91134d9194478144ba579ca6efeddf628055650 (diff) |
Merge branch 'topic/pxa' into for-linus
-rw-r--r-- | Documentation/dmaengine/pxa_dma.txt | 153 | ||||
-rw-r--r-- | MAINTAINERS | 1 | ||||
-rw-r--r-- | drivers/dma/Kconfig | 11 | ||||
-rw-r--r-- | drivers/dma/Makefile | 1 | ||||
-rw-r--r-- | drivers/dma/pxa_dma.c | 1467 | ||||
-rw-r--r-- | include/linux/dma/pxa-dma.h | 27 |
6 files changed, 1660 insertions, 0 deletions
diff --git a/Documentation/dmaengine/pxa_dma.txt b/Documentation/dmaengine/pxa_dma.txt
new file mode 100644
index 000000000000..413ef9cfaa4d
--- /dev/null
+++ b/Documentation/dmaengine/pxa_dma.txt
@@ -0,0 +1,153 @@ | |||
1 | PXA/MMP - DMA Slave controller | ||
2 | ============================== | ||
3 | |||
4 | Constraints | ||
5 | ----------- | ||
6 | a) Transfers hot queuing | ||
7 | A driver submitting a transfer and issuing it should be guaranteed that the | ||
8 | transfer is queued even on a running DMA channel. | ||
9 | This implies that the queuing doesn't wait for the previous transfer's end, | ||
10 | and that the descriptor chaining is not only done in the irq/tasklet code | ||
11 | triggered by the end of the transfer. | ||
12 | A transfer which is submitted and issued on a phy doesn't wait for a phy to | ||
13 | stop and restart, but is submitted on a "running channel". Other | ||
14 | drivers, especially mmp_pdma, waited for the phy to stop before relaunching | ||
15 | a new transfer. | ||
16 | |||
17 | b) All transfers having asked for confirmation should be signaled | ||
18 | Any issued transfer with DMA_PREP_INTERRUPT should trigger a callback call. | ||
19 | This implies that if an irq/tasklet is triggered by the end of tx1, but | ||
20 | tx2 is already finished at irq/dma time, both tx1->complete() and | ||
21 | tx2->complete() should be called. | ||
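(A minimal client-side sketch of this contract, using the generic dmaengine
API; start_rx, my_done and ctx are hypothetical names:)

    static int start_rx(struct dma_chan *chan, struct scatterlist *sgl,
                        unsigned int sg_len, dma_async_tx_callback my_done,
                        void *ctx)
    {
            struct dma_async_tx_descriptor *tx;

            tx = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
                                         DMA_PREP_INTERRUPT);
            if (!tx)
                    return -ENOMEM;
            /* must be invoked even if tx finished before the irq fired */
            tx->callback = my_done;
            tx->callback_param = ctx;
            dmaengine_submit(tx);
            dma_async_issue_pending(chan);
            return 0;
    }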
22 | |||
23 | c) Channel running state | ||
24 | A driver should be able to query if a channel is running or not. For the | ||
25 | multimedia case, such as video capture, if a transfer is submitted and then | ||
26 | a check of the DMA channel reports a "stopped channel", the transfer should | ||
27 | not be issued until the next "start of frame interrupt", hence the need to | ||
28 | know if a channel is in running or stopped state. | ||
29 | |||
30 | d) Bandwidth guarantee | ||
31 | The PXA architecture has 4 levels of DMA priorities : highest, normal, low | ||
32 | and lowest. Each priority level gets twice as much bandwidth as the next | ||
33 | lower one. | ||
34 | A driver should be able to request a priority, especially for real-time | ||
35 | clients such as pxa_camera, which have (big) throughput requirements. | ||
36 | |||
37 | Design | ||
38 | ------ | ||
39 | a) Virtual channels | ||
40 | Same concept as in the sa11x0 driver, i.e. a driver is assigned a "virtual | ||
41 | channel" linked to the requestor line, and the physical DMA channel is | ||
42 | assigned on the fly when the transfer is issued. | ||
43 | |||
44 | b) Transfer anatomy for a scatter-gather transfer | ||
45 | +------------+-----+---------------+----------------+-----------------+ | ||
46 | | desc-sg[0] | ... | desc-sg[last] | status updater | finisher/linker | | ||
47 | +------------+-----+---------------+----------------+-----------------+ | ||
48 | |||
49 | This structure is pointed to by dma->sg_cpu. | ||
50 | The descriptors are used as follows : | ||
51 | - desc-sg[i]: i-th descriptor, transferring the i-th element of the | ||
52 | transfer's scatter-gather list (e.g. a video buffer) | ||
53 | - status updater | ||
54 | Transfers a single u32 to a well-known DMA coherent memory address, to | ||
55 | leave a trace that this transfer is done. That address is unique per | ||
56 | physical channel, meaning that a read of this value tells which | ||
57 | transfer was the last one to finish at that point in time. | ||
58 | - finisher: has ddadr=DADDR_STOP, dcmd=ENDIRQEN | ||
59 | - linker: has ddadr= desc-sg[0] of next transfer, dcmd=0 | ||
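(For illustration, a sketch of how the updater's fields are filled,
mirroring the set_updater_desc() helper of the driver below; "dma" is the
bus address of the updater descriptor itself:)

    struct pxad_desc_hw *updater = sw_desc->hw_desc[sw_desc->nb_desc - 1];

    updater->ddadr = DDADR_STOP;    /* finisher: end of the chain */
    updater->dsadr = dma;           /* source: its own ddadr word */
    updater->dtadr = dma + 8;       /* destination: its own dtadr word */
    updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
                    (PXA_DCMD_LENGTH & sizeof(u32));    /* one u32 copy */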
60 | |||
61 | c) Transfers hot-chaining | ||
62 | Suppose the running chain is : | ||
63 | Buffer 1 Buffer 2 | ||
64 | +---------+----+---+ +----+----+----+---+ | ||
65 | | d0 | .. | dN | l | | d0 | .. | dN | f | | ||
66 | +---------+----+-|-+ ^----+----+----+---+ | ||
67 | | | | ||
68 | +----+ | ||
69 | |||
70 | After a call to dmaengine_submit(b3), the chain will look like : | ||
71 | Buffer 1 Buffer 2 Buffer 3 | ||
72 | +---------+----+---+ +----+----+----+---+ +----+----+----+---+ | ||
73 | | d0 | .. | dN | l | | d0 | .. | dN | l | | d0 | .. | dN | f | | ||
74 | +---------+----+-|-+ ^----+----+----+-|-+ ^----+----+----+---+ | ||
75 | | | | | | ||
76 | +----+ +----+ | ||
77 | new_link | ||
78 | |||
79 | If the DMA channel stopped while new_link was being created, it is _not_ | ||
80 | restarted. Hot-chaining doesn't break the assumption that | ||
81 | dma_async_issue_pending() is to be used to ensure the transfer is actually started. | ||
82 | |||
83 | One exception to this rule : | ||
84 | - if Buffer1 and Buffer2 had all their addresses 8 bytes aligned | ||
85 | - and if Buffer3 has at least one address not 8 bytes aligned | ||
86 | - then hot-chaining cannot happen, as the channel must be stopped, the | ||
87 | "align bit" must be set, and the channel restarted. As a consequence, in | ||
88 | this specific case such a transfer's tx_submit() will queue it on the | ||
89 | submitted queue, as the DMA is already running in aligned mode. | ||
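(The chaining operation itself is a single pointer update, as done by the
pxad_desc_chain() helper of the driver below; desc1 is the running
transfer, desc2 the one being hot-chained:)

    /* turn desc1's finisher into a linker to desc2's first descriptor */
    desc1->hw_desc[desc1->nb_desc - 1]->ddadr = desc2->first;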
90 | |||
91 | d) Transfers completion updater | ||
92 | Each time a transfer is completed on a channel, an interrupt might be | ||
93 | generated or not, depending on the client's request. But in each case, the last | ||
94 | descriptor of a transfer, the "status updater", will write the latest | ||
95 | transfer being completed into the physical channel's completion mark. | ||
96 | |||
97 | This speeds up residue calculation for large transfers, such as video | ||
98 | buffers which hold around 6k descriptors or more. It also makes it | ||
99 | possible, without taking any lock, to find out the latest completed | ||
100 | transfer in a running DMA chain. | ||
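(This is what makes the lock-free completion test possible; a sketch
mirroring the is_desc_completed() helper of the driver below:)

    static bool is_done(struct pxad_desc_hw *updater)
    {
            /* the DMA overwrote dtadr once the updater descriptor ran */
            return updater->dtadr != (updater->dsadr + 8);
    }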
101 | |||
102 | e) Transfers completion, irq and tasklet | ||
103 | When a transfer flagged as "DMA_PREP_INTERRUPT" is finished, the dma irq | ||
104 | is raised. Upon this interrupt, a tasklet is scheduled for the physical | ||
105 | channel. | ||
106 | The tasklet is responsible for : | ||
107 | - reading the physical channel last updater mark | ||
108 | - calling all the transfer callbacks of finished transfers, based on | ||
109 | that mark, and each transfer's flags. | ||
110 | If a transfer is completed while this handling is done, a dma irq will | ||
111 | be raised, and the tasklet will be scheduled once again, having a new | ||
112 | updater mark. | ||
113 | |||
114 | f) Residue | ||
115 | Residue granularity will be descriptor based. For issued but not completed | ||
116 | transfers, all of their descriptors will be checked against the | ||
117 | currently running descriptor. | ||
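(A condensed sketch of that scan, with "curr" read from the channel's
DSADR or DTADR register depending on the transfer direction:)

    static u32 residue_scan(struct pxad_desc_hw **desc, int nb_desc, u32 curr)
    {
            u32 start, end, residue = 0;
            bool passed = false;
            int i;

            for (i = 0; i < nb_desc; i++) {
                    start = desc[i]->dsadr; /* or dtadr for dev-to-mem */
                    end = start + (desc[i]->dcmd & PXA_DCMD_LENGTH);
                    if (passed) {
                            residue += end - start;
                    } else if (curr >= start && curr <= end) {
                            residue += end - curr;  /* partially done */
                            passed = true;
                    }
            }
            return residue;
    }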
118 | |||
119 | g) Most complicated case of driver's tx queues | ||
120 | The most tricky situation is when : | ||
121 | - there are transfers that are not "acked" (tx0) | ||
122 | - a driver submitted an aligned tx1, not chained | ||
123 | - a driver submitted an aligned tx2 => tx2 is cold chained to tx1 | ||
124 | - a driver issued tx1+tx2 => channel is running in aligned mode | ||
125 | - a driver submitted an aligned tx3 => tx3 is hot-chained | ||
126 | - a driver submitted an unaligned tx4 => tx4 is put in submitted queue, | ||
127 | not chained | ||
128 | - a driver issued tx4 => tx4 is put in issued queue, not chained | ||
129 | - a driver submitted an aligned tx5 => tx5 is put in submitted queue, not | ||
130 | chained | ||
131 | - a driver submitted an aligned tx6 => tx6 is put in submitted queue, | ||
132 | cold chained to tx5 | ||
133 | |||
134 | This translates into (after tx4 is issued) : | ||
135 | - issued queue | ||
136 | +-----+ +-----+ +-----+ +-----+ | ||
137 | | tx1 | | tx2 | | tx3 | | tx4 | | ||
138 | +---|-+ ^---|-+ ^-----+ +-----+ | ||
139 | | | | | | ||
140 | +---+ +---+ | ||
141 | - submitted queue | ||
142 | +-----+ +-----+ | ||
143 | | tx5 | | tx6 | | ||
144 | +---|-+ ^-----+ | ||
145 | | | | ||
146 | +---+ | ||
147 | - completed queue : empty | ||
148 | - allocated queue : tx0 | ||
149 | |||
150 | It should be noted that after tx3 is completed, the channel is stopped, and | ||
151 | restarted in "unaligned mode" to handle tx4. | ||
152 | |||
153 | Author: Robert Jarzmik <robert.jarzmik@free.fr> | ||
diff --git a/MAINTAINERS b/MAINTAINERS
index 781e099495d3..238ea990ed29 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -7937,6 +7937,7 @@ T: git git://github.com/hzhuang1/linux.git | |||
7937 | T: git git://github.com/rjarzmik/linux.git | 7937 | T: git git://github.com/rjarzmik/linux.git |
7938 | S: Maintained | 7938 | S: Maintained |
7939 | F: arch/arm/mach-pxa/ | 7939 | F: arch/arm/mach-pxa/ |
7940 | F: drivers/dma/pxa* | ||
7940 | F: drivers/pcmcia/pxa2xx* | 7941 | F: drivers/pcmcia/pxa2xx* |
7941 | F: drivers/spi/spi-pxa2xx* | 7942 | F: drivers/spi/spi-pxa2xx* |
7942 | F: drivers/usb/gadget/udc/pxa2* | 7943 | F: drivers/usb/gadget/udc/pxa2* |
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 0205ade0ba32..88d474b78076 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -162,6 +162,17 @@ config MX3_IPU_IRQS | |||
162 | To avoid bloating the irq_desc[] array we allocate a sufficient | 162 | To avoid bloating the irq_desc[] array we allocate a sufficient |
163 | number of IRQ slots and map them dynamically to specific sources. | 163 | number of IRQ slots and map them dynamically to specific sources. |
164 | 164 | ||
165 | config PXA_DMA | ||
166 | bool "PXA DMA support" | ||
167 | depends on (ARCH_MMP || ARCH_PXA) | ||
168 | select DMA_ENGINE | ||
169 | select DMA_VIRTUAL_CHANNELS | ||
170 | help | ||
171 | Support the DMA engine for PXA. It is also compatible with the MMP PDMA | ||
172 | platform. The internal DMA IP of all PXA variants is supported, with | ||
173 | 16 to 32 channels for peripheral-to-memory or memory-to-memory | ||
174 | transfers. | ||
175 | |||
165 | config TXX9_DMAC | 176 | config TXX9_DMAC |
166 | tristate "Toshiba TXx9 SoC DMA support" | 177 | tristate "Toshiba TXx9 SoC DMA support" |
167 | depends on MACH_TX49XX || MACH_TX39XX | 178 | depends on MACH_TX49XX || MACH_TX39XX |
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index 535919559f12..6a4d6f2827da 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_AMCC_PPC440SPE_ADMA) += ppc4xx/ | |||
25 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o | 25 | obj-$(CONFIG_IMX_SDMA) += imx-sdma.o |
26 | obj-$(CONFIG_IMX_DMA) += imx-dma.o | 26 | obj-$(CONFIG_IMX_DMA) += imx-dma.o |
27 | obj-$(CONFIG_MXS_DMA) += mxs-dma.o | 27 | obj-$(CONFIG_MXS_DMA) += mxs-dma.o |
28 | obj-$(CONFIG_PXA_DMA) += pxa_dma.o | ||
28 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o | 29 | obj-$(CONFIG_TIMB_DMA) += timb_dma.o |
29 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o | 30 | obj-$(CONFIG_SIRF_DMA) += sirf-dma.o |
30 | obj-$(CONFIG_TI_EDMA) += edma.o | 31 | obj-$(CONFIG_TI_EDMA) += edma.o |
diff --git a/drivers/dma/pxa_dma.c b/drivers/dma/pxa_dma.c
new file mode 100644
index 000000000000..ddcbbf5cd9e9
--- /dev/null
+++ b/drivers/dma/pxa_dma.c
@@ -0,0 +1,1467 @@ | |||
1 | /* | ||
2 | * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/err.h> | ||
10 | #include <linux/module.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/interrupt.h> | ||
14 | #include <linux/dma-mapping.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/dmaengine.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/device.h> | ||
19 | #include <linux/platform_data/mmp_dma.h> | ||
20 | #include <linux/dmapool.h> | ||
21 | #include <linux/of_device.h> | ||
22 | #include <linux/of_dma.h> | ||
23 | #include <linux/of.h> | ||
24 | #include <linux/dma/pxa-dma.h> | ||
25 | |||
26 | #include "dmaengine.h" | ||
27 | #include "virt-dma.h" | ||
28 | |||
29 | #define DCSR(n) (0x0000 + ((n) << 2)) | ||
30 | #define DALGN(n) 0x00a0 | ||
31 | #define DINT 0x00f0 | ||
32 | #define DDADR(n) (0x0200 + ((n) << 4)) | ||
33 | #define DSADR(n) (0x0204 + ((n) << 4)) | ||
34 | #define DTADR(n) (0x0208 + ((n) << 4)) | ||
35 | #define DCMD(n) (0x020c + ((n) << 4)) | ||
36 | |||
37 | #define PXA_DCSR_RUN BIT(31) /* Run Bit (read / write) */ | ||
38 | #define PXA_DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */ | ||
39 | #define PXA_DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (R/W) */ | ||
40 | #define PXA_DCSR_REQPEND BIT(8) /* Request Pending (read-only) */ | ||
41 | #define PXA_DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */ | ||
42 | #define PXA_DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */ | ||
43 | #define PXA_DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */ | ||
44 | #define PXA_DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */ | ||
45 | |||
46 | #define PXA_DCSR_EORIRQEN BIT(28) /* End of Receive IRQ Enable (R/W) */ | ||
47 | #define PXA_DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */ | ||
48 | #define PXA_DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */ | ||
49 | #define PXA_DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */ | ||
50 | #define PXA_DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */ | ||
51 | #define PXA_DCSR_CMPST BIT(10) /* The Descriptor Compare Status */ | ||
52 | #define PXA_DCSR_EORINTR BIT(9) /* The end of Receive */ | ||
53 | |||
54 | #define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */ | ||
55 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ | ||
56 | |||
57 | #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ | ||
58 | #define DDADR_STOP BIT(0) /* Stop (read / write) */ | ||
59 | |||
60 | #define PXA_DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */ | ||
61 | #define PXA_DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */ | ||
62 | #define PXA_DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */ | ||
63 | #define PXA_DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */ | ||
64 | #define PXA_DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */ | ||
65 | #define PXA_DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */ | ||
66 | #define PXA_DCMD_ENDIAN BIT(18) /* Device Endian-ness. */ | ||
67 | #define PXA_DCMD_BURST8 (1 << 16) /* 8 byte burst */ | ||
68 | #define PXA_DCMD_BURST16 (2 << 16) /* 16 byte burst */ | ||
69 | #define PXA_DCMD_BURST32 (3 << 16) /* 32 byte burst */ | ||
70 | #define PXA_DCMD_WIDTH1 (1 << 14) /* 1 byte width */ | ||
71 | #define PXA_DCMD_WIDTH2 (2 << 14) /* 2 byte width (HalfWord) */ | ||
72 | #define PXA_DCMD_WIDTH4 (3 << 14) /* 4 byte width (Word) */ | ||
73 | #define PXA_DCMD_LENGTH 0x01fff /* length mask (max = 8K - 1) */ | ||
74 | |||
75 | #define PDMA_ALIGNMENT 3 | ||
76 | #define PDMA_MAX_DESC_BYTES (PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1)) | ||
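/*
 * Transfers are "aligned" when every address involved is a multiple of
 * 1 << PDMA_ALIGNMENT (8 bytes); otherwise the channel must run with its
 * DALGN bit set. A single descriptor carries at most the DCMD length
 * mask, rounded down to an 8-byte multiple.
 */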
77 | |||
78 | struct pxad_desc_hw { | ||
79 | u32 ddadr; /* Points to the next descriptor + flags */ | ||
80 | u32 dsadr; /* DSADR value for the current transfer */ | ||
81 | u32 dtadr; /* DTADR value for the current transfer */ | ||
82 | u32 dcmd; /* DCMD value for the current transfer */ | ||
83 | } __aligned(16); | ||
84 | |||
85 | struct pxad_desc_sw { | ||
86 | struct virt_dma_desc vd; /* Virtual descriptor */ | ||
87 | int nb_desc; /* Number of hw. descriptors */ | ||
88 | size_t len; /* Number of bytes xfered */ | ||
89 | dma_addr_t first; /* First descriptor's addr */ | ||
90 | |||
91 | /* At least one descriptor has an src/dst address not multiple of 8 */ | ||
92 | bool misaligned; | ||
93 | bool cyclic; | ||
94 | struct dma_pool *desc_pool; /* Channel's used allocator */ | ||
95 | |||
96 | struct pxad_desc_hw *hw_desc[]; /* DMA coherent descriptors */ | ||
97 | }; | ||
98 | |||
99 | struct pxad_phy { | ||
100 | int idx; | ||
101 | void __iomem *base; | ||
102 | struct pxad_chan *vchan; | ||
103 | }; | ||
104 | |||
105 | struct pxad_chan { | ||
106 | struct virt_dma_chan vc; /* Virtual channel */ | ||
107 | u32 drcmr; /* Requestor of the channel */ | ||
108 | enum pxad_chan_prio prio; /* Required priority of phy */ | ||
109 | /* | ||
110 | * At least one desc_sw in submitted or issued transfers on this channel | ||
111 | * has one address such as: addr % 8 != 0. This implies the DALGN | ||
112 | * setting on the phy. | ||
113 | */ | ||
114 | bool misaligned; | ||
115 | struct dma_slave_config cfg; /* Runtime config */ | ||
116 | |||
117 | /* protected by vc->lock */ | ||
118 | struct pxad_phy *phy; | ||
119 | struct dma_pool *desc_pool; /* Descriptors pool */ | ||
120 | }; | ||
121 | |||
122 | struct pxad_device { | ||
123 | struct dma_device slave; | ||
124 | int nr_chans; | ||
125 | void __iomem *base; | ||
126 | struct pxad_phy *phys; | ||
127 | spinlock_t phy_lock; /* Phy association */ | ||
128 | #ifdef CONFIG_DEBUG_FS | ||
129 | struct dentry *dbgfs_root; | ||
130 | struct dentry *dbgfs_state; | ||
131 | struct dentry **dbgfs_chan; | ||
132 | #endif | ||
133 | }; | ||
134 | |||
135 | #define tx_to_pxad_desc(tx) \ | ||
136 | container_of(tx, struct pxad_desc_sw, async_tx) | ||
137 | #define to_pxad_chan(dchan) \ | ||
138 | container_of(dchan, struct pxad_chan, vc.chan) | ||
139 | #define to_pxad_dev(dmadev) \ | ||
140 | container_of(dmadev, struct pxad_device, slave) | ||
141 | #define to_pxad_sw_desc(_vd) \ | ||
142 | container_of((_vd), struct pxad_desc_sw, vd) | ||
143 | |||
144 | #define _phy_readl_relaxed(phy, _reg) \ | ||
145 | readl_relaxed((phy)->base + _reg((phy)->idx)) | ||
146 | #define phy_readl_relaxed(phy, _reg) \ | ||
147 | ({ \ | ||
148 | u32 _v; \ | ||
149 | _v = readl_relaxed((phy)->base + _reg((phy)->idx)); \ | ||
150 | dev_vdbg(&phy->vchan->vc.chan.dev->device, \ | ||
151 | "%s(): readl(%s): 0x%08x\n", __func__, #_reg, \ | ||
152 | _v); \ | ||
153 | _v; \ | ||
154 | }) | ||
155 | #define phy_writel(phy, val, _reg) \ | ||
156 | do { \ | ||
157 | writel((val), (phy)->base + _reg((phy)->idx)); \ | ||
158 | dev_vdbg(&phy->vchan->vc.chan.dev->device, \ | ||
159 | "%s(): writel(0x%08x, %s)\n", \ | ||
160 | __func__, (u32)(val), #_reg); \ | ||
161 | } while (0) | ||
162 | #define phy_writel_relaxed(phy, val, _reg) \ | ||
163 | do { \ | ||
164 | writel_relaxed((val), (phy)->base + _reg((phy)->idx)); \ | ||
165 | dev_vdbg(&phy->vchan->vc.chan.dev->device, \ | ||
166 | "%s(): writel_relaxed(0x%08x, %s)\n", \ | ||
167 | __func__, (u32)(val), #_reg); \ | ||
168 | } while (0) | ||
169 | |||
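/*
 * Offset of the DRCMR register for a requestor line: lines 0-63 live at
 * 0x100 + line * 4, higher lines (later PXA variants) at 0x1000 + line * 4.
 * Each DRCMR selects the physical channel servicing the requestor
 * (DRCMR_CHLNUM) and whether that mapping is valid (DRCMR_MAPVLD).
 */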
170 | static unsigned int pxad_drcmr(unsigned int line) | ||
171 | { | ||
172 | if (line < 64) | ||
173 | return 0x100 + line * 4; | ||
174 | return 0x1000 + line * 4; | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * Debug fs | ||
179 | */ | ||
180 | #ifdef CONFIG_DEBUG_FS | ||
181 | #include <linux/debugfs.h> | ||
182 | #include <linux/uaccess.h> | ||
183 | #include <linux/seq_file.h> | ||
184 | |||
185 | static int dbg_show_requester_chan(struct seq_file *s, void *p) | ||
186 | { | ||
187 | int pos = 0; | ||
188 | struct pxad_phy *phy = s->private; | ||
189 | int i; | ||
190 | u32 drcmr; | ||
191 | |||
192 | pos += seq_printf(s, "DMA channel %d requester :\n", phy->idx); | ||
193 | for (i = 0; i < 70; i++) { | ||
194 | drcmr = readl_relaxed(phy->base + pxad_drcmr(i)); | ||
195 | if ((drcmr & DRCMR_CHLNUM) == phy->idx) | ||
196 | pos += seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i, | ||
197 | !!(drcmr & DRCMR_MAPVLD)); | ||
198 | } | ||
199 | return pos; | ||
200 | } | ||
201 | |||
202 | static inline int dbg_burst_from_dcmd(u32 dcmd) | ||
203 | { | ||
204 | int burst = (dcmd >> 16) & 0x3; | ||
205 | |||
206 | return burst ? 4 << burst : 0; | ||
207 | } | ||
208 | |||
209 | static int is_phys_valid(unsigned long addr) | ||
210 | { | ||
211 | return pfn_valid(__phys_to_pfn(addr)); | ||
212 | } | ||
213 | |||
214 | #define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "") | ||
215 | #define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "") | ||
216 | |||
217 | static int dbg_show_descriptors(struct seq_file *s, void *p) | ||
218 | { | ||
219 | struct pxad_phy *phy = s->private; | ||
220 | int i, max_show = 20, burst, width; | ||
221 | u32 dcmd; | ||
222 | unsigned long phys_desc, ddadr; | ||
223 | struct pxad_desc_hw *desc; | ||
224 | |||
225 | phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR); | ||
226 | |||
227 | seq_printf(s, "DMA channel %d descriptors :\n", phy->idx); | ||
228 | seq_printf(s, "[%03d] First descriptor unknown\n", 0); | ||
229 | for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) { | ||
230 | desc = phys_to_virt(phys_desc); | ||
231 | dcmd = desc->dcmd; | ||
232 | burst = dbg_burst_from_dcmd(dcmd); | ||
233 | width = (1 << ((dcmd >> 14) & 0x3)) >> 1; | ||
234 | |||
235 | seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n", | ||
236 | i, phys_desc, desc); | ||
237 | seq_printf(s, "\tDDADR = %08x\n", desc->ddadr); | ||
238 | seq_printf(s, "\tDSADR = %08x\n", desc->dsadr); | ||
239 | seq_printf(s, "\tDTADR = %08x\n", desc->dtadr); | ||
240 | seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n", | ||
241 | dcmd, | ||
242 | PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR), | ||
243 | PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG), | ||
244 | PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN), | ||
245 | PXA_DCMD_STR(ENDIAN), burst, width, | ||
246 | dcmd & PXA_DCMD_LENGTH); | ||
247 | phys_desc = desc->ddadr; | ||
248 | } | ||
249 | if (i == max_show) | ||
250 | seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n", | ||
251 | i, phys_desc); | ||
252 | else | ||
253 | seq_printf(s, "[%03d] Desc at %08lx is %s\n", | ||
254 | i, phys_desc, phys_desc == DDADR_STOP ? | ||
255 | "DDADR_STOP" : "invalid"); | ||
256 | |||
257 | return 0; | ||
258 | } | ||
259 | |||
260 | static int dbg_show_chan_state(struct seq_file *s, void *p) | ||
261 | { | ||
262 | struct pxad_phy *phy = s->private; | ||
263 | u32 dcsr, dcmd; | ||
264 | int burst, width; | ||
265 | static const char * const str_prio[] = { | ||
266 | "high", "normal", "low", "invalid" | ||
267 | }; | ||
268 | |||
269 | dcsr = _phy_readl_relaxed(phy, DCSR); | ||
270 | dcmd = _phy_readl_relaxed(phy, DCMD); | ||
271 | burst = dbg_burst_from_dcmd(dcmd); | ||
272 | width = (1 << ((dcmd >> 14) & 0x3)) >> 1; | ||
273 | |||
274 | seq_printf(s, "DMA channel %d\n", phy->idx); | ||
275 | seq_printf(s, "\tPriority : %s\n", | ||
276 | str_prio[(phy->idx & 0xf) / 4]); | ||
277 | seq_printf(s, "\tUnaligned transfer bit: %s\n", | ||
278 | _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ? | ||
279 | "yes" : "no"); | ||
280 | seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n", | ||
281 | dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC), | ||
282 | PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN), | ||
283 | PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN), | ||
284 | PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST), | ||
285 | PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR), | ||
286 | PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE), | ||
287 | PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR), | ||
288 | PXA_DCSR_STR(BUSERR)); | ||
289 | |||
290 | seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n", | ||
291 | dcmd, | ||
292 | PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR), | ||
293 | PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG), | ||
294 | PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN), | ||
295 | PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH); | ||
296 | seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR)); | ||
297 | seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR)); | ||
298 | seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR)); | ||
299 | |||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static int dbg_show_state(struct seq_file *s, void *p) | ||
304 | { | ||
305 | struct pxad_device *pdev = s->private; | ||
306 | |||
307 | /* basic device status */ | ||
308 | seq_puts(s, "DMA engine status\n"); | ||
309 | seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans); | ||
310 | |||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | #define DBGFS_FUNC_DECL(name) \ | ||
315 | static int dbg_open_##name(struct inode *inode, struct file *file) \ | ||
316 | { \ | ||
317 | return single_open(file, dbg_show_##name, inode->i_private); \ | ||
318 | } \ | ||
319 | static const struct file_operations dbg_fops_##name = { \ | ||
320 | .owner = THIS_MODULE, \ | ||
321 | .open = dbg_open_##name, \ | ||
322 | .llseek = seq_lseek, \ | ||
323 | .read = seq_read, \ | ||
324 | .release = single_release, \ | ||
325 | } | ||
326 | |||
327 | DBGFS_FUNC_DECL(state); | ||
328 | DBGFS_FUNC_DECL(chan_state); | ||
329 | DBGFS_FUNC_DECL(descriptors); | ||
330 | DBGFS_FUNC_DECL(requester_chan); | ||
331 | |||
332 | static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev, | ||
333 | int ch, struct dentry *chandir) | ||
334 | { | ||
335 | char chan_name[11]; | ||
336 | struct dentry *chan, *chan_state = NULL, *chan_descr = NULL; | ||
337 | struct dentry *chan_reqs = NULL; | ||
338 | void *dt; | ||
339 | |||
340 | scnprintf(chan_name, sizeof(chan_name), "%d", ch); | ||
341 | chan = debugfs_create_dir(chan_name, chandir); | ||
342 | dt = (void *)&pdev->phys[ch]; | ||
343 | |||
344 | if (chan) | ||
345 | chan_state = debugfs_create_file("state", 0400, chan, dt, | ||
346 | &dbg_fops_chan_state); | ||
347 | if (chan_state) | ||
348 | chan_descr = debugfs_create_file("descriptors", 0400, chan, dt, | ||
349 | &dbg_fops_descriptors); | ||
350 | if (chan_descr) | ||
351 | chan_reqs = debugfs_create_file("requesters", 0400, chan, dt, | ||
352 | &dbg_fops_requester_chan); | ||
353 | if (!chan_reqs) | ||
354 | goto err_state; | ||
355 | |||
356 | return chan; | ||
357 | |||
358 | err_state: | ||
359 | debugfs_remove_recursive(chan); | ||
360 | return NULL; | ||
361 | } | ||
362 | |||
363 | static void pxad_init_debugfs(struct pxad_device *pdev) | ||
364 | { | ||
365 | int i; | ||
366 | struct dentry *chandir; | ||
367 | |||
368 | pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL); | ||
369 | if (IS_ERR(pdev->dbgfs_root) || !pdev->dbgfs_root) | ||
370 | goto err_root; | ||
371 | |||
372 | pdev->dbgfs_state = debugfs_create_file("state", 0400, pdev->dbgfs_root, | ||
373 | pdev, &dbg_fops_state); | ||
374 | if (!pdev->dbgfs_state) | ||
375 | goto err_state; | ||
376 | |||
377 | pdev->dbgfs_chan = | ||
378 | kmalloc_array(pdev->nr_chans, sizeof(*pdev->dbgfs_chan), | ||
379 | GFP_KERNEL); | ||
380 | if (!pdev->dbgfs_chan) | ||
381 | goto err_alloc; | ||
382 | |||
383 | chandir = debugfs_create_dir("channels", pdev->dbgfs_root); | ||
384 | if (!chandir) | ||
385 | goto err_chandir; | ||
386 | |||
387 | for (i = 0; i < pdev->nr_chans; i++) { | ||
388 | pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir); | ||
389 | if (!pdev->dbgfs_chan[i]) | ||
390 | goto err_chans; | ||
391 | } | ||
392 | |||
393 | return; | ||
394 | err_chans: | ||
395 | err_chandir: | ||
396 | kfree(pdev->dbgfs_chan); | ||
397 | err_alloc: | ||
398 | err_state: | ||
399 | debugfs_remove_recursive(pdev->dbgfs_root); | ||
400 | err_root: | ||
401 | pr_err("pxad: debugfs is not available\n"); | ||
402 | } | ||
403 | |||
404 | static void pxad_cleanup_debugfs(struct pxad_device *pdev) | ||
405 | { | ||
406 | debugfs_remove_recursive(pdev->dbgfs_root); | ||
407 | } | ||
408 | #else | ||
409 | static inline void pxad_init_debugfs(struct pxad_device *pdev) {} | ||
410 | static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {} | ||
411 | #endif | ||
412 | |||
413 | /* | ||
414 | * In the transition phase where legacy pxa handling is done at the same time as | ||
415 | * mmp_dma, the DMA physical channel split between the 2 DMA providers is done | ||
416 | * through legacy_reserved. Legacy code reserves DMA channels by settings | ||
417 | * corresponding bits in legacy_reserved. | ||
418 | */ | ||
419 | static u32 legacy_reserved; | ||
420 | static u32 legacy_unavailable; | ||
421 | |||
422 | static struct pxad_phy *lookup_phy(struct pxad_chan *pchan) | ||
423 | { | ||
424 | int prio, i; | ||
425 | struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device); | ||
426 | struct pxad_phy *phy, *found = NULL; | ||
427 | unsigned long flags; | ||
428 | |||
429 | /* | ||
430 | * dma channel priorities | ||
431 | * ch 0 - 3, 16 - 19 <--> (0) | ||
432 | * ch 4 - 7, 20 - 23 <--> (1) | ||
433 | * ch 8 - 11, 24 - 27 <--> (2) | ||
434 | * ch 12 - 15, 28 - 31 <--> (3) | ||
435 | */ | ||
436 | |||
437 | spin_lock_irqsave(&pdev->phy_lock, flags); | ||
438 | for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) { | ||
439 | for (i = 0; i < pdev->nr_chans; i++) { | ||
440 | if (prio != (i & 0xf) >> 2) | ||
441 | continue; | ||
442 | if ((i < 32) && (legacy_reserved & BIT(i))) | ||
443 | continue; | ||
444 | phy = &pdev->phys[i]; | ||
445 | if (!phy->vchan) { | ||
446 | phy->vchan = pchan; | ||
447 | found = phy; | ||
448 | if (i < 32) | ||
449 | legacy_unavailable |= BIT(i); | ||
450 | goto out_unlock; | ||
451 | } | ||
452 | } | ||
453 | } | ||
454 | |||
455 | out_unlock: | ||
456 | spin_unlock_irqrestore(&pdev->phy_lock, flags); | ||
457 | dev_dbg(&pchan->vc.chan.dev->device, | ||
458 | "%s(): phy=%p(%d)\n", __func__, found, | ||
459 | found ? found->idx : -1); | ||
460 | |||
461 | return found; | ||
462 | } | ||
463 | |||
464 | static void pxad_free_phy(struct pxad_chan *chan) | ||
465 | { | ||
466 | struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device); | ||
467 | unsigned long flags; | ||
468 | u32 reg; | ||
469 | int i; | ||
470 | |||
471 | dev_dbg(&chan->vc.chan.dev->device, | ||
472 | "%s(): freeing\n", __func__); | ||
473 | if (!chan->phy) | ||
474 | return; | ||
475 | |||
476 | /* clear the channel mapping in DRCMR */ | ||
477 | reg = pxad_drcmr(chan->drcmr); | ||
478 | writel_relaxed(0, chan->phy->base + reg); | ||
479 | |||
480 | spin_lock_irqsave(&pdev->phy_lock, flags); | ||
481 | for (i = 0; i < 32; i++) | ||
482 | if (chan->phy == &pdev->phys[i]) | ||
483 | legacy_unavailable &= ~BIT(i); | ||
484 | chan->phy->vchan = NULL; | ||
485 | chan->phy = NULL; | ||
486 | spin_unlock_irqrestore(&pdev->phy_lock, flags); | ||
487 | } | ||
488 | |||
489 | static bool is_chan_running(struct pxad_chan *chan) | ||
490 | { | ||
491 | u32 dcsr; | ||
492 | struct pxad_phy *phy = chan->phy; | ||
493 | |||
494 | if (!phy) | ||
495 | return false; | ||
496 | dcsr = phy_readl_relaxed(phy, DCSR); | ||
497 | return dcsr & PXA_DCSR_RUN; | ||
498 | } | ||
499 | |||
500 | static bool is_running_chan_misaligned(struct pxad_chan *chan) | ||
501 | { | ||
502 | u32 dalgn; | ||
503 | |||
504 | BUG_ON(!chan->phy); | ||
505 | dalgn = phy_readl_relaxed(chan->phy, DALGN); | ||
506 | return dalgn & (BIT(chan->phy->idx)); | ||
507 | } | ||
508 | |||
509 | static void phy_enable(struct pxad_phy *phy, bool misaligned) | ||
510 | { | ||
511 | u32 reg, dalgn; | ||
512 | |||
513 | if (!phy->vchan) | ||
514 | return; | ||
515 | |||
516 | dev_dbg(&phy->vchan->vc.chan.dev->device, | ||
517 | "%s(); phy=%p(%d) misaligned=%d\n", __func__, | ||
518 | phy, phy->idx, misaligned); | ||
519 | |||
520 | reg = pxad_drcmr(phy->vchan->drcmr); | ||
521 | writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg); | ||
522 | |||
523 | dalgn = phy_readl_relaxed(phy, DALGN); | ||
524 | if (misaligned) | ||
525 | dalgn |= BIT(phy->idx); | ||
526 | else | ||
527 | dalgn &= ~BIT(phy->idx); | ||
528 | phy_writel_relaxed(phy, dalgn, DALGN); | ||
529 | |||
530 | phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR | | ||
531 | PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR); | ||
532 | } | ||
533 | |||
534 | static void phy_disable(struct pxad_phy *phy) | ||
535 | { | ||
536 | u32 dcsr; | ||
537 | |||
538 | if (!phy) | ||
539 | return; | ||
540 | |||
541 | dcsr = phy_readl_relaxed(phy, DCSR); | ||
542 | dev_dbg(&phy->vchan->vc.chan.dev->device, | ||
543 | "%s(): phy=%p(%d)\n", __func__, phy, phy->idx); | ||
544 | phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR); | ||
545 | } | ||
546 | |||
547 | static void pxad_launch_chan(struct pxad_chan *chan, | ||
548 | struct pxad_desc_sw *desc) | ||
549 | { | ||
550 | dev_dbg(&chan->vc.chan.dev->device, | ||
551 | "%s(): desc=%p\n", __func__, desc); | ||
552 | if (!chan->phy) { | ||
553 | chan->phy = lookup_phy(chan); | ||
554 | if (!chan->phy) { | ||
555 | dev_dbg(&chan->vc.chan.dev->device, | ||
556 | "%s(): no free dma channel\n", __func__); | ||
557 | return; | ||
558 | } | ||
559 | } | ||
560 | |||
561 | /* | ||
562 | * Program the descriptor's address into the DMA controller, | ||
563 | * then start the DMA transaction | ||
564 | */ | ||
565 | phy_writel(chan->phy, desc->first, DDADR); | ||
566 | phy_enable(chan->phy, chan->misaligned); | ||
567 | } | ||
568 | |||
569 | static void set_updater_desc(struct pxad_desc_sw *sw_desc, | ||
570 | unsigned long flags) | ||
571 | { | ||
572 | struct pxad_desc_hw *updater = | ||
573 | sw_desc->hw_desc[sw_desc->nb_desc - 1]; | ||
574 | dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr; | ||
575 | |||
576 | updater->ddadr = DDADR_STOP; | ||
577 | updater->dsadr = dma; | ||
578 | updater->dtadr = dma + 8; | ||
579 | updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 | | ||
580 | (PXA_DCMD_LENGTH & sizeof(u32)); | ||
581 | if (flags & DMA_PREP_INTERRUPT) | ||
582 | updater->dcmd |= PXA_DCMD_ENDIRQEN; | ||
583 | } | ||
584 | |||
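/*
 * The updater descriptor copies one u32 from its own ddadr word (pointed
 * to by dsadr) into its own dtadr word, located at dsadr + 8. While dtadr
 * still holds its programmed value the updater has not run; once it has
 * been overwritten, the whole sw_desc is known to be completed.
 */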
585 | static bool is_desc_completed(struct virt_dma_desc *vd) | ||
586 | { | ||
587 | struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd); | ||
588 | struct pxad_desc_hw *updater = | ||
589 | sw_desc->hw_desc[sw_desc->nb_desc - 1]; | ||
590 | |||
591 | return updater->dtadr != (updater->dsadr + 8); | ||
592 | } | ||
593 | |||
594 | static void pxad_desc_chain(struct virt_dma_desc *vd1, | ||
595 | struct virt_dma_desc *vd2) | ||
596 | { | ||
597 | struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1); | ||
598 | struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2); | ||
599 | dma_addr_t dma_to_chain; | ||
600 | |||
601 | dma_to_chain = desc2->first; | ||
602 | desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain; | ||
603 | } | ||
604 | |||
605 | static bool pxad_try_hotchain(struct virt_dma_chan *vc, | ||
606 | struct virt_dma_desc *vd) | ||
607 | { | ||
608 | struct virt_dma_desc *vd_last_issued = NULL; | ||
609 | struct pxad_chan *chan = to_pxad_chan(&vc->chan); | ||
610 | |||
611 | /* | ||
612 | * Attempt to hot chain the tx if the phy is still running. This is | ||
613 | * considered successful only if either the channel is still running | ||
614 | * after the chaining, or if the chained transfer is completed after | ||
615 | * having been hot chained. | ||
616 | * A change of alignment is not allowed, and forbids hotchaining. | ||
617 | */ | ||
618 | if (is_chan_running(chan)) { | ||
619 | BUG_ON(list_empty(&vc->desc_issued)); | ||
620 | |||
621 | if (!is_running_chan_misaligned(chan) && | ||
622 | to_pxad_sw_desc(vd)->misaligned) | ||
623 | return false; | ||
624 | |||
625 | vd_last_issued = list_entry(vc->desc_issued.prev, | ||
626 | struct virt_dma_desc, node); | ||
627 | pxad_desc_chain(vd_last_issued, vd); | ||
628 | if (is_chan_running(chan) || is_desc_completed(vd_last_issued)) | ||
629 | return true; | ||
630 | } | ||
631 | |||
632 | return false; | ||
633 | } | ||
634 | |||
635 | static unsigned int clear_chan_irq(struct pxad_phy *phy) | ||
636 | { | ||
637 | u32 dcsr; | ||
638 | u32 dint = readl(phy->base + DINT); | ||
639 | |||
640 | if (!(dint & BIT(phy->idx))) | ||
641 | return PXA_DCSR_RUN; | ||
642 | |||
643 | /* clear irq */ | ||
644 | dcsr = phy_readl_relaxed(phy, DCSR); | ||
645 | phy_writel(phy, dcsr, DCSR); | ||
646 | if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan)) | ||
647 | dev_warn(&phy->vchan->vc.chan.dev->device, | ||
648 | "%s(chan=%p): PXA_DCSR_BUSERR\n", | ||
649 | __func__, phy->vchan); | ||
650 | |||
651 | return dcsr & ~PXA_DCSR_RUN; | ||
652 | } | ||
653 | |||
654 | static irqreturn_t pxad_chan_handler(int irq, void *dev_id) | ||
655 | { | ||
656 | struct pxad_phy *phy = dev_id; | ||
657 | struct pxad_chan *chan = phy->vchan; | ||
658 | struct virt_dma_desc *vd, *tmp; | ||
659 | unsigned int dcsr; | ||
660 | unsigned long flags; | ||
661 | |||
662 | BUG_ON(!chan); | ||
663 | |||
664 | dcsr = clear_chan_irq(phy); | ||
665 | if (dcsr & PXA_DCSR_RUN) | ||
666 | return IRQ_NONE; | ||
667 | |||
668 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
669 | list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) { | ||
670 | dev_dbg(&chan->vc.chan.dev->device, | ||
671 | "%s(): checking txd %p[%x]: completed=%d\n", | ||
672 | __func__, vd, vd->tx.cookie, is_desc_completed(vd)); | ||
673 | if (is_desc_completed(vd)) { | ||
674 | list_del(&vd->node); | ||
675 | vchan_cookie_complete(vd); | ||
676 | } else { | ||
677 | break; | ||
678 | } | ||
679 | } | ||
680 | |||
681 | if (dcsr & PXA_DCSR_STOPSTATE) { | ||
682 | dev_dbg(&chan->vc.chan.dev->device, | ||
683 | "%s(): channel stopped, submitted_empty=%d issued_empty=%d", | ||
684 | __func__, | ||
685 | list_empty(&chan->vc.desc_submitted), | ||
686 | list_empty(&chan->vc.desc_issued)); | ||
687 | phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR); | ||
688 | |||
689 | if (list_empty(&chan->vc.desc_issued)) { | ||
690 | chan->misaligned = | ||
691 | !list_empty(&chan->vc.desc_submitted); | ||
692 | } else { | ||
693 | vd = list_first_entry(&chan->vc.desc_issued, | ||
694 | struct virt_dma_desc, node); | ||
695 | pxad_launch_chan(chan, to_pxad_sw_desc(vd)); | ||
696 | } | ||
697 | } | ||
698 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
699 | |||
700 | return IRQ_HANDLED; | ||
701 | } | ||
702 | |||
703 | static irqreturn_t pxad_int_handler(int irq, void *dev_id) | ||
704 | { | ||
705 | struct pxad_device *pdev = dev_id; | ||
706 | struct pxad_phy *phy; | ||
707 | u32 dint = readl(pdev->base + DINT); | ||
708 | int i, ret = IRQ_NONE; | ||
709 | |||
710 | while (dint) { | ||
711 | i = __ffs(dint); | ||
712 | dint &= (dint - 1); | ||
713 | phy = &pdev->phys[i]; | ||
714 | if ((i < 32) && (legacy_reserved & BIT(i))) | ||
715 | continue; | ||
716 | if (pxad_chan_handler(irq, phy) == IRQ_HANDLED) | ||
717 | ret = IRQ_HANDLED; | ||
718 | } | ||
719 | |||
720 | return ret; | ||
721 | } | ||
722 | |||
723 | static int pxad_alloc_chan_resources(struct dma_chan *dchan) | ||
724 | { | ||
725 | struct pxad_chan *chan = to_pxad_chan(dchan); | ||
726 | struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device); | ||
727 | |||
728 | if (chan->desc_pool) | ||
729 | return 1; | ||
730 | |||
731 | chan->desc_pool = dma_pool_create(dma_chan_name(dchan), | ||
732 | pdev->slave.dev, | ||
733 | sizeof(struct pxad_desc_hw), | ||
734 | __alignof__(struct pxad_desc_hw), | ||
735 | 0); | ||
736 | if (!chan->desc_pool) { | ||
737 | dev_err(&chan->vc.chan.dev->device, | ||
738 | "%s(): unable to allocate descriptor pool\n", | ||
739 | __func__); | ||
740 | return -ENOMEM; | ||
741 | } | ||
742 | |||
743 | return 1; | ||
744 | } | ||
745 | |||
746 | static void pxad_free_chan_resources(struct dma_chan *dchan) | ||
747 | { | ||
748 | struct pxad_chan *chan = to_pxad_chan(dchan); | ||
749 | |||
750 | vchan_free_chan_resources(&chan->vc); | ||
751 | dma_pool_destroy(chan->desc_pool); | ||
752 | chan->desc_pool = NULL; | ||
753 | |||
754 | } | ||
755 | |||
756 | static void pxad_free_desc(struct virt_dma_desc *vd) | ||
757 | { | ||
758 | int i; | ||
759 | dma_addr_t dma; | ||
760 | struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd); | ||
761 | |||
762 | BUG_ON(sw_desc->nb_desc == 0); | ||
763 | for (i = sw_desc->nb_desc - 1; i >= 0; i--) { | ||
764 | if (i > 0) | ||
765 | dma = sw_desc->hw_desc[i - 1]->ddadr; | ||
766 | else | ||
767 | dma = sw_desc->first; | ||
768 | dma_pool_free(sw_desc->desc_pool, | ||
769 | sw_desc->hw_desc[i], dma); | ||
770 | } | ||
771 | sw_desc->nb_desc = 0; | ||
772 | kfree(sw_desc); | ||
773 | } | ||
774 | |||
775 | static struct pxad_desc_sw * | ||
776 | pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc) | ||
777 | { | ||
778 | struct pxad_desc_sw *sw_desc; | ||
779 | dma_addr_t dma; | ||
780 | int i; | ||
781 | |||
782 | sw_desc = kzalloc(sizeof(*sw_desc) + | ||
783 | nb_hw_desc * sizeof(struct pxad_desc_hw *), | ||
784 | GFP_NOWAIT); | ||
785 | if (!sw_desc) | ||
786 | return NULL; | ||
787 | sw_desc->desc_pool = chan->desc_pool; | ||
788 | |||
789 | for (i = 0; i < nb_hw_desc; i++) { | ||
790 | sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool, | ||
791 | GFP_NOWAIT, &dma); | ||
792 | if (!sw_desc->hw_desc[i]) { | ||
793 | dev_err(&chan->vc.chan.dev->device, | ||
794 | "%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n", | ||
795 | __func__, i, sw_desc->desc_pool); | ||
796 | goto err; | ||
797 | } | ||
798 | |||
799 | if (i == 0) | ||
800 | sw_desc->first = dma; | ||
801 | else | ||
802 | sw_desc->hw_desc[i - 1]->ddadr = dma; | ||
803 | sw_desc->nb_desc++; | ||
804 | } | ||
805 | |||
806 | return sw_desc; | ||
807 | err: | ||
808 | pxad_free_desc(&sw_desc->vd); | ||
809 | return NULL; | ||
810 | } | ||
811 | |||
812 | static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx) | ||
813 | { | ||
814 | struct virt_dma_chan *vc = to_virt_chan(tx->chan); | ||
815 | struct pxad_chan *chan = to_pxad_chan(&vc->chan); | ||
816 | struct virt_dma_desc *vd_chained = NULL, | ||
817 | *vd = container_of(tx, struct virt_dma_desc, tx); | ||
818 | dma_cookie_t cookie; | ||
819 | unsigned long flags; | ||
820 | |||
821 | set_updater_desc(to_pxad_sw_desc(vd), tx->flags); | ||
822 | |||
823 | spin_lock_irqsave(&vc->lock, flags); | ||
824 | cookie = dma_cookie_assign(tx); | ||
825 | |||
826 | if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) { | ||
827 | list_move_tail(&vd->node, &vc->desc_issued); | ||
828 | dev_dbg(&chan->vc.chan.dev->device, | ||
829 | "%s(): txd %p[%x]: submitted (hot linked)\n", | ||
830 | __func__, vd, cookie); | ||
831 | goto out; | ||
832 | } | ||
833 | |||
834 | /* | ||
835 | * Fallback to placing the tx in the submitted queue | ||
836 | */ | ||
837 | if (!list_empty(&vc->desc_submitted)) { | ||
838 | vd_chained = list_entry(vc->desc_submitted.prev, | ||
839 | struct virt_dma_desc, node); | ||
840 | /* | ||
841 | * Only chain the descriptors if no new misalignment is | ||
842 | * introduced. If a new misalignment is chained, let the channel | ||
843 | * stop, and be relaunched in misalign mode from the irq | ||
844 | * handler. | ||
845 | */ | ||
846 | if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned) | ||
847 | pxad_desc_chain(vd_chained, vd); | ||
848 | else | ||
849 | vd_chained = NULL; | ||
850 | } | ||
851 | dev_dbg(&chan->vc.chan.dev->device, | ||
852 | "%s(): txd %p[%x]: submitted (%s linked)\n", | ||
853 | __func__, vd, cookie, vd_chained ? "cold" : "not"); | ||
854 | list_move_tail(&vd->node, &vc->desc_submitted); | ||
855 | chan->misaligned |= to_pxad_sw_desc(vd)->misaligned; | ||
856 | |||
857 | out: | ||
858 | spin_unlock_irqrestore(&vc->lock, flags); | ||
859 | return cookie; | ||
860 | } | ||
861 | |||
862 | static void pxad_issue_pending(struct dma_chan *dchan) | ||
863 | { | ||
864 | struct pxad_chan *chan = to_pxad_chan(dchan); | ||
865 | struct virt_dma_desc *vd_first; | ||
866 | unsigned long flags; | ||
867 | |||
868 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
869 | if (list_empty(&chan->vc.desc_submitted)) | ||
870 | goto out; | ||
871 | |||
872 | vd_first = list_first_entry(&chan->vc.desc_submitted, | ||
873 | struct virt_dma_desc, node); | ||
874 | dev_dbg(&chan->vc.chan.dev->device, | ||
875 | "%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie); | ||
876 | |||
877 | vchan_issue_pending(&chan->vc); | ||
878 | if (!pxad_try_hotchain(&chan->vc, vd_first)) | ||
879 | pxad_launch_chan(chan, to_pxad_sw_desc(vd_first)); | ||
880 | out: | ||
881 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
882 | } | ||
883 | |||
884 | static inline struct dma_async_tx_descriptor * | ||
885 | pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd, | ||
886 | unsigned long tx_flags) | ||
887 | { | ||
888 | struct dma_async_tx_descriptor *tx; | ||
889 | struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc); | ||
890 | |||
891 | tx = vchan_tx_prep(vc, vd, tx_flags); | ||
892 | tx->tx_submit = pxad_tx_submit; | ||
893 | dev_dbg(&chan->vc.chan.dev->device, | ||
894 | "%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__, | ||
895 | vc, vd, vd->tx.cookie, | ||
896 | tx_flags); | ||
897 | |||
898 | return tx; | ||
899 | } | ||
900 | |||
901 | static void pxad_get_config(struct pxad_chan *chan, | ||
902 | enum dma_transfer_direction dir, | ||
903 | u32 *dcmd, u32 *dev_src, u32 *dev_dst) | ||
904 | { | ||
905 | u32 maxburst = 0, dev_addr = 0; | ||
906 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | ||
907 | |||
908 | *dcmd = 0; | ||
909 | if (chan->cfg.direction == DMA_DEV_TO_MEM) { | ||
910 | maxburst = chan->cfg.src_maxburst; | ||
911 | width = chan->cfg.src_addr_width; | ||
912 | dev_addr = chan->cfg.src_addr; | ||
913 | *dev_src = dev_addr; | ||
914 | *dcmd |= PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC; | ||
915 | } | ||
916 | if (chan->cfg.direction == DMA_MEM_TO_DEV) { | ||
917 | maxburst = chan->cfg.dst_maxburst; | ||
918 | width = chan->cfg.dst_addr_width; | ||
919 | dev_addr = chan->cfg.dst_addr; | ||
920 | *dev_dst = dev_addr; | ||
921 | *dcmd |= PXA_DCMD_INCSRCADDR | PXA_DCMD_FLOWTRG; | ||
922 | } | ||
923 | if (chan->cfg.direction == DMA_MEM_TO_MEM) | ||
924 | *dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR | | ||
925 | PXA_DCMD_INCSRCADDR; | ||
926 | |||
927 | dev_dbg(&chan->vc.chan.dev->device, | ||
928 | "%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n", | ||
929 | __func__, dev_addr, maxburst, width, dir); | ||
930 | |||
931 | if (width == DMA_SLAVE_BUSWIDTH_1_BYTE) | ||
932 | *dcmd |= PXA_DCMD_WIDTH1; | ||
933 | else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES) | ||
934 | *dcmd |= PXA_DCMD_WIDTH2; | ||
935 | else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES) | ||
936 | *dcmd |= PXA_DCMD_WIDTH4; | ||
937 | |||
938 | if (maxburst == 8) | ||
939 | *dcmd |= PXA_DCMD_BURST8; | ||
940 | else if (maxburst == 16) | ||
941 | *dcmd |= PXA_DCMD_BURST16; | ||
942 | else if (maxburst == 32) | ||
943 | *dcmd |= PXA_DCMD_BURST32; | ||
944 | |||
945 | /* FIXME: drivers should be ported over to use the filter | ||
946 | * function. Once that's done, the following two lines can | ||
947 | * be removed. | ||
948 | */ | ||
949 | if (chan->cfg.slave_id) | ||
950 | chan->drcmr = chan->cfg.slave_id; | ||
951 | } | ||
952 | |||
953 | static struct dma_async_tx_descriptor * | ||
954 | pxad_prep_memcpy(struct dma_chan *dchan, | ||
955 | dma_addr_t dma_dst, dma_addr_t dma_src, | ||
956 | size_t len, unsigned long flags) | ||
957 | { | ||
958 | struct pxad_chan *chan = to_pxad_chan(dchan); | ||
959 | struct pxad_desc_sw *sw_desc; | ||
960 | struct pxad_desc_hw *hw_desc; | ||
961 | u32 dcmd; | ||
962 | unsigned int i, nb_desc = 0; | ||
963 | size_t copy; | ||
964 | |||
965 | if (!dchan || !len) | ||
966 | return NULL; | ||
967 | |||
968 | dev_dbg(&chan->vc.chan.dev->device, | ||
969 | "%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n", | ||
970 | __func__, (unsigned long)dma_dst, (unsigned long)dma_src, | ||
971 | len, flags); | ||
972 | pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL); | ||
973 | |||
974 | nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES); | ||
975 | sw_desc = pxad_alloc_desc(chan, nb_desc + 1); | ||
976 | if (!sw_desc) | ||
977 | return NULL; | ||
978 | sw_desc->len = len; | ||
979 | |||
980 | if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) || | ||
981 | !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT)) | ||
982 | sw_desc->misaligned = true; | ||
983 | |||
984 | i = 0; | ||
985 | do { | ||
986 | hw_desc = sw_desc->hw_desc[i++]; | ||
987 | copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES); | ||
988 | hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy); | ||
989 | hw_desc->dsadr = dma_src; | ||
990 | hw_desc->dtadr = dma_dst; | ||
991 | len -= copy; | ||
992 | dma_src += copy; | ||
993 | dma_dst += copy; | ||
994 | } while (len); | ||
995 | set_updater_desc(sw_desc, flags); | ||
996 | |||
997 | return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags); | ||
998 | } | ||
999 | |||
1000 | static struct dma_async_tx_descriptor * | ||
1001 | pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | ||
1002 | unsigned int sg_len, enum dma_transfer_direction dir, | ||
1003 | unsigned long flags, void *context) | ||
1004 | { | ||
1005 | struct pxad_chan *chan = to_pxad_chan(dchan); | ||
1006 | struct pxad_desc_sw *sw_desc; | ||
1007 | size_t len, avail; | ||
1008 | struct scatterlist *sg; | ||
1009 | dma_addr_t dma; | ||
1010 | u32 dcmd, dsadr = 0, dtadr = 0; | ||
1011 | unsigned int nb_desc = 0, i, j = 0; | ||
1012 | |||
1013 | if ((sgl == NULL) || (sg_len == 0)) | ||
1014 | return NULL; | ||
1015 | |||
1016 | pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr); | ||
1017 | dev_dbg(&chan->vc.chan.dev->device, | ||
1018 | "%s(): dir=%d flags=%lx\n", __func__, dir, flags); | ||
1019 | |||
1020 | for_each_sg(sgl, sg, sg_len, i) | ||
1021 | nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES); | ||
1022 | sw_desc = pxad_alloc_desc(chan, nb_desc + 1); | ||
1023 | if (!sw_desc) | ||
1024 | return NULL; | ||
1025 | |||
1026 | for_each_sg(sgl, sg, sg_len, i) { | ||
1027 | dma = sg_dma_address(sg); | ||
1028 | avail = sg_dma_len(sg); | ||
1029 | sw_desc->len += avail; | ||
1030 | |||
1031 | do { | ||
1032 | len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES); | ||
1033 | if (dma & 0x7) | ||
1034 | sw_desc->misaligned = true; | ||
1035 | |||
1036 | sw_desc->hw_desc[j]->dcmd = | ||
1037 | dcmd | (PXA_DCMD_LENGTH & len); | ||
1038 | sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma; | ||
1039 | sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma; | ||
1040 | |||
1041 | dma += len; | ||
1042 | avail -= len; | ||
1043 | } while (avail); | ||
1044 | } | ||
1045 | set_updater_desc(sw_desc, flags); | ||
1046 | |||
1047 | return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags); | ||
1048 | } | ||
1049 | |||
1050 | static struct dma_async_tx_descriptor * | ||
1051 | pxad_prep_dma_cyclic(struct dma_chan *dchan, | ||
1052 | dma_addr_t buf_addr, size_t len, size_t period_len, | ||
1053 | enum dma_transfer_direction dir, unsigned long flags) | ||
1054 | { | ||
1055 | struct pxad_chan *chan = to_pxad_chan(dchan); | ||
1056 | struct pxad_desc_sw *sw_desc; | ||
1057 | struct pxad_desc_hw **phw_desc; | ||
1058 | dma_addr_t dma; | ||
1059 | u32 dcmd, dsadr = 0, dtadr = 0; | ||
1060 | unsigned int nb_desc = 0; | ||
1061 | |||
1062 | if (!dchan || !len || !period_len) | ||
1063 | return NULL; | ||
1064 | if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) { | ||
1065 | dev_err(&chan->vc.chan.dev->device, | ||
1066 | "Unsupported direction for cyclic DMA\n"); | ||
1067 | return NULL; | ||
1068 | } | ||
1069 | /* the buffer length must be a multiple of period_len */ | ||
1070 | if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES || | ||
1071 | !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT)) | ||
1072 | return NULL; | ||
1073 | |||
1074 | pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr); | ||
1075 | dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len); | ||
1076 | dev_dbg(&chan->vc.chan.dev->device, | ||
1077 | "%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n", | ||
1078 | __func__, (unsigned long)buf_addr, len, period_len, dir, flags); | ||
1079 | |||
1080 | nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES); | ||
1081 | nb_desc *= DIV_ROUND_UP(len, period_len); | ||
1082 | sw_desc = pxad_alloc_desc(chan, nb_desc + 1); | ||
1083 | if (!sw_desc) | ||
1084 | return NULL; | ||
1085 | sw_desc->cyclic = true; | ||
1086 | sw_desc->len = len; | ||
1087 | |||
1088 | phw_desc = sw_desc->hw_desc; | ||
1089 | dma = buf_addr; | ||
1090 | do { | ||
1091 | phw_desc[0]->dsadr = dsadr ? dsadr : dma; | ||
1092 | phw_desc[0]->dtadr = dtadr ? dtadr : dma; | ||
1093 | phw_desc[0]->dcmd = dcmd; | ||
1094 | phw_desc++; | ||
1095 | dma += period_len; | ||
1096 | len -= period_len; | ||
1097 | } while (len); | ||
1098 | set_updater_desc(sw_desc, flags); | ||
1099 | |||
1100 | return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags); | ||
1101 | } | ||
1102 | |||
1103 | static int pxad_config(struct dma_chan *dchan, | ||
1104 | struct dma_slave_config *cfg) | ||
1105 | { | ||
1106 | struct pxad_chan *chan = to_pxad_chan(dchan); | ||
1107 | |||
1108 | if (!dchan) | ||
1109 | return -EINVAL; | ||
1110 | |||
1111 | chan->cfg = *cfg; | ||
1112 | return 0; | ||
1113 | } | ||
1114 | |||
1115 | static int pxad_terminate_all(struct dma_chan *dchan) | ||
1116 | { | ||
1117 | struct pxad_chan *chan = to_pxad_chan(dchan); | ||
1118 | struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device); | ||
1119 | struct virt_dma_desc *vd = NULL; | ||
1120 | unsigned long flags; | ||
1121 | struct pxad_phy *phy; | ||
1122 | LIST_HEAD(head); | ||
1123 | |||
1124 | dev_dbg(&chan->vc.chan.dev->device, | ||
1125 | "%s(): vchan %p: terminate all\n", __func__, &chan->vc); | ||
1126 | |||
1127 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
1128 | vchan_get_all_descriptors(&chan->vc, &head); | ||
1129 | |||
1130 | list_for_each_entry(vd, &head, node) { | ||
1131 | dev_dbg(&chan->vc.chan.dev->device, | ||
1132 | "%s(): cancelling txd %p[%x] (completed=%d)", __func__, | ||
1133 | vd, vd->tx.cookie, is_desc_completed(vd)); | ||
1134 | } | ||
1135 | |||
1136 | phy = chan->phy; | ||
1137 | if (phy) { | ||
1138 | phy_disable(chan->phy); | ||
1139 | pxad_free_phy(chan); | ||
1140 | chan->phy = NULL; | ||
1141 | spin_lock(&pdev->phy_lock); | ||
1142 | phy->vchan = NULL; | ||
1143 | spin_unlock(&pdev->phy_lock); | ||
1144 | } | ||
1145 | spin_unlock_irqrestore(&chan->vc.lock, flags); | ||
1146 | vchan_dma_desc_free_list(&chan->vc, &head); | ||
1147 | |||
1148 | return 0; | ||
1149 | } | ||
1150 | |||
1151 | static unsigned int pxad_residue(struct pxad_chan *chan, | ||
1152 | dma_cookie_t cookie) | ||
1153 | { | ||
1154 | struct virt_dma_desc *vd = NULL; | ||
1155 | struct pxad_desc_sw *sw_desc = NULL; | ||
1156 | struct pxad_desc_hw *hw_desc = NULL; | ||
1157 | u32 curr, start, len, end, residue = 0; | ||
1158 | unsigned long flags; | ||
1159 | bool passed = false; | ||
1160 | int i; | ||
1161 | |||
1162 | /* | ||
1163 | * If the channel does not have a phy pointer anymore, it has already | ||
1164 | * been completed. Therefore, its residue is 0. | ||
1165 | */ | ||
1166 | if (!chan->phy) | ||
1167 | return 0; | ||
1168 | |||
1169 | spin_lock_irqsave(&chan->vc.lock, flags); | ||
1170 | |||
1171 | vd = vchan_find_desc(&chan->vc, cookie); | ||
1172 | if (!vd) | ||
1173 | goto out; | ||
1174 | |||
1175 | sw_desc = to_pxad_sw_desc(vd); | ||
1176 | if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) | ||
1177 | curr = phy_readl_relaxed(chan->phy, DSADR); | ||
1178 | else | ||
1179 | curr = phy_readl_relaxed(chan->phy, DTADR); | ||
1180 | |||
1181 | for (i = 0; i < sw_desc->nb_desc - 1; i++) { | ||
1182 | hw_desc = sw_desc->hw_desc[i]; | ||
1183 | if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR) | ||
1184 | start = hw_desc->dsadr; | ||
1185 | else | ||
1186 | start = hw_desc->dtadr; | ||
1187 | len = hw_desc->dcmd & PXA_DCMD_LENGTH; | ||
1188 | end = start + len; | ||
1189 | |||
1190 | /* | ||
1191 | * 'passed' will be latched once we found the descriptor | ||
1192 | * which lies inside the boundaries of the curr | ||
1193 | * pointer. All descriptors that occur in the list | ||
1194 | * _after_ we found that partially handled descriptor | ||
1195 | * are still to be processed and are hence added to the | ||
1196 | * residual bytes counter. | ||
1197 | */ | ||
1198 | |||
1199 | if (passed) { | ||
1200 | residue += len; | ||
1201 | } else if (curr >= start && curr <= end) { | ||
1202 | residue += end - curr; | ||
1203 | passed = true; | ||
1204 | } | ||
1205 | } | ||
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	ret = dma_cookie_status(dchan, cookie, txstate);
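	/*
	 * For a completed cookie this still reports a residue of 0:
	 * pxad_residue() bails out when the channel has dropped its phy
	 * or when the descriptor can no longer be found on the vchan.
	 */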
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	/* Undo the of_dma_controller_register() done in probe */
	if (op->dev.of_node)
		of_dma_controller_free(op->dev.of_node);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	dma_async_device_unregister(&pdev->slave);
	return 0;
}

static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret = 0;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq(op, i) > 0)
			nr_irq++;

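	/*
	 * Two interrupt topologies are supported: one IRQ per physical
	 * channel, each wired to pxad_chan_handler(), or a single muxed
	 * IRQ for the whole controller, wired once to pxad_int_handler().
	 */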
	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}

static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

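	/*
	 * The DT channel specifier uses two cells: cell 0 is the DRCMR
	 * requestor line, cell 1 the requested channel priority.
	 */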
	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_terminate_all = pxad_terminate_all;

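	/*
	 * Reuse the coherent DMA mask the platform already set up, if
	 * any; otherwise fall back to a 32-bit mask.
	 */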
	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
	}

	return dma_async_device_register(&pdev->slave);
}

static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret;
	u32 dma_channels = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id)
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
	else if (pdata && pdata->dma_channels)
		dma_channels = pdata->dma_channels;
	else
		dma_channels = 32;	/* default: 32 channels */

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %u channels\n", dma_channels);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver = {
		.name = "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table = pxad_id_table,
	.probe = pxad_probe,
	.remove = pxad_remove,
};

bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}
EXPORT_SYMBOL_GPL(pxad_filter_fn);

int pxad_toggle_reserved_channel(int legacy_channel)
{
	if (legacy_unavailable & BIT(legacy_channel))
		return -EBUSY;
	legacy_reserved ^= BIT(legacy_channel);
	return 0;
}
EXPORT_SYMBOL_GPL(pxad_toggle_reserved_channel);

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
new file mode 100644
index 000000000000..3edc99294bf6
--- /dev/null
+++ b/include/linux/dma/pxa-dma.h
@@ -0,0 +1,27 @@
#ifndef _PXA_DMA_H_
#define _PXA_DMA_H_

enum pxad_chan_prio {
	PXAD_PRIO_HIGHEST = 0,
	PXAD_PRIO_NORMAL,
	PXAD_PRIO_LOW,
	PXAD_PRIO_LOWEST,
};

struct pxad_param {
	unsigned int drcmr;
	enum pxad_chan_prio prio;
};

struct dma_chan;

#ifdef CONFIG_PXA_DMA
bool pxad_filter_fn(struct dma_chan *chan, void *param);
#else
static inline bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	return false;
}
#endif

#endif /* _PXA_DMA_H_ */
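
Note: on non device-tree platforms a client driver is expected to grab a
channel through pxad_filter_fn() and a pxad_param. A minimal sketch of
such a request follows; it is not part of this patch, and the helper name
and requestor line 17 are made-up example values:

#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>

/* Hypothetical helper: DRCMR line 17 is an example value only */
static struct dma_chan *example_request_pxad_chan(void)
{
	dma_cap_mask_t mask;
	struct pxad_param param = {
		.drcmr = 17,
		.prio = PXAD_PRIO_NORMAL,
	};

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* pxad_filter_fn() stores drcmr/prio into the matched channel */
	return dma_request_channel(mask, pxad_filter_fn, &param);
}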