diff options
author | Joe Perches <joe@perches.com> | 2013-11-17 15:12:56 -0500 |
---|---|---|
committer | Vinod Koul <vinod.koul@intel.com> | 2013-12-15 22:48:48 -0500 |
commit | 2b7f65b11d87f9f3925dee5df020303b362c98ee (patch) | |
tree | 00269e2cb051d2896e67a37601b0e8d868d92220 | |
parent | 0da9e55e71bc239102d47ac422162c9915c99074 (diff) |
mmp_pdma: Style neatening
Neaten code used as a template for other drivers.
Make the code more consistent with kernel styles.
o Convert #defines with (1<<foo) to BIT(foo)
o Alignment wrapping
o Logic inversions to put return at end of functions
o Convert devm_kzalloc with multiply to devm_kcalloc
o Fix typo of Peripheral
Signed-off-by: Joe Perches <joe@perches.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r-- | drivers/dma/mmp_pdma.c | 204 |
1 file changed, 105 insertions, 99 deletions
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c index 8869500ab92b..3f7712c4d3fa 100644 --- a/drivers/dma/mmp_pdma.c +++ b/drivers/dma/mmp_pdma.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * it under the terms of the GNU General Public License version 2 as | 5 | * it under the terms of the GNU General Public License version 2 as |
6 | * published by the Free Software Foundation. | 6 | * published by the Free Software Foundation. |
7 | */ | 7 | */ |
8 | |||
8 | #include <linux/err.h> | 9 | #include <linux/err.h> |
9 | #include <linux/module.h> | 10 | #include <linux/module.h> |
10 | #include <linux/init.h> | 11 | #include <linux/init.h> |
@@ -32,38 +33,37 @@ | |||
32 | #define DTADR 0x0208 | 33 | #define DTADR 0x0208 |
33 | #define DCMD 0x020c | 34 | #define DCMD 0x020c |
34 | 35 | ||
35 | #define DCSR_RUN (1 << 31) /* Run Bit (read / write) */ | 36 | #define DCSR_RUN BIT(31) /* Run Bit (read / write) */ |
36 | #define DCSR_NODESC (1 << 30) /* No-Descriptor Fetch (read / write) */ | 37 | #define DCSR_NODESC BIT(30) /* No-Descriptor Fetch (read / write) */ |
37 | #define DCSR_STOPIRQEN (1 << 29) /* Stop Interrupt Enable (read / write) */ | 38 | #define DCSR_STOPIRQEN BIT(29) /* Stop Interrupt Enable (read / write) */ |
38 | #define DCSR_REQPEND (1 << 8) /* Request Pending (read-only) */ | 39 | #define DCSR_REQPEND BIT(8) /* Request Pending (read-only) */ |
39 | #define DCSR_STOPSTATE (1 << 3) /* Stop State (read-only) */ | 40 | #define DCSR_STOPSTATE BIT(3) /* Stop State (read-only) */ |
40 | #define DCSR_ENDINTR (1 << 2) /* End Interrupt (read / write) */ | 41 | #define DCSR_ENDINTR BIT(2) /* End Interrupt (read / write) */ |
41 | #define DCSR_STARTINTR (1 << 1) /* Start Interrupt (read / write) */ | 42 | #define DCSR_STARTINTR BIT(1) /* Start Interrupt (read / write) */ |
42 | #define DCSR_BUSERR (1 << 0) /* Bus Error Interrupt (read / write) */ | 43 | #define DCSR_BUSERR BIT(0) /* Bus Error Interrupt (read / write) */ |
43 | 44 | ||
44 | #define DCSR_EORIRQEN (1 << 28) /* End of Receive Interrupt Enable (R/W) */ | 45 | #define DCSR_EORIRQEN BIT(28) /* End of Receive Interrupt Enable (R/W) */ |
45 | #define DCSR_EORJMPEN (1 << 27) /* Jump to next descriptor on EOR */ | 46 | #define DCSR_EORJMPEN BIT(27) /* Jump to next descriptor on EOR */ |
46 | #define DCSR_EORSTOPEN (1 << 26) /* STOP on an EOR */ | 47 | #define DCSR_EORSTOPEN BIT(26) /* STOP on an EOR */ |
47 | #define DCSR_SETCMPST (1 << 25) /* Set Descriptor Compare Status */ | 48 | #define DCSR_SETCMPST BIT(25) /* Set Descriptor Compare Status */ |
48 | #define DCSR_CLRCMPST (1 << 24) /* Clear Descriptor Compare Status */ | 49 | #define DCSR_CLRCMPST BIT(24) /* Clear Descriptor Compare Status */ |
49 | #define DCSR_CMPST (1 << 10) /* The Descriptor Compare Status */ | 50 | #define DCSR_CMPST BIT(10) /* The Descriptor Compare Status */ |
50 | #define DCSR_EORINTR (1 << 9) /* The end of Receive */ | 51 | #define DCSR_EORINTR BIT(9) /* The end of Receive */ |
51 | 52 | ||
52 | #define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + \ | 53 | #define DRCMR(n) ((((n) < 64) ? 0x0100 : 0x1100) + (((n) & 0x3f) << 2)) |
53 | (((n) & 0x3f) << 2)) | 54 | #define DRCMR_MAPVLD BIT(7) /* Map Valid (read / write) */ |
54 | #define DRCMR_MAPVLD (1 << 7) /* Map Valid (read / write) */ | 55 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ |
55 | #define DRCMR_CHLNUM 0x1f /* mask for Channel Number (read / write) */ | ||
56 | 56 | ||
57 | #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ | 57 | #define DDADR_DESCADDR 0xfffffff0 /* Address of next descriptor (mask) */ |
58 | #define DDADR_STOP (1 << 0) /* Stop (read / write) */ | 58 | #define DDADR_STOP BIT(0) /* Stop (read / write) */ |
59 | 59 | ||
60 | #define DCMD_INCSRCADDR (1 << 31) /* Source Address Increment Setting. */ | 60 | #define DCMD_INCSRCADDR BIT(31) /* Source Address Increment Setting. */ |
61 | #define DCMD_INCTRGADDR (1 << 30) /* Target Address Increment Setting. */ | 61 | #define DCMD_INCTRGADDR BIT(30) /* Target Address Increment Setting. */ |
62 | #define DCMD_FLOWSRC (1 << 29) /* Flow Control by the source. */ | 62 | #define DCMD_FLOWSRC BIT(29) /* Flow Control by the source. */ |
63 | #define DCMD_FLOWTRG (1 << 28) /* Flow Control by the target. */ | 63 | #define DCMD_FLOWTRG BIT(28) /* Flow Control by the target. */ |
64 | #define DCMD_STARTIRQEN (1 << 22) /* Start Interrupt Enable */ | 64 | #define DCMD_STARTIRQEN BIT(22) /* Start Interrupt Enable */ |
65 | #define DCMD_ENDIRQEN (1 << 21) /* End Interrupt Enable */ | 65 | #define DCMD_ENDIRQEN BIT(21) /* End Interrupt Enable */ |
66 | #define DCMD_ENDIAN (1 << 18) /* Device Endian-ness. */ | 66 | #define DCMD_ENDIAN BIT(18) /* Device Endian-ness. */ |
67 | #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ | 67 | #define DCMD_BURST8 (1 << 16) /* 8 byte burst */ |
68 | #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ | 68 | #define DCMD_BURST16 (2 << 16) /* 16 byte burst */ |
69 | #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ | 69 | #define DCMD_BURST32 (3 << 16) /* 32 byte burst */ |
@@ -132,10 +132,14 @@ struct mmp_pdma_device { | |||
132 | spinlock_t phy_lock; /* protect alloc/free phy channels */ | 132 | spinlock_t phy_lock; /* protect alloc/free phy channels */ |
133 | }; | 133 | }; |
134 | 134 | ||
135 | #define tx_to_mmp_pdma_desc(tx) container_of(tx, struct mmp_pdma_desc_sw, async_tx) | 135 | #define tx_to_mmp_pdma_desc(tx) \ |
136 | #define to_mmp_pdma_desc(lh) container_of(lh, struct mmp_pdma_desc_sw, node) | 136 | container_of(tx, struct mmp_pdma_desc_sw, async_tx) |
137 | #define to_mmp_pdma_chan(dchan) container_of(dchan, struct mmp_pdma_chan, chan) | 137 | #define to_mmp_pdma_desc(lh) \ |
138 | #define to_mmp_pdma_dev(dmadev) container_of(dmadev, struct mmp_pdma_device, device) | 138 | container_of(lh, struct mmp_pdma_desc_sw, node) |
139 | #define to_mmp_pdma_chan(dchan) \ | ||
140 | container_of(dchan, struct mmp_pdma_chan, chan) | ||
141 | #define to_mmp_pdma_dev(dmadev) \ | ||
142 | container_of(dmadev, struct mmp_pdma_device, device) | ||
139 | 143 | ||
140 | static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) | 144 | static void set_desc(struct mmp_pdma_phy *phy, dma_addr_t addr) |
141 | { | 145 | { |
@@ -162,19 +166,18 @@ static void enable_chan(struct mmp_pdma_phy *phy) | |||
162 | writel(dalgn, phy->base + DALGN); | 166 | writel(dalgn, phy->base + DALGN); |
163 | 167 | ||
164 | reg = (phy->idx << 2) + DCSR; | 168 | reg = (phy->idx << 2) + DCSR; |
165 | writel(readl(phy->base + reg) | DCSR_RUN, | 169 | writel(readl(phy->base + reg) | DCSR_RUN, phy->base + reg); |
166 | phy->base + reg); | ||
167 | } | 170 | } |
168 | 171 | ||
169 | static void disable_chan(struct mmp_pdma_phy *phy) | 172 | static void disable_chan(struct mmp_pdma_phy *phy) |
170 | { | 173 | { |
171 | u32 reg; | 174 | u32 reg; |
172 | 175 | ||
173 | if (phy) { | 176 | if (!phy) |
174 | reg = (phy->idx << 2) + DCSR; | 177 | return; |
175 | writel(readl(phy->base + reg) & ~DCSR_RUN, | 178 | |
176 | phy->base + reg); | 179 | reg = (phy->idx << 2) + DCSR; |
177 | } | 180 | writel(readl(phy->base + reg) & ~DCSR_RUN, phy->base + reg); |
178 | } | 181 | } |
179 | 182 | ||
180 | static int clear_chan_irq(struct mmp_pdma_phy *phy) | 183 | static int clear_chan_irq(struct mmp_pdma_phy *phy) |
@@ -183,26 +186,27 @@ static int clear_chan_irq(struct mmp_pdma_phy *phy) | |||
183 | u32 dint = readl(phy->base + DINT); | 186 | u32 dint = readl(phy->base + DINT); |
184 | u32 reg = (phy->idx << 2) + DCSR; | 187 | u32 reg = (phy->idx << 2) + DCSR; |
185 | 188 | ||
186 | if (dint & BIT(phy->idx)) { | 189 | if (!(dint & BIT(phy->idx))) |
187 | /* clear irq */ | 190 | return -EAGAIN; |
188 | dcsr = readl(phy->base + reg); | 191 | |
189 | writel(dcsr, phy->base + reg); | 192 | /* clear irq */ |
190 | if ((dcsr & DCSR_BUSERR) && (phy->vchan)) | 193 | dcsr = readl(phy->base + reg); |
191 | dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); | 194 | writel(dcsr, phy->base + reg); |
192 | return 0; | 195 | if ((dcsr & DCSR_BUSERR) && (phy->vchan)) |
193 | } | 196 | dev_warn(phy->vchan->dev, "DCSR_BUSERR\n"); |
194 | return -EAGAIN; | 197 | |
198 | return 0; | ||
195 | } | 199 | } |
196 | 200 | ||
197 | static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) | 201 | static irqreturn_t mmp_pdma_chan_handler(int irq, void *dev_id) |
198 | { | 202 | { |
199 | struct mmp_pdma_phy *phy = dev_id; | 203 | struct mmp_pdma_phy *phy = dev_id; |
200 | 204 | ||
201 | if (clear_chan_irq(phy) == 0) { | 205 | if (clear_chan_irq(phy) != 0) |
202 | tasklet_schedule(&phy->vchan->tasklet); | ||
203 | return IRQ_HANDLED; | ||
204 | } else | ||
205 | return IRQ_NONE; | 206 | return IRQ_NONE; |
207 | |||
208 | tasklet_schedule(&phy->vchan->tasklet); | ||
209 | return IRQ_HANDLED; | ||
206 | } | 210 | } |
207 | 211 | ||
208 | static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) | 212 | static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) |
@@ -224,8 +228,8 @@ static irqreturn_t mmp_pdma_int_handler(int irq, void *dev_id) | |||
224 | 228 | ||
225 | if (irq_num) | 229 | if (irq_num) |
226 | return IRQ_HANDLED; | 230 | return IRQ_HANDLED; |
227 | else | 231 | |
228 | return IRQ_NONE; | 232 | return IRQ_NONE; |
229 | } | 233 | } |
230 | 234 | ||
231 | /* lookup free phy channel as descending priority */ | 235 | /* lookup free phy channel as descending priority */ |
@@ -245,9 +249,9 @@ static struct mmp_pdma_phy *lookup_phy(struct mmp_pdma_chan *pchan) | |||
245 | */ | 249 | */ |
246 | 250 | ||
247 | spin_lock_irqsave(&pdev->phy_lock, flags); | 251 | spin_lock_irqsave(&pdev->phy_lock, flags); |
248 | for (prio = 0; prio <= (((pdev->dma_channels - 1) & 0xf) >> 2); prio++) { | 252 | for (prio = 0; prio <= ((pdev->dma_channels - 1) & 0xf) >> 2; prio++) { |
249 | for (i = 0; i < pdev->dma_channels; i++) { | 253 | for (i = 0; i < pdev->dma_channels; i++) { |
250 | if (prio != ((i & 0xf) >> 2)) | 254 | if (prio != (i & 0xf) >> 2) |
251 | continue; | 255 | continue; |
252 | phy = &pdev->phy[i]; | 256 | phy = &pdev->phy[i]; |
253 | if (!phy->vchan) { | 257 | if (!phy->vchan) { |
@@ -389,14 +393,16 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) | |||
389 | if (chan->desc_pool) | 393 | if (chan->desc_pool) |
390 | return 1; | 394 | return 1; |
391 | 395 | ||
392 | chan->desc_pool = | 396 | chan->desc_pool = dma_pool_create(dev_name(&dchan->dev->device), |
393 | dma_pool_create(dev_name(&dchan->dev->device), chan->dev, | 397 | chan->dev, |
394 | sizeof(struct mmp_pdma_desc_sw), | 398 | sizeof(struct mmp_pdma_desc_sw), |
395 | __alignof__(struct mmp_pdma_desc_sw), 0); | 399 | __alignof__(struct mmp_pdma_desc_sw), |
400 | 0); | ||
396 | if (!chan->desc_pool) { | 401 | if (!chan->desc_pool) { |
397 | dev_err(chan->dev, "unable to allocate descriptor pool\n"); | 402 | dev_err(chan->dev, "unable to allocate descriptor pool\n"); |
398 | return -ENOMEM; | 403 | return -ENOMEM; |
399 | } | 404 | } |
405 | |||
400 | mmp_pdma_free_phy(chan); | 406 | mmp_pdma_free_phy(chan); |
401 | chan->idle = true; | 407 | chan->idle = true; |
402 | chan->dev_addr = 0; | 408 | chan->dev_addr = 0; |
@@ -404,7 +410,7 @@ static int mmp_pdma_alloc_chan_resources(struct dma_chan *dchan) | |||
404 | } | 410 | } |
405 | 411 | ||
406 | static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, | 412 | static void mmp_pdma_free_desc_list(struct mmp_pdma_chan *chan, |
407 | struct list_head *list) | 413 | struct list_head *list) |
408 | { | 414 | { |
409 | struct mmp_pdma_desc_sw *desc, *_desc; | 415 | struct mmp_pdma_desc_sw *desc, *_desc; |
410 | 416 | ||
@@ -434,8 +440,8 @@ static void mmp_pdma_free_chan_resources(struct dma_chan *dchan) | |||
434 | 440 | ||
435 | static struct dma_async_tx_descriptor * | 441 | static struct dma_async_tx_descriptor * |
436 | mmp_pdma_prep_memcpy(struct dma_chan *dchan, | 442 | mmp_pdma_prep_memcpy(struct dma_chan *dchan, |
437 | dma_addr_t dma_dst, dma_addr_t dma_src, | 443 | dma_addr_t dma_dst, dma_addr_t dma_src, |
438 | size_t len, unsigned long flags) | 444 | size_t len, unsigned long flags) |
439 | { | 445 | { |
440 | struct mmp_pdma_chan *chan; | 446 | struct mmp_pdma_chan *chan; |
441 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; | 447 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; |
@@ -515,8 +521,8 @@ fail: | |||
515 | 521 | ||
516 | static struct dma_async_tx_descriptor * | 522 | static struct dma_async_tx_descriptor * |
517 | mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, | 523 | mmp_pdma_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl, |
518 | unsigned int sg_len, enum dma_transfer_direction dir, | 524 | unsigned int sg_len, enum dma_transfer_direction dir, |
519 | unsigned long flags, void *context) | 525 | unsigned long flags, void *context) |
520 | { | 526 | { |
521 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 527 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
522 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; | 528 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new = NULL; |
@@ -591,10 +597,11 @@ fail: | |||
591 | return NULL; | 597 | return NULL; |
592 | } | 598 | } |
593 | 599 | ||
594 | static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic( | 600 | static struct dma_async_tx_descriptor * |
595 | struct dma_chan *dchan, dma_addr_t buf_addr, size_t len, | 601 | mmp_pdma_prep_dma_cyclic(struct dma_chan *dchan, |
596 | size_t period_len, enum dma_transfer_direction direction, | 602 | dma_addr_t buf_addr, size_t len, size_t period_len, |
597 | unsigned long flags, void *context) | 603 | enum dma_transfer_direction direction, |
604 | unsigned long flags, void *context) | ||
598 | { | 605 | { |
599 | struct mmp_pdma_chan *chan; | 606 | struct mmp_pdma_chan *chan; |
600 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; | 607 | struct mmp_pdma_desc_sw *first = NULL, *prev = NULL, *new; |
@@ -636,8 +643,8 @@ static struct dma_async_tx_descriptor *mmp_pdma_prep_dma_cyclic( | |||
636 | goto fail; | 643 | goto fail; |
637 | } | 644 | } |
638 | 645 | ||
639 | new->desc.dcmd = chan->dcmd | DCMD_ENDIRQEN | | 646 | new->desc.dcmd = (chan->dcmd | DCMD_ENDIRQEN | |
640 | (DCMD_LENGTH & period_len); | 647 | (DCMD_LENGTH & period_len)); |
641 | new->desc.dsadr = dma_src; | 648 | new->desc.dsadr = dma_src; |
642 | new->desc.dtadr = dma_dst; | 649 | new->desc.dtadr = dma_dst; |
643 | 650 | ||
@@ -677,12 +684,11 @@ fail: | |||
677 | } | 684 | } |
678 | 685 | ||
679 | static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | 686 | static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, |
680 | unsigned long arg) | 687 | unsigned long arg) |
681 | { | 688 | { |
682 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); | 689 | struct mmp_pdma_chan *chan = to_mmp_pdma_chan(dchan); |
683 | struct dma_slave_config *cfg = (void *)arg; | 690 | struct dma_slave_config *cfg = (void *)arg; |
684 | unsigned long flags; | 691 | unsigned long flags; |
685 | int ret = 0; | ||
686 | u32 maxburst = 0, addr = 0; | 692 | u32 maxburst = 0, addr = 0; |
687 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; | 693 | enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED; |
688 | 694 | ||
@@ -739,11 +745,12 @@ static int mmp_pdma_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd, | |||
739 | return -ENOSYS; | 745 | return -ENOSYS; |
740 | } | 746 | } |
741 | 747 | ||
742 | return ret; | 748 | return 0; |
743 | } | 749 | } |
744 | 750 | ||
745 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, | 751 | static enum dma_status mmp_pdma_tx_status(struct dma_chan *dchan, |
746 | dma_cookie_t cookie, struct dma_tx_state *txstate) | 752 | dma_cookie_t cookie, |
753 | struct dma_tx_state *txstate) | ||
747 | { | 754 | { |
748 | return dma_cookie_status(dchan, cookie, txstate); | 755 | return dma_cookie_status(dchan, cookie, txstate); |
749 | } | 756 | } |
@@ -845,15 +852,14 @@ static int mmp_pdma_remove(struct platform_device *op) | |||
845 | return 0; | 852 | return 0; |
846 | } | 853 | } |
847 | 854 | ||
848 | static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, | 855 | static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, int idx, int irq) |
849 | int idx, int irq) | ||
850 | { | 856 | { |
851 | struct mmp_pdma_phy *phy = &pdev->phy[idx]; | 857 | struct mmp_pdma_phy *phy = &pdev->phy[idx]; |
852 | struct mmp_pdma_chan *chan; | 858 | struct mmp_pdma_chan *chan; |
853 | int ret; | 859 | int ret; |
854 | 860 | ||
855 | chan = devm_kzalloc(pdev->dev, | 861 | chan = devm_kzalloc(pdev->dev, sizeof(struct mmp_pdma_chan), |
856 | sizeof(struct mmp_pdma_chan), GFP_KERNEL); | 862 | GFP_KERNEL); |
857 | if (chan == NULL) | 863 | if (chan == NULL) |
858 | return -ENOMEM; | 864 | return -ENOMEM; |
859 | 865 | ||
@@ -861,8 +867,8 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, | |||
861 | phy->base = pdev->base; | 867 | phy->base = pdev->base; |
862 | 868 | ||
863 | if (irq) { | 869 | if (irq) { |
864 | ret = devm_request_irq(pdev->dev, irq, | 870 | ret = devm_request_irq(pdev->dev, irq, mmp_pdma_chan_handler, 0, |
865 | mmp_pdma_chan_handler, 0, "pdma", phy); | 871 | "pdma", phy); |
866 | if (ret) { | 872 | if (ret) { |
867 | dev_err(pdev->dev, "channel request irq fail!\n"); | 873 | dev_err(pdev->dev, "channel request irq fail!\n"); |
868 | return ret; | 874 | return ret; |
@@ -877,8 +883,7 @@ static int mmp_pdma_chan_init(struct mmp_pdma_device *pdev, | |||
877 | INIT_LIST_HEAD(&chan->chain_running); | 883 | INIT_LIST_HEAD(&chan->chain_running); |
878 | 884 | ||
879 | /* register virt channel to dma engine */ | 885 | /* register virt channel to dma engine */ |
880 | list_add_tail(&chan->chan.device_node, | 886 | list_add_tail(&chan->chan.device_node, &pdev->device.channels); |
881 | &pdev->device.channels); | ||
882 | 887 | ||
883 | return 0; | 888 | return 0; |
884 | } | 889 | } |
@@ -913,13 +918,12 @@ retry: | |||
913 | * the lookup and the reservation */ | 918 | * the lookup and the reservation */ |
914 | chan = dma_get_slave_channel(candidate); | 919 | chan = dma_get_slave_channel(candidate); |
915 | 920 | ||
916 | if (chan) { | 921 | if (!chan) |
917 | struct mmp_pdma_chan *c = to_mmp_pdma_chan(chan); | 922 | goto retry; |
918 | c->drcmr = dma_spec->args[0]; | ||
919 | return chan; | ||
920 | } | ||
921 | 923 | ||
922 | goto retry; | 924 | to_mmp_pdma_chan(chan)->drcmr = dma_spec->args[0]; |
925 | |||
926 | return chan; | ||
923 | } | 927 | } |
924 | 928 | ||
925 | static int mmp_pdma_probe(struct platform_device *op) | 929 | static int mmp_pdma_probe(struct platform_device *op) |
@@ -934,6 +938,7 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
934 | pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); | 938 | pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL); |
935 | if (!pdev) | 939 | if (!pdev) |
936 | return -ENOMEM; | 940 | return -ENOMEM; |
941 | |||
937 | pdev->dev = &op->dev; | 942 | pdev->dev = &op->dev; |
938 | 943 | ||
939 | spin_lock_init(&pdev->phy_lock); | 944 | spin_lock_init(&pdev->phy_lock); |
@@ -945,8 +950,8 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
945 | 950 | ||
946 | of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); | 951 | of_id = of_match_device(mmp_pdma_dt_ids, pdev->dev); |
947 | if (of_id) | 952 | if (of_id) |
948 | of_property_read_u32(pdev->dev->of_node, | 953 | of_property_read_u32(pdev->dev->of_node, "#dma-channels", |
949 | "#dma-channels", &dma_channels); | 954 | &dma_channels); |
950 | else if (pdata && pdata->dma_channels) | 955 | else if (pdata && pdata->dma_channels) |
951 | dma_channels = pdata->dma_channels; | 956 | dma_channels = pdata->dma_channels; |
952 | else | 957 | else |
@@ -958,8 +963,9 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
958 | irq_num++; | 963 | irq_num++; |
959 | } | 964 | } |
960 | 965 | ||
961 | pdev->phy = devm_kzalloc(pdev->dev, | 966 | pdev->phy = devm_kcalloc(pdev->dev, |
962 | dma_channels * sizeof(struct mmp_pdma_chan), GFP_KERNEL); | 967 | dma_channels, sizeof(struct mmp_pdma_chan), |
968 | GFP_KERNEL); | ||
963 | if (pdev->phy == NULL) | 969 | if (pdev->phy == NULL) |
964 | return -ENOMEM; | 970 | return -ENOMEM; |
965 | 971 | ||
@@ -968,8 +974,8 @@ static int mmp_pdma_probe(struct platform_device *op) | |||
968 | if (irq_num != dma_channels) { | 974 | if (irq_num != dma_channels) { |
969 | /* all chan share one irq, demux inside */ | 975 | /* all chan share one irq, demux inside */ |
970 | irq = platform_get_irq(op, 0); | 976 | irq = platform_get_irq(op, 0); |
971 | ret = devm_request_irq(pdev->dev, irq, | 977 | ret = devm_request_irq(pdev->dev, irq, mmp_pdma_int_handler, 0, |
972 | mmp_pdma_int_handler, 0, "pdma", pdev); | 978 | "pdma", pdev); |
973 | if (ret) | 979 | if (ret) |
974 | return ret; | 980 | return ret; |
975 | } | 981 | } |
@@ -1045,7 +1051,7 @@ bool mmp_pdma_filter_fn(struct dma_chan *chan, void *param) | |||
1045 | if (chan->device->dev->driver != &mmp_pdma_driver.driver) | 1051 | if (chan->device->dev->driver != &mmp_pdma_driver.driver) |
1046 | return false; | 1052 | return false; |
1047 | 1053 | ||
1048 | c->drcmr = *(unsigned int *) param; | 1054 | c->drcmr = *(unsigned int *)param; |
1049 | 1055 | ||
1050 | return true; | 1056 | return true; |
1051 | } | 1057 | } |
@@ -1053,6 +1059,6 @@ EXPORT_SYMBOL_GPL(mmp_pdma_filter_fn); | |||
1053 | 1059 | ||
1054 | module_platform_driver(mmp_pdma_driver); | 1060 | module_platform_driver(mmp_pdma_driver); |
1055 | 1061 | ||
1056 | MODULE_DESCRIPTION("MARVELL MMP Periphera DMA Driver"); | 1062 | MODULE_DESCRIPTION("MARVELL MMP Peripheral DMA Driver"); |
1057 | MODULE_AUTHOR("Marvell International Ltd."); | 1063 | MODULE_AUTHOR("Marvell International Ltd."); |
1058 | MODULE_LICENSE("GPL v2"); | 1064 | MODULE_LICENSE("GPL v2"); |