Diffstat (limited to 'drivers/dma/shdma.c')
-rw-r--r--  drivers/dma/shdma.c | 332
 1 file changed, 279 insertions(+), 53 deletions(-)
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index eb6b54dbb806..028330044201 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -27,7 +27,10 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/sh_dma.h>
-
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/spinlock.h>
+#include <linux/rculist.h>
 #include "shdma.h"
 
 /* DMA descriptor control */
@@ -43,6 +46,13 @@ enum sh_dmae_desc_status {
 /* Default MEMCPY transfer size = 2^2 = 4 bytes */
 #define LOG2_DEFAULT_XFER_SIZE 2
 
+/*
+ * Used for write-side mutual exclusion for the global device list,
+ * read-side synchronization by way of RCU, and per-controller data.
+ */
+static DEFINE_SPINLOCK(sh_dmae_lock);
+static LIST_HEAD(sh_dmae_devices);
+
 /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
 
@@ -75,22 +85,35 @@ static void dmaor_write(struct sh_dmae_device *shdev, u16 data)
  */
 static void sh_dmae_ctl_stop(struct sh_dmae_device *shdev)
 {
-	unsigned short dmaor = dmaor_read(shdev);
+	unsigned short dmaor;
+	unsigned long flags;
 
+	spin_lock_irqsave(&sh_dmae_lock, flags);
+
+	dmaor = dmaor_read(shdev);
 	dmaor_write(shdev, dmaor & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME));
+
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
 }
 
 static int sh_dmae_rst(struct sh_dmae_device *shdev)
 {
 	unsigned short dmaor;
+	unsigned long flags;
 
-	sh_dmae_ctl_stop(shdev);
-	dmaor = dmaor_read(shdev) | shdev->pdata->dmaor_init;
+	spin_lock_irqsave(&sh_dmae_lock, flags);
 
-	dmaor_write(shdev, dmaor);
-	if (dmaor_read(shdev) & (DMAOR_AE | DMAOR_NMIF)) {
-		pr_warning("dma-sh: Can't initialize DMAOR.\n");
-		return -EINVAL;
+	dmaor = dmaor_read(shdev) & ~(DMAOR_NMIF | DMAOR_AE | DMAOR_DME);
+
+	dmaor_write(shdev, dmaor | shdev->pdata->dmaor_init);
+
+	dmaor = dmaor_read(shdev);
+
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+	if (dmaor & (DMAOR_AE | DMAOR_NMIF)) {
+		dev_warn(shdev->common.dev, "Can't initialize DMAOR.\n");
+		return -EIO;
 	}
 	return 0;
 }
@@ -174,7 +197,7 @@ static void dmae_init(struct sh_dmae_chan *sh_chan)
 
 static int dmae_set_chcr(struct sh_dmae_chan *sh_chan, u32 val)
 {
-	/* When DMA was working, can not set data to CHCR */
+	/* If DMA is active, cannot set CHCR. TODO: remove this superfluous check */
 	if (dmae_is_busy(sh_chan))
 		return -EBUSY;
 
@@ -190,12 +213,17 @@ static int dmae_set_dmars(struct sh_dmae_chan *sh_chan, u16 val)
 					     struct sh_dmae_device, common);
 	struct sh_dmae_pdata *pdata = shdev->pdata;
 	const struct sh_dmae_channel *chan_pdata = &pdata->channel[sh_chan->id];
-	u16 __iomem *addr = shdev->dmars + chan_pdata->dmars / sizeof(u16);
+	u16 __iomem *addr = shdev->dmars;
 	int shift = chan_pdata->dmars_bit;
 
 	if (dmae_is_busy(sh_chan))
 		return -EBUSY;
 
+	/* in the case of a missing DMARS resource use first memory window */
+	if (!addr)
+		addr = (u16 __iomem *)shdev->chan_reg;
+	addr += chan_pdata->dmars / sizeof(u16);
+
 	__raw_writew((__raw_readw(addr) & (0xff00 >> shift)) | (val << shift),
 		     addr);
 
@@ -315,7 +343,7 @@ static int sh_dmae_alloc_chan_resources(struct dma_chan *chan)
 
 		dmae_set_dmars(sh_chan, cfg->mid_rid);
 		dmae_set_chcr(sh_chan, cfg->chcr);
-	} else if ((sh_dmae_readl(sh_chan, CHCR) & 0xf00) != 0x400) {
+	} else {
 		dmae_init(sh_chan);
 	}
 
@@ -364,7 +392,12 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 	LIST_HEAD(list);
 	int descs = sh_chan->descs_allocated;
 
+	/* Protect against ISR */
+	spin_lock_irq(&sh_chan->desc_lock);
 	dmae_halt(sh_chan);
+	spin_unlock_irq(&sh_chan->desc_lock);
+
+	/* Now no new interrupts will occur */
 
 	/* Prepared and not submitted descriptors can still be on the queue */
 	if (!list_empty(&sh_chan->ld_queue))
@@ -374,6 +407,7 @@ static void sh_dmae_free_chan_resources(struct dma_chan *chan)
 		/* The caller is holding dma_list_mutex */
 		struct sh_dmae_slave *param = chan->private;
 		clear_bit(param->slave_id, sh_dmae_slave_used);
+		chan->private = NULL;
 	}
 
 	spin_lock_bh(&sh_chan->desc_lock);
@@ -553,8 +587,6 @@ static struct dma_async_tx_descriptor *sh_dmae_prep_memcpy(
 	if (!chan || !len)
 		return NULL;
 
-	chan->private = NULL;
-
 	sh_chan = to_sh_chan(chan);
 
 	sg_init_table(&sg, 1);
@@ -610,9 +642,9 @@ static int sh_dmae_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
 	if (!chan)
 		return -EINVAL;
 
+	spin_lock_bh(&sh_chan->desc_lock);
 	dmae_halt(sh_chan);
 
-	spin_lock_bh(&sh_chan->desc_lock);
 	if (!list_empty(&sh_chan->ld_queue)) {
 		/* Record partial transfer */
 		struct sh_desc *desc = list_entry(sh_chan->ld_queue.next,
@@ -706,6 +738,14 @@ static dma_async_tx_callback __ld_cleanup(struct sh_dmae_chan *sh_chan, bool all
 			list_move(&desc->node, &sh_chan->ld_free);
 		}
 	}
+
+	if (all && !callback)
+		/*
+		 * Terminating and the loop completed normally: forgive
+		 * uncompleted cookies
+		 */
+		sh_chan->completed_cookie = sh_chan->common.cookie;
+
 	spin_unlock_bh(&sh_chan->desc_lock);
 
 	if (callback)
@@ -723,10 +763,6 @@ static void sh_dmae_chan_ld_cleanup(struct sh_dmae_chan *sh_chan, bool all)
 {
 	while (__ld_cleanup(sh_chan, all))
 		;
-
-	if (all)
-		/* Terminating - forgive uncompleted cookies */
-		sh_chan->completed_cookie = sh_chan->common.cookie;
 }
 
 static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
732static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan) 768static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
@@ -740,7 +776,7 @@ static void sh_chan_xfer_ld_queue(struct sh_dmae_chan *sh_chan)
 		return;
 	}
 
-	/* Find the first not transferred desciptor */
+	/* Find the first not transferred descriptor */
 	list_for_each_entry(desc, &sh_chan->ld_queue, node)
 		if (desc->mark == DESC_SUBMITTED) {
 			dev_dbg(sh_chan->dev, "Queue #%d to %d: %u@%x -> %x\n",
@@ -772,8 +808,10 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 
-	last_used = chan->cookie;
+	/* First read completed cookie to avoid a skew */
 	last_complete = sh_chan->completed_cookie;
+	rmb();
+	last_used = chan->cookie;
 	BUG_ON(last_complete < 0);
 	dma_set_tx_state(txstate, last_complete, last_used, 0);
 
@@ -803,8 +841,12 @@ static enum dma_status sh_dmae_tx_status(struct dma_chan *chan,
 static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 {
 	irqreturn_t ret = IRQ_NONE;
-	struct sh_dmae_chan *sh_chan = (struct sh_dmae_chan *)data;
-	u32 chcr = sh_dmae_readl(sh_chan, CHCR);
+	struct sh_dmae_chan *sh_chan = data;
+	u32 chcr;
+
+	spin_lock(&sh_chan->desc_lock);
+
+	chcr = sh_dmae_readl(sh_chan, CHCR);
 
 	if (chcr & CHCR_TE) {
 		/* DMA stop */
@@ -814,13 +856,15 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 		tasklet_schedule(&sh_chan->tasklet);
 	}
 
+	spin_unlock(&sh_chan->desc_lock);
+
 	return ret;
 }
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-static irqreturn_t sh_dmae_err(int irq, void *data)
+/* Called from error IRQ or NMI */
+static bool sh_dmae_reset(struct sh_dmae_device *shdev)
 {
-	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
+	unsigned int handled = 0;
 	int i;
 
 	/* halt the dma controller */
@@ -829,25 +873,51 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 	/* We cannot detect, which channel caused the error, have to reset all */
 	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
 		struct sh_dmae_chan *sh_chan = shdev->chan[i];
-		if (sh_chan) {
-			struct sh_desc *desc;
-			/* Stop the channel */
-			dmae_halt(sh_chan);
-			/* Complete all */
-			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
-				struct dma_async_tx_descriptor *tx = &desc->async_tx;
-				desc->mark = DESC_IDLE;
-				if (tx->callback)
-					tx->callback(tx->callback_param);
-			}
-			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
+		struct sh_desc *desc;
+		LIST_HEAD(dl);
+
+		if (!sh_chan)
+			continue;
+
+		spin_lock(&sh_chan->desc_lock);
+
+		/* Stop the channel */
+		dmae_halt(sh_chan);
+
+		list_splice_init(&sh_chan->ld_queue, &dl);
+
+		spin_unlock(&sh_chan->desc_lock);
+
+		/* Complete all */
+		list_for_each_entry(desc, &dl, node) {
+			struct dma_async_tx_descriptor *tx = &desc->async_tx;
+			desc->mark = DESC_IDLE;
+			if (tx->callback)
+				tx->callback(tx->callback_param);
 		}
+
+		spin_lock(&sh_chan->desc_lock);
+		list_splice(&dl, &sh_chan->ld_free);
+		spin_unlock(&sh_chan->desc_lock);
+
+		handled++;
 	}
+
 	sh_dmae_rst(shdev);
 
+	return !!handled;
+}
+
+static irqreturn_t sh_dmae_err(int irq, void *data)
+{
+	struct sh_dmae_device *shdev = data;
+
+	if (!(dmaor_read(shdev) & DMAOR_AE))
+		return IRQ_NONE;
+
+	sh_dmae_reset(data);
 	return IRQ_HANDLED;
 }
-#endif
 
 static void dmae_do_tasklet(unsigned long data)
 {
@@ -876,6 +946,54 @@ static void dmae_do_tasklet(unsigned long data)
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
 
+static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
+{
+	/* Fast path out if NMIF is not asserted for this controller */
+	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
+		return false;
+
+	return sh_dmae_reset(shdev);
+}
+
+static int sh_dmae_nmi_handler(struct notifier_block *self,
+			       unsigned long cmd, void *data)
+{
+	struct sh_dmae_device *shdev;
+	int ret = NOTIFY_DONE;
+	bool triggered;
+
+	/*
+	 * Only concern ourselves with NMI events.
+	 *
+	 * Normally we would check the die chain value, but as this needs
+	 * to be architecture independent, check for NMI context instead.
+	 */
+	if (!in_nmi())
+		return NOTIFY_DONE;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
+		/*
+		 * Only stop if one of the controllers has NMIF asserted,
+		 * we do not want to interfere with regular address error
+		 * handling or NMI events that don't concern the DMACs.
+		 */
+		triggered = sh_dmae_nmi_notify(shdev);
+		if (triggered == true)
+			ret = NOTIFY_OK;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+	.notifier_call	= sh_dmae_nmi_handler,
+
+	/* Run before NMI debug handler and KGDB */
+	.priority	= 1,
+};
+
 static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 					int irq, unsigned long flags)
 {
@@ -904,9 +1022,6 @@ static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 	tasklet_init(&new_sh_chan->tasklet, dmae_do_tasklet,
 			(unsigned long)new_sh_chan);
 
-	/* Init the channel */
-	dmae_init(new_sh_chan);
-
 	spin_lock_init(&new_sh_chan->desc_lock);
 
 	/* Init descripter manage list */
@@ -968,7 +1083,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	unsigned long irqflags = IRQF_DISABLED,
 		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
 	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
-	int err, i, irq_cnt = 0, irqres = 0;
+	int err, i, irq_cnt = 0, irqres = 0, irq_cap = 0;
 	struct sh_dmae_device *shdev;
 	struct resource *chan, *dmars, *errirq_res, *chanirq_res;
 
@@ -977,7 +1092,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 		return -ENODEV;
 
 	chan = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-	/* DMARS area is optional, if absent, this controller cannot do slave DMA */
+	/* DMARS area is optional */
 	dmars = platform_get_resource(pdev, IORESOURCE_MEM, 1);
 	/*
 	 * IRQ resources:
@@ -1029,10 +1144,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	/* platform data */
 	shdev->pdata = pdata;
 
+	platform_set_drvdata(pdev, shdev);
+
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
-	/* reset dma controller */
+	spin_lock_irq(&sh_dmae_lock);
+	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
+	spin_unlock_irq(&sh_dmae_lock);
+
+	/* reset dma controller - only needed as a test */
 	err = sh_dmae_rst(shdev);
 	if (err)
 		goto rst_err;
@@ -1040,7 +1161,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	INIT_LIST_HEAD(&shdev->common.channels);
 
 	dma_cap_set(DMA_MEMCPY, shdev->common.cap_mask);
-	if (dmars)
+	if (pdata->slave && pdata->slave_num)
 		dma_cap_set(DMA_SLAVE, shdev->common.cap_mask);
 
 	shdev->common.device_alloc_chan_resources
@@ -1089,12 +1210,22 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	    !platform_get_resource(pdev, IORESOURCE_IRQ, 1)) {
 		/* Special case - all multiplexed */
 		for (; irq_cnt < pdata->channel_num; irq_cnt++) {
-			chan_irq[irq_cnt] = chanirq_res->start;
-			chan_flag[irq_cnt] = IRQF_SHARED;
+			if (irq_cnt < SH_DMAC_MAX_CHANNELS) {
+				chan_irq[irq_cnt] = chanirq_res->start;
+				chan_flag[irq_cnt] = IRQF_SHARED;
+			} else {
+				irq_cap = 1;
+				break;
+			}
 		}
 	} else {
 		do {
 			for (i = chanirq_res->start; i <= chanirq_res->end; i++) {
+				if (irq_cnt >= SH_DMAC_MAX_CHANNELS) {
+					irq_cap = 1;
+					break;
+				}
+
 				if ((errirq_res->flags & IORESOURCE_BITS) ==
 				    IORESOURCE_IRQ_SHAREABLE)
 					chan_flag[irq_cnt] = IRQF_SHARED;
@@ -1105,41 +1236,55 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 					 i, irq_cnt);
 				chan_irq[irq_cnt++] = i;
 			}
+
+			if (irq_cnt >= SH_DMAC_MAX_CHANNELS)
+				break;
+
 			chanirq_res = platform_get_resource(pdev,
 						IORESOURCE_IRQ, ++irqres);
 		} while (irq_cnt < pdata->channel_num && chanirq_res);
 	}
 
-	if (irq_cnt < pdata->channel_num)
-		goto eirqres;
-
 	/* Create DMA Channel */
-	for (i = 0; i < pdata->channel_num; i++) {
+	for (i = 0; i < irq_cnt; i++) {
 		err = sh_dmae_chan_probe(shdev, i, chan_irq[i], chan_flag[i]);
 		if (err)
 			goto chan_probe_err;
 	}
 
+	if (irq_cap)
+		dev_notice(&pdev->dev, "Attempting to register %d DMA "
+			   "channels when a maximum of %d are supported.\n",
+			   pdata->channel_num, SH_DMAC_MAX_CHANNELS);
+
 	pm_runtime_put(&pdev->dev);
 
-	platform_set_drvdata(pdev, shdev);
 	dma_async_device_register(&shdev->common);
 
 	return err;
 
 chan_probe_err:
 	sh_dmae_chan_remove(shdev);
-eirqres:
+
 #if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
 	free_irq(errirq, shdev);
 eirq_err:
 #endif
 rst_err:
+	spin_lock_irq(&sh_dmae_lock);
+	list_del_rcu(&shdev->node);
+	spin_unlock_irq(&sh_dmae_lock);
+
 	pm_runtime_put(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
 	if (dmars)
 		iounmap(shdev->dmars);
+
+	platform_set_drvdata(pdev, NULL);
 emapdmars:
 	iounmap(shdev->chan_reg);
+	synchronize_rcu();
 emapchan:
 	kfree(shdev);
 ealloc:
@@ -1162,6 +1307,10 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
 	if (errirq > 0)
 		free_irq(errirq, shdev);
 
+	spin_lock_irq(&sh_dmae_lock);
+	list_del_rcu(&shdev->node);
+	spin_unlock_irq(&sh_dmae_lock);
+
 	/* channel data remove */
 	sh_dmae_chan_remove(shdev);
 
@@ -1171,6 +1320,9 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
 		iounmap(shdev->dmars);
 	iounmap(shdev->chan_reg);
 
+	platform_set_drvdata(pdev, NULL);
+
+	synchronize_rcu();
 	kfree(shdev);
 
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1189,17 +1341,88 @@ static void sh_dmae_shutdown(struct platform_device *pdev)
 	sh_dmae_ctl_stop(shdev);
 }
 
+static int sh_dmae_runtime_suspend(struct device *dev)
+{
+	return 0;
+}
+
+static int sh_dmae_runtime_resume(struct device *dev)
+{
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+
+	return sh_dmae_rst(shdev);
+}
+
+#ifdef CONFIG_PM
+static int sh_dmae_suspend(struct device *dev)
+{
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < shdev->pdata->channel_num; i++) {
+		struct sh_dmae_chan *sh_chan = shdev->chan[i];
+		if (sh_chan->descs_allocated)
+			sh_chan->pm_error = pm_runtime_put_sync(dev);
+	}
+
+	return 0;
+}
+
+static int sh_dmae_resume(struct device *dev)
+{
+	struct sh_dmae_device *shdev = dev_get_drvdata(dev);
+	int i;
+
+	for (i = 0; i < shdev->pdata->channel_num; i++) {
+		struct sh_dmae_chan *sh_chan = shdev->chan[i];
+		struct sh_dmae_slave *param = sh_chan->common.private;
+
+		if (!sh_chan->descs_allocated)
+			continue;
+
+		if (!sh_chan->pm_error)
+			pm_runtime_get_sync(dev);
+
+		if (param) {
+			const struct sh_dmae_slave_config *cfg = param->config;
+			dmae_set_dmars(sh_chan, cfg->mid_rid);
+			dmae_set_chcr(sh_chan, cfg->chcr);
+		} else {
+			dmae_init(sh_chan);
+		}
+	}
+
+	return 0;
+}
+#else
+#define sh_dmae_suspend NULL
+#define sh_dmae_resume NULL
+#endif
+
+const struct dev_pm_ops sh_dmae_pm = {
+	.suspend		= sh_dmae_suspend,
+	.resume			= sh_dmae_resume,
+	.runtime_suspend	= sh_dmae_runtime_suspend,
+	.runtime_resume		= sh_dmae_runtime_resume,
+};
+
 static struct platform_driver sh_dmae_driver = {
 	.remove		= __exit_p(sh_dmae_remove),
 	.shutdown	= sh_dmae_shutdown,
 	.driver = {
 		.owner	= THIS_MODULE,
 		.name	= "sh-dma-engine",
+		.pm	= &sh_dmae_pm,
 	},
 };
 
 static int __init sh_dmae_init(void)
 {
+	/* Wire up NMI handling */
+	int err = register_die_notifier(&sh_dmae_nmi_notifier);
+	if (err)
+		return err;
+
 	return platform_driver_probe(&sh_dmae_driver, sh_dmae_probe);
 }
 module_init(sh_dmae_init);
@@ -1207,9 +1430,12 @@ module_init(sh_dmae_init);
 static void __exit sh_dmae_exit(void)
 {
 	platform_driver_unregister(&sh_dmae_driver);
+
+	unregister_die_notifier(&sh_dmae_nmi_notifier);
 }
 module_exit(sh_dmae_exit);
 
 MODULE_AUTHOR("Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>");
 MODULE_DESCRIPTION("Renesas SH DMA Engine driver");
 MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:sh-dma-engine");
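
For reference, below is a minimal, self-contained sketch (all names are hypothetical and not part of this patch) of the pattern the patch builds on: a driver keeps an RCU-protected list of controllers and registers a die notifier that only acts in NMI context, mirroring sh_dmae_nmi_handler() and sh_dmae_init() above.

#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/rculist.h>
#include <linux/spinlock.h>

/* Hypothetical per-controller bookkeeping, analogous to struct sh_dmae_device */
struct my_ctlr {
	struct list_head node;
	/* ... controller state ... */
};

static DEFINE_SPINLOCK(my_lock);	/* write-side protection for the list */
static LIST_HEAD(my_ctlrs);		/* readers walk this under rcu_read_lock() */

/* Called from probe: publish a new controller on the RCU-visible list */
static void my_ctlr_add(struct my_ctlr *c)
{
	spin_lock_irq(&my_lock);
	list_add_tail_rcu(&c->node, &my_ctlrs);
	spin_unlock_irq(&my_lock);
}

static int my_nmi_handler(struct notifier_block *self,
			  unsigned long cmd, void *data)
{
	struct my_ctlr *c;
	int ret = NOTIFY_DONE;

	/* Architecture-independent check, as in the patch: only act in NMI context */
	if (!in_nmi())
		return NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(c, &my_ctlrs, node) {
		/*
		 * A real driver would check the controller's status register
		 * here, as sh_dmae_nmi_notify() does; this sketch simply
		 * claims the event for every listed controller.
		 */
		ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block my_nmi_nb = {
	.notifier_call	= my_nmi_handler,
	.priority	= 1,	/* run before NMI debug handlers, as the patch does */
};

static int __init my_init(void)
{
	return register_die_notifier(&my_nmi_nb);
}

static void __exit my_exit(void)
{
	unregister_die_notifier(&my_nmi_nb);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");

Controllers added with my_ctlr_add() would be removed with list_del_rcu() under my_lock followed by synchronize_rcu() before freeing, which is what the patch does in sh_dmae_probe() and sh_dmae_remove().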