author		Paul Mundt <lethal@linux-sh.org>	2010-12-17 05:16:10 -0500
committer	Paul Mundt <lethal@linux-sh.org>	2010-12-17 05:16:10 -0500
commit		03aa18f550900855c1d3d17ac83c14a3d668d344 (patch)
tree		6aab2e924e1c11a931fa6e491215e7f6b45b343a
parent		76496f8f2e104b8bb08db09c063a6817d18829a6 (diff)
dma: shdma: NMI support.
Presently, DMA transfers are interrupted and aborted by an NMI. This
implements some basic logic for more gracefully handling and clearing
each controller's NMIF flag via the NMI die chain, which is needed
before transfers can resume post-NMI.
Reported-by: Michael Szafranek <Michael.Szafranek@emtrion.de>
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
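
The core mechanism here is the kernel's die-notifier chain: the driver
registers a callback that fires on die events (including NMIs) and
filters on in_nmi() so it only acts in NMI context. A minimal sketch of
that shape, assuming only APIs the patch itself uses
(register_die_notifier(), in_nmi(), the NOTIFY_* return codes); the
demo_* names are hypothetical:

#include <linux/kdebug.h>	/* register_die_notifier() */
#include <linux/notifier.h>	/* struct notifier_block, NOTIFY_* */
#include <linux/hardirq.h>	/* in_nmi() */

static int demo_nmi_handler(struct notifier_block *self,
			    unsigned long cmd, void *data)
{
	/* Ignore die-chain events that are not NMIs */
	if (!in_nmi())
		return NOTIFY_DONE;

	/* ... inspect and clear hardware state here ... */

	return NOTIFY_OK;	/* claim the event */
}

static struct notifier_block demo_nmi_notifier = {
	.notifier_call	= demo_nmi_handler,
	.priority	= 1,	/* run ahead of default-priority handlers */
};

static int __init demo_init(void)
{
	return register_die_notifier(&demo_nmi_notifier);
}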
-rw-r--r--	drivers/dma/shdma.c	132
-rw-r--r--	drivers/dma/shdma.h	1
2 files changed, 115 insertions(+), 18 deletions(-)
diff --git a/drivers/dma/shdma.c b/drivers/dma/shdma.c
index 85ffd5e38c50..a0069c171518 100644
--- a/drivers/dma/shdma.c
+++ b/drivers/dma/shdma.c
@@ -27,7 +27,10 @@
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
 #include <linux/sh_dma.h>
-
+#include <linux/notifier.h>
+#include <linux/kdebug.h>
+#include <linux/spinlock.h>
+#include <linux/rculist.h>
 #include "shdma.h"
 
 /* DMA descriptor control */
@@ -43,6 +46,13 @@ enum sh_dmae_desc_status {
 /* Default MEMCPY transfer size = 2^2 = 4 bytes */
 #define LOG2_DEFAULT_XFER_SIZE 2
 
+/*
+ * Used for write-side mutual exclusion for the global device list,
+ * read-side synchronization by way of RCU.
+ */
+static DEFINE_SPINLOCK(sh_dmae_lock);
+static LIST_HEAD(sh_dmae_devices);
+
 /* A bitmask with bits enough for enum sh_dmae_slave_chan_id */
 static unsigned long sh_dmae_slave_used[BITS_TO_LONGS(SH_DMA_SLAVE_NUMBER)];
 
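
The comment in the hunk above names the locking scheme: writers
serialize on sh_dmae_lock, while readers, which may be running in NMI
context where taking the spinlock could deadlock, traverse the device
list under RCU. A minimal sketch of that writer/reader split, assuming
hypothetical demo_* names around the real spinlock/rculist APIs:

#include <linux/spinlock.h>
#include <linux/rculist.h>

struct demo_dev {
	struct list_head node;
};

static DEFINE_SPINLOCK(demo_lock);
static LIST_HEAD(demo_devices);

/* Writer side: mutate the list under the spinlock, via _rcu helpers */
static void demo_add(struct demo_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_add_tail_rcu(&dev->node, &demo_devices);
	spin_unlock_irqrestore(&demo_lock, flags);
}

static void demo_del(struct demo_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_del_rcu(&dev->node);
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Reader side: lockless traversal, safe against concurrent writers */
static void demo_for_each(void (*fn)(struct demo_dev *))
{
	struct demo_dev *dev;

	rcu_read_lock();
	list_for_each_entry_rcu(dev, &demo_devices, node)
		fn(dev);
	rcu_read_unlock();
}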
@@ -817,10 +827,9 @@ static irqreturn_t sh_dmae_interrupt(int irq, void *data)
 	return ret;
 }
 
-#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
-static irqreturn_t sh_dmae_err(int irq, void *data)
+static unsigned int sh_dmae_reset(struct sh_dmae_device *shdev)
 {
-	struct sh_dmae_device *shdev = (struct sh_dmae_device *)data;
+	unsigned int handled = 0;
 	int i;
 
 	/* halt the dma controller */
@@ -829,25 +838,35 @@ static irqreturn_t sh_dmae_err(int irq, void *data)
 	/* We cannot detect, which channel caused the error, have to reset all */
 	for (i = 0; i < SH_DMAC_MAX_CHANNELS; i++) {
 		struct sh_dmae_chan *sh_chan = shdev->chan[i];
-		if (sh_chan) {
-			struct sh_desc *desc;
-			/* Stop the channel */
-			dmae_halt(sh_chan);
-			/* Complete all */
-			list_for_each_entry(desc, &sh_chan->ld_queue, node) {
-				struct dma_async_tx_descriptor *tx = &desc->async_tx;
-				desc->mark = DESC_IDLE;
-				if (tx->callback)
-					tx->callback(tx->callback_param);
-			}
-			list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
+		struct sh_desc *desc;
+
+		if (!sh_chan)
+			continue;
+
+		/* Stop the channel */
+		dmae_halt(sh_chan);
+
+		/* Complete all */
+		list_for_each_entry(desc, &sh_chan->ld_queue, node) {
+			struct dma_async_tx_descriptor *tx = &desc->async_tx;
+			desc->mark = DESC_IDLE;
+			if (tx->callback)
+				tx->callback(tx->callback_param);
 		}
+
+		list_splice_init(&sh_chan->ld_queue, &sh_chan->ld_free);
+		handled++;
 	}
+
 	sh_dmae_rst(shdev);
 
-	return IRQ_HANDLED;
+	return !!handled;
+}
+
+static irqreturn_t sh_dmae_err(int irq, void *data)
+{
+	return IRQ_RETVAL(sh_dmae_reset(data));
 }
-#endif
 
 static void dmae_do_tasklet(unsigned long data)
 {
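
This hunk splits the channel-reset logic out of the error IRQ handler
into sh_dmae_reset(), which returns a handled count so both callers can
reuse it: the IRQ path wraps it in IRQ_RETVAL(), and the NMI path
(below) checks the count directly. IRQ_RETVAL() maps nonzero to
IRQ_HANDLED and zero to IRQ_NONE, so the wrapper reports a spurious
interrupt when no channel needed resetting. A sketch of that pattern,
with a hypothetical demo_* handler standing in for the driver's code:

#include <linux/interrupt.h>	/* irqreturn_t, IRQ_RETVAL() */

/* Hypothetical stand-in for sh_dmae_reset(): returns work-done count */
static unsigned int demo_do_work(void *data)
{
	/* ... reset channels, count how many were cleaned up ... */
	return 0;
}

static irqreturn_t demo_irq_handler(int irq, void *data)
{
	/* Nonzero -> IRQ_HANDLED, zero -> IRQ_NONE (spurious IRQ) */
	return IRQ_RETVAL(demo_do_work(data));
}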
@@ -876,6 +895,60 @@ static void dmae_do_tasklet(unsigned long data)
 	sh_dmae_chan_ld_cleanup(sh_chan, false);
 }
 
+static bool sh_dmae_nmi_notify(struct sh_dmae_device *shdev)
+{
+	unsigned int handled;
+
+	/* Fast path out if NMIF is not asserted for this controller */
+	if ((dmaor_read(shdev) & DMAOR_NMIF) == 0)
+		return false;
+
+	handled = sh_dmae_reset(shdev);
+	if (handled)
+		return true;
+
+	return false;
+}
+
+static int sh_dmae_nmi_handler(struct notifier_block *self,
+			       unsigned long cmd, void *data)
+{
+	struct sh_dmae_device *shdev;
+	int ret = NOTIFY_DONE;
+	bool triggered;
+
+	/*
+	 * Only concern ourselves with NMI events.
+	 *
+	 * Normally we would check the die chain value, but as this needs
+	 * to be architecture independent, check for NMI context instead.
+	 */
+	if (!in_nmi())
+		return NOTIFY_DONE;
+
+	rcu_read_lock();
+	list_for_each_entry_rcu(shdev, &sh_dmae_devices, node) {
+		/*
+		 * Only stop if one of the controllers has NMIF asserted,
+		 * we do not want to interfere with regular address error
+		 * handling or NMI events that don't concern the DMACs.
+		 */
+		triggered = sh_dmae_nmi_notify(shdev);
+		if (triggered == true)
+			ret = NOTIFY_OK;
+	}
+	rcu_read_unlock();
+
+	return ret;
+}
+
+static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
+	.notifier_call	= sh_dmae_nmi_handler,
+
+	/* Run before NMI debug handler and KGDB */
+	.priority	= 1,
+};
+
 static int __devinit sh_dmae_chan_probe(struct sh_dmae_device *shdev, int id,
 					int irq, unsigned long flags)
 {
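
A note on the .priority = 1 choice in the notifier_block above:
die-chain notifiers run in descending priority order, so any value
above the default of 0 lets this handler observe and clear a
DMAC-raised NMIF before the NMI debug handler and KGDB react to the
same event, as the in-code comment notes.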
@@ -967,6 +1040,7 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	struct sh_dmae_pdata *pdata = pdev->dev.platform_data;
 	unsigned long irqflags = IRQF_DISABLED,
 		chan_flag[SH_DMAC_MAX_CHANNELS] = {};
+	unsigned long flags;
 	int errirq, chan_irq[SH_DMAC_MAX_CHANNELS];
 	int err, i, irq_cnt = 0, irqres = 0;
 	struct sh_dmae_device *shdev;
@@ -1032,6 +1106,15 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 	pm_runtime_enable(&pdev->dev);
 	pm_runtime_get_sync(&pdev->dev);
 
+	spin_lock_irqsave(&sh_dmae_lock, flags);
+	list_add_tail_rcu(&shdev->node, &sh_dmae_devices);
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
+	/* Wire up NMI handling before bringing the controller online */
+	err = register_die_notifier(&sh_dmae_nmi_notifier);
+	if (err)
+		goto notifier_err;
+
 	/* reset dma controller */
 	err = sh_dmae_rst(shdev);
 	if (err)
@@ -1135,6 +1218,12 @@ eirqres:
 eirq_err:
 #endif
 rst_err:
+	unregister_die_notifier(&sh_dmae_nmi_notifier);
+notifier_err:
+	spin_lock_irqsave(&sh_dmae_lock, flags);
+	list_del_rcu(&shdev->node);
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
 	pm_runtime_put(&pdev->dev);
 	if (dmars)
 		iounmap(shdev->dmars);
@@ -1155,6 +1244,7 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
 {
 	struct sh_dmae_device *shdev = platform_get_drvdata(pdev);
 	struct resource *res;
+	unsigned long flags;
 	int errirq = platform_get_irq(pdev, 0);
 
 	dma_async_device_unregister(&shdev->common);
@@ -1162,6 +1252,12 @@ static int __exit sh_dmae_remove(struct platform_device *pdev)
 	if (errirq > 0)
 		free_irq(errirq, shdev);
 
+	unregister_die_notifier(&sh_dmae_nmi_notifier);
+
+	spin_lock_irqsave(&sh_dmae_lock, flags);
+	list_del_rcu(&shdev->node);
+	spin_unlock_irqrestore(&sh_dmae_lock, flags);
+
 	/* channel data remove */
 	sh_dmae_chan_remove(shdev);
 
diff --git a/drivers/dma/shdma.h b/drivers/dma/shdma.h
index 4021275a0a43..52e4fb173805 100644
--- a/drivers/dma/shdma.h
+++ b/drivers/dma/shdma.h
@@ -43,6 +43,7 @@ struct sh_dmae_device {
 	struct dma_device common;
 	struct sh_dmae_chan *chan[SH_DMAC_MAX_CHANNELS];
 	struct sh_dmae_pdata *pdata;
+	struct list_head node;
 	u32 __iomem *chan_reg;
 	u16 __iomem *dmars;
 };