author	Hauke Mehrtens <hauke@hauke-m.de>	2016-12-30 06:18:27 -0500
committer	Ralf Baechle <ralf@linux-mips.org>	2017-01-24 20:51:12 -0500
commit	98e58b01e1671e5784c67972ec1b58a8997f784e
tree	60198b478b7668cc2fcb944ad3eb53d04df8318a
parent	b3d91db3f71d5f70ea60d900425a3f96aeb3d065
MIPS: Lantiq: Lock DMA register accesses for SMP
The DMA controller channel and port configuration is changed by selecting the port or channel in one register and then updating the configuration in other registers. This has to be done as an atomic operation. Previously only the local interrupts were deactivated, which works for single-CPU systems. If the system supports SMP, better locking is needed; use spinlocks instead.

On more recent SoCs (at least xrx200 and later) there are two memory regions for changing the configuration; there we could use one area for each CPU and would not have to synchronize between the CPUs any more.

Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de>
Cc: john@phrozen.org
Cc: linux-mips@linux-mips.org
Patchwork: https://patchwork.linux-mips.org/patch/14912/
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
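The race being fixed is the classic indexed-register pattern: one shared select register steers which channel the following writes hit, so select + update must not interleave across CPUs. The sketch below illustrates only the pattern, not the committed code; the register offsets and all example_* names are hypothetical (the real LTQ_DMA_* constants live in the xway_dma.h header):

/* Hedged sketch of the pattern this patch serializes; offsets and the
 * example_* names are made up for illustration, not copied from the driver. */
#include <linux/io.h>
#include <linux/spinlock.h>

#define EXAMPLE_DMA_CS		0x18	/* channel select (illustrative offset) */
#define EXAMPLE_DMA_CCTRL	0x1C	/* channel control (illustrative offset) */
#define EXAMPLE_CHAN_ON		0x1	/* illustrative enable bit */

static void __iomem *example_membase;	/* ioremap()ed in the probe path */
static DEFINE_SPINLOCK(example_lock);	/* one lock for the shared register window */

static void example_chan_on(int nr)
{
	unsigned long flags;

	/*
	 * local_irq_save() only prevents interleaving on the local CPU;
	 * another CPU could rewrite the select register between the two
	 * writes below.  The spinlock closes that SMP window.
	 */
	spin_lock_irqsave(&example_lock, flags);
	writel(nr, example_membase + EXAMPLE_DMA_CS);	/* step 1: select channel */
	writel(readl(example_membase + EXAMPLE_DMA_CCTRL) | EXAMPLE_CHAN_ON,
	       example_membase + EXAMPLE_DMA_CCTRL);	/* step 2: update it */
	spin_unlock_irqrestore(&example_lock, flags);
}

A side effect visible in the diff below: ltq_dma_open() and ltq_dma_close() now set or clear the LTQ_DMA_IRNEN bit directly instead of calling ltq_dma_enable_irq()/ltq_dma_disable_irq(), since re-entering those helpers while already holding the non-recursive ltq_dma_lock would deadlock.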
-rw-r--r--	arch/mips/lantiq/xway/dma.c | 38
1 file changed, 20 insertions(+), 18 deletions(-)
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index cef811755123..a4ec07bf126a 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -20,6 +20,7 @@
 #include <linux/io.h>
 #include <linux/dma-mapping.h>
 #include <linux/module.h>
+#include <linux/spinlock.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 
@@ -59,16 +60,17 @@
 			ltq_dma_membase + (z))
 
 static void __iomem *ltq_dma_membase;
+static DEFINE_SPINLOCK(ltq_dma_lock);
 
 void
 ltq_dma_enable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_enable_irq);
 
@@ -77,10 +79,10 @@ ltq_dma_disable_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_disable_irq);
 
@@ -89,10 +91,10 @@ ltq_dma_ack_irq(struct ltq_dma_channel *ch)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_ack_irq);
 
@@ -101,11 +103,11 @@ ltq_dma_open(struct ltq_dma_channel *ch)
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL);
-	ltq_dma_enable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_open);
 
@@ -114,11 +116,11 @@ ltq_dma_close(struct ltq_dma_channel *ch)
 {
 	unsigned long flag;
 
-	local_irq_save(flag);
+	spin_lock_irqsave(&ltq_dma_lock, flag);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
-	ltq_dma_disable_irq(ch);
-	local_irq_restore(flag);
+	ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN);
+	spin_unlock_irqrestore(&ltq_dma_lock, flag);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_close);
 
@@ -133,7 +135,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
 				&ch->phys, GFP_ATOMIC);
 	memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(ch->nr, LTQ_DMA_CS);
 	ltq_dma_w32(ch->phys, LTQ_DMA_CDBA);
 	ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN);
@@ -142,7 +144,7 @@ ltq_dma_alloc(struct ltq_dma_channel *ch)
 	ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL);
 	while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST)
 		;
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 
 void
@@ -152,11 +154,11 @@ ltq_dma_alloc_tx(struct ltq_dma_channel *ch)
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx);
 
@@ -167,11 +169,11 @@ ltq_dma_alloc_rx(struct ltq_dma_channel *ch)
 
 	ltq_dma_alloc(ch);
 
-	local_irq_save(flags);
+	spin_lock_irqsave(&ltq_dma_lock, flags);
 	ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE);
 	ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN);
 	ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL);
-	local_irq_restore(flags);
+	spin_unlock_irqrestore(&ltq_dma_lock, flags);
 }
 EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx);
 
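For context, a rough sketch of how a client driver would use these exported helpers after the change, modeled loosely on the lantiq_etop TX setup; the channel number, IRQ number, and all example_* names are assumptions for illustration:

/* Hedged usage sketch; example_* names and values are hypothetical. */
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <xway_dma.h>		/* struct ltq_dma_channel and the helpers above */

static struct ltq_dma_channel example_tx_ch;

static irqreturn_t example_dma_irq(int irq, void *dev_id)
{
	struct ltq_dma_channel *ch = dev_id;

	ltq_dma_ack_irq(ch);	/* now serialized against other CPUs by ltq_dma_lock */
	/* ... reclaim completed descriptors ... */
	return IRQ_HANDLED;
}

static int example_setup(int irq)
{
	example_tx_ch.nr = 1;	/* illustrative channel number */
	ltq_dma_alloc_tx(&example_tx_ch);

	if (request_irq(irq, example_dma_irq, 0, "example-dma", &example_tx_ch))
		return -EBUSY;

	/* Each helper takes and drops ltq_dma_lock internally, so this
	 * setup may safely race with another channel's setup on another CPU. */
	ltq_dma_open(&example_tx_ch);
	return 0;
}

Note that ltq_dma_open() already enables the channel's IRNEN bit under the lock, so the sketch does not need a separate ltq_dma_enable_irq() call afterwards.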