author	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>	2013-07-29 11:42:13 -0400
committer	Dan Williams <djbw@fb.com>	2013-08-23 01:57:36 -0400
commit	5733c38ae3473115ac7df3fe19bd2502149d8c51 (patch)
tree	6c1c6a5d5f2d7c07ec9df5719d5f9edb59aa36f7
parent	ad5278cd8d4b12e14a9a00fa7443a7a239ae2219 (diff)
mv_xor: use {readl, writel}_relaxed instead of __raw_{readl, writel}
In order to support big-endian execution, the mv_xor driver is changed to use the readl_relaxed() and writel_relaxed() accessors, which properly convert from CPU endianness to device endianness (which, in the case of the Marvell XOR hardware, is always little-endian).

Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
Signed-off-by: Dan Williams <djbw@fb.com>
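To make the distinction concrete, below is a rough sketch of what the two accessor families do on a big-endian CPU. The helper names example_raw_readl()/example_readl_relaxed() are hypothetical, the sketch assumes kernel types and helpers from <linux/io.h> and <asm/byteorder.h>, and the real kernel definitions are architecture-specific; this is only an illustration of the conversion the patch relies on, not the actual implementation.

/*
 * Sketch only, not the kernel's exact implementation: a __raw_ accessor
 * returns the register bytes exactly as they sit on the bus (no swapping),
 * so a big-endian CPU reading a little-endian device register gets a
 * byte-swapped value.  A _relaxed accessor converts from the device's
 * little-endian layout to CPU endianness, still without the I/O barrier
 * that the non-relaxed readl()/writel() add.
 */
static inline u32 example_raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;	/* no endian conversion */
}

static inline u32 example_readl_relaxed(const volatile void __iomem *addr)
{
	/* le32_to_cpu() is a no-op on LE CPUs and a byte swap on BE CPUs */
	return le32_to_cpu((__force __le32)example_raw_readl(addr));
}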
-rw-r--r--	drivers/dma/mv_xor.c	36
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index 200f1a3c9a44..c026b27f76e1 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -114,25 +114,25 @@ static void mv_desc_set_src_addr(struct mv_xor_desc_slot *desc,
 
 static u32 mv_chan_get_current_desc(struct mv_xor_chan *chan)
 {
-	return __raw_readl(XOR_CURR_DESC(chan));
+	return readl_relaxed(XOR_CURR_DESC(chan));
 }
 
 static void mv_chan_set_next_descriptor(struct mv_xor_chan *chan,
 					u32 next_desc_addr)
 {
-	__raw_writel(next_desc_addr, XOR_NEXT_DESC(chan));
+	writel_relaxed(next_desc_addr, XOR_NEXT_DESC(chan));
 }
 
 static void mv_chan_unmask_interrupts(struct mv_xor_chan *chan)
 {
-	u32 val = __raw_readl(XOR_INTR_MASK(chan));
+	u32 val = readl_relaxed(XOR_INTR_MASK(chan));
 	val |= XOR_INTR_MASK_VALUE << (chan->idx * 16);
-	__raw_writel(val, XOR_INTR_MASK(chan));
+	writel_relaxed(val, XOR_INTR_MASK(chan));
 }
 
 static u32 mv_chan_get_intr_cause(struct mv_xor_chan *chan)
 {
-	u32 intr_cause = __raw_readl(XOR_INTR_CAUSE(chan));
+	u32 intr_cause = readl_relaxed(XOR_INTR_CAUSE(chan));
 	intr_cause = (intr_cause >> (chan->idx * 16)) & 0xFFFF;
 	return intr_cause;
 }
@@ -149,13 +149,13 @@ static void mv_xor_device_clear_eoc_cause(struct mv_xor_chan *chan)
 {
 	u32 val = ~(1 << (chan->idx * 16));
 	dev_dbg(mv_chan_to_devp(chan), "%s, val 0x%08x\n", __func__, val);
-	__raw_writel(val, XOR_INTR_CAUSE(chan));
+	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
 
 static void mv_xor_device_clear_err_status(struct mv_xor_chan *chan)
 {
 	u32 val = 0xFFFF0000 >> (chan->idx * 16);
-	__raw_writel(val, XOR_INTR_CAUSE(chan));
+	writel_relaxed(val, XOR_INTR_CAUSE(chan));
 }
 
 static int mv_can_chain(struct mv_xor_desc_slot *desc)
@@ -173,7 +173,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 			enum dma_transaction_type type)
 {
 	u32 op_mode;
-	u32 config = __raw_readl(XOR_CONFIG(chan));
+	u32 config = readl_relaxed(XOR_CONFIG(chan));
 
 	switch (type) {
 	case DMA_XOR:
@@ -192,7 +192,7 @@ static void mv_set_mode(struct mv_xor_chan *chan,
 
 	config &= ~0x7;
 	config |= op_mode;
-	__raw_writel(config, XOR_CONFIG(chan));
+	writel_relaxed(config, XOR_CONFIG(chan));
 	chan->current_type = type;
 }
 
@@ -201,14 +201,14 @@ static void mv_chan_activate(struct mv_xor_chan *chan)
 	u32 activation;
 
 	dev_dbg(mv_chan_to_devp(chan), " activate chan.\n");
-	activation = __raw_readl(XOR_ACTIVATION(chan));
+	activation = readl_relaxed(XOR_ACTIVATION(chan));
 	activation |= 0x1;
-	__raw_writel(activation, XOR_ACTIVATION(chan));
+	writel_relaxed(activation, XOR_ACTIVATION(chan));
 }
 
 static char mv_chan_is_busy(struct mv_xor_chan *chan)
 {
-	u32 state = __raw_readl(XOR_ACTIVATION(chan));
+	u32 state = readl_relaxed(XOR_ACTIVATION(chan));
 
 	state = (state >> 4) & 0x3;
 
@@ -755,22 +755,22 @@ static void mv_dump_xor_regs(struct mv_xor_chan *chan)
 {
 	u32 val;
 
-	val = __raw_readl(XOR_CONFIG(chan));
+	val = readl_relaxed(XOR_CONFIG(chan));
 	dev_err(mv_chan_to_devp(chan), "config 0x%08x\n", val);
 
-	val = __raw_readl(XOR_ACTIVATION(chan));
+	val = readl_relaxed(XOR_ACTIVATION(chan));
 	dev_err(mv_chan_to_devp(chan), "activation 0x%08x\n", val);
 
-	val = __raw_readl(XOR_INTR_CAUSE(chan));
+	val = readl_relaxed(XOR_INTR_CAUSE(chan));
 	dev_err(mv_chan_to_devp(chan), "intr cause 0x%08x\n", val);
 
-	val = __raw_readl(XOR_INTR_MASK(chan));
+	val = readl_relaxed(XOR_INTR_MASK(chan));
 	dev_err(mv_chan_to_devp(chan), "intr mask 0x%08x\n", val);
 
-	val = __raw_readl(XOR_ERROR_CAUSE(chan));
+	val = readl_relaxed(XOR_ERROR_CAUSE(chan));
 	dev_err(mv_chan_to_devp(chan), "error cause 0x%08x\n", val);
 
-	val = __raw_readl(XOR_ERROR_ADDR(chan));
+	val = readl_relaxed(XOR_ERROR_ADDR(chan));
 	dev_err(mv_chan_to_devp(chan), "error addr 0x%08x\n", val);
 }
 