Diffstat (limited to 'drivers/net/ethernet/altera/altera_sgdma.c')
 drivers/net/ethernet/altera/altera_sgdma.c | 181
 1 file changed, 90 insertions(+), 91 deletions(-)
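The conversion below replaces direct dereferences of struct sgdma_csr / struct sgdma_descrip pointers with width-explicit accessors (csrrd8/csrrd16/csrrd32 and csrwr8/csrwr16/csrwr32) keyed by the sgdma_csroffs()/sgdma_descroffs() offset macros, and types the CSR and descriptor pointers as __iomem so sparse can check address spaces. Those helpers are introduced elsewhere in this series (likely alongside the structs from altera_sgdmahw.h), not in this file; the snippet below is only a sketch of the assumed pattern, offsetof()-based offsets over the generic MMIO accessors, not code taken from the patch set.

/* Illustrative sketch only: assumed shape of the accessor helpers used in
 * the diff below. struct sgdma_csr and struct sgdma_descrip come from
 * altera_sgdmahw.h.
 */
#include <linux/io.h>
#include <linux/stddef.h>
#include <linux/types.h>

#define sgdma_csroffs(a)        offsetof(struct sgdma_csr, a)
#define sgdma_descroffs(a)      offsetof(struct sgdma_descrip, a)

static inline u32 csrrd32(void __iomem *base, size_t offs)
{
        /* 32-bit MMIO read at base + byte offset */
        return readl(base + offs);
}

static inline void csrwr32(u32 val, void __iomem *base, size_t offs)
{
        /* 32-bit MMIO write at base + byte offset */
        writel(val, base + offs);
}

/* The 8- and 16-bit variants (csrrd8/csrwr8, csrrd16/csrwr16) presumably
 * follow the same pattern using readb/writeb and readw/writew.
 */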
diff --git a/drivers/net/ethernet/altera/altera_sgdma.c b/drivers/net/ethernet/altera/altera_sgdma.c
index 9ce8630692b6..99cc56f451cf 100644
--- a/drivers/net/ethernet/altera/altera_sgdma.c
+++ b/drivers/net/ethernet/altera/altera_sgdma.c
@@ -20,8 +20,8 @@
 #include "altera_sgdmahw.h"
 #include "altera_sgdma.h"

-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                                struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                                struct sgdma_descrip __iomem *ndesc,
                                 dma_addr_t ndesc_phys,
                                 dma_addr_t raddr,
                                 dma_addr_t waddr,
@@ -31,17 +31,17 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                 int wfixed);

 static int sgdma_async_write(struct altera_tse_private *priv,
-                             struct sgdma_descrip *desc);
+                             struct sgdma_descrip __iomem *desc);

 static int sgdma_async_read(struct altera_tse_private *priv);

 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                 struct sgdma_descrip *desc);
+                 struct sgdma_descrip __iomem *desc);

 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                 struct sgdma_descrip *desc);
+                 struct sgdma_descrip __iomem *desc);

 static int sgdma_txbusy(struct altera_tse_private *priv);

@@ -79,7 +79,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
         priv->rxdescphys = (dma_addr_t) 0;
         priv->txdescphys = (dma_addr_t) 0;

-        priv->rxdescphys = dma_map_single(priv->device, priv->rx_dma_desc,
+        priv->rxdescphys = dma_map_single(priv->device,
+                                          (void __force *)priv->rx_dma_desc,
                                           priv->rxdescmem, DMA_BIDIRECTIONAL);

         if (dma_mapping_error(priv->device, priv->rxdescphys)) {
@@ -88,7 +89,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
                 return -EINVAL;
         }

-        priv->txdescphys = dma_map_single(priv->device, priv->tx_dma_desc,
+        priv->txdescphys = dma_map_single(priv->device,
+                                          (void __force *)priv->tx_dma_desc,
                                           priv->txdescmem, DMA_TO_DEVICE);

         if (dma_mapping_error(priv->device, priv->txdescphys)) {
@@ -98,8 +100,8 @@ int sgdma_initialize(struct altera_tse_private *priv)
         }

         /* Initialize descriptor memory to all 0's, sync memory to cache */
-        memset(priv->tx_dma_desc, 0, priv->txdescmem);
-        memset(priv->rx_dma_desc, 0, priv->rxdescmem);
+        memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+        memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

         dma_sync_single_for_device(priv->device, priv->txdescphys,
                                    priv->txdescmem, DMA_TO_DEVICE);
@@ -126,22 +128,15 @@ void sgdma_uninitialize(struct altera_tse_private *priv)
  */
 void sgdma_reset(struct altera_tse_private *priv)
 {
-        u32 *ptxdescripmem = (u32 *)priv->tx_dma_desc;
-        u32 txdescriplen = priv->txdescmem;
-        u32 *prxdescripmem = (u32 *)priv->rx_dma_desc;
-        u32 rxdescriplen = priv->rxdescmem;
-        struct sgdma_csr *ptxsgdma = (struct sgdma_csr *)priv->tx_dma_csr;
-        struct sgdma_csr *prxsgdma = (struct sgdma_csr *)priv->rx_dma_csr;
-
         /* Initialize descriptor memory to 0 */
-        memset(ptxdescripmem, 0, txdescriplen);
-        memset(prxdescripmem, 0, rxdescriplen);
+        memset_io(priv->tx_dma_desc, 0, priv->txdescmem);
+        memset_io(priv->rx_dma_desc, 0, priv->rxdescmem);

-        iowrite32(SGDMA_CTRLREG_RESET, &ptxsgdma->control);
-        iowrite32(0, &ptxsgdma->control);
+        csrwr32(SGDMA_CTRLREG_RESET, priv->tx_dma_csr, sgdma_csroffs(control));
+        csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));

-        iowrite32(SGDMA_CTRLREG_RESET, &prxsgdma->control);
-        iowrite32(0, &prxsgdma->control);
+        csrwr32(SGDMA_CTRLREG_RESET, priv->rx_dma_csr, sgdma_csroffs(control));
+        csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
 }

 /* For SGDMA, interrupts remain enabled after initially enabling,
@@ -167,14 +162,14 @@ void sgdma_disable_txirq(struct altera_tse_private *priv)

 void sgdma_clear_rxirq(struct altera_tse_private *priv)
 {
-        struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-        tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+        tse_set_bit(priv->rx_dma_csr, sgdma_csroffs(control),
+                    SGDMA_CTRLREG_CLRINT);
 }

 void sgdma_clear_txirq(struct altera_tse_private *priv)
 {
-        struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-        tse_set_bit(&csr->control, SGDMA_CTRLREG_CLRINT);
+        tse_set_bit(priv->tx_dma_csr, sgdma_csroffs(control),
+                    SGDMA_CTRLREG_CLRINT);
 }

 /* transmits buffer through SGDMA. Returns number of buffers
@@ -184,12 +179,11 @@ void sgdma_clear_txirq(struct altera_tse_private *priv)
  */
 int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 {
-        int pktstx = 0;
-        struct sgdma_descrip *descbase =
-                (struct sgdma_descrip *)priv->tx_dma_desc;
+        struct sgdma_descrip __iomem *descbase =
+                (struct sgdma_descrip __iomem *)priv->tx_dma_desc;

-        struct sgdma_descrip *cdesc = &descbase[0];
-        struct sgdma_descrip *ndesc = &descbase[1];
+        struct sgdma_descrip __iomem *cdesc = &descbase[0];
+        struct sgdma_descrip __iomem *ndesc = &descbase[1];

         /* wait 'til the tx sgdma is ready for the next transmit request */
         if (sgdma_txbusy(priv))
@@ -205,7 +199,7 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
                             0,                          /* read fixed */
                             SGDMA_CONTROL_WR_FIXED);    /* Generate SOP */

-        pktstx = sgdma_async_write(priv, cdesc);
+        sgdma_async_write(priv, cdesc);

         /* enqueue the request to the pending transmit queue */
         queue_tx(priv, buffer);
@@ -219,10 +213,10 @@ int sgdma_tx_buffer(struct altera_tse_private *priv, struct tse_buffer *buffer)
 u32 sgdma_tx_completions(struct altera_tse_private *priv)
 {
         u32 ready = 0;
-        struct sgdma_descrip *desc = (struct sgdma_descrip *)priv->tx_dma_desc;

         if (!sgdma_txbusy(priv) &&
-            ((desc->control & SGDMA_CONTROL_HW_OWNED) == 0) &&
+            ((csrrd8(priv->tx_dma_desc, sgdma_descroffs(control))
+             & SGDMA_CONTROL_HW_OWNED) == 0) &&
             (dequeue_tx(priv))) {
                 ready = 1;
         }
@@ -246,32 +240,31 @@ void sgdma_add_rx_desc(struct altera_tse_private *priv,
  */
 u32 sgdma_rx_status(struct altera_tse_private *priv)
 {
-        struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-        struct sgdma_descrip *base = (struct sgdma_descrip *)priv->rx_dma_desc;
-        struct sgdma_descrip *desc = NULL;
-        int pktsrx;
-        unsigned int rxstatus = 0;
-        unsigned int pktlength = 0;
-        unsigned int pktstatus = 0;
+        struct sgdma_descrip __iomem *base =
+                (struct sgdma_descrip __iomem *)priv->rx_dma_desc;
+        struct sgdma_descrip __iomem *desc = NULL;
         struct tse_buffer *rxbuffer = NULL;
+        unsigned int rxstatus = 0;

-        u32 sts = ioread32(&csr->status);
+        u32 sts = csrrd32(priv->rx_dma_csr, sgdma_csroffs(status));

         desc = &base[0];
         if (sts & SGDMA_STSREG_EOP) {
+                unsigned int pktlength = 0;
+                unsigned int pktstatus = 0;
                 dma_sync_single_for_cpu(priv->device,
                                         priv->rxdescphys,
                                         priv->sgdmadesclen,
                                         DMA_FROM_DEVICE);

-                pktlength = desc->bytes_xferred;
-                pktstatus = desc->status & 0x3f;
-                rxstatus = pktstatus;
+                pktlength = csrrd16(desc, sgdma_descroffs(bytes_xferred));
+                pktstatus = csrrd8(desc, sgdma_descroffs(status));
+                rxstatus = pktstatus & ~SGDMA_STATUS_EOP;
                 rxstatus = rxstatus << 16;
                 rxstatus |= (pktlength & 0xffff);

                 if (rxstatus) {
-                        desc->status = 0;
+                        csrwr8(0, desc, sgdma_descroffs(status));

                         rxbuffer = dequeue_rx(priv);
                         if (rxbuffer == NULL)
@@ -279,12 +272,12 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                                            "sgdma rx and rx queue empty!\n");

                         /* Clear control */
-                        iowrite32(0, &csr->control);
+                        csrwr32(0, priv->rx_dma_csr, sgdma_csroffs(control));
                         /* clear status */
-                        iowrite32(0xf, &csr->status);
+                        csrwr32(0xf, priv->rx_dma_csr, sgdma_csroffs(status));

                         /* kick the rx sgdma after reaping this descriptor */
-                        pktsrx = sgdma_async_read(priv);
+                        sgdma_async_read(priv);

                 } else {
                         /* If the SGDMA indicated an end of packet on recv,
@@ -298,10 +291,11 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)
                          */
                         netdev_err(priv->dev,
                                    "SGDMA RX Error Info: %x, %x, %x\n",
-                                   sts, desc->status, rxstatus);
+                                   sts, csrrd8(desc, sgdma_descroffs(status)),
+                                   rxstatus);
                 }
         } else if (sts == 0) {
-                pktsrx = sgdma_async_read(priv);
+                sgdma_async_read(priv);
         }

         return rxstatus;
@@ -309,8 +303,8 @@ u32 sgdma_rx_status(struct altera_tse_private *priv)


 /* Private functions */
-static void sgdma_setup_descrip(struct sgdma_descrip *desc,
-                                struct sgdma_descrip *ndesc,
+static void sgdma_setup_descrip(struct sgdma_descrip __iomem *desc,
+                                struct sgdma_descrip __iomem *ndesc,
                                 dma_addr_t ndesc_phys,
                                 dma_addr_t raddr,
                                 dma_addr_t waddr,
@@ -320,27 +314,30 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
                                 int wfixed)
 {
         /* Clear the next descriptor as not owned by hardware */
-        u32 ctrl = ndesc->control;
+
+        u32 ctrl = csrrd8(ndesc, sgdma_descroffs(control));
         ctrl &= ~SGDMA_CONTROL_HW_OWNED;
-        ndesc->control = ctrl;
+        csrwr8(ctrl, ndesc, sgdma_descroffs(control));

-        ctrl = 0;
         ctrl = SGDMA_CONTROL_HW_OWNED;
         ctrl |= generate_eop;
         ctrl |= rfixed;
         ctrl |= wfixed;

         /* Channel is implicitly zero, initialized to 0 by default */
-
-        desc->raddr = raddr;
-        desc->waddr = waddr;
-        desc->next = lower_32_bits(ndesc_phys);
-        desc->control = ctrl;
-        desc->status = 0;
-        desc->rburst = 0;
-        desc->wburst = 0;
-        desc->bytes = length;
-        desc->bytes_xferred = 0;
+        csrwr32(lower_32_bits(raddr), desc, sgdma_descroffs(raddr));
+        csrwr32(lower_32_bits(waddr), desc, sgdma_descroffs(waddr));
+
+        csrwr32(0, desc, sgdma_descroffs(pad1));
+        csrwr32(0, desc, sgdma_descroffs(pad2));
+        csrwr32(lower_32_bits(ndesc_phys), desc, sgdma_descroffs(next));
+
+        csrwr8(ctrl, desc, sgdma_descroffs(control));
+        csrwr8(0, desc, sgdma_descroffs(status));
+        csrwr8(0, desc, sgdma_descroffs(wburst));
+        csrwr8(0, desc, sgdma_descroffs(rburst));
+        csrwr16(length, desc, sgdma_descroffs(bytes));
+        csrwr16(0, desc, sgdma_descroffs(bytes_xferred));
 }

 /* If hardware is busy, don't restart async read.
@@ -351,12 +348,11 @@ static void sgdma_setup_descrip(struct sgdma_descrip *desc,
  */
 static int sgdma_async_read(struct altera_tse_private *priv)
 {
-        struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-        struct sgdma_descrip *descbase =
-                (struct sgdma_descrip *)priv->rx_dma_desc;
+        struct sgdma_descrip __iomem *descbase =
+                (struct sgdma_descrip __iomem *)priv->rx_dma_desc;

-        struct sgdma_descrip *cdesc = &descbase[0];
-        struct sgdma_descrip *ndesc = &descbase[1];
+        struct sgdma_descrip __iomem *cdesc = &descbase[0];
+        struct sgdma_descrip __iomem *ndesc = &descbase[1];

         struct tse_buffer *rxbuffer = NULL;

@@ -382,11 +378,13 @@ static int sgdma_async_read(struct altera_tse_private *priv)
                                         priv->sgdmadesclen,
                                         DMA_TO_DEVICE);

-                iowrite32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
-                          &csr->next_descrip);
+                csrwr32(lower_32_bits(sgdma_rxphysaddr(priv, cdesc)),
+                        priv->rx_dma_csr,
+                        sgdma_csroffs(next_descrip));

-                iowrite32((priv->rxctrlreg | SGDMA_CTRLREG_START),
-                          &csr->control);
+                csrwr32((priv->rxctrlreg | SGDMA_CTRLREG_START),
+                        priv->rx_dma_csr,
+                        sgdma_csroffs(control));

                 return 1;
         }
@@ -395,32 +393,32 @@ static int sgdma_async_read(struct altera_tse_private *priv)
 }

 static int sgdma_async_write(struct altera_tse_private *priv,
-                             struct sgdma_descrip *desc)
+                             struct sgdma_descrip __iomem *desc)
 {
-        struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;
-
         if (sgdma_txbusy(priv))
                 return 0;

         /* clear control and status */
-        iowrite32(0, &csr->control);
-        iowrite32(0x1f, &csr->status);
+        csrwr32(0, priv->tx_dma_csr, sgdma_csroffs(control));
+        csrwr32(0x1f, priv->tx_dma_csr, sgdma_csroffs(status));

         dma_sync_single_for_device(priv->device, priv->txdescphys,
                                    priv->sgdmadesclen, DMA_TO_DEVICE);

-        iowrite32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
-                  &csr->next_descrip);
+        csrwr32(lower_32_bits(sgdma_txphysaddr(priv, desc)),
+                priv->tx_dma_csr,
+                sgdma_csroffs(next_descrip));

-        iowrite32((priv->txctrlreg | SGDMA_CTRLREG_START),
-                  &csr->control);
+        csrwr32((priv->txctrlreg | SGDMA_CTRLREG_START),
+                priv->tx_dma_csr,
+                sgdma_csroffs(control));

         return 1;
 }

 static dma_addr_t
 sgdma_txphysaddr(struct altera_tse_private *priv,
-                 struct sgdma_descrip *desc)
+                 struct sgdma_descrip __iomem *desc)
 {
         dma_addr_t paddr = priv->txdescmem_busaddr;
         uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->tx_dma_desc;
@@ -429,7 +427,7 @@ sgdma_txphysaddr(struct altera_tse_private *priv,

 static dma_addr_t
 sgdma_rxphysaddr(struct altera_tse_private *priv,
-                 struct sgdma_descrip *desc)
+                 struct sgdma_descrip __iomem *desc)
 {
         dma_addr_t paddr = priv->rxdescmem_busaddr;
         uintptr_t offs = (uintptr_t)desc - (uintptr_t)priv->rx_dma_desc;
@@ -518,8 +516,8 @@ queue_rx_peekhead(struct altera_tse_private *priv)
  */
 static int sgdma_rxbusy(struct altera_tse_private *priv)
 {
-        struct sgdma_csr *csr = (struct sgdma_csr *)priv->rx_dma_csr;
-        return ioread32(&csr->status) & SGDMA_STSREG_BUSY;
+        return csrrd32(priv->rx_dma_csr, sgdma_csroffs(status))
+               & SGDMA_STSREG_BUSY;
 }

 /* waits for the tx sgdma to finish it's current operation, returns 0
@@ -528,13 +526,14 @@ static int sgdma_rxbusy(struct altera_tse_private *priv)
 static int sgdma_txbusy(struct altera_tse_private *priv)
 {
         int delay = 0;
-        struct sgdma_csr *csr = (struct sgdma_csr *)priv->tx_dma_csr;

         /* if DMA is busy, wait for current transactino to finish */
-        while ((ioread32(&csr->status) & SGDMA_STSREG_BUSY) && (delay++ < 100))
+        while ((csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+                & SGDMA_STSREG_BUSY) && (delay++ < 100))
                 udelay(1);

-        if (ioread32(&csr->status) & SGDMA_STSREG_BUSY) {
+        if (csrrd32(priv->tx_dma_csr, sgdma_csroffs(status))
+            & SGDMA_STSREG_BUSY) {
                 netdev_err(priv->dev, "timeout waiting for tx dma\n");
                 return 1;
         }
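Two changes above are easy to miss among the accessor conversions: memset() on the descriptor memory becomes memset_io(), and the pointers passed to dma_map_single() gain (void __force *) casts. Both follow from the descriptor region now being typed void __iomem * (it is device memory mapped elsewhere in the driver), so the string and DMA APIs that expect plain kernel pointers need the explicit annotation to keep sparse quiet. The function below is a minimal sketch of that pattern with hypothetical names, not the driver's actual sequence.

/* Minimal sketch (hypothetical function and parameter names): clearing and
 * DMA-mapping a block of __iomem descriptor memory.
 */
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

static int example_map_desc_mem(struct device *dev, void __iomem *desc_base,
                                size_t desc_len, dma_addr_t *handle)
{
        /* Device memory must be cleared with memset_io(), not memset(). */
        memset_io(desc_base, 0, desc_len);

        /* dma_map_single() takes a plain kernel pointer; the __force cast
         * tells sparse the address-space mismatch is intentional here.
         */
        *handle = dma_map_single(dev, (void __force *)desc_base,
                                 desc_len, DMA_BIDIRECTIONAL);
        if (dma_mapping_error(dev, *handle))
                return -EINVAL;

        return 0;
}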