Diffstat (limited to 'drivers/net/wireless/bcm43xx/bcm43xx_dma.c')
-rw-r--r--	drivers/net/wireless/bcm43xx/bcm43xx_dma.c	583
1 file changed, 386 insertions(+), 197 deletions(-)
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index d0318e525ba7..76e3aed4b471 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -4,7 +4,7 @@
 
   DMA ringbuffer and descriptor allocation/management
 
-  Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de>
+  Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>
 
   Some code in this file is derived from the b44.c driver
   Copyright (C) 2002 David S. Miller
@@ -109,6 +109,35 @@ void return_slot(struct bcm43xx_dmaring *ring, int slot)
 	}
 }
 
+u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
+{
+	static const u16 map64[] = {
+		BCM43xx_MMIO_DMA64_BASE0,
+		BCM43xx_MMIO_DMA64_BASE1,
+		BCM43xx_MMIO_DMA64_BASE2,
+		BCM43xx_MMIO_DMA64_BASE3,
+		BCM43xx_MMIO_DMA64_BASE4,
+		BCM43xx_MMIO_DMA64_BASE5,
+	};
+	static const u16 map32[] = {
+		BCM43xx_MMIO_DMA32_BASE0,
+		BCM43xx_MMIO_DMA32_BASE1,
+		BCM43xx_MMIO_DMA32_BASE2,
+		BCM43xx_MMIO_DMA32_BASE3,
+		BCM43xx_MMIO_DMA32_BASE4,
+		BCM43xx_MMIO_DMA32_BASE5,
+	};
+
+	if (dma64bit) {
+		assert(controller_idx >= 0 &&
+		       controller_idx < ARRAY_SIZE(map64));
+		return map64[controller_idx];
+	}
+	assert(controller_idx >= 0 &&
+	       controller_idx < ARRAY_SIZE(map32));
+	return map32[controller_idx];
+}
+
 static inline
 dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
 			  unsigned char *buf,
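
Note: the helper added above replaces open-coded MMIO base constants with an index-based lookup, so callers can select the 32-bit or 64-bit register layout at run time (see bcm43xx_setup_dmaring() further down). A minimal standalone sketch of the same lookup pattern; the offsets here are placeholders, not the real BCM43xx_MMIO_DMA*_BASE* values from bcm43xx.h:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

	/* Placeholder offsets for illustration; the real values come from bcm43xx.h. */
	static const uint16_t map64[] = { 0x200, 0x240, 0x280, 0x2C0, 0x300, 0x340 };
	static const uint16_t map32[] = { 0x200, 0x220, 0x240, 0x260, 0x280, 0x2A0 };

	static uint16_t dmacontroller_base(int dma64bit, int controller_idx)
	{
		const uint16_t *map = dma64bit ? map64 : map32;
		size_t nr = dma64bit ? ARRAY_SIZE(map64) : ARRAY_SIZE(map32);

		assert(controller_idx >= 0 && (size_t)controller_idx < nr);
		return map[controller_idx];
	}

	int main(void)
	{
		printf("engine 3 base: 0x%04x\n", dmacontroller_base(1, 3));
		return 0;
	}
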
@@ -172,7 +201,6 @@ void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
 /* Unmap and free a descriptor buffer. */
 static inline
 void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
-			    struct bcm43xx_dmadesc *desc,
 			    struct bcm43xx_dmadesc_meta *meta,
 			    int irq_context)
 {
@@ -188,23 +216,13 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
 {
 	struct device *dev = &(ring->bcm->pci_dev->dev);
 
-	ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
-					 &(ring->dmabase), GFP_KERNEL);
-	if (!ring->vbase) {
+	ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
+					    &(ring->dmabase), GFP_KERNEL);
+	if (!ring->descbase) {
 		printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
 		return -ENOMEM;
 	}
-	if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) {
-		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
-		       "(0x%llx, len: %lu)\n",
-		       (unsigned long long)ring->dmabase,
-		       BCM43xx_DMA_RINGMEMSIZE);
-		dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
-				  ring->vbase, ring->dmabase);
-		return -ENOMEM;
-	}
-	assert(!(ring->dmabase & 0x000003FF));
-	memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);
+	memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);
 
 	return 0;
 }
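
Note: the removed block was a runtime guard against ring memory landing above the 1 GB limit of the old 30-bit bus-address scheme (BCM43xx_DMA_BUSADDRMAX, ring->memoffset). The rewrite instead carries the high address bits in the descriptor itself through the routing/addrext fields (see fill_descriptor() below), which makes the guard unnecessary. A standalone sketch of that address split; the mask and shift are assumptions for illustration, the real BCM43xx_DMA64_ROUTING values live in bcm43xx.h:

	#include <stdint.h>
	#include <stdio.h>

	/* Assumed for illustration: routing occupies the top bits of the
	 * high word; the real mask/shift are defined in bcm43xx.h. */
	#define DMA64_ROUTING		0xC0000000u
	#define DMA64_ROUTING_SHIFT	30

	int main(void)
	{
		uint64_t dmaaddr = 0x00000001F0001000ull;	/* example bus address */
		uint32_t addrlo  = (uint32_t)(dmaaddr & 0xFFFFFFFF);
		uint32_t addrhi  = (uint32_t)((dmaaddr >> 32) & ~(uint64_t)DMA64_ROUTING);
		uint32_t addrext = (uint32_t)((dmaaddr >> 32) >> DMA64_ROUTING_SHIFT);

		/* fill_descriptor() then ORs ring->routing into addrhi and
		 * stores addrext in the descriptor's control word. */
		printf("lo=0x%08x hi=0x%08x ext=%u\n", addrlo, addrhi, addrext);
		return 0;
	}
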
@@ -214,26 +232,34 @@ static void free_ringmemory(struct bcm43xx_dmaring *ring)
 	struct device *dev = &(ring->bcm->pci_dev->dev);
 
 	dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
-			  ring->vbase, ring->dmabase);
+			  ring->descbase, ring->dmabase);
 }
 
 /* Reset the RX DMA channel */
 int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
-				   u16 mmio_base)
+				   u16 mmio_base, int dma64)
 {
 	int i;
 	u32 value;
+	u16 offset;
 
-	bcm43xx_write32(bcm,
-			mmio_base + BCM43xx_DMA_RX_CONTROL,
-			0x00000000);
+	offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
+	bcm43xx_write32(bcm, mmio_base + offset, 0);
 	for (i = 0; i < 1000; i++) {
-		value = bcm43xx_read32(bcm,
-				       mmio_base + BCM43xx_DMA_RX_STATUS);
-		value &= BCM43xx_DMA_RXSTAT_STAT_MASK;
-		if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) {
-			i = -1;
-			break;
+		offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
+		value = bcm43xx_read32(bcm, mmio_base + offset);
+		if (dma64) {
+			value &= BCM43xx_DMA64_RXSTAT;
+			if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
+				i = -1;
+				break;
+			}
+		} else {
+			value &= BCM43xx_DMA32_RXSTATE;
+			if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
+				i = -1;
+				break;
+			}
 		}
 		udelay(10);
 	}
@@ -247,31 +273,47 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
 
 /* Reset the RX DMA channel */
 int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
-				   u16 mmio_base)
+				   u16 mmio_base, int dma64)
 {
 	int i;
 	u32 value;
+	u16 offset;
 
 	for (i = 0; i < 1000; i++) {
-		value = bcm43xx_read32(bcm,
-				       mmio_base + BCM43xx_DMA_TX_STATUS);
-		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
-		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED ||
-		    value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT ||
-		    value == BCM43xx_DMA_TXSTAT_STAT_STOPPED)
-			break;
+		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
+		value = bcm43xx_read32(bcm, mmio_base + offset);
+		if (dma64) {
+			value &= BCM43xx_DMA64_TXSTAT;
+			if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
+			    value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
+			    value == BCM43xx_DMA64_TXSTAT_STOPPED)
+				break;
+		} else {
+			value &= BCM43xx_DMA32_TXSTATE;
+			if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
+			    value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
+			    value == BCM43xx_DMA32_TXSTAT_STOPPED)
+				break;
+		}
 		udelay(10);
 	}
-	bcm43xx_write32(bcm,
-			mmio_base + BCM43xx_DMA_TX_CONTROL,
-			0x00000000);
+	offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
+	bcm43xx_write32(bcm, mmio_base + offset, 0);
 	for (i = 0; i < 1000; i++) {
-		value = bcm43xx_read32(bcm,
-				       mmio_base + BCM43xx_DMA_TX_STATUS);
-		value &= BCM43xx_DMA_TXSTAT_STAT_MASK;
-		if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) {
-			i = -1;
-			break;
+		offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
+		value = bcm43xx_read32(bcm, mmio_base + offset);
+		if (dma64) {
+			value &= BCM43xx_DMA64_TXSTAT;
+			if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
+				i = -1;
+				break;
+			}
+		} else {
+			value &= BCM43xx_DMA32_TXSTATE;
+			if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
+				i = -1;
+				break;
+			}
 		}
 		udelay(10);
 	}
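
Note: both reset routines now select register offsets and state masks per engine width at each step, which doubles the branches but keeps a single polling skeleton: read status, mask the state field, stop when an accepted state appears, otherwise udelay(10) for up to 1000 iterations (i is forced to -1 on success, so the timeout test after the loop presumably keys off it). A condensed, hypothetical sketch of that skeleton; not driver API, just the shape of the loop:

	/* Hypothetical condensation of the polling pattern above; read_state
	 * and the accept list stand in for the bcm43xx register reads and
	 * the DMA32/DMA64 state masks. */
	static int poll_dma_state(u32 (*read_state)(void *ctx), void *ctx,
				  const u32 *accept, int n_accept)
	{
		int i, j;

		for (i = 0; i < 1000; i++) {
			u32 state = read_state(ctx);

			for (j = 0; j < n_accept; j++) {
				if (state == accept[j])
					return 0;	/* reached an accepted state */
			}
			udelay(10);
		}
		return -ENODEV;	/* engine never settled */
	}
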
@@ -285,47 +327,98 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
 	return 0;
 }
 
+static void fill_descriptor(struct bcm43xx_dmaring *ring,
+			    struct bcm43xx_dmadesc_generic *desc,
+			    dma_addr_t dmaaddr,
+			    u16 bufsize,
+			    int start, int end, int irq)
+{
+	int slot;
+
+	slot = bcm43xx_dma_desc2idx(ring, desc);
+	assert(slot >= 0 && slot < ring->nr_slots);
+
+	if (ring->dma64) {
+		u32 ctl0 = 0, ctl1 = 0;
+		u32 addrlo, addrhi;
+		u32 addrext;
+
+		addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
+		addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
+		addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
+		addrhi |= ring->routing;
+		if (slot == ring->nr_slots - 1)
+			ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
+		if (start)
+			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
+		if (end)
+			ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
+		if (irq)
+			ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
+		ctl1 |= (bufsize - ring->frameoffset)
+			& BCM43xx_DMA64_DCTL1_BYTECNT;
+		ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
+			& BCM43xx_DMA64_DCTL1_ADDREXT_MASK;
+
+		desc->dma64.control0 = cpu_to_le32(ctl0);
+		desc->dma64.control1 = cpu_to_le32(ctl1);
+		desc->dma64.address_low = cpu_to_le32(addrlo);
+		desc->dma64.address_high = cpu_to_le32(addrhi);
+	} else {
+		u32 ctl;
+		u32 addr;
+		u32 addrext;
+
+		addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
+		addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
+			  >> BCM43xx_DMA32_ROUTING_SHIFT;
+		addr |= ring->routing;
+		ctl = (bufsize - ring->frameoffset)
+		      & BCM43xx_DMA32_DCTL_BYTECNT;
+		if (slot == ring->nr_slots - 1)
+			ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
+		if (start)
+			ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
+		if (end)
+			ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
+		if (irq)
+			ctl |= BCM43xx_DMA32_DCTL_IRQ;
+		ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
+		       & BCM43xx_DMA32_DCTL_ADDREXT_MASK;
+
+		desc->dma32.control = cpu_to_le32(ctl);
+		desc->dma32.address = cpu_to_le32(addr);
+	}
+}
+
 static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
-			       struct bcm43xx_dmadesc *desc,
+			       struct bcm43xx_dmadesc_generic *desc,
 			       struct bcm43xx_dmadesc_meta *meta,
 			       gfp_t gfp_flags)
 {
 	struct bcm43xx_rxhdr *rxhdr;
+	struct bcm43xx_hwxmitstatus *xmitstat;
 	dma_addr_t dmaaddr;
-	u32 desc_addr;
-	u32 desc_ctl;
-	const int slot = (int)(desc - ring->vbase);
 	struct sk_buff *skb;
 
-	assert(slot >= 0 && slot < ring->nr_slots);
 	assert(!ring->tx);
 
 	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
 	if (unlikely(!skb))
 		return -ENOMEM;
 	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
-	if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
-		unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
-		dev_kfree_skb_any(skb);
-		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
-		       "(0x%llx, len: %u)\n",
-		       (unsigned long long)dmaaddr, ring->rx_buffersize);
-		return -ENOMEM;
-	}
 	meta->skb = skb;
 	meta->dmaaddr = dmaaddr;
 	skb->dev = ring->bcm->net_dev;
-	desc_addr = (u32)(dmaaddr + ring->memoffset);
-	desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK &
-		    (u32)(ring->rx_buffersize - ring->frameoffset));
-	if (slot == ring->nr_slots - 1)
-		desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
-	set_desc_addr(desc, desc_addr);
-	set_desc_ctl(desc, desc_ctl);
+
+	fill_descriptor(ring, desc, dmaaddr,
+			ring->rx_buffersize, 0, 0, 0);
 
 	rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
 	rxhdr->frame_length = 0;
 	rxhdr->flags1 = 0;
+	xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
+	xmitstat->cookie = 0;
 
 	return 0;
 }
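
Note: fill_descriptor() becomes the single point where descriptors are written, hiding the 32-bit vs. 64-bit layout behind struct bcm43xx_dmadesc_generic. The layouts implied by the field accesses above look roughly like this; the authoritative declarations are in bcm43xx_dma.h, so names and packing here are inferred (little-endian words, as the cpu_to_le32() conversions show):

	/* Inferred from the accesses in fill_descriptor(); see bcm43xx_dma.h
	 * for the authoritative declarations. */
	struct bcm43xx_dmadesc32 {
		__le32 control;		/* byte count, FRAMESTART/END, IRQ, addrext */
		__le32 address;		/* low address bits plus routing */
	} __attribute__((__packed__));

	struct bcm43xx_dmadesc64 {
		__le32 control0;	/* FRAMESTART/END, IRQ, table end */
		__le32 control1;	/* byte count, addrext */
		__le32 address_low;
		__le32 address_high;	/* high 32 bits plus routing */
	} __attribute__((__packed__));

	struct bcm43xx_dmadesc_generic {
		union {
			struct bcm43xx_dmadesc32 dma32;
			struct bcm43xx_dmadesc64 dma64;
		} __attribute__((__packed__));
	} __attribute__((__packed__));
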
@@ -336,17 +429,17 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
 static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
 {
 	int i, err = -ENOMEM;
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
 
 	for (i = 0; i < ring->nr_slots; i++) {
-		desc = ring->vbase + i;
-		meta = ring->meta + i;
+		desc = bcm43xx_dma_idx2desc(ring, i, &meta);
 
 		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
 		if (err)
 			goto err_unwind;
 	}
+	mb();
 	ring->used_slots = ring->nr_slots;
 	err = 0;
 out:
@@ -354,8 +447,7 @@ out:
 
 err_unwind:
 	for (i--; i >= 0; i--) {
-		desc = ring->vbase + i;
-		meta = ring->meta + i;
+		desc = bcm43xx_dma_idx2desc(ring, i, &meta);
 
 		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
 		dev_kfree_skb(meta->skb);
@@ -371,27 +463,67 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
 {
 	int err = 0;
 	u32 value;
+	u32 addrext;
 
 	if (ring->tx) {
-		/* Set Transmit Control register to "transmit enable" */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
-				  BCM43xx_DMA_TXCTRL_ENABLE);
-		/* Set Transmit Descriptor ring address. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING,
-				  ring->dmabase + ring->memoffset);
+		if (ring->dma64) {
+			u64 ringbase = (u64)(ring->dmabase);
+
+			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
+			value = BCM43xx_DMA64_TXENABLE;
+			value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
+				 & BCM43xx_DMA64_TXADDREXT_MASK;
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
+					  (ringbase & 0xFFFFFFFF));
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
+					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
+					  | ring->routing);
+		} else {
+			u32 ringbase = (u32)(ring->dmabase);
+
+			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
+			value = BCM43xx_DMA32_TXENABLE;
+			value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
+				 & BCM43xx_DMA32_TXADDREXT_MASK;
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
+					  (ringbase & ~BCM43xx_DMA32_ROUTING)
+					  | ring->routing);
+		}
 	} else {
 		err = alloc_initial_descbuffers(ring);
 		if (err)
 			goto out;
-		/* Set Receive Control "receive enable" and frame offset */
-		value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT);
-		value |= BCM43xx_DMA_RXCTRL_ENABLE;
-		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_CONTROL, value);
-		/* Set Receive Descriptor ring address. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING,
-				  ring->dmabase + ring->memoffset);
-		/* Init the descriptor pointer. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 200);
+		if (ring->dma64) {
+			u64 ringbase = (u64)(ring->dmabase);
+
+			addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
+			value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
+			value |= BCM43xx_DMA64_RXENABLE;
+			value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
+				 & BCM43xx_DMA64_RXADDREXT_MASK;
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
+					  (ringbase & 0xFFFFFFFF));
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
+					  ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
+					  | ring->routing);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
+		} else {
+			u32 ringbase = (u32)(ring->dmabase);
+
+			addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
+			value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
+			value |= BCM43xx_DMA32_RXENABLE;
+			value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
+				 & BCM43xx_DMA32_RXADDREXT_MASK;
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
+					  (ringbase & ~BCM43xx_DMA32_ROUTING)
+					  | ring->routing);
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
+		}
 	}
 
 out:
@@ -402,27 +534,32 @@ out:
 static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
 {
 	if (ring->tx) {
-		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base);
-		/* Zero out Transmit Descriptor ring address. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 0);
+		bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
+		if (ring->dma64) {
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
+		} else
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
 	} else {
-		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base);
-		/* Zero out Receive Descriptor ring address. */
-		bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 0);
+		bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
+		if (ring->dma64) {
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
+			bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
+		} else
+			bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
 	}
 }
 
 static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
 {
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
 	int i;
 
 	if (!ring->used_slots)
 		return;
 	for (i = 0; i < ring->nr_slots; i++) {
-		desc = ring->vbase + i;
-		meta = ring->meta + i;
+		desc = bcm43xx_dma_idx2desc(ring, i, &meta);
 
 		if (!meta->skb) {
 			assert(ring->tx);
@@ -430,62 +567,67 @@ static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
 		}
 		if (ring->tx) {
 			unmap_descbuffer(ring, meta->dmaaddr,
 					 meta->skb->len, 1);
 		} else {
 			unmap_descbuffer(ring, meta->dmaaddr,
 					 ring->rx_buffersize, 0);
 		}
-		free_descriptor_buffer(ring, desc, meta, 0);
+		free_descriptor_buffer(ring, meta, 0);
 	}
 }
 
 /* Main initialization function. */
 static
 struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
-					       u16 dma_controller_base,
-					       int nr_descriptor_slots,
-					       int tx)
+					       int controller_index,
+					       int for_tx,
+					       int dma64)
 {
 	struct bcm43xx_dmaring *ring;
 	int err;
+	int nr_slots;
 
 	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
 	if (!ring)
 		goto out;
 
-	ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots,
+	nr_slots = BCM43xx_RXRING_SLOTS;
+	if (for_tx)
+		nr_slots = BCM43xx_TXRING_SLOTS;
+
+	ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
 			     GFP_KERNEL);
 	if (!ring->meta)
 		goto err_kfree_ring;
 
-	ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET;
+	ring->routing = BCM43xx_DMA32_CLIENTTRANS;
+	if (dma64)
+		ring->routing = BCM43xx_DMA64_CLIENTTRANS;
 #ifdef CONFIG_BCM947XX
 	if (bcm->pci_dev->bus->number == 0)
-		ring->memoffset = 0;
+		ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
 #endif
 
 	ring->bcm = bcm;
-	ring->nr_slots = nr_descriptor_slots;
+	ring->nr_slots = nr_slots;
 	ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
 	ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
 	assert(ring->suspend_mark < ring->resume_mark);
-	ring->mmio_base = dma_controller_base;
-	if (tx) {
+	ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
+	ring->index = controller_index;
+	ring->dma64 = !!dma64;
+	if (for_tx) {
 		ring->tx = 1;
 		ring->current_slot = -1;
 	} else {
-		switch (dma_controller_base) {
-		case BCM43xx_MMIO_DMA1_BASE:
-			ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE;
-			ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET;
-			break;
-		case BCM43xx_MMIO_DMA4_BASE:
-			ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE;
-			ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
-			break;
-		default:
+		if (ring->index == 0) {
+			ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
+			ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
+		} else if (ring->index == 3) {
+			ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
+			ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
+		} else
 			assert(0);
-		}
 	}
 
 	err = alloc_ringmemory(ring);
@@ -514,7 +656,8 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
 	if (!ring)
 		return;
 
-	dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n",
+	dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
+		(ring->dma64) ? "64" : "32",
 		ring->mmio_base,
 		(ring->tx) ? "TX" : "RX",
 		ring->max_used_slots, ring->nr_slots);
@@ -537,10 +680,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
 		return;
 	dma = bcm43xx_current_dma(bcm);
 
-	bcm43xx_destroy_dmaring(dma->rx_ring1);
-	dma->rx_ring1 = NULL;
+	bcm43xx_destroy_dmaring(dma->rx_ring3);
+	dma->rx_ring3 = NULL;
 	bcm43xx_destroy_dmaring(dma->rx_ring0);
 	dma->rx_ring0 = NULL;
+
+	bcm43xx_destroy_dmaring(dma->tx_ring5);
+	dma->tx_ring5 = NULL;
+	bcm43xx_destroy_dmaring(dma->tx_ring4);
+	dma->tx_ring4 = NULL;
 	bcm43xx_destroy_dmaring(dma->tx_ring3);
 	dma->tx_ring3 = NULL;
 	bcm43xx_destroy_dmaring(dma->tx_ring2);
@@ -556,48 +704,59 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
 	struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
 	struct bcm43xx_dmaring *ring;
 	int err = -ENOMEM;
+	int dma64 = 0;
+	u32 sbtmstatehi;
+
+	sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
+	if (sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT)
+		dma64 = 1;
 
 	/* setup TX DMA channels. */
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
-				     BCM43xx_TXRING_SLOTS, 1);
+	ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
 	if (!ring)
 		goto out;
 	dma->tx_ring0 = ring;
 
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE,
-				     BCM43xx_TXRING_SLOTS, 1);
+	ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
 	if (!ring)
 		goto err_destroy_tx0;
 	dma->tx_ring1 = ring;
 
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE,
-				     BCM43xx_TXRING_SLOTS, 1);
+	ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
 	if (!ring)
 		goto err_destroy_tx1;
 	dma->tx_ring2 = ring;
 
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
-				     BCM43xx_TXRING_SLOTS, 1);
+	ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
 	if (!ring)
 		goto err_destroy_tx2;
 	dma->tx_ring3 = ring;
 
-	/* setup RX DMA channels. */
-	ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
-				     BCM43xx_RXRING_SLOTS, 0);
+	ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
 	if (!ring)
 		goto err_destroy_tx3;
+	dma->tx_ring4 = ring;
+
+	ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
+	if (!ring)
+		goto err_destroy_tx4;
+	dma->tx_ring5 = ring;
+
+	/* setup RX DMA channels. */
+	ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
+	if (!ring)
+		goto err_destroy_tx5;
 	dma->rx_ring0 = ring;
 
 	if (bcm->current_core->rev < 5) {
-		ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE,
-					     BCM43xx_RXRING_SLOTS, 0);
+		ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
 		if (!ring)
 			goto err_destroy_rx0;
-		dma->rx_ring1 = ring;
+		dma->rx_ring3 = ring;
 	}
 
-	dprintk(KERN_INFO PFX "DMA initialized\n");
+	dprintk(KERN_INFO PFX "%s DMA initialized\n",
+		dma64 ? "64-bit" : "32-bit");
 	err = 0;
 out:
 	return err;
@@ -605,6 +764,12 @@ out:
 err_destroy_rx0:
 	bcm43xx_destroy_dmaring(dma->rx_ring0);
 	dma->rx_ring0 = NULL;
+err_destroy_tx5:
+	bcm43xx_destroy_dmaring(dma->tx_ring5);
+	dma->tx_ring5 = NULL;
+err_destroy_tx4:
+	bcm43xx_destroy_dmaring(dma->tx_ring4);
+	dma->tx_ring4 = NULL;
 err_destroy_tx3:
 	bcm43xx_destroy_dmaring(dma->tx_ring3);
 	dma->tx_ring3 = NULL;
@@ -624,7 +789,7 @@ err_destroy_tx0:
 static u16 generate_cookie(struct bcm43xx_dmaring *ring,
 			   int slot)
 {
-	u16 cookie = 0xF000;
+	u16 cookie = 0x1000;
 
 	/* Use the upper 4 bits of the cookie as
 	 * DMA controller ID and store the slot number
@@ -632,21 +797,25 @@ static u16 generate_cookie(struct bcm43xx_dmaring *ring,
 	 * Note that the cookie must never be 0, as this
 	 * is a special value used in RX path.
 	 */
-	switch (ring->mmio_base) {
-	default:
-		assert(0);
-	case BCM43xx_MMIO_DMA1_BASE:
+	switch (ring->index) {
+	case 0:
 		cookie = 0xA000;
 		break;
-	case BCM43xx_MMIO_DMA2_BASE:
+	case 1:
 		cookie = 0xB000;
 		break;
-	case BCM43xx_MMIO_DMA3_BASE:
+	case 2:
 		cookie = 0xC000;
 		break;
-	case BCM43xx_MMIO_DMA4_BASE:
+	case 3:
 		cookie = 0xD000;
 		break;
+	case 4:
+		cookie = 0xE000;
+		break;
+	case 5:
+		cookie = 0xF000;
+		break;
 	}
 	assert(((u16)slot & 0xF000) == 0x0000);
 	cookie |= (u16)slot;
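
Note: with six TX rings the cookie space now spans 0xA000-0xF000 in the upper nibble, and the default initializer drops from 0xF000 (which would now alias TX ring 5) to 0x1000, a value no ring can produce. A standalone model of the encode/decode round trip; parse_cookie() below does the decode with a switch:

	#include <assert.h>
	#include <stdint.h>

	/* Upper 4 bits: controller ID (0xA..0xF for rings 0..5).
	 * Lower 12 bits: slot number. Zero is reserved for the RX path. */
	static uint16_t make_cookie(int ring_index, int slot)
	{
		assert(ring_index >= 0 && ring_index <= 5);
		assert((slot & 0xF000) == 0);
		return (uint16_t)(((0xA + ring_index) << 12) | slot);
	}

	static void split_cookie(uint16_t cookie, int *ring_index, int *slot)
	{
		*ring_index = (cookie >> 12) - 0xA;
		*slot = cookie & 0x0FFF;
	}

	int main(void)
	{
		int idx, slot;

		split_cookie(make_cookie(4, 123), &idx, &slot);
		assert(idx == 4 && slot == 123);	/* 0xE07B round-trips */
		return 0;
	}
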
@@ -675,6 +844,12 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
 	case 0xD000:
 		ring = dma->tx_ring3;
 		break;
+	case 0xE000:
+		ring = dma->tx_ring4;
+		break;
+	case 0xF000:
+		ring = dma->tx_ring5;
+		break;
 	default:
 		assert(0);
 	}
@@ -687,6 +862,9 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
 static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
 				  int slot)
 {
+	u16 offset;
+	int descsize;
+
 	/* Everything is ready to start. Buffers are DMA mapped and
 	 * associated with slots.
 	 * "slot" is the last slot of the new frame we want to transmit.
@@ -694,25 +872,26 @@ static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
 	 */
 	wmb();
 	slot = next_slot(ring, slot);
-	bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_INDEX,
-			  (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
+	offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
+	descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
+				 : sizeof(struct bcm43xx_dmadesc32);
+	bcm43xx_dma_write(ring, offset,
+			  (u32)(slot * descsize));
 }
 
-static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
-			   struct sk_buff *skb,
-			   u8 cur_frag)
+static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
+			    struct sk_buff *skb,
+			    u8 cur_frag)
 {
 	int slot;
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
-	u32 desc_ctl;
-	u32 desc_addr;
+	dma_addr_t dmaaddr;
 
 	assert(skb_shinfo(skb)->nr_frags == 0);
 
 	slot = request_slot(ring);
-	desc = ring->vbase + slot;
-	meta = ring->meta + slot;
+	desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
 
 	/* Add a device specific TX header. */
 	assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
@@ -729,29 +908,14 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
 			  generate_cookie(ring, slot));
 
 	meta->skb = skb;
-	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
-	if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) {
-		return_slot(ring, slot);
-		printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
-		       "(0x%llx, len: %u)\n",
-		       (unsigned long long)meta->dmaaddr, skb->len);
-		return -ENOMEM;
-	}
+	dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
+	meta->dmaaddr = dmaaddr;
 
-	desc_addr = (u32)(meta->dmaaddr + ring->memoffset);
-	desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND;
-	desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
-	desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
-		     (u32)(meta->skb->len - ring->frameoffset));
-	if (slot == ring->nr_slots - 1)
-		desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
+	fill_descriptor(ring, desc, dmaaddr,
+			skb->len, 1, 1, 1);
 
-	set_desc_ctl(desc, desc_ctl);
-	set_desc_addr(desc, desc_addr);
 	/* Now transfer the whole frame. */
 	dmacontroller_poke_tx(ring, slot);
-
-	return 0;
 }
 
 int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
@@ -781,7 +945,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
 		/* Take skb from ieee80211_txb_free */
 		txb->fragments[i] = NULL;
 		dma_tx_fragment(ring, skb, i);
-		//TODO: handle failure of dma_tx_fragment
 	}
 	ieee80211_txb_free(txb);
 
@@ -792,23 +955,28 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
 				   struct bcm43xx_xmitstatus *status)
 {
 	struct bcm43xx_dmaring *ring;
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
 	int is_last_fragment;
 	int slot;
+	u32 tmp;
 
 	ring = parse_cookie(bcm, status->cookie, &slot);
 	assert(ring);
 	assert(ring->tx);
-	assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
 	while (1) {
 		assert(slot >= 0 && slot < ring->nr_slots);
-		desc = ring->vbase + slot;
-		meta = ring->meta + slot;
+		desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
 
-		is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND);
+		if (ring->dma64) {
+			tmp = le32_to_cpu(desc->dma64.control0);
+			is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
+		} else {
+			tmp = le32_to_cpu(desc->dma32.control);
+			is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
+		}
 		unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
-		free_descriptor_buffer(ring, desc, meta, 1);
+		free_descriptor_buffer(ring, meta, 1);
 		/* Everything belonging to the slot is unmapped
 		 * and freed, so we can return it.
 		 */
@@ -824,7 +992,7 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
 static void dma_rx(struct bcm43xx_dmaring *ring,
 		   int *slot)
 {
-	struct bcm43xx_dmadesc *desc;
+	struct bcm43xx_dmadesc_generic *desc;
 	struct bcm43xx_dmadesc_meta *meta;
 	struct bcm43xx_rxhdr *rxhdr;
 	struct sk_buff *skb;
@@ -832,13 +1000,12 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
 	int err;
 	dma_addr_t dmaaddr;
 
-	desc = ring->vbase + *slot;
-	meta = ring->meta + *slot;
+	desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
 
 	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
 	skb = meta->skb;
 
-	if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) {
+	if (ring->index == 3) {
 		/* We received an xmit status. */
 		struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
 		struct bcm43xx_xmitstatus stat;
@@ -894,8 +1061,7 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
 		s32 tmp = len;
 
 		while (1) {
-			desc = ring->vbase + *slot;
-			meta = ring->meta + *slot;
+			desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
 			/* recycle the descriptor buffer. */
 			sync_descbuffer_for_device(ring, meta->dmaaddr,
 						   ring->rx_buffersize);
@@ -906,8 +1072,8 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
 			break;
 		}
-		printkl(KERN_ERR PFX "DMA RX buffer too small "
-			"(len: %u, buffer: %u, nr-dropped: %d)\n",
-			len, ring->rx_buffersize, cnt);
+		printkl(KERN_ERR PFX "DMA RX buffer too small "
+		        "(len: %u, buffer: %u, nr-dropped: %d)\n",
+		        len, ring->rx_buffersize, cnt);
 		goto drop;
 	}
 	len -= IEEE80211_FCS_LEN;
@@ -945,9 +1111,15 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 #endif
 
 	assert(!ring->tx);
-	status = bcm43xx_dma_read(ring, BCM43xx_DMA_RX_STATUS);
-	descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK);
-	current_slot = descptr / sizeof(struct bcm43xx_dmadesc);
+	if (ring->dma64) {
+		status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
+		descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
+		current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
+	} else {
+		status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
+		descptr = (status & BCM43xx_DMA32_RXDPTR);
+		current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
+	}
 	assert(current_slot >= 0 && current_slot < ring->nr_slots);
 
 	slot = ring->current_slot;
@@ -958,8 +1130,13 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
 		ring->max_used_slots = used_slots;
 #endif
 	}
-	bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX,
-			  (u32)(slot * sizeof(struct bcm43xx_dmadesc)));
+	if (ring->dma64) {
+		bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
+				  (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
+	} else {
+		bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
+				  (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
+	}
 	ring->current_slot = slot;
 }
 
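Note: the RX/TX index registers hold byte offsets into the descriptor table rather than slot numbers, so every read or write scales by the descriptor size — 16 bytes for the 64-bit layout, 8 for the 32-bit one, matching the structs sketched earlier (sizes inferred, not taken from the headers). A standalone model of the conversion used by dmacontroller_poke_tx() and bcm43xx_dma_rx():

	#include <assert.h>
	#include <stdint.h>

	/* Descriptor sizes per the inferred layouts: 4 x u32 vs. 2 x u32. */
	enum { DESC64_SIZE = 16, DESC32_SIZE = 8 };

	static uint32_t slot_to_descptr(int slot, int dma64)
	{
		return (uint32_t)slot * (dma64 ? DESC64_SIZE : DESC32_SIZE);
	}

	static int descptr_to_slot(uint32_t descptr, int dma64)
	{
		return (int)(descptr / (dma64 ? DESC64_SIZE : DESC32_SIZE));
	}

	int main(void)
	{
		assert(descptr_to_slot(slot_to_descptr(7, 1), 1) == 7);
		assert(descptr_to_slot(slot_to_descptr(7, 0), 0) == 7);
		return 0;
	}
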
@@ -967,16 +1144,28 @@ void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
 {
 	assert(ring->tx);
 	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
-	bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
-			  bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
-			  | BCM43xx_DMA_TXCTRL_SUSPEND);
+	if (ring->dma64) {
+		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
+				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
+				  | BCM43xx_DMA64_TXSUSPEND);
+	} else {
+		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
+				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
+				  | BCM43xx_DMA32_TXSUSPEND);
+	}
 }
 
 void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
 {
 	assert(ring->tx);
-	bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL,
-			  bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL)
-			  & ~BCM43xx_DMA_TXCTRL_SUSPEND);
+	if (ring->dma64) {
+		bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
+				  bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
+				  & ~BCM43xx_DMA64_TXSUSPEND);
+	} else {
+		bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
+				  bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
+				  & ~BCM43xx_DMA32_TXSUSPEND);
+	}
 	bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
 }