about summary refs log tree commit diff stats
path: root/drivers/net/wireless/b43legacy/dma.c
diff options
context:
space:
mode:
authorPavel Roskin <proski@gnu.org>2011-07-25 17:40:22 -0400
committerJohn W. Linville <linville@tuxdriver.com>2011-08-08 16:04:08 -0400
commit191d6a8cc2d282db3707e9c71f49815ccdc79c54 (patch)
tree5ce5d0ff4972227f543e1adaf21d4e42ea1d7783 /drivers/net/wireless/b43legacy/dma.c
parentae7f9a740b4ac5a64306abc47a440b794c5b827a (diff)
b43legacy: remove 64-bit DMA support
Devices supported by b43legacy don't support 64-bit DMA.

Signed-off-by: Pavel Roskin <proski@gnu.org>
Acked-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
Diffstat (limited to 'drivers/net/wireless/b43legacy/dma.c')
-rw-r--r--drivers/net/wireless/b43legacy/dma.c374
1 file changed, 71 insertions(+), 303 deletions(-)
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 5010c477abdf..c5535adf6991 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -42,10 +42,9 @@
42 42
43/* 32bit DMA ops. */ 43/* 32bit DMA ops. */
44static 44static
45struct b43legacy_dmadesc_generic *op32_idx2desc( 45struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
46 struct b43legacy_dmaring *ring, 46 int slot,
47 int slot, 47 struct b43legacy_dmadesc_meta **meta)
48 struct b43legacy_dmadesc_meta **meta)
49{ 48{
50 struct b43legacy_dmadesc32 *desc; 49 struct b43legacy_dmadesc32 *desc;
51 50
@@ -53,11 +52,11 @@ struct b43legacy_dmadesc_generic *op32_idx2desc(
53 desc = ring->descbase; 52 desc = ring->descbase;
54 desc = &(desc[slot]); 53 desc = &(desc[slot]);
55 54
56 return (struct b43legacy_dmadesc_generic *)desc; 55 return (struct b43legacy_dmadesc32 *)desc;
57} 56}
58 57
59static void op32_fill_descriptor(struct b43legacy_dmaring *ring, 58static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
60 struct b43legacy_dmadesc_generic *desc, 59 struct b43legacy_dmadesc32 *desc,
61 dma_addr_t dmaaddr, u16 bufsize, 60 dma_addr_t dmaaddr, u16 bufsize,
62 int start, int end, int irq) 61 int start, int end, int irq)
63{ 62{
@@ -67,7 +66,7 @@ static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
67 u32 addr; 66 u32 addr;
68 u32 addrext; 67 u32 addrext;
69 68
70 slot = (int)(&(desc->dma32) - descbase); 69 slot = (int)(desc - descbase);
71 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); 70 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
72 71
73 addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK); 72 addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
@@ -87,8 +86,8 @@ static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
87 ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT) 86 ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
88 & B43legacy_DMA32_DCTL_ADDREXT_MASK; 87 & B43legacy_DMA32_DCTL_ADDREXT_MASK;
89 88
90 desc->dma32.control = cpu_to_le32(ctl); 89 desc->control = cpu_to_le32(ctl);
91 desc->dma32.address = cpu_to_le32(addr); 90 desc->address = cpu_to_le32(addr);
92} 91}
93 92
94static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot) 93static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
@@ -128,121 +127,6 @@ static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
128 (u32)(slot * sizeof(struct b43legacy_dmadesc32))); 127 (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
129} 128}
130 129
131static const struct b43legacy_dma_ops dma32_ops = {
132 .idx2desc = op32_idx2desc,
133 .fill_descriptor = op32_fill_descriptor,
134 .poke_tx = op32_poke_tx,
135 .tx_suspend = op32_tx_suspend,
136 .tx_resume = op32_tx_resume,
137 .get_current_rxslot = op32_get_current_rxslot,
138 .set_current_rxslot = op32_set_current_rxslot,
139};
140
141/* 64bit DMA ops. */
142static
143struct b43legacy_dmadesc_generic *op64_idx2desc(
144 struct b43legacy_dmaring *ring,
145 int slot,
146 struct b43legacy_dmadesc_meta
147 **meta)
148{
149 struct b43legacy_dmadesc64 *desc;
150
151 *meta = &(ring->meta[slot]);
152 desc = ring->descbase;
153 desc = &(desc[slot]);
154
155 return (struct b43legacy_dmadesc_generic *)desc;
156}
157
158static void op64_fill_descriptor(struct b43legacy_dmaring *ring,
159 struct b43legacy_dmadesc_generic *desc,
160 dma_addr_t dmaaddr, u16 bufsize,
161 int start, int end, int irq)
162{
163 struct b43legacy_dmadesc64 *descbase = ring->descbase;
164 int slot;
165 u32 ctl0 = 0;
166 u32 ctl1 = 0;
167 u32 addrlo;
168 u32 addrhi;
169 u32 addrext;
170
171 slot = (int)(&(desc->dma64) - descbase);
172 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
173
174 addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
175 addrhi = (((u64)dmaaddr >> 32) & ~SSB_DMA_TRANSLATION_MASK);
176 addrext = (((u64)dmaaddr >> 32) & SSB_DMA_TRANSLATION_MASK)
177 >> SSB_DMA_TRANSLATION_SHIFT;
178 addrhi |= ring->dev->dma.translation;
179 if (slot == ring->nr_slots - 1)
180 ctl0 |= B43legacy_DMA64_DCTL0_DTABLEEND;
181 if (start)
182 ctl0 |= B43legacy_DMA64_DCTL0_FRAMESTART;
183 if (end)
184 ctl0 |= B43legacy_DMA64_DCTL0_FRAMEEND;
185 if (irq)
186 ctl0 |= B43legacy_DMA64_DCTL0_IRQ;
187 ctl1 |= (bufsize - ring->frameoffset)
188 & B43legacy_DMA64_DCTL1_BYTECNT;
189 ctl1 |= (addrext << B43legacy_DMA64_DCTL1_ADDREXT_SHIFT)
190 & B43legacy_DMA64_DCTL1_ADDREXT_MASK;
191
192 desc->dma64.control0 = cpu_to_le32(ctl0);
193 desc->dma64.control1 = cpu_to_le32(ctl1);
194 desc->dma64.address_low = cpu_to_le32(addrlo);
195 desc->dma64.address_high = cpu_to_le32(addrhi);
196}
197
198static void op64_poke_tx(struct b43legacy_dmaring *ring, int slot)
199{
200 b43legacy_dma_write(ring, B43legacy_DMA64_TXINDEX,
201 (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
202}
203
204static void op64_tx_suspend(struct b43legacy_dmaring *ring)
205{
206 b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
207 b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
208 | B43legacy_DMA64_TXSUSPEND);
209}
210
211static void op64_tx_resume(struct b43legacy_dmaring *ring)
212{
213 b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL,
214 b43legacy_dma_read(ring, B43legacy_DMA64_TXCTL)
215 & ~B43legacy_DMA64_TXSUSPEND);
216}
217
218static int op64_get_current_rxslot(struct b43legacy_dmaring *ring)
219{
220 u32 val;
221
222 val = b43legacy_dma_read(ring, B43legacy_DMA64_RXSTATUS);
223 val &= B43legacy_DMA64_RXSTATDPTR;
224
225 return (val / sizeof(struct b43legacy_dmadesc64));
226}
227
228static void op64_set_current_rxslot(struct b43legacy_dmaring *ring,
229 int slot)
230{
231 b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
232 (u32)(slot * sizeof(struct b43legacy_dmadesc64)));
233}
234
235static const struct b43legacy_dma_ops dma64_ops = {
236 .idx2desc = op64_idx2desc,
237 .fill_descriptor = op64_fill_descriptor,
238 .poke_tx = op64_poke_tx,
239 .tx_suspend = op64_tx_suspend,
240 .tx_resume = op64_tx_resume,
241 .get_current_rxslot = op64_get_current_rxslot,
242 .set_current_rxslot = op64_set_current_rxslot,
243};
244
245
246static inline int free_slots(struct b43legacy_dmaring *ring) 130static inline int free_slots(struct b43legacy_dmaring *ring)
247{ 131{
248 return (ring->nr_slots - ring->used_slots); 132 return (ring->nr_slots - ring->used_slots);
@@ -358,14 +242,6 @@ return 0;
358static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type, 242static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
359 int controller_idx) 243 int controller_idx)
360{ 244{
361 static const u16 map64[] = {
362 B43legacy_MMIO_DMA64_BASE0,
363 B43legacy_MMIO_DMA64_BASE1,
364 B43legacy_MMIO_DMA64_BASE2,
365 B43legacy_MMIO_DMA64_BASE3,
366 B43legacy_MMIO_DMA64_BASE4,
367 B43legacy_MMIO_DMA64_BASE5,
368 };
369 static const u16 map32[] = { 245 static const u16 map32[] = {
370 B43legacy_MMIO_DMA32_BASE0, 246 B43legacy_MMIO_DMA32_BASE0,
371 B43legacy_MMIO_DMA32_BASE1, 247 B43legacy_MMIO_DMA32_BASE1,
@@ -375,11 +251,6 @@ static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
375 B43legacy_MMIO_DMA32_BASE5, 251 B43legacy_MMIO_DMA32_BASE5,
376 }; 252 };
377 253
378 if (type == B43legacy_DMA_64BIT) {
379 B43legacy_WARN_ON(!(controller_idx >= 0 &&
380 controller_idx < ARRAY_SIZE(map64)));
381 return map64[controller_idx];
382 }
383 B43legacy_WARN_ON(!(controller_idx >= 0 && 254 B43legacy_WARN_ON(!(controller_idx >= 0 &&
384 controller_idx < ARRAY_SIZE(map32))); 255 controller_idx < ARRAY_SIZE(map32)));
385 return map32[controller_idx]; 256 return map32[controller_idx];
@@ -491,25 +362,15 @@ static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
491 362
492 might_sleep(); 363 might_sleep();
493 364
494 offset = (type == B43legacy_DMA_64BIT) ? 365 offset = B43legacy_DMA32_RXCTL;
495 B43legacy_DMA64_RXCTL : B43legacy_DMA32_RXCTL;
496 b43legacy_write32(dev, mmio_base + offset, 0); 366 b43legacy_write32(dev, mmio_base + offset, 0);
497 for (i = 0; i < 10; i++) { 367 for (i = 0; i < 10; i++) {
498 offset = (type == B43legacy_DMA_64BIT) ? 368 offset = B43legacy_DMA32_RXSTATUS;
499 B43legacy_DMA64_RXSTATUS : B43legacy_DMA32_RXSTATUS;
500 value = b43legacy_read32(dev, mmio_base + offset); 369 value = b43legacy_read32(dev, mmio_base + offset);
501 if (type == B43legacy_DMA_64BIT) { 370 value &= B43legacy_DMA32_RXSTATE;
502 value &= B43legacy_DMA64_RXSTAT; 371 if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
503 if (value == B43legacy_DMA64_RXSTAT_DISABLED) { 372 i = -1;
504 i = -1; 373 break;
505 break;
506 }
507 } else {
508 value &= B43legacy_DMA32_RXSTATE;
509 if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
510 i = -1;
511 break;
512 }
513 } 374 }
514 msleep(1); 375 msleep(1);
515 } 376 }
@@ -533,43 +394,24 @@ static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
533 might_sleep(); 394 might_sleep();
534 395
535 for (i = 0; i < 10; i++) { 396 for (i = 0; i < 10; i++) {
536 offset = (type == B43legacy_DMA_64BIT) ? 397 offset = B43legacy_DMA32_TXSTATUS;
537 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
538 value = b43legacy_read32(dev, mmio_base + offset); 398 value = b43legacy_read32(dev, mmio_base + offset);
539 if (type == B43legacy_DMA_64BIT) { 399 value &= B43legacy_DMA32_TXSTATE;
540 value &= B43legacy_DMA64_TXSTAT; 400 if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
541 if (value == B43legacy_DMA64_TXSTAT_DISABLED || 401 value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
542 value == B43legacy_DMA64_TXSTAT_IDLEWAIT || 402 value == B43legacy_DMA32_TXSTAT_STOPPED)
543 value == B43legacy_DMA64_TXSTAT_STOPPED) 403 break;
544 break;
545 } else {
546 value &= B43legacy_DMA32_TXSTATE;
547 if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
548 value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
549 value == B43legacy_DMA32_TXSTAT_STOPPED)
550 break;
551 }
552 msleep(1); 404 msleep(1);
553 } 405 }
554 offset = (type == B43legacy_DMA_64BIT) ? B43legacy_DMA64_TXCTL : 406 offset = B43legacy_DMA32_TXCTL;
555 B43legacy_DMA32_TXCTL;
556 b43legacy_write32(dev, mmio_base + offset, 0); 407 b43legacy_write32(dev, mmio_base + offset, 0);
557 for (i = 0; i < 10; i++) { 408 for (i = 0; i < 10; i++) {
558 offset = (type == B43legacy_DMA_64BIT) ? 409 offset = B43legacy_DMA32_TXSTATUS;
559 B43legacy_DMA64_TXSTATUS : B43legacy_DMA32_TXSTATUS;
560 value = b43legacy_read32(dev, mmio_base + offset); 410 value = b43legacy_read32(dev, mmio_base + offset);
561 if (type == B43legacy_DMA_64BIT) { 411 value &= B43legacy_DMA32_TXSTATE;
562 value &= B43legacy_DMA64_TXSTAT; 412 if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
563 if (value == B43legacy_DMA64_TXSTAT_DISABLED) { 413 i = -1;
564 i = -1; 414 break;
565 break;
566 }
567 } else {
568 value &= B43legacy_DMA32_TXSTATE;
569 if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
570 i = -1;
571 break;
572 }
573 } 415 }
574 msleep(1); 416 msleep(1);
575 } 417 }
@@ -601,9 +443,6 @@ static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
601 if ((u64)addr + buffersize > (1ULL << 32)) 443 if ((u64)addr + buffersize > (1ULL << 32))
602 goto address_error; 444 goto address_error;
603 break; 445 break;
604 case B43legacy_DMA_64BIT:
605 /* Currently we can't have addresses beyond 64 bits in the kernel. */
606 break;
607 } 446 }
608 447
609 /* The address is OK. */ 448 /* The address is OK. */
@@ -617,7 +456,7 @@ address_error:
617} 456}
618 457
619static int setup_rx_descbuffer(struct b43legacy_dmaring *ring, 458static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
620 struct b43legacy_dmadesc_generic *desc, 459 struct b43legacy_dmadesc32 *desc,
621 struct b43legacy_dmadesc_meta *meta, 460 struct b43legacy_dmadesc_meta *meta,
622 gfp_t gfp_flags) 461 gfp_t gfp_flags)
623{ 462{
@@ -653,8 +492,7 @@ static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
653 492
654 meta->skb = skb; 493 meta->skb = skb;
655 meta->dmaaddr = dmaaddr; 494 meta->dmaaddr = dmaaddr;
656 ring->ops->fill_descriptor(ring, desc, dmaaddr, 495 op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);
657 ring->rx_buffersize, 0, 0, 0);
658 496
659 rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data); 497 rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
660 rxhdr->frame_len = 0; 498 rxhdr->frame_len = 0;
@@ -671,11 +509,11 @@ static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
671{ 509{
672 int i; 510 int i;
673 int err = -ENOMEM; 511 int err = -ENOMEM;
674 struct b43legacy_dmadesc_generic *desc; 512 struct b43legacy_dmadesc32 *desc;
675 struct b43legacy_dmadesc_meta *meta; 513 struct b43legacy_dmadesc_meta *meta;
676 514
677 for (i = 0; i < ring->nr_slots; i++) { 515 for (i = 0; i < ring->nr_slots; i++) {
678 desc = ring->ops->idx2desc(ring, i, &meta); 516 desc = op32_idx2desc(ring, i, &meta);
679 517
680 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); 518 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
681 if (err) { 519 if (err) {
@@ -692,7 +530,7 @@ out:
692 530
693err_unwind: 531err_unwind:
694 for (i--; i >= 0; i--) { 532 for (i--; i >= 0; i--) {
695 desc = ring->ops->idx2desc(ring, i, &meta); 533 desc = op32_idx2desc(ring, i, &meta);
696 534
697 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); 535 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
698 dev_kfree_skb(meta->skb); 536 dev_kfree_skb(meta->skb);
@@ -710,83 +548,35 @@ static int dmacontroller_setup(struct b43legacy_dmaring *ring)
710 u32 value; 548 u32 value;
711 u32 addrext; 549 u32 addrext;
712 u32 trans = ring->dev->dma.translation; 550 u32 trans = ring->dev->dma.translation;
551 u32 ringbase = (u32)(ring->dmabase);
713 552
714 if (ring->tx) { 553 if (ring->tx) {
715 if (ring->type == B43legacy_DMA_64BIT) { 554 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
716 u64 ringbase = (u64)(ring->dmabase); 555 >> SSB_DMA_TRANSLATION_SHIFT;
717 556 value = B43legacy_DMA32_TXENABLE;
718 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) 557 value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
719 >> SSB_DMA_TRANSLATION_SHIFT; 558 & B43legacy_DMA32_TXADDREXT_MASK;
720 value = B43legacy_DMA64_TXENABLE; 559 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
721 value |= (addrext << B43legacy_DMA64_TXADDREXT_SHIFT) 560 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
722 & B43legacy_DMA64_TXADDREXT_MASK; 561 (ringbase & ~SSB_DMA_TRANSLATION_MASK)
723 b43legacy_dma_write(ring, B43legacy_DMA64_TXCTL, 562 | trans);
724 value);
725 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO,
726 (ringbase & 0xFFFFFFFF));
727 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI,
728 ((ringbase >> 32)
729 & ~SSB_DMA_TRANSLATION_MASK)
730 | trans);
731 } else {
732 u32 ringbase = (u32)(ring->dmabase);
733
734 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
735 >> SSB_DMA_TRANSLATION_SHIFT;
736 value = B43legacy_DMA32_TXENABLE;
737 value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
738 & B43legacy_DMA32_TXADDREXT_MASK;
739 b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
740 value);
741 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
742 (ringbase &
743 ~SSB_DMA_TRANSLATION_MASK)
744 | trans);
745 }
746 } else { 563 } else {
747 err = alloc_initial_descbuffers(ring); 564 err = alloc_initial_descbuffers(ring);
748 if (err) 565 if (err)
749 goto out; 566 goto out;
750 if (ring->type == B43legacy_DMA_64BIT) { 567
751 u64 ringbase = (u64)(ring->dmabase); 568 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
752 569 >> SSB_DMA_TRANSLATION_SHIFT;
753 addrext = ((ringbase >> 32) & SSB_DMA_TRANSLATION_MASK) 570 value = (ring->frameoffset <<
754 >> SSB_DMA_TRANSLATION_SHIFT; 571 B43legacy_DMA32_RXFROFF_SHIFT);
755 value = (ring->frameoffset << 572 value |= B43legacy_DMA32_RXENABLE;
756 B43legacy_DMA64_RXFROFF_SHIFT); 573 value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
757 value |= B43legacy_DMA64_RXENABLE; 574 & B43legacy_DMA32_RXADDREXT_MASK;
758 value |= (addrext << B43legacy_DMA64_RXADDREXT_SHIFT) 575 b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
759 & B43legacy_DMA64_RXADDREXT_MASK; 576 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
760 b43legacy_dma_write(ring, B43legacy_DMA64_RXCTL, 577 (ringbase & ~SSB_DMA_TRANSLATION_MASK)
761 value); 578 | trans);
762 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 579 b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
763 (ringbase & 0xFFFFFFFF));
764 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI,
765 ((ringbase >> 32) &
766 ~SSB_DMA_TRANSLATION_MASK) |
767 trans);
768 b43legacy_dma_write(ring, B43legacy_DMA64_RXINDEX,
769 200);
770 } else {
771 u32 ringbase = (u32)(ring->dmabase);
772
773 addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
774 >> SSB_DMA_TRANSLATION_SHIFT;
775 value = (ring->frameoffset <<
776 B43legacy_DMA32_RXFROFF_SHIFT);
777 value |= B43legacy_DMA32_RXENABLE;
778 value |= (addrext <<
779 B43legacy_DMA32_RXADDREXT_SHIFT)
780 & B43legacy_DMA32_RXADDREXT_MASK;
781 b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL,
782 value);
783 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
784 (ringbase &
785 ~SSB_DMA_TRANSLATION_MASK)
786 | trans);
787 b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
788 200);
789 }
790 } 580 }
791 581
792out: 582out:
@@ -799,19 +589,11 @@ static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
799 if (ring->tx) { 589 if (ring->tx) {
800 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base, 590 b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
801 ring->type); 591 ring->type);
802 if (ring->type == B43legacy_DMA_64BIT) { 592 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
803 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGLO, 0);
804 b43legacy_dma_write(ring, B43legacy_DMA64_TXRINGHI, 0);
805 } else
806 b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
807 } else { 593 } else {
808 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base, 594 b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
809 ring->type); 595 ring->type);
810 if (ring->type == B43legacy_DMA_64BIT) { 596 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
811 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGLO, 0);
812 b43legacy_dma_write(ring, B43legacy_DMA64_RXRINGHI, 0);
813 } else
814 b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
815 } 597 }
816} 598}
817 599
@@ -823,7 +605,7 @@ static void free_all_descbuffers(struct b43legacy_dmaring *ring)
823 if (!ring->used_slots) 605 if (!ring->used_slots)
824 return; 606 return;
825 for (i = 0; i < ring->nr_slots; i++) { 607 for (i = 0; i < ring->nr_slots; i++) {
826 ring->ops->idx2desc(ring, i, &meta); 608 op32_idx2desc(ring, i, &meta);
827 609
828 if (!meta->skb) { 610 if (!meta->skb) {
829 B43legacy_WARN_ON(!ring->tx); 611 B43legacy_WARN_ON(!ring->tx);
@@ -844,9 +626,6 @@ static u64 supported_dma_mask(struct b43legacy_wldev *dev)
844 u32 tmp; 626 u32 tmp;
845 u16 mmio_base; 627 u16 mmio_base;
846 628
847 tmp = b43legacy_read32(dev, SSB_TMSHIGH);
848 if (tmp & SSB_TMSHIGH_DMA64)
849 return DMA_BIT_MASK(64);
850 mmio_base = b43legacy_dmacontroller_base(0, 0); 629 mmio_base = b43legacy_dmacontroller_base(0, 0);
851 b43legacy_write32(dev, 630 b43legacy_write32(dev,
852 mmio_base + B43legacy_DMA32_TXCTL, 631 mmio_base + B43legacy_DMA32_TXCTL,
@@ -865,8 +644,6 @@ static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
865 return B43legacy_DMA_30BIT; 644 return B43legacy_DMA_30BIT;
866 if (dmamask == DMA_BIT_MASK(32)) 645 if (dmamask == DMA_BIT_MASK(32))
867 return B43legacy_DMA_32BIT; 646 return B43legacy_DMA_32BIT;
868 if (dmamask == DMA_BIT_MASK(64))
869 return B43legacy_DMA_64BIT;
870 B43legacy_WARN_ON(1); 647 B43legacy_WARN_ON(1);
871 return B43legacy_DMA_30BIT; 648 return B43legacy_DMA_30BIT;
872} 649}
@@ -937,10 +714,6 @@ struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
937 ring->nr_slots = nr_slots; 714 ring->nr_slots = nr_slots;
938 ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index); 715 ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
939 ring->index = controller_index; 716 ring->index = controller_index;
940 if (type == B43legacy_DMA_64BIT)
941 ring->ops = &dma64_ops;
942 else
943 ring->ops = &dma32_ops;
944 if (for_tx) { 717 if (for_tx) {
945 ring->tx = 1; 718 ring->tx = 1;
946 ring->current_slot = -1; 719 ring->current_slot = -1;
@@ -1247,12 +1020,11 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1247 struct sk_buff **in_skb) 1020 struct sk_buff **in_skb)
1248{ 1021{
1249 struct sk_buff *skb = *in_skb; 1022 struct sk_buff *skb = *in_skb;
1250 const struct b43legacy_dma_ops *ops = ring->ops;
1251 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1023 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1252 u8 *header; 1024 u8 *header;
1253 int slot, old_top_slot, old_used_slots; 1025 int slot, old_top_slot, old_used_slots;
1254 int err; 1026 int err;
1255 struct b43legacy_dmadesc_generic *desc; 1027 struct b43legacy_dmadesc32 *desc;
1256 struct b43legacy_dmadesc_meta *meta; 1028 struct b43legacy_dmadesc_meta *meta;
1257 struct b43legacy_dmadesc_meta *meta_hdr; 1029 struct b43legacy_dmadesc_meta *meta_hdr;
1258 struct sk_buff *bounce_skb; 1030 struct sk_buff *bounce_skb;
@@ -1265,7 +1037,7 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1265 1037
1266 /* Get a slot for the header. */ 1038 /* Get a slot for the header. */
1267 slot = request_slot(ring); 1039 slot = request_slot(ring);
1268 desc = ops->idx2desc(ring, slot, &meta_hdr); 1040 desc = op32_idx2desc(ring, slot, &meta_hdr);
1269 memset(meta_hdr, 0, sizeof(*meta_hdr)); 1041 memset(meta_hdr, 0, sizeof(*meta_hdr));
1270 1042
1271 header = &(ring->txhdr_cache[slot * sizeof( 1043 header = &(ring->txhdr_cache[slot * sizeof(
@@ -1287,12 +1059,12 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1287 ring->used_slots = old_used_slots; 1059 ring->used_slots = old_used_slots;
1288 return -EIO; 1060 return -EIO;
1289 } 1061 }
1290 ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr, 1062 op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
1291 sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0); 1063 sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);
1292 1064
1293 /* Get a slot for the payload. */ 1065 /* Get a slot for the payload. */
1294 slot = request_slot(ring); 1066 slot = request_slot(ring);
1295 desc = ops->idx2desc(ring, slot, &meta); 1067 desc = op32_idx2desc(ring, slot, &meta);
1296 memset(meta, 0, sizeof(*meta)); 1068 memset(meta, 0, sizeof(*meta));
1297 1069
1298 meta->skb = skb; 1070 meta->skb = skb;
@@ -1328,12 +1100,12 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1328 } 1100 }
1329 } 1101 }
1330 1102
1331 ops->fill_descriptor(ring, desc, meta->dmaaddr, 1103 op32_fill_descriptor(ring, desc, meta->dmaaddr,
1332 skb->len, 0, 1, 1); 1104 skb->len, 0, 1, 1);
1333 1105
1334 wmb(); /* previous stuff MUST be done */ 1106 wmb(); /* previous stuff MUST be done */
1335 /* Now transfer the whole frame. */ 1107 /* Now transfer the whole frame. */
1336 ops->poke_tx(ring, next_slot(ring, slot)); 1108 op32_poke_tx(ring, next_slot(ring, slot));
1337 return 0; 1109 return 0;
1338 1110
1339out_free_bounce: 1111out_free_bounce:
@@ -1429,7 +1201,6 @@ out_unlock:
1429void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev, 1201void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1430 const struct b43legacy_txstatus *status) 1202 const struct b43legacy_txstatus *status)
1431{ 1203{
1432 const struct b43legacy_dma_ops *ops;
1433 struct b43legacy_dmaring *ring; 1204 struct b43legacy_dmaring *ring;
1434 struct b43legacy_dmadesc_meta *meta; 1205 struct b43legacy_dmadesc_meta *meta;
1435 int retry_limit; 1206 int retry_limit;
@@ -1442,10 +1213,9 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1442 spin_lock(&ring->lock); 1213 spin_lock(&ring->lock);
1443 1214
1444 B43legacy_WARN_ON(!ring->tx); 1215 B43legacy_WARN_ON(!ring->tx);
1445 ops = ring->ops;
1446 while (1) { 1216 while (1) {
1447 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots)); 1217 B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
1448 ops->idx2desc(ring, slot, &meta); 1218 op32_idx2desc(ring, slot, &meta);
1449 1219
1450 if (meta->skb) 1220 if (meta->skb)
1451 unmap_descbuffer(ring, meta->dmaaddr, 1221 unmap_descbuffer(ring, meta->dmaaddr,
@@ -1528,8 +1298,7 @@ void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
1528static void dma_rx(struct b43legacy_dmaring *ring, 1298static void dma_rx(struct b43legacy_dmaring *ring,
1529 int *slot) 1299 int *slot)
1530{ 1300{
1531 const struct b43legacy_dma_ops *ops = ring->ops; 1301 struct b43legacy_dmadesc32 *desc;
1532 struct b43legacy_dmadesc_generic *desc;
1533 struct b43legacy_dmadesc_meta *meta; 1302 struct b43legacy_dmadesc_meta *meta;
1534 struct b43legacy_rxhdr_fw3 *rxhdr; 1303 struct b43legacy_rxhdr_fw3 *rxhdr;
1535 struct sk_buff *skb; 1304 struct sk_buff *skb;
@@ -1537,7 +1306,7 @@ static void dma_rx(struct b43legacy_dmaring *ring,
1537 int err; 1306 int err;
1538 dma_addr_t dmaaddr; 1307 dma_addr_t dmaaddr;
1539 1308
1540 desc = ops->idx2desc(ring, *slot, &meta); 1309 desc = op32_idx2desc(ring, *slot, &meta);
1541 1310
1542 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); 1311 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1543 skb = meta->skb; 1312 skb = meta->skb;
@@ -1589,7 +1358,7 @@ static void dma_rx(struct b43legacy_dmaring *ring,
1589 s32 tmp = len; 1358 s32 tmp = len;
1590 1359
1591 while (1) { 1360 while (1) {
1592 desc = ops->idx2desc(ring, *slot, &meta); 1361 desc = op32_idx2desc(ring, *slot, &meta);
1593 /* recycle the descriptor buffer. */ 1362 /* recycle the descriptor buffer. */
1594 sync_descbuffer_for_device(ring, meta->dmaaddr, 1363 sync_descbuffer_for_device(ring, meta->dmaaddr,
1595 ring->rx_buffersize); 1364 ring->rx_buffersize);
@@ -1626,13 +1395,12 @@ drop:
1626 1395
1627void b43legacy_dma_rx(struct b43legacy_dmaring *ring) 1396void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
1628{ 1397{
1629 const struct b43legacy_dma_ops *ops = ring->ops;
1630 int slot; 1398 int slot;
1631 int current_slot; 1399 int current_slot;
1632 int used_slots = 0; 1400 int used_slots = 0;
1633 1401
1634 B43legacy_WARN_ON(ring->tx); 1402 B43legacy_WARN_ON(ring->tx);
1635 current_slot = ops->get_current_rxslot(ring); 1403 current_slot = op32_get_current_rxslot(ring);
1636 B43legacy_WARN_ON(!(current_slot >= 0 && current_slot < 1404 B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
1637 ring->nr_slots)); 1405 ring->nr_slots));
1638 1406
@@ -1641,7 +1409,7 @@ void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
1641 dma_rx(ring, &slot); 1409 dma_rx(ring, &slot);
1642 update_max_used_slots(ring, ++used_slots); 1410 update_max_used_slots(ring, ++used_slots);
1643 } 1411 }
1644 ops->set_current_rxslot(ring, slot); 1412 op32_set_current_rxslot(ring, slot);
1645 ring->current_slot = slot; 1413 ring->current_slot = slot;
1646} 1414}
1647 1415
@@ -1651,7 +1419,7 @@ static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
1651 1419
1652 spin_lock_irqsave(&ring->lock, flags); 1420 spin_lock_irqsave(&ring->lock, flags);
1653 B43legacy_WARN_ON(!ring->tx); 1421 B43legacy_WARN_ON(!ring->tx);
1654 ring->ops->tx_suspend(ring); 1422 op32_tx_suspend(ring);
1655 spin_unlock_irqrestore(&ring->lock, flags); 1423 spin_unlock_irqrestore(&ring->lock, flags);
1656} 1424}
1657 1425
@@ -1661,7 +1429,7 @@ static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
1661 1429
1662 spin_lock_irqsave(&ring->lock, flags); 1430 spin_lock_irqsave(&ring->lock, flags);
1663 B43legacy_WARN_ON(!ring->tx); 1431 B43legacy_WARN_ON(!ring->tx);
1664 ring->ops->tx_resume(ring); 1432 op32_tx_resume(ring);
1665 spin_unlock_irqrestore(&ring->lock, flags); 1433 spin_unlock_irqrestore(&ring->lock, flags);
1666} 1434}
1667 1435