author     Michael Buesch <mb@bu3sch.de>                2006-08-15 18:25:16 -0400
committer  John W. Linville <linville@tuxdriver.com>    2006-08-29 17:06:28 -0400
commit     9218e02bd4dba86e458d9c57d66c18517fb642ff (patch)
tree       cb24a5ed0fe9859d4e4cdfa9470b9bd0a382fb05 /drivers/net/wireless
parent     3b4c7d640376dbccfe80fc4f7b8772ecc7de28c5 (diff)
[PATCH] bcm43xx: >1G and 64bit DMA support
This is a rewrite of the bcm43xx DMA engine. It adds support for >1G of
memory (for chips that support the extension bits) and 64-bit DMA (for
chips that support it).

Signed-off-by: Michael Buesch <mb@bu3sch.de>
Signed-off-by: John W. Linville <linville@tuxdriver.com>
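Editor's note: the core of the ">1G / 64-bit" support is the descriptor address
split done by the new fill_descriptor() in bcm43xx_dma.c below. The low 32 bits
of a bus address go into address_low, the high bits into address_high, and the
two bits that would collide with the routing field are exported as the "addrext"
extension in control1. The following is a minimal standalone sketch of that
split; the helper name split_dma64_addr() and the plain C types are illustrative,
only the masks, shifts and routing value are taken from the patch.

#include <stdint.h>
#include <stdio.h>

/* Mirrors BCM43xx_DMA64_ROUTING, _ROUTING_SHIFT and _CLIENTTRANS from bcm43xx_dma.h. */
#define DMA64_ROUTING        0xC0000000u
#define DMA64_ROUTING_SHIFT  30
#define DMA64_CLIENTTRANS    0x80000000u

struct dma64_addr_parts {
	uint32_t addr_lo;	/* descriptor address_low */
	uint32_t addr_hi;	/* descriptor address_high, including routing bits */
	uint32_t addrext;	/* 2-bit extension, placed in control1 */
};

static struct dma64_addr_parts split_dma64_addr(uint64_t busaddr, uint32_t routing)
{
	struct dma64_addr_parts p;

	p.addr_lo = (uint32_t)(busaddr & 0xFFFFFFFFu);
	/* High word without the two routing bit positions... */
	p.addr_hi = (uint32_t)((busaddr >> 32) & ~DMA64_ROUTING);
	/* ...which move into the 2-bit address extension instead. */
	p.addrext = (uint32_t)((busaddr >> 32) >> DMA64_ROUTING_SHIFT);
	p.addr_hi |= routing;
	return p;
}

int main(void)
{
	/* A buffer above the 4G boundary, the case the old code had to reject. */
	struct dma64_addr_parts p =
		split_dma64_addr(0x123456789ULL, DMA64_CLIENTTRANS);

	printf("lo=0x%08x hi=0x%08x addrext=%u\n",
	       (unsigned int)p.addr_lo, (unsigned int)p.addr_hi,
	       (unsigned int)p.addrext);
	return 0;
}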
Diffstat (limited to 'drivers/net/wireless')
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx.h        58
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c   583
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.h   289
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_main.c   85
4 files changed, 691 insertions, 324 deletions
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx.h b/drivers/net/wireless/bcm43xx/bcm43xx.h
index c6ee1e974c84..62fd7e237789 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx.h
@@ -33,14 +33,18 @@
33#define BCM43xx_PCICFG_ICR 0x94 33#define BCM43xx_PCICFG_ICR 0x94
34 34
35/* MMIO offsets */ 35/* MMIO offsets */
36#define BCM43xx_MMIO_DMA1_REASON 0x20 36#define BCM43xx_MMIO_DMA0_REASON 0x20
37#define BCM43xx_MMIO_DMA1_IRQ_MASK 0x24 37#define BCM43xx_MMIO_DMA0_IRQ_MASK 0x24
38#define BCM43xx_MMIO_DMA2_REASON 0x28 38#define BCM43xx_MMIO_DMA1_REASON 0x28
39#define BCM43xx_MMIO_DMA2_IRQ_MASK 0x2C 39#define BCM43xx_MMIO_DMA1_IRQ_MASK 0x2C
40#define BCM43xx_MMIO_DMA3_REASON 0x30 40#define BCM43xx_MMIO_DMA2_REASON 0x30
41#define BCM43xx_MMIO_DMA3_IRQ_MASK 0x34 41#define BCM43xx_MMIO_DMA2_IRQ_MASK 0x34
42#define BCM43xx_MMIO_DMA4_REASON 0x38 42#define BCM43xx_MMIO_DMA3_REASON 0x38
43#define BCM43xx_MMIO_DMA4_IRQ_MASK 0x3C 43#define BCM43xx_MMIO_DMA3_IRQ_MASK 0x3C
44#define BCM43xx_MMIO_DMA4_REASON 0x40
45#define BCM43xx_MMIO_DMA4_IRQ_MASK 0x44
46#define BCM43xx_MMIO_DMA5_REASON 0x48
47#define BCM43xx_MMIO_DMA5_IRQ_MASK 0x4C
44#define BCM43xx_MMIO_STATUS_BITFIELD 0x120 48#define BCM43xx_MMIO_STATUS_BITFIELD 0x120
45#define BCM43xx_MMIO_STATUS2_BITFIELD 0x124 49#define BCM43xx_MMIO_STATUS2_BITFIELD 0x124
46#define BCM43xx_MMIO_GEN_IRQ_REASON 0x128 50#define BCM43xx_MMIO_GEN_IRQ_REASON 0x128
@@ -56,14 +60,27 @@
56#define BCM43xx_MMIO_XMITSTAT_1 0x174 60#define BCM43xx_MMIO_XMITSTAT_1 0x174
57#define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */ 61#define BCM43xx_MMIO_REV3PLUS_TSF_LOW 0x180 /* core rev >= 3 only */
58#define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */ 62#define BCM43xx_MMIO_REV3PLUS_TSF_HIGH 0x184 /* core rev >= 3 only */
59#define BCM43xx_MMIO_DMA1_BASE 0x200 63
60#define BCM43xx_MMIO_DMA2_BASE 0x220 64/* 32-bit DMA */
61#define BCM43xx_MMIO_DMA3_BASE 0x240 65#define BCM43xx_MMIO_DMA32_BASE0 0x200
62#define BCM43xx_MMIO_DMA4_BASE 0x260 66#define BCM43xx_MMIO_DMA32_BASE1 0x220
67#define BCM43xx_MMIO_DMA32_BASE2 0x240
68#define BCM43xx_MMIO_DMA32_BASE3 0x260
69#define BCM43xx_MMIO_DMA32_BASE4 0x280
70#define BCM43xx_MMIO_DMA32_BASE5 0x2A0
71/* 64-bit DMA */
72#define BCM43xx_MMIO_DMA64_BASE0 0x200
73#define BCM43xx_MMIO_DMA64_BASE1 0x240
74#define BCM43xx_MMIO_DMA64_BASE2 0x280
75#define BCM43xx_MMIO_DMA64_BASE3 0x2C0
76#define BCM43xx_MMIO_DMA64_BASE4 0x300
77#define BCM43xx_MMIO_DMA64_BASE5 0x340
78/* PIO */
63#define BCM43xx_MMIO_PIO1_BASE 0x300 79#define BCM43xx_MMIO_PIO1_BASE 0x300
64#define BCM43xx_MMIO_PIO2_BASE 0x310 80#define BCM43xx_MMIO_PIO2_BASE 0x310
65#define BCM43xx_MMIO_PIO3_BASE 0x320 81#define BCM43xx_MMIO_PIO3_BASE 0x320
66#define BCM43xx_MMIO_PIO4_BASE 0x330 82#define BCM43xx_MMIO_PIO4_BASE 0x330
83
67#define BCM43xx_MMIO_PHY_VER 0x3E0 84#define BCM43xx_MMIO_PHY_VER 0x3E0
68#define BCM43xx_MMIO_PHY_RADIO 0x3E2 85#define BCM43xx_MMIO_PHY_RADIO 0x3E2
69#define BCM43xx_MMIO_ANTENNA 0x3E8 86#define BCM43xx_MMIO_ANTENNA 0x3E8
@@ -233,8 +250,14 @@
233#define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000 250#define BCM43xx_SBTMSTATELOW_FORCE_GATE_CLOCK 0x20000
234 251
235/* sbtmstatehigh state flags */ 252/* sbtmstatehigh state flags */
236#define BCM43xx_SBTMSTATEHIGH_SERROR 0x1 253#define BCM43xx_SBTMSTATEHIGH_SERROR 0x00000001
237#define BCM43xx_SBTMSTATEHIGH_BUSY 0x4 254#define BCM43xx_SBTMSTATEHIGH_BUSY 0x00000004
255#define BCM43xx_SBTMSTATEHIGH_TIMEOUT 0x00000020
256#define BCM43xx_SBTMSTATEHIGH_COREFLAGS 0x1FFF0000
257#define BCM43xx_SBTMSTATEHIGH_DMA64BIT 0x10000000
258#define BCM43xx_SBTMSTATEHIGH_GATEDCLK 0x20000000
259#define BCM43xx_SBTMSTATEHIGH_BISTFAILED 0x40000000
260#define BCM43xx_SBTMSTATEHIGH_BISTCOMPLETE 0x80000000
238 261
239/* sbimstate flags */ 262/* sbimstate flags */
240#define BCM43xx_SBIMSTATE_IB_ERROR 0x20000 263#define BCM43xx_SBIMSTATE_IB_ERROR 0x20000
@@ -574,8 +597,11 @@ struct bcm43xx_dma {
574 struct bcm43xx_dmaring *tx_ring1; 597 struct bcm43xx_dmaring *tx_ring1;
575 struct bcm43xx_dmaring *tx_ring2; 598 struct bcm43xx_dmaring *tx_ring2;
576 struct bcm43xx_dmaring *tx_ring3; 599 struct bcm43xx_dmaring *tx_ring3;
600 struct bcm43xx_dmaring *tx_ring4;
601 struct bcm43xx_dmaring *tx_ring5;
602
577 struct bcm43xx_dmaring *rx_ring0; 603 struct bcm43xx_dmaring *rx_ring0;
578 struct bcm43xx_dmaring *rx_ring1; /* only available on core.rev < 5 */ 604 struct bcm43xx_dmaring *rx_ring3; /* only available on core.rev < 5 */
579}; 605};
580 606
581/* Data structures for PIO transmission, per 80211 core. */ 607/* Data structures for PIO transmission, per 80211 core. */
@@ -739,7 +765,7 @@ struct bcm43xx_private {
739 765
740 /* Reason code of the last interrupt. */ 766 /* Reason code of the last interrupt. */
741 u32 irq_reason; 767 u32 irq_reason;
742 u32 dma_reason[4]; 768 u32 dma_reason[6];
743 /* saved irq enable/disable state bitfield. */ 769 /* saved irq enable/disable state bitfield. */
744 u32 irq_savedstate; 770 u32 irq_savedstate;
745 /* Link Quality calculation context. */ 771 /* Link Quality calculation context. */
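Editor's note: the bcm43xx.h hunks above add the BCM43xx_SBTMSTATEHIGH_DMA64BIT
core flag and two parallel sets of DMA controller base addresses, spaced 0x20
apart for the 32-bit engine and 0x40 apart for the 64-bit engine. A rough
standalone sketch of how a controller index maps to an MMIO base under that
layout follows. The arithmetic form is only an observation about the new
defines; the driver itself uses explicit lookup tables in
bcm43xx_dmacontroller_base().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SBTMSTATEHIGH_DMA64BIT	0x10000000u	/* BCM43xx_SBTMSTATEHIGH_DMA64BIT */

/* Index -> MMIO base: both engines start at 0x200; the 32-bit register
 * blocks are 0x20 apart, the 64-bit blocks are 0x40 apart. */
static uint16_t dmacontroller_base(int dma64, int idx)
{
	assert(idx >= 0 && idx < 6);
	return (uint16_t)(0x200 + idx * (dma64 ? 0x40 : 0x20));
}

int main(void)
{
	uint32_t sbtmstatehigh = 0x10000000;	/* pretend the core reports 64-bit DMA */
	int dma64 = !!(sbtmstatehigh & SBTMSTATEHIGH_DMA64BIT);
	int i;

	for (i = 0; i < 6; i++)
		printf("controller %d -> base 0x%03X\n",
		       i, dmacontroller_base(dma64, i));
	return 0;
}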
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index d0318e525ba7..76e3aed4b471 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -4,7 +4,7 @@
4 4
5 DMA ringbuffer and descriptor allocation/management 5 DMA ringbuffer and descriptor allocation/management
6 6
7 Copyright (c) 2005 Michael Buesch <mbuesch@freenet.de> 7 Copyright (c) 2005, 2006 Michael Buesch <mbuesch@freenet.de>
8 8
9 Some code in this file is derived from the b44.c driver 9 Some code in this file is derived from the b44.c driver
10 Copyright (C) 2002 David S. Miller 10 Copyright (C) 2002 David S. Miller
@@ -109,6 +109,35 @@ void return_slot(struct bcm43xx_dmaring *ring, int slot)
109 } 109 }
110} 110}
111 111
112u16 bcm43xx_dmacontroller_base(int dma64bit, int controller_idx)
113{
114 static const u16 map64[] = {
115 BCM43xx_MMIO_DMA64_BASE0,
116 BCM43xx_MMIO_DMA64_BASE1,
117 BCM43xx_MMIO_DMA64_BASE2,
118 BCM43xx_MMIO_DMA64_BASE3,
119 BCM43xx_MMIO_DMA64_BASE4,
120 BCM43xx_MMIO_DMA64_BASE5,
121 };
122 static const u16 map32[] = {
123 BCM43xx_MMIO_DMA32_BASE0,
124 BCM43xx_MMIO_DMA32_BASE1,
125 BCM43xx_MMIO_DMA32_BASE2,
126 BCM43xx_MMIO_DMA32_BASE3,
127 BCM43xx_MMIO_DMA32_BASE4,
128 BCM43xx_MMIO_DMA32_BASE5,
129 };
130
131 if (dma64bit) {
132 assert(controller_idx >= 0 &&
133 controller_idx < ARRAY_SIZE(map64));
134 return map64[controller_idx];
135 }
136 assert(controller_idx >= 0 &&
137 controller_idx < ARRAY_SIZE(map32));
138 return map32[controller_idx];
139}
140
112static inline 141static inline
113dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring, 142dma_addr_t map_descbuffer(struct bcm43xx_dmaring *ring,
114 unsigned char *buf, 143 unsigned char *buf,
@@ -172,7 +201,6 @@ void sync_descbuffer_for_device(struct bcm43xx_dmaring *ring,
172/* Unmap and free a descriptor buffer. */ 201/* Unmap and free a descriptor buffer. */
173static inline 202static inline
174void free_descriptor_buffer(struct bcm43xx_dmaring *ring, 203void free_descriptor_buffer(struct bcm43xx_dmaring *ring,
175 struct bcm43xx_dmadesc *desc,
176 struct bcm43xx_dmadesc_meta *meta, 204 struct bcm43xx_dmadesc_meta *meta,
177 int irq_context) 205 int irq_context)
178{ 206{
@@ -188,23 +216,13 @@ static int alloc_ringmemory(struct bcm43xx_dmaring *ring)
188{ 216{
189 struct device *dev = &(ring->bcm->pci_dev->dev); 217 struct device *dev = &(ring->bcm->pci_dev->dev);
190 218
191 ring->vbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, 219 ring->descbase = dma_alloc_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
192 &(ring->dmabase), GFP_KERNEL); 220 &(ring->dmabase), GFP_KERNEL);
193 if (!ring->vbase) { 221 if (!ring->descbase) {
194 printk(KERN_ERR PFX "DMA ringmemory allocation failed\n"); 222 printk(KERN_ERR PFX "DMA ringmemory allocation failed\n");
195 return -ENOMEM; 223 return -ENOMEM;
196 } 224 }
197 if (ring->dmabase + BCM43xx_DMA_RINGMEMSIZE > BCM43xx_DMA_BUSADDRMAX) { 225 memset(ring->descbase, 0, BCM43xx_DMA_RINGMEMSIZE);
198 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RINGMEMORY >1G "
199 "(0x%llx, len: %lu)\n",
200 (unsigned long long)ring->dmabase,
201 BCM43xx_DMA_RINGMEMSIZE);
202 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
203 ring->vbase, ring->dmabase);
204 return -ENOMEM;
205 }
206 assert(!(ring->dmabase & 0x000003FF));
207 memset(ring->vbase, 0, BCM43xx_DMA_RINGMEMSIZE);
208 226
209 return 0; 227 return 0;
210} 228}
@@ -214,26 +232,34 @@ static void free_ringmemory(struct bcm43xx_dmaring *ring)
214 struct device *dev = &(ring->bcm->pci_dev->dev); 232 struct device *dev = &(ring->bcm->pci_dev->dev);
215 233
216 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE, 234 dma_free_coherent(dev, BCM43xx_DMA_RINGMEMSIZE,
217 ring->vbase, ring->dmabase); 235 ring->descbase, ring->dmabase);
218} 236}
219 237
220/* Reset the RX DMA channel */ 238/* Reset the RX DMA channel */
221int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, 239int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
222 u16 mmio_base) 240 u16 mmio_base, int dma64)
223{ 241{
224 int i; 242 int i;
225 u32 value; 243 u32 value;
244 u16 offset;
226 245
227 bcm43xx_write32(bcm, 246 offset = dma64 ? BCM43xx_DMA64_RXCTL : BCM43xx_DMA32_RXCTL;
228 mmio_base + BCM43xx_DMA_RX_CONTROL, 247 bcm43xx_write32(bcm, mmio_base + offset, 0);
229 0x00000000);
230 for (i = 0; i < 1000; i++) { 248 for (i = 0; i < 1000; i++) {
231 value = bcm43xx_read32(bcm, 249 offset = dma64 ? BCM43xx_DMA64_RXSTATUS : BCM43xx_DMA32_RXSTATUS;
232 mmio_base + BCM43xx_DMA_RX_STATUS); 250 value = bcm43xx_read32(bcm, mmio_base + offset);
233 value &= BCM43xx_DMA_RXSTAT_STAT_MASK; 251 if (dma64) {
234 if (value == BCM43xx_DMA_RXSTAT_STAT_DISABLED) { 252 value &= BCM43xx_DMA64_RXSTAT;
235 i = -1; 253 if (value == BCM43xx_DMA64_RXSTAT_DISABLED) {
236 break; 254 i = -1;
255 break;
256 }
257 } else {
258 value &= BCM43xx_DMA32_RXSTATE;
259 if (value == BCM43xx_DMA32_RXSTAT_DISABLED) {
260 i = -1;
261 break;
262 }
237 } 263 }
238 udelay(10); 264 udelay(10);
239 } 265 }
@@ -247,31 +273,47 @@ int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
247 273
248/* Reset the RX DMA channel */ 274/* Reset the RX DMA channel */
249int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, 275int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
250 u16 mmio_base) 276 u16 mmio_base, int dma64)
251{ 277{
252 int i; 278 int i;
253 u32 value; 279 u32 value;
280 u16 offset;
254 281
255 for (i = 0; i < 1000; i++) { 282 for (i = 0; i < 1000; i++) {
256 value = bcm43xx_read32(bcm, 283 offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
257 mmio_base + BCM43xx_DMA_TX_STATUS); 284 value = bcm43xx_read32(bcm, mmio_base + offset);
258 value &= BCM43xx_DMA_TXSTAT_STAT_MASK; 285 if (dma64) {
259 if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED || 286 value &= BCM43xx_DMA64_TXSTAT;
260 value == BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT || 287 if (value == BCM43xx_DMA64_TXSTAT_DISABLED ||
261 value == BCM43xx_DMA_TXSTAT_STAT_STOPPED) 288 value == BCM43xx_DMA64_TXSTAT_IDLEWAIT ||
262 break; 289 value == BCM43xx_DMA64_TXSTAT_STOPPED)
290 break;
291 } else {
292 value &= BCM43xx_DMA32_TXSTATE;
293 if (value == BCM43xx_DMA32_TXSTAT_DISABLED ||
294 value == BCM43xx_DMA32_TXSTAT_IDLEWAIT ||
295 value == BCM43xx_DMA32_TXSTAT_STOPPED)
296 break;
297 }
263 udelay(10); 298 udelay(10);
264 } 299 }
265 bcm43xx_write32(bcm, 300 offset = dma64 ? BCM43xx_DMA64_TXCTL : BCM43xx_DMA32_TXCTL;
266 mmio_base + BCM43xx_DMA_TX_CONTROL, 301 bcm43xx_write32(bcm, mmio_base + offset, 0);
267 0x00000000);
268 for (i = 0; i < 1000; i++) { 302 for (i = 0; i < 1000; i++) {
269 value = bcm43xx_read32(bcm, 303 offset = dma64 ? BCM43xx_DMA64_TXSTATUS : BCM43xx_DMA32_TXSTATUS;
270 mmio_base + BCM43xx_DMA_TX_STATUS); 304 value = bcm43xx_read32(bcm, mmio_base + offset);
271 value &= BCM43xx_DMA_TXSTAT_STAT_MASK; 305 if (dma64) {
272 if (value == BCM43xx_DMA_TXSTAT_STAT_DISABLED) { 306 value &= BCM43xx_DMA64_TXSTAT;
273 i = -1; 307 if (value == BCM43xx_DMA64_TXSTAT_DISABLED) {
274 break; 308 i = -1;
309 break;
310 }
311 } else {
312 value &= BCM43xx_DMA32_TXSTATE;
313 if (value == BCM43xx_DMA32_TXSTAT_DISABLED) {
314 i = -1;
315 break;
316 }
275 } 317 }
276 udelay(10); 318 udelay(10);
277 } 319 }
@@ -285,47 +327,98 @@ int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
285 return 0; 327 return 0;
286} 328}
287 329
330static void fill_descriptor(struct bcm43xx_dmaring *ring,
331 struct bcm43xx_dmadesc_generic *desc,
332 dma_addr_t dmaaddr,
333 u16 bufsize,
334 int start, int end, int irq)
335{
336 int slot;
337
338 slot = bcm43xx_dma_desc2idx(ring, desc);
339 assert(slot >= 0 && slot < ring->nr_slots);
340
341 if (ring->dma64) {
342 u32 ctl0 = 0, ctl1 = 0;
343 u32 addrlo, addrhi;
344 u32 addrext;
345
346 addrlo = (u32)(dmaaddr & 0xFFFFFFFF);
347 addrhi = (((u64)dmaaddr >> 32) & ~BCM43xx_DMA64_ROUTING);
348 addrext = (((u64)dmaaddr >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
349 addrhi |= ring->routing;
350 if (slot == ring->nr_slots - 1)
351 ctl0 |= BCM43xx_DMA64_DCTL0_DTABLEEND;
352 if (start)
353 ctl0 |= BCM43xx_DMA64_DCTL0_FRAMESTART;
354 if (end)
355 ctl0 |= BCM43xx_DMA64_DCTL0_FRAMEEND;
356 if (irq)
357 ctl0 |= BCM43xx_DMA64_DCTL0_IRQ;
358 ctl1 |= (bufsize - ring->frameoffset)
359 & BCM43xx_DMA64_DCTL1_BYTECNT;
360 ctl1 |= (addrext << BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT)
361 & BCM43xx_DMA64_DCTL1_ADDREXT_MASK;
362
363 desc->dma64.control0 = cpu_to_le32(ctl0);
364 desc->dma64.control1 = cpu_to_le32(ctl1);
365 desc->dma64.address_low = cpu_to_le32(addrlo);
366 desc->dma64.address_high = cpu_to_le32(addrhi);
367 } else {
368 u32 ctl;
369 u32 addr;
370 u32 addrext;
371
372 addr = (u32)(dmaaddr & ~BCM43xx_DMA32_ROUTING);
373 addrext = (u32)(dmaaddr & BCM43xx_DMA32_ROUTING)
374 >> BCM43xx_DMA32_ROUTING_SHIFT;
375 addr |= ring->routing;
376 ctl = (bufsize - ring->frameoffset)
377 & BCM43xx_DMA32_DCTL_BYTECNT;
378 if (slot == ring->nr_slots - 1)
379 ctl |= BCM43xx_DMA32_DCTL_DTABLEEND;
380 if (start)
381 ctl |= BCM43xx_DMA32_DCTL_FRAMESTART;
382 if (end)
383 ctl |= BCM43xx_DMA32_DCTL_FRAMEEND;
384 if (irq)
385 ctl |= BCM43xx_DMA32_DCTL_IRQ;
386 ctl |= (addrext << BCM43xx_DMA32_DCTL_ADDREXT_SHIFT)
387 & BCM43xx_DMA32_DCTL_ADDREXT_MASK;
388
389 desc->dma32.control = cpu_to_le32(ctl);
390 desc->dma32.address = cpu_to_le32(addr);
391 }
392}
393
288static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring, 394static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
289 struct bcm43xx_dmadesc *desc, 395 struct bcm43xx_dmadesc_generic *desc,
290 struct bcm43xx_dmadesc_meta *meta, 396 struct bcm43xx_dmadesc_meta *meta,
291 gfp_t gfp_flags) 397 gfp_t gfp_flags)
292{ 398{
293 struct bcm43xx_rxhdr *rxhdr; 399 struct bcm43xx_rxhdr *rxhdr;
400 struct bcm43xx_hwxmitstatus *xmitstat;
294 dma_addr_t dmaaddr; 401 dma_addr_t dmaaddr;
295 u32 desc_addr;
296 u32 desc_ctl;
297 const int slot = (int)(desc - ring->vbase);
298 struct sk_buff *skb; 402 struct sk_buff *skb;
299 403
300 assert(slot >= 0 && slot < ring->nr_slots);
301 assert(!ring->tx); 404 assert(!ring->tx);
302 405
303 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags); 406 skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
304 if (unlikely(!skb)) 407 if (unlikely(!skb))
305 return -ENOMEM; 408 return -ENOMEM;
306 dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0); 409 dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
307 if (unlikely(dmaaddr + ring->rx_buffersize > BCM43xx_DMA_BUSADDRMAX)) {
308 unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
309 dev_kfree_skb_any(skb);
310 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA RX SKB >1G "
311 "(0x%llx, len: %u)\n",
312 (unsigned long long)dmaaddr, ring->rx_buffersize);
313 return -ENOMEM;
314 }
315 meta->skb = skb; 410 meta->skb = skb;
316 meta->dmaaddr = dmaaddr; 411 meta->dmaaddr = dmaaddr;
317 skb->dev = ring->bcm->net_dev; 412 skb->dev = ring->bcm->net_dev;
318 desc_addr = (u32)(dmaaddr + ring->memoffset); 413
319 desc_ctl = (BCM43xx_DMADTOR_BYTECNT_MASK & 414 fill_descriptor(ring, desc, dmaaddr,
320 (u32)(ring->rx_buffersize - ring->frameoffset)); 415 ring->rx_buffersize, 0, 0, 0);
321 if (slot == ring->nr_slots - 1)
322 desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
323 set_desc_addr(desc, desc_addr);
324 set_desc_ctl(desc, desc_ctl);
325 416
326 rxhdr = (struct bcm43xx_rxhdr *)(skb->data); 417 rxhdr = (struct bcm43xx_rxhdr *)(skb->data);
327 rxhdr->frame_length = 0; 418 rxhdr->frame_length = 0;
328 rxhdr->flags1 = 0; 419 rxhdr->flags1 = 0;
420 xmitstat = (struct bcm43xx_hwxmitstatus *)(skb->data);
421 xmitstat->cookie = 0;
329 422
330 return 0; 423 return 0;
331} 424}
@@ -336,17 +429,17 @@ static int setup_rx_descbuffer(struct bcm43xx_dmaring *ring,
336static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring) 429static int alloc_initial_descbuffers(struct bcm43xx_dmaring *ring)
337{ 430{
338 int i, err = -ENOMEM; 431 int i, err = -ENOMEM;
339 struct bcm43xx_dmadesc *desc; 432 struct bcm43xx_dmadesc_generic *desc;
340 struct bcm43xx_dmadesc_meta *meta; 433 struct bcm43xx_dmadesc_meta *meta;
341 434
342 for (i = 0; i < ring->nr_slots; i++) { 435 for (i = 0; i < ring->nr_slots; i++) {
343 desc = ring->vbase + i; 436 desc = bcm43xx_dma_idx2desc(ring, i, &meta);
344 meta = ring->meta + i;
345 437
346 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL); 438 err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
347 if (err) 439 if (err)
348 goto err_unwind; 440 goto err_unwind;
349 } 441 }
442 mb();
350 ring->used_slots = ring->nr_slots; 443 ring->used_slots = ring->nr_slots;
351 err = 0; 444 err = 0;
352out: 445out:
@@ -354,8 +447,7 @@ out:
354 447
355err_unwind: 448err_unwind:
356 for (i--; i >= 0; i--) { 449 for (i--; i >= 0; i--) {
357 desc = ring->vbase + i; 450 desc = bcm43xx_dma_idx2desc(ring, i, &meta);
358 meta = ring->meta + i;
359 451
360 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0); 452 unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
361 dev_kfree_skb(meta->skb); 453 dev_kfree_skb(meta->skb);
@@ -371,27 +463,67 @@ static int dmacontroller_setup(struct bcm43xx_dmaring *ring)
371{ 463{
372 int err = 0; 464 int err = 0;
373 u32 value; 465 u32 value;
466 u32 addrext;
374 467
375 if (ring->tx) { 468 if (ring->tx) {
376 /* Set Transmit Control register to "transmit enable" */ 469 if (ring->dma64) {
377 bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL, 470 u64 ringbase = (u64)(ring->dmabase);
378 BCM43xx_DMA_TXCTRL_ENABLE); 471
379 /* Set Transmit Descriptor ring address. */ 472 addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
380 bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 473 value = BCM43xx_DMA64_TXENABLE;
381 ring->dmabase + ring->memoffset); 474 value |= (addrext << BCM43xx_DMA64_TXADDREXT_SHIFT)
475 & BCM43xx_DMA64_TXADDREXT_MASK;
476 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL, value);
477 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO,
478 (ringbase & 0xFFFFFFFF));
479 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI,
480 ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
481 | ring->routing);
482 } else {
483 u32 ringbase = (u32)(ring->dmabase);
484
485 addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
486 value = BCM43xx_DMA32_TXENABLE;
487 value |= (addrext << BCM43xx_DMA32_TXADDREXT_SHIFT)
488 & BCM43xx_DMA32_TXADDREXT_MASK;
489 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL, value);
490 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING,
491 (ringbase & ~BCM43xx_DMA32_ROUTING)
492 | ring->routing);
493 }
382 } else { 494 } else {
383 err = alloc_initial_descbuffers(ring); 495 err = alloc_initial_descbuffers(ring);
384 if (err) 496 if (err)
385 goto out; 497 goto out;
386 /* Set Receive Control "receive enable" and frame offset */ 498 if (ring->dma64) {
387 value = (ring->frameoffset << BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT); 499 u64 ringbase = (u64)(ring->dmabase);
388 value |= BCM43xx_DMA_RXCTRL_ENABLE; 500
389 bcm43xx_dma_write(ring, BCM43xx_DMA_RX_CONTROL, value); 501 addrext = ((ringbase >> 32) >> BCM43xx_DMA64_ROUTING_SHIFT);
390 /* Set Receive Descriptor ring address. */ 502 value = (ring->frameoffset << BCM43xx_DMA64_RXFROFF_SHIFT);
391 bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 503 value |= BCM43xx_DMA64_RXENABLE;
392 ring->dmabase + ring->memoffset); 504 value |= (addrext << BCM43xx_DMA64_RXADDREXT_SHIFT)
393 /* Init the descriptor pointer. */ 505 & BCM43xx_DMA64_RXADDREXT_MASK;
394 bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 200); 506 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXCTL, value);
507 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO,
508 (ringbase & 0xFFFFFFFF));
509 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI,
510 ((ringbase >> 32) & ~BCM43xx_DMA64_ROUTING)
511 | ring->routing);
512 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX, 200);
513 } else {
514 u32 ringbase = (u32)(ring->dmabase);
515
516 addrext = (ringbase >> BCM43xx_DMA32_ROUTING_SHIFT);
517 value = (ring->frameoffset << BCM43xx_DMA32_RXFROFF_SHIFT);
518 value |= BCM43xx_DMA32_RXENABLE;
519 value |= (addrext << BCM43xx_DMA32_RXADDREXT_SHIFT)
520 & BCM43xx_DMA32_RXADDREXT_MASK;
521 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXCTL, value);
522 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING,
523 (ringbase & ~BCM43xx_DMA32_ROUTING)
524 | ring->routing);
525 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX, 200);
526 }
395 } 527 }
396 528
397out: 529out:
@@ -402,27 +534,32 @@ out:
402static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring) 534static void dmacontroller_cleanup(struct bcm43xx_dmaring *ring)
403{ 535{
404 if (ring->tx) { 536 if (ring->tx) {
405 bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base); 537 bcm43xx_dmacontroller_tx_reset(ring->bcm, ring->mmio_base, ring->dma64);
406 /* Zero out Transmit Descriptor ring address. */ 538 if (ring->dma64) {
407 bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_RING, 0); 539 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGLO, 0);
540 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXRINGHI, 0);
541 } else
542 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXRING, 0);
408 } else { 543 } else {
409 bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base); 544 bcm43xx_dmacontroller_rx_reset(ring->bcm, ring->mmio_base, ring->dma64);
410 /* Zero out Receive Descriptor ring address. */ 545 if (ring->dma64) {
411 bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_RING, 0); 546 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGLO, 0);
547 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXRINGHI, 0);
548 } else
549 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXRING, 0);
412 } 550 }
413} 551}
414 552
415static void free_all_descbuffers(struct bcm43xx_dmaring *ring) 553static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
416{ 554{
417 struct bcm43xx_dmadesc *desc; 555 struct bcm43xx_dmadesc_generic *desc;
418 struct bcm43xx_dmadesc_meta *meta; 556 struct bcm43xx_dmadesc_meta *meta;
419 int i; 557 int i;
420 558
421 if (!ring->used_slots) 559 if (!ring->used_slots)
422 return; 560 return;
423 for (i = 0; i < ring->nr_slots; i++) { 561 for (i = 0; i < ring->nr_slots; i++) {
424 desc = ring->vbase + i; 562 desc = bcm43xx_dma_idx2desc(ring, i, &meta);
425 meta = ring->meta + i;
426 563
427 if (!meta->skb) { 564 if (!meta->skb) {
428 assert(ring->tx); 565 assert(ring->tx);
@@ -430,62 +567,67 @@ static void free_all_descbuffers(struct bcm43xx_dmaring *ring)
430 } 567 }
431 if (ring->tx) { 568 if (ring->tx) {
432 unmap_descbuffer(ring, meta->dmaaddr, 569 unmap_descbuffer(ring, meta->dmaaddr,
433 meta->skb->len, 1); 570 meta->skb->len, 1);
434 } else { 571 } else {
435 unmap_descbuffer(ring, meta->dmaaddr, 572 unmap_descbuffer(ring, meta->dmaaddr,
436 ring->rx_buffersize, 0); 573 ring->rx_buffersize, 0);
437 } 574 }
438 free_descriptor_buffer(ring, desc, meta, 0); 575 free_descriptor_buffer(ring, meta, 0);
439 } 576 }
440} 577}
441 578
442/* Main initialization function. */ 579/* Main initialization function. */
443static 580static
444struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm, 581struct bcm43xx_dmaring * bcm43xx_setup_dmaring(struct bcm43xx_private *bcm,
445 u16 dma_controller_base, 582 int controller_index,
446 int nr_descriptor_slots, 583 int for_tx,
447 int tx) 584 int dma64)
448{ 585{
449 struct bcm43xx_dmaring *ring; 586 struct bcm43xx_dmaring *ring;
450 int err; 587 int err;
588 int nr_slots;
451 589
452 ring = kzalloc(sizeof(*ring), GFP_KERNEL); 590 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
453 if (!ring) 591 if (!ring)
454 goto out; 592 goto out;
455 593
456 ring->meta = kzalloc(sizeof(*ring->meta) * nr_descriptor_slots, 594 nr_slots = BCM43xx_RXRING_SLOTS;
595 if (for_tx)
596 nr_slots = BCM43xx_TXRING_SLOTS;
597
598 ring->meta = kcalloc(nr_slots, sizeof(struct bcm43xx_dmadesc_meta),
457 GFP_KERNEL); 599 GFP_KERNEL);
458 if (!ring->meta) 600 if (!ring->meta)
459 goto err_kfree_ring; 601 goto err_kfree_ring;
460 602
461 ring->memoffset = BCM43xx_DMA_DMABUSADDROFFSET; 603 ring->routing = BCM43xx_DMA32_CLIENTTRANS;
604 if (dma64)
605 ring->routing = BCM43xx_DMA64_CLIENTTRANS;
462#ifdef CONFIG_BCM947XX 606#ifdef CONFIG_BCM947XX
463 if (bcm->pci_dev->bus->number == 0) 607 if (bcm->pci_dev->bus->number == 0)
464 ring->memoffset = 0; 608 ring->routing = dma64 ? BCM43xx_DMA64_NOTRANS : BCM43xx_DMA32_NOTRANS;
465#endif 609#endif
466 610
467 ring->bcm = bcm; 611 ring->bcm = bcm;
468 ring->nr_slots = nr_descriptor_slots; 612 ring->nr_slots = nr_slots;
469 ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100; 613 ring->suspend_mark = ring->nr_slots * BCM43xx_TXSUSPEND_PERCENT / 100;
470 ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100; 614 ring->resume_mark = ring->nr_slots * BCM43xx_TXRESUME_PERCENT / 100;
471 assert(ring->suspend_mark < ring->resume_mark); 615 assert(ring->suspend_mark < ring->resume_mark);
472 ring->mmio_base = dma_controller_base; 616 ring->mmio_base = bcm43xx_dmacontroller_base(dma64, controller_index);
473 if (tx) { 617 ring->index = controller_index;
618 ring->dma64 = !!dma64;
619 if (for_tx) {
474 ring->tx = 1; 620 ring->tx = 1;
475 ring->current_slot = -1; 621 ring->current_slot = -1;
476 } else { 622 } else {
477 switch (dma_controller_base) { 623 if (ring->index == 0) {
478 case BCM43xx_MMIO_DMA1_BASE: 624 ring->rx_buffersize = BCM43xx_DMA0_RX_BUFFERSIZE;
479 ring->rx_buffersize = BCM43xx_DMA1_RXBUFFERSIZE; 625 ring->frameoffset = BCM43xx_DMA0_RX_FRAMEOFFSET;
480 ring->frameoffset = BCM43xx_DMA1_RX_FRAMEOFFSET; 626 } else if (ring->index == 3) {
481 break; 627 ring->rx_buffersize = BCM43xx_DMA3_RX_BUFFERSIZE;
482 case BCM43xx_MMIO_DMA4_BASE: 628 ring->frameoffset = BCM43xx_DMA3_RX_FRAMEOFFSET;
483 ring->rx_buffersize = BCM43xx_DMA4_RXBUFFERSIZE; 629 } else
484 ring->frameoffset = BCM43xx_DMA4_RX_FRAMEOFFSET;
485 break;
486 default:
487 assert(0); 630 assert(0);
488 }
489 } 631 }
490 632
491 err = alloc_ringmemory(ring); 633 err = alloc_ringmemory(ring);
@@ -514,7 +656,8 @@ static void bcm43xx_destroy_dmaring(struct bcm43xx_dmaring *ring)
514 if (!ring) 656 if (!ring)
515 return; 657 return;
516 658
517 dprintk(KERN_INFO PFX "DMA 0x%04x (%s) max used slots: %d/%d\n", 659 dprintk(KERN_INFO PFX "DMA-%s 0x%04X (%s) max used slots: %d/%d\n",
660 (ring->dma64) ? "64" : "32",
518 ring->mmio_base, 661 ring->mmio_base,
519 (ring->tx) ? "TX" : "RX", 662 (ring->tx) ? "TX" : "RX",
520 ring->max_used_slots, ring->nr_slots); 663 ring->max_used_slots, ring->nr_slots);
@@ -537,10 +680,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
537 return; 680 return;
538 dma = bcm43xx_current_dma(bcm); 681 dma = bcm43xx_current_dma(bcm);
539 682
540 bcm43xx_destroy_dmaring(dma->rx_ring1); 683 bcm43xx_destroy_dmaring(dma->rx_ring3);
541 dma->rx_ring1 = NULL; 684 dma->rx_ring3 = NULL;
542 bcm43xx_destroy_dmaring(dma->rx_ring0); 685 bcm43xx_destroy_dmaring(dma->rx_ring0);
543 dma->rx_ring0 = NULL; 686 dma->rx_ring0 = NULL;
687
688 bcm43xx_destroy_dmaring(dma->tx_ring5);
689 dma->tx_ring5 = NULL;
690 bcm43xx_destroy_dmaring(dma->tx_ring4);
691 dma->tx_ring4 = NULL;
544 bcm43xx_destroy_dmaring(dma->tx_ring3); 692 bcm43xx_destroy_dmaring(dma->tx_ring3);
545 dma->tx_ring3 = NULL; 693 dma->tx_ring3 = NULL;
546 bcm43xx_destroy_dmaring(dma->tx_ring2); 694 bcm43xx_destroy_dmaring(dma->tx_ring2);
@@ -556,48 +704,59 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm)
556 struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm); 704 struct bcm43xx_dma *dma = bcm43xx_current_dma(bcm);
557 struct bcm43xx_dmaring *ring; 705 struct bcm43xx_dmaring *ring;
558 int err = -ENOMEM; 706 int err = -ENOMEM;
707 int dma64 = 0;
708 u32 sbtmstatehi;
709
710 sbtmstatehi = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATEHIGH);
711 if (sbtmstatehi & BCM43xx_SBTMSTATEHIGH_DMA64BIT)
712 dma64 = 1;
559 713
560 /* setup TX DMA channels. */ 714 /* setup TX DMA channels. */
561 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE, 715 ring = bcm43xx_setup_dmaring(bcm, 0, 1, dma64);
562 BCM43xx_TXRING_SLOTS, 1);
563 if (!ring) 716 if (!ring)
564 goto out; 717 goto out;
565 dma->tx_ring0 = ring; 718 dma->tx_ring0 = ring;
566 719
567 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA2_BASE, 720 ring = bcm43xx_setup_dmaring(bcm, 1, 1, dma64);
568 BCM43xx_TXRING_SLOTS, 1);
569 if (!ring) 721 if (!ring)
570 goto err_destroy_tx0; 722 goto err_destroy_tx0;
571 dma->tx_ring1 = ring; 723 dma->tx_ring1 = ring;
572 724
573 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA3_BASE, 725 ring = bcm43xx_setup_dmaring(bcm, 2, 1, dma64);
574 BCM43xx_TXRING_SLOTS, 1);
575 if (!ring) 726 if (!ring)
576 goto err_destroy_tx1; 727 goto err_destroy_tx1;
577 dma->tx_ring2 = ring; 728 dma->tx_ring2 = ring;
578 729
579 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE, 730 ring = bcm43xx_setup_dmaring(bcm, 3, 1, dma64);
580 BCM43xx_TXRING_SLOTS, 1);
581 if (!ring) 731 if (!ring)
582 goto err_destroy_tx2; 732 goto err_destroy_tx2;
583 dma->tx_ring3 = ring; 733 dma->tx_ring3 = ring;
584 734
585 /* setup RX DMA channels. */ 735 ring = bcm43xx_setup_dmaring(bcm, 4, 1, dma64);
586 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA1_BASE,
587 BCM43xx_RXRING_SLOTS, 0);
588 if (!ring) 736 if (!ring)
589 goto err_destroy_tx3; 737 goto err_destroy_tx3;
738 dma->tx_ring4 = ring;
739
740 ring = bcm43xx_setup_dmaring(bcm, 5, 1, dma64);
741 if (!ring)
742 goto err_destroy_tx4;
743 dma->tx_ring5 = ring;
744
745 /* setup RX DMA channels. */
746 ring = bcm43xx_setup_dmaring(bcm, 0, 0, dma64);
747 if (!ring)
748 goto err_destroy_tx5;
590 dma->rx_ring0 = ring; 749 dma->rx_ring0 = ring;
591 750
592 if (bcm->current_core->rev < 5) { 751 if (bcm->current_core->rev < 5) {
593 ring = bcm43xx_setup_dmaring(bcm, BCM43xx_MMIO_DMA4_BASE, 752 ring = bcm43xx_setup_dmaring(bcm, 3, 0, dma64);
594 BCM43xx_RXRING_SLOTS, 0);
595 if (!ring) 753 if (!ring)
596 goto err_destroy_rx0; 754 goto err_destroy_rx0;
597 dma->rx_ring1 = ring; 755 dma->rx_ring3 = ring;
598 } 756 }
599 757
600 dprintk(KERN_INFO PFX "DMA initialized\n"); 758 dprintk(KERN_INFO PFX "%s DMA initialized\n",
759 dma64 ? "64-bit" : "32-bit");
601 err = 0; 760 err = 0;
602out: 761out:
603 return err; 762 return err;
@@ -605,6 +764,12 @@ out:
605err_destroy_rx0: 764err_destroy_rx0:
606 bcm43xx_destroy_dmaring(dma->rx_ring0); 765 bcm43xx_destroy_dmaring(dma->rx_ring0);
607 dma->rx_ring0 = NULL; 766 dma->rx_ring0 = NULL;
767err_destroy_tx5:
768 bcm43xx_destroy_dmaring(dma->tx_ring5);
769 dma->tx_ring5 = NULL;
770err_destroy_tx4:
771 bcm43xx_destroy_dmaring(dma->tx_ring4);
772 dma->tx_ring4 = NULL;
608err_destroy_tx3: 773err_destroy_tx3:
609 bcm43xx_destroy_dmaring(dma->tx_ring3); 774 bcm43xx_destroy_dmaring(dma->tx_ring3);
610 dma->tx_ring3 = NULL; 775 dma->tx_ring3 = NULL;
@@ -624,7 +789,7 @@ err_destroy_tx0:
624static u16 generate_cookie(struct bcm43xx_dmaring *ring, 789static u16 generate_cookie(struct bcm43xx_dmaring *ring,
625 int slot) 790 int slot)
626{ 791{
627 u16 cookie = 0xF000; 792 u16 cookie = 0x1000;
628 793
629 /* Use the upper 4 bits of the cookie as 794 /* Use the upper 4 bits of the cookie as
630 * DMA controller ID and store the slot number 795 * DMA controller ID and store the slot number
@@ -632,21 +797,25 @@ static u16 generate_cookie(struct bcm43xx_dmaring *ring,
632 * Note that the cookie must never be 0, as this 797 * Note that the cookie must never be 0, as this
633 * is a special value used in RX path. 798 * is a special value used in RX path.
634 */ 799 */
635 switch (ring->mmio_base) { 800 switch (ring->index) {
636 default: 801 case 0:
637 assert(0);
638 case BCM43xx_MMIO_DMA1_BASE:
639 cookie = 0xA000; 802 cookie = 0xA000;
640 break; 803 break;
641 case BCM43xx_MMIO_DMA2_BASE: 804 case 1:
642 cookie = 0xB000; 805 cookie = 0xB000;
643 break; 806 break;
644 case BCM43xx_MMIO_DMA3_BASE: 807 case 2:
645 cookie = 0xC000; 808 cookie = 0xC000;
646 break; 809 break;
647 case BCM43xx_MMIO_DMA4_BASE: 810 case 3:
648 cookie = 0xD000; 811 cookie = 0xD000;
649 break; 812 break;
813 case 4:
814 cookie = 0xE000;
815 break;
816 case 5:
817 cookie = 0xF000;
818 break;
650 } 819 }
651 assert(((u16)slot & 0xF000) == 0x0000); 820 assert(((u16)slot & 0xF000) == 0x0000);
652 cookie |= (u16)slot; 821 cookie |= (u16)slot;
@@ -675,6 +844,12 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
675 case 0xD000: 844 case 0xD000:
676 ring = dma->tx_ring3; 845 ring = dma->tx_ring3;
677 break; 846 break;
847 case 0xE000:
848 ring = dma->tx_ring4;
849 break;
850 case 0xF000:
851 ring = dma->tx_ring5;
852 break;
678 default: 853 default:
679 assert(0); 854 assert(0);
680 } 855 }
@@ -687,6 +862,9 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
687static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring, 862static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
688 int slot) 863 int slot)
689{ 864{
865 u16 offset;
866 int descsize;
867
690 /* Everything is ready to start. Buffers are DMA mapped and 868 /* Everything is ready to start. Buffers are DMA mapped and
691 * associated with slots. 869 * associated with slots.
692 * "slot" is the last slot of the new frame we want to transmit. 870 * "slot" is the last slot of the new frame we want to transmit.
@@ -694,25 +872,26 @@ static void dmacontroller_poke_tx(struct bcm43xx_dmaring *ring,
694 */ 872 */
695 wmb(); 873 wmb();
696 slot = next_slot(ring, slot); 874 slot = next_slot(ring, slot);
697 bcm43xx_dma_write(ring, BCM43xx_DMA_TX_DESC_INDEX, 875 offset = (ring->dma64) ? BCM43xx_DMA64_TXINDEX : BCM43xx_DMA32_TXINDEX;
698 (u32)(slot * sizeof(struct bcm43xx_dmadesc))); 876 descsize = (ring->dma64) ? sizeof(struct bcm43xx_dmadesc64)
877 : sizeof(struct bcm43xx_dmadesc32);
878 bcm43xx_dma_write(ring, offset,
879 (u32)(slot * descsize));
699} 880}
700 881
701static int dma_tx_fragment(struct bcm43xx_dmaring *ring, 882static void dma_tx_fragment(struct bcm43xx_dmaring *ring,
702 struct sk_buff *skb, 883 struct sk_buff *skb,
703 u8 cur_frag) 884 u8 cur_frag)
704{ 885{
705 int slot; 886 int slot;
706 struct bcm43xx_dmadesc *desc; 887 struct bcm43xx_dmadesc_generic *desc;
707 struct bcm43xx_dmadesc_meta *meta; 888 struct bcm43xx_dmadesc_meta *meta;
708 u32 desc_ctl; 889 dma_addr_t dmaaddr;
709 u32 desc_addr;
710 890
711 assert(skb_shinfo(skb)->nr_frags == 0); 891 assert(skb_shinfo(skb)->nr_frags == 0);
712 892
713 slot = request_slot(ring); 893 slot = request_slot(ring);
714 desc = ring->vbase + slot; 894 desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
715 meta = ring->meta + slot;
716 895
717 /* Add a device specific TX header. */ 896 /* Add a device specific TX header. */
718 assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr)); 897 assert(skb_headroom(skb) >= sizeof(struct bcm43xx_txhdr));
@@ -729,29 +908,14 @@ static int dma_tx_fragment(struct bcm43xx_dmaring *ring,
729 generate_cookie(ring, slot)); 908 generate_cookie(ring, slot));
730 909
731 meta->skb = skb; 910 meta->skb = skb;
732 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 911 dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
733 if (unlikely(meta->dmaaddr + skb->len > BCM43xx_DMA_BUSADDRMAX)) { 912 meta->dmaaddr = dmaaddr;
734 return_slot(ring, slot);
735 printk(KERN_ERR PFX ">>>FATAL ERROR<<< DMA TX SKB >1G "
736 "(0x%llx, len: %u)\n",
737 (unsigned long long)meta->dmaaddr, skb->len);
738 return -ENOMEM;
739 }
740 913
741 desc_addr = (u32)(meta->dmaaddr + ring->memoffset); 914 fill_descriptor(ring, desc, dmaaddr,
742 desc_ctl = BCM43xx_DMADTOR_FRAMESTART | BCM43xx_DMADTOR_FRAMEEND; 915 skb->len, 1, 1, 1);
743 desc_ctl |= BCM43xx_DMADTOR_COMPIRQ;
744 desc_ctl |= (BCM43xx_DMADTOR_BYTECNT_MASK &
745 (u32)(meta->skb->len - ring->frameoffset));
746 if (slot == ring->nr_slots - 1)
747 desc_ctl |= BCM43xx_DMADTOR_DTABLEEND;
748 916
749 set_desc_ctl(desc, desc_ctl);
750 set_desc_addr(desc, desc_addr);
751 /* Now transfer the whole frame. */ 917 /* Now transfer the whole frame. */
752 dmacontroller_poke_tx(ring, slot); 918 dmacontroller_poke_tx(ring, slot);
753
754 return 0;
755} 919}
756 920
757int bcm43xx_dma_tx(struct bcm43xx_private *bcm, 921int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
@@ -781,7 +945,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
781 /* Take skb from ieee80211_txb_free */ 945 /* Take skb from ieee80211_txb_free */
782 txb->fragments[i] = NULL; 946 txb->fragments[i] = NULL;
783 dma_tx_fragment(ring, skb, i); 947 dma_tx_fragment(ring, skb, i);
784 //TODO: handle failure of dma_tx_fragment
785 } 948 }
786 ieee80211_txb_free(txb); 949 ieee80211_txb_free(txb);
787 950
@@ -792,23 +955,28 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
792 struct bcm43xx_xmitstatus *status) 955 struct bcm43xx_xmitstatus *status)
793{ 956{
794 struct bcm43xx_dmaring *ring; 957 struct bcm43xx_dmaring *ring;
795 struct bcm43xx_dmadesc *desc; 958 struct bcm43xx_dmadesc_generic *desc;
796 struct bcm43xx_dmadesc_meta *meta; 959 struct bcm43xx_dmadesc_meta *meta;
797 int is_last_fragment; 960 int is_last_fragment;
798 int slot; 961 int slot;
962 u32 tmp;
799 963
800 ring = parse_cookie(bcm, status->cookie, &slot); 964 ring = parse_cookie(bcm, status->cookie, &slot);
801 assert(ring); 965 assert(ring);
802 assert(ring->tx); 966 assert(ring->tx);
803 assert(get_desc_ctl(ring->vbase + slot) & BCM43xx_DMADTOR_FRAMESTART);
804 while (1) { 967 while (1) {
805 assert(slot >= 0 && slot < ring->nr_slots); 968 assert(slot >= 0 && slot < ring->nr_slots);
806 desc = ring->vbase + slot; 969 desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
807 meta = ring->meta + slot;
808 970
809 is_last_fragment = !!(get_desc_ctl(desc) & BCM43xx_DMADTOR_FRAMEEND); 971 if (ring->dma64) {
972 tmp = le32_to_cpu(desc->dma64.control0);
973 is_last_fragment = !!(tmp & BCM43xx_DMA64_DCTL0_FRAMEEND);
974 } else {
975 tmp = le32_to_cpu(desc->dma32.control);
976 is_last_fragment = !!(tmp & BCM43xx_DMA32_DCTL_FRAMEEND);
977 }
810 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1); 978 unmap_descbuffer(ring, meta->dmaaddr, meta->skb->len, 1);
811 free_descriptor_buffer(ring, desc, meta, 1); 979 free_descriptor_buffer(ring, meta, 1);
812 /* Everything belonging to the slot is unmapped 980 /* Everything belonging to the slot is unmapped
813 * and freed, so we can return it. 981 * and freed, so we can return it.
814 */ 982 */
@@ -824,7 +992,7 @@ void bcm43xx_dma_handle_xmitstatus(struct bcm43xx_private *bcm,
824static void dma_rx(struct bcm43xx_dmaring *ring, 992static void dma_rx(struct bcm43xx_dmaring *ring,
825 int *slot) 993 int *slot)
826{ 994{
827 struct bcm43xx_dmadesc *desc; 995 struct bcm43xx_dmadesc_generic *desc;
828 struct bcm43xx_dmadesc_meta *meta; 996 struct bcm43xx_dmadesc_meta *meta;
829 struct bcm43xx_rxhdr *rxhdr; 997 struct bcm43xx_rxhdr *rxhdr;
830 struct sk_buff *skb; 998 struct sk_buff *skb;
@@ -832,13 +1000,12 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
832 int err; 1000 int err;
833 dma_addr_t dmaaddr; 1001 dma_addr_t dmaaddr;
834 1002
835 desc = ring->vbase + *slot; 1003 desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
836 meta = ring->meta + *slot;
837 1004
838 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize); 1005 sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
839 skb = meta->skb; 1006 skb = meta->skb;
840 1007
841 if (ring->mmio_base == BCM43xx_MMIO_DMA4_BASE) { 1008 if (ring->index == 3) {
842 /* We received an xmit status. */ 1009 /* We received an xmit status. */
843 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data; 1010 struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
844 struct bcm43xx_xmitstatus stat; 1011 struct bcm43xx_xmitstatus stat;
@@ -894,8 +1061,7 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
894 s32 tmp = len; 1061 s32 tmp = len;
895 1062
896 while (1) { 1063 while (1) {
897 desc = ring->vbase + *slot; 1064 desc = bcm43xx_dma_idx2desc(ring, *slot, &meta);
898 meta = ring->meta + *slot;
899 /* recycle the descriptor buffer. */ 1065 /* recycle the descriptor buffer. */
900 sync_descbuffer_for_device(ring, meta->dmaaddr, 1066 sync_descbuffer_for_device(ring, meta->dmaaddr,
901 ring->rx_buffersize); 1067 ring->rx_buffersize);
@@ -906,8 +1072,8 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
906 break; 1072 break;
907 } 1073 }
908 printkl(KERN_ERR PFX "DMA RX buffer too small " 1074 printkl(KERN_ERR PFX "DMA RX buffer too small "
909 "(len: %u, buffer: %u, nr-dropped: %d)\n", 1075 "(len: %u, buffer: %u, nr-dropped: %d)\n",
910 len, ring->rx_buffersize, cnt); 1076 len, ring->rx_buffersize, cnt);
911 goto drop; 1077 goto drop;
912 } 1078 }
913 len -= IEEE80211_FCS_LEN; 1079 len -= IEEE80211_FCS_LEN;
@@ -945,9 +1111,15 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
945#endif 1111#endif
946 1112
947 assert(!ring->tx); 1113 assert(!ring->tx);
948 status = bcm43xx_dma_read(ring, BCM43xx_DMA_RX_STATUS); 1114 if (ring->dma64) {
949 descptr = (status & BCM43xx_DMA_RXSTAT_DPTR_MASK); 1115 status = bcm43xx_dma_read(ring, BCM43xx_DMA64_RXSTATUS);
950 current_slot = descptr / sizeof(struct bcm43xx_dmadesc); 1116 descptr = (status & BCM43xx_DMA64_RXSTATDPTR);
1117 current_slot = descptr / sizeof(struct bcm43xx_dmadesc64);
1118 } else {
1119 status = bcm43xx_dma_read(ring, BCM43xx_DMA32_RXSTATUS);
1120 descptr = (status & BCM43xx_DMA32_RXDPTR);
1121 current_slot = descptr / sizeof(struct bcm43xx_dmadesc32);
1122 }
951 assert(current_slot >= 0 && current_slot < ring->nr_slots); 1123 assert(current_slot >= 0 && current_slot < ring->nr_slots);
952 1124
953 slot = ring->current_slot; 1125 slot = ring->current_slot;
@@ -958,8 +1130,13 @@ void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring)
958 ring->max_used_slots = used_slots; 1130 ring->max_used_slots = used_slots;
959#endif 1131#endif
960 } 1132 }
961 bcm43xx_dma_write(ring, BCM43xx_DMA_RX_DESC_INDEX, 1133 if (ring->dma64) {
962 (u32)(slot * sizeof(struct bcm43xx_dmadesc))); 1134 bcm43xx_dma_write(ring, BCM43xx_DMA64_RXINDEX,
1135 (u32)(slot * sizeof(struct bcm43xx_dmadesc64)));
1136 } else {
1137 bcm43xx_dma_write(ring, BCM43xx_DMA32_RXINDEX,
1138 (u32)(slot * sizeof(struct bcm43xx_dmadesc32)));
1139 }
963 ring->current_slot = slot; 1140 ring->current_slot = slot;
964} 1141}
965 1142
@@ -967,16 +1144,28 @@ void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring)
967{ 1144{
968 assert(ring->tx); 1145 assert(ring->tx);
969 bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1); 1146 bcm43xx_power_saving_ctl_bits(ring->bcm, -1, 1);
970 bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL, 1147 if (ring->dma64) {
971 bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL) 1148 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
972 | BCM43xx_DMA_TXCTRL_SUSPEND); 1149 bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
1150 | BCM43xx_DMA64_TXSUSPEND);
1151 } else {
1152 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
1153 bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
1154 | BCM43xx_DMA32_TXSUSPEND);
1155 }
973} 1156}
974 1157
975void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring) 1158void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring)
976{ 1159{
977 assert(ring->tx); 1160 assert(ring->tx);
978 bcm43xx_dma_write(ring, BCM43xx_DMA_TX_CONTROL, 1161 if (ring->dma64) {
979 bcm43xx_dma_read(ring, BCM43xx_DMA_TX_CONTROL) 1162 bcm43xx_dma_write(ring, BCM43xx_DMA64_TXCTL,
980 & ~BCM43xx_DMA_TXCTRL_SUSPEND); 1163 bcm43xx_dma_read(ring, BCM43xx_DMA64_TXCTL)
1164 & ~BCM43xx_DMA64_TXSUSPEND);
1165 } else {
1166 bcm43xx_dma_write(ring, BCM43xx_DMA32_TXCTL,
1167 bcm43xx_dma_read(ring, BCM43xx_DMA32_TXCTL)
1168 & ~BCM43xx_DMA32_TXSUSPEND);
1169 }
981 bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1); 1170 bcm43xx_power_saving_ctl_bits(ring->bcm, -1, -1);
982} 1171}
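Editor's note: the generate_cookie()/parse_cookie() changes in the
bcm43xx_dma.c hunks above extend the TX-status cookie scheme to six rings. The
upper 4 bits carry the controller index (0xA through 0xF for rings 0 to 5),
the lower 12 bits carry the slot number, and cookie 0 stays reserved for the
RX path. A small sketch of that packing, with stand-in helper names:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Pack ring index 0..5 and slot 0..0xFFF into one 16-bit cookie. */
static uint16_t make_cookie(int ring_index, int slot)
{
	assert(ring_index >= 0 && ring_index <= 5);
	assert((slot & 0xF000) == 0);
	return (uint16_t)(((0xA + ring_index) << 12) | slot);
}

/* Recover ring index and slot from a TX-status cookie. */
static void split_cookie(uint16_t cookie, int *ring_index, int *slot)
{
	*ring_index = (cookie >> 12) - 0xA;
	*slot = cookie & 0x0FFF;
}

int main(void)
{
	int ring, slot;
	uint16_t cookie = make_cookie(5, 0x123);	/* tx_ring5, slot 0x123 */

	split_cookie(cookie, &ring, &slot);
	printf("cookie=0x%04X -> ring %d, slot 0x%03X\n",
	       (unsigned int)cookie, ring, (unsigned int)slot);
	return 0;
}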
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
index b7d77638ba8c..258a2f9bd7a6 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.h
@@ -14,63 +14,179 @@
14#define BCM43xx_DMAIRQ_NONFATALMASK (1 << 13) 14#define BCM43xx_DMAIRQ_NONFATALMASK (1 << 13)
15#define BCM43xx_DMAIRQ_RX_DONE (1 << 16) 15#define BCM43xx_DMAIRQ_RX_DONE (1 << 16)
16 16
17/* DMA controller register offsets. (relative to BCM43xx_DMA#_BASE) */ 17
18#define BCM43xx_DMA_TX_CONTROL 0x00 18/*** 32-bit DMA Engine. ***/
19#define BCM43xx_DMA_TX_DESC_RING 0x04 19
20#define BCM43xx_DMA_TX_DESC_INDEX 0x08 20/* 32-bit DMA controller registers. */
21#define BCM43xx_DMA_TX_STATUS 0x0c 21#define BCM43xx_DMA32_TXCTL 0x00
22#define BCM43xx_DMA_RX_CONTROL 0x10 22#define BCM43xx_DMA32_TXENABLE 0x00000001
23#define BCM43xx_DMA_RX_DESC_RING 0x14 23#define BCM43xx_DMA32_TXSUSPEND 0x00000002
24#define BCM43xx_DMA_RX_DESC_INDEX 0x18 24#define BCM43xx_DMA32_TXLOOPBACK 0x00000004
25#define BCM43xx_DMA_RX_STATUS 0x1c 25#define BCM43xx_DMA32_TXFLUSH 0x00000010
26 26#define BCM43xx_DMA32_TXADDREXT_MASK 0x00030000
27/* DMA controller channel control word values. */ 27#define BCM43xx_DMA32_TXADDREXT_SHIFT 16
28#define BCM43xx_DMA_TXCTRL_ENABLE (1 << 0) 28#define BCM43xx_DMA32_TXRING 0x04
29#define BCM43xx_DMA_TXCTRL_SUSPEND (1 << 1) 29#define BCM43xx_DMA32_TXINDEX 0x08
30#define BCM43xx_DMA_TXCTRL_LOOPBACK (1 << 2) 30#define BCM43xx_DMA32_TXSTATUS 0x0C
31#define BCM43xx_DMA_TXCTRL_FLUSH (1 << 4) 31#define BCM43xx_DMA32_TXDPTR 0x00000FFF
32#define BCM43xx_DMA_RXCTRL_ENABLE (1 << 0) 32#define BCM43xx_DMA32_TXSTATE 0x0000F000
33#define BCM43xx_DMA_RXCTRL_FRAMEOFF_MASK 0x000000fe 33#define BCM43xx_DMA32_TXSTAT_DISABLED 0x00000000
34#define BCM43xx_DMA_RXCTRL_FRAMEOFF_SHIFT 1 34#define BCM43xx_DMA32_TXSTAT_ACTIVE 0x00001000
35#define BCM43xx_DMA_RXCTRL_PIO (1 << 8) 35#define BCM43xx_DMA32_TXSTAT_IDLEWAIT 0x00002000
36/* DMA controller channel status word values. */ 36#define BCM43xx_DMA32_TXSTAT_STOPPED 0x00003000
37#define BCM43xx_DMA_TXSTAT_DPTR_MASK 0x00000fff 37#define BCM43xx_DMA32_TXSTAT_SUSP 0x00004000
38#define BCM43xx_DMA_TXSTAT_STAT_MASK 0x0000f000 38#define BCM43xx_DMA32_TXERROR 0x000F0000
39#define BCM43xx_DMA_TXSTAT_STAT_DISABLED 0x00000000 39#define BCM43xx_DMA32_TXERR_NOERR 0x00000000
40#define BCM43xx_DMA_TXSTAT_STAT_ACTIVE 0x00001000 40#define BCM43xx_DMA32_TXERR_PROT 0x00010000
41#define BCM43xx_DMA_TXSTAT_STAT_IDLEWAIT 0x00002000 41#define BCM43xx_DMA32_TXERR_UNDERRUN 0x00020000
42#define BCM43xx_DMA_TXSTAT_STAT_STOPPED 0x00003000 42#define BCM43xx_DMA32_TXERR_BUFREAD 0x00030000
43#define BCM43xx_DMA_TXSTAT_STAT_SUSP 0x00004000 43#define BCM43xx_DMA32_TXERR_DESCREAD 0x00040000
44#define BCM43xx_DMA_TXSTAT_ERROR_MASK 0x000f0000 44#define BCM43xx_DMA32_TXACTIVE 0xFFF00000
45#define BCM43xx_DMA_TXSTAT_FLUSHED (1 << 20) 45#define BCM43xx_DMA32_RXCTL 0x10
46#define BCM43xx_DMA_RXSTAT_DPTR_MASK 0x00000fff 46#define BCM43xx_DMA32_RXENABLE 0x00000001
47#define BCM43xx_DMA_RXSTAT_STAT_MASK 0x0000f000 47#define BCM43xx_DMA32_RXFROFF_MASK 0x000000FE
48#define BCM43xx_DMA_RXSTAT_STAT_DISABLED 0x00000000 48#define BCM43xx_DMA32_RXFROFF_SHIFT 1
49#define BCM43xx_DMA_RXSTAT_STAT_ACTIVE 0x00001000 49#define BCM43xx_DMA32_RXDIRECTFIFO 0x00000100
50#define BCM43xx_DMA_RXSTAT_STAT_IDLEWAIT 0x00002000 50#define BCM43xx_DMA32_RXADDREXT_MASK 0x00030000
51#define BCM43xx_DMA_RXSTAT_STAT_RESERVED 0x00003000 51#define BCM43xx_DMA32_RXADDREXT_SHIFT 16
52#define BCM43xx_DMA_RXSTAT_STAT_ERRORS 0x00004000 52#define BCM43xx_DMA32_RXRING 0x14
53#define BCM43xx_DMA_RXSTAT_ERROR_MASK 0x000f0000 53#define BCM43xx_DMA32_RXINDEX 0x18
54 54#define BCM43xx_DMA32_RXSTATUS 0x1C
55/* DMA descriptor control field values. */ 55#define BCM43xx_DMA32_RXDPTR 0x00000FFF
56#define BCM43xx_DMADTOR_BYTECNT_MASK 0x00001fff 56#define BCM43xx_DMA32_RXSTATE 0x0000F000
57#define BCM43xx_DMADTOR_DTABLEEND (1 << 28) /* End of descriptor table */ 57#define BCM43xx_DMA32_RXSTAT_DISABLED 0x00000000
58#define BCM43xx_DMADTOR_COMPIRQ (1 << 29) /* IRQ on completion request */ 58#define BCM43xx_DMA32_RXSTAT_ACTIVE 0x00001000
59#define BCM43xx_DMADTOR_FRAMEEND (1 << 30) 59#define BCM43xx_DMA32_RXSTAT_IDLEWAIT 0x00002000
60#define BCM43xx_DMADTOR_FRAMESTART (1 << 31) 60#define BCM43xx_DMA32_RXSTAT_STOPPED 0x00003000
61#define BCM43xx_DMA32_RXERROR 0x000F0000
62#define BCM43xx_DMA32_RXERR_NOERR 0x00000000
63#define BCM43xx_DMA32_RXERR_PROT 0x00010000
64#define BCM43xx_DMA32_RXERR_OVERFLOW 0x00020000
65#define BCM43xx_DMA32_RXERR_BUFWRITE 0x00030000
66#define BCM43xx_DMA32_RXERR_DESCREAD 0x00040000
67#define BCM43xx_DMA32_RXACTIVE 0xFFF00000
68
69/* 32-bit DMA descriptor. */
70struct bcm43xx_dmadesc32 {
71 __le32 control;
72 __le32 address;
73} __attribute__((__packed__));
74#define BCM43xx_DMA32_DCTL_BYTECNT 0x00001FFF
75#define BCM43xx_DMA32_DCTL_ADDREXT_MASK 0x00030000
76#define BCM43xx_DMA32_DCTL_ADDREXT_SHIFT 16
77#define BCM43xx_DMA32_DCTL_DTABLEEND 0x10000000
78#define BCM43xx_DMA32_DCTL_IRQ 0x20000000
79#define BCM43xx_DMA32_DCTL_FRAMEEND 0x40000000
80#define BCM43xx_DMA32_DCTL_FRAMESTART 0x80000000
81
82/* Address field Routing value. */
83#define BCM43xx_DMA32_ROUTING 0xC0000000
84#define BCM43xx_DMA32_ROUTING_SHIFT 30
85#define BCM43xx_DMA32_NOTRANS 0x00000000
86#define BCM43xx_DMA32_CLIENTTRANS 0x40000000
87
88
89
90/*** 64-bit DMA Engine. ***/
91
92/* 64-bit DMA controller registers. */
93#define BCM43xx_DMA64_TXCTL 0x00
94#define BCM43xx_DMA64_TXENABLE 0x00000001
95#define BCM43xx_DMA64_TXSUSPEND 0x00000002
96#define BCM43xx_DMA64_TXLOOPBACK 0x00000004
97#define BCM43xx_DMA64_TXFLUSH 0x00000010
98#define BCM43xx_DMA64_TXADDREXT_MASK 0x00030000
99#define BCM43xx_DMA64_TXADDREXT_SHIFT 16
100#define BCM43xx_DMA64_TXINDEX 0x04
101#define BCM43xx_DMA64_TXRINGLO 0x08
102#define BCM43xx_DMA64_TXRINGHI 0x0C
103#define BCM43xx_DMA64_TXSTATUS 0x10
104#define BCM43xx_DMA64_TXSTATDPTR 0x00001FFF
105#define BCM43xx_DMA64_TXSTAT 0xF0000000
106#define BCM43xx_DMA64_TXSTAT_DISABLED 0x00000000
107#define BCM43xx_DMA64_TXSTAT_ACTIVE 0x10000000
108#define BCM43xx_DMA64_TXSTAT_IDLEWAIT 0x20000000
109#define BCM43xx_DMA64_TXSTAT_STOPPED 0x30000000
110#define BCM43xx_DMA64_TXSTAT_SUSP 0x40000000
111#define BCM43xx_DMA64_TXERROR 0x14
112#define BCM43xx_DMA64_TXERRDPTR 0x0001FFFF
113#define BCM43xx_DMA64_TXERR 0xF0000000
114#define BCM43xx_DMA64_TXERR_NOERR 0x00000000
115#define BCM43xx_DMA64_TXERR_PROT 0x10000000
116#define BCM43xx_DMA64_TXERR_UNDERRUN 0x20000000
117#define BCM43xx_DMA64_TXERR_TRANSFER 0x30000000
118#define BCM43xx_DMA64_TXERR_DESCREAD 0x40000000
119#define BCM43xx_DMA64_TXERR_CORE 0x50000000
120#define BCM43xx_DMA64_RXCTL 0x20
121#define BCM43xx_DMA64_RXENABLE 0x00000001
122#define BCM43xx_DMA64_RXFROFF_MASK 0x000000FE
123#define BCM43xx_DMA64_RXFROFF_SHIFT 1
124#define BCM43xx_DMA64_RXDIRECTFIFO 0x00000100
125#define BCM43xx_DMA64_RXADDREXT_MASK 0x00030000
126#define BCM43xx_DMA64_RXADDREXT_SHIFT 16
127#define BCM43xx_DMA64_RXINDEX 0x24
128#define BCM43xx_DMA64_RXRINGLO 0x28
129#define BCM43xx_DMA64_RXRINGHI 0x2C
130#define BCM43xx_DMA64_RXSTATUS 0x30
131#define BCM43xx_DMA64_RXSTATDPTR 0x00001FFF
132#define BCM43xx_DMA64_RXSTAT 0xF0000000
133#define BCM43xx_DMA64_RXSTAT_DISABLED 0x00000000
134#define BCM43xx_DMA64_RXSTAT_ACTIVE 0x10000000
135#define BCM43xx_DMA64_RXSTAT_IDLEWAIT 0x20000000
136#define BCM43xx_DMA64_RXSTAT_STOPPED 0x30000000
137#define BCM43xx_DMA64_RXSTAT_SUSP 0x40000000
138#define BCM43xx_DMA64_RXERROR 0x34
139#define BCM43xx_DMA64_RXERRDPTR 0x0001FFFF
140#define BCM43xx_DMA64_RXERR 0xF0000000
141#define BCM43xx_DMA64_RXERR_NOERR 0x00000000
142#define BCM43xx_DMA64_RXERR_PROT 0x10000000
143#define BCM43xx_DMA64_RXERR_UNDERRUN 0x20000000
144#define BCM43xx_DMA64_RXERR_TRANSFER 0x30000000
145#define BCM43xx_DMA64_RXERR_DESCREAD 0x40000000
146#define BCM43xx_DMA64_RXERR_CORE 0x50000000
147
148/* 64-bit DMA descriptor. */
149struct bcm43xx_dmadesc64 {
150 __le32 control0;
151 __le32 control1;
152 __le32 address_low;
153 __le32 address_high;
154} __attribute__((__packed__));
155#define BCM43xx_DMA64_DCTL0_DTABLEEND 0x10000000
156#define BCM43xx_DMA64_DCTL0_IRQ 0x20000000
157#define BCM43xx_DMA64_DCTL0_FRAMEEND 0x40000000
158#define BCM43xx_DMA64_DCTL0_FRAMESTART 0x80000000
159#define BCM43xx_DMA64_DCTL1_BYTECNT 0x00001FFF
160#define BCM43xx_DMA64_DCTL1_ADDREXT_MASK 0x00030000
161#define BCM43xx_DMA64_DCTL1_ADDREXT_SHIFT 16
162
163/* Address field Routing value. */
164#define BCM43xx_DMA64_ROUTING 0xC0000000
165#define BCM43xx_DMA64_ROUTING_SHIFT 30
166#define BCM43xx_DMA64_NOTRANS 0x00000000
167#define BCM43xx_DMA64_CLIENTTRANS 0x80000000
168
169
170
171struct bcm43xx_dmadesc_generic {
172 union {
173 struct bcm43xx_dmadesc32 dma32;
174 struct bcm43xx_dmadesc64 dma64;
175 } __attribute__((__packed__));
176} __attribute__((__packed__));
177
61 178
62/* Misc DMA constants */ 179/* Misc DMA constants */
63#define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE 180#define BCM43xx_DMA_RINGMEMSIZE PAGE_SIZE
64#define BCM43xx_DMA_BUSADDRMAX 0x3FFFFFFF 181#define BCM43xx_DMA0_RX_FRAMEOFFSET 30
65#define BCM43xx_DMA_DMABUSADDROFFSET (1 << 30) 182#define BCM43xx_DMA3_RX_FRAMEOFFSET 0
66#define BCM43xx_DMA1_RX_FRAMEOFFSET 30 183
67#define BCM43xx_DMA4_RX_FRAMEOFFSET 0
68 184
69/* DMA engine tuning knobs */ 185/* DMA engine tuning knobs */
70#define BCM43xx_TXRING_SLOTS 512 186#define BCM43xx_TXRING_SLOTS 512
71#define BCM43xx_RXRING_SLOTS 64 187#define BCM43xx_RXRING_SLOTS 64
72#define BCM43xx_DMA1_RXBUFFERSIZE (2304 + 100) 188#define BCM43xx_DMA0_RX_BUFFERSIZE (2304 + 100)
73#define BCM43xx_DMA4_RXBUFFERSIZE 16 189#define BCM43xx_DMA3_RX_BUFFERSIZE 16
74/* Suspend the tx queue, if less than this percent slots are free. */ 190/* Suspend the tx queue, if less than this percent slots are free. */
75#define BCM43xx_TXSUSPEND_PERCENT 20 191#define BCM43xx_TXSUSPEND_PERCENT 20
76/* Resume the tx queue, if more than this percent slots are free. */ 192/* Resume the tx queue, if more than this percent slots are free. */
@@ -86,17 +202,6 @@ struct bcm43xx_private;
86struct bcm43xx_xmitstatus; 202struct bcm43xx_xmitstatus;
87 203
88 204
89struct bcm43xx_dmadesc {
90 __le32 _control;
91 __le32 _address;
92} __attribute__((__packed__));
93
94/* Macros to access the bcm43xx_dmadesc struct */
95#define get_desc_ctl(desc) le32_to_cpu((desc)->_control)
96#define set_desc_ctl(desc, ctl) do { (desc)->_control = cpu_to_le32(ctl); } while (0)
97#define get_desc_addr(desc) le32_to_cpu((desc)->_address)
98#define set_desc_addr(desc, addr) do { (desc)->_address = cpu_to_le32(addr); } while (0)
99
100struct bcm43xx_dmadesc_meta { 205struct bcm43xx_dmadesc_meta {
101 /* The kernel DMA-able buffer. */ 206 /* The kernel DMA-able buffer. */
102 struct sk_buff *skb; 207 struct sk_buff *skb;
@@ -105,15 +210,14 @@ struct bcm43xx_dmadesc_meta {
105}; 210};
106 211
107struct bcm43xx_dmaring { 212struct bcm43xx_dmaring {
108 struct bcm43xx_private *bcm;
109 /* Kernel virtual base address of the ring memory. */ 213 /* Kernel virtual base address of the ring memory. */
110 struct bcm43xx_dmadesc *vbase; 214 void *descbase;
111 /* DMA memory offset */
112 dma_addr_t memoffset;
113 /* (Unadjusted) DMA base bus-address of the ring memory. */
114 dma_addr_t dmabase;
115 /* Meta data about all descriptors. */ 215 /* Meta data about all descriptors. */
116 struct bcm43xx_dmadesc_meta *meta; 216 struct bcm43xx_dmadesc_meta *meta;
217 /* DMA Routing value. */
218 u32 routing;
219 /* (Unadjusted) DMA base bus-address of the ring memory. */
220 dma_addr_t dmabase;
117 /* Number of descriptor slots in the ring. */ 221 /* Number of descriptor slots in the ring. */
118 int nr_slots; 222 int nr_slots;
119 /* Number of used descriptor slots. */ 223 /* Number of used descriptor slots. */
@@ -127,12 +231,14 @@ struct bcm43xx_dmaring {
127 u32 frameoffset; 231 u32 frameoffset;
128 /* Descriptor buffer size. */ 232 /* Descriptor buffer size. */
129 u16 rx_buffersize; 233 u16 rx_buffersize;
130 /* The MMIO base register of the DMA controller, this 234 /* The MMIO base register of the DMA controller. */
131 * ring is posted to.
132 */
133 u16 mmio_base; 235 u16 mmio_base;
236 /* DMA controller index number (0-5). */
237 int index;
134 u8 tx:1, /* TRUE, if this is a TX ring. */ 238 u8 tx:1, /* TRUE, if this is a TX ring. */
239 dma64:1, /* TRUE, if 64-bit DMA is enabled (FALSE if 32bit). */
135 suspended:1; /* TRUE, if transfers are suspended on this ring. */ 240 suspended:1; /* TRUE, if transfers are suspended on this ring. */
241 struct bcm43xx_private *bcm;
136#ifdef CONFIG_BCM43XX_DEBUG 242#ifdef CONFIG_BCM43XX_DEBUG
137 /* Maximum number of used slots. */ 243 /* Maximum number of used slots. */
138 int max_used_slots; 244 int max_used_slots;
@@ -141,6 +247,34 @@ struct bcm43xx_dmaring {
141 247
142 248
143static inline 249static inline
250int bcm43xx_dma_desc2idx(struct bcm43xx_dmaring *ring,
251 struct bcm43xx_dmadesc_generic *desc)
252{
253 if (ring->dma64) {
254 struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
255 return (int)(&(desc->dma64) - dd64);
256 } else {
257 struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
258 return (int)(&(desc->dma32) - dd32);
259 }
260}
261
262static inline
263struct bcm43xx_dmadesc_generic * bcm43xx_dma_idx2desc(struct bcm43xx_dmaring *ring,
264 int slot,
265 struct bcm43xx_dmadesc_meta **meta)
266{
267 *meta = &(ring->meta[slot]);
268 if (ring->dma64) {
269 struct bcm43xx_dmadesc64 *dd64 = ring->descbase;
270 return (struct bcm43xx_dmadesc_generic *)(&(dd64[slot]));
271 } else {
272 struct bcm43xx_dmadesc32 *dd32 = ring->descbase;
273 return (struct bcm43xx_dmadesc_generic *)(&(dd32[slot]));
274 }
275}
276
277static inline
144u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring, 278u32 bcm43xx_dma_read(struct bcm43xx_dmaring *ring,
145 u16 offset) 279 u16 offset)
146{ 280{
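
The two inline helpers added in the hunk above hide the size difference between the 32-bit and 64-bit descriptors: bcm43xx_dma_idx2desc() indexes descbase as whichever concrete descriptor array it really holds, and bcm43xx_dma_desc2idx() recovers the slot by pointer subtraction over that same element type. A usage sketch; walk_ring() is purely illustrative and not part of the patch:

/* Sketch: iterate all slots of a ring through the generic accessors. */
static void walk_ring(struct bcm43xx_dmaring *ring)
{
	struct bcm43xx_dmadesc_generic *desc;
	struct bcm43xx_dmadesc_meta *meta;
	int slot;

	for (slot = 0; slot < ring->nr_slots; slot++) {
		desc = bcm43xx_dma_idx2desc(ring, slot, &meta);
		/* desc2idx() inverts idx2desc(): the subtraction is done on
		 * the concrete descriptor type, so the element size (8 bytes
		 * for dma32, 16 bytes for dma64) is accounted for. */
		assert(bcm43xx_dma_desc2idx(ring, desc) == slot);
		/* meta (e.g. meta->skb) describes the buffer attached to
		 * this slot; see bcm43xx_dmadesc_meta above. */
	}
}
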
@@ -159,9 +293,13 @@ int bcm43xx_dma_init(struct bcm43xx_private *bcm);
159void bcm43xx_dma_free(struct bcm43xx_private *bcm); 293void bcm43xx_dma_free(struct bcm43xx_private *bcm);
160 294
161int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, 295int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
162 u16 dmacontroller_mmio_base); 296 u16 dmacontroller_mmio_base,
297 int dma64);
163int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, 298int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
164 u16 dmacontroller_mmio_base); 299 u16 dmacontroller_mmio_base,
300 int dma64);
301
302u16 bcm43xx_dmacontroller_base(int dma64bit, int dmacontroller_idx);
165 303
166void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring); 304void bcm43xx_dma_tx_suspend(struct bcm43xx_dmaring *ring);
167void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring); 305void bcm43xx_dma_tx_resume(struct bcm43xx_dmaring *ring);
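
The reworked prototypes make the engine geometry explicit: both reset helpers now need to know whether the engine runs in 64-bit mode, and bcm43xx_dmacontroller_base() maps a (dma64, index) pair to the matching MMIO base. A sketch of how a teardown path might combine them, assuming the ring fields introduced above (tx, dma64, index, bcm); reset_ring_engine() is hypothetical and skips error handling:

/* Sketch: reset the DMA engine backing one ring with the new API. */
static void reset_ring_engine(struct bcm43xx_dmaring *ring)
{
	u16 mmio_base = bcm43xx_dmacontroller_base(ring->dma64, ring->index);

	if (ring->tx)
		bcm43xx_dmacontroller_tx_reset(ring->bcm, mmio_base, ring->dma64);
	else
		bcm43xx_dmacontroller_rx_reset(ring->bcm, mmio_base, ring->dma64);
}
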
@@ -173,7 +311,6 @@ int bcm43xx_dma_tx(struct bcm43xx_private *bcm,
173 struct ieee80211_txb *txb); 311 struct ieee80211_txb *txb);
174void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring); 312void bcm43xx_dma_rx(struct bcm43xx_dmaring *ring);
175 313
176
177#else /* CONFIG_BCM43XX_DMA */ 314#else /* CONFIG_BCM43XX_DMA */
178 315
179 316
@@ -188,13 +325,15 @@ void bcm43xx_dma_free(struct bcm43xx_private *bcm)
188} 325}
189static inline 326static inline
190int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm, 327int bcm43xx_dmacontroller_rx_reset(struct bcm43xx_private *bcm,
191 u16 dmacontroller_mmio_base) 328 u16 dmacontroller_mmio_base,
329 int dma64)
192{ 330{
193 return 0; 331 return 0;
194} 332}
195static inline 333static inline
196int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm, 334int bcm43xx_dmacontroller_tx_reset(struct bcm43xx_private *bcm,
197 u16 dmacontroller_mmio_base) 335 u16 dmacontroller_mmio_base,
336 int dma64)
198{ 337{
199 return 0; 338 return 0;
200} 339}
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_main.c b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
index b095f3cc6730..6dd475eb5510 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_main.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_main.c
@@ -1371,6 +1371,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
1371 if ((bcm43xx_core_enabled(bcm)) && 1371 if ((bcm43xx_core_enabled(bcm)) &&
1372 !bcm43xx_using_pio(bcm)) { 1372 !bcm43xx_using_pio(bcm)) {
1373//FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here? 1373//FIXME: Do we _really_ want #ifndef CONFIG_BCM947XX here?
1374#if 0
1374#ifndef CONFIG_BCM947XX 1375#ifndef CONFIG_BCM947XX
1375 /* reset all used DMA controllers. */ 1376 /* reset all used DMA controllers. */
1376 bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE); 1377 bcm43xx_dmacontroller_tx_reset(bcm, BCM43xx_MMIO_DMA1_BASE);
@@ -1381,6 +1382,7 @@ void bcm43xx_wireless_core_reset(struct bcm43xx_private *bcm, int connect_phy)
1381 if (bcm->current_core->rev < 5) 1382 if (bcm->current_core->rev < 5)
1382 bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE); 1383 bcm43xx_dmacontroller_rx_reset(bcm, BCM43xx_MMIO_DMA4_BASE);
1383#endif 1384#endif
1385#endif
1384 } 1386 }
1385 if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) { 1387 if (bcm43xx_status(bcm) == BCM43xx_STAT_SHUTTINGDOWN) {
1386 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD, 1388 bcm43xx_write32(bcm, BCM43xx_MMIO_STATUS_BITFIELD,
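
The reset block earlier in this hunk is compiled out with #if 0 rather than converted, presumably because the old calls neither use the renumbered engine bases nor carry the dma64 flag the reset helpers now require. Purely as an illustration of what a converted block could look like (reset_all_dma_engines() is hypothetical; the patch itself simply leaves the old code disabled):

/* Hypothetical conversion of the disabled block to the new API; the dma64
 * flag would have to be supplied by the DMA setup code. */
static void reset_all_dma_engines(struct bcm43xx_private *bcm, int dma64)
{
	int i;

	for (i = 0; i < 6; i++)
		bcm43xx_dmacontroller_tx_reset(bcm,
				bcm43xx_dmacontroller_base(dma64, i), dma64);
	/* Only engines 0 and 3 carry RX traffic (see the rx_ring0/rx_ring3
	 * handling further down in this file). */
	bcm43xx_dmacontroller_rx_reset(bcm,
			bcm43xx_dmacontroller_base(dma64, 0), dma64);
	bcm43xx_dmacontroller_rx_reset(bcm,
			bcm43xx_dmacontroller_base(dma64, 3), dma64);
}
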
@@ -1671,8 +1673,9 @@ static void handle_irq_beacon(struct bcm43xx_private *bcm)
1671static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm) 1673static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1672{ 1674{
1673 u32 reason; 1675 u32 reason;
1674 u32 dma_reason[4]; 1676 u32 dma_reason[6];
1675 int activity = 0; 1677 u32 merged_dma_reason = 0;
1678 int i, activity = 0;
1676 unsigned long flags; 1679 unsigned long flags;
1677 1680
1678#ifdef CONFIG_BCM43XX_DEBUG 1681#ifdef CONFIG_BCM43XX_DEBUG
@@ -1684,10 +1687,10 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1684 1687
1685 spin_lock_irqsave(&bcm->irq_lock, flags); 1688 spin_lock_irqsave(&bcm->irq_lock, flags);
1686 reason = bcm->irq_reason; 1689 reason = bcm->irq_reason;
1687 dma_reason[0] = bcm->dma_reason[0]; 1690 for (i = 5; i >= 0; i--) {
1688 dma_reason[1] = bcm->dma_reason[1]; 1691 dma_reason[i] = bcm->dma_reason[i];
1689 dma_reason[2] = bcm->dma_reason[2]; 1692 merged_dma_reason |= dma_reason[i];
1690 dma_reason[3] = bcm->dma_reason[3]; 1693 }
1691 1694
1692 if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) { 1695 if (unlikely(reason & BCM43xx_IRQ_XMIT_ERROR)) {
1693 /* TX error. We get this when Template RAM is written in the wrong endianness 1696
@@ -1698,27 +1701,25 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1698 printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n"); 1701 printkl(KERN_ERR PFX "FATAL ERROR: BCM43xx_IRQ_XMIT_ERROR\n");
1699 bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR); 1702 bcmirq_handled(BCM43xx_IRQ_XMIT_ERROR);
1700 } 1703 }
1701 if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_FATALMASK) | 1704 if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_FATALMASK)) {
1702 (dma_reason[1] & BCM43xx_DMAIRQ_FATALMASK) |
1703 (dma_reason[2] & BCM43xx_DMAIRQ_FATALMASK) |
1704 (dma_reason[3] & BCM43xx_DMAIRQ_FATALMASK))) {
1705 printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: " 1705 printkl(KERN_ERR PFX "FATAL ERROR: Fatal DMA error: "
1706 "0x%08X, 0x%08X, 0x%08X, 0x%08X\n", 1706 "0x%08X, 0x%08X, 0x%08X, "
1707 "0x%08X, 0x%08X, 0x%08X\n",
1707 dma_reason[0], dma_reason[1], 1708 dma_reason[0], dma_reason[1],
1708 dma_reason[2], dma_reason[3]); 1709 dma_reason[2], dma_reason[3],
1710 dma_reason[4], dma_reason[5]);
1709 bcm43xx_controller_restart(bcm, "DMA error"); 1711 bcm43xx_controller_restart(bcm, "DMA error");
1710 mmiowb(); 1712 mmiowb();
1711 spin_unlock_irqrestore(&bcm->irq_lock, flags); 1713 spin_unlock_irqrestore(&bcm->irq_lock, flags);
1712 return; 1714 return;
1713 } 1715 }
1714 if (unlikely((dma_reason[0] & BCM43xx_DMAIRQ_NONFATALMASK) | 1716 if (unlikely(merged_dma_reason & BCM43xx_DMAIRQ_NONFATALMASK)) {
1715 (dma_reason[1] & BCM43xx_DMAIRQ_NONFATALMASK) |
1716 (dma_reason[2] & BCM43xx_DMAIRQ_NONFATALMASK) |
1717 (dma_reason[3] & BCM43xx_DMAIRQ_NONFATALMASK))) {
1718 printkl(KERN_ERR PFX "DMA error: " 1717 printkl(KERN_ERR PFX "DMA error: "
1719 "0x%08X, 0x%08X, 0x%08X, 0x%08X\n", 1718 "0x%08X, 0x%08X, 0x%08X, "
1719 "0x%08X, 0x%08X, 0x%08X\n",
1720 dma_reason[0], dma_reason[1], 1720 dma_reason[0], dma_reason[1],
1721 dma_reason[2], dma_reason[3]); 1721 dma_reason[2], dma_reason[3],
1722 dma_reason[4], dma_reason[5]);
1722 } 1723 }
1723 1724
1724 if (reason & BCM43xx_IRQ_PS) { 1725 if (reason & BCM43xx_IRQ_PS) {
@@ -1753,8 +1754,6 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1753 } 1754 }
1754 1755
1755 /* Check the DMA reason registers for received data. */ 1756 /* Check the DMA reason registers for received data. */
1756 assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
1757 assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
1758 if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) { 1757 if (dma_reason[0] & BCM43xx_DMAIRQ_RX_DONE) {
1759 if (bcm43xx_using_pio(bcm)) 1758 if (bcm43xx_using_pio(bcm))
1760 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0); 1759 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue0);
@@ -1762,13 +1761,17 @@ static void bcm43xx_interrupt_tasklet(struct bcm43xx_private *bcm)
1762 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0); 1761 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring0);
1763 /* We intentionally don't set "activity" to 1, here. */ 1762 /* We intentionally don't set "activity" to 1, here. */
1764 } 1763 }
1764 assert(!(dma_reason[1] & BCM43xx_DMAIRQ_RX_DONE));
1765 assert(!(dma_reason[2] & BCM43xx_DMAIRQ_RX_DONE));
1765 if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) { 1766 if (dma_reason[3] & BCM43xx_DMAIRQ_RX_DONE) {
1766 if (bcm43xx_using_pio(bcm)) 1767 if (bcm43xx_using_pio(bcm))
1767 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3); 1768 bcm43xx_pio_rx(bcm43xx_current_pio(bcm)->queue3);
1768 else 1769 else
1769 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring1); 1770 bcm43xx_dma_rx(bcm43xx_current_dma(bcm)->rx_ring3);
1770 activity = 1; 1771 activity = 1;
1771 } 1772 }
1773 assert(!(dma_reason[4] & BCM43xx_DMAIRQ_RX_DONE));
1774 assert(!(dma_reason[5] & BCM43xx_DMAIRQ_RX_DONE));
1772 bcmirq_handled(BCM43xx_IRQ_RX); 1775 bcmirq_handled(BCM43xx_IRQ_RX);
1773 1776
1774 if (reason & BCM43xx_IRQ_XMIT_STATUS) { 1777 if (reason & BCM43xx_IRQ_XMIT_STATUS) {
@@ -1825,14 +1828,18 @@ static void bcm43xx_interrupt_ack(struct bcm43xx_private *bcm, u32 reason)
1825 1828
1826 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason); 1829 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, reason);
1827 1830
1828 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON, 1831 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_REASON,
1829 bcm->dma_reason[0]); 1832 bcm->dma_reason[0]);
1830 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON, 1833 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_REASON,
1831 bcm->dma_reason[1]); 1834 bcm->dma_reason[1]);
1832 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON, 1835 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_REASON,
1833 bcm->dma_reason[2]); 1836 bcm->dma_reason[2]);
1834 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON, 1837 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_REASON,
1835 bcm->dma_reason[3]); 1838 bcm->dma_reason[3]);
1839 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_REASON,
1840 bcm->dma_reason[4]);
1841 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_REASON,
1842 bcm->dma_reason[5]);
1836} 1843}
1837 1844
1838/* Interrupt handler top-half */ 1845/* Interrupt handler top-half */
@@ -1860,14 +1867,18 @@ static irqreturn_t bcm43xx_interrupt_handler(int irq, void *dev_id, struct pt_re
1860 if (!reason) 1867 if (!reason)
1861 goto out; 1868 goto out;
1862 1869
1863 bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON) 1870 bcm->dma_reason[0] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA0_REASON)
1864 & 0x0001dc00; 1871 & 0x0001DC00;
1865 bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON) 1872 bcm->dma_reason[1] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA1_REASON)
1866 & 0x0000dc00; 1873 & 0x0000DC00;
1867 bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON) 1874 bcm->dma_reason[2] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA2_REASON)
1868 & 0x0000dc00; 1875 & 0x0000DC00;
1869 bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON) 1876 bcm->dma_reason[3] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA3_REASON)
1870 & 0x0001dc00; 1877 & 0x0001DC00;
1878 bcm->dma_reason[4] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA4_REASON)
1879 & 0x0000DC00;
1880 bcm->dma_reason[5] = bcm43xx_read32(bcm, BCM43xx_MMIO_DMA5_REASON)
1881 & 0x0000DC00;
1871 1882
1872 bcm43xx_interrupt_ack(bcm, reason); 1883 bcm43xx_interrupt_ack(bcm, reason);
1873 1884
@@ -2448,10 +2459,12 @@ static int bcm43xx_chip_init(struct bcm43xx_private *bcm)
2448 bcm43xx_write32(bcm, 0x018C, 0x02000000); 2459 bcm43xx_write32(bcm, 0x018C, 0x02000000);
2449 } 2460 }
2450 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000); 2461 bcm43xx_write32(bcm, BCM43xx_MMIO_GEN_IRQ_REASON, 0x00004000);
2451 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0001DC00); 2462 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA0_IRQ_MASK, 0x0001DC00);
2463 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA1_IRQ_MASK, 0x0000DC00);
2452 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00); 2464 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA2_IRQ_MASK, 0x0000DC00);
2453 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0000DC00); 2465 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA3_IRQ_MASK, 0x0001DC00);
2454 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0001DC00); 2466 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA4_IRQ_MASK, 0x0000DC00);
2467 bcm43xx_write32(bcm, BCM43xx_MMIO_DMA5_IRQ_MASK, 0x0000DC00);
2455 2468
2456 value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW); 2469 value32 = bcm43xx_read32(bcm, BCM43xx_CIR_SBTMSTATELOW);
2457 value32 |= 0x00100000; 2470 value32 |= 0x00100000;
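
The six IRQ-mask writes in the hunk above follow one pattern: every engine gets 0x0000DC00, and engines 0 and 3 additionally get bit 16, which is consistent with the RX-done handling of dma_reason[0] and dma_reason[3] earlier in this file. The same writes expressed as a loop, purely as a sketch (dma_irq_mask_regs[] and set_dma_irq_masks() are illustrative, not driver code; the register names and mask values are taken from the hunk):

/* Sketch: the per-engine IRQ mask setup from bcm43xx_chip_init() as a loop. */
static const u16 dma_irq_mask_regs[6] = {
	BCM43xx_MMIO_DMA0_IRQ_MASK, BCM43xx_MMIO_DMA1_IRQ_MASK,
	BCM43xx_MMIO_DMA2_IRQ_MASK, BCM43xx_MMIO_DMA3_IRQ_MASK,
	BCM43xx_MMIO_DMA4_IRQ_MASK, BCM43xx_MMIO_DMA5_IRQ_MASK,
};

static void set_dma_irq_masks(struct bcm43xx_private *bcm)
{
	int i;

	for (i = 0; i < 6; i++)
		bcm43xx_write32(bcm, dma_irq_mask_regs[i],
				(i == 0 || i == 3) ? 0x0001DC00 : 0x0000DC00);
}
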