author	Alan Cox <alan@linux.intel.com>	2009-10-06 10:49:45 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-12-11 15:23:07 -0500
commit	c78732ad75b3917f6a93292be1612527d9c0be9d (patch)
tree	b5da4b71b20479f2596b69c99f3cd504176bb7e5 /drivers/staging
parent	9251d71a4ec3e20189bb182cdc3af00f1152da81 (diff)
Staging: et131x: Clean up tx naming
Clean up the names to be Linux like. Remove the unused pad buffer.

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
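For quick reference while reading the diff, this is the reworked TCB as the patch leaves it, with the struct tx_ring renames listed alongside; the "was ..." comments are added here for comparison only and are not part of the patch itself:

struct tcb {
	struct tcb *next;	/* was Next */
	u32 flags;		/* was Flags */
	u32 count;		/* was Count */
	u32 stale;		/* was PacketStaleCount */
	struct sk_buff *skb;	/* was Packet */
	u32 len;		/* was PacketLength */
	u32 index;		/* was WrIndex */
	u32 index_start;	/* was WrIndexStart */
};

/*
 * In struct tx_ring: MpTcbMem -> tcb_ring, TCBReadyQueueHead/Tail -> tcb_qhead/tcb_qtail,
 * CurrSendHead/Tail -> send_head/send_tail, nBusySend -> used, txDmaReadyToSend -> send_idx,
 * pTxStatusVa/pTxStatusPa -> tx_status/tx_status_pa, TxPacketsSinceLastinterrupt -> since_irq.
 */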
Diffstat (limited to 'drivers/staging')
-rw-r--r--	drivers/staging/et131x/et1310_tx.c	234
-rw-r--r--	drivers/staging/et131x/et1310_tx.h	87
-rw-r--r--	drivers/staging/et131x/et131x_isr.c	4
-rw-r--r--	drivers/staging/et131x/et131x_netdev.c	10
4 files changed, 142 insertions(+), 193 deletions(-)
diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
index e9f30b5a356..b2b4b688446 100644
--- a/drivers/staging/et131x/et1310_tx.c
+++ b/drivers/staging/et131x/et1310_tx.c
@@ -118,9 +118,9 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	struct tx_ring *tx_ring = &adapter->tx_ring;
 
 	/* Allocate memory for the TCB's (Transmit Control Block) */
-	adapter->tx_ring.MpTcbMem = (struct tcb *)
+	adapter->tx_ring.tcb_ring = (struct tcb *)
 		kcalloc(NUM_TCB, sizeof(struct tcb), GFP_ATOMIC | GFP_DMA);
-	if (!adapter->tx_ring.MpTcbMem) {
+	if (!adapter->tx_ring.tcb_ring) {
 		dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
 		return -ENOMEM;
 	}
@@ -145,25 +145,14 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
 	 * storing the adjusted address.
 	 */
 	/* Allocate memory for the Tx status block */
-	tx_ring->pTxStatusVa = pci_alloc_consistent(adapter->pdev,
-						    sizeof(TX_STATUS_BLOCK_t),
-						    &tx_ring->pTxStatusPa);
-	if (!adapter->tx_ring.pTxStatusPa) {
+	tx_ring->tx_status = pci_alloc_consistent(adapter->pdev,
+						    sizeof(u32),
+						    &tx_ring->tx_status_pa);
+	if (!adapter->tx_ring.tx_status_pa) {
 		dev_err(&adapter->pdev->dev,
 			"Cannot alloc memory for Tx status block\n");
 		return -ENOMEM;
 	}
-
-	/* Allocate memory for a dummy buffer */
-	tx_ring->pTxDummyBlkVa = pci_alloc_consistent(adapter->pdev,
-						      NIC_MIN_PACKET_SIZE,
-						      &tx_ring->pTxDummyBlkPa);
-	if (!adapter->tx_ring.pTxDummyBlkPa) {
-		dev_err(&adapter->pdev->dev,
-			"Cannot alloc memory for Tx dummy buffer\n");
-		return -ENOMEM;
-	}
-
 	return 0;
 }
 
@@ -189,27 +178,16 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
 	}
 
 	/* Free memory for the Tx status block */
-	if (adapter->tx_ring.pTxStatusVa) {
-		pci_free_consistent(adapter->pdev,
-				    sizeof(TX_STATUS_BLOCK_t),
-				    adapter->tx_ring.pTxStatusVa,
-				    adapter->tx_ring.pTxStatusPa);
-
-		adapter->tx_ring.pTxStatusVa = NULL;
-	}
-
-	/* Free memory for the dummy buffer */
-	if (adapter->tx_ring.pTxDummyBlkVa) {
+	if (adapter->tx_ring.tx_status) {
 		pci_free_consistent(adapter->pdev,
-				    NIC_MIN_PACKET_SIZE,
-				    adapter->tx_ring.pTxDummyBlkVa,
-				    adapter->tx_ring.pTxDummyBlkPa);
+				    sizeof(u32),
+				    adapter->tx_ring.tx_status,
+				    adapter->tx_ring.tx_status_pa);
 
-		adapter->tx_ring.pTxDummyBlkVa = NULL;
+		adapter->tx_ring.tx_status = NULL;
 	}
-
 	/* Free the memory for the tcb structures */
-	kfree(adapter->tx_ring.MpTcbMem);
+	kfree(adapter->tx_ring.tcb_ring);
 }
 
 /**
@@ -230,14 +208,14 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 	writel(NUM_DESC_PER_RING_TX - 1, &txdma->pr_num_des.value);
 
 	/* Load the completion writeback physical address */
-	writel((u32)((u64)etdev->tx_ring.pTxStatusPa >> 32),
+	writel((u32)((u64)etdev->tx_ring.tx_status_pa >> 32),
 	       &txdma->dma_wb_base_hi);
-	writel((u32)etdev->tx_ring.pTxStatusPa, &txdma->dma_wb_base_lo);
+	writel((u32)etdev->tx_ring.tx_status_pa, &txdma->dma_wb_base_lo);
 
-	memset(etdev->tx_ring.pTxStatusVa, 0, sizeof(TX_STATUS_BLOCK_t));
+	*etdev->tx_ring.tx_status = 0;
 
 	writel(0, &txdma->service_request);
-	etdev->tx_ring.txDmaReadyToSend = 0;
+	etdev->tx_ring.send_idx = 0;
 }
 
 /**
@@ -278,26 +256,26 @@ void et131x_init_send(struct et131x_adapter *adapter)
 
 	/* Setup some convenience pointers */
 	tx_ring = &adapter->tx_ring;
-	tcb = adapter->tx_ring.MpTcbMem;
+	tcb = adapter->tx_ring.tcb_ring;
 
-	tx_ring->TCBReadyQueueHead = tcb;
+	tx_ring->tcb_qhead = tcb;
 
 	memset(tcb, 0, sizeof(struct tcb) * NUM_TCB);
 
 	/* Go through and set up each TCB */
-	for (ct = 0; ct++ < NUM_TCB; tcb++) {
+	for (ct = 0; ct++ < NUM_TCB; tcb++)
 		/* Set the link pointer in HW TCB to the next TCB in the
 		 * chain. If this is the last TCB in the chain, also set the
 		 * tail pointer.
 		 */
-		tcb->Next = tcb + 1;
+		tcb->next = tcb + 1;
 
 	tcb--;
-	tx_ring->TCBReadyQueueTail = tcb;
-	tcb->Next = NULL;
+	tx_ring->tcb_qtail = tcb;
+	tcb->next = NULL;
 	/* Curr send queue should now be empty */
-	tx_ring->CurrSendHead = NULL;
-	tx_ring->CurrSendTail = NULL;
+	tx_ring->send_head = NULL;
+	tx_ring->send_tail = NULL;
 }
 
 /**
@@ -321,7 +299,7 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
 	 */
 
 	/* TCB is not available */
-	if (etdev->tx_ring.nBusySend >= NUM_TCB) {
+	if (etdev->tx_ring.used >= NUM_TCB) {
 		/* NOTE: If there's an error on send, no need to queue the
 		 * packet under Linux; if we just send an error up to the
 		 * netif layer, it will resend the skb to us.
@@ -376,35 +354,35 @@ static int et131x_send_packet(struct sk_buff *skb,
 	/* Get a TCB for this packet */
 	spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
-	tcb = etdev->tx_ring.TCBReadyQueueHead;
+	tcb = etdev->tx_ring.tcb_qhead;
 
 	if (tcb == NULL) {
 		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 		return -ENOMEM;
 	}
 
-	etdev->tx_ring.TCBReadyQueueHead = tcb->Next;
+	etdev->tx_ring.tcb_qhead = tcb->next;
 
-	if (etdev->tx_ring.TCBReadyQueueHead == NULL)
-		etdev->tx_ring.TCBReadyQueueTail = NULL;
+	if (etdev->tx_ring.tcb_qhead == NULL)
+		etdev->tx_ring.tcb_qtail = NULL;
 
 	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 
-	tcb->PacketLength = skb->len;
-	tcb->Packet = skb;
+	tcb->len = skb->len;
+	tcb->skb = skb;
 
 	if ((skb->data != NULL) && ((skb->len - skb->data_len) >= 6)) {
 		shbufva = (u16 *) skb->data;
 
 		if ((shbufva[0] == 0xffff) &&
 		    (shbufva[1] == 0xffff) && (shbufva[2] == 0xffff)) {
-			tcb->Flags |= fMP_DEST_BROAD;
+			tcb->flags |= fMP_DEST_BROAD;
 		} else if ((shbufva[0] & 0x3) == 0x0001) {
-			tcb->Flags |= fMP_DEST_MULTI;
+			tcb->flags |= fMP_DEST_MULTI;
 		}
 	}
 
-	tcb->Next = NULL;
+	tcb->next = NULL;
 
 	/* Call the NIC specific send handler. */
 	status = nic_send_packet(etdev, tcb);
@@ -412,18 +390,18 @@ static int et131x_send_packet(struct sk_buff *skb,
 	if (status != 0) {
 		spin_lock_irqsave(&etdev->TCBReadyQLock, flags);
 
-		if (etdev->tx_ring.TCBReadyQueueTail) {
-			etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
+		if (etdev->tx_ring.tcb_qtail) {
+			etdev->tx_ring.tcb_qtail->next = tcb;
 		} else {
 			/* Apparently ready Q is empty. */
-			etdev->tx_ring.TCBReadyQueueHead = tcb;
+			etdev->tx_ring.tcb_qhead = tcb;
 		}
 
-		etdev->tx_ring.TCBReadyQueueTail = tcb;
+		etdev->tx_ring.tcb_qtail = tcb;
 		spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
 		return status;
 	}
-	WARN_ON(etdev->tx_ring.nBusySend > NUM_TCB);
+	WARN_ON(etdev->tx_ring.used > NUM_TCB);
 	return 0;
 }
 
@@ -440,7 +418,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 	struct tx_desc desc[24];	/* 24 x 16 byte */
 	u32 frag = 0;
 	u32 thiscopy, remainder;
-	struct sk_buff *skb = tcb->Packet;
+	struct sk_buff *skb = tcb->skb;
 	u32 nr_frags = skb_shinfo(skb)->nr_frags + 1;
 	struct skb_frag_struct *frags = &skb_shinfo(skb)->frags[0];
 	unsigned long flags;
@@ -558,26 +536,26 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 		return -EIO;
 
 	if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
-		if (++etdev->tx_ring.TxPacketsSinceLastinterrupt ==
+		if (++etdev->tx_ring.since_irq ==
 		    PARM_TX_NUM_BUFS_DEF) {
 			/* Last element & Interrupt flag */
 			desc[frag - 1].flags = 0x5;
-			etdev->tx_ring.TxPacketsSinceLastinterrupt = 0;
+			etdev->tx_ring.since_irq = 0;
 		} else { /* Last element */
 			desc[frag - 1].flags = 0x1;
 		}
-	} else {
+	} else
 		desc[frag - 1].flags = 0x5;
-	}
+
 	desc[0].flags |= 2;	/* First element flag */
 
-	tcb->WrIndexStart = etdev->tx_ring.txDmaReadyToSend;
-	tcb->PacketStaleCount = 0;
+	tcb->index_start = etdev->tx_ring.send_idx;
+	tcb->stale = 0;
 
 	spin_lock_irqsave(&etdev->SendHWLock, flags);
 
 	thiscopy = NUM_DESC_PER_RING_TX -
-				INDEX10(etdev->tx_ring.txDmaReadyToSend);
+				INDEX10(etdev->tx_ring.send_idx);
 
 	if (thiscopy >= frag) {
 		remainder = 0;
@@ -587,15 +565,15 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 	}
 
 	memcpy(etdev->tx_ring.tx_desc_ring +
-	       INDEX10(etdev->tx_ring.txDmaReadyToSend), desc,
+	       INDEX10(etdev->tx_ring.send_idx), desc,
 	       sizeof(struct tx_desc) * thiscopy);
 
-	add_10bit(&etdev->tx_ring.txDmaReadyToSend, thiscopy);
+	add_10bit(&etdev->tx_ring.send_idx, thiscopy);
 
-	if (INDEX10(etdev->tx_ring.txDmaReadyToSend)== 0 ||
-	    INDEX10(etdev->tx_ring.txDmaReadyToSend) == NUM_DESC_PER_RING_TX) {
-		etdev->tx_ring.txDmaReadyToSend &= ~ET_DMA10_MASK;
-		etdev->tx_ring.txDmaReadyToSend ^= ET_DMA10_WRAP;
+	if (INDEX10(etdev->tx_ring.send_idx)== 0 ||
+	    INDEX10(etdev->tx_ring.send_idx) == NUM_DESC_PER_RING_TX) {
+		etdev->tx_ring.send_idx &= ~ET_DMA10_MASK;
+		etdev->tx_ring.send_idx ^= ET_DMA10_WRAP;
 	}
 
 	if (remainder) {
@@ -603,34 +581,34 @@ static int nic_send_packet(struct et131x_adapter *etdev, struct tcb *tcb)
 		       desc + thiscopy,
 		       sizeof(struct tx_desc) * remainder);
 
-		add_10bit(&etdev->tx_ring.txDmaReadyToSend, remainder);
+		add_10bit(&etdev->tx_ring.send_idx, remainder);
 	}
 
-	if (INDEX10(etdev->tx_ring.txDmaReadyToSend) == 0) {
-		if (etdev->tx_ring.txDmaReadyToSend)
-			tcb->WrIndex = NUM_DESC_PER_RING_TX - 1;
+	if (INDEX10(etdev->tx_ring.send_idx) == 0) {
+		if (etdev->tx_ring.send_idx)
+			tcb->index = NUM_DESC_PER_RING_TX - 1;
 		else
-			tcb->WrIndex= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
+			tcb->index= ET_DMA10_WRAP | (NUM_DESC_PER_RING_TX - 1);
 	} else
-		tcb->WrIndex = etdev->tx_ring.txDmaReadyToSend - 1;
+		tcb->index = etdev->tx_ring.send_idx - 1;
 
 	spin_lock(&etdev->TCBSendQLock);
 
-	if (etdev->tx_ring.CurrSendTail)
-		etdev->tx_ring.CurrSendTail->Next = tcb;
+	if (etdev->tx_ring.send_tail)
+		etdev->tx_ring.send_tail->next = tcb;
 	else
-		etdev->tx_ring.CurrSendHead = tcb;
+		etdev->tx_ring.send_head = tcb;
 
-	etdev->tx_ring.CurrSendTail = tcb;
+	etdev->tx_ring.send_tail = tcb;
 
-	WARN_ON(tcb->Next != NULL);
+	WARN_ON(tcb->next != NULL);
 
-	etdev->tx_ring.nBusySend++;
+	etdev->tx_ring.used++;
 
 	spin_unlock(&etdev->TCBSendQLock);
 
 	/* Write the new write pointer back to the device. */
-	writel(etdev->tx_ring.txDmaReadyToSend,
+	writel(etdev->tx_ring.send_idx,
 	       &etdev->regs->txdma.service_request);
 
 	/* For Gig only, we use Tx Interrupt coalescing. Enable the software
@@ -661,15 +639,15 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 	struct tx_desc *desc = NULL;
 	struct net_device_stats *stats = &etdev->net_stats;
 
-	if (tcb->Flags & fMP_DEST_BROAD)
+	if (tcb->flags & fMP_DEST_BROAD)
 		atomic_inc(&etdev->Stats.brdcstxmt);
-	else if (tcb->Flags & fMP_DEST_MULTI)
+	else if (tcb->flags & fMP_DEST_MULTI)
 		atomic_inc(&etdev->Stats.multixmt);
 	else
 		atomic_inc(&etdev->Stats.unixmt);
 
-	if (tcb->Packet) {
-		stats->tx_bytes += tcb->Packet->len;
+	if (tcb->skb) {
+		stats->tx_bytes += tcb->skb->len;
 
 		/* Iterate through the TX descriptors on the ring
 		 * corresponding to this packet and umap the fragments
@@ -677,22 +655,22 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 		 */
 		do {
 			desc =(struct tx_desc *) (etdev->tx_ring.tx_desc_ring +
-						INDEX10(tcb->WrIndexStart));
+						INDEX10(tcb->index_start));
 
 			pci_unmap_single(etdev->pdev,
 					 desc->addr_lo,
 					 desc->len_vlan, PCI_DMA_TODEVICE);
 
-			add_10bit(&tcb->WrIndexStart, 1);
-			if (INDEX10(tcb->WrIndexStart) >=
+			add_10bit(&tcb->index_start, 1);
+			if (INDEX10(tcb->index_start) >=
 			    NUM_DESC_PER_RING_TX) {
-				tcb->WrIndexStart &= ~ET_DMA10_MASK;
-				tcb->WrIndexStart ^= ET_DMA10_WRAP;
+				tcb->index_start &= ~ET_DMA10_MASK;
+				tcb->index_start ^= ET_DMA10_WRAP;
 			}
 		} while (desc != (etdev->tx_ring.tx_desc_ring +
-				INDEX10(tcb->WrIndex)));
+				INDEX10(tcb->index)));
 
-		dev_kfree_skb_any(tcb->Packet);
+		dev_kfree_skb_any(tcb->skb);
 	}
 
 	memset(tcb, 0, sizeof(struct tcb));
@@ -702,16 +680,16 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
 
 	etdev->Stats.opackets++;
 
-	if (etdev->tx_ring.TCBReadyQueueTail)
-		etdev->tx_ring.TCBReadyQueueTail->Next = tcb;
+	if (etdev->tx_ring.tcb_qtail)
+		etdev->tx_ring.tcb_qtail->next = tcb;
 	else
 		/* Apparently ready Q is empty. */
-		etdev->tx_ring.TCBReadyQueueHead = tcb;
+		etdev->tx_ring.tcb_qhead = tcb;
 
-	etdev->tx_ring.TCBReadyQueueTail = tcb;
+	etdev->tx_ring.tcb_qtail = tcb;
 
 	spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-	WARN_ON(etdev->tx_ring.nBusySend < 0);
+	WARN_ON(etdev->tx_ring.used < 0);
 }
 
 /**
@@ -729,17 +707,17 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 	/* Any packets being sent? Check the first TCB on the send list */
 	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-	tcb = etdev->tx_ring.CurrSendHead;
+	tcb = etdev->tx_ring.send_head;
 
 	while ((tcb != NULL) && (freed < NUM_TCB)) {
-		struct tcb *pNext = tcb->Next;
+		struct tcb *next = tcb->next;
 
-		etdev->tx_ring.CurrSendHead = pNext;
+		etdev->tx_ring.send_head = next;
 
-		if (pNext == NULL)
-			etdev->tx_ring.CurrSendTail = NULL;
+		if (next == NULL)
+			etdev->tx_ring.send_tail = NULL;
 
-		etdev->tx_ring.nBusySend--;
+		etdev->tx_ring.used--;
 
 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
@@ -748,14 +726,14 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 
 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-		tcb = etdev->tx_ring.CurrSendHead;
+		tcb = etdev->tx_ring.send_head;
 	}
 
 	WARN_ON(freed == NUM_TCB);
 
 	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
-	etdev->tx_ring.nBusySend = 0;
+	etdev->tx_ring.used = 0;
 }
 
 /**
@@ -782,41 +760,41 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 	 */
 	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-	tcb = etdev->tx_ring.CurrSendHead;
+	tcb = etdev->tx_ring.send_head;
 
 	while (tcb &&
-	       ((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP) &&
-	       index < INDEX10(tcb->WrIndex)) {
-		etdev->tx_ring.nBusySend--;
-		etdev->tx_ring.CurrSendHead = tcb->Next;
-		if (tcb->Next == NULL)
-			etdev->tx_ring.CurrSendTail = NULL;
+	       ((serviced ^ tcb->index) & ET_DMA10_WRAP) &&
+	       index < INDEX10(tcb->index)) {
+		etdev->tx_ring.used--;
+		etdev->tx_ring.send_head = tcb->next;
+		if (tcb->next == NULL)
+			etdev->tx_ring.send_tail = NULL;
 
 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 		et131x_free_send_packet(etdev, tcb);
 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 		/* Goto the next packet */
-		tcb = etdev->tx_ring.CurrSendHead;
+		tcb = etdev->tx_ring.send_head;
 	}
 	while (tcb &&
-	       !((serviced ^ tcb->WrIndex) & ET_DMA10_WRAP)
-	       && index > (tcb->WrIndex & ET_DMA10_MASK)) {
-		etdev->tx_ring.nBusySend--;
-		etdev->tx_ring.CurrSendHead = tcb->Next;
-		if (tcb->Next == NULL)
-			etdev->tx_ring.CurrSendTail = NULL;
+	       !((serviced ^ tcb->index) & ET_DMA10_WRAP)
+	       && index > (tcb->index & ET_DMA10_MASK)) {
+		etdev->tx_ring.used--;
+		etdev->tx_ring.send_head = tcb->next;
+		if (tcb->next == NULL)
+			etdev->tx_ring.send_tail = NULL;
 
 		spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 		et131x_free_send_packet(etdev, tcb);
 		spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
 		/* Goto the next packet */
-		tcb = etdev->tx_ring.CurrSendHead;
+		tcb = etdev->tx_ring.send_head;
 	}
 
 	/* Wake up the queue when we hit a low-water mark */
-	if (etdev->tx_ring.nBusySend <= (NUM_TCB / 3))
+	if (etdev->tx_ring.used <= (NUM_TCB / 3))
 		netif_wake_queue(etdev->netdev);
 
 	spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
diff --git a/drivers/staging/et131x/et1310_tx.h b/drivers/staging/et131x/et1310_tx.h
index 44ea9bb05b2..3e2f281e218 100644
--- a/drivers/staging/et131x/et1310_tx.h
+++ b/drivers/staging/et131x/et1310_tx.h
@@ -97,100 +97,71 @@ struct tx_desc {
 	u32 flags;	/* data (detailed above) */
 };
 
-/* Typedefs for Tx DMA engine status writeback */
-
 /*
- * TX_STATUS_BLOCK_t is sructure representing the status of the Tx DMA engine
- * it sits in free memory, and is pointed to by 0x101c / 0x1020
+ * The status of the Tx DMA engine it sits in free memory, and is pointed to
+ * by 0x101c / 0x1020. This is a DMA10 type
  */
-typedef union _tx_status_block_t {
-	u32 value;
-	struct {
-#ifdef _BIT_FIELDS_HTOL
-		u32 unused:21;		/* bits 11-31 */
-		u32 serv_cpl_wrap:1;	/* bit 10 */
-		u32 serv_cpl:10;	/* bits 0-9 */
-#else
-		u32 serv_cpl:10;	/* bits 0-9 */
-		u32 serv_cpl_wrap:1;	/* bit 10 */
-		u32 unused:21;		/* bits 11-31 */
-#endif
-	} bits;
-} TX_STATUS_BLOCK_t, *PTX_STATUS_BLOCK_t;
-
-/* TCB (Transmit Control Block) */
+
+/* TCB (Transmit Control Block: Host Side) */
 struct tcb {
-	struct tcb *Next;
-	u32 Flags;
-	u32 Count;
-	u32 PacketStaleCount;
-	struct sk_buff *Packet;
-	u32 PacketLength;
-	u32 WrIndex;
-	u32 WrIndexStart;
+	struct tcb *next;	/* Next entry in ring */
+	u32 flags;		/* Our flags for the packet */
+	u32 count;
+	u32 stale;		/* Used to spot stuck/lost packets */
+	struct sk_buff *skb;	/* Network skb we are tied to */
+	u32 len;
+	u32 index;
+	u32 index_start;
 };
 
-/* Structure to hold the skb's in a list */
-typedef struct tx_skb_list_elem {
-	struct list_head skb_list_elem;
-	struct sk_buff *skb;
-} TX_SKB_LIST_ELEM, *PTX_SKB_LIST_ELEM;
-
 /* Structure representing our local reference(s) to the ring */
 struct tx_ring {
 	/* TCB (Transmit Control Block) memory and lists */
-	struct tcb *MpTcbMem;
+	struct tcb *tcb_ring;
 
 	/* List of TCBs that are ready to be used */
-	struct tcb *TCBReadyQueueHead;
-	struct tcb *TCBReadyQueueTail;
+	struct tcb *tcb_qhead;
+	struct tcb *tcb_qtail;
 
 	/* list of TCBs that are currently being sent. NOTE that access to all
-	 * three of these (including nBusySend) are controlled via the
+	 * three of these (including used) are controlled via the
 	 * TCBSendQLock. This lock should be secured prior to incementing /
-	 * decrementing nBusySend, or any queue manipulation on CurrSendHead /
+	 * decrementing used, or any queue manipulation on send_head /
 	 * Tail
 	 */
-	struct tcb *CurrSendHead;
-	struct tcb *CurrSendTail;
-	int nBusySend;
+	struct tcb *send_head;
+	struct tcb *send_tail;
+	int used;
 
 	/* The actual descriptor ring */
 	struct tx_desc *tx_desc_ring;
 	dma_addr_t tx_desc_ring_pa;
 
 	/* ReadyToSend indicates where we last wrote to in the descriptor ring. */
-	u32 txDmaReadyToSend;
+	u32 send_idx;
 
 	/* The location of the write-back status block */
-	PTX_STATUS_BLOCK_t pTxStatusVa;
-	dma_addr_t pTxStatusPa;
-
-	/* A Block of zeroes used to pad packets that are less than 60 bytes */
-	void *pTxDummyBlkVa;
-	dma_addr_t pTxDummyBlkPa;
+	u32 *tx_status;
+	dma_addr_t tx_status_pa;
 
 	TXMAC_ERR_t TxMacErr;
 
 	/* Variables to track the Tx interrupt coalescing features */
-	int TxPacketsSinceLastinterrupt;
+	int since_irq;
 };
 
-/* Forward declaration of the frag-list for the following prototypes */
-typedef struct _MP_FRAG_LIST MP_FRAG_LIST, *PMP_FRAG_LIST;
-
 /* Forward declaration of the private adapter structure */
 struct et131x_adapter;
 
 /* PROTOTYPES for et1310_tx.c */
 int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter);
 void et131x_tx_dma_memory_free(struct et131x_adapter *adapter);
-void ConfigTxDmaRegs(struct et131x_adapter *pAdapter);
+void ConfigTxDmaRegs(struct et131x_adapter *adapter);
 void et131x_init_send(struct et131x_adapter *adapter);
-void et131x_tx_dma_disable(struct et131x_adapter *pAdapter);
-void et131x_tx_dma_enable(struct et131x_adapter *pAdapter);
-void et131x_handle_send_interrupt(struct et131x_adapter *pAdapter);
-void et131x_free_busy_send_packets(struct et131x_adapter *pAdapter);
+void et131x_tx_dma_disable(struct et131x_adapter *adapter);
+void et131x_tx_dma_enable(struct et131x_adapter *adapter);
+void et131x_handle_send_interrupt(struct et131x_adapter *adapter);
+void et131x_free_busy_send_packets(struct et131x_adapter *adapter);
 int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev);
 
 #endif /* __ET1310_TX_H__ */
diff --git a/drivers/staging/et131x/et131x_isr.c b/drivers/staging/et131x/et131x_isr.c
index c22067baa8a..b6dab8c7699 100644
--- a/drivers/staging/et131x/et131x_isr.c
+++ b/drivers/staging/et131x/et131x_isr.c
@@ -179,10 +179,10 @@ irqreturn_t et131x_isr(int irq, void *dev_id)
 	/* This is our interrupt, so process accordingly */
 
 	if (status & ET_INTR_WATCHDOG) {
-		struct tcb *tcb = adapter->tx_ring.CurrSendHead;
+		struct tcb *tcb = adapter->tx_ring.send_head;
 
 		if (tcb)
-			if (++tcb->PacketStaleCount > 1)
+			if (++tcb->stale > 1)
 				status |= ET_INTR_TXDMA_ISR;
 
 		if (adapter->RxRing.UnfinishedReceives)
diff --git a/drivers/staging/et131x/et131x_netdev.c b/drivers/staging/et131x/et131x_netdev.c
index 7c55f5d52d5..24d97b4fa6f 100644
--- a/drivers/staging/et131x/et131x_netdev.c
+++ b/drivers/staging/et131x/et131x_netdev.c
@@ -541,19 +541,19 @@ void et131x_tx_timeout(struct net_device *netdev)
 	/* Is send stuck? */
 	spin_lock_irqsave(&etdev->TCBSendQLock, flags);
 
-	tcb = etdev->tx_ring.CurrSendHead;
+	tcb = etdev->tx_ring.send_head;
 
 	if (tcb != NULL) {
-		tcb->Count++;
+		tcb->count++;
 
-		if (tcb->Count > NIC_SEND_HANG_THRESHOLD) {
+		if (tcb->count > NIC_SEND_HANG_THRESHOLD) {
 			spin_unlock_irqrestore(&etdev->TCBSendQLock,
 					       flags);
 
 			dev_warn(&etdev->pdev->dev,
 				 "Send stuck - reset. tcb->WrIndex %x, Flags 0x%08x\n",
-				 tcb->WrIndex,
-				 tcb->Flags);
+				 tcb->index,
+				 tcb->flags);
 
 			et131x_close(netdev);
 			et131x_open(netdev);