author		Alan Cox <alan@linux.intel.com>		2009-08-27 06:03:09 -0400
committer	Greg Kroah-Hartman <gregkh@suse.de>	2009-09-15 15:02:29 -0400
commit		15700039b108fccc36507bcabdd4dda93f7c4c61 (patch)
tree		31629674e011fb606072d89b913aa169cc689fbc /drivers/staging/et131x/et1310_tx.c
parent		bc7f9c597fa55814548845a7c43f53d6bbbce94b (diff)
Staging: et131x: prune all the debug code

We don't need it; we have a perfectly good set of debug tools. For this
pass, keep a few debug printks around which are "should not happen" items.

Signed-off-by: Alan Cox <alan@linux.intel.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
Diffstat (limited to 'drivers/staging/et131x/et1310_tx.c')
-rw-r--r--	drivers/staging/et131x/et1310_tx.c	592
1 file changed, 11 insertions, 581 deletions
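
The conversion pattern is mechanical throughout: DBG_ENTER()/DBG_LEAVE() entry/exit tracing is deleted outright, DBG_ERROR() messages on failure paths become dev_err() against the PCI device, and DBG_ASSERT() becomes WARN_ON() with the condition inverted, since WARN_ON() fires when its argument is true. A condensed before/after sketch of that pattern, assembled from the hunks below purely for illustration:

        /* Before: driver-private debug macros */
        DBG_ENTER(et131x_dbginfo);
        if (!adapter->TxRing.MpTcbMem) {
                DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
                DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }
        DBG_ASSERT(etdev->TxRing.nBusySend <= NUM_TCB);

        /* After: standard kernel helpers; entry/exit tracing is simply dropped,
         * and the assert condition is inverted for WARN_ON().
         */
        if (!adapter->TxRing.MpTcbMem) {
                dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
                return -ENOMEM;
        }
        WARN_ON(etdev->TxRing.nBusySend > NUM_TCB);
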
diff --git a/drivers/staging/et131x/et1310_tx.c b/drivers/staging/et131x/et1310_tx.c
index 4c18e9a2d2e..387a697c4af 100644
--- a/drivers/staging/et131x/et1310_tx.c
+++ b/drivers/staging/et131x/et1310_tx.c
@@ -56,7 +56,6 @@
  */
 
 #include "et131x_version.h"
-#include "et131x_debug.h"
 #include "et131x_defs.h"
 
 #include <linux/pci.h>
@@ -95,11 +94,6 @@
 #include "et1310_tx.h"
 
 
-/* Data for debugging facilities */
-#ifdef CONFIG_ET131X_DEBUG
-extern dbg_info_t *et131x_dbginfo;
-#endif /* CONFIG_ET131X_DEBUG */
-
 static void et131x_update_tcb_list(struct et131x_adapter *etdev);
 static void et131x_check_send_wait_list(struct et131x_adapter *etdev);
 static inline void et131x_free_send_packet(struct et131x_adapter *etdev,
@@ -125,14 +119,11 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
        int desc_size = 0;
        TX_RING_t *tx_ring = &adapter->TxRing;
 
-       DBG_ENTER(et131x_dbginfo);
-
        /* Allocate memory for the TCB's (Transmit Control Block) */
        adapter->TxRing.MpTcbMem = (MP_TCB *)kcalloc(NUM_TCB, sizeof(MP_TCB),
                                                     GFP_ATOMIC | GFP_DMA);
        if (!adapter->TxRing.MpTcbMem) {
-               DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for TCBs\n");
-               DBG_LEAVE(et131x_dbginfo);
+               dev_err(&adapter->pdev->dev, "Cannot alloc memory for TCBs\n");
                return -ENOMEM;
        }
 
@@ -144,8 +135,7 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
            (PTX_DESC_ENTRY_t) pci_alloc_consistent(adapter->pdev, desc_size,
                                                    &tx_ring->pTxDescRingPa);
        if (!adapter->TxRing.pTxDescRingVa) {
-               DBG_ERROR(et131x_dbginfo, "Cannot alloc memory for Tx Ring\n");
-               DBG_LEAVE(et131x_dbginfo);
+               dev_err(&adapter->pdev->dev, "Cannot alloc memory for Tx Ring\n");
                return -ENOMEM;
        }
 
@@ -170,9 +160,8 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
                                                  sizeof(TX_STATUS_BLOCK_t),
                                                  &tx_ring->pTxStatusPa);
        if (!adapter->TxRing.pTxStatusPa) {
-               DBG_ERROR(et131x_dbginfo,
+               dev_err(&adapter->pdev->dev,
                          "Cannot alloc memory for Tx status block\n");
-               DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }
 
@@ -181,13 +170,11 @@ int et131x_tx_dma_memory_alloc(struct et131x_adapter *adapter)
                                                 NIC_MIN_PACKET_SIZE,
                                                 &tx_ring->pTxDummyBlkPa);
        if (!adapter->TxRing.pTxDummyBlkPa) {
-               DBG_ERROR(et131x_dbginfo,
+               dev_err(&adapter->pdev->dev,
                          "Cannot alloc memory for Tx dummy buffer\n");
-               DBG_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }
 
-       DBG_LEAVE(et131x_dbginfo);
        return 0;
 }
 
@@ -201,8 +188,6 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
 {
        int desc_size = 0;
 
-       DBG_ENTER(et131x_dbginfo);
-
        if (adapter->TxRing.pTxDescRingVa) {
                /* Free memory relating to Tx rings here */
                adapter->TxRing.pTxDescRingVa -= adapter->TxRing.TxDescOffset;
@@ -240,8 +225,6 @@ void et131x_tx_dma_memory_free(struct et131x_adapter *adapter)
 
        /* Free the memory for MP_TCB structures */
        kfree(adapter->TxRing.MpTcbMem);
-
-       DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -252,8 +235,6 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 {
        struct _TXDMA_t __iomem *txdma = &etdev->regs->txdma;
 
-       DBG_ENTER(et131x_dbginfo);
-
        /* Load the hardware with the start of the transmit descriptor ring. */
        writel((uint32_t) (etdev->TxRing.pTxDescRingAdjustedPa >> 32),
               &txdma->pr_base_hi);
@@ -277,8 +258,6 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
 
        writel(0, &txdma->service_request);
        etdev->TxRing.txDmaReadyToSend = 0;
-
-       DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -287,12 +266,8 @@ void ConfigTxDmaRegs(struct et131x_adapter *etdev)
  */
 void et131x_tx_dma_disable(struct et131x_adapter *etdev)
 {
-       DBG_ENTER(et131x_dbginfo);
-
        /* Setup the tramsmit dma configuration register */
        writel(0x101, &etdev->regs->txdma.csr.value);
-
-       DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -303,8 +278,6 @@ void et131x_tx_dma_disable(struct et131x_adapter *etdev)
  */
 void et131x_tx_dma_enable(struct et131x_adapter *etdev)
 {
-       DBG_ENTER(et131x_dbginfo);
-
        if (etdev->RegistryPhyLoopbk) {
                /* TxDMA is disabled for loopback operation. */
                writel(0x101, &etdev->regs->txdma.csr.value);
@@ -319,8 +292,6 @@ void et131x_tx_dma_enable(struct et131x_adapter *etdev)
                csr.bits.cache_thrshld = PARM_DMA_CACHE_DEF;
                writel(csr.value, &etdev->regs->txdma.csr.value);
        }
-
-       DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -333,8 +304,6 @@ void et131x_init_send(struct et131x_adapter *adapter)
        uint32_t TcbCount;
        TX_RING_t *tx_ring;
 
-       DBG_ENTER(et131x_dbginfo);
-
        /* Setup some convenience pointers */
        tx_ring = &adapter->TxRing;
        pMpTcb = adapter->TxRing.MpTcbMem;
@@ -364,8 +333,6 @@ void et131x_init_send(struct et131x_adapter *adapter)
        tx_ring->CurrSendTail = (PMP_TCB) NULL;
 
        INIT_LIST_HEAD(&adapter->TxRing.SendWaitQueue);
-
-       DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -380,8 +347,6 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
        int status = 0;
        struct et131x_adapter *etdev = NULL;
 
-       DBG_TX_ENTER(et131x_dbginfo);
-
        etdev = netdev_priv(netdev);
 
        /* Send these packets
@@ -397,7 +362,6 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
                 * packet under Linux; if we just send an error up to the
                 * netif layer, it will resend the skb to us.
                 */
-               DBG_VERBOSE(et131x_dbginfo, "TCB Resources Not Available\n");
                status = -ENOMEM;
        } else {
                /* We need to see if the link is up; if it's not, make the
@@ -409,9 +373,6 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
                 */
                if (MP_SHOULD_FAIL_SEND(etdev) || etdev->DriverNoPhyAccess
                    || !netif_carrier_ok(netdev)) {
-                       DBG_VERBOSE(et131x_dbginfo,
-                                   "Can't Tx, Link is DOWN; drop the packet\n");
-
                        dev_kfree_skb_any(skb);
                        skb = NULL;
 
@@ -426,24 +387,16 @@ int et131x_send_packets(struct sk_buff *skb, struct net_device *netdev)
                                 * send an error up to the netif layer, it
                                 * will resend the skb to us.
                                 */
-                               DBG_WARNING(et131x_dbginfo,
-                                           "Resources problem, Queue tx packet\n");
                        } else if (status != 0) {
                                /* On any other error, make netif think we're
                                 * OK and drop the packet
                                 */
-                               DBG_WARNING(et131x_dbginfo,
-                                           "General error, drop packet\n");
-
                                dev_kfree_skb_any(skb);
                                skb = NULL;
-
                                etdev->net_stats.tx_dropped++;
                        }
                }
        }
-
-       DBG_TX_LEAVE(et131x_dbginfo);
        return status;
 }
 
@@ -464,21 +417,8 @@ static int et131x_send_packet(struct sk_buff *skb,
        uint16_t *shbufva;
        unsigned long flags;
 
-       DBG_TX_ENTER(et131x_dbginfo);
-
-       /* Is our buffer scattered, or continuous? */
-       if (skb_shinfo(skb)->nr_frags == 0) {
-               DBG_TX(et131x_dbginfo, "Scattered buffer: NO\n");
-       } else {
-               DBG_TX(et131x_dbginfo, "Scattered buffer: YES, Num Frags: %d\n",
-                      skb_shinfo(skb)->nr_frags);
-       }
-
        /* All packets must have at least a MAC address and a protocol type */
        if (skb->len < ETH_HLEN) {
-               DBG_ERROR(et131x_dbginfo,
-                         "Packet size < ETH_HLEN (14 bytes)\n");
-               DBG_LEAVE(et131x_dbginfo);
                return -EIO;
        }
 
@@ -489,9 +429,6 @@ static int et131x_send_packet(struct sk_buff *skb,
 
        if (pMpTcb == NULL) {
                spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-
-               DBG_WARNING(et131x_dbginfo, "Can't obtain a TCB\n");
-               DBG_TX_LEAVE(et131x_dbginfo);
                return -ENOMEM;
        }
 
@@ -533,16 +470,10 @@ static int et131x_send_packet(struct sk_buff *skb,
                }
 
                etdev->TxRing.TCBReadyQueueTail = pMpTcb;
-
                spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-
-               DBG_TX_LEAVE(et131x_dbginfo);
                return status;
        }
-
-       DBG_ASSERT(etdev->TxRing.nBusySend <= NUM_TCB);
-
-       DBG_TX_LEAVE(et131x_dbginfo);
+       WARN_ON(etdev->TxRing.nBusySend > NUM_TCB);
        return 0;
 }
 
@@ -564,8 +495,6 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
        struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
        unsigned long flags;
 
-       DBG_TX_ENTER(et131x_dbginfo);
-
        /* Part of the optimizations of this send routine restrict us to
         * sending 24 fragments at a pass. In practice we should never see
         * more than 5 fragments.
@@ -575,7 +504,6 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
         * although it is less efficient.
         */
        if (FragListCount > 23) {
-               DBG_TX_LEAVE(et131x_dbginfo);
                return -EIO;
        }
 
@@ -596,15 +524,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
                         * doesn't seem to like large fragments.
                         */
                        if ((pPacket->len - pPacket->data_len) <= 1514) {
-                               DBG_TX(et131x_dbginfo,
-                                      "Got packet of length %d, "
-                                      "filling desc entry %d, "
-                                      "TCB: 0x%p\n",
-                                      (pPacket->len - pPacket->data_len),
-                                      etdev->TxRing.txDmaReadyToSend, pMpTcb);
-
                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
-
                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    pPacket->len - pPacket->data_len;
@@ -624,15 +544,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
                                                   pPacket->data_len,
                                                   PCI_DMA_TODEVICE);
                        } else {
-                               DBG_TX(et131x_dbginfo,
-                                      "Got packet of length %d, "
-                                      "filling desc entry %d, "
-                                      "TCB: 0x%p\n",
-                                      (pPacket->len - pPacket->data_len),
-                                      etdev->TxRing.txDmaReadyToSend, pMpTcb);
-
                                CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
-
                                CurDesc[FragmentNumber].word2.bits.
                                    length_in_bytes =
                                    ((pPacket->len - pPacket->data_len) / 2);
@@ -675,16 +587,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
                                           PCI_DMA_TODEVICE);
                        }
                } else {
-                       DBG_TX(et131x_dbginfo,
-                              "Got packet of length %d,"
-                              "filling desc entry %d\n"
-                              "TCB: 0x%p\n",
-                              pFragList[loopIndex].size,
-                              etdev->TxRing.txDmaReadyToSend,
-                              pMpTcb);
-
                        CurDesc[FragmentNumber].DataBufferPtrHigh = 0;
-
                        CurDesc[FragmentNumber].word2.bits.length_in_bytes =
                            pFragList[loopIndex - 1].size;
 
@@ -703,10 +606,8 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
                }
        }
 
-       if (FragmentNumber == 0) {
-               DBG_WARNING(et131x_dbginfo, "No. frags is 0\n");
+       if (FragmentNumber == 0)
                return -EIO;
-       }
 
        if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
                if (++etdev->TxRing.TxPacketsSinceLastinterrupt ==
@@ -774,7 +675,7 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
 
        etdev->TxRing.CurrSendTail = pMpTcb;
 
-       DBG_ASSERT(pMpTcb->Next == NULL);
+       WARN_ON(pMpTcb->Next != NULL);
 
        etdev->TxRing.nBusySend++;
 
@@ -791,432 +692,11 @@ static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
                writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
                       &etdev->regs->global.watchdog_timer);
        }
-
        spin_unlock_irqrestore(&etdev->SendHWLock, flags);
 
-       DBG_TX_LEAVE(et131x_dbginfo);
        return 0;
 }
 
-/*
- * NOTE: For now, keep this older version of NICSendPacket around for
- * reference, even though it's not used
- */
-#if 0
-
-/**
- * NICSendPacket - NIC specific send handler.
- * @etdev: pointer to our adapter
- * @pMpTcb: pointer to MP_TCB
- *
- * Returns 0 on succes, errno on failure.
- *
- * This version of the send routine is designed for version A silicon.
- * Assumption - Send spinlock has been acquired.
- */
-static int nic_send_packet(struct et131x_adapter *etdev, PMP_TCB pMpTcb)
-{
-       uint32_t loopIndex, fragIndex, loopEnd;
-       uint32_t splitfirstelem = 0;
-       uint32_t SegmentSize = 0;
-       TX_DESC_ENTRY_t CurDesc;
-       TX_DESC_ENTRY_t *CurDescPostCopy = NULL;
-       uint32_t SlotsAvailable;
-       DMA10W_t ServiceComplete;
-       unsigned int flags;
-       struct sk_buff *pPacket = pMpTcb->Packet;
-       uint32_t FragListCount = skb_shinfo(pPacket)->nr_frags + 1;
-       struct skb_frag_struct *pFragList = &skb_shinfo(pPacket)->frags[0];
-
-       DBG_TX_ENTER(et131x_dbginfo);
-
-       ServiceComplete.value =
-           readl(&etdev->regs->txdma.NewServiceComplete.value);
-
-       /*
-        * Attempt to fix TWO hardware bugs:
-        * 1) NEVER write an odd number of descriptors.
-        * 2) If packet length is less than NIC_MIN_PACKET_SIZE, then pad the
-        *    packet to NIC_MIN_PACKET_SIZE bytes by adding a new last
-        *    descriptor IN HALF DUPLEX MODE ONLY
-        * NOTE that (2) interacts with (1). If the packet is less than
-        * NIC_MIN_PACKET_SIZE bytes then we will append a descriptor.
-        * Therefore if it is even now, it will eventually end up odd, and
-        * so will need adjusting.
-        *
-        * VLAN tags get involved since VLAN tags add another one or two
-        * segments.
-        */
-       DBG_TX(et131x_dbginfo,
-              "pMpTcb->PacketLength: %d\n", pMpTcb->PacketLength);
-
-       if ((etdev->duplex_mode == 0)
-           && (pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE)) {
-               DBG_TX(et131x_dbginfo,
-                      "HALF DUPLEX mode AND len < MIN_PKT_SIZE\n");
-               if ((FragListCount & 0x1) == 0) {
-                       DBG_TX(et131x_dbginfo,
-                              "Even number of descs, split 1st elem\n");
-                       splitfirstelem = 1;
-                       /* SegmentSize = pFragList[0].size / 2; */
-                       SegmentSize = (pPacket->len - pPacket->data_len) / 2;
-               }
-       } else if (FragListCount & 0x1) {
-               DBG_TX(et131x_dbginfo, "Odd number of descs, split 1st elem\n");
-
-               splitfirstelem = 1;
-               /* SegmentSize = pFragList[0].size / 2; */
-               SegmentSize = (pPacket->len - pPacket->data_len) / 2;
-       }
-
-       spin_lock_irqsave(&etdev->SendHWLock, flags);
-
-       if (etdev->TxRing.txDmaReadyToSend.bits.serv_req_wrap ==
-           ServiceComplete.bits.serv_cpl_wrap) {
-               /* The ring hasn't wrapped. Slots available should be
-                * (RING_SIZE) - the difference between the two pointers.
-                */
-               SlotsAvailable = NUM_DESC_PER_RING_TX -
-                   (etdev->TxRing.txDmaReadyToSend.bits.serv_req -
-                    ServiceComplete.bits.serv_cpl);
-       } else {
-               /* The ring has wrapped. Slots available should be the
-                * difference between the two pointers.
-                */
-               SlotsAvailable = ServiceComplete.bits.serv_cpl -
-                   etdev->TxRing.txDmaReadyToSend.bits.serv_req;
-       }
-
-       if ((FragListCount + splitfirstelem) > SlotsAvailable) {
-               DBG_WARNING(et131x_dbginfo,
-                           "Not Enough Space in Tx Desc Ring\n");
-               spin_unlock_irqrestore(&etdev->SendHWLock, flags);
-               return -ENOMEM;
-       }
-
-       loopEnd = (FragListCount) + splitfirstelem;
-       fragIndex = 0;
-
-       DBG_TX(et131x_dbginfo,
-              "TCB : 0x%p\n"
-              "Packet (SKB) : 0x%p\t Packet->len: %d\t Packet->data_len: %d\n"
-              "FragListCount : %d\t splitfirstelem: %d\t loopEnd:%d\n",
-              pMpTcb,
-              pPacket, pPacket->len, pPacket->data_len,
-              FragListCount, splitfirstelem, loopEnd);
-
-       for (loopIndex = 0; loopIndex < loopEnd; loopIndex++) {
-               if (loopIndex > splitfirstelem)
-                       fragIndex++;
-
-               DBG_TX(et131x_dbginfo,
-                      "In loop, loopIndex: %d\t fragIndex: %d\n", loopIndex,
-                      fragIndex);
-
-               /* If there is something in this element, let's get a
-                * descriptor from the ring and get the necessary data
-                */
-               DBG_TX(et131x_dbginfo,
-                      "Packet Length %d,"
-                      "filling desc entry %d\n",
-                      pPacket->len,
-                      etdev->TxRing.txDmaReadyToSend.bits.serv_req);
-
-               /*
-                * NOTE - Should we do a paranoia check here to make sure the fragment
-                * actually has a length? It's HIGHLY unlikely the fragment would
-                * contain no data...
-                */
-               if (1) {
-                       /* NOTE - Currently always getting 32-bit addrs, and
-                        * dma_addr_t is only 32-bit, so leave "high" ptr
-                        * value out for now
-                        * CurDesc.DataBufferPtrHigh = 0;
-                        */
-
-                       CurDesc.word2.value = 0;
-                       CurDesc.word3.value = 0;
-
-                       if (fragIndex == 0) {
-                               if (splitfirstelem) {
-                                       DBG_TX(et131x_dbginfo,
-                                              "Split first element: YES\n");
-
-                                       if (loopIndex == 0) {
-                                               DBG_TX(et131x_dbginfo,
-                                                      "Got fragment of length %d, fragIndex: %d\n",
-                                                      pPacket->len -
-                                                      pPacket->data_len,
-                                                      fragIndex);
-                                               DBG_TX(et131x_dbginfo,
-                                                      "SegmentSize: %d\n",
-                                                      SegmentSize);
-
-                                               CurDesc.word2.bits.
-                                                   length_in_bytes =
-                                                   SegmentSize;
-                                               CurDesc.DataBufferPtrLow =
-                                                   pci_map_single(etdev->
-                                                                  pdev,
-                                                                  pPacket->
-                                                                  data,
-                                                                  SegmentSize,
-                                                                  PCI_DMA_TODEVICE);
-                                               DBG_TX(et131x_dbginfo,
-                                                      "pci_map_single() returns: 0x%08x\n",
-                                                      CurDesc.
-                                                      DataBufferPtrLow);
-                                       } else {
-                                               DBG_TX(et131x_dbginfo,
-                                                      "Got fragment of length %d, fragIndex: %d\n",
-                                                      pPacket->len -
-                                                      pPacket->data_len,
-                                                      fragIndex);
-                                               DBG_TX(et131x_dbginfo,
-                                                      "Leftover Size: %d\n",
-                                                      (pPacket->len -
-                                                       pPacket->data_len -
-                                                       SegmentSize));
-
-                                               CurDesc.word2.bits.
-                                                   length_in_bytes =
-                                                   ((pPacket->len -
-                                                     pPacket->data_len) -
-                                                    SegmentSize);
-                                               CurDesc.DataBufferPtrLow =
-                                                   pci_map_single(etdev->
-                                                                  pdev,
-                                                                  (pPacket->
-                                                                   data +
-                                                                   SegmentSize),
-                                                                  (pPacket->
-                                                                   len -
-                                                                   pPacket->
-                                                                   data_len -
-                                                                   SegmentSize),
-                                                                  PCI_DMA_TODEVICE);
-                                               DBG_TX(et131x_dbginfo,
-                                                      "pci_map_single() returns: 0x%08x\n",
-                                                      CurDesc.
-                                                      DataBufferPtrLow);
-                                       }
-                               } else {
-                                       DBG_TX(et131x_dbginfo,
-                                              "Split first element: NO\n");
-
-                                       CurDesc.word2.bits.length_in_bytes =
-                                           pPacket->len - pPacket->data_len;
-
-                                       CurDesc.DataBufferPtrLow =
-                                           pci_map_single(etdev->pdev,
-                                                          pPacket->data,
-                                                          (pPacket->len -
-                                                           pPacket->data_len),
-                                                          PCI_DMA_TODEVICE);
-                                       DBG_TX(et131x_dbginfo,
-                                              "pci_map_single() returns: 0x%08x\n",
-                                              CurDesc.DataBufferPtrLow);
-                               }
-                       } else {
-
-                               CurDesc.word2.bits.length_in_bytes =
-                                   pFragList[fragIndex - 1].size;
-                               CurDesc.DataBufferPtrLow =
-                                   pci_map_page(etdev->pdev,
-                                                pFragList[fragIndex - 1].page,
-                                                pFragList[fragIndex -
-                                                          1].page_offset,
-                                                pFragList[fragIndex - 1].size,
-                                                PCI_DMA_TODEVICE);
-                               DBG_TX(et131x_dbginfo,
-                                      "pci_map_page() returns: 0x%08x\n",
-                                      CurDesc.DataBufferPtrLow);
-                       }
-
-                       if (loopIndex == 0) {
-                               /* This is the first descriptor of the packet
-                                *
-                                * Set the "f" bit to indicate this is the
-                                * first descriptor in the packet.
-                                */
-                               DBG_TX(et131x_dbginfo,
-                                      "This is our FIRST descriptor\n");
-                               CurDesc.word3.bits.f = 1;
-
-                               pMpTcb->WrIndexStart =
-                                   etdev->TxRing.txDmaReadyToSend;
-                       }
-
-                       if ((loopIndex == (loopEnd - 1)) &&
-                           (etdev->duplex_mode ||
-                            (pMpTcb->PacketLength >= NIC_MIN_PACKET_SIZE))) {
-                               /* This is the Last descriptor of the packet */
-                               DBG_TX(et131x_dbginfo,
-                                      "THIS is our LAST descriptor\n");
-
-                               if (etdev->linkspeed ==
-                                   TRUEPHY_SPEED_1000MBPS) {
-                                       if (++etdev->TxRing.
-                                           TxPacketsSinceLastinterrupt >=
-                                           PARM_TX_NUM_BUFS_DEF) {
-                                               CurDesc.word3.value = 0x5;
-                                               etdev->TxRing.
-                                                   TxPacketsSinceLastinterrupt
-                                                   = 0;
-                                       } else {
-                                               CurDesc.word3.value = 0x1;
-                                       }
-                               } else {
-                                       CurDesc.word3.value = 0x5;
-                               }
-
-                               /* Following index will be used during freeing
-                                * of packet
-                                */
-                               pMpTcb->WrIndex =
-                                   etdev->TxRing.txDmaReadyToSend;
-                               pMpTcb->PacketStaleCount = 0;
-                       }
-
-                       /* Copy the descriptor (filled above) into the
-                        * descriptor ring at the next free entry. Advance
-                        * the "next free entry" variable
-                        */
-                       memcpy(etdev->TxRing.pTxDescRingVa +
-                              etdev->TxRing.txDmaReadyToSend.bits.serv_req,
-                              &CurDesc, sizeof(TX_DESC_ENTRY_t));
-
-                       CurDescPostCopy =
-                           etdev->TxRing.pTxDescRingVa +
-                           etdev->TxRing.txDmaReadyToSend.bits.serv_req;
-
-                       DBG_TX(et131x_dbginfo,
-                              "CURRENT DESCRIPTOR\n"
-                              "\tAddress : 0x%p\n"
-                              "\tDataBufferPtrHigh : 0x%08x\n"
-                              "\tDataBufferPtrLow : 0x%08x\n"
-                              "\tword2 : 0x%08x\n"
-                              "\tword3 : 0x%08x\n",
-                              CurDescPostCopy,
-                              CurDescPostCopy->DataBufferPtrHigh,
-                              CurDescPostCopy->DataBufferPtrLow,
-                              CurDescPostCopy->word2.value,
-                              CurDescPostCopy->word3.value);
-
-                       if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
-                           NUM_DESC_PER_RING_TX) {
-                               if (etdev->TxRing.txDmaReadyToSend.bits.
-                                   serv_req_wrap) {
-                                       etdev->TxRing.txDmaReadyToSend.
-                                           value = 0;
-                               } else {
-                                       etdev->TxRing.txDmaReadyToSend.
-                                           value = 0x400;
-                               }
-                       }
-               }
-       }
-
-       if (etdev->duplex_mode == 0 &&
-           pMpTcb->PacketLength < NIC_MIN_PACKET_SIZE) {
-               /* NOTE - Same 32/64-bit issue as above... */
-               CurDesc.DataBufferPtrHigh = 0x0;
-               CurDesc.DataBufferPtrLow = etdev->TxRing.pTxDummyBlkPa;
-               CurDesc.word2.value = 0;
-
-               if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
-                       if (++etdev->TxRing.TxPacketsSinceLastinterrupt >=
-                           PARM_TX_NUM_BUFS_DEF) {
-                               CurDesc.word3.value = 0x5;
-                               etdev->TxRing.TxPacketsSinceLastinterrupt =
-                                   0;
-                       } else {
-                               CurDesc.word3.value = 0x1;
-                       }
-               } else {
-                       CurDesc.word3.value = 0x5;
-               }
-
-               CurDesc.word2.bits.length_in_bytes =
-                   NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength;
-
-               pMpTcb->WrIndex = etdev->TxRing.txDmaReadyToSend;
-
-               memcpy(etdev->TxRing.pTxDescRingVa +
-                      etdev->TxRing.txDmaReadyToSend.bits.serv_req,
-                      &CurDesc, sizeof(TX_DESC_ENTRY_t));
-
-               CurDescPostCopy =
-                   etdev->TxRing.pTxDescRingVa +
-                   etdev->TxRing.txDmaReadyToSend.bits.serv_req;
-
-               DBG_TX(et131x_dbginfo,
-                      "CURRENT DESCRIPTOR\n"
-                      "\tAddress : 0x%p\n"
-                      "\tDataBufferPtrHigh : 0x%08x\n"
-                      "\tDataBufferPtrLow : 0x%08x\n"
-                      "\tword2 : 0x%08x\n"
-                      "\tword3 : 0x%08x\n",
-                      CurDescPostCopy,
-                      CurDescPostCopy->DataBufferPtrHigh,
-                      CurDescPostCopy->DataBufferPtrLow,
-                      CurDescPostCopy->word2.value,
-                      CurDescPostCopy->word3.value);
-
-               if (++etdev->TxRing.txDmaReadyToSend.bits.serv_req >=
-                   NUM_DESC_PER_RING_TX) {
-                       if (etdev->TxRing.txDmaReadyToSend.bits.
-                           serv_req_wrap) {
-                               etdev->TxRing.txDmaReadyToSend.value = 0;
-                       } else {
-                               etdev->TxRing.txDmaReadyToSend.value = 0x400;
-                       }
-               }
-
-               DBG_TX(et131x_dbginfo, "Padding descriptor %d by %d bytes\n",
-                      /* etdev->TxRing.txDmaReadyToSend.value, */
-                      etdev->TxRing.txDmaReadyToSend.bits.serv_req,
-                      NIC_MIN_PACKET_SIZE - pMpTcb->PacketLength);
-       }
-
-       spin_lock(&etdev->TCBSendQLock);
-
-       if (etdev->TxRing.CurrSendTail)
-               etdev->TxRing.CurrSendTail->Next = pMpTcb;
-       else
-               etdev->TxRing.CurrSendHead = pMpTcb;
-
-       etdev->TxRing.CurrSendTail = pMpTcb;
-
-       DBG_ASSERT(pMpTcb->Next == NULL);
-
-       etdev->TxRing.nBusySend++;
-
-       spin_unlock(&etdev->TCBSendQLock);
-
-       /* Write the new write pointer back to the device. */
-       writel(etdev->TxRing.txDmaReadyToSend.value,
-              &etdev->regs->txdma.service_request.value);
-
-#ifdef CONFIG_ET131X_DEBUG
-       DumpDeviceBlock(DBG_TX_ON, etdev, 1);
-#endif
-
-       /* For Gig only, we use Tx Interrupt coalescing. Enable the software
-        * timer to wake us up if this packet isn't followed by N more.
-        */
-       if (etdev->linkspeed == TRUEPHY_SPEED_1000MBPS) {
-               writel(PARM_TX_TIME_INT_DEF * NANO_IN_A_MICRO,
-                      &etdev->regs->global.watchdog_timer);
-       }
-
-       spin_unlock_irqrestore(&etdev->SendHWLock, flags);
-
-       DBG_TX_LEAVE(et131x_dbginfo);
-       return 0;
-}
-
-#endif
 
 /**
  * et131x_free_send_packet - Recycle a MP_TCB, complete the packet if necessary
@@ -1246,37 +726,11 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
                 * corresponding to this packet and umap the fragments
                 * they point to
                 */
-               DBG_TX(et131x_dbginfo,
-                      "Unmap descriptors Here\n"
-                      "TCB : 0x%p\n"
-                      "TCB Next : 0x%p\n"
-                      "TCB PacketLength : %d\n"
-                      "TCB WrIndexS.value : 0x%08x\n"
-                      "TCB WrIndex.value : 0x%08x\n",
-                      pMpTcb,
-                      pMpTcb->Next,
-                      pMpTcb->PacketLength,
-                      pMpTcb->WrIndexStart,
-                      pMpTcb->WrIndex);
-
                do {
                        desc =
                            (TX_DESC_ENTRY_t *) (etdev->TxRing.pTxDescRingVa +
                                                 INDEX10(pMpTcb->WrIndexStart));
 
-                       DBG_TX(et131x_dbginfo,
-                              "CURRENT DESCRIPTOR\n"
-                              "\tAddress : 0x%p\n"
-                              "\tDataBufferPtrHigh : 0x%08x\n"
-                              "\tDataBufferPtrLow : 0x%08x\n"
-                              "\tword2 : 0x%08x\n"
-                              "\tword3 : 0x%08x\n",
-                              desc,
-                              desc->DataBufferPtrHigh,
-                              desc->DataBufferPtrLow,
-                              desc->word2.value,
-                              desc->word3.value);
-
                        pci_unmap_single(etdev->pdev,
                                         desc->DataBufferPtrLow,
                                         desc->word2.value, PCI_DMA_TODEVICE);
@@ -1290,9 +744,6 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
                } while (desc != (etdev->TxRing.pTxDescRingVa +
                                  INDEX10(pMpTcb->WrIndex)));
 
-               DBG_TX(et131x_dbginfo,
-                      "Free Packet (SKB) : 0x%p\n", pMpTcb->Packet);
-
                dev_kfree_skb_any(pMpTcb->Packet);
        }
 
@@ -1313,8 +764,7 @@ inline void et131x_free_send_packet(struct et131x_adapter *etdev,
        etdev->TxRing.TCBReadyQueueTail = pMpTcb;
 
        spin_unlock_irqrestore(&etdev->TCBReadyQLock, flags);
-
-       DBG_ASSERT(etdev->TxRing.nBusySend >= 0);
+       WARN_ON(etdev->TxRing.nBusySend < 0);
 }
 
 /**
@@ -1330,8 +780,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
        unsigned long flags;
        uint32_t FreeCounter = 0;
 
-       DBG_ENTER(et131x_dbginfo);
-
        while (!list_empty(&etdev->TxRing.SendWaitQueue)) {
                spin_lock_irqsave(&etdev->SendWaitLock, flags);
 
@@ -1360,8 +808,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
 
                spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
-               DBG_VERBOSE(et131x_dbginfo, "pMpTcb = 0x%p\n", pMpTcb);
-
                FreeCounter++;
                et131x_free_send_packet(etdev, pMpTcb);
 
@@ -1370,17 +816,11 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
                pMpTcb = etdev->TxRing.CurrSendHead;
        }
 
-       if (FreeCounter == NUM_TCB) {
-               DBG_ERROR(et131x_dbginfo,
-                         "MpFreeBusySendPackets exited loop for a bad reason\n");
-               BUG();
-       }
+       WARN_ON(FreeCounter == NUM_TCB);
 
        spin_unlock_irqrestore(&etdev->TCBSendQLock, flags);
 
        etdev->TxRing.nBusySend = 0;
-
-       DBG_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -1394,8 +834,6 @@ void et131x_free_busy_send_packets(struct et131x_adapter *etdev)
  */
 void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
 {
-       DBG_TX_ENTER(et131x_dbginfo);
-
        /* Mark as completed any packets which have been sent by the device. */
        et131x_update_tcb_list(etdev);
 
@@ -1403,8 +841,6 @@ void et131x_handle_send_interrupt(struct et131x_adapter *etdev)
         * dequeue and send those packets now, as long as we have free TCBs.
         */
        et131x_check_send_wait_list(etdev);
-
-       DBG_TX_LEAVE(et131x_dbginfo);
 }
 
 /**
@@ -1487,15 +923,9 @@ static void et131x_check_send_wait_list(struct et131x_adapter *etdev)
               MP_TCB_RESOURCES_AVAILABLE(etdev)) {
                struct list_head *entry;
 
-               DBG_VERBOSE(et131x_dbginfo, "Tx packets on the wait queue\n");
-
                entry = etdev->TxRing.SendWaitQueue.next;
 
                etdev->TxRing.nWaitSend--;
-
-               DBG_WARNING(et131x_dbginfo,
-                           "MpHandleSendInterrupt - sent a queued pkt. Waiting %d\n",
-                           etdev->TxRing.nWaitSend);
        }
 
        spin_unlock_irqrestore(&etdev->SendWaitLock, flags);