author      Rasesh Mody <rmody@brocade.com>          2010-12-23 16:45:01 -0500
committer   David S. Miller <davem@davemloft.net>    2010-12-25 22:15:59 -0500
commit      be7fa3263a15d3f278c3bfbf606ec169aaa3a920 (patch)
tree        758dca8183d4d5643dd23547bc470b38a15a8579 /drivers/net/bna
parent      e1928c86c4829703b800c81cc9edc939b5634e6f (diff)
bna: TxRx and datapath fix
Change Details:
- Check HW ready condition before accessing h/w register in data-path
- Postpone clean-up of data buffers to the data-path restart path and
wait in the cleanup routines for in-flight DMA to complete
- Separate out Tx completion processing from Rx poll routine
Signed-off-by: Debashis Dutt <ddutt@brocade.com>
Signed-off-by: Rasesh Mody <rmody@brocade.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
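
The data-path checks described in the change details follow a common stop-flag pattern. The sketch below is an illustrative reconstruction of that pattern only, not code from this patch; the names my_txq, MY_TXQ_STARTED, MY_TXQ_IN_USE and my_ring_doorbell() are hypothetical stand-ins for the driver's tcb, BNAD_TXQ_TX_STARTED, BNAD_TXQ_FREE_SENT and doorbell helpers.

/*
 * Illustrative sketch only: all names here are made up and not part of
 * the bna driver.  The data path tests a "started" bit before touching
 * hardware; the stop path clears that bit first, then waits out any
 * in-flight data-path user before cleaning up.
 */
#include <linux/bitops.h>
#include <linux/compiler.h>
#include <asm/processor.h>      /* cpu_relax() */

#define MY_TXQ_STARTED  0       /* queue is up; hardware may be touched */
#define MY_TXQ_IN_USE   1       /* a data-path context is in the hot path */

struct my_txq {
        unsigned long flags;
};

static void my_ring_doorbell(struct my_txq *txq)
{
        /* stub: would write the producer index to the hardware doorbell */
}

/* Data path: only ring the doorbell while the queue is marked started. */
static void my_tx_complete(struct my_txq *txq)
{
        if (test_and_set_bit(MY_TXQ_IN_USE, &txq->flags))
                return;                 /* another context owns the hot path */

        if (likely(test_bit(MY_TXQ_STARTED, &txq->flags)))
                my_ring_doorbell(txq);  /* safe: hardware is still ready */

        smp_mb__before_clear_bit();
        clear_bit(MY_TXQ_IN_USE, &txq->flags);
}

/* Stop path: clear the started bit, then wait for any in-flight user. */
static void my_tx_stop(struct my_txq *txq)
{
        clear_bit(MY_TXQ_STARTED, &txq->flags);
        while (test_and_set_bit(MY_TXQ_IN_USE, &txq->flags))
                cpu_relax();
        /* no data-path context can reach the doorbell now; free buffers here */
        clear_bit(MY_TXQ_IN_USE, &txq->flags);
}

The stop side clears the started bit before spinning on the in-use bit, so once the wait completes no in-flight data-path context can still reach the hardware; the patch applies the same ordering with BNAD_TXQ_TX_STARTED and BNAD_TXQ_FREE_SENT.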
Diffstat (limited to 'drivers/net/bna')
-rw-r--r--   drivers/net/bna/bnad.c   353
-rw-r--r--   drivers/net/bna/bnad.h    22
2 files changed, 178 insertions(+), 197 deletions(-)
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 7e839b9cec22..3c405022d2bb 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -70,6 +70,8 @@ do { \
 		(sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
 } while (0)
 
+#define BNAD_TXRX_SYNC_MDELAY	250	/* 250 msecs */
+
 /*
  * Reinitialize completions in CQ, once Rx is taken down
  */
@@ -130,7 +132,9 @@ bnad_free_all_txbufs(struct bnad *bnad,
 				PCI_DMA_TODEVICE);
 
 		pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
-		unmap_cons++;
+		if (++unmap_cons >= unmap_q->q_depth)
+			break;
+
 		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 			pci_unmap_page(bnad->pcidev,
 				pci_unmap_addr(&unmap_array[unmap_cons],
@@ -139,7 +143,8 @@ bnad_free_all_txbufs(struct bnad *bnad,
 					PCI_DMA_TODEVICE);
 			pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
 					   0);
-			unmap_cons++;
+			if (++unmap_cons >= unmap_q->q_depth)
+				break;
 		}
 		dev_kfree_skb_any(skb);
 	}
@@ -167,11 +172,11 @@ bnad_free_txbufs(struct bnad *bnad,
 	/*
 	 * Just return if TX is stopped. This check is useful
 	 * when bnad_free_txbufs() runs out of a tasklet scheduled
-	 * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit
+	 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
 	 * but this routine runs actually after the cleanup has been
 	 * executed.
 	 */
-	if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
+	if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return 0;
 
 	updated_hw_cons = *(tcb->hw_consumer_index);
@@ -252,7 +257,9 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
 				(!test_and_set_bit(BNAD_TXQ_FREE_SENT,
 						   &tcb->flags))) {
 				acked = bnad_free_txbufs(bnad, tcb);
-				bna_ib_ack(tcb->i_dbell, acked);
+				if (likely(test_bit(BNAD_TXQ_TX_STARTED,
+					&tcb->flags)))
+					bna_ib_ack(tcb->i_dbell, acked);
 				smp_mb__before_clear_bit();
 				clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 			}
@@ -264,7 +271,7 @@ static u32
 bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct net_device *netdev = bnad->netdev;
-	u32 sent;
+	u32 sent = 0;
 
 	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
 		return 0;
@@ -275,12 +282,15 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
 			netif_carrier_ok(netdev) &&
 			BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
 				BNAD_NETIF_WAKE_THRESHOLD) {
-			netif_wake_queue(netdev);
-			BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
+				netif_wake_queue(netdev);
+				BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+			}
 		}
+	}
+
+	if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
 		bna_ib_ack(tcb->i_dbell, sent);
-	} else
-		bna_ib_ack(tcb->i_dbell, 0);
 
 	smp_mb__before_clear_bit();
 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
@@ -313,25 +323,26 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
 }
 
 static void
-bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
+bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q;
 	struct sk_buff *skb;
+	int unmap_cons;
 
 	unmap_q = rcb->unmap_q;
-	while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) {
-		skb = unmap_q->unmap_array[unmap_q->consumer_index].skb;
-		BUG_ON(!(skb));
-		unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL;
+	for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
+		skb = unmap_q->unmap_array[unmap_cons].skb;
+		if (!skb)
+			continue;
+		BUG_ON(!(pci_unmap_addr(
+			&unmap_q->unmap_array[unmap_cons], dma_addr)));
+		unmap_q->unmap_array[unmap_cons].skb = NULL;
 		pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
-					unmap_array[unmap_q->consumer_index],
-					dma_addr), rcb->rxq->buffer_size +
-					NET_IP_ALIGN, PCI_DMA_FROMDEVICE);
+					unmap_array[unmap_cons],
+					dma_addr), rcb->rxq->buffer_size,
+					PCI_DMA_FROMDEVICE);
 		dev_kfree_skb(skb);
-		BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
-		BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
 	}
-
 	bnad_reset_rcb(bnad, rcb);
 }
 
@@ -385,41 +396,9 @@ finishing:
 		unmap_q->producer_index = unmap_prod;
 		rcb->producer_index = unmap_prod;
 		smp_mb();
-		bna_rxq_prod_indx_doorbell(rcb);
-	}
-}
-
-/*
- * Locking is required in the enable path
- * because it is called from a napi poll
- * context, where the bna_lock is not held
- * unlike the IRQ context.
- */
-static void
-bnad_enable_txrx_irqs(struct bnad *bnad)
-{
-	struct bna_tcb *tcb;
-	struct bna_ccb *ccb;
-	int i, j;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++) {
-			tcb = bnad->tx_info[i].tcb[j];
-			bna_ib_coalescing_timer_set(tcb->i_dbell,
-				tcb->txq->ib->ib_config.coalescing_timeo);
-			bna_ib_ack(tcb->i_dbell, 0);
-		}
-	}
-
-	for (i = 0; i < bnad->num_rx; i++) {
-		for (j = 0; j < bnad->num_rxp_per_rx; j++) {
-			ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
-			bnad_enable_rx_irq_unsafe(ccb);
-		}
+		if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
+			bna_rxq_prod_indx_doorbell(rcb);
 	}
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 }
 
 static inline void
@@ -448,6 +427,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
 	u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
 	struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
 
+	if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
+		return 0;
+
 	prefetch(bnad->netdev);
 	BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
 			    wi_range);
@@ -544,12 +526,15 @@ next:
 	BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
 
 	if (likely(ccb)) {
-		bna_ib_ack(ccb->i_dbell, packets);
+		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+			bna_ib_ack(ccb->i_dbell, packets);
 		bnad_refill_rxq(bnad, ccb->rcb[0]);
 		if (ccb->rcb[1])
 			bnad_refill_rxq(bnad, ccb->rcb[1]);
-	} else
-		bna_ib_ack(ccb->i_dbell, 0);
+	} else {
+		if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+			bna_ib_ack(ccb->i_dbell, 0);
+	}
 
 	return packets;
 }
@@ -557,6 +542,9 @@ next:
 static void
 bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
 {
+	if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
+		return;
+
 	bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
 	bna_ib_ack(ccb->i_dbell, 0);
 }
@@ -575,9 +563,11 @@ static void
 bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
-	if (likely(napi_schedule_prep((&rx_ctrl->napi)))) {
+	struct napi_struct *napi = &rx_ctrl->napi;
+
+	if (likely(napi_schedule_prep(napi))) {
 		bnad_disable_rx_irq(bnad, ccb);
-		__napi_schedule((&rx_ctrl->napi));
+		__napi_schedule(napi);
 	}
 	BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
 }
@@ -602,12 +592,11 @@ bnad_msix_mbox_handler(int irq, void *data)
 {
 	u32 intr_status;
 	unsigned long flags;
-	struct net_device *netdev = data;
-	struct bnad *bnad;
+	struct bnad *bnad = (struct bnad *)data;
 
-	bnad = netdev_priv(netdev);
+	if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
+		return IRQ_HANDLED;
 
-	/* BNA_ISR_GET(bnad); Inc Ref count */
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
 	bna_intr_status_get(&bnad->bna, intr_status);
@@ -617,7 +606,6 @@ bnad_msix_mbox_handler(int irq, void *data)
 
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
-	/* BNAD_ISR_PUT(bnad); Dec Ref count */
 	return IRQ_HANDLED;
 }
 
@@ -627,8 +615,7 @@ bnad_isr(int irq, void *data)
 	int i, j;
 	u32 intr_status;
 	unsigned long flags;
-	struct net_device *netdev = data;
-	struct bnad *bnad = netdev_priv(netdev);
+	struct bnad *bnad = (struct bnad *)data;
 	struct bnad_rx_info *rx_info;
 	struct bnad_rx_ctrl *rx_ctrl;
 
@@ -642,16 +629,21 @@ bnad_isr(int irq, void *data)
 
 	spin_lock_irqsave(&bnad->bna_lock, flags);
 
-	if (BNA_IS_MBOX_ERR_INTR(intr_status)) {
+	if (BNA_IS_MBOX_ERR_INTR(intr_status))
 		bna_mbox_handler(&bnad->bna, intr_status);
-		if (!BNA_IS_INTX_DATA_INTR(intr_status)) {
-			spin_unlock_irqrestore(&bnad->bna_lock, flags);
-			goto done;
-		}
-	}
+
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
+	if (!BNA_IS_INTX_DATA_INTR(intr_status))
+		return IRQ_HANDLED;
+
 	/* Process data interrupts */
+	/* Tx processing */
+	for (i = 0; i < bnad->num_tx; i++) {
+		for (j = 0; j < bnad->num_txq_per_tx; j++)
+			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
+	}
+	/* Rx processing */
 	for (i = 0; i < bnad->num_rx; i++) {
 		rx_info = &bnad->rx_info[i];
 		if (!rx_info->rx)
@@ -663,7 +655,6 @@ done:
 						rx_ctrl->ccb);
 		}
 	}
-done:
 	return IRQ_HANDLED;
 }
 
@@ -674,11 +665,7 @@ done:
 static void
 bnad_enable_mbox_irq(struct bnad *bnad)
 {
-	int irq = BNAD_GET_MBOX_IRQ(bnad);
-
-	if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		if (bnad->cfg_flags & BNAD_CF_MSIX)
-			enable_irq(irq);
+	clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
 	BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
 }
@@ -690,14 +677,19 @@ bnad_enable_mbox_irq(struct bnad *bnad)
 static void
 bnad_disable_mbox_irq(struct bnad *bnad)
 {
-	int irq = BNAD_GET_MBOX_IRQ(bnad);
+	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
+	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+}
 
-	if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
-		if (bnad->cfg_flags & BNAD_CF_MSIX)
-			disable_irq_nosync(irq);
+static void
+bnad_set_netdev_perm_addr(struct bnad *bnad)
+{
+	struct net_device *netdev = bnad->netdev;
 
-	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
+	if (is_zero_ether_addr(netdev->dev_addr))
+		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
 }
 
 /* Control Path Handlers */
@@ -755,11 +747,14 @@ bnad_cb_port_link_status(struct bnad *bnad,
 
 	if (link_up) {
 		if (!netif_carrier_ok(bnad->netdev)) {
+			struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
+			if (!tcb)
+				return;
 			pr_warn("bna: %s link up\n",
 				bnad->netdev->name);
 			netif_carrier_on(bnad->netdev);
 			BNAD_UPDATE_CTR(bnad, link_toggle);
-			if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) {
+			if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
 				/* Force an immediate Transmit Schedule */
 				pr_info("bna: %s TX_STARTED\n",
 					bnad->netdev->name);
@@ -807,6 +802,18 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
 {
 	struct bnad_tx_info *tx_info =
 			(struct bnad_tx_info *)tcb->txq->tx->priv;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
+
+	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		cpu_relax();
+
+	bnad_free_all_txbufs(bnad, tcb);
+
+	unmap_q->producer_index = 0;
+	unmap_q->consumer_index = 0;
+
+	smp_mb__before_clear_bit();
+	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 
 	tx_info->tcb[tcb->id] = NULL;
 }
@@ -822,6 +829,12 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
 }
 
 static void
+bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
+{
+	bnad_free_all_rxbufs(bnad, rcb);
+}
+
+static void
 bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
 {
 	struct bnad_rx_info *rx_info =
@@ -849,7 +862,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
 	if (tx_info != &bnad->tx_info[0])
 		return;
 
-	clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags);
+	clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
 	netif_stop_queue(bnad->netdev);
 	pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
 }
@@ -857,30 +870,15 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
 static void
 bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
 {
-	if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))
-		return;
-
-	if (netif_carrier_ok(bnad->netdev)) {
-		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
-		netif_wake_queue(bnad->netdev);
-		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
-	}
-}
-
-static void
-bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
-{
-	struct bnad_unmap_q *unmap_q;
+	struct bnad_unmap_q *unmap_q = tcb->unmap_q;
 
-	if (!tcb || (!tcb->unmap_q))
+	if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
 		return;
 
-	unmap_q = tcb->unmap_q;
-	if (!unmap_q->unmap_array)
-		return;
+	clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
 
-	if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
-		return;
+	while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
+		cpu_relax();
 
 	bnad_free_all_txbufs(bnad, tcb);
 
@@ -889,21 +887,45 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
 
 	smp_mb__before_clear_bit();
 	clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
+
+	/*
+	 * Workaround for first device enable failure & we
+	 * get a 0 MAC address. We try to get the MAC address
+	 * again here.
+	 */
+	if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
+		bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
+		bnad_set_netdev_perm_addr(bnad);
+	}
+
+	set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
+
+	if (netif_carrier_ok(bnad->netdev)) {
+		pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
+		netif_wake_queue(bnad->netdev);
+		BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
+	}
+}
+
+static void
+bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
+{
+	/* Delay only once for the whole Tx Path Shutdown */
+	if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
+		mdelay(BNAD_TXRX_SYNC_MDELAY);
 }
 
 static void
 bnad_cb_rx_cleanup(struct bnad *bnad,
 			struct bna_ccb *ccb)
 {
-	bnad_cq_cmpl_init(bnad, ccb);
-
-	bnad_free_rxbufs(bnad, ccb->rcb[0]);
 	clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
 
-	if (ccb->rcb[1]) {
-		bnad_free_rxbufs(bnad, ccb->rcb[1]);
+	if (ccb->rcb[1])
 		clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
-	}
+
+	if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
+		mdelay(BNAD_TXRX_SYNC_MDELAY);
 }
 
 static void
@@ -911,6 +933,13 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
 {
 	struct bnad_unmap_q *unmap_q = rcb->unmap_q;
 
+	clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
+
+	if (rcb == rcb->cq->ccb->rcb[0])
+		bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
+
+	bnad_free_all_rxbufs(bnad, rcb);
+
 	set_bit(BNAD_RXQ_STARTED, &rcb->flags);
 
 	/* Now allocate & post buffers for this RCB */
@@ -1047,7 +1076,7 @@ bnad_mbox_irq_free(struct bnad *bnad,
 	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	irq = BNAD_GET_MBOX_IRQ(bnad);
-	free_irq(irq, bnad->netdev);
+	free_irq(irq, bnad);
 
 	kfree(intr_info->idl);
 }
@@ -1061,7 +1090,7 @@ static int
 bnad_mbox_irq_alloc(struct bnad *bnad,
 		    struct bna_intr_info *intr_info)
 {
-	int err;
+	int err = 0;
 	unsigned long flags;
 	u32 irq;
 	irq_handler_t irq_handler;
@@ -1096,22 +1125,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
 	 */
 	set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
 
+	BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
+
 	err = request_irq(irq, irq_handler, flags,
-			  bnad->mbox_irq_name, bnad->netdev);
+			  bnad->mbox_irq_name, bnad);
 
 	if (err) {
 		kfree(intr_info->idl);
 		intr_info->idl = NULL;
-		return err;
 	}
 
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-
-	if (bnad->cfg_flags & BNAD_CF_MSIX)
-		disable_irq_nosync(irq);
-
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
-	return 0;
+	return err;
 }
 
 static void
@@ -1555,62 +1579,19 @@ poll_exit:
 	return rcvd;
 }
 
-static int
-bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
-{
-	struct bnad_rx_ctrl *rx_ctrl =
-		container_of(napi, struct bnad_rx_ctrl, napi);
-	struct bna_ccb *ccb;
-	struct bnad *bnad;
-	int rcvd = 0;
-	int i, j;
-
-	ccb = rx_ctrl->ccb;
-
-	bnad = ccb->bnad;
-
-	if (!netif_carrier_ok(bnad->netdev))
-		goto poll_exit;
-
-	/* Handle Tx Completions, if any */
-	for (i = 0; i < bnad->num_tx; i++) {
-		for (j = 0; j < bnad->num_txq_per_tx; j++)
-			bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
-	}
-
-	/* Handle Rx Completions */
-	rcvd = bnad_poll_cq(bnad, ccb, budget);
-	if (rcvd == budget)
-		return rcvd;
-poll_exit:
-	napi_complete((napi));
-
-	BNAD_UPDATE_CTR(bnad, netif_rx_complete);
-
-	bnad_enable_txrx_irqs(bnad);
-	return rcvd;
-}
-
 static void
 bnad_napi_enable(struct bnad *bnad, u32 rx_id)
 {
-	int (*napi_poll) (struct napi_struct *, int);
 	struct bnad_rx_ctrl *rx_ctrl;
 	int i;
-	unsigned long flags;
-
-	spin_lock_irqsave(&bnad->bna_lock, flags);
-	if (bnad->cfg_flags & BNAD_CF_MSIX)
-		napi_poll = bnad_napi_poll_rx;
-	else
-		napi_poll = bnad_napi_poll_txrx;
-	spin_unlock_irqrestore(&bnad->bna_lock, flags);
 
 	/* Initialize & enable NAPI */
 	for (i = 0; i < bnad->num_rxp_per_rx; i++) {
 		rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
+
 		netif_napi_add(bnad->netdev, &rx_ctrl->napi,
-			       napi_poll, 64);
+			       bnad_napi_poll_rx, 64);
+
 		napi_enable(&rx_ctrl->napi);
 	}
 }
@@ -1825,6 +1806,7 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
 
 	/* Initialize the Rx event handlers */
 	rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
+	rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
 	rx_cbfn.rcb_destroy_cbfn = NULL;
 	rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
 	rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
@@ -2152,16 +2134,6 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
 	bnad->num_rxp_per_rx = 1;
 }
 
-static void
-bnad_set_netdev_perm_addr(struct bnad *bnad)
-{
-	struct net_device *netdev = bnad->netdev;
-
-	memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
-	if (is_zero_ether_addr(netdev->dev_addr))
-		memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
-}
-
 /* Enable / disable device */
 static void
 bnad_device_disable(struct bnad *bnad)
@@ -2433,21 +2405,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 		return NETDEV_TX_OK;
 	}
 
+	tx_id = 0;
+
+	tx_info = &bnad->tx_info[tx_id];
+	tcb = tx_info->tcb[tx_id];
+	unmap_q = tcb->unmap_q;
+
 	/*
 	 * Takes care of the Tx that is scheduled between clearing the flag
 	 * and the netif_stop_queue() call.
 	 */
-	if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) {
+	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
 		dev_kfree_skb(skb);
 		return NETDEV_TX_OK;
 	}
 
-	tx_id = 0;
-
-	tx_info = &bnad->tx_info[tx_id];
-	tcb = tx_info->tcb[tx_id];
-	unmap_q = tcb->unmap_q;
-
 	vectors = 1 + skb_shinfo(skb)->nr_frags;
 	if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
 		dev_kfree_skb(skb);
@@ -2462,7 +2434,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 				tcb->consumer_index &&
 				!test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
 			acked = bnad_free_txbufs(bnad, tcb);
-			bna_ib_ack(tcb->i_dbell, acked);
+			if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+				bna_ib_ack(tcb->i_dbell, acked);
 			smp_mb__before_clear_bit();
 			clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
 		} else {
@@ -2624,6 +2597,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
 	tcb->producer_index = txq_prod;
 
 	smp_mb();
+
+	if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
+		return NETDEV_TX_OK;
+
 	bna_txq_prod_indx_doorbell(tcb);
 
 	if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
@@ -3066,7 +3043,7 @@ bnad_pci_probe(struct pci_dev *pdev,
 	/*
 	 * PCI initialization
 	 *	Output : using_dac = 1 for 64 bit DMA
-	 *			    = 0 for 32 bit DMA
+	 *			   = 0 for 32 bit DMA
 	 */
 	err = bnad_pci_init(bnad, pdev, &using_dac);
 	if (err)
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index ebc3a9078642..f59685a5543d 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -51,6 +51,7 @@
  */
 struct bnad_rx_ctrl {
 	struct bna_ccb *ccb;
+	unsigned long flags;
 	struct napi_struct napi;
 };
 
@@ -82,6 +83,7 @@ struct bnad_rx_ctrl {
 
 /* Bit positions for tcb->flags */
 #define BNAD_TXQ_FREE_SENT		0
+#define BNAD_TXQ_TX_STARTED		1
 
 /* Bit positions for rcb->flags */
 #define BNAD_RXQ_REFILL			0
@@ -199,12 +201,12 @@ struct bnad_unmap_q {
 /* Set, tested & cleared using xxx_bit() functions */
 /* Values indicated bit positions */
 #define BNAD_RF_CEE_RUNNING		1
-#define BNAD_RF_HW_ERROR		2
-#define BNAD_RF_MBOX_IRQ_DISABLED	3
-#define BNAD_RF_TX_STARTED		4
-#define BNAD_RF_RX_STARTED		5
-#define BNAD_RF_DIM_TIMER_RUNNING	6
-#define BNAD_RF_STATS_TIMER_RUNNING	7
+#define BNAD_RF_MBOX_IRQ_DISABLED	2
+#define BNAD_RF_RX_STARTED		3
+#define BNAD_RF_DIM_TIMER_RUNNING	4
+#define BNAD_RF_STATS_TIMER_RUNNING	5
+#define BNAD_RF_TX_SHUTDOWN_DELAYED	6
+#define BNAD_RF_RX_SHUTDOWN_DELAYED	7
 
 struct bnad {
 	struct net_device *netdev;
@@ -320,9 +322,11 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64
 
 #define bnad_enable_rx_irq_unsafe(_ccb)			\
 {							\
-	bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
-		(_ccb)->rx_coalescing_timeo);		\
-	bna_ib_ack((_ccb)->i_dbell, 0);			\
+	if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\
+		bna_ib_coalescing_timer_set((_ccb)->i_dbell,	\
+			(_ccb)->rx_coalescing_timeo);		\
+		bna_ib_ack((_ccb)->i_dbell, 0);			\
+	}							\
 }
 
 #define bnad_dim_timer_running(_bnad)			\