| author | Sathya Perla <sathyap@serverengines.com> | 2010-03-22 16:41:12 -0400 |
| --- | --- | --- |
| committer | David S. Miller <davem@davemloft.net> | 2010-03-23 16:22:39 -0400 |
| commit | 7101e111217581a36e2eeae7c4a3815d60673cbc (patch) | |
| tree | 360d8de508a994b90b6d27297c9bc32ab3869239 /drivers/net/benet | |
| parent | 7316ae88c43d47f6503f4c29b4973204e33c3411 (diff) | |
be2net: handle dma mapping errors in Tx path
Signed-off-by: Sathya Perla <sathyap@serverengines.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
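
In short, this patch makes make_tx_wrbs() check pci_dma_mapping_error() after every pci_map_single()/pci_map_page() call. On failure it jumps to a new dma_err label, rewinds txq->head to the position saved in map_head before any mapping began, and uses the new unmap_tx_frag() helper to unmap the fragments that had already been mapped, returning 0 instead of the copied length. The atomic_add() on txq->used is also moved out of make_tx_wrbs() into be_xmit(), after the WRBs have been successfully built.

Below is a minimal userspace sketch of the same map/check/unwind pattern, with a fake mapping layer standing in for the PCI DMA API. All demo_* names are illustrative; none of this is be2net or kernel code.

#include <stdint.h>
#include <stdio.h>

#define DEMO_RING_LEN 8

struct demo_wrb {                       /* stand-in for struct be_eth_wrb */
        uint64_t dma;
        uint32_t len;
};

static struct demo_wrb ring[DEMO_RING_LEN];
static unsigned int head;

/* Fake "DMA mapping": the third call fails, to exercise the error path. */
static int calls_until_failure = 3;

static uint64_t demo_map(const void *buf, uint32_t len)
{
        (void)len;
        if (--calls_until_failure == 0)
                return 0;                        /* 0 models a mapping error */
        return (uint64_t)(uintptr_t)buf;         /* pretend bus address */
}

static void demo_unmap(uint64_t dma, uint32_t len)
{
        printf("unmapped dma=%#llx len=%u\n",
               (unsigned long long)dma, (unsigned)len);
}

/*
 * Map each fragment into a descriptor ring.  Returns the number of bytes
 * queued, or 0 if any mapping failed -- in which case everything mapped so
 * far is unmapped and the ring head is rewound, mirroring the dma_err path
 * added to make_tx_wrbs().
 */
static uint32_t demo_queue_frags(const void **frags, const uint32_t *lens,
                                 unsigned int nfrags)
{
        unsigned int map_head = head;           /* like map_head = txq->head */
        uint32_t copied = 0;
        unsigned int i;

        for (i = 0; i < nfrags; i++) {
                uint64_t dma = demo_map(frags[i], lens[i]);

                if (dma == 0)                   /* like pci_dma_mapping_error() */
                        goto dma_err;
                ring[head % DEMO_RING_LEN].dma = dma;
                ring[head % DEMO_RING_LEN].len = lens[i];
                head++;
                copied += lens[i];
        }
        return copied;

dma_err:
        head = map_head;                        /* rewind, like txq->head = map_head */
        for (i = 0; copied; i++) {              /* walk the entries we did fill */
                struct demo_wrb *wrb = &ring[(map_head + i) % DEMO_RING_LEN];

                demo_unmap(wrb->dma, wrb->len);
                copied -= wrb->len;
        }
        return 0;
}

int main(void)
{
        static const char a[64], b[64], c[64], d[64];
        const void *frags[] = { a, b, c, d };
        const uint32_t lens[] = { sizeof(a), sizeof(b), sizeof(c), sizeof(d) };
        uint32_t copied = demo_queue_frags(frags, lens, 4);

        printf("copied=%u (0 means the mappings were unwound)\n",
               (unsigned)copied);
        return 0;
}

The same ordering matters in the driver: the unwind walks forward from the saved head, so descriptors are unmapped in the order they were filled, and the caller sees 0 bytes queued.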
Diffstat (limited to 'drivers/net/benet')
-rw-r--r-- | drivers/net/benet/be_main.c | 41
1 file changed, 38 insertions, 3 deletions
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 43e8032f9236..0800c6363908 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -386,26 +386,48 @@ static void wrb_fill_hdr(struct be_eth_hdr_wrb *hdr, struct sk_buff *skb,
 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
 }
 
+static void unmap_tx_frag(struct pci_dev *pdev, struct be_eth_wrb *wrb,
+		bool unmap_single)
+{
+	dma_addr_t dma;
+
+	be_dws_le_to_cpu(wrb, sizeof(*wrb));
+
+	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
+	if (dma != 0) {
+		if (unmap_single)
+			pci_unmap_single(pdev, dma, wrb->frag_len,
+				PCI_DMA_TODEVICE);
+		else
+			pci_unmap_page(pdev, dma, wrb->frag_len,
+				PCI_DMA_TODEVICE);
+	}
+}
 
 static int make_tx_wrbs(struct be_adapter *adapter,
 		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
 {
-	u64 busaddr;
-	u32 i, copied = 0;
+	dma_addr_t busaddr;
+	int i, copied = 0;
 	struct pci_dev *pdev = adapter->pdev;
 	struct sk_buff *first_skb = skb;
 	struct be_queue_info *txq = &adapter->tx_obj.q;
 	struct be_eth_wrb *wrb;
 	struct be_eth_hdr_wrb *hdr;
+	bool map_single = false;
+	u16 map_head;
 
 	hdr = queue_head_node(txq);
-	atomic_add(wrb_cnt, &txq->used);
 	queue_head_inc(txq);
+	map_head = txq->head;
 
 	if (skb->len > skb->data_len) {
 		int len = skb->len - skb->data_len;
 		busaddr = pci_map_single(pdev, skb->data, len,
 				PCI_DMA_TODEVICE);
+		if (pci_dma_mapping_error(pdev, busaddr))
+			goto dma_err;
+		map_single = true;
 		wrb = queue_head_node(txq);
 		wrb_fill(wrb, busaddr, len);
 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -419,6 +441,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 			busaddr = pci_map_page(pdev, frag->page,
 					frag->page_offset,
 					frag->size, PCI_DMA_TODEVICE);
+			if (pci_dma_mapping_error(pdev, busaddr))
+				goto dma_err;
 			wrb = queue_head_node(txq);
 			wrb_fill(wrb, busaddr, frag->size);
 			be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -438,6 +462,16 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 	be_dws_cpu_to_le(hdr, sizeof(*hdr));
 
 	return copied;
+dma_err:
+	txq->head = map_head;
+	while (copied) {
+		wrb = queue_head_node(txq);
+		unmap_tx_frag(pdev, wrb, map_single);
+		map_single = false;
+		copied -= wrb->frag_len;
+		queue_head_inc(txq);
+	}
+	return 0;
 }
 
 static netdev_tx_t be_xmit(struct sk_buff *skb,
@@ -462,6 +496,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 	 * *BEFORE* ringing the tx doorbell, so that we serialze the
 	 * tx compls of the current transmit which'll wake up the queue
 	 */
+	atomic_add(wrb_cnt, &txq->used);
 	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
 							txq->len) {
 		netif_stop_queue(netdev);