author     Ajit Khaparde <ajitk@serverengines.com>    2009-09-03 23:12:29 -0400
committer  David S. Miller <davem@davemloft.net>      2009-09-07 04:56:41 -0400
commit     c190e3c8f6b22004b9cdc62ee5e7ba2fa5f74dc7 (patch)
tree       3ad85f57a622f6410cf897676687aa30b73cfddb /drivers/net/benet
parent     84517482e19bc775de7b3b4e998dee2f506bc34e (diff)
be2net: Code changes in Tx path to use skb_dma_map/skb_dma_unmap
This patch makes two changes:
- In the tx completion processing, there were instances of memory being
unmapped as a page when it was originally mapped as single. This patch
takes care of this by using skb_dma_map()/skb_dma_unmap() to map/unmap
Tx buffers (a short sketch of the pattern follows the sign-offs).
- Set gso_max_size to 65535; this was not done until now (see the note
after the diff).
Signed-off-by: Ajit Khaparde <ajitk@serverengines.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
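
For reference, a minimal sketch of the skb_dma_map()/skb_dma_unmap()
pattern this patch adopts (illustrative only, not part of the patch;
hypothetical_xmit()/hypothetical_compl() are stand-in names, while the
dma_head/dma_maps[] fields are the ones used in the diff below):

#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static int hypothetical_xmit(struct device *dev, struct sk_buff *skb)
{
        dma_addr_t busaddr;
        int i;

        /* Map the linear part and all paged fragments in one call;
         * on failure nothing is left mapped.
         */
        if (skb_dma_map(dev, skb, DMA_TO_DEVICE))
                return -ENOMEM;

        /* Linear (header) part: its bus address is in dma_head. */
        busaddr = skb_shinfo(skb)->dma_head;
        /* ... post busaddr / (skb->len - skb->data_len) to the HW ring ... */

        /* Paged fragments: one pre-mapped address per fragment. */
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                busaddr = skb_shinfo(skb)->dma_maps[i];
                /* ... post busaddr / frags[i].size to the HW ring ... */
        }
        return 0;
}

static void hypothetical_compl(struct device *dev, struct sk_buff *skb)
{
        /* One call undoes the whole mapping, using the correct
         * single/page unmap type for each piece.
         */
        skb_dma_unmap(dev, skb, DMA_TO_DEVICE);
        kfree_skb(skb);
}

Because the map and unmap are driven by the same skb layout, the
completion path can no longer unmap a single-mapped buffer as a page.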
Diffstat (limited to 'drivers/net/benet')
-rw-r--r--  drivers/net/benet/be_main.c | 62
1 file changed, 32 insertions(+), 30 deletions(-)
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index d09106f2e084..ce11bba2cb67 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -385,15 +385,19 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 	struct be_eth_wrb *wrb;
 	struct be_eth_hdr_wrb *hdr;
 
-	atomic_add(wrb_cnt, &txq->used);
 	hdr = queue_head_node(txq);
+	atomic_add(wrb_cnt, &txq->used);
 	queue_head_inc(txq);
 
+	if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
+		dev_err(&pdev->dev, "TX DMA mapping failed\n");
+		return 0;
+	}
+
 	if (skb->len > skb->data_len) {
 		int len = skb->len - skb->data_len;
-		busaddr = pci_map_single(pdev, skb->data, len,
-				PCI_DMA_TODEVICE);
 		wrb = queue_head_node(txq);
+		busaddr = skb_shinfo(skb)->dma_head;
 		wrb_fill(wrb, busaddr, len);
 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
 		queue_head_inc(txq);
@@ -403,9 +407,8 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		struct skb_frag_struct *frag =
 			&skb_shinfo(skb)->frags[i];
-		busaddr = pci_map_page(pdev, frag->page,
-				frag->page_offset,
-				frag->size, PCI_DMA_TODEVICE);
+
+		busaddr = skb_shinfo(skb)->dma_maps[i];
 		wrb = queue_head_node(txq);
 		wrb_fill(wrb, busaddr, frag->size);
 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
@@ -429,6 +432,7 @@ static int make_tx_wrbs(struct be_adapter *adapter,
 
 static netdev_tx_t be_xmit(struct sk_buff *skb,
 			struct net_device *netdev)
+
 {
 	struct be_adapter *adapter = netdev_priv(netdev);
 	struct be_tx_obj *tx_obj = &adapter->tx_obj;
@@ -440,23 +444,28 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
 	wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb);
 
 	copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
+	if (copied) {
+		/* record the sent skb in the sent_skb table */
+		BUG_ON(tx_obj->sent_skb_list[start]);
+		tx_obj->sent_skb_list[start] = skb;
+
+		/* Ensure txq has space for the next skb; Else stop the queue
+		 * *BEFORE* ringing the tx doorbell, so that we serialze the
+		 * tx compls of the current transmit which'll wake up the queue
+		 */
+		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
+								txq->len) {
+			netif_stop_queue(netdev);
+			stopped = true;
+		}
 
-	/* record the sent skb in the sent_skb table */
-	BUG_ON(tx_obj->sent_skb_list[start]);
-	tx_obj->sent_skb_list[start] = skb;
+		be_txq_notify(adapter, txq->id, wrb_cnt);
 
-	/* Ensure that txq has space for the next skb; Else stop the queue
-	 * *BEFORE* ringing the tx doorbell, so that we serialze the
-	 * tx compls of the current transmit which'll wake up the queue
-	 */
-	if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >= txq->len) {
-		netif_stop_queue(netdev);
-		stopped = true;
+		be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
+	} else {
+		txq->head = start;
+		dev_kfree_skb_any(skb);
 	}
-
-	be_txq_notify(adapter, txq->id, wrb_cnt);
-
-	be_tx_stats_update(adapter, wrb_cnt, copied, stopped);
 	return NETDEV_TX_OK;
 }
 
@@ -958,10 +967,8 @@ static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
 static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
 {
 	struct be_queue_info *txq = &adapter->tx_obj.q;
-	struct be_eth_wrb *wrb;
 	struct sk_buff **sent_skbs = adapter->tx_obj.sent_skb_list;
 	struct sk_buff *sent_skb;
-	u64 busaddr;
 	u16 cur_index, num_wrbs = 0;
 
 	cur_index = txq->tail;
@@ -971,19 +978,12 @@ static void be_tx_compl_process(struct be_adapter *adapter, u16 last_index)
 
 	do {
 		cur_index = txq->tail;
-		wrb = queue_tail_node(txq);
-		be_dws_le_to_cpu(wrb, sizeof(*wrb));
-		busaddr = ((u64)wrb->frag_pa_hi << 32) | (u64)wrb->frag_pa_lo;
-		if (busaddr != 0) {
-			pci_unmap_single(adapter->pdev, busaddr,
-					wrb->frag_len, PCI_DMA_TODEVICE);
-		}
 		num_wrbs++;
 		queue_tail_inc(txq);
 	} while (cur_index != last_index);
 
 	atomic_sub(num_wrbs, &txq->used);
-
+	skb_dma_unmap(&adapter->pdev->dev, sent_skb, DMA_TO_DEVICE);
 	kfree_skb(sent_skb);
 }
 
@@ -1892,6 +1892,8 @@ static void be_netdev_init(struct net_device *netdev)
 
 	adapter->rx_csum = true;
 
+	netif_set_gso_max_size(netdev, 65535);
+
 	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
 
 	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
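
A note on the second change: netif_set_gso_max_size() caps the size of
the GSO skbs the stack will build for this device, and 65535 is the
largest value a 16-bit IP total-length field allows. A minimal
illustration (hypothetical_netdev_init() is a stand-in name, not the
driver's function):

#include <linux/netdevice.h>

static void hypothetical_netdev_init(struct net_device *netdev)
{
        /* let the stack build TSO/GSO skbs up to the 64KB IP limit */
        netif_set_gso_max_size(netdev, 65535);
}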