 drivers/net/ethernet/emulex/benet/be_main.c | 126 ++++++++++++++++++++-----------
 1 file changed, 86 insertions(+), 40 deletions(-)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 8477fb4de614..a6df4c96150f 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -811,7 +811,80 @@ static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
 	}
 }
 
-/* Returns the number of WRBs used up by the skb */
+/* Grab a WRB header for xmit */
+static u16 be_tx_get_wrb_hdr(struct be_tx_obj *txo)
+{
+	u16 head = txo->q.head;
+
+	queue_head_inc(&txo->q);
+	return head;
+}
+
+/* Set up the WRB header for xmit */
+static void be_tx_setup_wrb_hdr(struct be_adapter *adapter,
+				struct be_tx_obj *txo,
+				struct be_wrb_params *wrb_params,
+				struct sk_buff *skb, u16 head)
+{
+	u32 num_frags = skb_wrb_cnt(skb);
+	struct be_queue_info *txq = &txo->q;
+	struct be_eth_hdr_wrb *hdr = queue_index_node(txq, head);
+
+	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
+	be_dws_cpu_to_le(hdr, sizeof(*hdr));
+
+	BUG_ON(txo->sent_skb_list[head]);
+	txo->sent_skb_list[head] = skb;
+	txo->last_req_hdr = head;
+	atomic_add(num_frags, &txq->used);
+	txo->last_req_wrb_cnt = num_frags;
+	txo->pend_wrb_cnt += num_frags;
+}
+
+/* Setup a WRB fragment (buffer descriptor) for xmit */
+static void be_tx_setup_wrb_frag(struct be_tx_obj *txo, dma_addr_t busaddr,
+				 int len)
+{
+	struct be_eth_wrb *wrb;
+	struct be_queue_info *txq = &txo->q;
+
+	wrb = queue_head_node(txq);
+	wrb_fill(wrb, busaddr, len);
+	queue_head_inc(txq);
+}
+
+/* Bring the queue back to the state it was in before be_xmit_enqueue() routine
+ * was invoked. The producer index is restored to the previous packet and the
+ * WRBs of the current packet are unmapped. Invoked to handle tx setup errors.
+ */
+static void be_xmit_restore(struct be_adapter *adapter,
+			    struct be_tx_obj *txo, u16 head, bool map_single,
+			    u32 copied)
+{
+	struct device *dev;
+	struct be_eth_wrb *wrb;
+	struct be_queue_info *txq = &txo->q;
+
+	dev = &adapter->pdev->dev;
+	txq->head = head;
+
+	/* skip the first wrb (hdr); it's not mapped */
+	queue_head_inc(txq);
+	while (copied) {
+		wrb = queue_head_node(txq);
+		unmap_tx_frag(dev, wrb, map_single);
+		map_single = false;
+		copied -= le32_to_cpu(wrb->frag_len);
+		queue_head_inc(txq);
+	}
+
+	txq->head = head;
+}
+
+/* Enqueue the given packet for transmit. This routine allocates WRBs for the
+ * packet, dma maps the packet buffers and sets up the WRBs. Returns the number
+ * of WRBs used up by the packet.
+ */
 static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
 			   struct sk_buff *skb,
 			   struct be_wrb_params *wrb_params)
@@ -819,70 +892,43 @@ static u32 be_xmit_enqueue(struct be_adapter *adapter, struct be_tx_obj *txo,
 	u32 i, copied = 0, wrb_cnt = skb_wrb_cnt(skb);
 	struct device *dev = &adapter->pdev->dev;
 	struct be_queue_info *txq = &txo->q;
-	struct be_eth_hdr_wrb *hdr;
 	bool map_single = false;
-	struct be_eth_wrb *wrb;
-	dma_addr_t busaddr;
 	u16 head = txq->head;
+	dma_addr_t busaddr;
+	int len;
 
-	hdr = queue_head_node(txq);
-	wrb_fill_hdr(adapter, hdr, wrb_params, skb);
-	be_dws_cpu_to_le(hdr, sizeof(*hdr));
-
-	queue_head_inc(txq);
+	head = be_tx_get_wrb_hdr(txo);
 
 	if (skb->len > skb->data_len) {
-		int len = skb_headlen(skb);
+		len = skb_headlen(skb);
 
 		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, busaddr))
 			goto dma_err;
 		map_single = true;
-		wrb = queue_head_node(txq);
-		wrb_fill(wrb, busaddr, len);
-		queue_head_inc(txq);
+		be_tx_setup_wrb_frag(txo, busaddr, len);
 		copied += len;
 	}
 
 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
 		const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		len = skb_frag_size(frag);
 
-		busaddr = skb_frag_dma_map(dev, frag, 0,
-					   skb_frag_size(frag), DMA_TO_DEVICE);
+		busaddr = skb_frag_dma_map(dev, frag, 0, len, DMA_TO_DEVICE);
 		if (dma_mapping_error(dev, busaddr))
 			goto dma_err;
-		wrb = queue_head_node(txq);
-		wrb_fill(wrb, busaddr, skb_frag_size(frag));
-		queue_head_inc(txq);
-		copied += skb_frag_size(frag);
+		be_tx_setup_wrb_frag(txo, busaddr, len);
+		copied += len;
 	}
 
-	BUG_ON(txo->sent_skb_list[head]);
-	txo->sent_skb_list[head] = skb;
-	txo->last_req_hdr = head;
-	atomic_add(wrb_cnt, &txq->used);
-	txo->last_req_wrb_cnt = wrb_cnt;
-	txo->pend_wrb_cnt += wrb_cnt;
+	be_tx_setup_wrb_hdr(adapter, txo, wrb_params, skb, head);
 
 	be_tx_stats_update(txo, skb);
 	return wrb_cnt;
 
 dma_err:
-	/* Bring the queue back to the state it was in before this
-	 * routine was invoked.
-	 */
-	txq->head = head;
-	/* skip the first wrb (hdr); it's not mapped */
-	queue_head_inc(txq);
-	while (copied) {
-		wrb = queue_head_node(txq);
-		unmap_tx_frag(dev, wrb, map_single);
-		map_single = false;
-		copied -= le32_to_cpu(wrb->frag_len);
-		adapter->drv_stats.dma_map_errors++;
-		queue_head_inc(txq);
-	}
-	txq->head = head;
+	adapter->drv_stats.dma_map_errors++;
+	be_xmit_restore(adapter, txo, head, map_single, copied);
 	return 0;
 }
 
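The core of this refactor is an ordering discipline around the TX ring's producer index: be_tx_get_wrb_hdr() reserves the head slot for the header WRB but leaves it empty, the fragment WRBs are DMA-mapped and filled first, and only once every mapping has succeeded does be_tx_setup_wrb_hdr() write the header and account the used entries; on a mapping failure, be_xmit_restore() rewinds the producer index and unmaps whatever was already mapped. The sketch below is a minimal, self-contained model of that reserve/fill/publish/unwind pattern, not the driver's code: the ring, desc, ring_enqueue and fake_map names are invented for illustration, and DMA mapping is stubbed with a helper that can be told to fail at a given fragment.

/* Hypothetical model of the reserve/fill/publish/unwind pattern used by
 * be_xmit_enqueue(). None of these names are the driver's API.
 */
#include <stdbool.h>
#include <stdio.h>

#define RING_LEN 8                      /* power of two, like a real txq */

struct desc { unsigned int len; };      /* stand-in for struct be_eth_wrb */

struct ring {
        struct desc slots[RING_LEN];
        unsigned int head;              /* producer index */
};

static unsigned int ring_advance(struct ring *r)
{
        unsigned int idx = r->head;

        r->head = (r->head + 1) % RING_LEN;
        return idx;
}

/* Pretend DMA mapping that fails for fragment fail_at, to exercise unwind. */
static bool fake_map(int i, int fail_at)
{
        return i != fail_at;
}

/* Returns the number of slots consumed, or 0 on error (like be_xmit_enqueue). */
static int ring_enqueue(struct ring *r, int nfrags, int fail_at)
{
        unsigned int head = ring_advance(r);    /* reserve hdr slot, fill later */
        int i;

        for (i = 0; i < nfrags; i++) {
                if (!fake_map(i, fail_at))
                        goto err;               /* mapping failed: unwind */
                r->slots[ring_advance(r)].len = 64 * (i + 1);
        }

        /* All fragments mapped: only now publish the header descriptor. */
        r->slots[head].len = 0;
        return nfrags + 1;

err:
        /* Restore the producer index; a real driver would also unmap the
         * i fragments already mapped, as be_xmit_restore() does.
         */
        r->head = head;
        return 0;
}

int main(void)
{
        struct ring r = { .head = 0 };
        int used;

        used = ring_enqueue(&r, 3, -1);
        printf("ok: used %d slots, head=%u\n", used, r.head);   /* 4, head=4 */
        used = ring_enqueue(&r, 3, 1);
        printf("fail: used %d slots, head=%u\n", used, r.head); /* 0, head=4 */
        return 0;
}

Publishing the header last keeps the error path simple: nothing durable is written to the reserved head slot until every fragment has mapped, so be_xmit_restore() only has to reset txq->head and unmap the completed fragment mappings, mirroring the while (copied) loop that this patch moves out of be_xmit_enqueue().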