-rw-r--r--	drivers/net/vmxnet3/vmxnet3_drv.c	73
1 file changed, 46 insertions(+), 27 deletions(-)
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 0cbf520cea77..fc895d0e85d9 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -814,7 +814,7 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
 
 
 /*
- * parse and copy relevant protocol headers:
+ * parse relevant protocol headers:
  * For a tso pkt, relevant headers are L2/3/4 including options
  * For a pkt requesting csum offloading, they are L2/3 and may include L4
  * if it's a TCP/UDP pkt
@@ -827,15 +827,14 @@ vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
  * Other effects:
  *    1. related *ctx fields are updated.
  *    2. ctx->copy_size is # of bytes copied
- *    3. the portion copied is guaranteed to be in the linear part
+ *    3. the portion to be copied is guaranteed to be in the linear part
  *
  */
 static int
-vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
-			   struct vmxnet3_tx_ctx *ctx,
-			   struct vmxnet3_adapter *adapter)
+vmxnet3_parse_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+		  struct vmxnet3_tx_ctx *ctx,
+		  struct vmxnet3_adapter *adapter)
 {
-	struct Vmxnet3_TxDataDesc *tdd;
 	u8 protocol = 0;
 
 	if (ctx->mss) {	/* TSO */
@@ -892,16 +891,34 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		return 0;
 	}
 
+	return 1;
+err:
+	return -1;
+}
+
+/*
+ * copy relevant protocol headers to the transmit ring:
+ * For a tso pkt, relevant headers are L2/3/4 including options
+ * For a pkt requesting csum offloading, they are L2/3 and may include L4
+ * if it's a TCP/UDP pkt
+ *
+ *
+ * Note that this requires that vmxnet3_parse_hdr be called first to set
+ * the appropriate bits in ctx
+ */
+static void
+vmxnet3_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
+		 struct vmxnet3_tx_ctx *ctx,
+		 struct vmxnet3_adapter *adapter)
+{
+	struct Vmxnet3_TxDataDesc *tdd;
+
 	tdd = tq->data_ring.base + tq->tx_ring.next2fill;
 
 	memcpy(tdd->data, skb->data, ctx->copy_size);
 	netdev_dbg(adapter->netdev,
 		   "copy %u bytes to dataRing[%u]\n",
 		   ctx->copy_size, tq->tx_ring.next2fill);
-	return 1;
-
-err:
-	return -1;
 }
 
 
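The hunk above splits the old parse-and-copy helper into two halves with an implicit contract: vmxnet3_parse_hdr() only inspects the skb and records what it finds in ctx (returning 1 when there is a header region to copy and -1 on a malformed packet, with 0 on the early-return path kept as context above), while vmxnet3_copy_hdr() commits ctx->copy_size bytes into the tx data ring and, per the new comment, must only run after a successful parse. Below is a minimal user-space sketch of that two-phase pattern; the toy_* names and simplified types are invented for illustration and are not the driver's real structures.

#include <stdio.h>
#include <string.h>

struct toy_ctx {
	unsigned int copy_size;		/* set by parse, consumed by copy */
};

struct toy_ring {
	unsigned char data[128];	/* stands in for tdd->data */
	unsigned int next2fill;
};

/* Phase 1: inspect the packet only. No ring state is touched, so this
 * can safely run before any queue lock is taken. Returns 1 if a header
 * region was found, -1 if the packet is too short to parse. */
static int toy_parse_hdr(const unsigned char *pkt, unsigned int len,
			 struct toy_ctx *ctx)
{
	if (len < 14)			/* shorter than an Ethernet header */
		return -1;
	ctx->copy_size = 14;		/* pretend only L2 is relevant */
	return 1;
}

/* Phase 2: commit the bytes to the ring. In the driver this runs with
 * tq->tx_lock held, after the descriptor-space check has passed. */
static void toy_copy_hdr(const unsigned char *pkt, struct toy_ctx *ctx,
			 struct toy_ring *ring)
{
	memcpy(ring->data, pkt, ctx->copy_size);
	printf("copied %u bytes to slot %u\n", ctx->copy_size,
	       ring->next2fill);
}

int main(void)
{
	unsigned char pkt[64] = { 0 };
	struct toy_ctx ctx = { 0 };
	struct toy_ring ring = { .next2fill = 0 };

	if (toy_parse_hdr(pkt, sizeof(pkt), &ctx) > 0)
		toy_copy_hdr(pkt, &ctx, &ring);	/* "locked" phase */
	return 0;
}

Keeping the ring untouched in phase one is what lets the caller defer taking tq->tx_lock until the copy, which is exactly what the remaining hunks do.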
@@ -998,22 +1015,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		}
 	}
 
-	spin_lock_irqsave(&tq->tx_lock, flags);
-
-	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
-		tq->stats.tx_ring_full++;
-		netdev_dbg(adapter->netdev,
-			   "tx queue stopped on %s, next2comp %u"
-			   " next2fill %u\n", adapter->netdev->name,
-			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);
-
-		vmxnet3_tq_stop(tq, adapter);
-		spin_unlock_irqrestore(&tq->tx_lock, flags);
-		return NETDEV_TX_BUSY;
-	}
-
-
-	ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
+	ret = vmxnet3_parse_hdr(skb, tq, &ctx, adapter);
 	if (ret >= 0) {
 		BUG_ON(ret <= 0 && ctx.copy_size != 0);
 		/* hdrs parsed, check against other limits */
@@ -1033,9 +1035,26 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
 		}
 	} else {
 		tq->stats.drop_hdr_inspect_err++;
-		goto unlock_drop_pkt;
+		goto drop_pkt;
 	}
 
+	spin_lock_irqsave(&tq->tx_lock, flags);
+
+	if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
+		tq->stats.tx_ring_full++;
+		netdev_dbg(adapter->netdev,
+			   "tx queue stopped on %s, next2comp %u"
+			   " next2fill %u\n", adapter->netdev->name,
+			   tq->tx_ring.next2comp, tq->tx_ring.next2fill);
+
+		vmxnet3_tq_stop(tq, adapter);
+		spin_unlock_irqrestore(&tq->tx_lock, flags);
+		return NETDEV_TX_BUSY;
+	}
+
+
+	vmxnet3_copy_hdr(skb, tq, &ctx, adapter);
+
 	/* fill tx descs related to addr & len */
 	if (vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter))
 		goto unlock_drop_pkt;
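
Combined with the previous hunk, this reorders vmxnet3_tq_xmit(): header parsing now happens before tq->tx_lock is taken, and only the descriptor-space check, the header copy, and the descriptor fill remain inside the critical section; a failed parse takes the new drop_pkt label without ever touching the lock. Below is a compressed, runnable sketch of the resulting control flow, using a pthread mutex as a stand-in for spin_lock_irqsave() and invented toy_* helpers in place of the driver calls.

#include <pthread.h>
#include <stdio.h>

#define TOY_TX_OK   0
#define TOY_TX_BUSY 1

static pthread_mutex_t toy_tx_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned int toy_descs_avail = 4;	/* free tx descriptors */

static int  toy_parse_hdr(void) { return 1; }	/* header inspection */
static void toy_copy_hdr(void)  { puts("hdr copied to data ring"); }
static void toy_map_pkt(void)   { puts("tx descs filled"); }

static int toy_xmit(unsigned int count)
{
	/* 1. Parse first, with no lock held. In the real driver this is
	 *    the step that may pull header bytes into the linear part of
	 *    the skb, work the patch moves out of the critical section. */
	if (toy_parse_hdr() < 0)
		return TOY_TX_OK;		/* drop_pkt path */

	/* 2. Only now take the ring lock and check for space. */
	pthread_mutex_lock(&toy_tx_lock);
	if (count > toy_descs_avail) {
		pthread_mutex_unlock(&toy_tx_lock);
		return TOY_TX_BUSY;		/* queue stopped, retry */
	}

	/* 3. Commit under the lock: copy headers, fill descriptors. */
	toy_copy_hdr();
	toy_map_pkt();
	pthread_mutex_unlock(&toy_tx_lock);
	return TOY_TX_OK;
}

int main(void)
{
	return toy_xmit(2) == TOY_TX_OK ? 0 : 1;
}

Build with cc -pthread. The observable outcomes mirror the patched driver: BUSY when the ring is full (with the queue stopped before unlocking), a drop that still reports success when header inspection fails, and the header copy only once descriptor space is guaranteed.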