author		David S. Miller <davem@davemloft.net>	2018-10-16 13:09:59 -0400
committer	David S. Miller <davem@davemloft.net>	2018-10-16 13:09:59 -0400
commit		29ce85f349c2f77a3c035f70fafeaa8b22608328 (patch)
tree		0daebb53cf98740b417c343b8490f2d75a231d36
parent		eb385146f90da429a94fb448905a32d7235e6fbf (diff)
parent		bcdb12b70c5266a78430a248e57c8c122839b951 (diff)
Merge branch 'hns3-Some-cleanup-and-bugfix-for-desc-filling'
Yunsheng Lin says:

====================
Some cleanup and bugfix for desc filling

When retransmitting packets, skb_cow_head, which is called in hns3_set_tso, may clone a new header. The driver clears the checksum of the header after doing the DMA map, so the HW reads the old header whose L3 checksum has not been cleared and calculates a wrong L3 checksum.

Also, when sending a big fragment using multiple buffer descriptors, hns3 does one mapping but multiple unmappings when TX is done, which may cause an unmapping problem.

This patchset does some cleanup before fixing the above problems.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
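The second problem above comes down to mapping ownership: when one DMA mapping backs several buffer descriptors, only one of them may unmap it. Below is a minimal sketch of the ownership pattern the series moves toward; struct tx_bd_cb, fill_one_buffer() and clear_one_buffer() are simplified, hypothetical stand-ins for the driver's desc_cb handling, not the real hns3 code.

#include <linux/dma-mapping.h>

/* Sketch: each BD control block records whether it owns a DMA mapping via a
 * non-zero length, so map and unmap stay one-to-one even when a single
 * mapping is shared by several descriptors.
 */
struct tx_bd_cb {
	dma_addr_t dma;
	unsigned int length;	/* 0 => this BD does not own a mapping */
};

static int fill_one_buffer(struct device *dev, struct tx_bd_cb *cb,
			   void *data, unsigned int size)
{
	dma_addr_t dma = dma_map_single(dev, data, size, DMA_TO_DEVICE);

	if (dma_mapping_error(dev, dma))
		return -ENOMEM;

	cb->dma = dma;
	cb->length = size;	/* this BD owns the mapping */
	return 0;
}

static void clear_one_buffer(struct device *dev, struct tx_bd_cb *cb)
{
	if (!cb->length)	/* continuation BD: mapping owned elsewhere */
		return;

	dma_unmap_single(dev, cb->dma, cb->length, DMA_TO_DEVICE);
	cb->length = 0;		/* never unmap the same BD twice */
}

In the actual diff below the same idea appears as desc_cb->length being recorded by hns3_fill_desc() and the new "else if (... length)" checks in hns3_clear_desc() and hns3_unmap_buffer().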
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3_enet.c	137
-rw-r--r--	drivers/net/ethernet/hisilicon/hns3/hns3_enet.h	3
2 files changed, 62 insertions(+), 78 deletions(-)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
index ce93fcce2732..76ce2f21178b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -977,35 +977,28 @@ static int hns3_fill_desc_vtags(struct sk_buff *skb,
 }
 
 static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
-			  int size, dma_addr_t dma, int frag_end,
-			  enum hns_desc_type type)
+			  int size, int frag_end, enum hns_desc_type type)
 {
 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
 	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
+	struct device *dev = ring_to_dev(ring);
 	u32 ol_type_vlan_len_msec = 0;
 	u16 bdtp_fe_sc_vld_ra_ri = 0;
+	struct skb_frag_struct *frag;
+	unsigned int frag_buf_num;
 	u32 type_cs_vlan_tso = 0;
 	struct sk_buff *skb;
 	u16 inner_vtag = 0;
 	u16 out_vtag = 0;
+	unsigned int k;
+	int sizeoflast;
 	u32 paylen = 0;
+	dma_addr_t dma;
 	u16 mss = 0;
 	u8 ol4_proto;
 	u8 il4_proto;
 	int ret;
 
-	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
-	desc_cb->priv = priv;
-	desc_cb->length = size;
-	desc_cb->dma = dma;
-	desc_cb->type = type;
-
-	/* now, fill the descriptor */
-	desc->addr = cpu_to_le64(dma);
-	desc->tx.send_size = cpu_to_le16((u16)size);
-	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
-	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
-
 	if (type == DESC_TYPE_SKB) {
 		skb = (struct sk_buff *)priv;
 		paylen = skb->len;
@@ -1046,38 +1039,47 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 		desc->tx.mss = cpu_to_le16(mss);
 		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
 		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
-	}
 
-	/* move ring pointer to next.*/
-	ring_ptr_move_fw(ring, next_to_use);
+		dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
+	} else {
+		frag = (struct skb_frag_struct *)priv;
+		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
+	}
 
-	return 0;
-}
+	if (dma_mapping_error(ring->dev, dma)) {
+		ring->stats.sw_err_cnt++;
+		return -ENOMEM;
+	}
 
-static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
-			      int size, dma_addr_t dma, int frag_end,
-			      enum hns_desc_type type)
-{
-	unsigned int frag_buf_num;
-	unsigned int k;
-	int sizeoflast;
-	int ret;
+	desc_cb->length = size;
 
 	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
 	sizeoflast = size % HNS3_MAX_BD_SIZE;
 	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
 
-	/* When the frag size is bigger than hardware, split this frag */
+	/* When frag size is bigger than hardware limit, split this frag */
 	for (k = 0; k < frag_buf_num; k++) {
-		ret = hns3_fill_desc(ring, priv,
-				     (k == frag_buf_num - 1) ?
-				     sizeoflast : HNS3_MAX_BD_SIZE,
-				     dma + HNS3_MAX_BD_SIZE * k,
-				     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
-				     (type == DESC_TYPE_SKB && !k) ?
-				     DESC_TYPE_SKB : DESC_TYPE_PAGE);
-		if (ret)
-			return ret;
+		/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
+		desc_cb->priv = priv;
+		desc_cb->dma = dma + HNS3_MAX_BD_SIZE * k;
+		desc_cb->type = (type == DESC_TYPE_SKB && !k) ?
+				DESC_TYPE_SKB : DESC_TYPE_PAGE;
+
+		/* now, fill the descriptor */
+		desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k);
+		desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ?
+				(u16)sizeoflast : (u16)HNS3_MAX_BD_SIZE);
+		hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri,
+				       frag_end && (k == frag_buf_num - 1) ?
+				       1 : 0);
+		desc->tx.bdtp_fe_sc_vld_ra_ri =
+			cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
+
+		/* move ring pointer to next.*/
+		ring_ptr_move_fw(ring, next_to_use);
+
+		desc_cb = &ring->desc_cb[ring->next_to_use];
+		desc = &ring->desc[ring->next_to_use];
 	}
 
 	return 0;
@@ -1133,7 +1135,7 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
 	return 0;
 }
 
-static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
+static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
 {
 	struct device *dev = ring_to_dev(ring);
 	unsigned int i;
@@ -1149,12 +1151,14 @@ static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
 				 ring->desc_cb[ring->next_to_use].dma,
 				 ring->desc_cb[ring->next_to_use].length,
 				 DMA_TO_DEVICE);
-		else
+		else if (ring->desc_cb[ring->next_to_use].length)
 			dma_unmap_page(dev,
 				       ring->desc_cb[ring->next_to_use].dma,
 				       ring->desc_cb[ring->next_to_use].length,
 				       DMA_TO_DEVICE);
 
+		ring->desc_cb[ring->next_to_use].length = 0;
+
 		/* rollback one */
 		ring_ptr_move_bw(ring, next_to_use);
 	}
@@ -1166,12 +1170,10 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	struct hns3_nic_ring_data *ring_data =
 		&tx_ring_data(priv, skb->queue_mapping);
 	struct hns3_enet_ring *ring = ring_data->ring;
-	struct device *dev = priv->dev;
 	struct netdev_queue *dev_queue;
 	struct skb_frag_struct *frag;
 	int next_to_use_head;
 	int next_to_use_frag;
-	dma_addr_t dma;
 	int buf_num;
 	int seg_num;
 	int size;
@@ -1206,35 +1208,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	next_to_use_head = ring->next_to_use;
 
-	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
-	if (dma_mapping_error(dev, dma)) {
-		netdev_err(netdev, "TX head DMA map failed\n");
-		ring->stats.sw_err_cnt++;
-		goto out_err_tx_ok;
-	}
-
-	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
-				  DESC_TYPE_SKB);
+	ret = priv->ops.fill_desc(ring, skb, size, seg_num == 1 ? 1 : 0,
+				  DESC_TYPE_SKB);
 	if (ret)
-		goto head_dma_map_err;
+		goto head_fill_err;
 
 	next_to_use_frag = ring->next_to_use;
 	/* Fill the fragments */
 	for (i = 1; i < seg_num; i++) {
 		frag = &skb_shinfo(skb)->frags[i - 1];
 		size = skb_frag_size(frag);
-		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
-		if (dma_mapping_error(dev, dma)) {
-			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
-			ring->stats.sw_err_cnt++;
-			goto frag_dma_map_err;
-		}
-		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
-					  seg_num - 1 == i ? 1 : 0,
-					  DESC_TYPE_PAGE);
+
+		ret = priv->ops.fill_desc(ring, frag, size,
+					  seg_num - 1 == i ? 1 : 0,
+					  DESC_TYPE_PAGE);
 
 		if (ret)
-			goto frag_dma_map_err;
+			goto frag_fill_err;
 	}
 
 	/* Complete translate all packets */
@@ -1247,11 +1237,11 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
 	return NETDEV_TX_OK;
 
-frag_dma_map_err:
-	hns_nic_dma_unmap(ring, next_to_use_frag);
+frag_fill_err:
+	hns3_clear_desc(ring, next_to_use_frag);
 
-head_dma_map_err:
-	hns_nic_dma_unmap(ring, next_to_use_head);
+head_fill_err:
+	hns3_clear_desc(ring, next_to_use_head);
 
 out_err_tx_ok:
 	dev_kfree_skb_any(skb);
@@ -1313,13 +1303,10 @@ static int hns3_nic_set_features(struct net_device *netdev,
 	int ret;
 
 	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
-		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
-			priv->ops.fill_desc = hns3_fill_desc_tso;
+		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
 			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
-		} else {
-			priv->ops.fill_desc = hns3_fill_desc;
+		else
 			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-		}
 	}
 
 	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
@@ -1890,7 +1877,7 @@ static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
 	if (cb->type == DESC_TYPE_SKB)
 		dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
 				 ring_to_dma_dir(ring));
-	else
+	else if (cb->length)
 		dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
 			       ring_to_dma_dir(ring));
 }
@@ -3247,14 +3234,12 @@ static void hns3_nic_set_priv_ops(struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 
+	priv->ops.fill_desc = hns3_fill_desc;
 	if ((netdev->features & NETIF_F_TSO) ||
-	    (netdev->features & NETIF_F_TSO6)) {
-		priv->ops.fill_desc = hns3_fill_desc_tso;
+	    (netdev->features & NETIF_F_TSO6))
 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
-	} else {
-		priv->ops.fill_desc = hns3_fill_desc;
+	else
 		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-	}
 }
 
 static int hns3_client_init(struct hnae3_handle *handle)
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
index f25b281ff2aa..71cfca132d0b 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -419,8 +419,7 @@ struct hns3_nic_ring_data {
 
 struct hns3_nic_ops {
 	int (*fill_desc)(struct hns3_enet_ring *ring, void *priv,
-			 int size, dma_addr_t dma, int frag_end,
-			 enum hns_desc_type type);
+			 int size, int frag_end, enum hns_desc_type type);
 	int (*maybe_stop_tx)(struct sk_buff **out_skb,
 			     int *bnum, struct hns3_enet_ring *ring);
 	void (*get_rxd_bnum)(u32 bnum_flag, int *out_bnum);