author		Zhao Chen <zhaochen6@huawei.com>	2018-10-18 11:02:51 -0400
committer	David S. Miller <davem@davemloft.net>	2018-10-18 19:44:37 -0400
commit		cc18a7543d2f63a2c93fc61cfa7fd8be5464f75e (patch)
tree		f44fdb43030510881d2e3074e67579041c84f70a
parent		40b06553c906a56ae31677b3ecbd49546947698d (diff)
net-next/hinic: add checksum offload and TSO support
This patch adds checksum offload and TSO support for the HiNIC
driver. Performance tests (iperf) show more than a 100% improvement
in TCP streams.
Signed-off-by: Zhao Chen <zhaochen6@huawei.com>
Signed-off-by: Xue Chaojing <xuechaojing@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
 drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h |   2
 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c  | 121
 drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h  |  27
 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c  |  14
 drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h  |   2
 drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h |  97
 drivers/net/ethernet/huawei/hinic/hinic_main.c   |  23
 drivers/net/ethernet/huawei/hinic/hinic_port.c   |  32
 drivers/net/ethernet/huawei/hinic/hinic_port.h   |  18
 drivers/net/ethernet/huawei/hinic/hinic_tx.c     | 295
 10 files changed, 571 insertions(+), 60 deletions(-)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index 0f5563f3b779..097b5502603f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -58,6 +58,8 @@ enum hinic_port_cmd {
 
         HINIC_PORT_CMD_GET_GLOBAL_QPN = 102,
 
+        HINIC_PORT_CMD_SET_TSO = 112,
+
         HINIC_PORT_CMD_GET_CAP = 170,
 };
 
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
index cb239627770f..967c993d5303 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.c
@@ -70,8 +70,6 @@
 #define SQ_MASKED_IDX(sq, idx)  ((idx) & (sq)->wq->mask)
 #define RQ_MASKED_IDX(rq, idx)  ((idx) & (rq)->wq->mask)
 
-#define TX_MAX_MSS_DEFAULT      0x3E00
-
 enum sq_wqe_type {
         SQ_NORMAL_WQE = 0,
 };
@@ -494,33 +492,16 @@ static void sq_prepare_ctrl(struct hinic_sq_ctrl *ctrl, u16 prod_idx,
                           HINIC_SQ_CTRL_SET(SQ_NORMAL_WQE, DATA_FORMAT) |
                           HINIC_SQ_CTRL_SET(ctrl_size, LEN);
 
-        ctrl->queue_info = HINIC_SQ_CTRL_SET(TX_MAX_MSS_DEFAULT,
-                                             QUEUE_INFO_MSS);
+        ctrl->queue_info = HINIC_SQ_CTRL_SET(HINIC_MSS_DEFAULT,
+                                             QUEUE_INFO_MSS) |
+                           HINIC_SQ_CTRL_SET(1, QUEUE_INFO_UC);
 }
 
 static void sq_prepare_task(struct hinic_sq_task *task)
 {
-        task->pkt_info0 =
-                HINIC_SQ_TASK_INFO0_SET(0, L2HDR_LEN) |
-                HINIC_SQ_TASK_INFO0_SET(HINIC_L4_OFF_DISABLE, L4_OFFLOAD) |
-                HINIC_SQ_TASK_INFO0_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
-                                        INNER_L3TYPE) |
-                HINIC_SQ_TASK_INFO0_SET(HINIC_VLAN_OFF_DISABLE,
-                                        VLAN_OFFLOAD) |
-                HINIC_SQ_TASK_INFO0_SET(HINIC_PKT_NOT_PARSED, PARSE_FLAG);
-
-        task->pkt_info1 =
-                HINIC_SQ_TASK_INFO1_SET(HINIC_MEDIA_UNKNOWN, MEDIA_TYPE) |
-                HINIC_SQ_TASK_INFO1_SET(0, INNER_L4_LEN) |
-                HINIC_SQ_TASK_INFO1_SET(0, INNER_L3_LEN);
-
-        task->pkt_info2 =
-                HINIC_SQ_TASK_INFO2_SET(0, TUNNEL_L4_LEN) |
-                HINIC_SQ_TASK_INFO2_SET(0, OUTER_L3_LEN) |
-                HINIC_SQ_TASK_INFO2_SET(HINIC_TUNNEL_L4TYPE_UNKNOWN,
-                                        TUNNEL_L4TYPE) |
-                HINIC_SQ_TASK_INFO2_SET(HINIC_OUTER_L3TYPE_UNKNOWN,
-                                        OUTER_L3TYPE);
+        task->pkt_info0 = 0;
+        task->pkt_info1 = 0;
+        task->pkt_info2 = 0;
 
         task->ufo_v6_identify = 0;
 
@@ -529,6 +510,86 @@ static void sq_prepare_task(struct hinic_sq_task *task)
         task->zero_pad = 0;
 }
 
+void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len)
+{
+        task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(len, L2HDR_LEN);
+}
+
+void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+                              enum hinic_l3_offload_type l3_type,
+                              u32 network_len)
+{
+        task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l3_type, OUTER_L3TYPE) |
+                           HINIC_SQ_TASK_INFO2_SET(network_len, OUTER_L3LEN);
+}
+
+void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+                             enum hinic_l3_offload_type l3_type,
+                             u32 network_len)
+{
+        task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l3_type, INNER_L3TYPE);
+        task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(network_len, INNER_L3LEN);
+}
+
+void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+                              enum hinic_l4_offload_type l4_type,
+                              u32 tunnel_len)
+{
+        task->pkt_info2 |= HINIC_SQ_TASK_INFO2_SET(l4_type, TUNNEL_L4TYPE) |
+                           HINIC_SQ_TASK_INFO2_SET(tunnel_len, TUNNEL_L4LEN);
+}
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+                           enum hinic_l4_offload_type l4_offload,
+                           u32 l4_len, u32 offset)
+{
+        u32 tcp_udp_cs = 0, sctp = 0;
+        u32 mss = HINIC_MSS_DEFAULT;
+
+        if (l4_offload == TCP_OFFLOAD_ENABLE ||
+            l4_offload == UDP_OFFLOAD_ENABLE)
+                tcp_udp_cs = 1;
+        else if (l4_offload == SCTP_OFFLOAD_ENABLE)
+                sctp = 1;
+
+        task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+        task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+        *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+                       HINIC_SQ_CTRL_SET(tcp_udp_cs, QUEUE_INFO_TCPUDP_CS) |
+                       HINIC_SQ_CTRL_SET(sctp, QUEUE_INFO_SCTP);
+
+        *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+        *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task, u32 *queue_info,
+                            enum hinic_l4_offload_type l4_offload,
+                            u32 l4_len, u32 offset, u32 ip_ident, u32 mss)
+{
+        u32 tso = 0, ufo = 0;
+
+        if (l4_offload == TCP_OFFLOAD_ENABLE)
+                tso = 1;
+        else if (l4_offload == UDP_OFFLOAD_ENABLE)
+                ufo = 1;
+
+        task->ufo_v6_identify = ip_ident;
+
+        task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(l4_offload, L4_OFFLOAD);
+        task->pkt_info0 |= HINIC_SQ_TASK_INFO0_SET(tso || ufo, TSO_FLAG);
+        task->pkt_info1 |= HINIC_SQ_TASK_INFO1_SET(l4_len, INNER_L4LEN);
+
+        *queue_info |= HINIC_SQ_CTRL_SET(offset, QUEUE_INFO_PLDOFF) |
+                       HINIC_SQ_CTRL_SET(tso, QUEUE_INFO_TSO) |
+                       HINIC_SQ_CTRL_SET(ufo, QUEUE_INFO_UFO) |
+                       HINIC_SQ_CTRL_SET(!!l4_offload, QUEUE_INFO_TCPUDP_CS);
+
+        /* set MSS value */
+        *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+        *queue_info |= HINIC_SQ_CTRL_SET(mss, QUEUE_INFO_MSS);
+}
+
 /**
  * hinic_sq_prepare_wqe - prepare wqe before insert to the queue
  * @sq: send queue
@@ -613,6 +674,16 @@ struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
 }
 
 /**
+ * hinic_sq_return_wqe - return the wqe to the sq
+ * @sq: send queue
+ * @wqe_size: the size of the wqe
+ **/
+void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size)
+{
+        hinic_return_wqe(sq->wq, wqe_size);
+}
+
+/**
  * hinic_sq_write_wqe - write the wqe to the sq
  * @sq: send queue
  * @prod_idx: pi of the wqe
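
Note on the helpers above: they only OR bits into the task and queue_info words, which is safe because sq_prepare_task() now zeroes those words. The one exception is the MSS field, which sq_prepare_ctrl() preloads with HINIC_MSS_DEFAULT, so both hinic_set_cs_inner_l4() and hinic_set_tso_inner_l4() clear it before writing the real value. A minimal userspace sketch of that clear-then-set pattern, with the mask and shift copied from this patch (the short macro names are local stand-ins, not the driver's):

    #include <stdint.h>
    #include <stdio.h>

    #define QUEUE_INFO_MSS_SHIFT 13
    #define QUEUE_INFO_MSS_MASK  0x3FFF

    #define MSS_SET(val)   (((uint32_t)(val) & QUEUE_INFO_MSS_MASK) << QUEUE_INFO_MSS_SHIFT)
    #define MSS_CLEAR(val) ((uint32_t)(val) & ~(QUEUE_INFO_MSS_MASK << QUEUE_INFO_MSS_SHIFT))

    int main(void)
    {
            /* Default written by sq_prepare_ctrl(). */
            uint32_t queue_info = MSS_SET(0x3E00);

            /* Clear the 14-bit field, then OR in the real MSS. */
            queue_info = MSS_CLEAR(queue_info);
            queue_info |= MSS_SET(1460);

            printf("mss = %u\n",
                   (queue_info >> QUEUE_INFO_MSS_SHIFT) & QUEUE_INFO_MSS_MASK);
            return 0;
    }

Running this prints mss = 1460; skipping the clear step would OR 1460 into the 0x3E00 default and leave a corrupt field (0x3FB4).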
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index 6c84f83ec283..a0dc63a4bfc7 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -149,6 +149,31 @@ int hinic_get_sq_free_wqebbs(struct hinic_sq *sq);
 
 int hinic_get_rq_free_wqebbs(struct hinic_rq *rq);
 
+void hinic_task_set_l2hdr(struct hinic_sq_task *task, u32 len);
+
+void hinic_task_set_outter_l3(struct hinic_sq_task *task,
+                              enum hinic_l3_offload_type l3_type,
+                              u32 network_len);
+
+void hinic_task_set_inner_l3(struct hinic_sq_task *task,
+                             enum hinic_l3_offload_type l3_type,
+                             u32 network_len);
+
+void hinic_task_set_tunnel_l4(struct hinic_sq_task *task,
+                              enum hinic_l4_offload_type l4_type,
+                              u32 tunnel_len);
+
+void hinic_set_cs_inner_l4(struct hinic_sq_task *task,
+                           u32 *queue_info,
+                           enum hinic_l4_offload_type l4_offload,
+                           u32 l4_len, u32 offset);
+
+void hinic_set_tso_inner_l4(struct hinic_sq_task *task,
+                            u32 *queue_info,
+                            enum hinic_l4_offload_type l4_offload,
+                            u32 l4_len,
+                            u32 offset, u32 ip_ident, u32 mss);
+
 void hinic_sq_prepare_wqe(struct hinic_sq *sq, u16 prod_idx,
                           struct hinic_sq_wqe *wqe, struct hinic_sge *sges,
                           int nr_sges);
@@ -159,6 +184,8 @@ void hinic_sq_write_db(struct hinic_sq *sq, u16 prod_idx, unsigned int wqe_size,
 struct hinic_sq_wqe *hinic_sq_get_wqe(struct hinic_sq *sq,
                                       unsigned int wqe_size, u16 *prod_idx);
 
+void hinic_sq_return_wqe(struct hinic_sq *sq, unsigned int wqe_size);
+
 void hinic_sq_write_wqe(struct hinic_sq *sq, u16 prod_idx,
                         struct hinic_sq_wqe *wqe, struct sk_buff *skb,
                         unsigned int wqe_size);
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
index 3e3181c089bd..f92f1bf3901a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.c
@@ -775,6 +775,20 @@ struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
 }
 
 /**
+ * hinic_return_wqe - return the wqe when transmit failed
+ * @wq: wq to return wqe
+ * @wqe_size: wqe size
+ **/
+void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size)
+{
+        int num_wqebbs = ALIGN(wqe_size, wq->wqebb_size) / wq->wqebb_size;
+
+        atomic_sub(num_wqebbs, &wq->prod_idx);
+
+        atomic_add(num_wqebbs, &wq->delta);
+}
+
+/**
  * hinic_put_wqe - return the wqe place to use for a new wqe
  * @wq: wq to return wqe
  * @wqe_size: wqe size
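
hinic_get_wqe() reserves WQEBBs by advancing wq->prod_idx and shrinking wq->delta, so hinic_return_wqe() simply reverses both operations. Unlike hinic_put_wqe(), which frees a WQE the hardware has completed, this hands back a reservation that was never posted. A rough userspace model of that accounting (reading delta as the free-WQEBB count is an assumption from the code shown here, not a documented contract):

    #include <stdatomic.h>
    #include <stdio.h>

    struct wq {
            atomic_int prod_idx;    /* next WQEBB on the producer side */
            atomic_int delta;       /* WQEBBs still free (assumed)     */
    };

    /* Mirror of hinic_return_wqe(): undo a reservation. */
    static void return_wqe(struct wq *wq, int num_wqebbs)
    {
            atomic_fetch_sub(&wq->prod_idx, num_wqebbs);
            atomic_fetch_add(&wq->delta, num_wqebbs);
    }

    int main(void)
    {
            struct wq wq = { .prod_idx = 0, .delta = 256 };

            /* Reserve 3 WQEBBs the way hinic_get_wqe() accounts them... */
            atomic_fetch_add(&wq.prod_idx, 3);
            atomic_fetch_sub(&wq.delta, 3);

            /* ...then give them back, e.g. because offload setup failed. */
            return_wqe(&wq, 3);

            printf("prod_idx=%d delta=%d\n", wq.prod_idx, wq.delta);
            return 0;
    }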
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
index 9c030a0f035e..9b66545ba563 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wq.h
@@ -104,6 +104,8 @@ void hinic_wq_free(struct hinic_wqs *wqs, struct hinic_wq *wq);
 struct hinic_hw_wqe *hinic_get_wqe(struct hinic_wq *wq, unsigned int wqe_size,
                                    u16 *prod_idx);
 
+void hinic_return_wqe(struct hinic_wq *wq, unsigned int wqe_size);
+
 void hinic_put_wqe(struct hinic_wq *wq, unsigned int wqe_size);
 
 struct hinic_hw_wqe *hinic_read_wqe(struct hinic_wq *wq, unsigned int wqe_size,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
index bc73485483c5..9754d6ed5f4a 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_wqe.h
@@ -62,19 +62,33 @@
         (((val) >> HINIC_CMDQ_WQE_HEADER_##member##_SHIFT) \
          & HINIC_CMDQ_WQE_HEADER_##member##_MASK)
 
 #define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_SHIFT    0
 #define HINIC_SQ_CTRL_TASKSECT_LEN_SHIFT        16
 #define HINIC_SQ_CTRL_DATA_FORMAT_SHIFT         22
 #define HINIC_SQ_CTRL_LEN_SHIFT                 29
 
 #define HINIC_SQ_CTRL_BUFDESC_SECT_LEN_MASK     0xFF
 #define HINIC_SQ_CTRL_TASKSECT_LEN_MASK         0x1F
 #define HINIC_SQ_CTRL_DATA_FORMAT_MASK          0x1
 #define HINIC_SQ_CTRL_LEN_MASK                  0x3
 
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT      13
-
-#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK       0x3FFF
+#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_SHIFT   2
+#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_SHIFT      10
+#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_SHIFT      11
+#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_SHIFT 12
+#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_SHIFT      13
+#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_SHIFT     27
+#define HINIC_SQ_CTRL_QUEUE_INFO_UC_SHIFT       28
+#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_SHIFT      29
+
+#define HINIC_SQ_CTRL_QUEUE_INFO_PLDOFF_MASK    0xFF
+#define HINIC_SQ_CTRL_QUEUE_INFO_UFO_MASK       0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_TSO_MASK       0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_TCPUDP_CS_MASK 0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_MSS_MASK       0x3FFF
+#define HINIC_SQ_CTRL_QUEUE_INFO_SCTP_MASK      0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_UC_MASK        0x1
+#define HINIC_SQ_CTRL_QUEUE_INFO_PRI_MASK       0x7
 
 #define HINIC_SQ_CTRL_SET(val, member)          \
                 (((u32)(val) & HINIC_SQ_CTRL_##member##_MASK) \
@@ -84,6 +98,10 @@
                 (((val) >> HINIC_SQ_CTRL_##member##_SHIFT) \
                  & HINIC_SQ_CTRL_##member##_MASK)
 
+#define HINIC_SQ_CTRL_CLEAR(val, member)        \
+                ((u32)(val) & (~(HINIC_SQ_CTRL_##member##_MASK \
+                 << HINIC_SQ_CTRL_##member##_SHIFT)))
+
 #define HINIC_SQ_TASK_INFO0_L2HDR_LEN_SHIFT     0
 #define HINIC_SQ_TASK_INFO0_L4_OFFLOAD_SHIFT    8
 #define HINIC_SQ_TASK_INFO0_INNER_L3TYPE_SHIFT  10
@@ -108,28 +126,28 @@
 
 /* 8 bits reserved */
 #define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_SHIFT    8
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_SHIFT  16
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_SHIFT  24
+#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_SHIFT   16
+#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_SHIFT   24
 
 /* 8 bits reserved */
 #define HINIC_SQ_TASK_INFO1_MEDIA_TYPE_MASK     0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L4_LEN_MASK   0xFF
-#define HINIC_SQ_TASK_INFO1_INNER_L3_LEN_MASK   0xFF
+#define HINIC_SQ_TASK_INFO1_INNER_L4LEN_MASK    0xFF
+#define HINIC_SQ_TASK_INFO1_INNER_L3LEN_MASK    0xFF
 
 #define HINIC_SQ_TASK_INFO1_SET(val, member)    \
         (((u32)(val) & HINIC_SQ_TASK_INFO1_##member##_MASK) << \
          HINIC_SQ_TASK_INFO1_##member##_SHIFT)
 
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_SHIFT 0
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_SHIFT  12
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 19
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_SHIFT  0
+#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_SHIFT   8
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_SHIFT 16
 /* 1 bit reserved */
-#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT  22
+#define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_SHIFT  24
 /* 8 bits reserved */
 
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4_LEN_MASK  0xFFF
-#define HINIC_SQ_TASK_INFO2_OUTER_L3_LEN_MASK   0x7F
-#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK  0x3
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4LEN_MASK   0xFF
+#define HINIC_SQ_TASK_INFO2_OUTER_L3LEN_MASK    0xFF
+#define HINIC_SQ_TASK_INFO2_TUNNEL_L4TYPE_MASK  0x7
 /* 1 bit reserved */
 #define HINIC_SQ_TASK_INFO2_OUTER_L3TYPE_MASK   0x3
 /* 8 bits reserved */
@@ -187,12 +205,15 @@
                         sizeof(struct hinic_sq_task) + \
                         (nr_sges) * sizeof(struct hinic_sq_bufdesc))
 
 #define HINIC_SCMD_DATA_LEN     16
+
+#define HINIC_MAX_SQ_BUFDESCS   17
 
-#define HINIC_MAX_SQ_BUFDESCS   17
+#define HINIC_SQ_WQE_MAX_SIZE   320
+#define HINIC_RQ_WQE_SIZE       32
 
-#define HINIC_SQ_WQE_MAX_SIZE   320
-#define HINIC_RQ_WQE_SIZE       32
+#define HINIC_MSS_DEFAULT       0x3E00
+#define HINIC_MSS_MIN           0x50
 
 enum hinic_l4offload_type {
         HINIC_L4_OFF_DISABLE = 0,
@@ -211,6 +232,26 @@ enum hinic_pkt_parsed {
         HINIC_PKT_PARSED = 1,
 };
 
+enum hinic_l3_offload_type {
+        L3TYPE_UNKNOWN = 0,
+        IPV6_PKT = 1,
+        IPV4_PKT_NO_CHKSUM_OFFLOAD = 2,
+        IPV4_PKT_WITH_CHKSUM_OFFLOAD = 3,
+};
+
+enum hinic_l4_offload_type {
+        OFFLOAD_DISABLE = 0,
+        TCP_OFFLOAD_ENABLE = 1,
+        SCTP_OFFLOAD_ENABLE = 2,
+        UDP_OFFLOAD_ENABLE = 3,
+};
+
+enum hinic_l4_tunnel_type {
+        NOT_TUNNEL,
+        TUNNEL_UDP_NO_CSUM,
+        TUNNEL_UDP_CSUM,
+};
+
 enum hinic_outer_l3type {
         HINIC_OUTER_L3TYPE_UNKNOWN = 0,
         HINIC_OUTER_L3TYPE_IPV6 = 1,
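
Several TASK_INFO2 shifts change here (OUTER_L3LEN moves from bit 12 to bit 8, TUNNEL_L4TYPE from 19 to 16, OUTER_L3TYPE from 22 to 24) along with the field widths, so one 32-bit word now packs a byte of tunnel L4 length, a byte of outer L3 length, a 3-bit tunnel type, and a 2-bit outer L3 type. A quick standalone check that the new layout is self-consistent (mask and shift values copied from the patch; the short macro names are local to the snippet):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define TUNNEL_L4LEN_SHIFT  0
    #define OUTER_L3LEN_SHIFT   8
    #define TUNNEL_L4TYPE_SHIFT 16
    #define OUTER_L3TYPE_SHIFT  24

    #define TUNNEL_L4LEN_MASK   0xFF
    #define OUTER_L3LEN_MASK    0xFF
    #define TUNNEL_L4TYPE_MASK  0x7
    #define OUTER_L3TYPE_MASK   0x3

    int main(void)
    {
            uint32_t a = TUNNEL_L4LEN_MASK << TUNNEL_L4LEN_SHIFT;   /* 0x000000FF */
            uint32_t b = OUTER_L3LEN_MASK << OUTER_L3LEN_SHIFT;     /* 0x0000FF00 */
            uint32_t c = TUNNEL_L4TYPE_MASK << TUNNEL_L4TYPE_SHIFT; /* 0x00070000 */
            uint32_t d = OUTER_L3TYPE_MASK << OUTER_L3TYPE_SHIFT;   /* 0x03000000 */

            /* The four fields must occupy disjoint bits. */
            assert(((a & b) | (a & c) | (a & d) | (b & c) | (b & d) | (c & d)) == 0);
            printf("pkt_info2 fields: %08x %08x %08x %08x\n", a, b, c, d);
            return 0;
    }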
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 4a8f82938ed5..fdf2bdb6b0d0 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -805,7 +805,8 @@ static const struct net_device_ops hinic_netdev_ops = {
 
 static void netdev_features_init(struct net_device *netdev)
 {
-        netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA;
+        netdev->hw_features = NETIF_F_SG | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM |
+                              NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6;
 
         netdev->vlan_features = netdev->hw_features;
 
@@ -863,6 +864,20 @@ static void link_status_event_handler(void *handle, void *buf_in, u16 in_size,
         *out_size = sizeof(*ret_link_status);
 }
 
+static int set_features(struct hinic_dev *nic_dev,
+                        netdev_features_t pre_features,
+                        netdev_features_t features, bool force_change)
+{
+        netdev_features_t changed = force_change ? ~0 : pre_features ^ features;
+        int err = 0;
+
+        if (changed & NETIF_F_TSO)
+                err = hinic_port_set_tso(nic_dev, (features & NETIF_F_TSO) ?
+                                         HINIC_TSO_ENABLE : HINIC_TSO_DISABLE);
+
+        return err;
+}
+
 /**
  * nic_dev_init - Initialize the NIC device
  * @pdev: the NIC pci device
@@ -963,7 +978,12 @@ static int nic_dev_init(struct pci_dev *pdev)
         hinic_hwdev_cb_register(nic_dev->hwdev, HINIC_MGMT_MSG_CMD_LINK_STATUS,
                                 nic_dev, link_status_event_handler);
 
+        err = set_features(nic_dev, 0, nic_dev->netdev->features, true);
+        if (err)
+                goto err_set_features;
+
         SET_NETDEV_DEV(netdev, &pdev->dev);
+
         err = register_netdev(netdev);
         if (err) {
                 dev_err(&pdev->dev, "Failed to register netdev\n");
@@ -973,6 +993,7 @@ static int nic_dev_init(struct pci_dev *pdev)
         return 0;
 
 err_reg_netdev:
+err_set_features:
         hinic_hwdev_cb_unregister(nic_dev->hwdev,
                                   HINIC_MGMT_MSG_CMD_LINK_STATUS);
         cancel_work_sync(&rx_mode_work->work);
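
set_features() is called with force_change = true at probe time so the initial hardware state matches netdev->features even though no bit has "changed" yet; afterwards, XOR-ing the previous and requested feature words flags exactly the toggled features. A toy demonstration of that changed-bits test (a plain bit stands in for NETIF_F_TSO here):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define F_TSO (1ull << 0)   /* stand-in for NETIF_F_TSO */

    static uint64_t changed_bits(uint64_t pre, uint64_t now, bool force)
    {
            /* force_change treats every feature as changed. */
            return force ? ~0ull : pre ^ now;
    }

    int main(void)
    {
            /* TSO toggled off: set in pre, clear in now -> flagged. */
            printf("%d\n", !!(changed_bits(F_TSO, 0, false) & F_TSO));
            /* Nothing toggled, but forced -> still flagged. */
            printf("%d\n", !!(changed_bits(0, 0, true) & F_TSO));
            return 0;
    }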
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.c b/drivers/net/ethernet/huawei/hinic/hinic_port.c
index 4d4e3f05fb5f..7575a7d3bd9f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.c
@@ -377,3 +377,35 @@ int hinic_port_get_cap(struct hinic_dev *nic_dev,
 
         return 0;
 }
+
+/**
+ * hinic_port_set_tso - set port tso configuration
+ * @nic_dev: nic device
+ * @state: the tso state to set
+ *
+ * Return 0 - Success, negative - Failure
+ **/
+int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state)
+{
+        struct hinic_hwdev *hwdev = nic_dev->hwdev;
+        struct hinic_hwif *hwif = hwdev->hwif;
+        struct hinic_tso_config tso_cfg = {0};
+        struct pci_dev *pdev = hwif->pdev;
+        u16 out_size;
+        int err;
+
+        tso_cfg.func_id = HINIC_HWIF_FUNC_IDX(hwif);
+        tso_cfg.tso_en = state;
+
+        err = hinic_port_msg_cmd(hwdev, HINIC_PORT_CMD_SET_TSO,
+                                 &tso_cfg, sizeof(tso_cfg),
+                                 &tso_cfg, &out_size);
+        if (err || out_size != sizeof(tso_cfg) || tso_cfg.status) {
+                dev_err(&pdev->dev,
+                        "Failed to set port tso, ret = %d\n",
+                        tso_cfg.status);
+                return -EINVAL;
+        }
+
+        return 0;
+}
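
The TSO command follows the driver's usual management-channel convention: tso_cfg is sent as the request and overwritten in place with the reply, and the reply only counts as success when the transport call succeeded, the reply has the expected size, and the firmware status byte is zero. Shown schematically below (a hypothetical helper mirroring that three-way check, not driver code):

    #include <errno.h>

    static int mgmt_reply_ok(int err, unsigned int out_size,
                             unsigned int expected, unsigned char status)
    {
            if (err ||                  /* transport failed              */
                out_size != expected || /* short or oversized reply      */
                status)                 /* firmware rejected the command */
                    return -EINVAL;
            return 0;
    }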
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_port.h b/drivers/net/ethernet/huawei/hinic/hinic_port.h
index 9404365195dd..f6e3220fe28f 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_port.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_port.h
@@ -72,6 +72,11 @@ enum hinic_speed {
         HINIC_SPEED_UNKNOWN = 0xFF,
 };
 
+enum hinic_tso_state {
+        HINIC_TSO_DISABLE = 0,
+        HINIC_TSO_ENABLE = 1,
+};
+
 struct hinic_port_mac_cmd {
         u8 status;
         u8 version;
@@ -167,6 +172,17 @@ struct hinic_port_cap {
         u8 rsvd2[3];
 };
 
+struct hinic_tso_config {
+        u8 status;
+        u8 version;
+        u8 rsvd0[6];
+
+        u16 func_id;
+        u16 rsvd1;
+        u8 tso_en;
+        u8 resv2[3];
+};
+
 int hinic_port_add_mac(struct hinic_dev *nic_dev, const u8 *addr,
                        u16 vlan_id);
 
@@ -195,4 +211,6 @@ int hinic_port_set_func_state(struct hinic_dev *nic_dev,
 int hinic_port_get_cap(struct hinic_dev *nic_dev,
                        struct hinic_port_cap *port_cap);
 
+int hinic_port_set_tso(struct hinic_dev *nic_dev, enum hinic_tso_state state);
+
 #endif
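
hinic_tso_config is a fixed-layout message: the reserved fields pad it to 16 bytes with func_id naturally aligned, which is presumably what the firmware interface expects. A standalone mirror of the struct with a compile-time size check (the type name is local to the snippet):

    #include <stdint.h>

    struct tso_config_wire {
            uint8_t  status;
            uint8_t  version;
            uint8_t  rsvd0[6];

            uint16_t func_id;
            uint16_t rsvd1;
            uint8_t  tso_en;
            uint8_t  resv2[3];
    };

    /* status..rsvd0 fill 8 bytes; func_id..resv2 fill the next 8. */
    _Static_assert(sizeof(struct tso_config_wire) == 16,
                   "management message must stay 16 bytes");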
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
index c5fca0356c9c..11e73e67358d 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c
@@ -26,6 +26,13 @@
 #include <linux/skbuff.h>
 #include <linux/smp.h>
 #include <asm/byteorder.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
 
 #include "hinic_common.h"
 #include "hinic_hw_if.h"
@@ -45,9 +52,31 @@
 #define CI_UPDATE_NO_PENDING            0
 #define CI_UPDATE_NO_COALESC            0
 
 #define HW_CONS_IDX(sq)         be16_to_cpu(*(u16 *)((sq)->hw_ci_addr))
 
-#define MIN_SKB_LEN             64
+#define MIN_SKB_LEN             17
+
+#define MAX_PAYLOAD_OFFSET              221
+#define TRANSPORT_OFFSET(l4_hdr, skb)   ((u32)((l4_hdr) - (skb)->data))
+
+union hinic_l3 {
+        struct iphdr *v4;
+        struct ipv6hdr *v6;
+        unsigned char *hdr;
+};
+
+union hinic_l4 {
+        struct tcphdr *tcp;
+        struct udphdr *udp;
+        unsigned char *hdr;
+};
+
+enum hinic_offload_type {
+        TX_OFFLOAD_TSO     = BIT(0),
+        TX_OFFLOAD_CSUM    = BIT(1),
+        TX_OFFLOAD_VLAN    = BIT(2),
+        TX_OFFLOAD_INVALID = BIT(3),
+};
 
 /**
  * hinic_txq_clean_stats - Clean the statistics of specific queue
@@ -175,18 +204,263 @@ static void tx_unmap_skb(struct hinic_dev *nic_dev, struct sk_buff *skb,
                             DMA_TO_DEVICE);
 }
 
+static void get_inner_l3_l4_type(struct sk_buff *skb, union hinic_l3 *ip,
+                                 union hinic_l4 *l4,
+                                 enum hinic_offload_type offload_type,
+                                 enum hinic_l3_offload_type *l3_type,
+                                 u8 *l4_proto)
+{
+        u8 *exthdr;
+
+        if (ip->v4->version == 4) {
+                *l3_type = (offload_type == TX_OFFLOAD_CSUM) ?
+                           IPV4_PKT_NO_CHKSUM_OFFLOAD :
+                           IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+                *l4_proto = ip->v4->protocol;
+        } else if (ip->v4->version == 6) {
+                *l3_type = IPV6_PKT;
+                exthdr = ip->hdr + sizeof(*ip->v6);
+                *l4_proto = ip->v6->nexthdr;
+                if (exthdr != l4->hdr) {
+                        int start = exthdr - skb->data;
+                        __be16 frag_off;
+
+                        ipv6_skip_exthdr(skb, start, l4_proto, &frag_off);
+                }
+        } else {
+                *l3_type = L3TYPE_UNKNOWN;
+                *l4_proto = 0;
+        }
+}
+
+static void get_inner_l4_info(struct sk_buff *skb, union hinic_l4 *l4,
+                              enum hinic_offload_type offload_type, u8 l4_proto,
+                              enum hinic_l4_offload_type *l4_offload,
+                              u32 *l4_len, u32 *offset)
+{
+        *l4_offload = OFFLOAD_DISABLE;
+        *offset = 0;
+        *l4_len = 0;
+
+        switch (l4_proto) {
+        case IPPROTO_TCP:
+                *l4_offload = TCP_OFFLOAD_ENABLE;
+                /* doff in unit of 4B */
+                *l4_len = l4->tcp->doff * 4;
+                *offset = *l4_len + TRANSPORT_OFFSET(l4->hdr, skb);
+                break;
+
+        case IPPROTO_UDP:
+                *l4_offload = UDP_OFFLOAD_ENABLE;
+                *l4_len = sizeof(struct udphdr);
+                *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+                break;
+
+        case IPPROTO_SCTP:
+                /* only csum offload support sctp */
+                if (offload_type != TX_OFFLOAD_CSUM)
+                        break;
+
+                *l4_offload = SCTP_OFFLOAD_ENABLE;
+                *l4_len = sizeof(struct sctphdr);
+                *offset = TRANSPORT_OFFSET(l4->hdr, skb);
+                break;
+
+        default:
+                break;
+        }
+}
+
+static __sum16 csum_magic(union hinic_l3 *ip, unsigned short proto)
+{
+        return (ip->v4->version == 4) ?
+               csum_tcpudp_magic(ip->v4->saddr, ip->v4->daddr, 0, proto, 0) :
+               csum_ipv6_magic(&ip->v6->saddr, &ip->v6->daddr, 0, proto, 0);
+}
+
+static int offload_tso(struct hinic_sq_task *task, u32 *queue_info,
+                       struct sk_buff *skb)
+{
+        u32 offset, l4_len, ip_identify, network_hdr_len;
+        enum hinic_l3_offload_type l3_offload;
+        enum hinic_l4_offload_type l4_offload;
+        union hinic_l3 ip;
+        union hinic_l4 l4;
+        u8 l4_proto;
+
+        if (!skb_is_gso(skb))
+                return 0;
+
+        if (skb_cow_head(skb, 0) < 0)
+                return -EPROTONOSUPPORT;
+
+        if (skb->encapsulation) {
+                u32 gso_type = skb_shinfo(skb)->gso_type;
+                u32 tunnel_type = 0;
+                u32 l4_tunnel_len;
+
+                ip.hdr = skb_network_header(skb);
+                l4.hdr = skb_transport_header(skb);
+                network_hdr_len = skb_inner_network_header_len(skb);
+
+                if (ip.v4->version == 4) {
+                        ip.v4->tot_len = 0;
+                        l3_offload = IPV4_PKT_WITH_CHKSUM_OFFLOAD;
+                } else if (ip.v4->version == 6) {
+                        l3_offload = IPV6_PKT;
+                } else {
+                        l3_offload = 0;
+                }
+
+                hinic_task_set_outter_l3(task, l3_offload,
+                                         skb_network_header_len(skb));
+
+                if (gso_type & SKB_GSO_UDP_TUNNEL_CSUM) {
+                        l4.udp->check = ~csum_magic(&ip, IPPROTO_UDP);
+                        tunnel_type = TUNNEL_UDP_CSUM;
+                } else if (gso_type & SKB_GSO_UDP_TUNNEL) {
+                        tunnel_type = TUNNEL_UDP_NO_CSUM;
+                }
+
+                l4_tunnel_len = skb_inner_network_offset(skb) -
+                                skb_transport_offset(skb);
+                hinic_task_set_tunnel_l4(task, tunnel_type, l4_tunnel_len);
+
+                ip.hdr = skb_inner_network_header(skb);
+                l4.hdr = skb_inner_transport_header(skb);
+        } else {
+                ip.hdr = skb_network_header(skb);
+                l4.hdr = skb_transport_header(skb);
+                network_hdr_len = skb_network_header_len(skb);
+        }
+
+        /* initialize inner IP header fields */
+        if (ip.v4->version == 4)
+                ip.v4->tot_len = 0;
+        else
+                ip.v6->payload_len = 0;
+
+        get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_TSO, &l3_offload,
+                             &l4_proto);
+
+        hinic_task_set_inner_l3(task, l3_offload, network_hdr_len);
+
+        ip_identify = 0;
+        if (l4_proto == IPPROTO_TCP)
+                l4.tcp->check = ~csum_magic(&ip, IPPROTO_TCP);
+
+        get_inner_l4_info(skb, &l4, TX_OFFLOAD_TSO, l4_proto, &l4_offload,
+                          &l4_len, &offset);
+
+        hinic_set_tso_inner_l4(task, queue_info, l4_offload, l4_len, offset,
+                               ip_identify, skb_shinfo(skb)->gso_size);
+
+        return 1;
+}
+
+static int offload_csum(struct hinic_sq_task *task, u32 *queue_info,
+                        struct sk_buff *skb)
+{
+        enum hinic_l4_offload_type l4_offload;
+        u32 offset, l4_len, network_hdr_len;
+        enum hinic_l3_offload_type l3_type;
+        union hinic_l3 ip;
+        union hinic_l4 l4;
+        u8 l4_proto;
+
+        if (skb->ip_summed != CHECKSUM_PARTIAL)
+                return 0;
+
+        if (skb->encapsulation) {
+                u32 l4_tunnel_len;
+
+                ip.hdr = skb_network_header(skb);
+
+                if (ip.v4->version == 4)
+                        l3_type = IPV4_PKT_NO_CHKSUM_OFFLOAD;
+                else if (ip.v4->version == 6)
+                        l3_type = IPV6_PKT;
+                else
+                        l3_type = L3TYPE_UNKNOWN;
+
+                hinic_task_set_outter_l3(task, l3_type,
+                                         skb_network_header_len(skb));
+
+                l4_tunnel_len = skb_inner_network_offset(skb) -
+                                skb_transport_offset(skb);
+
+                hinic_task_set_tunnel_l4(task, TUNNEL_UDP_NO_CSUM,
+                                         l4_tunnel_len);
+
+                ip.hdr = skb_inner_network_header(skb);
+                l4.hdr = skb_inner_transport_header(skb);
+                network_hdr_len = skb_inner_network_header_len(skb);
+        } else {
+                ip.hdr = skb_network_header(skb);
+                l4.hdr = skb_transport_header(skb);
+                network_hdr_len = skb_network_header_len(skb);
+        }
+
+        get_inner_l3_l4_type(skb, &ip, &l4, TX_OFFLOAD_CSUM, &l3_type,
+                             &l4_proto);
+
+        hinic_task_set_inner_l3(task, l3_type, network_hdr_len);
+
+        get_inner_l4_info(skb, &l4, TX_OFFLOAD_CSUM, l4_proto, &l4_offload,
+                          &l4_len, &offset);
+
+        hinic_set_cs_inner_l4(task, queue_info, l4_offload, l4_len, offset);
+
+        return 1;
+}
+
+static int hinic_tx_offload(struct sk_buff *skb, struct hinic_sq_task *task,
+                            u32 *queue_info)
+{
+        enum hinic_offload_type offload = 0;
+        int enabled;
+
+        enabled = offload_tso(task, queue_info, skb);
+        if (enabled > 0) {
+                offload |= TX_OFFLOAD_TSO;
+        } else if (enabled == 0) {
+                enabled = offload_csum(task, queue_info, skb);
+                if (enabled)
+                        offload |= TX_OFFLOAD_CSUM;
+        } else {
+                return -EPROTONOSUPPORT;
+        }
+
+        if (offload)
+                hinic_task_set_l2hdr(task, skb_network_offset(skb));
+
+        /* payload offset should not more than 221 */
+        if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_PLDOFF) >
+            MAX_PAYLOAD_OFFSET) {
+                return -EPROTONOSUPPORT;
+        }
+
+        /* mss should not less than 80 */
+        if (HINIC_SQ_CTRL_GET(*queue_info, QUEUE_INFO_MSS) < HINIC_MSS_MIN) {
+                *queue_info = HINIC_SQ_CTRL_CLEAR(*queue_info, QUEUE_INFO_MSS);
+                *queue_info |= HINIC_SQ_CTRL_SET(HINIC_MSS_MIN, QUEUE_INFO_MSS);
+        }
+
+        return 0;
+}
+
 netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
         struct hinic_dev *nic_dev = netdev_priv(netdev);
+        u16 prod_idx, q_id = skb->queue_mapping;
         struct netdev_queue *netdev_txq;
         int nr_sges, err = NETDEV_TX_OK;
         struct hinic_sq_wqe *sq_wqe;
         unsigned int wqe_size;
         struct hinic_txq *txq;
         struct hinic_qp *qp;
-        u16 prod_idx;
 
-        txq = &nic_dev->txqs[skb->queue_mapping];
+        txq = &nic_dev->txqs[q_id];
         qp = container_of(txq->sq, struct hinic_qp, sq);
 
         if (skb->len < MIN_SKB_LEN) {
@@ -236,15 +510,23 @@ netdev_tx_t hinic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 process_sq_wqe:
         hinic_sq_prepare_wqe(txq->sq, prod_idx, sq_wqe, txq->sges, nr_sges);
 
+        err = hinic_tx_offload(skb, &sq_wqe->task, &sq_wqe->ctrl.queue_info);
+        if (err)
+                goto offload_error;
+
         hinic_sq_write_wqe(txq->sq, prod_idx, sq_wqe, skb, wqe_size);
 
 flush_skbs:
-        netdev_txq = netdev_get_tx_queue(netdev, skb->queue_mapping);
+        netdev_txq = netdev_get_tx_queue(netdev, q_id);
         if ((!skb->xmit_more) || (netif_xmit_stopped(netdev_txq)))
                 hinic_sq_write_db(txq->sq, prod_idx, wqe_size, 0);
 
         return err;
 
+offload_error:
+        hinic_sq_return_wqe(txq->sq, wqe_size);
+        tx_unmap_skb(nic_dev, skb, txq->sges);
+
 skb_error:
         dev_kfree_skb_any(skb);
 
@@ -252,7 +534,8 @@ update_error_stats:
         u64_stats_update_begin(&txq->txq_stats.syncp);
         txq->txq_stats.tx_dropped++;
         u64_stats_update_end(&txq->txq_stats.syncp);
-        return err;
+
+        return NETDEV_TX_OK;
 }
 
 /**
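
One subtlety worth noting in offload_tso(): before handing a GSO skb to hardware, the driver zeroes the IP length fields and, via csum_magic(), seeds the TCP (or tunnel UDP) checksum with the pseudo-header sum computed over a zero length, since the hardware rewrites the lengths and finishes the checksum for each emitted segment. csum_tcpudp_magic() returns the complemented sum, so the driver's ~csum_magic(...) stores the non-inverted sum in tcp->check. A self-contained sketch of that zero-length IPv4 pseudo-header sum (RFC 1071 folding; addresses are given as big-endian-packed integers and byte-order details are glossed over):

    #include <stdint.h>
    #include <stdio.h>

    static uint16_t pseudo_hdr_sum(uint32_t saddr, uint32_t daddr, uint8_t proto)
    {
            /* Sum the 16-bit words of both addresses plus the protocol;
             * the length word is deliberately 0 -- hardware adds it per
             * segment. */
            uint64_t sum = (saddr >> 16) + (saddr & 0xFFFF) +
                           (daddr >> 16) + (daddr & 0xFFFF) + proto;

            while (sum >> 16)   /* fold carries back in (RFC 1071) */
                    sum = (sum >> 16) + (sum & 0xFFFF);

            return (uint16_t)sum;
    }

    int main(void)
    {
            /* 192.0.2.1 -> 192.0.2.2, protocol 6 (TCP). */
            printf("tcp->check seed: 0x%04x\n",
                   pseudo_hdr_sum(0xC0000201, 0xC0000202, 6));
            return 0;
    }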