Diffstat (limited to 'drivers/net/ethernet/sfc/nic.c')
 drivers/net/ethernet/sfc/nic.c | 90 +++++++++++++++++++++++++++++++++--------
 1 file changed, 75 insertions(+), 15 deletions(-)
diff --git a/drivers/net/ethernet/sfc/nic.c b/drivers/net/ethernet/sfc/nic.c
index 0ad790cc473c..f9f5df8b51fe 100644
--- a/drivers/net/ethernet/sfc/nic.c
+++ b/drivers/net/ethernet/sfc/nic.c
@@ -591,12 +591,22 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
 	struct efx_nic *efx = rx_queue->efx;
 	bool is_b0 = efx_nic_rev(efx) >= EFX_REV_FALCON_B0;
 	bool iscsi_digest_en = is_b0;
+	bool jumbo_en;
+
+	/* For kernel-mode queues in Falcon A1, the JUMBO flag enables
+	 * DMA to continue after a PCIe page boundary (and scattering
+	 * is not possible). In Falcon B0 and Siena, it enables
+	 * scatter.
+	 */
+	jumbo_en = !is_b0 || efx->rx_scatter;
 
 	netif_dbg(efx, hw, efx->net_dev,
 		  "RX queue %d ring in special buffers %d-%d\n",
 		  efx_rx_queue_index(rx_queue), rx_queue->rxd.index,
 		  rx_queue->rxd.index + rx_queue->rxd.entries - 1);
 
+	rx_queue->scatter_n = 0;
+
 	/* Pin RX descriptor ring */
 	efx_init_special_buffer(efx, &rx_queue->rxd);
 
@@ -613,8 +623,7 @@ void efx_nic_init_rx(struct efx_rx_queue *rx_queue)
 			      FRF_AZ_RX_DESCQ_SIZE,
 			      __ffs(rx_queue->rxd.entries),
 			      FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
-			      /* For >=B0 this is scatter so disable */
-			      FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
+			      FRF_AZ_RX_DESCQ_JUMBO, jumbo_en,
 			      FRF_AZ_RX_DESCQ_EN, 1);
 	efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
 			 efx_rx_queue_index(rx_queue));
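
Note: the JUMBO bit thus changes meaning with hardware revision. On Falcon A1 it only lets a kernel-mode queue DMA across a PCIe page boundary (scatter is impossible there), while on Falcon B0 and Siena it enables RX scatter, so it follows the rx_scatter configuration. A minimal standalone C sketch of that decision; the EFX_REV_* values here are illustrative stand-ins for the driver's real revision constants:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the driver's revision enum; values are assumptions. */
enum { EFX_REV_FALCON_A1 = 1, EFX_REV_FALCON_B0 = 2, EFX_REV_SIENA_A0 = 3 };

static bool jumbo_flag(int rev, bool rx_scatter)
{
	bool is_b0 = rev >= EFX_REV_FALCON_B0;

	/* A1: always set, so DMA may continue across a PCIe page
	 * boundary.  B0/Siena: the bit means scatter, so it tracks
	 * the rx_scatter configuration. */
	return !is_b0 || rx_scatter;
}

int main(void)
{
	printf("A1:             %d\n", jumbo_flag(EFX_REV_FALCON_A1, false));
	printf("B0, no scatter: %d\n", jumbo_flag(EFX_REV_FALCON_B0, false));
	printf("B0, scatter:    %d\n", jumbo_flag(EFX_REV_FALCON_B0, true));
	return 0;
}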
@@ -968,13 +977,24 @@ static u16 efx_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
 		  EFX_RX_PKT_DISCARD : 0;
 }
 
-/* Handle receive events that are not in-order. */
-static void
+/* Handle receive events that are not in-order. Return true if this
+ * can be handled as a partial packet discard, false if it's more
+ * serious.
+ */
+static bool
 efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
 {
+	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
 	struct efx_nic *efx = rx_queue->efx;
 	unsigned expected, dropped;
 
+	if (rx_queue->scatter_n &&
+	    index == ((rx_queue->removed_count + rx_queue->scatter_n - 1) &
+		      rx_queue->ptr_mask)) {
+		++channel->n_rx_nodesc_trunc;
+		return true;
+	}
+
 	expected = rx_queue->removed_count & rx_queue->ptr_mask;
 	dropped = (index - expected) & rx_queue->ptr_mask;
 	netif_info(efx, rx_err, efx->net_dev,
@@ -983,6 +1003,7 @@ efx_handle_rx_bad_index(struct efx_rx_queue *rx_queue, unsigned index)
 
 	efx_schedule_reset(efx, EFX_WORKAROUND_5676(efx) ?
 			   RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
+	return false;
 }
 
 /* Handle a packet received event
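
Note: the bool return lets the caller distinguish a recoverable truncation from a genuinely bad completion index. If the out-of-order index points at the last descriptor of the scatter sequence in progress, the NIC simply ran out of descriptors and truncated the packet; anything else still forces a reset. A compilable sketch of that test with cut-down stand-in structures (field names follow the patch, everything else is hypothetical):

#include <stdbool.h>
#include <stdio.h>

/* Cut-down stand-in for struct efx_rx_queue, plus the truncation
 * counter that really lives on the channel. */
struct rxq {
	unsigned removed_count;	/* descriptors consumed so far */
	unsigned scatter_n;	/* fragments of the packet in progress */
	unsigned ptr_mask;	/* ring size - 1 */
	unsigned n_trunc;	/* stands in for n_rx_nodesc_trunc */
};

/* Mirrors the test added to efx_handle_rx_bad_index(). */
static bool bad_index_recoverable(struct rxq *q, unsigned index)
{
	if (q->scatter_n &&
	    index == ((q->removed_count + q->scatter_n - 1) & q->ptr_mask)) {
		++q->n_trunc;
		return true;	/* caller discards the partial packet */
	}
	return false;		/* caller schedules a reset */
}

int main(void)
{
	struct rxq q = { .removed_count = 100, .scatter_n = 2,
			 .ptr_mask = 511 };

	printf("%d\n", bad_index_recoverable(&q, 101)); /* 1: truncation */
	printf("%d\n", bad_index_recoverable(&q, 200)); /* 0: serious */
	return 0;
}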
@@ -998,7 +1019,7 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	unsigned int rx_ev_desc_ptr, rx_ev_byte_cnt;
 	unsigned int rx_ev_hdr_type, rx_ev_mcast_pkt;
 	unsigned expected_ptr;
-	bool rx_ev_pkt_ok;
+	bool rx_ev_pkt_ok, rx_ev_sop, rx_ev_cont;
 	u16 flags;
 	struct efx_rx_queue *rx_queue;
 	struct efx_nic *efx = channel->efx;
@@ -1006,21 +1027,56 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	if (unlikely(ACCESS_ONCE(efx->reset_pending)))
 		return;
 
-	/* Basic packet information */
-	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
-	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
-	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
-	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
-	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
+	rx_ev_cont = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT);
+	rx_ev_sop = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP);
 	WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
 		channel->channel);
 
 	rx_queue = efx_channel_get_rx_queue(channel);
 
 	rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
-	expected_ptr = rx_queue->removed_count & rx_queue->ptr_mask;
-	if (unlikely(rx_ev_desc_ptr != expected_ptr))
-		efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
+	expected_ptr = ((rx_queue->removed_count + rx_queue->scatter_n) &
+			rx_queue->ptr_mask);
+
+	/* Check for partial drops and other errors */
+	if (unlikely(rx_ev_desc_ptr != expected_ptr) ||
+	    unlikely(rx_ev_sop != (rx_queue->scatter_n == 0))) {
+		if (rx_ev_desc_ptr != expected_ptr &&
+		    !efx_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr))
+			return;
+
+		/* Discard all pending fragments */
+		if (rx_queue->scatter_n) {
+			efx_rx_packet(
+				rx_queue,
+				rx_queue->removed_count & rx_queue->ptr_mask,
+				rx_queue->scatter_n, 0, EFX_RX_PKT_DISCARD);
+			rx_queue->removed_count += rx_queue->scatter_n;
+			rx_queue->scatter_n = 0;
+		}
+
+		/* Return if there is no new fragment */
+		if (rx_ev_desc_ptr != expected_ptr)
+			return;
+
+		/* Discard new fragment if not SOP */
+		if (!rx_ev_sop) {
+			efx_rx_packet(
+				rx_queue,
+				rx_queue->removed_count & rx_queue->ptr_mask,
+				1, 0, EFX_RX_PKT_DISCARD);
+			++rx_queue->removed_count;
+			return;
+		}
+	}
+
+	++rx_queue->scatter_n;
+	if (rx_ev_cont)
+		return;
+
+	rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
+	rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
+	rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
 
 	if (likely(rx_ev_pkt_ok)) {
 		/* If packet is marked as OK and packet type is TCP/IP or
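
Note: the event handler is now a small state machine over the SOP and JUMBO_CONT flags. SOP must be set exactly when no packet is in progress (scatter_n == 0), a CONT event adds a fragment and waits for more, and the final event completes the packet. A standalone sketch of that flow under those assumptions; the discard paths are reduced to resetting the fragment count:

#include <stdbool.h>
#include <stdio.h>

struct rxq { unsigned scatter_n; };

/* Returns true when a complete packet has been assembled. */
static bool rx_event(struct rxq *q, bool sop, bool cont)
{
	if (sop != (q->scatter_n == 0)) {
		/* Out-of-step event: the real code hands any pending
		 * fragments to efx_rx_packet() with EFX_RX_PKT_DISCARD
		 * before resyncing; here we just drop the count. */
		q->scatter_n = 0;
		if (!sop)
			return false;	/* lone non-SOP fragment: discarded */
	}
	++q->scatter_n;
	return !cont;	/* last fragment completes the packet */
}

int main(void)
{
	struct rxq q = { 0 };

	/* A three-fragment packet: SOP+CONT, CONT, final fragment. */
	printf("%d", rx_event(&q, true, true));	/* 0: more to come */
	printf("%d", rx_event(&q, false, true));	/* 0: more to come */
	printf("%d\n", rx_event(&q, false, false));	/* 1: complete */
	return 0;
}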
@@ -1048,7 +1104,11 @@ efx_handle_rx_event(struct efx_channel *channel, const efx_qword_t *event)
 	channel->irq_mod_score += 2;
 
 	/* Handle received packet */
-	efx_rx_packet(rx_queue, rx_ev_desc_ptr, rx_ev_byte_cnt, flags);
+	efx_rx_packet(rx_queue,
+		      rx_queue->removed_count & rx_queue->ptr_mask,
+		      rx_queue->scatter_n, rx_ev_byte_cnt, flags);
+	rx_queue->removed_count += rx_queue->scatter_n;
+	rx_queue->scatter_n = 0;
 }
 
 /* If this flush done event corresponds to a &struct efx_tx_queue, then
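
Note: on completion the driver now hands the whole fragment run to efx_rx_packet() in one call, addressed by the first fragment's ring slot, and only then advances removed_count. A sketch of that accounting, with a hypothetical deliver() standing in for efx_rx_packet():

#include <stdio.h>

struct rxq {
	unsigned removed_count;
	unsigned scatter_n;
	unsigned ptr_mask;	/* ring size - 1 */
};

/* Hypothetical stand-in for efx_rx_packet(): index is the ring slot
 * of the first fragment, n_frags the number of fragments. */
static void deliver(unsigned index, unsigned n_frags, unsigned len)
{
	printf("packet at slot %u: %u fragment(s), %u bytes\n",
	       index, n_frags, len);
}

/* Mirrors the end of efx_handle_rx_event(): pass the whole scatter
 * sequence to the RX path in one call, then advance the ring state. */
static void complete_packet(struct rxq *q, unsigned byte_cnt)
{
	deliver(q->removed_count & q->ptr_mask, q->scatter_n, byte_cnt);
	q->removed_count += q->scatter_n;
	q->scatter_n = 0;
}

int main(void)
{
	struct rxq q = { .removed_count = 510, .scatter_n = 3,
			 .ptr_mask = 511 };

	/* Starts at slot 510; the three fragments wrap around the ring
	 * (slots 510, 511, 0), which the masking handles. */
	complete_packet(&q, 9000);
	return 0;
}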