author     David S. Miller <davem@davemloft.net>    2017-06-11 16:36:48 -0400
committer  David S. Miller <davem@davemloft.net>    2017-06-11 16:36:48 -0400
commit     77a6bb5ac00dd48934dae0df4a24461cc7893d64 (patch)
tree       e1a45fb7ebec315e96979a45b78602e8230041b9
parent     b87fa0fafef4b16495740432f4eb8262efa500d0 (diff)
parent     e7ff7efae5708513a795e329909ccbe2ac367b1a (diff)
Merge branch 'ena-fixes'
Netanel Belgazal says:
====================
Bug fixes in the ena ethernet driver
This patchset contains fixes for the bugs that were discovered so far.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
-rw-r--r--   drivers/net/ethernet/amazon/ena/ena_com.c        35
-rw-r--r--   drivers/net/ethernet/amazon/ena/ena_ethtool.c      2
-rw-r--r--   drivers/net/ethernet/amazon/ena/ena_netdev.c     179
-rw-r--r--   drivers/net/ethernet/amazon/ena/ena_netdev.h      18
4 files changed, 169 insertions, 65 deletions
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 08d11cede9c9..f5b237e0bd60 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -61,6 +61,8 @@
 
 #define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
 
+#define ENA_REGS_ADMIN_INTR_MASK 1
+
 /*****************************************************************************/
 /*****************************************************************************/
 /*****************************************************************************/
@@ -232,11 +234,9 @@ static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queu
 	tail_masked = admin_queue->sq.tail & queue_size_mask;
 
 	/* In case of queue FULL */
-	cnt = admin_queue->sq.tail - admin_queue->sq.head;
+	cnt = atomic_read(&admin_queue->outstanding_cmds);
 	if (cnt >= admin_queue->q_depth) {
-		pr_debug("admin queue is FULL (tail %d head %d depth: %d)\n",
-			 admin_queue->sq.tail, admin_queue->sq.head,
-			 admin_queue->q_depth);
+		pr_debug("admin queue is full.\n");
 		admin_queue->stats.out_of_space++;
 		return ERR_PTR(-ENOSPC);
 	}
@@ -508,15 +508,20 @@ static int ena_com_comp_status_to_errno(u8 comp_status)
 static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
 						     struct ena_com_admin_queue *admin_queue)
 {
-	unsigned long flags;
-	u32 start_time;
+	unsigned long flags, timeout;
 	int ret;
 
-	start_time = ((u32)jiffies_to_usecs(jiffies));
+	timeout = jiffies + ADMIN_CMD_TIMEOUT_US;
+
+	while (1) {
+		spin_lock_irqsave(&admin_queue->q_lock, flags);
+		ena_com_handle_admin_completion(admin_queue);
+		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
+
+		if (comp_ctx->status != ENA_CMD_SUBMITTED)
+			break;
 
-	while (comp_ctx->status == ENA_CMD_SUBMITTED) {
-		if ((((u32)jiffies_to_usecs(jiffies)) - start_time) >
-		    ADMIN_CMD_TIMEOUT_US) {
+		if (time_is_before_jiffies(timeout)) {
 			pr_err("Wait for completion (polling) timeout\n");
 			/* ENA didn't have any completion */
 			spin_lock_irqsave(&admin_queue->q_lock, flags);
@@ -528,10 +533,6 @@ static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_c
 			goto err;
 		}
 
-		spin_lock_irqsave(&admin_queue->q_lock, flags);
-		ena_com_handle_admin_completion(admin_queue);
-		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
-
 		msleep(100);
 	}
 
@@ -1455,6 +1456,12 @@ void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
 
 void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
 {
+	u32 mask_value = 0;
+
+	if (polling)
+		mask_value = ENA_REGS_ADMIN_INTR_MASK;
+
+	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
 	ena_dev->admin_queue.polling = polling;
 }
 
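The polling fix in the first hunks above replaces open-coded microsecond arithmetic, which can wrap, with a jiffies deadline checked through time_is_before_jiffies(). A minimal sketch of that idiom, detached from the driver (poll_hw_done(), hw_done() and MY_TIMEOUT_MS are hypothetical names for this example, not part of the patch):

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/jiffies.h>
#include <linux/delay.h>

#define MY_TIMEOUT_MS 500	/* assumed value for this sketch */

/* Hypothetical example: poll a hardware condition against a jiffies
 * deadline instead of subtracting raw microsecond counters.
 */
static int poll_hw_done(bool (*hw_done)(void))
{
	unsigned long timeout = jiffies + msecs_to_jiffies(MY_TIMEOUT_MS);

	while (1) {
		if (hw_done())
			return 0;

		/* time_is_before_jiffies() is safe across jiffies wraparound */
		if (time_is_before_jiffies(timeout))
			return -ETIMEDOUT;

		msleep(100);
	}
}

The jiffies helpers hide the wraparound handling, so the loop stays correct even when the counter rolls over.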
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index 67b2338f8fb3..3ee55e2fd694 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -80,7 +80,6 @@ static const struct ena_stats ena_stats_tx_strings[] = {
 	ENA_STAT_TX_ENTRY(tx_poll),
 	ENA_STAT_TX_ENTRY(doorbells),
 	ENA_STAT_TX_ENTRY(prepare_ctx_err),
-	ENA_STAT_TX_ENTRY(missing_tx_comp),
 	ENA_STAT_TX_ENTRY(bad_req_id),
 };
 
@@ -94,6 +93,7 @@ static const struct ena_stats ena_stats_rx_strings[] = {
 	ENA_STAT_RX_ENTRY(dma_mapping_err),
 	ENA_STAT_RX_ENTRY(bad_desc_num),
 	ENA_STAT_RX_ENTRY(rx_copybreak_pkt),
+	ENA_STAT_RX_ENTRY(empty_rx_ring),
 };
 
 static const struct ena_stats ena_stats_ena_com_strings[] = {
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 7c1214d78855..4f16ed38bcf3 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -190,6 +190,7 @@ static void ena_init_io_rings(struct ena_adapter *adapter)
 		rxr->sgl_size = adapter->max_rx_sgl_size;
 		rxr->smoothed_interval =
 			ena_com_get_nonadaptive_moderation_interval_rx(ena_dev);
+		rxr->empty_rx_queue = 0;
 	}
 }
 
@@ -1078,6 +1079,26 @@ inline void ena_adjust_intr_moderation(struct ena_ring *rx_ring,
 	rx_ring->per_napi_bytes = 0;
 }
 
+static inline void ena_unmask_interrupt(struct ena_ring *tx_ring,
+					struct ena_ring *rx_ring)
+{
+	struct ena_eth_io_intr_reg intr_reg;
+
+	/* Update intr register: rx intr delay,
+	 * tx intr delay and interrupt unmask
+	 */
+	ena_com_update_intr_reg(&intr_reg,
+				rx_ring->smoothed_interval,
+				tx_ring->smoothed_interval,
+				true);
+
+	/* It is a shared MSI-X.
+	 * Tx and Rx CQ have pointer to it.
+	 * So we use one of them to reach the intr reg
+	 */
+	ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+}
+
 static inline void ena_update_ring_numa_node(struct ena_ring *tx_ring,
 					     struct ena_ring *rx_ring)
 {
@@ -1108,7 +1129,6 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 {
 	struct ena_napi *ena_napi = container_of(napi, struct ena_napi, napi);
 	struct ena_ring *tx_ring, *rx_ring;
-	struct ena_eth_io_intr_reg intr_reg;
 
 	u32 tx_work_done;
 	u32 rx_work_done;
@@ -1149,22 +1169,9 @@ static int ena_io_poll(struct napi_struct *napi, int budget)
 		if (ena_com_get_adaptive_moderation_enabled(rx_ring->ena_dev))
 			ena_adjust_intr_moderation(rx_ring, tx_ring);
 
-		/* Update intr register: rx intr delay,
-		 * tx intr delay and interrupt unmask
-		 */
-		ena_com_update_intr_reg(&intr_reg,
-					rx_ring->smoothed_interval,
-					tx_ring->smoothed_interval,
-					true);
-
-		/* It is a shared MSI-X.
-		 * Tx and Rx CQ have pointer to it.
-		 * So we use one of them to reach the intr reg
-		 */
-		ena_com_unmask_intr(rx_ring->ena_com_io_cq, &intr_reg);
+		ena_unmask_interrupt(tx_ring, rx_ring);
 	}
 
-
 	ena_update_ring_numa_node(tx_ring, rx_ring);
 
 	ret = rx_work_done;
@@ -1485,6 +1492,11 @@ static int ena_up_complete(struct ena_adapter *adapter)
 
 	ena_napi_enable_all(adapter);
 
+	/* Enable completion queues interrupt */
+	for (i = 0; i < adapter->num_queues; i++)
+		ena_unmask_interrupt(&adapter->tx_ring[i],
+				     &adapter->rx_ring[i]);
+
 	/* schedule napi in case we had pending packets
 	 * from the last time we disable napi
 	 */
@@ -1532,6 +1544,7 @@ static int ena_create_io_tx_queue(struct ena_adapter *adapter, int qid)
 			  "Failed to get TX queue handlers. TX queue num %d rc: %d\n",
 			  qid, rc);
 		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
 	}
 
 	ena_com_update_numa_node(tx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1596,6 +1609,7 @@ static int ena_create_io_rx_queue(struct ena_adapter *adapter, int qid)
 			  "Failed to get RX queue handlers. RX queue num %d rc: %d\n",
 			  qid, rc);
 		ena_com_destroy_io_queue(ena_dev, ena_qid);
+		return rc;
 	}
 
 	ena_com_update_numa_node(rx_ring->ena_com_io_cq, ctx.numa_node);
@@ -1981,6 +1995,7 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 
 	tx_info->tx_descs = nb_hw_desc;
 	tx_info->last_jiffies = jiffies;
+	tx_info->print_once = 0;
 
 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
 						    tx_ring->ring_size);
@@ -2550,13 +2565,44 @@ err:
 		"Reset attempt failed. Can not reset the device\n");
 }
 
-static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+static int check_missing_comp_in_queue(struct ena_adapter *adapter,
+				       struct ena_ring *tx_ring)
 {
 	struct ena_tx_buffer *tx_buf;
 	unsigned long last_jiffies;
+	u32 missed_tx = 0;
+	int i;
+
+	for (i = 0; i < tx_ring->ring_size; i++) {
+		tx_buf = &tx_ring->tx_buffer_info[i];
+		last_jiffies = tx_buf->last_jiffies;
+		if (unlikely(last_jiffies &&
+			     time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
+			if (!tx_buf->print_once)
+				netif_notice(adapter, tx_err, adapter->netdev,
+					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
+					     tx_ring->qid, i);
+
+			tx_buf->print_once = 1;
+			missed_tx++;
+
+			if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
+				netif_err(adapter, tx_err, adapter->netdev,
+					  "The number of lost tx completions is above the threshold (%d > %d). Reset the device\n",
+					  missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
+				set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+				return -EIO;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void check_for_missing_tx_completions(struct ena_adapter *adapter)
+{
 	struct ena_ring *tx_ring;
-	int i, j, budget;
-	u32 missed_tx;
+	int i, budget, rc;
 
 	/* Make sure the driver doesn't turn the device in other process */
 	smp_rmb();
@@ -2572,31 +2618,9 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 	for (i = adapter->last_monitored_tx_qid; i < adapter->num_queues; i++) {
 		tx_ring = &adapter->tx_ring[i];
 
-		for (j = 0; j < tx_ring->ring_size; j++) {
-			tx_buf = &tx_ring->tx_buffer_info[j];
-			last_jiffies = tx_buf->last_jiffies;
-			if (unlikely(last_jiffies && time_is_before_jiffies(last_jiffies + TX_TIMEOUT))) {
-				netif_notice(adapter, tx_err, adapter->netdev,
-					     "Found a Tx that wasn't completed on time, qid %d, index %d.\n",
-					     tx_ring->qid, j);
-
-				u64_stats_update_begin(&tx_ring->syncp);
-				missed_tx = tx_ring->tx_stats.missing_tx_comp++;
-				u64_stats_update_end(&tx_ring->syncp);
-
-				/* Clear last jiffies so the lost buffer won't
-				 * be counted twice.
-				 */
-				tx_buf->last_jiffies = 0;
-
-				if (unlikely(missed_tx > MAX_NUM_OF_TIMEOUTED_PACKETS)) {
-					netif_err(adapter, tx_err, adapter->netdev,
-						  "The number of lost tx completion is above the threshold (%d > %d). Reset the device\n",
-						  missed_tx, MAX_NUM_OF_TIMEOUTED_PACKETS);
-					set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
-				}
-			}
-		}
+		rc = check_missing_comp_in_queue(adapter, tx_ring);
+		if (unlikely(rc))
+			return;
 
 		budget--;
 		if (!budget)
@@ -2606,6 +2630,58 @@ static void check_for_missing_tx_completions(struct ena_adapter *adapter)
 	adapter->last_monitored_tx_qid = i % adapter->num_queues;
 }
 
+/* trigger napi schedule after 2 consecutive detections */
+#define EMPTY_RX_REFILL 2
+/* For the rare case where the device runs out of Rx descriptors and the
+ * napi handler failed to refill new Rx descriptors (due to a lack of memory
+ * for example).
+ * This case will lead to a deadlock:
+ * The device won't send interrupts since all the new Rx packets will be dropped
+ * The napi handler won't allocate new Rx descriptors so the device will be
+ * able to send new packets.
+ *
+ * This scenario can happen when the kernel's vm.min_free_kbytes is too small.
+ * It is recommended to have at least 512MB, with a minimum of 128MB for
+ * constrained environment).
+ *
+ * When such a situation is detected - Reschedule napi
+ */
+static void check_for_empty_rx_ring(struct ena_adapter *adapter)
+{
+	struct ena_ring *rx_ring;
+	int i, refill_required;
+
+	if (!test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
+		return;
+
+	if (test_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags))
+		return;
+
+	for (i = 0; i < adapter->num_queues; i++) {
+		rx_ring = &adapter->rx_ring[i];
+
+		refill_required =
+			ena_com_sq_empty_space(rx_ring->ena_com_io_sq);
+		if (unlikely(refill_required == (rx_ring->ring_size - 1))) {
+			rx_ring->empty_rx_queue++;
+
+			if (rx_ring->empty_rx_queue >= EMPTY_RX_REFILL) {
+				u64_stats_update_begin(&rx_ring->syncp);
+				rx_ring->rx_stats.empty_rx_ring++;
+				u64_stats_update_end(&rx_ring->syncp);
+
+				netif_err(adapter, drv, adapter->netdev,
+					  "trigger refill for ring %d\n", i);
+
+				napi_schedule(rx_ring->napi);
+				rx_ring->empty_rx_queue = 0;
+			}
+		} else {
+			rx_ring->empty_rx_queue = 0;
+		}
+	}
+}
+
 /* Check for keep alive expiration */
 static void check_for_missing_keep_alive(struct ena_adapter *adapter)
 {
@@ -2660,6 +2736,8 @@ static void ena_timer_service(unsigned long data)
 
 	check_for_missing_tx_completions(adapter);
 
+	check_for_empty_rx_ring(adapter);
+
 	if (debug_area)
 		ena_dump_stats_to_buf(adapter, debug_area);
 
@@ -2840,6 +2918,11 @@ static void ena_release_bars(struct ena_com_dev *ena_dev, struct pci_dev *pdev)
 {
 	int release_bars;
 
+	if (ena_dev->mem_bar)
+		devm_iounmap(&pdev->dev, ena_dev->mem_bar);
+
+	devm_iounmap(&pdev->dev, ena_dev->reg_bar);
+
 	release_bars = pci_select_bars(pdev, IORESOURCE_MEM) & ENA_BAR_MASK;
 	pci_release_selected_regions(pdev, release_bars);
 }
@@ -2927,8 +3010,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 		goto err_free_ena_dev;
 	}
 
-	ena_dev->reg_bar = ioremap(pci_resource_start(pdev, ENA_REG_BAR),
-				   pci_resource_len(pdev, ENA_REG_BAR));
+	ena_dev->reg_bar = devm_ioremap(&pdev->dev,
+					pci_resource_start(pdev, ENA_REG_BAR),
+					pci_resource_len(pdev, ENA_REG_BAR));
 	if (!ena_dev->reg_bar) {
 		dev_err(&pdev->dev, "failed to remap regs bar\n");
 		rc = -EFAULT;
@@ -2948,8 +3032,9 @@ static int ena_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 	ena_set_push_mode(pdev, ena_dev, &get_feat_ctx);
 
 	if (ena_dev->tx_mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
-		ena_dev->mem_bar = ioremap_wc(pci_resource_start(pdev, ENA_MEM_BAR),
-					      pci_resource_len(pdev, ENA_MEM_BAR));
+		ena_dev->mem_bar = devm_ioremap_wc(&pdev->dev,
+						   pci_resource_start(pdev, ENA_MEM_BAR),
+						   pci_resource_len(pdev, ENA_MEM_BAR));
 		if (!ena_dev->mem_bar) {
 			rc = -EFAULT;
 			goto err_device_destroy;
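The check_for_empty_rx_ring() hunk above addresses the deadlock described in its comment by having the periodic timer notice a ring that has stayed completely unfilled and kick NAPI again. A stripped-down sketch of that watchdog pattern (my_rx_ring, my_check_starved_ring and EMPTY_RING_HYSTERESIS are hypothetical names for this example, not taken from the driver):

#include <linux/netdevice.h>

#define EMPTY_RING_HYSTERESIS 2	/* consecutive detections before rescheduling */

/* Hypothetical ring state for this sketch. */
struct my_rx_ring {
	struct napi_struct *napi;
	int empty_ticks;
};

/* Called from a periodic timer; reschedules NAPI if the ring has been
 * starved of descriptors for several consecutive ticks, so the refill
 * path gets another chance even though the device raises no interrupts.
 */
static void my_check_starved_ring(struct my_rx_ring *ring, bool ring_is_empty)
{
	if (!ring_is_empty) {
		ring->empty_ticks = 0;
		return;
	}

	if (++ring->empty_ticks >= EMPTY_RING_HYSTERESIS) {
		napi_schedule(ring->napi);
		ring->empty_ticks = 0;
	}
}

Requiring two consecutive detections avoids rescheduling NAPI for a ring that is merely in the middle of a normal refill.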
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 0e22bce6239d..a4d3d5e21068 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -45,7 +45,7 @@
 
 #define DRV_MODULE_VER_MAJOR 1
 #define DRV_MODULE_VER_MINOR 1
-#define DRV_MODULE_VER_SUBMINOR 2
+#define DRV_MODULE_VER_SUBMINOR 7
 
 #define DRV_MODULE_NAME "ena"
 #ifndef DRV_MODULE_VERSION
@@ -146,7 +146,18 @@ struct ena_tx_buffer {
 	u32 tx_descs;
 	/* num of buffers used by this skb */
 	u32 num_of_bufs;
-	/* Save the last jiffies to detect missing tx packets */
+
+	/* Used for detect missing tx packets to limit the number of prints */
+	u32 print_once;
+	/* Save the last jiffies to detect missing tx packets
+	 *
+	 * sets to non zero value on ena_start_xmit and set to zero on
+	 * napi and timer_Service_routine.
+	 *
+	 * while this value is not protected by lock,
+	 * a given packet is not expected to be handled by ena_start_xmit
+	 * and by napi/timer_service at the same time.
+	 */
 	unsigned long last_jiffies;
 	struct ena_com_buf bufs[ENA_PKT_MAX_BUFS];
 } ____cacheline_aligned;
@@ -170,7 +181,6 @@ struct ena_stats_tx {
 	u64 napi_comp;
 	u64 tx_poll;
 	u64 doorbells;
-	u64 missing_tx_comp;
 	u64 bad_req_id;
 };
 
@@ -184,6 +194,7 @@ struct ena_stats_rx {
 	u64 dma_mapping_err;
 	u64 bad_desc_num;
 	u64 rx_copybreak_pkt;
+	u64 empty_rx_ring;
 };
 
 struct ena_ring {
@@ -231,6 +242,7 @@ struct ena_ring {
 		struct ena_stats_tx tx_stats;
 		struct ena_stats_rx rx_stats;
 	};
+	int empty_rx_queue;
 } ____cacheline_aligned;
 
 struct ena_stats_dev {
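The ena_tx_buffer changes above pair a per-packet jiffies stamp with a print_once flag: the stamp is set on transmit and cleared on completion, and the watchdog warns at most once per stuck buffer instead of flooding the log. A generic sketch of that bookkeeping (my_tx_buf, the helpers and MY_TX_TIMEOUT are hypothetical, not taken from the driver):

#include <linux/types.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

#define MY_TX_TIMEOUT (5 * HZ)	/* assumed timeout for this sketch */

/* Hypothetical per-packet bookkeeping. */
struct my_tx_buf {
	unsigned long last_jiffies;	/* set on submit, cleared on completion */
	u32 print_once;			/* limits the warning to one print per packet */
};

static void my_tx_submit(struct my_tx_buf *buf)
{
	buf->last_jiffies = jiffies;
	buf->print_once = 0;
}

static void my_tx_complete(struct my_tx_buf *buf)
{
	buf->last_jiffies = 0;
}

/* Watchdog helper: returns true if the buffer looks stuck. */
static bool my_tx_is_stuck(struct my_tx_buf *buf, int idx)
{
	if (!buf->last_jiffies ||
	    !time_is_before_jiffies(buf->last_jiffies + MY_TX_TIMEOUT))
		return false;

	if (!buf->print_once)
		pr_notice("tx buffer %d not completed on time\n", idx);
	buf->print_once = 1;

	return true;
}

Because the submit path and the watchdog never handle the same packet at the same time, the fields can be updated without a lock, which is the assumption the driver's comment spells out.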