Diffstat (limited to 'drivers/net')
29 files changed, 313 insertions(+), 252 deletions(-)
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index 17f12c18d225..7635c38e77dd 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -459,12 +459,12 @@ static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_qu
 	cqe = &admin_queue->cq.entries[head_masked];
 
 	/* Go over all the completions */
-	while ((cqe->acq_common_descriptor.flags &
+	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
 			ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
 		/* Do not read the rest of the completion entry before the
 		 * phase bit was validated
 		 */
-		rmb();
+		dma_rmb();
 		ena_com_handle_single_admin_completion(admin_queue, cqe);
 
 		head_masked++;
@@ -627,17 +627,10 @@ static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
 	mmio_read_reg |= mmio_read->seq_num &
 			ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
 
-	/* make sure read_resp->req_id get updated before the hw can write
-	 * there
-	 */
-	wmb();
-
-	writel_relaxed(mmio_read_reg,
-		       ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
+	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
 
-	mmiowb();
 	for (i = 0; i < timeout; i++) {
-		if (read_resp->req_id == mmio_read->seq_num)
+		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
 			break;
 
 		udelay(1);
@@ -1796,8 +1789,13 @@ void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
 	aenq_common = &aenq_e->aenq_common_desc;
 
 	/* Go over all the events */
-	while ((aenq_common->flags & ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) ==
-	       phase) {
+	while ((READ_ONCE(aenq_common->flags) &
+		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
+		/* Make sure the phase bit (ownership) is as expected before
+		 * reading the rest of the descriptor.
+		 */
+		dma_rmb();
+
 		pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
 			 aenq_common->group, aenq_common->syndrom,
 			 (u64)aenq_common->timestamp_low +
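The hunks above converge on one pattern: READ_ONCE() keeps the compiler from caching or tearing the phase-bit load across loop iterations, and dma_rmb() orders that load against reads of the rest of the device-written descriptor, replacing the costlier rmb(). A minimal sketch of the consumer side, using a hypothetical descriptor layout and handler (not the driver's exact structures):

struct cdesc {
	u16 flags;	/* bit 0: phase/ownership bit toggled by the device */
	u16 req_id;
};

static void consume_completions(struct cdesc *ring, u32 mask,
				u32 *head, u8 *phase)
{
	struct cdesc *d = &ring[*head & mask];

	while ((READ_ONCE(d->flags) & 1) == *phase) {
		/* Only after the phase check may the body be read */
		dma_rmb();
		handle_completion(d);	/* hypothetical handler */

		(*head)++;
		if (unlikely((*head & mask) == 0))
			*phase ^= 1;	/* ring wrapped: flip expected phase */
		d = &ring[*head & mask];
	}
}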
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.c b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
index ea149c134e15..1c682b76190f 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.c
@@ -51,6 +51,11 @@ static inline struct ena_eth_io_rx_cdesc_base *ena_com_get_next_rx_cdesc(
 	if (desc_phase != expected_phase)
 		return NULL;
 
+	/* Make sure we read the rest of the descriptor after the phase bit
+	 * has been read
+	 */
+	dma_rmb();
+
 	return cdesc;
 }
 
@@ -493,6 +498,7 @@ int ena_com_tx_comp_req_id_get(struct ena_com_io_cq *io_cq, u16 *req_id)
 	if (cdesc_phase != expected_phase)
 		return -EAGAIN;
 
+	dma_rmb();
 	if (unlikely(cdesc->req_id >= io_cq->q_depth)) {
 		pr_err("Invalid req id %d\n", cdesc->req_id);
 		return -EINVAL;
diff --git a/drivers/net/ethernet/amazon/ena/ena_eth_com.h b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
index 6fdc753d9483..2f7657227cfe 100644
--- a/drivers/net/ethernet/amazon/ena/ena_eth_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_eth_com.h
@@ -107,8 +107,7 @@ static inline int ena_com_sq_empty_space(struct ena_com_io_sq *io_sq)
 	return io_sq->q_depth - 1 - cnt;
 }
 
-static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
-					    bool relaxed)
+static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq)
 {
 	u16 tail;
 
@@ -117,10 +116,7 @@ static inline int ena_com_write_sq_doorbell(struct ena_com_io_sq *io_sq,
 	pr_debug("write submission queue doorbell for queue: %d tail: %d\n",
 		 io_sq->qid, tail);
 
-	if (relaxed)
-		writel_relaxed(tail, io_sq->db_addr);
-	else
-		writel(tail, io_sq->db_addr);
+	writel(tail, io_sq->db_addr);
 
 	return 0;
 }
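Why both the relaxed variant and the explicit barriers could go: in Linux's portable I/O model, writel() already orders prior normal memory writes (the descriptors) before the MMIO doorbell store, which is exactly what the removed wmb()/mmiowb() pairs provided around writel_relaxed(). Sketched side by side, with illustrative names:

/* Equivalent doorbell idioms; writel() carries the ordering itself */
static void ring_db(u32 tail, void __iomem *db)
{
	writel(tail, db);
}

static void ring_db_relaxed(u32 tail, void __iomem *db)
{
	wmb();				/* order descriptor writes ... */
	writel_relaxed(tail, db);	/* ... before the MMIO store */
}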
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index c673ac2df65b..29b5774dd32d 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -76,7 +76,7 @@ MODULE_DEVICE_TABLE(pci, ena_pci_tbl);
 
 static int ena_rss_init_default(struct ena_adapter *adapter);
 static void check_for_admin_com_state(struct ena_adapter *adapter);
-static void ena_destroy_device(struct ena_adapter *adapter);
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful);
 static int ena_restore_device(struct ena_adapter *adapter);
 
 static void ena_tx_timeout(struct net_device *dev)
@@ -461,7 +461,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
 		return -ENOMEM;
 	}
 
-	dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+	dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
 			   DMA_FROM_DEVICE);
 	if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
 		u64_stats_update_begin(&rx_ring->syncp);
@@ -478,7 +478,7 @@ static inline int ena_alloc_rx_page(struct ena_ring *rx_ring,
 	rx_info->page_offset = 0;
 	ena_buf = &rx_info->ena_buf;
 	ena_buf->paddr = dma;
-	ena_buf->len = PAGE_SIZE;
+	ena_buf->len = ENA_PAGE_SIZE;
 
 	return 0;
 }
@@ -495,7 +495,7 @@ static void ena_free_rx_page(struct ena_ring *rx_ring,
 		return;
 	}
 
-	dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+	dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
 		       DMA_FROM_DEVICE);
 
 	__free_page(page);
@@ -551,14 +551,9 @@ static int ena_refill_rx_bufs(struct ena_ring *rx_ring, u32 num)
 			  rx_ring->qid, i, num);
 	}
 
-	if (likely(i)) {
-		/* Add memory barrier to make sure the desc were written before
-		 * issue a doorbell
-		 */
-		wmb();
-		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq, true);
-		mmiowb();
-	}
+	/* ena_com_write_sq_doorbell issues a wmb() */
+	if (likely(i))
+		ena_com_write_sq_doorbell(rx_ring->ena_com_io_sq);
 
 	rx_ring->next_to_use = next_to_use;
 
@@ -916,10 +911,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 	do {
 		dma_unmap_page(rx_ring->dev,
 			       dma_unmap_addr(&rx_info->ena_buf, paddr),
-			       PAGE_SIZE, DMA_FROM_DEVICE);
+			       ENA_PAGE_SIZE, DMA_FROM_DEVICE);
 
 		skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
-				rx_info->page_offset, len, PAGE_SIZE);
+				rx_info->page_offset, len, ENA_PAGE_SIZE);
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "rx skb updated. len %d. data_len %d\n",
@@ -1900,7 +1895,7 @@ static int ena_close(struct net_device *netdev)
 			  "Destroy failure, restarting device\n");
 		ena_dump_stats_to_dmesg(adapter);
 		/* rtnl lock already obtained in dev_ioctl() layer */
-		ena_destroy_device(adapter);
+		ena_destroy_device(adapter, false);
 		ena_restore_device(adapter);
 	}
 
@@ -2112,12 +2107,6 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	tx_ring->next_to_use = ENA_TX_RING_IDX_NEXT(next_to_use,
 		tx_ring->ring_size);
 
-	/* This WMB is aimed to:
-	 * 1 - perform smp barrier before reading next_to_completion
-	 * 2 - make sure the desc were written before trigger DB
-	 */
-	wmb();
-
 	/* stop the queue when no more space available, the packet can have up
 	 * to sgl_size + 2. one for the meta descriptor and one for header
 	 * (if the header is larger than tx_max_header_size).
@@ -2136,10 +2125,11 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	 * stop the queue but meanwhile clean_tx_irq updates
 	 * next_to_completion and terminates.
	 * The queue will remain stopped forever.
-	 * To solve this issue this function perform rmb, check
-	 * the wakeup condition and wake up the queue if needed.
+	 * To solve this issue add a mb() to make sure that
+	 * netif_tx_stop_queue() write is visible before checking if
+	 * there is additional space in the queue.
 	 */
-	smp_rmb();
+	smp_mb();
 
 	if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq)
 	    > ENA_TX_WAKEUP_THRESH) {
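The smp_rmb() was too weak here because the store half of the handshake, netif_tx_stop_queue() marking the queue stopped, also has to be ordered, not just the later read. A condensed sketch of the producer side, assuming the completion path issues the mirror-image barrier before it re-checks the stopped state (enough_space() is a made-up helper standing in for the driver's threshold check):

if (unlikely(!enough_space(tx_ring))) {	/* hypothetical helper */
	netif_tx_stop_queue(txq);

	/* Full barrier: publish the stop before re-reading free space */
	smp_mb();

	if (ena_com_sq_empty_space(tx_ring->ena_com_io_sq) >
	    ENA_TX_WAKEUP_THRESH)
		netif_tx_wake_queue(txq);	/* lost race with clean_tx_irq */
}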
@@ -2151,8 +2141,10 @@ static netdev_tx_t ena_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	}
 
 	if (netif_xmit_stopped(txq) || !skb->xmit_more) {
-		/* trigger the dma engine */
-		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq, false);
+		/* trigger the dma engine. ena_com_write_sq_doorbell()
+		 * has a mb
+		 */
+		ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
 		u64_stats_update_begin(&tx_ring->syncp);
 		tx_ring->tx_stats.doorbells++;
 		u64_stats_update_end(&tx_ring->syncp);
@@ -2550,12 +2542,15 @@ err_disable_msix:
 	return rc;
 }
 
-static void ena_destroy_device(struct ena_adapter *adapter)
+static void ena_destroy_device(struct ena_adapter *adapter, bool graceful)
 {
 	struct net_device *netdev = adapter->netdev;
 	struct ena_com_dev *ena_dev = adapter->ena_dev;
 	bool dev_up;
 
+	if (!test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+		return;
+
 	netif_carrier_off(netdev);
 
 	del_timer_sync(&adapter->timer_service);
@@ -2563,7 +2558,8 @@ static void ena_destroy_device(struct ena_adapter *adapter)
 	dev_up = test_bit(ENA_FLAG_DEV_UP, &adapter->flags);
 	adapter->dev_up_before_reset = dev_up;
 
-	ena_com_set_admin_running_state(ena_dev, false);
+	if (!graceful)
+		ena_com_set_admin_running_state(ena_dev, false);
 
 	if (test_bit(ENA_FLAG_DEV_UP, &adapter->flags))
 		ena_down(adapter);
@@ -2591,6 +2587,7 @@ static void ena_destroy_device(struct ena_adapter *adapter)
 	adapter->reset_reason = ENA_REGS_RESET_NORMAL;
 
 	clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
+	clear_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 }
 
 static int ena_restore_device(struct ena_adapter *adapter)
@@ -2635,6 +2632,7 @@ static int ena_restore_device(struct ena_adapter *adapter)
 		}
 	}
 
+	set_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags);
 	mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
 	dev_err(&pdev->dev, "Device reset completed successfully\n");
 
@@ -2665,7 +2663,7 @@ static void ena_fw_reset_device(struct work_struct *work)
 		return;
 	}
 	rtnl_lock();
-	ena_destroy_device(adapter);
+	ena_destroy_device(adapter, false);
 	ena_restore_device(adapter);
 	rtnl_unlock();
 }
@@ -3409,30 +3407,24 @@ static void ena_remove(struct pci_dev *pdev)
 		netdev->rx_cpu_rmap = NULL;
 	}
 #endif /* CONFIG_RFS_ACCEL */
-
-	unregister_netdev(netdev);
 	del_timer_sync(&adapter->timer_service);
 
 	cancel_work_sync(&adapter->reset_task);
 
-	/* Reset the device only if the device is running. */
-	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
-		ena_com_dev_reset(ena_dev, adapter->reset_reason);
+	unregister_netdev(netdev);
 
-	ena_free_mgmnt_irq(adapter);
+	/* If the device is running then we want to make sure the device will be
+	 * reset to make sure no more events will be issued by the device.
+	 */
+	if (test_bit(ENA_FLAG_DEVICE_RUNNING, &adapter->flags))
+		set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 
-	ena_disable_msix(adapter);
+	rtnl_lock();
+	ena_destroy_device(adapter, true);
+	rtnl_unlock();
 
 	free_netdev(netdev);
 
-	ena_com_mmio_reg_read_request_destroy(ena_dev);
-
-	ena_com_abort_admin_commands(ena_dev);
-
-	ena_com_wait_for_abort_completion(ena_dev);
-
-	ena_com_admin_destroy(ena_dev);
-
 	ena_com_rss_destroy(ena_dev);
 
 	ena_com_delete_debug_area(ena_dev);
@@ -3467,7 +3459,7 @@ static int ena_suspend(struct pci_dev *pdev, pm_message_t state)
 			"ignoring device reset request as the device is being suspended\n");
 		clear_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
 	}
-	ena_destroy_device(adapter);
+	ena_destroy_device(adapter, true);
 	rtnl_unlock();
 	return 0;
 }
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index f1972b5ab650..7c7ae56c52cf 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -355,4 +355,15 @@ void ena_dump_stats_to_buf(struct ena_adapter *adapter, u8 *buf);
 
 int ena_get_sset_count(struct net_device *netdev, int sset);
 
+/* The ENA buffer length field is 16 bit long. So when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB limit the buffer length to
+ * 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
 #endif /* !(ENA_H) */
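The clamp exists because the descriptor's length field is 16 bits wide and a full 64kB page does not fit: 0x10000 truncates to 0. A stand-alone demonstration of the overflow the macro avoids:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_64k = 64 * 1024;

	/* 0x10000 truncated to 16 bits is 0 -- a zero-length buffer */
	printf("64kB in a u16 field: %u\n", (uint16_t)page_64k);
	/* 16kB still fits, and comfortably exceeds the ~9kB max frame */
	printf("16kB in a u16 field: %u\n", (uint16_t)(16 * 1024));
	return 0;
}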
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.c b/drivers/net/ethernet/emulex/benet/be_cmds.c
index ff92ab1daeb8..1e9d882c04ef 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.c
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.c
@@ -4500,7 +4500,7 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 				port_res->max_vfs += le16_to_cpu(pcie->num_vfs);
 			}
 		}
-		return status;
+		goto err;
 	}
 
 	pcie = be_get_pcie_desc(resp->func_param, desc_count,
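The one-line change matters because functions like this acquire a command lock on entry and release it only at the cleanup tail; an early `return status` skipped that. The general shape of the fix, with hypothetical helper names standing in for be_cmds.c's details:

static int get_profile_config(struct be_adapter *adapter)
{
	int status = acquire_cmd_lock(adapter);	/* hypothetical */

	if (status)
		return status;		/* nothing held yet */

	status = issue_cmd(adapter);	/* hypothetical */
	if (status)
		goto err;		/* never a bare return from here on */

	/* ... parse the response ... */
err:
	release_cmd_lock(adapter);	/* every exit path releases the lock */
	return status;
}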
diff --git a/drivers/net/ethernet/lantiq_etop.c b/drivers/net/ethernet/lantiq_etop.c
index 7a637b51c7d2..e08301d833e2 100644
--- a/drivers/net/ethernet/lantiq_etop.c
+++ b/drivers/net/ethernet/lantiq_etop.c
@@ -274,6 +274,7 @@ ltq_etop_hw_init(struct net_device *dev)
 		struct ltq_etop_chan *ch = &priv->ch[i];
 
 		ch->idx = ch->dma.nr = i;
+		ch->dma.dev = &priv->pdev->dev;
 
 		if (IS_TX(i)) {
 			ltq_dma_alloc_tx(&ch->dma);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index b994b80d5714..37ba7c78859d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -132,11 +132,11 @@ void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
 	delayed_event_start(priv);
 
 	dev_ctx->context = intf->add(dev);
-	set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
-	if (intf->attach)
-		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
-
 	if (dev_ctx->context) {
+		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
+		if (intf->attach)
+			set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
+
 		spin_lock_irq(&priv->ctx_lock);
 		list_add_tail(&dev_ctx->list, &priv->ctx_list);
 
@@ -211,12 +211,17 @@ static void mlx5_attach_interface(struct mlx5_interface *intf, struct mlx5_priv
 	if (intf->attach) {
 		if (test_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state))
 			goto out;
-		intf->attach(dev, dev_ctx->context);
+		if (intf->attach(dev, dev_ctx->context))
+			goto out;
+
 		set_bit(MLX5_INTERFACE_ATTACHED, &dev_ctx->state);
 	} else {
 		if (test_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state))
 			goto out;
 		dev_ctx->context = intf->add(dev);
+		if (!dev_ctx->context)
+			goto out;
+
 		set_bit(MLX5_INTERFACE_ADDED, &dev_ctx->state);
 	}
 
@@ -391,16 +396,17 @@ void mlx5_remove_dev_by_protocol(struct mlx5_core_dev *dev, int protocol)
 	}
 }
 
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
 {
-	return (u16)((dev->pdev->bus->number << 8) |
+	return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+		     (dev->pdev->bus->number << 8) |
 		     PCI_SLOT(dev->pdev->devfn));
 }
 
 /* Must be called with intf_mutex held */
 struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
 {
-	u16 pci_id = mlx5_gen_pci_id(dev);
+	u32 pci_id = mlx5_gen_pci_id(dev);
 	struct mlx5_core_dev *res = NULL;
 	struct mlx5_core_dev *tmp_dev;
 	struct mlx5_priv *priv;
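Widening the id to u32 and folding in the PCI domain fixes a real collision: two adapters in different PCI domains but at the same bus/slot used to hash to the same 16-bit id, so mlx5_get_next_phys_dev() could pair the wrong devices. An illustrative recomputation (same logic as the hunk, shown in isolation):

static u32 gen_pci_id(struct pci_dev *pdev)
{
	return (u32)((pci_domain_nr(pdev->bus) << 16) |
		     (pdev->bus->number << 8) |
		     PCI_SLOT(pdev->devfn));
}

/* 0000:07:00.0 -> 0x00000700 and 0001:07:00.0 -> 0x00010700; the old
 * u16 form mapped both to 0x0700 and confused the two adapters.
 */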
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
index 75bb981e00b7..41cde926cdab 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_fs_ethtool.c
@@ -191,7 +191,7 @@ set_udp(void *headers_c, void *headers_v, __be16 psrc_m, __be16 psrc_v,
 {
 	if (psrc_m) {
 		MLX5E_FTE_SET(headers_c, udp_sport, 0xffff);
-		MLX5E_FTE_SET(headers_c, udp_sport, ntohs(psrc_v));
+		MLX5E_FTE_SET(headers_v, udp_sport, ntohs(psrc_v));
 	}
 
 	if (pdst_m) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index f72b5c9dcfe9..3028e8d90920 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -663,6 +663,7 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw, int nvports)
 	if (err)
 		goto miss_rule_err;
 
+	kvfree(flow_group_in);
 	return 0;
 
 miss_rule_err:
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
index f418541af7cf..37d114c668b7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c
@@ -1578,6 +1578,33 @@ static u64 matched_fgs_get_version(struct list_head *match_head)
 	return version;
 }
 
+static struct fs_fte *
+lookup_fte_locked(struct mlx5_flow_group *g,
+		  u32 *match_value,
+		  bool take_write)
+{
+	struct fs_fte *fte_tmp;
+
+	if (take_write)
+		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+	else
+		nested_down_read_ref_node(&g->node, FS_LOCK_PARENT);
+	fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, match_value,
+					 rhash_fte);
+	if (!fte_tmp || !tree_get_node(&fte_tmp->node)) {
+		fte_tmp = NULL;
+		goto out;
+	}
+
+	nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
+out:
+	if (take_write)
+		up_write_ref_node(&g->node);
+	else
+		up_read_ref_node(&g->node);
+	return fte_tmp;
+}
+
 static struct mlx5_flow_handle *
 try_add_to_existing_fg(struct mlx5_flow_table *ft,
 		       struct list_head *match_head,
@@ -1600,10 +1627,6 @@ try_add_to_existing_fg(struct mlx5_flow_table *ft,
 	if (IS_ERR(fte))
 		return ERR_PTR(-ENOMEM);
 
-	list_for_each_entry(iter, match_head, list) {
-		nested_down_read_ref_node(&iter->g->node, FS_LOCK_PARENT);
-	}
-
 search_again_locked:
 	version = matched_fgs_get_version(match_head);
 	/* Try to find a fg that already contains a matching fte */
@@ -1611,20 +1634,9 @@ search_again_locked:
 		struct fs_fte *fte_tmp;
 
 		g = iter->g;
-		fte_tmp = rhashtable_lookup_fast(&g->ftes_hash, spec->match_value,
-						 rhash_fte);
-		if (!fte_tmp || !tree_get_node(&fte_tmp->node))
+		fte_tmp = lookup_fte_locked(g, spec->match_value, take_write);
+		if (!fte_tmp)
 			continue;
-
-		nested_down_write_ref_node(&fte_tmp->node, FS_LOCK_CHILD);
-		if (!take_write) {
-			list_for_each_entry(iter, match_head, list)
-				up_read_ref_node(&iter->g->node);
-		} else {
-			list_for_each_entry(iter, match_head, list)
-				up_write_ref_node(&iter->g->node);
-		}
-
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte_tmp);
 		up_write_ref_node(&fte_tmp->node);
@@ -1633,19 +1645,6 @@ search_again_locked:
 		return rule;
 	}
 
-	/* No group with matching fte found. Try to add a new fte to any
-	 * matching fg.
-	 */
-
-	if (!take_write) {
-		list_for_each_entry(iter, match_head, list)
-			up_read_ref_node(&iter->g->node);
-		list_for_each_entry(iter, match_head, list)
-			nested_down_write_ref_node(&iter->g->node,
-						   FS_LOCK_PARENT);
-		take_write = true;
-	}
-
 	/* Check the ft version, for case that new flow group
 	 * was added while the fgs weren't locked
 	 */
@@ -1657,27 +1656,30 @@ search_again_locked:
 	/* Check the fgs version, for case the new FTE with the
 	 * same values was added while the fgs weren't locked
 	 */
-	if (version != matched_fgs_get_version(match_head))
+	if (version != matched_fgs_get_version(match_head)) {
+		take_write = true;
 		goto search_again_locked;
+	}
 
 	list_for_each_entry(iter, match_head, list) {
 		g = iter->g;
 
 		if (!g->node.active)
 			continue;
+
+		nested_down_write_ref_node(&g->node, FS_LOCK_PARENT);
+
 		err = insert_fte(g, fte);
 		if (err) {
+			up_write_ref_node(&g->node);
 			if (err == -ENOSPC)
 				continue;
-			list_for_each_entry(iter, match_head, list)
-				up_write_ref_node(&iter->g->node);
 			kmem_cache_free(steering->ftes_cache, fte);
 			return ERR_PTR(err);
 		}
 
 		nested_down_write_ref_node(&fte->node, FS_LOCK_CHILD);
-		list_for_each_entry(iter, match_head, list)
-			up_write_ref_node(&iter->g->node);
+		up_write_ref_node(&g->node);
 		rule = add_rule_fg(g, spec->match_value,
 				   flow_act, dest, dest_num, fte);
 		up_write_ref_node(&fte->node);
@@ -1686,8 +1688,6 @@ search_again_locked:
 	}
 	rule = ERR_PTR(-ENOENT);
 out:
-	list_for_each_entry(iter, match_head, list)
-		up_write_ref_node(&iter->g->node);
 	kmem_cache_free(steering->ftes_cache, fte);
 	return rule;
 }
@@ -1726,6 +1726,8 @@ search_again_locked:
 	if (err) {
 		if (take_write)
 			up_write_ref_node(&ft->node);
+		else
+			up_read_ref_node(&ft->node);
 		return ERR_PTR(err);
 	}
 
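The refactor narrows the locking scope: instead of read-locking every candidate group for the whole search (and re-taking them all as write locks on the slow path), lookup_fte_locked() holds exactly one group lock around its own hash lookup, and the version counters catch anything that changed in between. The retry loop then reduces to roughly this condensed control flow (error handling elided):

search_again_locked:
	version = matched_fgs_get_version(match_head);
	list_for_each_entry(iter, match_head, list) {
		/* one group locked at a time; fte_tmp returns write-locked */
		fte_tmp = lookup_fte_locked(iter->g, spec->match_value,
					    take_write);
		if (!fte_tmp)
			continue;
		rule = add_rule_fg(iter->g, spec->match_value, flow_act,
				   dest, dest_num, fte_tmp);
		up_write_ref_node(&fte_tmp->node);
		return rule;
	}

	/* nothing found: if anyone raced us, retry with write locks */
	if (version != matched_fgs_get_version(match_head)) {
		take_write = true;
		goto search_again_locked;
	}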
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/health.c b/drivers/net/ethernet/mellanox/mlx5/core/health.c
index d39b0b7011b2..9f39aeca863f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/health.c
@@ -331,9 +331,17 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev)
 	add_timer(&health->timer);
 }
 
-void mlx5_stop_health_poll(struct mlx5_core_dev *dev)
+void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health)
 {
 	struct mlx5_core_health *health = &dev->priv.health;
+	unsigned long flags;
+
+	if (disable_health) {
+		spin_lock_irqsave(&health->wq_lock, flags);
+		set_bit(MLX5_DROP_NEW_HEALTH_WORK, &health->flags);
+		set_bit(MLX5_DROP_NEW_RECOVERY_WORK, &health->flags);
+		spin_unlock_irqrestore(&health->wq_lock, flags);
+	}
 
 	del_timer_sync(&health->timer);
 }
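The new flag closes a shutdown race: the health timer could fire between teardown steps and queue work against a device that is going away. Setting both drop bits under wq_lock before del_timer_sync() prevents that. The choice at the main.c call sites below is deliberate, roughly:

/* Final teardown: no new health or recovery work may be queued */
mlx5_stop_health_poll(dev, true);

/* Poll stops but the device lives on (e.g. fast unload passes false),
 * so recovery work remains possible.
 */
mlx5_stop_health_poll(dev, false);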
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index cf3e4a659052..b5e9f664fc66 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -878,8 +878,10 @@ static int mlx5_pci_init(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	priv->numa_node = dev_to_node(&dev->pdev->dev);
 
 	priv->dbg_root = debugfs_create_dir(dev_name(&pdev->dev), mlx5_debugfs_root);
-	if (!priv->dbg_root)
+	if (!priv->dbg_root) {
+		dev_err(&pdev->dev, "Cannot create debugfs dir, aborting\n");
 		return -ENOMEM;
+	}
 
 	err = mlx5_pci_enable_device(dev);
 	if (err) {
@@ -928,7 +930,7 @@ static void mlx5_pci_close(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
 	pci_clear_master(dev->pdev);
 	release_bar(dev->pdev);
 	mlx5_pci_disable_device(dev);
-	debugfs_remove(priv->dbg_root);
+	debugfs_remove_recursive(priv->dbg_root);
 }
 
 static int mlx5_init_once(struct mlx5_core_dev *dev, struct mlx5_priv *priv)
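Two debugfs fixes in one file: creation failure is now reported, and teardown switches to the recursive remover because the directory accumulates per-device children over its lifetime, which plain debugfs_remove() on the root dentry would leave behind in this kernel. The pairing rule in isolation (directory name illustrative):

struct dentry *root = debugfs_create_dir("example", NULL);

if (!root)
	return -ENOMEM;
/* ... debugfs_create_file(..., root, ...) calls during the lifetime ... */
debugfs_remove_recursive(root);	/* tears down the whole subtree */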
@@ -1286,7 +1288,7 @@ err_cleanup_once:
 	mlx5_cleanup_once(dev);
 
 err_stop_poll:
-	mlx5_stop_health_poll(dev);
+	mlx5_stop_health_poll(dev, boot);
 	if (mlx5_cmd_teardown_hca(dev)) {
 		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
 		goto out_err;
@@ -1346,7 +1348,7 @@ static int mlx5_unload_one(struct mlx5_core_dev *dev, struct mlx5_priv *priv,
 	mlx5_free_irq_vectors(dev);
 	if (cleanup)
 		mlx5_cleanup_once(dev);
-	mlx5_stop_health_poll(dev);
+	mlx5_stop_health_poll(dev, cleanup);
 	err = mlx5_cmd_teardown_hca(dev);
 	if (err) {
 		dev_err(&dev->pdev->dev, "tear_down_hca failed, skip cleanup\n");
@@ -1608,7 +1610,7 @@ static int mlx5_try_fast_unload(struct mlx5_core_dev *dev)
 	 * with the HCA, so the health poll is no longer needed.
 	 */
 	mlx5_drain_health_wq(dev);
-	mlx5_stop_health_poll(dev);
+	mlx5_stop_health_poll(dev, false);
 
 	ret = mlx5_cmd_force_teardown_hca(dev);
 	if (ret) {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index c8c315eb5128..68e7f8df2a6d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -39,9 +39,9 @@ u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq)
 	return (u32)wq->fbc.sz_m1 + 1;
 }
 
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq)
 {
-	return (u32)wq->fbc.frag_sz_m1 + 1;
+	return wq->fbc.frag_sz_m1 + 1;
 }
 
 u32 mlx5_cqwq_get_size(struct mlx5_cqwq *wq)
@@ -138,7 +138,7 @@ int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
 		      struct mlx5_wq_ctrl *wq_ctrl)
 {
-	u32 sq_strides_offset;
+	u16 sq_strides_offset;
 	u32 rq_pg_remainder;
 	int err;
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index 2bd4c3184eba..3a1a170bb2d7 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,7 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		       void *wqc, struct mlx5_wq_cyc *wq,
 		       struct mlx5_wq_ctrl *wq_ctrl);
 u32 mlx5_wq_cyc_get_size(struct mlx5_wq_cyc *wq);
-u32 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
+u16 mlx5_wq_cyc_get_frag_size(struct mlx5_wq_cyc *wq);
 
 int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
 		      void *qpc, struct mlx5_wq_qp *wq,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
index 4327487553c5..3589432d1643 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_buffers.c
@@ -337,14 +337,14 @@ static const struct mlxsw_sp_sb_cm mlxsw_sp_sb_cms_egress[] = {
 	MLXSW_SP_SB_CM(1500, 9, 0),
 	MLXSW_SP_SB_CM(1500, 9, 0),
 	MLXSW_SP_SB_CM(1500, 9, 0),
-	MLXSW_SP_SB_CM(0, 0, 0),
-	MLXSW_SP_SB_CM(0, 0, 0),
-	MLXSW_SP_SB_CM(0, 0, 0),
-	MLXSW_SP_SB_CM(0, 0, 0),
-	MLXSW_SP_SB_CM(0, 0, 0),
-	MLXSW_SP_SB_CM(0, 0, 0),
-	MLXSW_SP_SB_CM(0, 0, 0),
-	MLXSW_SP_SB_CM(0, 0, 0),
+	MLXSW_SP_SB_CM(0, 140000, 15),
+	MLXSW_SP_SB_CM(0, 140000, 15),
+	MLXSW_SP_SB_CM(0, 140000, 15),
+	MLXSW_SP_SB_CM(0, 140000, 15),
+	MLXSW_SP_SB_CM(0, 140000, 15),
+	MLXSW_SP_SB_CM(0, 140000, 15),
+	MLXSW_SP_SB_CM(0, 140000, 15),
+	MLXSW_SP_SB_CM(0, 140000, 15),
 	MLXSW_SP_SB_CM(1, 0xff, 0),
 };
 
diff --git a/drivers/net/ethernet/netronome/nfp/flower/action.c b/drivers/net/ethernet/netronome/nfp/flower/action.c
index 9044496803e6..46ba0cf257c6 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/action.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/action.c
@@ -52,6 +52,7 @@
 #define NFP_FL_TUNNEL_CSUM			cpu_to_be16(0x01)
 #define NFP_FL_TUNNEL_KEY			cpu_to_be16(0x04)
 #define NFP_FL_TUNNEL_GENEVE_OPT		cpu_to_be16(0x0800)
+#define NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS	IP_TUNNEL_INFO_TX
 #define NFP_FL_SUPPORTED_IPV4_UDP_TUN_FLAGS	(NFP_FL_TUNNEL_CSUM | \
 						 NFP_FL_TUNNEL_KEY | \
 						 NFP_FL_TUNNEL_GENEVE_OPT)
@@ -741,11 +742,16 @@ nfp_flower_loop_action(struct nfp_app *app, const struct tc_action *a,
 		nfp_fl_push_vlan(psh_v, a);
 		*a_len += sizeof(struct nfp_fl_push_vlan);
 	} else if (is_tcf_tunnel_set(a)) {
+		struct ip_tunnel_info *ip_tun = tcf_tunnel_info(a);
 		struct nfp_repr *repr = netdev_priv(netdev);
+
 		*tun_type = nfp_fl_get_tun_from_act_l4_port(repr->app, a);
 		if (*tun_type == NFP_FL_TUNNEL_NONE)
 			return -EOPNOTSUPP;
 
+		if (ip_tun->mode & ~NFP_FL_SUPPORTED_TUNNEL_INFO_FLAGS)
+			return -EOPNOTSUPP;
+
 		/* Pre-tunnel action is required for tunnel encap.
 		 * This checks for next hop entries on NFP.
 		 * If none, the packet falls back before applying other actions.
diff --git a/drivers/net/ethernet/netronome/nfp/flower/main.h b/drivers/net/ethernet/netronome/nfp/flower/main.h
index 85f8209bf007..81d941ab895c 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/main.h
+++ b/drivers/net/ethernet/netronome/nfp/flower/main.h
@@ -70,6 +70,7 @@ struct nfp_app;
 #define NFP_FL_FEATS_GENEVE		BIT(0)
 #define NFP_FL_NBI_MTU_SETTING		BIT(1)
 #define NFP_FL_FEATS_GENEVE_OPT		BIT(2)
+#define NFP_FL_FEATS_VLAN_PCP		BIT(3)
 #define NFP_FL_FEATS_LAG		BIT(31)
 
 struct nfp_fl_mask_id {
diff --git a/drivers/net/ethernet/netronome/nfp/flower/match.c b/drivers/net/ethernet/netronome/nfp/flower/match.c
index a0c72f277faa..17acb8cc6044 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/match.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/match.c
@@ -56,7 +56,7 @@ nfp_flower_compile_meta_tci(struct nfp_flower_meta_tci *frame,
 						      FLOW_DISSECTOR_KEY_VLAN,
 						      target);
 		/* Populate the tci field. */
-		if (flow_vlan->vlan_id) {
+		if (flow_vlan->vlan_id || flow_vlan->vlan_priority) {
 			tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
 					     flow_vlan->vlan_priority) |
 				  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
diff --git a/drivers/net/ethernet/netronome/nfp/flower/offload.c b/drivers/net/ethernet/netronome/nfp/flower/offload.c
index 2edab01c3beb..bd19624f10cf 100644
--- a/drivers/net/ethernet/netronome/nfp/flower/offload.c
+++ b/drivers/net/ethernet/netronome/nfp/flower/offload.c
@@ -192,6 +192,17 @@ nfp_flower_calculate_key_layers(struct nfp_app *app,
 		key_size += sizeof(struct nfp_flower_mac_mpls);
 	}
 
+	if (dissector_uses_key(flow->dissector, FLOW_DISSECTOR_KEY_VLAN)) {
+		struct flow_dissector_key_vlan *flow_vlan;
+
+		flow_vlan = skb_flow_dissector_target(flow->dissector,
+						      FLOW_DISSECTOR_KEY_VLAN,
+						      flow->mask);
+		if (!(priv->flower_ext_feats & NFP_FL_FEATS_VLAN_PCP) &&
+		    flow_vlan->vlan_priority)
+			return -EOPNOTSUPP;
+	}
+
 	if (dissector_uses_key(flow->dissector,
 			       FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
 		struct flow_dissector_key_ipv4_addrs *mask_ipv4 = NULL;
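Together the three flower hunks gate VLAN-priority matching on firmware support and stop silently ignoring it: PCP lives in the top bits of the TCI, so a PCP-only rule has vlan_id == 0 and used to skip TCI population entirely. An illustrative recomputation using the names from the hunks (FIELD_PREP comes from linux/bitfield.h):

u16 tmp_tci = 0;

if (flow_vlan->vlan_id || flow_vlan->vlan_priority)
	tmp_tci = FIELD_PREP(NFP_FLOWER_MASK_VLAN_PRIO,
			     flow_vlan->vlan_priority) |
		  FIELD_PREP(NFP_FLOWER_MASK_VLAN_VID,
			     flow_vlan->vlan_id);
/* e.g. PCP 3, VID 0: the old predicate left tmp_tci at 0 and the
 * priority constraint was dropped; the new one encodes it, and older
 * firmware without NFP_FL_FEATS_VLAN_PCP now rejects the rule instead.
 */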
diff --git a/drivers/net/ethernet/qualcomm/qca_7k.c b/drivers/net/ethernet/qualcomm/qca_7k.c
index ffe7a16bdfc8..6c8543fb90c0 100644
--- a/drivers/net/ethernet/qualcomm/qca_7k.c
+++ b/drivers/net/ethernet/qualcomm/qca_7k.c
@@ -45,34 +45,33 @@ qcaspi_read_register(struct qcaspi *qca, u16 reg, u16 *result)
 {
 	__be16 rx_data;
 	__be16 tx_data;
-	struct spi_transfer *transfer;
-	struct spi_message *msg;
+	struct spi_transfer transfer[2];
+	struct spi_message msg;
 	int ret;
 
+	memset(transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
 	tx_data = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_INTERNAL | reg);
+	*result = 0;
+
+	transfer[0].tx_buf = &tx_data;
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].rx_buf = &rx_data;
+	transfer[1].len = QCASPI_CMD_LEN;
+
+	spi_message_add_tail(&transfer[0], &msg);
 
 	if (qca->legacy_mode) {
-		msg = &qca->spi_msg1;
-		transfer = &qca->spi_xfer1;
-		transfer->tx_buf = &tx_data;
-		transfer->rx_buf = NULL;
-		transfer->len = QCASPI_CMD_LEN;
-		spi_sync(qca->spi_dev, msg);
-	} else {
-		msg = &qca->spi_msg2;
-		transfer = &qca->spi_xfer2[0];
-		transfer->tx_buf = &tx_data;
-		transfer->rx_buf = NULL;
-		transfer->len = QCASPI_CMD_LEN;
-		transfer = &qca->spi_xfer2[1];
+		spi_sync(qca->spi_dev, &msg);
+		spi_message_init(&msg);
 	}
-	transfer->tx_buf = NULL;
-	transfer->rx_buf = &rx_data;
-	transfer->len = QCASPI_CMD_LEN;
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
 	if (!ret)
-		ret = msg->status;
+		ret = msg.status;
 
 	if (ret)
 		qcaspi_spi_error(qca);
@@ -86,35 +85,32 @@ int
 qcaspi_write_register(struct qcaspi *qca, u16 reg, u16 value)
 {
 	__be16 tx_data[2];
-	struct spi_transfer *transfer;
-	struct spi_message *msg;
+	struct spi_transfer transfer[2];
+	struct spi_message msg;
 	int ret;
 
+	memset(&transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
 	tx_data[0] = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_INTERNAL | reg);
 	tx_data[1] = cpu_to_be16(value);
 
+	transfer[0].tx_buf = &tx_data[0];
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].tx_buf = &tx_data[1];
+	transfer[1].len = QCASPI_CMD_LEN;
+
+	spi_message_add_tail(&transfer[0], &msg);
 	if (qca->legacy_mode) {
-		msg = &qca->spi_msg1;
-		transfer = &qca->spi_xfer1;
-		transfer->tx_buf = &tx_data[0];
-		transfer->rx_buf = NULL;
-		transfer->len = QCASPI_CMD_LEN;
-		spi_sync(qca->spi_dev, msg);
-	} else {
-		msg = &qca->spi_msg2;
-		transfer = &qca->spi_xfer2[0];
-		transfer->tx_buf = &tx_data[0];
-		transfer->rx_buf = NULL;
-		transfer->len = QCASPI_CMD_LEN;
-		transfer = &qca->spi_xfer2[1];
+		spi_sync(qca->spi_dev, &msg);
+		spi_message_init(&msg);
 	}
-	transfer->tx_buf = &tx_data[1];
-	transfer->rx_buf = NULL;
-	transfer->len = QCASPI_CMD_LEN;
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
 	if (!ret)
-		ret = msg->status;
+		ret = msg.status;
 
 	if (ret)
 		qcaspi_spi_error(qca);
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.c b/drivers/net/ethernet/qualcomm/qca_spi.c
index 206f0266463e..66b775d462fd 100644
--- a/drivers/net/ethernet/qualcomm/qca_spi.c
+++ b/drivers/net/ethernet/qualcomm/qca_spi.c
@@ -99,22 +99,24 @@ static u32
 qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 {
 	__be16 cmd;
-	struct spi_message *msg = &qca->spi_msg2;
-	struct spi_transfer *transfer = &qca->spi_xfer2[0];
+	struct spi_message msg;
+	struct spi_transfer transfer[2];
 	int ret;
 
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
 	cmd = cpu_to_be16(QCA7K_SPI_WRITE | QCA7K_SPI_EXTERNAL);
-	transfer->tx_buf = &cmd;
-	transfer->rx_buf = NULL;
-	transfer->len = QCASPI_CMD_LEN;
-	transfer = &qca->spi_xfer2[1];
-	transfer->tx_buf = src;
-	transfer->rx_buf = NULL;
-	transfer->len = len;
+	transfer[0].tx_buf = &cmd;
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].tx_buf = src;
+	transfer[1].len = len;
 
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer[0], &msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
-	if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+	if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
 		qcaspi_spi_error(qca);
 		return 0;
 	}
@@ -125,17 +127,20 @@ qcaspi_write_burst(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 {
-	struct spi_message *msg = &qca->spi_msg1;
-	struct spi_transfer *transfer = &qca->spi_xfer1;
+	struct spi_message msg;
+	struct spi_transfer transfer;
 	int ret;
 
-	transfer->tx_buf = src;
-	transfer->rx_buf = NULL;
-	transfer->len = len;
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
+	transfer.tx_buf = src;
+	transfer.len = len;
 
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer, &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
-	if (ret || (msg->actual_length != len)) {
+	if (ret || (msg.actual_length != len)) {
 		qcaspi_spi_error(qca);
 		return 0;
 	}
@@ -146,23 +151,25 @@ qcaspi_write_legacy(struct qcaspi *qca, u8 *src, u32 len)
 static u32
 qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 {
-	struct spi_message *msg = &qca->spi_msg2;
+	struct spi_message msg;
 	__be16 cmd;
-	struct spi_transfer *transfer = &qca->spi_xfer2[0];
+	struct spi_transfer transfer[2];
 	int ret;
 
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
+
 	cmd = cpu_to_be16(QCA7K_SPI_READ | QCA7K_SPI_EXTERNAL);
-	transfer->tx_buf = &cmd;
-	transfer->rx_buf = NULL;
-	transfer->len = QCASPI_CMD_LEN;
-	transfer = &qca->spi_xfer2[1];
-	transfer->tx_buf = NULL;
-	transfer->rx_buf = dst;
-	transfer->len = len;
+	transfer[0].tx_buf = &cmd;
+	transfer[0].len = QCASPI_CMD_LEN;
+	transfer[1].rx_buf = dst;
+	transfer[1].len = len;
 
-	ret = spi_sync(qca->spi_dev, msg);
+	spi_message_add_tail(&transfer[0], &msg);
+	spi_message_add_tail(&transfer[1], &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
-	if (ret || (msg->actual_length != QCASPI_CMD_LEN + len)) {
+	if (ret || (msg.actual_length != QCASPI_CMD_LEN + len)) {
 		qcaspi_spi_error(qca);
 		return 0;
 	}
@@ -173,17 +180,20 @@ qcaspi_read_burst(struct qcaspi *qca, u8 *dst, u32 len)
 static u32
 qcaspi_read_legacy(struct qcaspi *qca, u8 *dst, u32 len)
 {
-	struct spi_message *msg = &qca->spi_msg1;
-	struct spi_transfer *transfer = &qca->spi_xfer1;
+	struct spi_message msg;
+	struct spi_transfer transfer;
 	int ret;
 
-	transfer->tx_buf = NULL;
-	transfer->rx_buf = dst;
-	transfer->len = len;
+	memset(&transfer, 0, sizeof(transfer));
+	spi_message_init(&msg);
 
-	ret = spi_sync(qca->spi_dev, msg);
+	transfer.rx_buf = dst;
+	transfer.len = len;
 
-	if (ret || (msg->actual_length != len)) {
+	spi_message_add_tail(&transfer, &msg);
+	ret = spi_sync(qca->spi_dev, &msg);
+
+	if (ret || (msg.actual_length != len)) {
 		qcaspi_spi_error(qca);
 		return 0;
 	}
@@ -195,19 +205,23 @@ static int
 qcaspi_tx_cmd(struct qcaspi *qca, u16 cmd)
 {
 	__be16 tx_data;
-	struct spi_message *msg = &qca->spi_msg1;
-	struct spi_transfer *transfer = &qca->spi_xfer1;
+	struct spi_message msg;
+	struct spi_transfer transfer;
 	int ret;
 
+	memset(&transfer, 0, sizeof(transfer));
+
+	spi_message_init(&msg);
+
 	tx_data = cpu_to_be16(cmd);
-	transfer->len = sizeof(tx_data);
-	transfer->tx_buf = &tx_data;
-	transfer->rx_buf = NULL;
+	transfer.len = sizeof(cmd);
+	transfer.tx_buf = &tx_data;
+	spi_message_add_tail(&transfer, &msg);
 
-	ret = spi_sync(qca->spi_dev, msg);
+	ret = spi_sync(qca->spi_dev, &msg);
 
 	if (!ret)
-		ret = msg->status;
+		ret = msg.status;
 
 	if (ret)
 		qcaspi_spi_error(qca);
@@ -835,16 +849,6 @@ qcaspi_netdev_setup(struct net_device *dev)
 	qca = netdev_priv(dev);
 	memset(qca, 0, sizeof(struct qcaspi));
 
-	memset(&qca->spi_xfer1, 0, sizeof(struct spi_transfer));
-	memset(&qca->spi_xfer2, 0, sizeof(struct spi_transfer) * 2);
-
-	spi_message_init(&qca->spi_msg1);
-	spi_message_add_tail(&qca->spi_xfer1, &qca->spi_msg1);
-
-	spi_message_init(&qca->spi_msg2);
-	spi_message_add_tail(&qca->spi_xfer2[0], &qca->spi_msg2);
-	spi_message_add_tail(&qca->spi_xfer2[1], &qca->spi_msg2);
-
 	memset(&qca->txr, 0, sizeof(qca->txr));
 	qca->txr.count = TX_RING_MAX_LEN;
 }
diff --git a/drivers/net/ethernet/qualcomm/qca_spi.h b/drivers/net/ethernet/qualcomm/qca_spi.h index fc4beb1b32d1..fc0e98726b36 100644 --- a/drivers/net/ethernet/qualcomm/qca_spi.h +++ b/drivers/net/ethernet/qualcomm/qca_spi.h | |||
@@ -83,11 +83,6 @@ struct qcaspi { | |||
83 | struct tx_ring txr; | 83 | struct tx_ring txr; |
84 | struct qcaspi_stats stats; | 84 | struct qcaspi_stats stats; |
85 | 85 | ||
86 | struct spi_message spi_msg1; | ||
87 | struct spi_message spi_msg2; | ||
88 | struct spi_transfer spi_xfer1; | ||
89 | struct spi_transfer spi_xfer2[2]; | ||
90 | |||
91 | u8 *rx_buffer; | 86 | u8 *rx_buffer; |
92 | u32 buffer_size; | 87 | u32 buffer_size; |
93 | u8 sync; | 88 | u8 sync; |
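The qca_spi hunks above replace long-lived `spi_message`/`spi_transfer` fields in `struct qcaspi` with per-call stack instances, so every transaction starts from a freshly initialized message instead of inheriting state from a previous (possibly failed) transfer. A minimal sketch of the on-stack pattern follows, using the standard kernel SPI API; the function and parameter names are hypothetical, not taken from the driver:

```c
#include <linux/errno.h>
#include <linux/spi/spi.h>
#include <linux/string.h>

/* Sketch: a two-part SPI read (command word + payload) built entirely
 * on the stack, mirroring the shape of the patched qcaspi_read_burst().
 */
static int spi_cmd_read(struct spi_device *spi, __be16 *cmd, u8 *dst, u32 len)
{
	struct spi_message msg;
	struct spi_transfer xfer[2];
	int ret;

	memset(xfer, 0, sizeof(xfer));	/* zero unused fields (speed, bits, ...) */
	spi_message_init(&msg);		/* reset the transfer list and counters */

	xfer[0].tx_buf = cmd;		/* first transfer: the command word */
	xfer[0].len = sizeof(*cmd);
	xfer[1].rx_buf = dst;		/* second transfer: the payload */
	xfer[1].len = len;

	spi_message_add_tail(&xfer[0], &msg);
	spi_message_add_tail(&xfer[1], &msg);

	ret = spi_sync(spi, &msg);	/* blocks until the controller is done */
	if (ret)
		return ret;
	return msg.actual_length == sizeof(*cmd) + len ? 0 : -EIO;
}
```

Because `spi_sync()` does not return until the transfer completes, stack storage is safe here, and dropping the cached `spi_msg1`/`spi_msg2` fields (removed from `struct qcaspi` above) avoids reusing a message the SPI core may have left in an inconsistent state after an error.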
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c index b08d51bf7a20..1d8631303b53 100644 --- a/drivers/net/ethernet/realtek/r8169.c +++ b/drivers/net/ethernet/realtek/r8169.c | |||
@@ -631,7 +631,7 @@ struct rtl8169_tc_offsets { | |||
631 | }; | 631 | }; |
632 | 632 | ||
633 | enum rtl_flag { | 633 | enum rtl_flag { |
634 | RTL_FLAG_TASK_ENABLED, | 634 | RTL_FLAG_TASK_ENABLED = 0, |
635 | RTL_FLAG_TASK_SLOW_PENDING, | 635 | RTL_FLAG_TASK_SLOW_PENDING, |
636 | RTL_FLAG_TASK_RESET_PENDING, | 636 | RTL_FLAG_TASK_RESET_PENDING, |
637 | RTL_FLAG_MAX | 637 | RTL_FLAG_MAX |
@@ -4634,13 +4634,13 @@ static void rtl_hw_start(struct rtl8169_private *tp) | |||
4634 | 4634 | ||
4635 | rtl_set_rx_max_size(tp); | 4635 | rtl_set_rx_max_size(tp); |
4636 | rtl_set_rx_tx_desc_registers(tp); | 4636 | rtl_set_rx_tx_desc_registers(tp); |
4637 | rtl_set_tx_config_registers(tp); | ||
4638 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); | 4637 | RTL_W8(tp, Cfg9346, Cfg9346_Lock); |
4639 | 4638 | ||
4640 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ | 4639 | /* Initially a 10 us delay. Turned it into a PCI commit. - FR */ |
4641 | RTL_R8(tp, IntrMask); | 4640 | RTL_R8(tp, IntrMask); |
4642 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); | 4641 | RTL_W8(tp, ChipCmd, CmdTxEnb | CmdRxEnb); |
4643 | rtl_init_rxcfg(tp); | 4642 | rtl_init_rxcfg(tp); |
4643 | rtl_set_tx_config_registers(tp); | ||
4644 | 4644 | ||
4645 | rtl_set_rx_mode(tp->dev); | 4645 | rtl_set_rx_mode(tp->dev); |
4646 | /* no early-rx interrupts */ | 4646 | /* no early-rx interrupts */ |
@@ -6655,7 +6655,8 @@ static int rtl8169_close(struct net_device *dev) | |||
6655 | rtl8169_update_counters(tp); | 6655 | rtl8169_update_counters(tp); |
6656 | 6656 | ||
6657 | rtl_lock_work(tp); | 6657 | rtl_lock_work(tp); |
6658 | clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); | 6658 | /* Clear all task flags */ |
6659 | bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); | ||
6659 | 6660 | ||
6660 | rtl8169_down(dev); | 6661 | rtl8169_down(dev); |
6661 | rtl_unlock_work(tp); | 6662 | rtl_unlock_work(tp); |
@@ -6838,7 +6839,9 @@ static void rtl8169_net_suspend(struct net_device *dev) | |||
6838 | 6839 | ||
6839 | rtl_lock_work(tp); | 6840 | rtl_lock_work(tp); |
6840 | napi_disable(&tp->napi); | 6841 | napi_disable(&tp->napi); |
6841 | clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags); | 6842 | /* Clear all task flags */ |
6843 | bitmap_zero(tp->wk.flags, RTL_FLAG_MAX); | ||
6844 | |||
6842 | rtl_unlock_work(tp); | 6845 | rtl_unlock_work(tp); |
6843 | 6846 | ||
6844 | rtl_pll_power_down(tp); | 6847 | rtl_pll_power_down(tp); |
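The r8169 close/suspend hunks replace a single `clear_bit(RTL_FLAG_TASK_ENABLED, ...)` with `bitmap_zero(..., RTL_FLAG_MAX)`, so any task flag queued before teardown (not just the enable bit) is discarded. A small sketch of the difference, with a hypothetical flag enum shaped like `rtl_flag`:

```c
#include <linux/bitmap.h>
#include <linux/bitops.h>

enum demo_flag {			/* mirrors the rtl_flag enum shape */
	DEMO_TASK_ENABLED = 0,
	DEMO_TASK_SLOW_PENDING,
	DEMO_TASK_RESET_PENDING,
	DEMO_FLAG_MAX
};

static void demo_teardown(void)
{
	DECLARE_BITMAP(flags, DEMO_FLAG_MAX);

	bitmap_zero(flags, DEMO_FLAG_MAX);
	set_bit(DEMO_TASK_RESET_PENDING, flags);	/* work queued earlier */

	/* clear_bit() drops one flag; the queued reset above survives: */
	clear_bit(DEMO_TASK_ENABLED, flags);

	/* bitmap_zero() drops every flag, including stale pending work: */
	bitmap_zero(flags, DEMO_FLAG_MAX);
}
```

Clearing the whole bitmap on close and suspend ensures a flag set just before the work item was cancelled cannot trigger a task after the device has been brought down.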
diff --git a/drivers/net/ethernet/renesas/Kconfig b/drivers/net/ethernet/renesas/Kconfig index f3f7477043ce..bb0ebdfd4459 100644 --- a/drivers/net/ethernet/renesas/Kconfig +++ b/drivers/net/ethernet/renesas/Kconfig | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Renesas device configuration | 3 | # Renesas device configuration |
3 | # | 4 | # |
diff --git a/drivers/net/ethernet/renesas/Makefile b/drivers/net/ethernet/renesas/Makefile index a05102a7df02..f21ab8c02af0 100644 --- a/drivers/net/ethernet/renesas/Makefile +++ b/drivers/net/ethernet/renesas/Makefile | |||
@@ -1,3 +1,4 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
1 | # | 2 | # |
2 | # Makefile for the Renesas device drivers. | 3 | # Makefile for the Renesas device drivers. |
3 | # | 4 | # |
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c index eede70ec37f8..0721b5c35d91 100644 --- a/drivers/net/ethernet/renesas/ravb_ptp.c +++ b/drivers/net/ethernet/renesas/ravb_ptp.c | |||
@@ -1,13 +1,9 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0+ | ||
1 | /* PTP 1588 clock using the Renesas Ethernet AVB | 2 | /* PTP 1588 clock using the Renesas Ethernet AVB |
2 | * | 3 | * |
3 | * Copyright (C) 2013-2015 Renesas Electronics Corporation | 4 | * Copyright (C) 2013-2015 Renesas Electronics Corporation |
4 | * Copyright (C) 2015 Renesas Solutions Corp. | 5 | * Copyright (C) 2015 Renesas Solutions Corp. |
5 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> | 6 | * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com> |
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | */ | 7 | */ |
12 | 8 | ||
13 | #include "ravb.h" | 9 | #include "ravb.h" |
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c index cb0cc30c3d6a..e3270deecec2 100644 --- a/drivers/net/usb/qmi_wwan.c +++ b/drivers/net/usb/qmi_wwan.c | |||
@@ -967,6 +967,13 @@ static const struct usb_device_id products[] = { | |||
967 | USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), | 967 | USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7), |
968 | .driver_info = (unsigned long)&qmi_wwan_info, | 968 | .driver_info = (unsigned long)&qmi_wwan_info, |
969 | }, | 969 | }, |
970 | { /* Quectel EP06/EG06/EM06 */ | ||
971 | USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0306, | ||
972 | USB_CLASS_VENDOR_SPEC, | ||
973 | USB_SUBCLASS_VENDOR_SPEC, | ||
974 | 0xff), | ||
975 | .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr, | ||
976 | }, | ||
970 | 977 | ||
971 | /* 3. Combined interface devices matching on interface number */ | 978 | /* 3. Combined interface devices matching on interface number */ |
972 | {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ | 979 | {QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */ |
@@ -1255,7 +1262,6 @@ static const struct usb_device_id products[] = { | |||
1255 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ | 1262 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0121, 4)}, /* Quectel EC21 Mini PCIe */ |
1256 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ | 1263 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0191, 4)}, /* Quectel EG91 */ |
1257 | {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ | 1264 | {QMI_FIXED_INTF(0x2c7c, 0x0296, 4)}, /* Quectel BG96 */ |
1258 | {QMI_QUIRK_SET_DTR(0x2c7c, 0x0306, 4)}, /* Quectel EP06 Mini PCIe */ | ||
1259 | 1265 | ||
1260 | /* 4. Gobi 1000 devices */ | 1266 | /* 4. Gobi 1000 devices */ |
1261 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ | 1267 | {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ |
@@ -1331,6 +1337,19 @@ static bool quectel_ec20_detected(struct usb_interface *intf) | |||
1331 | return false; | 1337 | return false; |
1332 | } | 1338 | } |
1333 | 1339 | ||
1340 | static bool quectel_ep06_diag_detected(struct usb_interface *intf) | ||
1341 | { | ||
1342 | struct usb_device *dev = interface_to_usbdev(intf); | ||
1343 | struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc; | ||
1344 | |||
1345 | if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c && | ||
1346 | le16_to_cpu(dev->descriptor.idProduct) == 0x0306 && | ||
1347 | intf_desc.bNumEndpoints == 2) | ||
1348 | return true; | ||
1349 | |||
1350 | return false; | ||
1351 | } | ||
1352 | |||
1334 | static int qmi_wwan_probe(struct usb_interface *intf, | 1353 | static int qmi_wwan_probe(struct usb_interface *intf, |
1335 | const struct usb_device_id *prod) | 1354 | const struct usb_device_id *prod) |
1336 | { | 1355 | { |
@@ -1365,6 +1384,15 @@ static int qmi_wwan_probe(struct usb_interface *intf, | |||
1365 | return -ENODEV; | 1384 | return -ENODEV; |
1366 | } | 1385 | } |
1367 | 1386 | ||
1387 | /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so | ||
1388 | * we need to match on class/subclass/protocol. These values are | ||
1389 | * identical for the diagnostic- and QMI-interface, but bNumEndpoints is | ||
1390 | * different. Ignore the current interface if the number of endpoints | ||
1391 | * matches the number for the diag interface (two). | ||
1392 | */ | ||
1393 | if (quectel_ep06_diag_detected(intf)) | ||
1394 | return -ENODEV; | ||
1395 | |||
1368 | return usbnet_probe(intf, id); | 1396 | return usbnet_probe(intf, id); |
1369 | } | 1397 | } |
1370 | 1398 | ||
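The probe-time rejection above hinges on reading the active altsetting's interface descriptor: class, subclass, and protocol are identical for the diag and QMI functions, so `bNumEndpoints` is the only distinguishing field. A condensed sketch of the shape check, with the VID/PID values taken from the hunk (the assumption that the QMI function exposes more than two endpoints is mine; the patch only tests for the diag value of two):

```c
#include <linux/usb.h>

/* Sketch: reject the Quectel diag function in probe() by descriptor shape. */
static bool is_quectel_ep06_diag(struct usb_interface *intf)
{
	struct usb_device *udev = interface_to_usbdev(intf);

	return le16_to_cpu(udev->descriptor.idVendor) == 0x2c7c &&
	       le16_to_cpu(udev->descriptor.idProduct) == 0x0306 &&
	       intf->cur_altsetting->desc.bNumEndpoints == 2;
}
```

Returning `-ENODEV` from probe for this shape lets the class/subclass/protocol match table stay broad enough to follow the device's dynamic interface numbering while still keeping the driver off the diagnostic port.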
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c index 73f596a90c69..9407acbd19a9 100644 --- a/drivers/net/xen-netfront.c +++ b/drivers/net/xen-netfront.c | |||
@@ -87,8 +87,7 @@ struct netfront_cb { | |||
87 | /* IRQ name is queue name with "-tx" or "-rx" appended */ | 87 | /* IRQ name is queue name with "-tx" or "-rx" appended */ |
88 | #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) | 88 | #define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3) |
89 | 89 | ||
90 | static DECLARE_WAIT_QUEUE_HEAD(module_load_q); | 90 | static DECLARE_WAIT_QUEUE_HEAD(module_wq); |
91 | static DECLARE_WAIT_QUEUE_HEAD(module_unload_q); | ||
92 | 91 | ||
93 | struct netfront_stats { | 92 | struct netfront_stats { |
94 | u64 packets; | 93 | u64 packets; |
@@ -1332,11 +1331,11 @@ static struct net_device *xennet_create_dev(struct xenbus_device *dev) | |||
1332 | netif_carrier_off(netdev); | 1331 | netif_carrier_off(netdev); |
1333 | 1332 | ||
1334 | xenbus_switch_state(dev, XenbusStateInitialising); | 1333 | xenbus_switch_state(dev, XenbusStateInitialising); |
1335 | wait_event(module_load_q, | 1334 | wait_event(module_wq, |
1336 | xenbus_read_driver_state(dev->otherend) != | 1335 | xenbus_read_driver_state(dev->otherend) != |
1337 | XenbusStateClosed && | 1336 | XenbusStateClosed && |
1338 | xenbus_read_driver_state(dev->otherend) != | 1337 | xenbus_read_driver_state(dev->otherend) != |
1339 | XenbusStateUnknown); | 1338 | XenbusStateUnknown); |
1340 | return netdev; | 1339 | return netdev; |
1341 | 1340 | ||
1342 | exit: | 1341 | exit: |
@@ -2010,15 +2009,14 @@ static void netback_changed(struct xenbus_device *dev, | |||
2010 | 2009 | ||
2011 | dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); | 2010 | dev_dbg(&dev->dev, "%s\n", xenbus_strstate(backend_state)); |
2012 | 2011 | ||
2012 | wake_up_all(&module_wq); | ||
2013 | |||
2013 | switch (backend_state) { | 2014 | switch (backend_state) { |
2014 | case XenbusStateInitialising: | 2015 | case XenbusStateInitialising: |
2015 | case XenbusStateInitialised: | 2016 | case XenbusStateInitialised: |
2016 | case XenbusStateReconfiguring: | 2017 | case XenbusStateReconfiguring: |
2017 | case XenbusStateReconfigured: | 2018 | case XenbusStateReconfigured: |
2018 | break; | ||
2019 | |||
2020 | case XenbusStateUnknown: | 2019 | case XenbusStateUnknown: |
2021 | wake_up_all(&module_unload_q); | ||
2022 | break; | 2020 | break; |
2023 | 2021 | ||
2024 | case XenbusStateInitWait: | 2022 | case XenbusStateInitWait: |
@@ -2034,12 +2032,10 @@ static void netback_changed(struct xenbus_device *dev, | |||
2034 | break; | 2032 | break; |
2035 | 2033 | ||
2036 | case XenbusStateClosed: | 2034 | case XenbusStateClosed: |
2037 | wake_up_all(&module_unload_q); | ||
2038 | if (dev->state == XenbusStateClosed) | 2035 | if (dev->state == XenbusStateClosed) |
2039 | break; | 2036 | break; |
2040 | /* Missed the backend's CLOSING state -- fallthrough */ | 2037 | /* Missed the backend's CLOSING state -- fallthrough */ |
2041 | case XenbusStateClosing: | 2038 | case XenbusStateClosing: |
2042 | wake_up_all(&module_unload_q); | ||
2043 | xenbus_frontend_closed(dev); | 2039 | xenbus_frontend_closed(dev); |
2044 | break; | 2040 | break; |
2045 | } | 2041 | } |
@@ -2147,14 +2143,14 @@ static int xennet_remove(struct xenbus_device *dev) | |||
2147 | 2143 | ||
2148 | if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { | 2144 | if (xenbus_read_driver_state(dev->otherend) != XenbusStateClosed) { |
2149 | xenbus_switch_state(dev, XenbusStateClosing); | 2145 | xenbus_switch_state(dev, XenbusStateClosing); |
2150 | wait_event(module_unload_q, | 2146 | wait_event(module_wq, |
2151 | xenbus_read_driver_state(dev->otherend) == | 2147 | xenbus_read_driver_state(dev->otherend) == |
2152 | XenbusStateClosing || | 2148 | XenbusStateClosing || |
2153 | xenbus_read_driver_state(dev->otherend) == | 2149 | xenbus_read_driver_state(dev->otherend) == |
2154 | XenbusStateUnknown); | 2150 | XenbusStateUnknown); |
2155 | 2151 | ||
2156 | xenbus_switch_state(dev, XenbusStateClosed); | 2152 | xenbus_switch_state(dev, XenbusStateClosed); |
2157 | wait_event(module_unload_q, | 2153 | wait_event(module_wq, |
2158 | xenbus_read_driver_state(dev->otherend) == | 2154 | xenbus_read_driver_state(dev->otherend) == |
2159 | XenbusStateClosed || | 2155 | XenbusStateClosed || |
2160 | xenbus_read_driver_state(dev->otherend) == | 2156 | xenbus_read_driver_state(dev->otherend) == |
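The xen-netfront hunks collapse `module_load_q` and `module_unload_q` into a single `module_wq` that is woken unconditionally at the top of `netback_changed()`. This is safe because `wait_event()` re-evaluates its condition after every wakeup, so one shared queue can serve waiters with different predicates; a spurious wakeup just puts a waiter back to sleep. A minimal sketch of the pattern, with hypothetical names and states:

```c
#include <linux/wait.h>

enum demo_state { DEMO_INIT, DEMO_CLOSING, DEMO_CLOSED, DEMO_UNKNOWN };

static DECLARE_WAIT_QUEUE_HEAD(demo_wq);	/* one queue, many predicates */
static enum demo_state demo_state = DEMO_INIT;

/* Notifier: wake every waiter on any transition, as netback_changed()
 * now does with module_wq; waiters whose predicate is still false
 * simply go back to sleep.
 */
static void demo_set_state(enum demo_state s)
{
	demo_state = s;
	wake_up_all(&demo_wq);
}

/* Waiter: the full condition is supplied at the wait site and
 * re-checked after each wakeup, so the load path (waiting to leave
 * Closed/Unknown) and the unload path (waiting to reach them) can
 * share the same queue without missing events.
 */
static void demo_wait_closed(void)
{
	wait_event(demo_wq, demo_state == DEMO_CLOSED ||
			    demo_state == DEMO_UNKNOWN);
}
```

Waking on every state change also removes the earlier scheme's failure mode, where a wakeup had to be issued from exactly the right `case` arms for each of the two queues.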